content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
\name{landmass}
\alias{landmass}
\docType{data}
\title{
Global Coastlines
}
\description{
A \code{SpatialPolygonsDataFrame} with global coastlines.
}
\usage{data("landmass")}
\note{
Most of the time it might be desirable to only flag records far away from the coast as problematic, rather than those close to the coastline (which might be due to disagreements in coastlines, or low GPS uncertainty). For these cases, there is an alternative coastline reference, buffered by one degree, available at \url{https://github.com/azizka/CoordinateCleaner/tree/master/extra_gazetteers}.
}
\source{
\url{http://www.naturalearthdata.com/downloads/10m-physical-vectors/}
}
\examples{
data("landmass")
\dontrun{
plot(landmass)
}
}
\keyword{gazetteers}
|
/man/landmass.Rd
|
no_license
|
azizka/speciesgeocodeR
|
R
| false
| false
| 741
|
rd
|
\name{landmass}
\alias{landmass}
\docType{data}
\title{
Global Coastlines
}
\description{
A \code{SpatialPolygonsDataFrame} with global coastlines.
}
\usage{data("landmass")}
\note{
Most of the time it might be desirable to only flag records far away from the coast as problematic, rather than those close to the coastline (which might be due to disagreements in coastlines, or low GPS uncertainty). For these cases, there is an alternative coastline reference, buffered by one degree, available at \url{https://github.com/azizka/CoordinateCleaner/tree/master/extra_gazetteers}.
}
\source{
\url{http://www.naturalearthdata.com/downloads/10m-physical-vectors/}
}
\examples{
data("landmass")
\dontrun{
plot(landmass)
}
}
\keyword{gazetteers}
|
#' Clean the population 33 phenotype file
#'
#' Reads the "pop_33" sheet of an Excel phenotype workbook, renames the IND
#' column to Genotype, rewrites genotype names of the form "X033.xxx" to
#' "033-xxx", converts every phenotype column to numeric, and writes the
#' cleaned table to a csv file.
#'
#' @title Clean Pop33 phenotypes
#' @param Phenotypes Path to the Excel phenotype file. Defaults to
#'   \code{PhenoFile}, which is assumed to be defined in the calling
#'   environment (e.g. a pipeline plan) -- TODO confirm.
#' @param dest File path the cleaned csv is written to.
#' @return The path the cleaned data was written to (\code{dest}), so that
#'   downstream pipeline steps can depend on the file.
clean_Pop33_phenos <- function(Phenotypes = PhenoFile, dest = here("data",
                                                                   "Pop33_Geno_csvs.csv")) {
  # Read in the phenotype file ("pop_33" sheet only)
  Pop33Pheno <- read_excel(Phenotypes, sheet = "pop_33")
  # Get the column names of the phenotypes so that they can be converted to
  # numerics (everything except the first, identifier, column)
  PhenoCols <- colnames(Pop33Pheno)[2:ncol(Pop33Pheno)]
  # Clean up the genotype names and coerce the phenotype columns to numeric
  Pop33Pheno_Clean <- Pop33Pheno %>%
    rename(Genotype = IND) %>%
    mutate(Genotype = str_replace(Genotype, "X033\\.", "033-"),
           across(all_of(PhenoCols), as.numeric))
  # Write the cleaned phenotypes to a csv file (missing values written as "")
  write_csv(Pop33Pheno_Clean, file = dest, na = "")
  # Return the file path that the data was saved to
  return(dest)
}
|
/R/clean_Pop33_phenos.R
|
no_license
|
jhgille2/OH_33_34_Manuscript
|
R
| false
| false
| 896
|
r
|
#' .. content for \description{} (no empty lines) ..
#'
#' .. content for \details{} ..
#'
#' @title
#' @param Phenotypes
#' @param dest
clean_Pop33_phenos <- function(Phenotypes = PhenoFile, dest = here("data",
"Pop33_Geno_csvs.csv")) {
# Read in the phenotype file
Pop33Pheno <- read_excel(Phenotypes, sheet = "pop_33")
# Get the column names of the phenotypes so that they can be converted to numerics
PhenoCols <- colnames(Pop33Pheno)[2:ncol(Pop33Pheno)]
# Clean up the genotype names
Pop33Pheno_Clean <- Pop33Pheno %>%
rename(Genotype = IND) %>%
mutate(Genotype = str_replace(Genotype, "X033\\.", "033-"),
across(all_of(PhenoCols), as.numeric))
# Write the cleaned phenotypes to a csv file
write_csv(Pop33Pheno_Clean, file = dest, na = "")
# Return the file path that the data was saved to
return(dest)
}
|
# Auto-generated fuzz / valgrind regression driver for
# meteor:::ET0_ThornthwaiteWilmott. The inputs are deliberately extreme
# doubles produced by a fuzzer (note doy is a denormal and temp mixes huge
# positive/negative magnitudes); the script only checks that the call
# completes without crashing and prints the structure of the result.
testlist <- list(doy = 2.84870483508747e-306, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615830703-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 832
|
r
|
testlist <- list(doy = 2.84870483508747e-306, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)

# Define UI for application that draws a histogram.
# NOTE(review): the original UI was not valid R -- the mainPanel() elements
# were not comma-separated, server-side render*() calls were placed directly
# in the UI, and numericInput() was missing its required `value` argument.
# The UI below keeps the same widgets and outputs but is syntactically valid;
# all rendering has been moved into the server function where it belongs.
ui <- fluidPage(
  # Application title
  titlePanel("Old Faithful Geyser Data"),

  # Sidebar with a slider input for number of bins
  sidebarLayout(
    sidebarPanel(
      sliderInput("bins",
                  "Number of bins:",
                  min = 1,
                  max = 50,
                  value = 30)
    ),

    # Show a plot of the generated distribution, plus the extra widgets
    mainPanel(
      plotOutput("distPlot"),
      textInput("name", "What's your name?"),
      numericInput("age", "How old are you?", value = NA),
      textOutput("greeting"),
      # NOTE(review): no server code fills the "mortgage" table yet; the
      # placeholder renders nothing until output$mortgage is defined.
      tableOutput("mortgage"),
      plotOutput("histogram")
    )
  )
)

# Define server logic required to draw a histogram
server <- function(input, output) {
  output$distPlot <- renderPlot({
    # generate bins based on input$bins from ui.R
    x <- faithful[, 2]
    bins <- seq(min(x), max(x), length.out = input$bins + 1)
    # draw the histogram with the specified number of bins
    hist(x, breaks = bins, col = 'darkgray', border = 'white')
  })

  # Greet the user by the name typed into the "name" text input
  output$greeting <- renderText({
    paste0("Hello ", input$name)
  })

  # Histogram of 1000 standard-normal draws for the "histogram" output
  output$histogram <- renderPlot({
    hist(rnorm(1000))
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
/final_project3/app.R
|
no_license
|
benjaminvillaw/The-Sacred-and-The-Profane-
|
R
| false
| false
| 1,601
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Old Faithful Geyser Data"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("bins",
"Number of bins:",
min = 1,
max = 50,
value = 30)
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("distPlot")
textInput("name", "What's your name?")
renderText({
paste0("Hello ", input$name)
})
numericInput("age", "How old are you?")
textOutput("greeting")
tableOutput("mortgage")
renderPlot("histogram", {
hist(rnorm(1000))
})
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
output$distPlot <- renderPlot({
# generate bins based on input$bins from ui.R
x <- faithful[, 2]
bins <- seq(min(x), max(x), length.out = input$bins + 1)
# draw the histogram with the specified number of bins
hist(x, breaks = bins, col = 'darkgray', border = 'white')
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dataset_doc.R
\docType{data}
\name{bisland}
\alias{bisland}
\title{Icelandic coastline, hi-resolution}
\format{A data frame with 19841 observations on the following 2 variables.
\describe{ \item{lat}{a numeric vector} \item{lon}{a numeric
vector} }}
\description{
Icelandic coastline, hi-resolution, approx. 10 times that of \code{island}.
}
\keyword{datasets}
|
/man/bisland.Rd
|
no_license
|
cran/geo
|
R
| false
| false
| 448
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dataset_doc.R
\docType{data}
\name{bisland}
\alias{bisland}
\title{Icelandic coastline, hi-resolution}
\format{A data frame with 19841 observations on the following 2 variables.
\describe{ \item{lat}{a numeric vector} \item{lon}{a numeric
vector} }}
\description{
Icelandic coastline, hi-resolution, approx. 10 times that of \code{island}.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_regres_line.R
\name{add_regres_line}
\alias{add_regres_line}
\title{Add a regression line and confidence band to a plot}
\usage{
add_regres_line(fit, from = NULL, to = NULL, band = TRUE,
ci.col = "#BEBEBEB3", ...)
}
\arguments{
\item{fit}{Object returned by lm. Only models of the form \code{y ~ x} are supported, without expressions in \code{I()} (see Examples), or interactions, or multiple variables.}
\item{from}{Optional (read from fitted model); Draw from this X value.}
\item{to}{Optional (read from fitted model); Draw to this x value.}
\item{band}{Logical. Whether to add a confidence band.}
\item{ci.col}{Colour of the confidence band, if plotted. Defaults to a transparent grey colour.}
\item{\dots}{Further arguments passed to \code{\link{abline_range}}}
}
\description{
Plots a regression line from a simple linear model (of the form \code{lm(y ~ x)}) to a plot. Also plots the confidence band for the mean, which is calculated using \code{\link{predict.lm}}.
}
\examples{
# Add a line across the range of the data from a regression object
with(mtcars, plot(1/wt, mpg, xlim=c(0,0.8), ylim=c(0,40)))
# add_regres_line does not allow I() expressions; yet.
mtcars$inv_wt <- 1 / mtcars$wt
fit <- lm(mpg ~ inv_wt, data=mtcars)
add_regres_line(fit)
# Add the regression line and confidence band behind the data
fit <- lm(height ~ age, data=Loblolly)
with(Loblolly, plot(age, height, pch=19, panel.first=add_regres_line(fit)))
}
|
/man/add_regres_line.Rd
|
no_license
|
RemkoDuursma/nlshelper
|
R
| false
| true
| 1,527
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_regres_line.R
\name{add_regres_line}
\alias{add_regres_line}
\title{Add a regression line and confidence band to a plot}
\usage{
add_regres_line(fit, from = NULL, to = NULL, band = TRUE,
ci.col = "#BEBEBEB3", ...)
}
\arguments{
\item{fit}{Object returned by lm. Only models of the form \code{y ~ x} are supported, without expressions in \code{I()} (see Examples), or interactions, or multiple variables.}
\item{from}{Optional (read from fitted model); Draw from this X value.}
\item{to}{Optional (read from fitted model); Draw to this x value.}
\item{band}{Logical. Whether to add a confidence band.}
\item{ci.col}{Colour of the confidence band, if plotted. Defaults to a transparent grey colour.}
\item{\dots}{Further arguments passed to \code{\link{abline_range}}}
}
\description{
Plots a regression line from a simple linear model (of the form \code{lm(y ~ x)}) to a plot. Also plots the confidence band for the mean, which is calculated using \code{\link{predict.lm}}.
}
\examples{
# Add a line across the range of the data from a regression object
with(mtcars, plot(1/wt, mpg, xlim=c(0,0.8), ylim=c(0,40)))
# add_regres_line does not allow I() expressions; yet.
mtcars$inv_wt <- 1 / mtcars$wt
fit <- lm(mpg ~ inv_wt, data=mtcars)
add_regres_line(fit)
# Add the regression line and confidence band behind the data
fit <- lm(height ~ age, data=Loblolly)
with(Loblolly, plot(age, height, pch=19, panel.first=add_regres_line(fit)))
}
|
################# Part 3 - Main Simulation - Non-Destructive Search - Levy Model
# NOTE(review): the next four self-assignments assume Thresh, Lags, Steps and
# Reps are already defined in the calling environment -- confirm upstream script.
Thresh<-Thresh # Threshold of visual range
Lags<-Lags # Lags at which there are effects of encounters on movement
steps<-Steps # Length of simulation
Reps <-Reps # Number of replications
N_Patches<-13 # Number of patches in environment. Note that we replace the GRF model from
# above with separate bivariate normals, due to the infeasible computational
# time required to simulate from a GRF on this grid
X_Mushrooms<-vector("list",N_Patches) # Location of prey
Y_Mushrooms<-vector("list",N_Patches) #
set.seed(123456) # Reset Seed
N_PerPatch<-rpois(N_Patches, 200) # Create some prey
for(i in 1:N_Patches){
X_Mushrooms[[i]]<-rnorm(N_PerPatch[i], runif(1,-2500,2500), rpois(1, 350))+100
Y_Mushrooms[[i]]<-rnorm(N_PerPatch[i], runif(1,-2500,2500), rpois(1, 350))+100
}
X_Mushrooms_per_step<-vector("list",steps) # Per-time-step copy of prey locations
Y_Mushrooms_per_step<-vector("list",steps) # (lets encountered prey vanish for one step)
for(i in 1:steps){
X_Mushrooms_per_step[[i]]<-X_Mushrooms
Y_Mushrooms_per_step[[i]]<-Y_Mushrooms
}
################################################################################ Levy parameters
# NOTE(review): Phi0L, Psi0L, PhiL, PsiL, OmegaL and EtaL are fitted model
# parameters that must exist before this script runs -- confirm upstream.
AlphaDist <- Phi0L
AlphaAngle <- Psi0L
BetaDist <- PhiL
BetaAngle <- PsiL
SDDist <- OmegaL
DAngle <- EtaL
################################################################################ Start Model
MeanSpeed<-SDSpeed<-MeanDHeading<-DDHeading<-SDHits<-MeanHits<-c() # Create storage
StoreSpeed <- matrix(NA,ncol=Reps,nrow=steps-1)
StoreAngDiff <- matrix(NA,ncol=Reps,nrow=steps-1)
StoreHits <- matrix(NA,ncol=Reps,nrow=steps-1)
for(q in 1:Reps){ # One replicate simulation per q
  set.seed(123456) # Reset seed so every replicate regenerates the same prey field
  N_PerPatch<-rpois(N_Patches, 200) # Choose number of items per patch
  for(i in 1:N_Patches){
    X_Mushrooms[[i]]<-rnorm(N_PerPatch[i], runif(1,-2500,2500), rpois(1, 350))+100 # Make some patches of prey
    Y_Mushrooms[[i]]<-rnorm(N_PerPatch[i], runif(1,-2500,2500), rpois(1, 350))+100 #
  }
  X_Mushrooms_per_step<-vector("list",steps) # Per-time-step copy of prey locations
  Y_Mushrooms_per_step<-vector("list",steps) #
  for(i in 1:steps){
    X_Mushrooms_per_step[[i]]<-X_Mushrooms
    Y_Mushrooms_per_step[[i]]<-Y_Mushrooms
  }
  loc.x<-c() # Location of forager
  loc.y<-c() #
  speed<-c() # Speed or step size
  heading<-c() # Absolute heading
  d.heading<-c() # Heading change
  Hits<-c() # Vector of encounter counts
  loc.x[1:Lags]<-rep(0,Lags) # Initialize the first Lags entries to zero
  loc.y[1:Lags]<-rep(0,Lags) #
  heading[1:Lags]<-rep(0,Lags) #
  speed[1:Lags]<-rep(0,Lags) #
  Hits[1:Lags]<-rep(0,Lags) #
  plot(loc.x,loc.y,typ="l",ylim=c(-3500,3500),xlim=c(-3500,3500)) # Plot prey items
  for(i in 1:N_Patches){ #
    points(X_Mushrooms[[i]],Y_Mushrooms[[i]], col="red",pch=".") #
  } #
  set.seed(q*100) # Movement noise differs across replicates
  ############################################################################## Now model forager movement
  for(s in (Lags+1): (steps-1)){
    X_Mushrooms <- X_Mushrooms_per_step[[s]]
    Y_Mushrooms <- Y_Mushrooms_per_step[[s]]
    PredDist <- AlphaDist; # Mean of log step size conditional on lagged encounters
    for(k in 1:Lags){ #
      PredDist <- PredDist + BetaDist[k]*ifelse(Hits[s-k]>0,1,0); #
    } #
    R<- exp(rnorm(1,PredDist,SDDist))*7.5 # Simulate a step distance.
    # Note that the 7.5 just scales the step sizes to better fit the grid.
    PredAngle <- AlphaAngle; # Mean turning conditional on lagged encounters
    for(k in 1:Lags){ #
      PredAngle <- PredAngle + BetaAngle[k]*ifelse(Hits[s-k]>0,1,0); #
    } #
    Theta<- rbeta(1,inv_logit(PredAngle)*DAngle, # Simulate a directional change
      (1-inv_logit(PredAngle))*DAngle)*180*ifelse(runif(1,0,1)>0.5,1,-1) # *180 maps the unit interval to degrees; the ifelse picks a random turn direction
    heading[s]<-(heading[s-1]+Theta)%%360 # Store new heading; %% wraps it into [0, 360)
    d.heading[s] <- abs(Theta)/180 # Also store the standardized heading change
    speed[s] <- R # And the speed / step size
    ynew <- R * sin(deg2rad(heading[s])) # Convert polar to Cartesian offsets
    xnew <- R * cos(deg2rad(heading[s])) #
    loc.x[s]<-loc.x[s-1]+xnew # Make the new x and y pair
    loc.y[s]<-loc.y[s-1]+ynew #
    ############################################################################# Now check for an encounter
    Scrap2<-c()
    for(i in 1:N_Patches){ # For each patch
      Scrap<-rep(NA,length(X_Mushrooms[[i]]))
      for(j in seq_along(X_Mushrooms[[i]])){ # For each mushroom in patch (seq_along is safe if a patch is empty)
        Scrap[j]<- dist(loc.x[s],X_Mushrooms[[i]][j],loc.y[s],Y_Mushrooms[[i]][j]); # Forager-to-mushroom distance. NOTE(review): dist() here must be a custom 4-argument helper defined upstream (base dist() has a different signature) -- confirm
        if(Scrap[j]<Thresh){ # Encounter: forager is within the visual radius
          points(X_Mushrooms[[i]][j],Y_Mushrooms[[i]][j], col="blue",pch=20) # Plot a hit
          X_Mushrooms_per_step[[s+1]][[i]][j]<-99999 # Non-destructive foraging: hide prey for one step
          Y_Mushrooms_per_step[[s+1]][[i]][j]<-99999 # BUGFIX: this line originally re-assigned the X coordinate a second time, so encountered prey kept their Y position and were never actually hidden; both coordinates are now moved off-grid
        }}
      Scrap2[i]<- ifelse(sum(ifelse(Scrap<Thresh,1,0))>0,sum(ifelse(Scrap<Thresh,1,0)),0) # Number of hits in this patch
    }
    Hits[s]<-ifelse(sum(Scrap2)==0,0,sum(Scrap2)) # Total encounters this step
    lines(loc.x,loc.y,ylim=c(-2500,2500),xlim=c(-2500,2500)) # Plot updates to the forager's path
  }
  MeanHits[q]<-mean(Hits[(Lags+1):length(Hits)],na.rm=T) # For replicate q, the mean hits, log speed, and delta heading
  MeanSpeed[q]<-mean(log(speed)[(Lags+1):length(Hits)],na.rm=T)
  FF<-fitdistr(d.heading[(Lags+1):length(Hits)],"beta",start=list(shape1=1,shape2=1))$estimate
  MeanDHeading[q]<-(FF[1]/sum(FF))
  SDHits[q]<-sd(Hits[(Lags+1):length(Hits)],na.rm=T) # As well as the dispersion metrics
  SDSpeed[q]<-sd(log(speed)[(Lags+1):length(Hits)],na.rm=T)
  DDHeading[q]<-sum(FF)
  StoreAngDiff[,q] <- d.heading
  StoreSpeed[,q] <- speed
  StoreHits[,q] <- Hits
  print(q)
}
################################################################################ Store Each Simulation Here
# Per-replicate summary statistics (means and dispersions of hits, log speed,
# and heading change), suffixed ".d.h" to tag this (non-destructive, Levy) run.
Res.d.h<-cbind(MeanSpeed, MeanDHeading, MeanHits, SDSpeed,DDHeading,SDHits)
StoreHits.d.h <- StoreHits
StoreAngDiff.d.h <- StoreAngDiff
StoreSpeed.d.h <- StoreSpeed
|
/Simulation-NonDestructive-LevySearch.R
|
no_license
|
ctross/adaptivesearch
|
R
| false
| false
| 10,405
|
r
|
################# Part 3 - Main Simulation - Non-Destructive Search - Levy Model
Thresh<-Thresh # Threshold of visual range
Lags<-Lags # Lags at which there are effects of encounters on movement
steps<-Steps # Length of simulation
Reps <-Reps # Number of replications
N_Patches<-13 # Number of patches in environment. Note that we replace the GRF model from
# above with separate bivariate normals, due to the infeasible computational
# time required to simulate from a GRF on this grid
X_Mushrooms<-vector("list",N_Patches) # Location of prey
Y_Mushrooms<-vector("list",N_Patches) #
set.seed(123456) # Reset Seed
N_PerPatch<-rpois(N_Patches, 200) # Create some prey
for(i in 1:N_Patches){
X_Mushrooms[[i]]<-rnorm(N_PerPatch[i], runif(1,-2500,2500), rpois(1, 350))+100
Y_Mushrooms[[i]]<-rnorm(N_PerPatch[i], runif(1,-2500,2500), rpois(1, 350))+100
}
X_Mushrooms_per_step<-vector("list",steps) # Location of prey
Y_Mushrooms_per_step<-vector("list",steps) #
for(i in 1:steps){
X_Mushrooms_per_step[[i]]<-X_Mushrooms
Y_Mushrooms_per_step[[i]]<-Y_Mushrooms
}
################################################################################ Levy parameters
AlphaDist <- Phi0L
AlphaAngle <- Psi0L
BetaDist <- PhiL
BetaAngle <- PsiL
SDDist <- OmegaL
DAngle <- EtaL
################################################################################ Start Model
MeanSpeed<-SDSpeed<-MeanDHeading<-DDHeading<-SDHits<-MeanHits<-c() # Create storage
StoreSpeed <- matrix(NA,ncol=Reps,nrow=steps-1)
StoreAngDiff <- matrix(NA,ncol=Reps,nrow=steps-1)
StoreHits <- matrix(NA,ncol=Reps,nrow=steps-1)
for(q in 1:Reps){ # Rep 200 times
set.seed(123456) # Reset seed
N_PerPatch<-rpois(N_Patches, 200) # Choose number of items per patch
for(i in 1:N_Patches){
X_Mushrooms[[i]]<-rnorm(N_PerPatch[i], runif(1,-2500,2500), rpois(1, 350))+100 # Make some patches of prey
Y_Mushrooms[[i]]<-rnorm(N_PerPatch[i], runif(1,-2500,2500), rpois(1, 350))+100 #
}
X_Mushrooms_per_step<-vector("list",steps) # Location of prey
Y_Mushrooms_per_step<-vector("list",steps) #
for(i in 1:steps){
X_Mushrooms_per_step[[i]]<-X_Mushrooms
Y_Mushrooms_per_step[[i]]<-Y_Mushrooms
}
loc.x<-c() # Location of forager
loc.y<-c() #
speed<-c() # Speed or step size
heading<-c() # Absolute heading
d.heading<-c() # Heading change
Hits<-c() # Binary vector of encounters
loc.x[1:Lags]<-rep(0,Lags) # Intialize vectors
loc.y[1:Lags]<-rep(0,Lags) #
heading[1:Lags]<-rep(0,Lags) #
speed[1:Lags]<-rep(0,Lags) #
Hits[1:Lags]<-rep(0,Lags) #
plot(loc.x,loc.y,typ="l",ylim=c(-3500,3500),xlim=c(-3500,3500)) # Plot prey items
for(i in 1:N_Patches){ #
points(X_Mushrooms[[i]],Y_Mushrooms[[i]], col="red",pch=".") #
} #
set.seed(q*100)
################################################################################ Now model forager movement
for(s in (Lags+1): (steps-1)){
X_Mushrooms <- X_Mushrooms_per_step[[s]]
Y_Mushrooms <- Y_Mushrooms_per_step[[s]]
PredDist <- AlphaDist; # First calculate mean prediction conditional on encounters
for(k in 1:Lags){ #
PredDist <- PredDist + BetaDist[k]*ifelse(Hits[s-k]>0,1,0); #
} #
R<- exp(rnorm(1,PredDist,SDDist))*7.5 # Then simulate a step distance.
# Note that the 7.5 just scales the step sizes to better fit the grid.
PredAngle <- AlphaAngle; # Again calculate mean prediction conditional on encounters
for(k in 1:Lags){ #
PredAngle <- PredAngle + BetaAngle[k]*ifelse(Hits[s-k]>0,1,0); #
} #
Theta<- rbeta(1,inv_logit(PredAngle)*DAngle, # And then simulate a directional change
(1-inv_logit(PredAngle))*DAngle)*180*ifelse(runif(1,0,1)>0.5,1,-1) # The 180 shifts from the unit to the maximum absolute distance
# The ifelse just chooses a random direction
heading[s]<-(heading[s-1]+Theta)%%360 # Store new heading. Note that the %% is the mod operation to wrap around if needed
d.heading[s] <- abs(Theta)/180 # Also store just the delta heading
speed[s] <- R # And the speed slash step size
ynew <- R * sin(deg2rad(heading[s])) # Now convert polar to Cartesian, to get the offset for a new x and y pair
xnew <- R * cos(deg2rad(heading[s])) #
loc.x[s]<-loc.x[s-1]+xnew # Make the new x and y pair
loc.y[s]<-loc.y[s-1]+ynew #
############################################################################### Now check for an encounter
Scrap2<-c()
for(i in 1:N_Patches){ # For each patch
Scrap<-rep(NA,length(X_Mushrooms[[i]]))
for(j in 1:length(X_Mushrooms[[i]])){ # For each mushroom in patch
Scrap[j]<- dist(loc.x[s],X_Mushrooms[[i]][j],loc.y[s],Y_Mushrooms[[i]][j]); # Calculate the distance from the forager to the mushroom
if(Scrap[j]<Thresh){ # If the forager is closer than the visual radius slash encounter threshold
points(X_Mushrooms[[i]][j],Y_Mushrooms[[i]][j], col="blue",pch=20) # Plot a hit
X_Mushrooms_per_step[[s+1]][[i]][j]<-99999 # If this run is for non-destructive foraging
X_Mushrooms_per_step[[s+1]][[i]][j]<-99999 # disappear food for a single time step
}}
Scrap2[i]<- ifelse(sum(ifelse(Scrap<Thresh,1,0))>0,sum(ifelse(Scrap<Thresh,1,0)),0) # Check for hits, also can replace sum(ifelse(Scrap<Thresh,1,0)) with 1
}
Hits[s]<-ifelse(sum(Scrap2)==0,0,sum(Scrap2)) # If there is a hit, then set encounters to 1
lines(loc.x,loc.y,ylim=c(-2500,2500),xlim=c(-2500,2500)) # Plot updates to the foragers path
}
MeanHits[q]<-mean(Hits[(Lags+1):length(Hits)],na.rm=T) # For simulation q, calculate mean hits, speed, and delta heading
MeanSpeed[q]<-mean(log(speed)[(Lags+1):length(Hits)],na.rm=T)
FF<-fitdistr(d.heading[(Lags+1):length(Hits)],"beta",start=list(shape1=1,shape2=1))$estimate
MeanDHeading[q]<-(FF[1]/sum(FF))
SDHits[q]<-sd(Hits[(Lags+1):length(Hits)],na.rm=T) # As well as the dispesion metrics
SDSpeed[q]<-sd(log(speed)[(Lags+1):length(Hits)],na.rm=T)
DDHeading[q]<-sum(FF)
StoreAngDiff[,q] <- d.heading
StoreSpeed[,q] <- speed
StoreHits[,q] <- Hits
print(q)
}
################################################################################ Store Each Simulation Here
Res.d.h<-cbind(MeanSpeed, MeanDHeading, MeanHits, SDSpeed,DDHeading,SDHits)
StoreHits.d.h <- StoreHits
StoreAngDiff.d.h <- StoreAngDiff
StoreSpeed.d.h <- StoreSpeed
|
# -----------------------------------------------------------------------------
# Organize net-radiation ("Net") data.
# Collections 024/17 through 260/2017 were stored in the 30-min mean flux
# file, TOA5_XF_ddddyy.data.
# From 300/2017 on there is a dedicated file with 10-min acquisition,
# TOA5_XF_ddddyy_10.data.
# From 115/2019 on, PAR and global radiation sensors were added and still
# need to be organized.
# TODO: produce a more analytical output, with statistics and some more
# coherent analyses.
#------------------------------------------------------------------------------
rm(list=ls()) # NOTE(review): clears the whole workspace -- acceptable for a batch script, avoid when sourcing interactively
library(openair)
library(stringi)
#--------------------------------------------
# Check whether there are new data files to process
raw_data <- "/data1/DATA/LCB/EXT/origin"
flux1 <- list.files(raw_data, pattern = "TOA5_XF", recursive = T, full.names = T)
flux2 <- list.files(raw_data, pattern = "TOA5_EF", recursive = T, full.names = T)
# NOTE(review): the log is read from a relative path here but appended to an
# absolute path at the bottom of the script -- confirm the working directory
# makes these the same file, otherwise processed files will be re-ingested.
log <- read.table('netRad.log', stringsAsFactors = FALSE, header = F)
flux <- c(flux1, flux2)
#---------------------------------------------
# List of new files still to be synchronized (drop those already logged)
flux <- flux[!(data.frame(V1=flux)$V1 %in% log$V1)]
if(length(flux) > 0){
a <- c("\"TIMESTAMP", "Net_Avg" ) # columns of interest
# column positions of the wanted variables in each file's header line
labels_tt <- lapply(flux,
function(.file)
which(unlist(stri_split_fixed(readLines(.file, n=2)[2], '\",\"')) %in% a))
# read the data and rename the columns from each file's header line
tt = lapply(flux,
function(.file)
read.csv(.file,skip=4, na.strings='\"NAN\"', header=F))
for (i in 1:length(tt)){
names(tt[[i]]) <- unlist(stri_split_fixed(readLines(flux[[i]], n=2)[2], '\",\"'))
}
# keep only the timestamp and Net_Avg columns, then stack all files
aux <- list()
aux <- lapply(tt, "[", a)
new <- do.call(rbind, aux)
names(new)[1] <- "date"
new$date <- as.POSIXct(new$date, tz='GMT')
# build the complete, regular 10-min series expected by the database
ref <- data.frame(date=seq.POSIXt(min(new$date), max(new$date), by='10 min'))
new <- merge(ref, new, by="date", all.x=TRUE)
# If a previous archive exists, concatenate and write out.
old_data <- "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_raw.csv"
if(file.exists(old_data)){
old <- read.csv(old_data, na.strings = '-9999')
old$date <- as.POSIXct(old$date, tz='GMT')
# check that the new period strictly follows the archived one, with no overlap
if((max(old$date) < min(new$date)) & (sum(old$date %in% new$date) == 0)){
ref = data.frame(date=seq.POSIXt(min(old$date), max(new$date), by='10 min'))
out <- rbind(old, new)
out2 <- merge(ref, out, all.x = T)
cat("Atualizando dados até ", substr(ref$date[nrow(ref)],1,10),"\n")
write.csv(out2, "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_raw.csv",
row.names = F, quote = T, na='-9999')
out30 <- timeAverage(out2, avg.time = '30 min', data.thresh = 0)
write.csv(out30, "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_30m.csv",
row.names = F, quote = T, na='-9999')
} else {
# overlapping timestamps found: abort rather than duplicate rows
stop("\n*******************************************\n",
"*** DADOS REPLICADOS NOS ARQUIVOS NOVOS ***\n",
"*******************************************\n")
}
} else{
# no previous archive: this is the first collection, write everything
cat("Primeira Coleta")
write.csv(new, "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_raw.csv",
row.names = F, quote = T, na='-9999')
new30 <- timeAverage(new, avg.time = '30 min', data.thresh = 0)
write.csv(new30, "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_30m.csv",
row.names = F, quote = T, na='-9999')
}
# update the log of processed files (append mode)
write.table(flux, '/data1/DATA/LCB/EXT/STORAGE/codes/Rad/netRad.log', append = T, row.names = F, col.names = F)
} else {
stop("\n**************************************\n",
"*** SEM DADOS NOVOS PARA ATUALIZAR ***\n",
"**************************************\n")
}
#===============================================================================
# new <- timeAverage(new, avg.time = '30 min',
# start.date = paste0(substr(new$date[1],1,14),'00:00'))
# ref <- data.frame(date=seq.POSIXt(new$date[1], new$date[nrow(new)], by='30 min'))
# new <- merge(ref,new, all=T)
#
# #------------------------------------------------
# # Ler arquivo original
# old_data <- "/data1/DATA/LCB/EXT/STORAGE/data/I"
# old <- read.csv(paste0(old_data,'/Net_Extrema_30m.csv'))
# old$date <- as.POSIXct(old$date, tz='GMT')
#
# # checar novamente se tem dado novo no new
# if(sum(!(new$date %in% old$date))==0){
# cat('\n Nada novo pra incluir \n')
# write.table(flux, 'netRad.log', append = T, row.names = F, col.names = F)
# } else {
# out <- merge(old, new[,c(1:2)], all= T )
# write.csv(out, '../../data/Radiation/Net_Extrema_30m.csv',
# row.names = FALSE, quote = FALSE)
# write.table(flux, 'netRad.log', append = T, row.names = F, col.names = F)
# }
#
#---------------------------------------------------------
# Código utilizado para integrar os dados até 2017
{
# #---------------------------------------------
# # arquivos 1 e 2 com 87 colunas
# tt1 <- list()
# for(i in 1:2){
# tt1[[i]] <- tt[[i]]
# }
# tt1 = do.call(rbind, tt1)
# names(tt1) = unlist(strsplit(readLines(flux[1],2)[2],'\",\"'))
# names(tt1)[1] = "date"
# tt1$date = as.POSIXct(tt1$date, format="%Y-%m-%d %H:%M:%S", tz = "GMT")
#
# #-------------------------------
# # arquivos 4 a 6 com 86 colunas
# tt2 <- list()
# for(i in 3:length(flux)){
# tt2[[i]] <- tt[[i]]
# }
# tt2 = do.call(rbind,tt2)
# names(tt2) = unlist(strsplit(readLines(flux[3],2)[2],'\",\"'))
# names(tt2)[1] = "date"
# tt2$date = as.POSIXct(tt2$date, format="%Y-%m-%d %H:%M:%S", tz = "GMT")
#
# #-------------------------------
# # Unir todos os arquivos de 2017
# temp = merge(tt1,tt2, all=T)
#
# #----------------------------------------
# # Corrigir dados com multiplicador errado
# # entre nov/2016 e abril/2018
# ref <- data.frame(date =
# seq.POSIXt(temp$date[1], as.POSIXct('2017-03-29 13:30:00', tz="GMT"), by='30 min'))
# pos <- which(temp$date %in% ref$date)
# temp$Net_Avg[pos] = (temp$Net_Avg[pos] -77.5194) * 77.5194
#
# #-----------------------------------------------
# # Net_30min - parte 1
# net_30min <- temp[,c(1,85)]
#
#
# #------------------------------------------------
# #ler dados após mudança
# flux <- list.files("../../..", pattern = "TOA5_XF", recursive = T, full.names = T)
# flux <- flux[substr(flux, nchar(flux)-5, nchar(flux)-4) == '10']
#
# tt = lapply(flux,
# function(.file)
# read.csv(.file, na.strings='\"NAN\"', header=F, skip=4))
# tt1 = do.call(rbind, tt)
# names(tt1) = unlist(strsplit(readLines(flux[1],2)[2],'\",\"'))
# names(tt1)[1] = "date"
# tt1$date = as.POSIXct(tt1$date, format="%Y-%m-%d %H:%M:%S", tz = "GMT")
# tt1_30m <- timeAverage(tt1, avg.time = '30 min')
}
#
#---------------------------------------------------------
|
/ext_codes/Rad/net_organize.R
|
no_license
|
ebrasilio/lcbtools
|
R
| false
| false
| 7,379
|
r
|
# -----------------------------------------------------------------------------
# Organise net-radiation (Net) data.
# Collections 024/17 through 260/2017 were stored in the 30-min mean flux
# file, TOA5_XF_ddddyy.data
# From 300/2017 onwards there is a dedicated file with 10-min acquisition,
# TOA5_XF_ddddyy_10.data
# From 115/2019 onwards PAR and global radiation sensors were added and
# need to be organised.
# TODO: produce a more analytical output, with statistics and more
# coherent analyses.
#------------------------------------------------------------------------------
rm(list=ls())
library(openair)    # timeAverage()
library(stringi)    # stri_split_fixed()
#--------------------------------------------
# Check whether there are new data files to process
raw_data <- "/data1/DATA/LCB/EXT/origin"
flux1 <- list.files(raw_data, pattern = "TOA5_XF", recursive = T, full.names = T)
flux2 <- list.files(raw_data, pattern = "TOA5_EF", recursive = T, full.names = T)
# netRad.log records every file already processed (one path per line).
# NOTE(review): read.table() errors if netRad.log does not exist yet -
# confirm the log is created before the first run.
log <- read.table('netRad.log', stringsAsFactors = FALSE, header = F)
flux <- c(flux1, flux2)
#---------------------------------------------
# Keep only the new files to synchronise (those not yet listed in the log)
flux <- flux[!(data.frame(V1=flux)$V1 %in% log$V1)]
if(length(flux) > 0){
    # Columns of interest: timestamp and average net radiation
    a <- c("\"TIMESTAMP", "Net_Avg" )
    # Column positions of the variables of interest (file header is on line 2)
    labels_tt <- lapply(flux,
                        function(.file)
                            which(unlist(stri_split_fixed(readLines(.file, n=2)[2], '\",\"')) %in% a))
    # Read the data and rename columns from each file's own header line
    tt = lapply(flux,
                function(.file)
                    read.csv(.file,skip=4, na.strings='\"NAN\"', header=F))
    for (i in 1:length(tt)){
        names(tt[[i]]) <- unlist(stri_split_fixed(readLines(flux[[i]], n=2)[2], '\",\"'))
    }
    # Extract only the columns of interest
    aux <- list()
    aux <- lapply(tt, "[", a)
    new <- do.call(rbind, aux)
    names(new)[1] <- "date"
    new$date <- as.POSIXct(new$date, tz='GMT')
    # Build the complete, regular 10-min series aligned with the database
    ref <- data.frame(date=seq.POSIXt(min(new$date), max(new$date), by='10 min'))
    new <- merge(ref, new, by="date", all.x=TRUE)
    # Check whether a previous output file exists; if so concatenate and write.
    old_data <- "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_raw.csv"
    if(file.exists(old_data)){
        old <- read.csv(old_data, na.strings = '-9999')
        old$date <- as.POSIXct(old$date, tz='GMT')
        # Make sure the new period starts after the old one and does not overlap
        if((max(old$date) < min(new$date)) & (sum(old$date %in% new$date) == 0)){
            ref = data.frame(date=seq.POSIXt(min(old$date), max(new$date), by='10 min'))
            out <- rbind(old, new)
            out2 <- merge(ref, out, all.x = T)
            cat("Atualizando dados até ", substr(ref$date[nrow(ref)],1,10),"\n")
            write.csv(out2, "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_raw.csv",
                      row.names = F, quote = T, na='-9999')
            # 30-min means computed from the merged 10-min series
            out30 <- timeAverage(out2, avg.time = '30 min', data.thresh = 0)
            write.csv(out30, "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_30m.csv",
                      row.names = F, quote = T, na='-9999')
        } else {
            # Error message: "replicated data in the new files"
            stop("\n*******************************************\n",
                 "*** DADOS REPLICADOS NOS ARQUIVOS NOVOS ***\n",
                 "*******************************************\n")
        }
    } else{
        # First collection: no previous file, just write the new series
        cat("Primeira Coleta")
        write.csv(new, "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_raw.csv",
                  row.names = F, quote = T, na='-9999')
        new30 <- timeAverage(new, avg.time = '30 min', data.thresh = 0)
        write.csv(new30, "/data1/DATA/LCB/EXT/STORAGE/data/I/Ext_Radiations_30m.csv",
                  row.names = F, quote = T, na='-9999')
    }
    # Record the processed files in the log so they are skipped next run
    write.table(flux, '/data1/DATA/LCB/EXT/STORAGE/codes/Rad/netRad.log', append = T, row.names = F, col.names = F)
} else {
    # Error message: "no new data to update"
    stop("\n**************************************\n",
         "*** SEM DADOS NOVOS PARA ATUALIZAR ***\n",
         "**************************************\n")
}
#===============================================================================
# new <- timeAverage(new, avg.time = '30 min',
# start.date = paste0(substr(new$date[1],1,14),'00:00'))
# ref <- data.frame(date=seq.POSIXt(new$date[1], new$date[nrow(new)], by='30 min'))
# new <- merge(ref,new, all=T)
#
# #------------------------------------------------
# # Ler arquivo original
# old_data <- "/data1/DATA/LCB/EXT/STORAGE/data/I"
# old <- read.csv(paste0(old_data,'/Net_Extrema_30m.csv'))
# old$date <- as.POSIXct(old$date, tz='GMT')
#
# # checar novamente se tem dado novo no new
# if(sum(!(new$date %in% old$date))==0){
# cat('\n Nada novo pra incluir \n')
# write.table(flux, 'netRad.log', append = T, row.names = F, col.names = F)
# } else {
# out <- merge(old, new[,c(1:2)], all= T )
# write.csv(out, '../../data/Radiation/Net_Extrema_30m.csv',
# row.names = FALSE, quote = FALSE)
# write.table(flux, 'netRad.log', append = T, row.names = F, col.names = F)
# }
#
#---------------------------------------------------------
# Código utilizado para integrar os dados até 2017
{
# #---------------------------------------------
# # arquivos 1 e 2 com 87 colunas
# tt1 <- list()
# for(i in 1:2){
# tt1[[i]] <- tt[[i]]
# }
# tt1 = do.call(rbind, tt1)
# names(tt1) = unlist(strsplit(readLines(flux[1],2)[2],'\",\"'))
# names(tt1)[1] = "date"
# tt1$date = as.POSIXct(tt1$date, format="%Y-%m-%d %H:%M:%S", tz = "GMT")
#
# #-------------------------------
# # arquivos 4 a 6 com 86 colunas
# tt2 <- list()
# for(i in 3:length(flux)){
# tt2[[i]] <- tt[[i]]
# }
# tt2 = do.call(rbind,tt2)
# names(tt2) = unlist(strsplit(readLines(flux[3],2)[2],'\",\"'))
# names(tt2)[1] = "date"
# tt2$date = as.POSIXct(tt2$date, format="%Y-%m-%d %H:%M:%S", tz = "GMT")
#
# #-------------------------------
# # Unir todos os arquivos de 2017
# temp = merge(tt1,tt2, all=T)
#
# #----------------------------------------
# # Corrigir dados com multiplicador errado
# # entre nov/2016 e abril/2018
# ref <- data.frame(date =
# seq.POSIXt(temp$date[1], as.POSIXct('2017-03-29 13:30:00', tz="GMT"), by='30 min'))
# pos <- which(temp$date %in% ref$date)
# temp$Net_Avg[pos] = (temp$Net_Avg[pos] -77.5194) * 77.5194
#
# #-----------------------------------------------
# # Net_30min - parte 1
# net_30min <- temp[,c(1,85)]
#
#
# #------------------------------------------------
# #ler dados após mudança
# flux <- list.files("../../..", pattern = "TOA5_XF", recursive = T, full.names = T)
# flux <- flux[substr(flux, nchar(flux)-5, nchar(flux)-4) == '10']
#
# tt = lapply(flux,
# function(.file)
# read.csv(.file, na.strings='\"NAN\"', header=F, skip=4))
# tt1 = do.call(rbind, tt)
# names(tt1) = unlist(strsplit(readLines(flux[1],2)[2],'\",\"'))
# names(tt1)[1] = "date"
# tt1$date = as.POSIXct(tt1$date, format="%Y-%m-%d %H:%M:%S", tz = "GMT")
# tt1_30m <- timeAverage(tt1, avg.time = '30 min')
}
#
#---------------------------------------------------------
|
# Analysis of the results
# Code to:
# - perform all the analyses of the article
# - draw Figure 3 (Individual pressures: regional comparisons)
# - draw Figure 4 (Density distribution of pressure percentiles and frequency of occurrence of top pressures in refugia vs non-refugia)
# - draw Figure S1 (Correlation among pressures)
# - draw Figure S2 (Density distribution of pressure raw values)
# - draw Figure S10 (Comparison of frequency of occurrence of top pressures between regions)
# - draw Figure S11 (Pressure intensity when top-ranked)
rm(list = ls())  # NOTE(review): wiping the workspace in a script is discouraged
library(tidyverse)
library(sf)           # allreefs is a spatial (sf) object
library(here)
library(corrgram)     # Figure S1 correlogram
library(RColorBrewer) # colourblind-friendly palettes
library(prettyR)      # rep_n_stack() used for Figure 4 (top)
# Loads the object `allreefs` (one row per reef cell, pressure columns)
# into the global environment.
load(here("data", "allreefs.RData"))
# Names of the six pressure plus cumulative impact score in the columns of allreefs
vthreats <- c(
  "grav_NC", "pop_count", "num_ports",
  "reef_value", "sediment", "nutrient", "cumul_score"
)
# Names of the six pressures plus cumulative impact score
threat_names <- c(
  "Fishing", "Coastal pop", "Industrial dev",
  "Tourism", "Sediments", "Nitrogen", "Cumulative impact score"
)
# Colors: colorblind friendly palette
col_threats <- brewer.pal(6, "Set2")
# Named colour vector; indexed by position in the Figure 3 loop below
colors <- c(
  "Fishing" = col_threats[1],
  "Coastal\npopulation" = col_threats[2],
  "Industr_dev" = col_threats[3],
  "Tourism" = col_threats[4],
  "Sediments" = col_threats[5],
  "Nitrogen" = col_threats[6],
  "Cumulative" = "darkgrey"
)
# Titles (plot titles; "\n" gives two-line labels)
title.text <- c(
  "Fishing", "Coastal\npopulation", "Industrial\ndevelopment",
  "Tourism", "Sediments", "Nitrogen", "Cumulative"
)
# Just change the name to be more comfortable
data <- allreefs
rm(allreefs)
# Calculate the global median of every pressure percentile and of the
# cumulative impact score, as a numeric vector named by the column names
# in vthreats (usable by position or by name in the figures below).
# vapply() replaces the original grow-in-a-loop pattern (and its
# 1:length() / T shorthands); the sf object is converted to a plain
# data frame once, outside the iteration.
data_df <- as.data.frame(data)
glob.median <- vapply(
  vthreats,
  function(indicator) median(data_df[, indicator], na.rm = TRUE),
  numeric(1)
)
# Theme
# theme_set()/theme_update() modify the global ggplot2 theme, so these
# settings apply to every figure produced below.
ggplot2::theme_set(theme_minimal(10))
ggplot2::theme_update(
  axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7),
  axis.title = element_blank(),
  legend.text = element_text(size = 7),
  legend.key.size = unit(.5, "cm"),
  legend.position = "bottom",
  plot.title = element_text(hjust = 0.5, size = 10)
)
# MAIN RESULTS
# Frequency of occurrence of top pressures (counts, then proportions)
table(data$top_threat)
table(data$top_threat) / sum(table(data$top_threat))
#######################################################
# FIGURE 3. Individual pressures: regional comparisons
# One boxplot PNG per pressure (i = 1..6) plus the cumulative score (i = 7),
# with a dashed vertical line at the global median.
for (i in 1:7) {
  # NOTE(review): vthreats[7] is already "cumul_score", so the else branch
  # is redundant but harmless.
  if (i < 7) indicator <- vthreats[i] else indicator <- "cumul_score"
  png(paste0("Boxplot_", indicator, ".png"), width = 10, height = 4, units = "cm", res = 300)
  a <-
    # This plots regions ordered by their median value:
    # ggplot2::ggplot(data, aes(y = reorder(Region, !!sym(indicator), FUN = median, na.rm = T), x = !!sym(indicator))) +
    # This plots regions in alphabetical order:
    ggplot2::ggplot(data, aes(y = Region, x = !!sym(indicator))) +
    ggplot2::geom_boxplot(fill = colors[i], size = 0.1, outlier.size = 0.1, show.legend = F) +
    ggplot2::geom_vline(aes(xintercept = glob.median[i]), linetype = "dashed", size = 0.25, show.legend = F) +
    ggplot2::scale_x_continuous(limits = c(0, 1)) +
    ggplot2::labs(title = title.text[i])
  print(a)
  dev.off()
}
rm(a, i, indicator)
# Composed in powerpoint and saved as Figure 3
#######################################################
# FIGURE 4 (top). Density distribution of pressure percentiles in refugia vs non-refugia
# Retain only pressure percentiles and stack all pressure + refugia/non-refugia data in the same dataframe
data1 <- as.data.frame(data)[, c("is.bcu", vthreats[1:6])]
data2 <- prettyR::rep_n_stack(data1, to.stack = vthreats[1:6], stack.names = c("indicator", "value"))
# Reorder levels of pressures
data2$indicator <- factor(data2$indicator, levels = vthreats[1:6])
# Add title text (pretty name for pressures, used as facet labels)
data2$title.text <- factor(title.text[data2$indicator], levels = title.text[1:6])
# Plot Figure 4 top: overlaid density curves per pressure, one panel each
ggplot2::theme_update(axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7)) # ,
# axis.text.y = element_blank())
png(paste0("Figure 4_top.png"), width = 18.5, height = 13, units = "cm", res = 300)
a <- ggplot2::ggplot(data2, aes(x = value, fill = is.bcu)) +
  ggplot2::geom_density(na.rm = T, alpha = 0.5) +
  ggplot2::facet_wrap(vars(title.text), scales = "free_y") +
  ggplot2::scale_fill_brewer(name = "", type = "qual")
print(a)
dev.off()
rm(data1, data2, a)
#######################################################
# FIGURE 4 (bottom). Comparison of frequency of occurrence of top pressures in refugia vs non-refugia
ggplot2::theme_update(
  axis.text.y = element_text(hjust = 1, vjust = 0.5, size = 7),
  axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7)
)
# Calculate the frequency of occurrence of each pressure as top ranked in refugia vs non refugia
ta.bcu <- as.data.frame(table(data$is.bcu,
  data$top_threat,
  dnn = c("refugia", "threat")
))
# Add pressure names as factors (ordered as in threat_names)
ta.bcu$top_threat <- threat_names[ta.bcu$threat]
ta.bcu$top_threat <- factor(ta.bcu$top_threat, levels = threat_names)
ta.bcu
# Plot Figure 4 (bottom): horizontal stacked bars of relative frequencies
# NOTE(review): aes_string() is deprecated in recent ggplot2 releases;
# consider aes(y = refugia, x = Freq, fill = top_threat) when upgrading.
png(paste0("Figure 4_bottom.png"), width = 10, height = 3.5, units = "cm", res = 300)
a <- ggplot2::ggplot(ta.bcu, aes_string(y = "refugia", x = "Freq", fill = "top_threat")) +
  ggplot2::geom_col(position = position_fill(reverse = T)) +
  ggplot2::scale_fill_manual(values = col_threats, name = "")
print(a)
dev.off()
# In power point, compose with Figure_4_top to create Figure 4
# # Chi-square tests to compare the frequency of occurrence of each pressure as top-ranked in refugia vs non-refugia
# nonBCU.tt <- table(data$top_threat[data$is.bcu == "non-refugia"])
# BCU.tt <- table(data$top_threat[data$is.bcu == "refugia"])
# chisq.test(rbind(nonBCU.tt, BCU.tt))
# rm(ta.bcu, a, nonBCU.tt, BCU.tt)
#######################################################
# FIGURE S1 - Correlation among pressures
png(paste0("Figure S1.png"), width = 12, height = 10, units = "cm", res = 300)
data_corr <- as.data.frame(data)[,vthreats[c(1:6)]]
names(data_corr) <- threat_names[1:6]
# Upper triangle: Spearman rank correlations (panel.cor comes from the
# corrgram package); lower triangle left empty.
corrgram::corrgram(data_corr,
                   upper.panel=panel.cor,
                   lower.panel = NULL, cor.method="spearman")
dev.off()
#######################################################
# FIGURE S2 - Density distribution of pressure raw values
# Retain only pressure raw values ("_raw" columns)
vthreats_raw <- paste0(vthreats[1:6],"_raw")
data1 <- as.data.frame(data)[, c("OBJECTID",vthreats_raw)]
# Update graphics layout
ggplot2::theme_update(axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7),
                      axis.title = element_blank())
# Loop on the six pressures.
# quantiles[i, ] will hold the 10/30/50/70/90/100% quantiles (original scale).
quantiles <- array(NA,dim=c(6,6))
for (i.threat in 1 : 6) {
  # Define pressure name
  indicator <- vthreats_raw[i.threat]
  # Retain only the values of that pressure
  data_indicator <- data1[,c("OBJECTID",indicator)]
  # Set the transformation: identity (for num_ports) or log10(x+1) for all the others
  # (the +1 shift keeps zero values finite on the log axis)
  if (indicator == "num_ports_raw") {
    data_indicator$value <- data_indicator[,2]
    transformation <- "identity"
  } else {
    data_indicator$value <- data_indicator[,2]+1
    transformation <- "log10"
  }
  # Calculate quantiles (10% to 90% by 20%) on the transformed values
  quantiles_ithreat <- quantile(data_indicator$value,seq(0.1, 0.9, 0.2),na.rm=T)
  # Plot density distribution with quantiles marked as dashed vertical lines
  png(paste0("Figure S2_",indicator,".png"), width = 6, height = 6, units = "cm", res = 300)
  a <- ggplot(data_indicator, aes(x=value)) +
    geom_density(na.rm = T) +
    scale_x_continuous(trans=transformation) +
    geom_vline(xintercept=quantiles_ithreat, colour=c("red","purple","blue","purple","red"), linetype=2,alpha=0.5) +
    ggtitle(title.text[i.threat])
  print(a)
  dev.off()
  # Calculate quantiles in the original (untransformed) scale
  quantiles[i.threat,] <- quantile(data_indicator[,2],
                                   c(0.1,0.3,0.5,0.7,0.9,1),
                                   na.rm=T)
}
colnames(quantiles) <- c("10%","30%","50%","70%","90%","100%")
rownames(quantiles) <- title.text[1:6]
# Replace the two-line display titles with single-line row names
rownames(quantiles)[2] <- "Coastal population"
rownames(quantiles)[3] <- "Industrial development"
round(quantiles,2)
#######################################################
# FIGURE S10. Comparison of frequency of occurrence of top pressures between regions
ggplot2::theme_update(axis.text.y = element_text(hjust = 1, vjust = 0.5, size = 7))
# ta data frame gives how many reef cells have a given top pressure in each region
ta <- as.data.frame(table(data$Region,
  data$top_threat,
  dnn = c("Region", "threat")
))
ta$top_threat <- threat_names[ta$threat]
ta$top_threat <- factor(ta$top_threat, levels = threat_names)
# Plot Figure S10
png(paste0("Figure S10.png"), width = 10, height = 8, units = "cm", res = 300)
a <- ggplot2::ggplot(ta, aes_string(y = "Region", x = "Freq", fill = "top_threat")) +
  ggplot2::geom_col(position = position_fill(reverse = T)) +
  ggplot2::scale_fill_manual(values = col_threats, name = "")
print(a)
dev.off()
# Calculate relative frequency of occurrence of each pressure as top ranked
# (normalise by the number of reef cells per region)
ta$n.reef <- table(data$Region)[ta$Region]
ta$Freq.rel <- ta$Freq / ta$n.reef
# Percent of reef cells where fishing is a top pressure, for each region
ta[ta$top_threat == "Fishing", c("Region", "Freq.rel")]
# Percent of reef cells where water pollution (nutrients + sediments) is a top pressure, for each region
# (threat levels 5 and 6 correspond to sediment and nutrient)
data.frame(
  Region = levels(ta$Region),
  water.pollution = as.numeric(ta$Freq.rel[ta$threat == 5] + ta$Freq.rel[ta$threat == 6])
)
rm(a, ta)
#######################################################
# FIGURE S11. Pressure intensity when top-ranked
# Build a dataframe where each reef cell has the value of the threat that is top-ranked
a <- data.frame(
  value = tibble::as_tibble(data) %>% dplyr::filter(top_threat == 1) %>% dplyr::select(!!sym(vthreats[1])) %>% as_vector(),
  threat = vthreats[1]
)
for (i in 2:6) {
  a <- rbind(
    a,
    data.frame(
      value = tibble::as_tibble(data) %>% dplyr::filter(top_threat == i) %>% dplyr::select(!!sym(vthreats[i])) %>% as_vector(),
      threat = vthreats[i]
    )
  )
}
a$threat <- factor(a$threat, levels=c("grav_NC","pop_count","num_ports","reef_value","sediment","nutrient"))
# Boxplots of the pressure percentiles that are top ranked
ggplot2::theme_update(axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7))
png(paste0("Figure S11.png"), width = 10, height = 5, units = "cm", res = 300)
a.plot <-
  ggplot2::ggplot(a, aes_string(x = "value", y = "threat", fill = "threat")) +
  ggplot2::geom_boxplot(size = 0.1, outlier.size = 0.1, show.legend = F) +
  ggplot2::scale_x_continuous(limits = c(0, 1)) +
  ggplot2::scale_y_discrete(labels = threat_names) +
  ggplot2::scale_fill_manual(values = col_threats, name = "")
print(a.plot)
dev.off()
# Summarize the statistics of each pressure when it is top-ranked
tapply(a$value, a$threat, summary)
rm(a, a.plot, i)
|
/analysis/Analysis.R
|
permissive
|
sparkgeo/local-reef-pressures
|
R
| false
| false
| 11,084
|
r
|
# Analysis of the results
# Code to:
# - perform all the analyses of the article
# - draw Figure 3 (Individual pressures: regional comparisons)
# - draw Figure 4 (Density distribution of pressure percentiles and frequency of occurrence of top pressures in refugia vs non-refugia)
# - draw Figure S1 (Correlation among pressures)
# - draw Figure S2 (Density distribution of pressure raw values)
# - draw Figure S10 (Comparison of frequency of occurrence of top pressures between regions)
# - draw Figure S11 (Pressure intensity when top-ranked)
# NOTE(review): this analysis script appears twice in this file; the two
# copies are identical.
rm(list = ls())
library(tidyverse)
library(sf)
library(here)
library(corrgram)
library(RColorBrewer)
library(prettyR)
# Loads the object `allreefs` (one row per reef cell) into the environment
load(here("data", "allreefs.RData"))
# Names of the six pressure plus cumulative impact score in the columns of allreefs
vthreats <- c(
  "grav_NC", "pop_count", "num_ports",
  "reef_value", "sediment", "nutrient", "cumul_score"
)
# Names of the six pressures plus cumulative impact score
threat_names <- c(
  "Fishing", "Coastal pop", "Industrial dev",
  "Tourism", "Sediments", "Nitrogen", "Cumulative impact score"
)
# Colors: colorblind friendly palette
col_threats <- brewer.pal(6, "Set2")
colors <- c(
  "Fishing" = col_threats[1],
  "Coastal\npopulation" = col_threats[2],
  "Industr_dev" = col_threats[3],
  "Tourism" = col_threats[4],
  "Sediments" = col_threats[5],
  "Nitrogen" = col_threats[6],
  "Cumulative" = "darkgrey"
)
# Titles
title.text <- c(
  "Fishing", "Coastal\npopulation", "Industrial\ndevelopment",
  "Tourism", "Sediments", "Nitrogen", "Cumulative"
)
# Just change the name to be more comfortable
data <- allreefs
rm(allreefs)
# Calculate global medians (named vector, one entry per column in vthreats)
# NOTE(review): prefer seq_along(vthreats) and na.rm = TRUE here.
glob.median <- vector()
for (i in 1:length(vthreats)) {
  indicator <- vthreats[i]
  glob.median[i] <- median(as.data.frame(data)[, indicator], na.rm = T)
}
names(glob.median) <- vthreats
# Theme (theme_update() changes the global ggplot2 theme for all figures)
ggplot2::theme_set(theme_minimal(10))
ggplot2::theme_update(
  axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7),
  axis.title = element_blank(),
  legend.text = element_text(size = 7),
  legend.key.size = unit(.5, "cm"),
  legend.position = "bottom",
  plot.title = element_text(hjust = 0.5, size = 10)
)
# MAIN RESULTS
# Frequency of occurrence of top pressures (counts, then proportions)
table(data$top_threat)
table(data$top_threat) / sum(table(data$top_threat))
#######################################################
# FIGURE 3. Individual pressures: regional comparisons
# One boxplot PNG per pressure plus the cumulative score (i = 7).
for (i in 1:7) {
  if (i < 7) indicator <- vthreats[i] else indicator <- "cumul_score"
  png(paste0("Boxplot_", indicator, ".png"), width = 10, height = 4, units = "cm", res = 300)
  a <-
    # This plots regions ordered by their median value:
    # ggplot2::ggplot(data, aes(y = reorder(Region, !!sym(indicator), FUN = median, na.rm = T), x = !!sym(indicator))) +
    # This plots regions in alphabetical order:
    ggplot2::ggplot(data, aes(y = Region, x = !!sym(indicator))) +
    ggplot2::geom_boxplot(fill = colors[i], size = 0.1, outlier.size = 0.1, show.legend = F) +
    ggplot2::geom_vline(aes(xintercept = glob.median[i]), linetype = "dashed", size = 0.25, show.legend = F) +
    ggplot2::scale_x_continuous(limits = c(0, 1)) +
    ggplot2::labs(title = title.text[i])
  print(a)
  dev.off()
}
rm(a, i, indicator)
# Composed in powerpoint and saved as Figure 3
#######################################################
# FIGURE 4 (top). Density distribution of pressure percentiles in refugia vs non-refugia
# Retain only pressure percentiles and stack all pressure + refugia/non-refugia data in the same dataframe
data1 <- as.data.frame(data)[, c("is.bcu", vthreats[1:6])]
data2 <- prettyR::rep_n_stack(data1, to.stack = vthreats[1:6], stack.names = c("indicator", "value"))
# Reorder levels of pressures
data2$indicator <- factor(data2$indicator, levels = vthreats[1:6])
# Add title text (pretty name for pressures, used as facet labels)
data2$title.text <- factor(title.text[data2$indicator], levels = title.text[1:6])
# Plot Figure 4 top
ggplot2::theme_update(axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7)) # ,
# axis.text.y = element_blank())
png(paste0("Figure 4_top.png"), width = 18.5, height = 13, units = "cm", res = 300)
a <- ggplot2::ggplot(data2, aes(x = value, fill = is.bcu)) +
  ggplot2::geom_density(na.rm = T, alpha = 0.5) +
  ggplot2::facet_wrap(vars(title.text), scales = "free_y") +
  ggplot2::scale_fill_brewer(name = "", type = "qual")
print(a)
dev.off()
rm(data1, data2, a)
#######################################################
# FIGURE 4 (bottom). Comparison of frequency of occurrence of top pressures in refugia vs non-refugia
ggplot2::theme_update(
  axis.text.y = element_text(hjust = 1, vjust = 0.5, size = 7),
  axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7)
)
# Calculate the frequency of occurrence of each pressure as top ranked in refugia vs non refugia
ta.bcu <- as.data.frame(table(data$is.bcu,
  data$top_threat,
  dnn = c("refugia", "threat")
))
# Add pressure names as factors
ta.bcu$top_threat <- threat_names[ta.bcu$threat]
ta.bcu$top_threat <- factor(ta.bcu$top_threat, levels = threat_names)
ta.bcu
# Plot Figure 4 (bottom): horizontal stacked bars of relative frequencies
png(paste0("Figure 4_bottom.png"), width = 10, height = 3.5, units = "cm", res = 300)
a <- ggplot2::ggplot(ta.bcu, aes_string(y = "refugia", x = "Freq", fill = "top_threat")) +
  ggplot2::geom_col(position = position_fill(reverse = T)) +
  ggplot2::scale_fill_manual(values = col_threats, name = "")
print(a)
dev.off()
# In power point, compose with Figure_4_top to create Figure 4
# # Chi-square tests to compare the frequency of occurrence of each pressure as top-ranked in refugia vs non-refugia
# nonBCU.tt <- table(data$top_threat[data$is.bcu == "non-refugia"])
# BCU.tt <- table(data$top_threat[data$is.bcu == "refugia"])
# chisq.test(rbind(nonBCU.tt, BCU.tt))
# rm(ta.bcu, a, nonBCU.tt, BCU.tt)
#######################################################
# FIGURE S1 - Correlation among pressures (Spearman correlogram)
png(paste0("Figure S1.png"), width = 12, height = 10, units = "cm", res = 300)
data_corr <- as.data.frame(data)[,vthreats[c(1:6)]]
names(data_corr) <- threat_names[1:6]
corrgram::corrgram(data_corr,
                   upper.panel=panel.cor,
                   lower.panel = NULL, cor.method="spearman")
dev.off()
#######################################################
# FIGURE S2 - Density distribution of pressure raw values
# Retain only pressure raw values ("_raw" columns)
vthreats_raw <- paste0(vthreats[1:6],"_raw")
data1 <- as.data.frame(data)[, c("OBJECTID",vthreats_raw)]
# Update graphics layout
ggplot2::theme_update(axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7),
                      axis.title = element_blank())
# Loop on the six pressures
quantiles <- array(NA,dim=c(6,6))
for (i.threat in 1 : 6) {
  # Define pressure name
  indicator <- vthreats_raw[i.threat]
  # Retain only the values of that pressure
  data_indicator <- data1[,c("OBJECTID",indicator)]
  # Set the transformation: identity (for num_ports) or log10(x+1) for all the others
  if (indicator == "num_ports_raw") {
    data_indicator$value <- data_indicator[,2]
    transformation <- "identity"
  } else {
    data_indicator$value <- data_indicator[,2]+1
    transformation <- "log10"
  }
  # Calculate quantiles on the transformed values
  quantiles_ithreat <- quantile(data_indicator$value,seq(0.1, 0.9, 0.2),na.rm=T)
  # Plot density distribution with quantiles
  png(paste0("Figure S2_",indicator,".png"), width = 6, height = 6, units = "cm", res = 300)
  a <- ggplot(data_indicator, aes(x=value)) +
    geom_density(na.rm = T) +
    scale_x_continuous(trans=transformation) +
    geom_vline(xintercept=quantiles_ithreat, colour=c("red","purple","blue","purple","red"), linetype=2,alpha=0.5) +
    ggtitle(title.text[i.threat])
  print(a)
  dev.off()
  # Calculate quantiles in the original (untransformed) scale
  quantiles[i.threat,] <- quantile(data_indicator[,2],
                                   c(0.1,0.3,0.5,0.7,0.9,1),
                                   na.rm=T)
}
colnames(quantiles) <- c("10%","30%","50%","70%","90%","100%")
rownames(quantiles) <- title.text[1:6]
rownames(quantiles)[2] <- "Coastal population"
rownames(quantiles)[3] <- "Industrial development"
round(quantiles,2)
#######################################################
# FIGURE S10. Comparison of frequency of occurrence of top pressures between regions
ggplot2::theme_update(axis.text.y = element_text(hjust = 1, vjust = 0.5, size = 7))
# ta data frame gives how many reef cells have a given top pressure in each region
ta <- as.data.frame(table(data$Region,
  data$top_threat,
  dnn = c("Region", "threat")
))
ta$top_threat <- threat_names[ta$threat]
ta$top_threat <- factor(ta$top_threat, levels = threat_names)
# Plot Figure S10
png(paste0("Figure S10.png"), width = 10, height = 8, units = "cm", res = 300)
a <- ggplot2::ggplot(ta, aes_string(y = "Region", x = "Freq", fill = "top_threat")) +
  ggplot2::geom_col(position = position_fill(reverse = T)) +
  ggplot2::scale_fill_manual(values = col_threats, name = "")
print(a)
dev.off()
# Calculate relative frequency of occurrence of each pressure as top ranked
ta$n.reef <- table(data$Region)[ta$Region]
ta$Freq.rel <- ta$Freq / ta$n.reef
# Percent of reef cells where fishing is a top pressure, for each region
ta[ta$top_threat == "Fishing", c("Region", "Freq.rel")]
# Percent of reef cells where water pollution (nutrients + sediments) is a top pressure, for each region
data.frame(
  Region = levels(ta$Region),
  water.pollution = as.numeric(ta$Freq.rel[ta$threat == 5] + ta$Freq.rel[ta$threat == 6])
)
rm(a, ta)
#######################################################
# FIGURE S11. Pressure intensity when top-ranked
# Build a dataframe where each reef cell has the value of the threat that is top-ranked
a <- data.frame(
  value = tibble::as_tibble(data) %>% dplyr::filter(top_threat == 1) %>% dplyr::select(!!sym(vthreats[1])) %>% as_vector(),
  threat = vthreats[1]
)
for (i in 2:6) {
  a <- rbind(
    a,
    data.frame(
      value = tibble::as_tibble(data) %>% dplyr::filter(top_threat == i) %>% dplyr::select(!!sym(vthreats[i])) %>% as_vector(),
      threat = vthreats[i]
    )
  )
}
a$threat <- factor(a$threat, levels=c("grav_NC","pop_count","num_ports","reef_value","sediment","nutrient"))
# Boxplots of the pressure percentiles that are top ranked
ggplot2::theme_update(axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5, size = 7))
png(paste0("Figure S11.png"), width = 10, height = 5, units = "cm", res = 300)
a.plot <-
  ggplot2::ggplot(a, aes_string(x = "value", y = "threat", fill = "threat")) +
  ggplot2::geom_boxplot(size = 0.1, outlier.size = 0.1, show.legend = F) +
  ggplot2::scale_x_continuous(limits = c(0, 1)) +
  ggplot2::scale_y_discrete(labels = threat_names) +
  ggplot2::scale_fill_manual(values = col_threats, name = "")
print(a.plot)
dev.off()
# Summarize the statistics of each pressure when it is top-ranked
tapply(a$value, a$threat, summary)
rm(a, a.plot, i)
|
# Initialise the master data list for an ICM (individual contact model) run.
#
# param:   list of model parameters; must contain `groups` (1 or 2) and
#          optionally `ages` (FALSE, or a list with element `percent`
#          giving the age-group sampling probabilities).
# init:    list of initial compartment counts (numeric entries; group-2
#          counts carry the ".g2" suffix in their names).
# control: list of control settings carried through to the simulation.
# Returns the master list `dat` with param/init/control, the per-individual
# attributes in dat$attr, statuses seeded by init_status.icm(), and the
# t = 1 prevalence summary added by get_prev.icm().
initialize.icm <- function(param, init, control) {
  ## Master List for Data ##
  dat <- list()
  dat$param <- param
  dat$init <- init
  dat$control <- control

  # Set attributes
  dat$attr <- list()
  # Keep only the numeric initial counts; their sum is the population size.
  # is.numeric() also accepts integer-typed counts, which the former
  # class(x) == "numeric" comparison silently dropped.
  numeric.init <- init[vapply(init, is.numeric, logical(1))]
  n <- do.call("sum", numeric.init)
  dat$attr$active <- rep(1, n)
  if (!isFALSE(param$ages)) {
    # Assign every individual an age group with the supplied probabilities.
    dat$attr$age_group <- sample(seq_along(param$ages$percent), n,
                                 replace = TRUE, prob = param$ages$percent)
  } else {
    dat$attr$age_group <- NULL
  }
  if (dat$param$groups == 1) {
    dat$attr$group <- rep(1, n)
  } else {
    # Group-2 counts are identified by the literal ".g2" suffix;
    # fixed = TRUE stops "." acting as a regex wildcard.
    g2inits <- grep(".g2", names(numeric.init), fixed = TRUE)
    g1inits <- setdiff(seq_along(numeric.init), g2inits)
    # Sum the counts from numeric.init itself: indexing the unfiltered
    # `init` with positions computed on the filtered list could select
    # the wrong elements when `init` holds non-numeric entries.
    nG1 <- sum(unlist(numeric.init[g1inits]))
    nG2 <- sum(unlist(numeric.init[g2inits]))
    dat$attr$group <- c(rep(1, nG1), rep(2, max(0, nG2)))
  }

  # Initialize status and infection time
  dat <- init_status.icm(dat)

  # Summary out list
  dat <- get_prev.icm(dat, at = 1)
  return(dat)
}
# Seed the initial disease status of every individual and create the
# event-time attribute vectors used by the SEIQHRF family of models.
#
# dat: master simulation list built by initialize.icm(); reads
#      dat$control$type (model type string), dat$param$groups (1 or 2),
#      dat$attr$group (group id per individual) and the initial
#      compartment counts in dat$init.
# Returns dat with dat$attr$status and the *Time attribute vectors added.
#
# NOTE(review): statuses are filled by successive sample() calls, so which
# individuals land in each compartment depends on the RNG stream order;
# do not reorder these calls if set.seed() reproducibility matters.
init_status.icm <- function(dat) {
  # Variables ---------------------------------------------------------------
  type <- dat$control$type
  group <- dat$attr$group
  nGroups <- dat$param$groups
  nG1 <- sum(group == 1)
  nG2 <- sum(group == 2)
  # Initial compartment counts: e = exposed, i = infected, q = quarantined,
  # h = needing hospitalisation, r = recovered, f = case fatality;
  # the ".g2" variants are the group-2 counts.
  e.num <- dat$init$e.num
  i.num <- dat$init$i.num
  q.num <- dat$init$q.num
  h.num <- dat$init$h.num
  r.num <- dat$init$r.num
  f.num <- dat$init$f.num
  e.num.g2 <- dat$init$e.num.g2
  i.num.g2 <- dat$init$i.num.g2
  q.num.g2 <- dat$init$q.num.g2
  h.num.g2 <- dat$init$h.num.g2
  r.num.g2 <- dat$init$r.num.g2
  f.num.g2 <- dat$init$f.num.g2
  # Status ------------------------------------------------------------------
  # Everyone starts susceptible ("s"); each compartment supported by the
  # model type is then filled by sampling without replacement from the
  # individuals of the relevant group that are still susceptible.
  status <- rep("s", nG1 + nG2)
  status[sample(which(group == 1), size = i.num)] <- "i"
  if (nGroups == 2) {
    status[sample(which(group == 2), size = i.num.g2)] <- "i"
  }
  if (type %in% c("SIR", "SEIR", "SEIQHR", "SEIQHRF")) {
    status[sample(which(group == 1 & status == "s"), size = r.num)] <- "r"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = r.num.g2)] <- "r"
    }
  }
  if (type %in% c("SEIR", "SEIQHR", "SEIQHRF")) {
    status[sample(which(group == 1 & status == "s"), size = e.num)] <- "e"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = e.num.g2)] <- "e"
    }
  }
  if (type %in% c("SEIQHR", "SEIQHRF")) {
    status[sample(which(group == 1 & status == "s"), size = q.num)] <- "q"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = q.num.g2)] <- "q"
    }
    status[sample(which(group == 1 & status == "s"), size = h.num)] <- "h"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = h.num.g2)] <- "h"
    }
  }
  if (type %in% c("SEIQHRF")) {
    status[sample(which(group == 1 & status == "s"), size = f.num)] <- "f"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = f.num.g2)] <- "f"
    }
  }
  dat$attr$status <- status
  # Exposure Time ----------------------------------------------------------
  idsExp <- which(status == "e")
  expTime <- rep(NA, length(status))
  # leave exposure time uninitialised for now, and
  # just set to NA at start.
  dat$attr$expTime <- expTime
  # Infection Time ----------------------------------------------------------
  idsInf <- which(status == "i")
  infTime <- rep(NA, length(status))
  dat$attr$infTime <- infTime # overwritten below
  # Recovery Time ----------------------------------------------------------
  idsRecov <- which(status == "r")
  recovTime <- rep(NA, length(status))
  dat$attr$recovTime <- recovTime
  # Need for Hospitalisation Time ----------------------------------------------------------
  idsHosp <- which(status == "h")
  hospTime <- rep(NA, length(status))
  dat$attr$hospTime <- hospTime
  # Quarantine Time ----------------------------------------------------------
  idsQuar <- which(status == "q")
  quarTime <- rep(NA, length(status))
  dat$attr$quarTime <- quarTime
  # Hospital-need cessation Time ----------------------------------------------------------
  dischTime <- rep(NA, length(status))
  dat$attr$dischTime <- dischTime
  # Case-fatality Time ----------------------------------------------------------
  fatTime <- rep(NA, length(status))
  dat$attr$fatTime <- fatTime
  # If vital=TRUE, infTime is a uniform draw over the duration of infection
  # note the initial infections may have negative infTime!
  # NOTE(review): the block below is deliberately disabled (if (FALSE)); it
  # also calls ssample(), which is not defined in this file - presumably a
  # package helper. Kept for reference only.
  if (FALSE) {
    # not sure what the following section is trying to do, but it
    # mucks up the gamma-distributed incubation periods, so set
    # infTime for initial infected people to t=1 instead
    if (dat$param$vital == TRUE && dat$param$di.rate > 0) {
      infTime[idsInf] <- -rgeom(n = length(idsInf), prob = dat$param$di.rate) + 2
    } else {
      if (dat$control$type == "SI" || dat$param$rec.rate == 0) {
        # infTime a uniform draw over the number of sim time steps
        infTime[idsInf] <- ssample(1:(-dat$control$nsteps + 2),
                                   length(idsInf), replace = TRUE)
      } else {
        if (nGroups == 1) {
          infTime[idsInf] <- ssample(1:(-round(1 / dat$param$rec.rate) + 2),
                                     length(idsInf), replace = TRUE)
        }
        if (nGroups == 2) {
          infG1 <- which(status == "i" & group == 1)
          infTime[infG1] <- ssample(1:(-round(1 / dat$param$rec.rate) + 2),
                                    length(infG1), replace = TRUE)
          infG2 <- which(status == "i" & group == 2)
          infTime[infG2] <- ssample(1:(-round(1 / dat$param$rec.rate.g2) + 2),
                                    length(infG2), replace = TRUE)
        }
      }
    }
  }
  # All initial infections start at t = 1 (simulation start)
  infTime[idsInf] <- 1
  dat$attr$infTime <- infTime
  return(dat)
}
|
/R/ext_exp/_icm.mod.init.seiqhrf.R
|
no_license
|
franzbischoff/covid-19-pt-north
|
R
| false
| false
| 5,842
|
r
|
# Initialise the master data list for an ICM (individual contact model) run.
#
# param:   list of model parameters; must contain `groups` (1 or 2) and
#          optionally `ages` (FALSE, or a list with element `percent`
#          giving the age-group sampling probabilities).
# init:    list of initial compartment counts (numeric entries; group-2
#          counts carry the ".g2" suffix in their names).
# control: list of control settings carried through to the simulation.
# Returns the master list `dat` with param/init/control, the per-individual
# attributes in dat$attr, statuses seeded by init_status.icm(), and the
# t = 1 prevalence summary added by get_prev.icm().
initialize.icm <- function(param, init, control) {
  ## Master List for Data ##
  dat <- list()
  dat$param <- param
  dat$init <- init
  dat$control <- control

  # Set attributes
  dat$attr <- list()
  # Keep only the numeric initial counts; their sum is the population size.
  # is.numeric() also accepts integer-typed counts, which the former
  # class(x) == "numeric" comparison silently dropped.
  numeric.init <- init[vapply(init, is.numeric, logical(1))]
  n <- do.call("sum", numeric.init)
  dat$attr$active <- rep(1, n)
  if (!isFALSE(param$ages)) {
    # Assign every individual an age group with the supplied probabilities.
    dat$attr$age_group <- sample(seq_along(param$ages$percent), n,
                                 replace = TRUE, prob = param$ages$percent)
  } else {
    dat$attr$age_group <- NULL
  }
  if (dat$param$groups == 1) {
    dat$attr$group <- rep(1, n)
  } else {
    # Group-2 counts are identified by the literal ".g2" suffix;
    # fixed = TRUE stops "." acting as a regex wildcard.
    g2inits <- grep(".g2", names(numeric.init), fixed = TRUE)
    g1inits <- setdiff(seq_along(numeric.init), g2inits)
    # Sum the counts from numeric.init itself: indexing the unfiltered
    # `init` with positions computed on the filtered list could select
    # the wrong elements when `init` holds non-numeric entries.
    nG1 <- sum(unlist(numeric.init[g1inits]))
    nG2 <- sum(unlist(numeric.init[g2inits]))
    dat$attr$group <- c(rep(1, nG1), rep(2, max(0, nG2)))
  }

  # Initialize status and infection time
  dat <- init_status.icm(dat)

  # Summary out list
  dat <- get_prev.icm(dat, at = 1)
  return(dat)
}
# Assign initial health states and event-time attribute vectors on a new
# ICM simulation object.
#
# Status codes follow the compartment names used in dat$init (s = susceptible,
# e = exposed, i = infectious, q = quarantined, h = needing hospitalisation,
# r = recovered, f = case fatality).
#
# Sampling is sequential: infectious individuals are drawn first, and every
# later draw is restricted to those still in state "s", so compartments never
# overlap and the order of the blocks below matters.
#
# @param dat Master simulation list (see initialize.icm) carrying $param,
#   $init, $control and $attr.
# @return `dat` with $attr$status plus expTime/infTime/recovTime/hospTime/
#   quarTime/dischTime/fatTime vectors added (one element per individual).
init_status.icm <- function(dat) {
  # Variables ---------------------------------------------------------------
  # Pull model type, group labels and the per-compartment initial counts into
  # locals (".g2" suffix = second group).
  type <- dat$control$type
  group <- dat$attr$group
  nGroups <- dat$param$groups
  nG1 <- sum(group == 1)
  nG2 <- sum(group == 2)
  e.num <- dat$init$e.num
  i.num <- dat$init$i.num
  q.num <- dat$init$q.num
  h.num <- dat$init$h.num
  r.num <- dat$init$r.num
  f.num <- dat$init$f.num
  e.num.g2 <- dat$init$e.num.g2
  i.num.g2 <- dat$init$i.num.g2
  q.num.g2 <- dat$init$q.num.g2
  h.num.g2 <- dat$init$h.num.g2
  r.num.g2 <- dat$init$r.num.g2
  f.num.g2 <- dat$init$f.num.g2
  # Status ------------------------------------------------------------------
  # Everyone starts susceptible; each sample() below moves individuals out of
  # "s" without replacement, and model types that lack a compartment skip it.
  status <- rep("s", nG1 + nG2)
  status[sample(which(group == 1), size = i.num)] <- "i"
  if (nGroups == 2) {
    status[sample(which(group == 2), size = i.num.g2)] <- "i"
  }
  if (type %in% c("SIR", "SEIR", "SEIQHR", "SEIQHRF")) {
    status[sample(which(group == 1 & status == "s"), size = r.num)] <- "r"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = r.num.g2)] <- "r"
    }
  }
  if (type %in% c("SEIR", "SEIQHR", "SEIQHRF")) {
    status[sample(which(group == 1 & status == "s"), size = e.num)] <- "e"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = e.num.g2)] <- "e"
    }
  }
  if (type %in% c("SEIQHR", "SEIQHRF")) {
    status[sample(which(group == 1 & status == "s"), size = q.num)] <- "q"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = q.num.g2)] <- "q"
    }
    status[sample(which(group == 1 & status == "s"), size = h.num)] <- "h"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = h.num.g2)] <- "h"
    }
  }
  if (type %in% c("SEIQHRF")) {
    status[sample(which(group == 1 & status == "s"), size = f.num)] <- "f"
    if (nGroups == 2) {
      status[sample(which(group == 2 & status == "s"), size = f.num.g2)] <- "f"
    }
  }
  dat$attr$status <- status
  # Event-time vectors: all start as NA (logical NA here; presumably
  # overwritten with numeric times by the simulation modules -- confirm).
  # Exposure Time ----------------------------------------------------------
  idsExp <- which(status == "e")
  expTime <- rep(NA, length(status))
  # leave exposure time uninitialised for now, and
  # just set to NA at start.
  dat$attr$expTime <- expTime
  # Infection Time ----------------------------------------------------------
  idsInf <- which(status == "i")
  infTime <- rep(NA, length(status))
  dat$attr$infTime <- infTime # overwritten below
  # Recovery Time ----------------------------------------------------------
  idsRecov <- which(status == "r")
  recovTime <- rep(NA, length(status))
  dat$attr$recovTime <- recovTime
  # Need for Hospitalisation Time ----------------------------------------------------------
  idsHosp <- which(status == "h")
  hospTime <- rep(NA, length(status))
  dat$attr$hospTime <- hospTime
  # Quarantine Time ----------------------------------------------------------
  idsQuar <- which(status == "q")
  quarTime <- rep(NA, length(status))
  dat$attr$quarTime <- quarTime
  # Hospital-need cessation Time ----------------------------------------------------------
  dischTime <- rep(NA, length(status))
  dat$attr$dischTime <- dischTime
  # Case-fatality Time ----------------------------------------------------------
  fatTime <- rep(NA, length(status))
  dat$attr$fatTime <- fatTime
  # If vital=TRUE, infTime is a uniform draw over the duration of infection
  # note the initial infections may have negative infTime!
  # The whole branch below is deliberately disabled with if (FALSE); the
  # author's rationale is in the comment inside.
  if (FALSE) {
    # not sure what the following section is trying to do, but it
    # mucks up the gamma-distributed incubation periods, so set
    # infTime for initial infected people to t=1 instead
    if (dat$param$vital == TRUE && dat$param$di.rate > 0) {
      infTime[idsInf] <- -rgeom(n = length(idsInf), prob = dat$param$di.rate) + 2
    } else {
      if (dat$control$type == "SI" || dat$param$rec.rate == 0) {
        # infTime a uniform draw over the number of sim time steps
        # (ssample is a project-local sampling helper -- presumably a
        # length-safe sample(); confirm against its definition.)
        infTime[idsInf] <- ssample(1:(-dat$control$nsteps + 2),
                                   length(idsInf), replace = TRUE)
      } else {
        if (nGroups == 1) {
          infTime[idsInf] <- ssample(1:(-round(1 / dat$param$rec.rate) + 2),
                                     length(idsInf), replace = TRUE)
        }
        if (nGroups == 2) {
          infG1 <- which(status == "i" & group == 1)
          infTime[infG1] <- ssample(1:(-round(1 / dat$param$rec.rate) + 2),
                                    length(infG1), replace = TRUE)
          infG2 <- which(status == "i" & group == 2)
          infTime[infG2] <- ssample(1:(-round(1 / dat$param$rec.rate.g2) + 2),
                                    length(infG2), replace = TRUE)
        }
      }
    }
  }
  # All initially infectious individuals are treated as infected at t = 1.
  infTime[idsInf] <- 1
  dat$attr$infTime <- infTime
  return(dat)
}
|
VecPermutFun <- function(n.ages, n.classes, reps, alpha, gamma, intro.cost, sex.ratio,
samples.to.draw, tot.chains, joint.posterior.coda, posterior.names)
{
# intro.out.elast <- fade.out.elast <- rep(NA, reps)
# 19 age classes; 2 environmental states.
fade.out.elast <- intro.elast <- rep(NA, reps)
p = n.classes
s = n.ages
for(i in 1:reps){
# # P is the vec-permutation matrix (sensu Hunter and Caswell 2005 Ecological Modelling)
# P <- matrix(NA, nrow = 19 * 2, ncol = 19 * 2)
# e.full <- array(NA, dim = c(38, 38, 38))
# for(k in 1:2){
# for(j in 1:19){
# e.kj <- matrix(0, nrow = 2, ncol = 19)
# e.kj[k, j] <- 1
# e.full[, , (k - 1) * 19 + j ] <- kronecker(e.kj, t(e.kj))
# }}
# P <- apply(e.full, c(1, 2), sum)
P = matrix(0, s * p, s * p)
for(l in 1:s){
for(j in 1:p){
E = matrix(0, s, p)
E[l, j] = 1
E
P = P + kronecker(E, t(E))
}
}
# B is block diagonal, with 2 19x19 blocks for the 2 environmental states.
healthy.leslie <- UpdateLeslieFun(current.state = "healthy", sex.ratio, samples.to.draw, tot.chains, joint.posterior.coda, posterior.names, intro.cost)
spillover.leslie <- UpdateLeslieFun(current.state = "spillover", sex.ratio, samples.to.draw, tot.chains, joint.posterior.coda, posterior.names, intro.cost)
endemic.leslie <- UpdateLeslieFun(current.state = "infected", sex.ratio, samples.to.draw, tot.chains, joint.posterior.coda, posterior.names, intro.cost)
leslie.list <- list(healthy.leslie, spillover.leslie, endemic.leslie)
B <- bdiag(leslie.list)
# M is block diagonal with 19 2x2 blocks for the 19 demographic states
small.M <- cbind(c(1 - alpha, alpha, 0), c(gamma, 0, 1 - gamma), c(gamma, 0, 1 - gamma)) # set up to be columns into rows, so that columns of M sum to 1.
M <- bdiag(list(small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M))
# A is population projection matrix with environmental stochasticity
A <- B %*% t(P) %*% M %*% P
A_eigens <- eigen(A) # complex????
S_a <- sensitivity(A)
# Sensitivity (environmental transitions)
# assume disease status is updated before demography.
S_m <- P %*% t(B) %*% S_a %*% t(P)
# round(S_m, 2)[1:10, 1:10]
E_m <- (1 / Re(A_eigens$value)[1]) * M * S_m # regular * because it's a Hadamard production
# E_m <- (1 / .92) * M * S_m # regular * because it's a Hadamard production
# round(E_m, 2)[1:10, 1:10]
# compare elasticities of fade-out to elasticity of reintroduction
# even.indices <- seq(1:19) * 2
# odd.indices <- seq(1:19) * 2 - 1
upper.left.indices <- seq(1 : 19) * 3 - 2
# elasticites are sum (off-diagonal elasts in 2x2 blocks) - sum(main-diag elasts)
fade.out.elast[i] <- sum(E_m[upper.left.indices, upper.left.indices + 1] + E_m[upper.left.indices, upper.left.indices + 2]) - sum(E_m[upper.left.indices + 2, upper.left.indices + 1] + E_m[upper.left.indices + 2, upper.left.indices + 2])
# intro.indices <- seq(1:19) * 2 - 1
intro.elast[i] <- sum(E_m[upper.left.indices, upper.left.indices]) - sum(E_m[upper.left.indices + 1, upper.left.indices])
}
return(list(fade.out.elast = fade.out.elast, intro.elast = intro.elast))
}
|
/R/VecPermutFun.R
|
no_license
|
kmanlove/BighornIPM
|
R
| false
| false
| 3,462
|
r
|
# Monte-Carlo elasticity analysis for a vec-permutation matrix model
# (sensu Hunter and Caswell 2005, Ecological Modelling).
#
# For each of `reps` replicates this routine:
#   1. builds the vec-permutation matrix P for s = n.ages stages and
#      p = n.classes environmental states,
#   2. draws three Leslie matrices (healthy / spillover / endemic-infected)
#      from the joint posterior via UpdateLeslieFun() and stacks them
#      block-diagonally into B,
#   3. builds the environmental-transition matrix M from the 3x3 block
#      defined by alpha and gamma (columns sum to 1),
#   4. forms the projection matrix A = B %*% t(P) %*% M %*% P and computes
#      elasticities of disease fade-out vs. (re)introduction transitions.
#
# Most arguments are forwarded unchanged to UpdateLeslieFun(); see that
# function for their meaning. Returns a list with numeric vectors
# fade.out.elast and intro.elast, each of length `reps`.
#
# NOTE(review): bdiag() and sensitivity() are not defined here -- presumably
# Matrix::bdiag and popbio::sensitivity; confirm the intended packages.
VecPermutFun <- function(n.ages, n.classes, reps, alpha, gamma, intro.cost, sex.ratio,
                         samples.to.draw, tot.chains, joint.posterior.coda, posterior.names)
{
  # intro.out.elast <- fade.out.elast <- rep(NA, reps)
  # 19 age classes; 2 environmental states.
  fade.out.elast <- intro.elast <- rep(NA, reps)
  p = n.classes
  s = n.ages
  for(i in 1:reps){
    # # P is the vec-permutation matrix (sensu Hunter and Caswell 2005 Ecological Modelling)
    # P <- matrix(NA, nrow = 19 * 2, ncol = 19 * 2)
    # e.full <- array(NA, dim = c(38, 38, 38))
    # for(k in 1:2){
    #   for(j in 1:19){
    #     e.kj <- matrix(0, nrow = 2, ncol = 19)
    #     e.kj[k, j] <- 1
    #     e.full[, , (k - 1) * 19 + j ] <- kronecker(e.kj, t(e.kj))
    #   }}
    # P <- apply(e.full, c(1, 2), sum)
    # P = sum over (l, j) of kronecker(E_lj, t(E_lj)), the standard
    # vec-permutation construction.
    P = matrix(0, s * p, s * p)
    for(l in 1:s){
      for(j in 1:p){
        E = matrix(0, s, p)
        E[l, j] = 1
        # NOTE(review): this bare 'E' expression has no effect (its value is
        # discarded); it looks like leftover debugging output.
        E
        P = P + kronecker(E, t(E))
      }
    }
    # B is block diagonal, with 2 19x19 blocks for the 2 environmental states.
    healthy.leslie <- UpdateLeslieFun(current.state = "healthy", sex.ratio, samples.to.draw, tot.chains, joint.posterior.coda, posterior.names, intro.cost)
    spillover.leslie <- UpdateLeslieFun(current.state = "spillover", sex.ratio, samples.to.draw, tot.chains, joint.posterior.coda, posterior.names, intro.cost)
    endemic.leslie <- UpdateLeslieFun(current.state = "infected", sex.ratio, samples.to.draw, tot.chains, joint.posterior.coda, posterior.names, intro.cost)
    leslie.list <- list(healthy.leslie, spillover.leslie, endemic.leslie)
    B <- bdiag(leslie.list)
    # M is block diagonal with 19 2x2 blocks for the 19 demographic states
    small.M <- cbind(c(1 - alpha, alpha, 0), c(gamma, 0, 1 - gamma), c(gamma, 0, 1 - gamma)) # set up to be columns into rows, so that columns of M sum to 1.
    M <- bdiag(list(small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M, small.M))
    # A is population projection matrix with environmental stochasticity
    A <- B %*% t(P) %*% M %*% P
    A_eigens <- eigen(A) # complex????
    S_a <- sensitivity(A)
    # Sensitivity (environmental transitions)
    # assume disease status is updated before demography.
    S_m <- P %*% t(B) %*% S_a %*% t(P)
    # round(S_m, 2)[1:10, 1:10]
    # Elasticity of lambda to M: scale by 1 / dominant eigenvalue (real part).
    E_m <- (1 / Re(A_eigens$value)[1]) * M * S_m # regular * because it's a Hadamard production
    # E_m <- (1 / .92) * M * S_m # regular * because it's a Hadamard production
    # round(E_m, 2)[1:10, 1:10]
    # compare elasticities of fade-out to elasticity of reintroduction
    # even.indices <- seq(1:19) * 2
    # odd.indices <- seq(1:19) * 2 - 1
    # NOTE(review): 19 and 3 are hard-coded here, which assumes n.ages == 19
    # and n.classes == 3 despite both being parameters -- confirm.
    upper.left.indices <- seq(1 : 19) * 3 - 2
    # elasticites are sum (off-diagonal elasts in 2x2 blocks) - sum(main-diag elasts)
    fade.out.elast[i] <- sum(E_m[upper.left.indices, upper.left.indices + 1] + E_m[upper.left.indices, upper.left.indices + 2]) - sum(E_m[upper.left.indices + 2, upper.left.indices + 1] + E_m[upper.left.indices + 2, upper.left.indices + 2])
    # intro.indices <- seq(1:19) * 2 - 1
    intro.elast[i] <- sum(E_m[upper.left.indices, upper.left.indices]) - sum(E_m[upper.left.indices + 1, upper.left.indices])
  }
  return(list(fade.out.elast = fade.out.elast, intro.elast = intro.elast))
}
|
#~ ,''''''''''''''.
#~~ / USEPA FISH \
#~ >~',*> < TOX TRANSLATOR )
#~~ \ v1.0 "Doloris" /
#~ `..............'
#~~
#~ N. Pollesch - pollesch.nathan@epa.gov
#' Kernel Component - Reproduction Kernel
#'
#' Integral projection models (Ellner et al., 2016) have the general structure of:
#' \deqn{n(z1,t+1) = \integral{ (P(z1,z) + R(z1,z))*n(z,t) dz}}
#' Here we define 'R' as 'ReproductionKernel' to b explicit, where it is the product size-dependent reproduction probability, \code{\link{Spawning}}, Size-dependent hatchlings per spawn, \code{\link{Fecundity}}, hatchling growth-transition CDF, \code{\link{HatchlingGrowth}}, and size-dependent survival, \code{\link{Survival}}.
#' \deqn{ReproductionKernel(z1,z,bt,pars,date) = sex_ratio * Spawning(z,pars,date) * Fecundity(z,pars,date)*HatchlingGrowth(z1,bt,pars,date)*Survival(z_hatch,pars,date)}
#'
#'
#' @param z1 Size at the end of the timestep, the size being transitioned to [float]
#' @param z Size at the beginning of the timestep [float]
#' @param bt Population biomass at the beginning of the timestep [float]
#' @param pars Data.frame containing the date-indexed parameters[data.frame]
#' @param date Ordinal day to reference proper 'pars' date-indexed parameters [integer]
#' @return Size-dependent reproduction kernel
#' @export
#' @note This function is provided as a standard to pass to \code{\link{SimulateModel}} for the 'reproductionComponent' argument
#' @family Kernel Components
ReproductionKernel<-function(z1,z,bt,pars,date)
{
return(pars$sex_ratio[date]*Spawning(z,pars,date)*Fecundity(z,pars,date)*HatchlingGrowth(z1,bt,pars,date)*Survival(pars$z_hatch[date],pars,date))
}
|
/R/ReproductionKernel.R
|
no_license
|
npollesch/FishToxTranslator
|
R
| false
| false
| 1,707
|
r
|
#~ ,''''''''''''''.
#~~ / USEPA FISH \
#~ >~',*> < TOX TRANSLATOR )
#~~ \ v1.0 "Doloris" /
#~ `..............'
#~~
#~ N. Pollesch - pollesch.nathan@epa.gov
#' Kernel Component - Reproduction Kernel
#'
#' Integral projection models (Ellner et al., 2016) have the general structure:
#' \deqn{n(z1,t+1) = \int (P(z1,z) + R(z1,z)) n(z,t) dz}
#' Here we define 'R' as 'ReproductionKernel' to be explicit. It is the product
#' of the date-indexed sex ratio, the size-dependent spawning probability
#' \code{\link{Spawning}}, the size-dependent hatchlings per spawn
#' \code{\link{Fecundity}}, the hatchling growth-transition CDF
#' \code{\link{HatchlingGrowth}}, and size-dependent survival at hatch size
#' \code{\link{Survival}}.
#'
#' @param z1 Size at the end of the timestep, the size being transitioned to [float]
#' @param z Size at the beginning of the timestep [float]
#' @param bt Population biomass at the beginning of the timestep [float]
#' @param pars Data.frame containing the date-indexed parameters [data.frame]
#' @param date Ordinal day to reference proper 'pars' date-indexed parameters [integer]
#' @return Size-dependent reproduction kernel
#' @export
#' @note This function is provided as a standard to pass to \code{\link{SimulateModel}} for the 'reproductionComponent' argument
#' @family Kernel Components
ReproductionKernel <- function(z1, z, bt, pars, date)
{
  # Evaluate each size-dependent component at this date, then multiply them
  # together with the sex ratio.
  spawn_prob  <- Spawning(z, pars, date)
  hatch_count <- Fecundity(z, pars, date)
  growth_cdf  <- HatchlingGrowth(z1, bt, pars, date)
  hatch_surv  <- Survival(pars$z_hatch[date], pars, date)
  return(pars$sex_ratio[date] * spawn_prob * hatch_count * growth_cdf * hatch_surv)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters.R
\name{parameters}
\alias{parameters}
\title{Parameter names of a JointAI object}
\usage{
parameters(object, expand_ranef = FALSE, mess = TRUE, warn = TRUE, ...)
}
\arguments{
\item{object}{object inheriting from class 'JointAI'}
\item{expand_ranef}{logical; should all elements of the random effects
vectors/matrices be shown separately?}
\item{mess}{logical; should messages be given? Default is
\code{TRUE}.}
\item{warn}{logical; should warnings be given? Default is
\code{TRUE}.}
\item{...}{currently not used}
}
\description{
Returns the names of the parameters/nodes of an object of class 'JointAI' for
which a monitor is set.
}
\examples{
# (This function does not need MCMC samples to work, so we will set
# n.adapt = 0 and n.iter = 0 to reduce computational time)
mod1 <- lm_imp(y ~ C1 + C2 + M2 + O2 + B2, data = wideDF, n.adapt = 0,
n.iter = 0, mess = FALSE)
parameters(mod1)
}
|
/man/parameters.Rd
|
no_license
|
cran/JointAI
|
R
| false
| true
| 1,036
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters.R
\name{parameters}
\alias{parameters}
\title{Parameter names of a JointAI object}
\usage{
parameters(object, expand_ranef = FALSE, mess = TRUE, warn = TRUE, ...)
}
\arguments{
\item{object}{object inheriting from class 'JointAI'}
\item{expand_ranef}{logical; should all elements of the random effects
vectors/matrices be shown separately?}
\item{mess}{logical; should messages be given? Default is
\code{TRUE}.}
\item{warn}{logical; should warnings be given? Default is
\code{TRUE}.}
\item{...}{currently not used}
}
\description{
Returns the names of the parameters/nodes of an object of class 'JointAI' for
which a monitor is set.
}
\examples{
# (This function does not need MCMC samples to work, so we will set
# n.adapt = 0 and n.iter = 0 to reduce computational time)
mod1 <- lm_imp(y ~ C1 + C2 + M2 + O2 + B2, data = wideDF, n.adapt = 0,
n.iter = 0, mess = FALSE)
parameters(mod1)
}
|
#' Correction factor for Sun-Earth distance variation thoughout the year
#'
#'@param month is the month (1 to 12)
#'@param day is the day of the month (1-31)
#'
#'@return Returns a factor that multiply F0 to account for Sun-Earth distance variation at any given date.
#' This factor can change the irradiance up to 3.3\%.
#'
#'@author Bernard Gentili
#'
orbex <- function(month,day) {
ndays <- as.numeric(julian(
as.POSIXct(paste("2001",month,day,sep = "-")),
origin = as.POSIXct("2001-01-01")))
return((1 + 0.0167 * cos(2*pi* (ndays -3) / 365))^2)
}
|
/R/orbex.R
|
no_license
|
belasi01/Cops
|
R
| false
| false
| 558
|
r
|
#' Correction factor for Sun-Earth distance variation throughout the year
#'
#'@param month is the month (1 to 12)
#'@param day is the day of the month (1-31)
#'
#'@return Returns a factor that multiplies F0 to account for Sun-Earth distance
#' variation at any given date. This factor can change the irradiance up to 3.3\%.
#'
#'@author Bernard Gentili
#'
orbex <- function(month, day) {
  # Day-of-year offset, measured against Jan 1 of a fixed non-leap year (2001).
  target_date <- as.POSIXct(paste("2001", month, day, sep = "-"))
  doy <- as.numeric(julian(target_date, origin = as.POSIXct("2001-01-01")))
  # Squared orbital-eccentricity correction (perihelion near Jan 3).
  (1 + 0.0167 * cos(2 * pi * (doy - 3) / 365))^2
}
|
library(emayili)
library(rmarkdown)
library(dplyr)
# This script will render a Rmd to HTML and attach it to an email.
#
# NOTE: Images in the HTML will not appear in the attachment.
render(
input = "junk-report.Rmd" ,
output_format = "html_document",
output_file = "junk-report.html"
)
SMTP_USERNAME = Sys.getenv("SMTP_USERNAME")
smtp <- server(
host = Sys.getenv("SMTP_SERVER"),
port = Sys.getenv("SMTP_PORT"),
username = SMTP_USERNAME,
password = Sys.getenv("SMTP_PASSWORD")
)
email <- envelope() %>%
from(SMTP_USERNAME) %>%
to(SMTP_USERNAME) %>%
subject("Junk Report")%>%
attachment("junk-report.html", disposition = "attachment")
smtp(email, verbose = TRUE)
|
/examples/rmarkdown-render-attachment/junk-report.R
|
no_license
|
adam-gruer/emayili
|
R
| false
| false
| 690
|
r
|
library(emayili)
library(rmarkdown)
library(dplyr)
# This script renders an R Markdown report to HTML and emails it as an
# attachment over SMTP.
#
# NOTE: Images in the HTML will not appear in the attachment.

# Render the report to a standalone HTML file next to this script.
render(
  input = "junk-report.Rmd",
  output_format = "html_document",
  output_file = "junk-report.html"
)

# SMTP credentials come from environment variables so no secrets are
# committed to source control. (Fixed: use `<-`, not `=`, for assignment.)
SMTP_USERNAME <- Sys.getenv("SMTP_USERNAME")

smtp <- server(
  host = Sys.getenv("SMTP_SERVER"),
  port = Sys.getenv("SMTP_PORT"),
  username = SMTP_USERNAME,
  password = Sys.getenv("SMTP_PASSWORD")
)

# Compose the message: sender and recipient are both the SMTP user here.
email <- envelope() %>%
  from(SMTP_USERNAME) %>%
  to(SMTP_USERNAME) %>%
  subject("Junk Report") %>%
  attachment("junk-report.html", disposition = "attachment")

# Send, with verbose SMTP logging for debugging.
smtp(email, verbose = TRUE)
|
getIndicators = function(frequency = "daily", save = FALSE,
type = c("all", "policyTradeWeight",
"TradeWeight", "count", "countryIndicators"), aggregateRussia = F){
library(data.table)
library(zoo)
library(dygraphs)
library(xts)
library(countrycode)
source("R/policyStringData.R")
amisData = readRDS("Data/standardizedDailyAmisData.RDA")
oecdData = readRDS("Data/nonAmisData.RDA")
policyData = rbind(amisData, oecdData)
policyData[, value := as.numeric(value)]
stretchedPolicyData = rbindlist(lapply(unique(policyData[, cpl_id ]), policyTimeSeries, frequency = frequency, policyData = policyData))
stretchedPolicyData[, iso3c := as.character(countrycode(country_name,
origin = "country.name",
destination = "iso3c",
warn = T))]
stretchedPolicyData[, period := substring(timeLine,1,4)]
## USDA export data, this needs to be replaced with Trade map data and HS codes, preferrably monthly
if(aggregateRussia == F){
usdaExportData = readRDS("Data/usdaExportData.RDA")
}else{
usdaExportData = readRDS("Data/usdaExportDataRusAgg.RDA")
}
averageExportShares = data.table(aggregate(data = usdaExportData,
exportShare ~ iso3c + period, mean))
# lag exportshares for exogeneity
averageExportShares[, period := as.numeric(period) + 1]
# policy and trade weighted Indicator
policyCount = stretchedPolicyData[, .N, by = c("timeLine", "policymeasure_name")]
setnames(policyCount, "N", "globalPolicies")
policyShares = policyCount[, max(globalPolicies), by = "policymeasure_name"]
setnames(policyShares, "V1", "policyMax")
countryPolicyCount = stretchedPolicyData[, .N, by = c("timeLine","policymeasure_name", "iso3c")]
setnames(countryPolicyCount, "N", "countryCount")
#indicatorData[, indicatorCount := .N, by = timeLine]
if(aggregateRussia == T){
russiaAggregate = data.table(aggregate(data = countryPolicyCount[iso3c %in% c("KAZ", "RUS", "UKR", "KGZ"), ], countryCount ~ timeLine + policymeasure_name, sum))
russiaAggregate[, iso3c := "RUK"]
countryPolicyCount = rbind(countryPolicyCount[!iso3c %in% c("KAZ", "RUS", "UKR", "KGZ"), ], russiaAggregate)
}
setkey(countryPolicyCount, iso3c, timeLine )
# merge with trade data
countryPolicyCount[, period := substring(timeLine,1,4)]
indicatorData = merge(countryPolicyCount, usdaExportData, by=c("iso3c", "period"), all.x = T)
#indicatorData[, value := as.numeric(value)]
quotaBansCount = aggregate(data = countryPolicyCount[policymeasure_name != "Export tax", ],
countryCount ~ timeLine + iso3c, sum)
quotaBansGlobal = indicatorData[policymeasure_name != "Export tax", .N, by = c("timeLine")]
quotaBansData = merge(quotaBansGlobal, quotaBansCount, by = "timeLine", all.x = TRUE)
quotaBansData[, period := substring(timeLine,1,4)]
quotaBansIndicatorData = merge(quotaBansData, usdaExportData, all.x = TRUE, by = c("period", "iso3c"))
quotaBansIndicatorData[, quotaBans := ((countryCount / N) * exportShare) *100]
quotaBansIndicatorData[, quotaBansNoPWeight := (countryCount * exportShare) *100]
quotaBans = data.table(aggregate(data = quotaBansIndicatorData, quotaBans ~ timeLine, sum))
quotaBansNoPWeight = data.table(aggregate(data = quotaBansIndicatorData, quotaBansNoPWeight ~ timeLine, sum))
countData = merge(countryPolicyCount, policyCount, by = c("timeLine", "policymeasure_name"))
# countData[, period := substring(timeLine,1,4)]
newIndicator = merge(countData, usdaExportData, all.x = T, by = c("period", "iso3c"))
newIndicator = merge(newIndicator, policyShares, all.x = T, by ="policymeasure_name")
newIndicator[, indicatorNoPoliyWeight := ((countryCount * exportShare) *100)]
#newIndicator[, indicatorValue := ((countryCount / globalPolicies) * exportShare)]
newIndicator[, indicatorValue := ((countryCount / policyMax) * exportShare) * 100]
countryIndicators = dcast(newIndicator, timeLine ~ policymeasure_name + iso3c, value.var = "indicatorValue")
countryNoPolicyIndicators = dcast(newIndicator, timeLine ~ policymeasure_name + iso3c, value.var = "indicatorNoPoliyWeight")
globalIndicators = data.table(aggregate(data = newIndicator, indicatorValue ~ timeLine + policymeasure_name, sum))
globalNoPolicyIndicators = data.table(aggregate(data = newIndicator, indicatorNoPoliyWeight ~ timeLine + policymeasure_name, sum))
quotaValueIndicator = indicatorData[policymeasure_name == "Export quota",]
# countryQuotaIndicators = data.table(aggregate(data = quotaValueIndicator,
# indicatorValues ~ timeLine + country_name + iso3c + exportShare, sum))
#
# countryQuotaIndicators[, indicatorValues := indicatorValues * 100]
# countryQuotaIndicators[, weightedIndicator := exportShare * indicatorValues * 10000 ]
#
# quotaIndicators = data.table(aggregate(data = countryQuotaIndicators,
# indicatorValues ~ timeLine, sum))
#
indicatorDataWide = dcast(globalIndicators, timeLine ~ policymeasure_name, value.var = "indicatorValue")
# test = merge(indicatorDataWide, quotaBans, by = "timeLine", all.x = TRUE)
indicatorDataNoPolicyWide = dcast(globalNoPolicyIndicators, timeLine ~ policymeasure_name, value.var = "indicatorNoPoliyWeight")
setnames(indicatorDataNoPolicyWide, c("Export prohibition", "Export quota", "Export tax"), c("Export prohibitionNoPweight", "Export quotaNoPweight", "Export taxNoPweight"))
#indicatorDataWideExtended = merge(indicatorDataWide, quotaIndicators, all.x = T, by = "timeLine")
#setnames(indicatorDataWideExtended, "weightedIndicator", "weightedQuotas")
# taxValueIndicator = indicatorData[policymeasure_name == "Export tax",]
#
# countryTaxIndicators = data.table(aggregate(data = taxValueIndicator,
# indicatorValues ~ timeLine + country_name + iso3c + exportShare, sum))
#
# countryTaxIndicators[, indicatorValues := indicatorValues * 100]
#
#
# countryTaxIndicators[, weightedIndicator := exportShare * indicatorValues * 100 ]
#
# taxIndicators = data.table(aggregate(data = countryTaxIndicators,
# weightedIndicator ~ timeLine, sum))
# setnames(quotaIndicators, "indicatorValues", "measuredQuota")
# setnames(taxIndicators, "weightedIndicator", "measuredTax")
library(ggplot2)
ggplot(data=globalIndicators,
aes(x=timeLine, y=indicatorValue, colour=policymeasure_name)) +
geom_line()
indicatorDataWide = merge(indicatorDataWide, quotaBans, by = "timeLine", all.x = TRUE)
indicatorDataNoPolicyWide = merge(indicatorDataNoPolicyWide, quotaBansNoPWeight, by = "timeLine", all.x = TRUE)
if(save == TRUE){
if(frequency == "daily"){
saveRDS(indicatorDataWide, "Data/indicatorPolicyWeighted.RDA")
saveRDS(indicatorDataNoPolicyWide, "Data/indicatorNoPolicyWeighted.RDA")
saveRDS(policyCount, "Data/policyCount.RDA")
saveRDS(countryIndicators, "Data/countryIndicators.RDA")
}else{
saveRDS(indicatorDataWide, "Data/indicatorPolicyWeightedMonthly.RDA")
saveRDS(indicatorDataNoPolicyWide, "Data/indicatorNoPolicyWeightedMonthly.RDA")
saveRDS(policyCount, "Data/policyCountMonthly.RDA")
saveRDS(countryIndicators, "Data/countryIndicatorsMonthly.RDA")
}
}
if(type == "policyTradeWeight"){
return(indicatorDataWide)
}else if(type == "tradeWeight"){
return(indicatorDataNoPolicyWide)
}else if(type == "count"){
return(policyCount)
}else if(type == "countryIndicators"){
return(countryIndicators)
}else{
exportRestrictionsIndicatorsList = list(indicatorDataWide, indicatorDataNoPolicyWide, policyCount, countryIndicators)
return(exportRestrictionsIndicatorsList)
}
}
|
/R/getIndicators.R
|
no_license
|
BDalheimer/FoodPriceVolatility
|
R
| false
| false
| 7,811
|
r
|
# Construct export-restriction policy indicators.
#
# Combines stretched AMIS + OECD policy time series (via policyTimeSeries,
# sourced from R/policyStringData.R) with USDA export shares to build:
#   * policy- and trade-weighted indicators per measure (indicatorDataWide),
#   * trade-weighted-only indicators (indicatorDataNoPolicyWide),
#   * global policy counts per measure (policyCount),
#   * per-country indicators (countryIndicators).
#
# frequency        e.g. "daily" or "monthly"; forwarded to policyTimeSeries
#                  and selects the output file names when save = TRUE.
# save             if TRUE, writes the four tables to Data/*.RDA.
# type             which table to return; any other value returns all four
#                  as a list.
# aggregateRussia  if TRUE, KAZ/RUS/UKR/KGZ are merged into one "RUK" region
#                  and the Russia-aggregated export-share file is used.
#
# NOTE(review): `type` defaults to a length-5 vector but is compared with
# `==` at the bottom (no match.arg); calling with the default makes the
# if() conditions length > 1, which errors on R >= 4.2.
# NOTE(review): library() calls inside a function modify the caller's search
# path; consider requireNamespace()/:: instead.
getIndicators = function(frequency = "daily", save = FALSE,
                         type = c("all", "policyTradeWeight",
                                  "TradeWeight", "count", "countryIndicators"), aggregateRussia = F){
  library(data.table)
  library(zoo)
  library(dygraphs)
  library(xts)
  library(countrycode)
  source("R/policyStringData.R")
  # Load the two policy sources and stretch each policy into a time series
  # at the requested frequency.
  amisData = readRDS("Data/standardizedDailyAmisData.RDA")
  oecdData = readRDS("Data/nonAmisData.RDA")
  policyData = rbind(amisData, oecdData)
  policyData[, value := as.numeric(value)]
  stretchedPolicyData = rbindlist(lapply(unique(policyData[, cpl_id ]), policyTimeSeries, frequency = frequency, policyData = policyData))
  stretchedPolicyData[, iso3c := as.character(countrycode(country_name,
                                                          origin = "country.name",
                                                          destination = "iso3c",
                                                          warn = T))]
  # `period` is the 4-character year prefix of the time index.
  stretchedPolicyData[, period := substring(timeLine,1,4)]
  ## USDA export data, this needs to be replaced with Trade map data and HS codes, preferrably monthly
  if(aggregateRussia == F){
    usdaExportData = readRDS("Data/usdaExportData.RDA")
  }else{
    usdaExportData = readRDS("Data/usdaExportDataRusAgg.RDA")
  }
  # NOTE(review): averageExportShares is computed but never used below.
  averageExportShares = data.table(aggregate(data = usdaExportData,
                                             exportShare ~ iso3c + period, mean))
  # lag exportshares for exogeneity
  averageExportShares[, period := as.numeric(period) + 1]
  # policy and trade weighted Indicator
  # Global and per-country counts of active policies per measure and date.
  policyCount = stretchedPolicyData[, .N, by = c("timeLine", "policymeasure_name")]
  setnames(policyCount, "N", "globalPolicies")
  policyShares = policyCount[, max(globalPolicies), by = "policymeasure_name"]
  setnames(policyShares, "V1", "policyMax")
  countryPolicyCount = stretchedPolicyData[, .N, by = c("timeLine","policymeasure_name", "iso3c")]
  setnames(countryPolicyCount, "N", "countryCount")
  #indicatorData[, indicatorCount := .N, by = timeLine]
  # Optionally collapse the four ex-Soviet exporters into one "RUK" region.
  if(aggregateRussia == T){
    russiaAggregate = data.table(aggregate(data = countryPolicyCount[iso3c %in% c("KAZ", "RUS", "UKR", "KGZ"), ], countryCount ~ timeLine + policymeasure_name, sum))
    russiaAggregate[, iso3c := "RUK"]
    countryPolicyCount = rbind(countryPolicyCount[!iso3c %in% c("KAZ", "RUS", "UKR", "KGZ"), ], russiaAggregate)
  }
  setkey(countryPolicyCount, iso3c, timeLine )
  # merge with trade data
  countryPolicyCount[, period := substring(timeLine,1,4)]
  indicatorData = merge(countryPolicyCount, usdaExportData, by=c("iso3c", "period"), all.x = T)
  #indicatorData[, value := as.numeric(value)]
  # Quotas-and-bans indicator: counts of non-tax measures, both policy- and
  # trade-weighted (quotaBans) and trade-weighted only (quotaBansNoPWeight).
  quotaBansCount = aggregate(data = countryPolicyCount[policymeasure_name != "Export tax", ],
                             countryCount ~ timeLine + iso3c, sum)
  quotaBansGlobal = indicatorData[policymeasure_name != "Export tax", .N, by = c("timeLine")]
  quotaBansData = merge(quotaBansGlobal, quotaBansCount, by = "timeLine", all.x = TRUE)
  quotaBansData[, period := substring(timeLine,1,4)]
  quotaBansIndicatorData = merge(quotaBansData, usdaExportData, all.x = TRUE, by = c("period", "iso3c"))
  quotaBansIndicatorData[, quotaBans := ((countryCount / N) * exportShare) *100]
  quotaBansIndicatorData[, quotaBansNoPWeight := (countryCount * exportShare) *100]
  quotaBans = data.table(aggregate(data = quotaBansIndicatorData, quotaBans ~ timeLine, sum))
  quotaBansNoPWeight = data.table(aggregate(data = quotaBansIndicatorData, quotaBansNoPWeight ~ timeLine, sum))
  # Per-measure indicators: country counts scaled by the measure's global
  # maximum (policy weight) and the country's export share (trade weight).
  countData = merge(countryPolicyCount, policyCount, by = c("timeLine", "policymeasure_name"))
  # countData[, period := substring(timeLine,1,4)]
  newIndicator = merge(countData, usdaExportData, all.x = T, by = c("period", "iso3c"))
  newIndicator = merge(newIndicator, policyShares, all.x = T, by ="policymeasure_name")
  newIndicator[, indicatorNoPoliyWeight := ((countryCount * exportShare) *100)]
  #newIndicator[, indicatorValue := ((countryCount / globalPolicies) * exportShare)]
  newIndicator[, indicatorValue := ((countryCount / policyMax) * exportShare) * 100]
  countryIndicators = dcast(newIndicator, timeLine ~ policymeasure_name + iso3c, value.var = "indicatorValue")
  countryNoPolicyIndicators = dcast(newIndicator, timeLine ~ policymeasure_name + iso3c, value.var = "indicatorNoPoliyWeight")
  globalIndicators = data.table(aggregate(data = newIndicator, indicatorValue ~ timeLine + policymeasure_name, sum))
  globalNoPolicyIndicators = data.table(aggregate(data = newIndicator, indicatorNoPoliyWeight ~ timeLine + policymeasure_name, sum))
  # NOTE(review): quotaValueIndicator feeds only the commented-out code below.
  quotaValueIndicator = indicatorData[policymeasure_name == "Export quota",]
  # countryQuotaIndicators = data.table(aggregate(data = quotaValueIndicator,
  #                                    indicatorValues ~ timeLine + country_name + iso3c + exportShare, sum))
  #
  # countryQuotaIndicators[, indicatorValues := indicatorValues * 100]
  # countryQuotaIndicators[, weightedIndicator := exportShare * indicatorValues * 10000 ]
  #
  # quotaIndicators = data.table(aggregate(data = countryQuotaIndicators,
  #                              indicatorValues ~ timeLine, sum))
  #
  # Reshape the global indicators to wide (one column per measure).
  indicatorDataWide = dcast(globalIndicators, timeLine ~ policymeasure_name, value.var = "indicatorValue")
  # test = merge(indicatorDataWide, quotaBans, by = "timeLine", all.x = TRUE)
  indicatorDataNoPolicyWide = dcast(globalNoPolicyIndicators, timeLine ~ policymeasure_name, value.var = "indicatorNoPoliyWeight")
  setnames(indicatorDataNoPolicyWide, c("Export prohibition", "Export quota", "Export tax"), c("Export prohibitionNoPweight", "Export quotaNoPweight", "Export taxNoPweight"))
  #indicatorDataWideExtended = merge(indicatorDataWide, quotaIndicators, all.x = T, by = "timeLine")
  #setnames(indicatorDataWideExtended, "weightedIndicator", "weightedQuotas")
  # taxValueIndicator = indicatorData[policymeasure_name == "Export tax",]
  #
  # countryTaxIndicators = data.table(aggregate(data = taxValueIndicator,
  #                            indicatorValues ~ timeLine + country_name + iso3c + exportShare, sum))
  #
  # countryTaxIndicators[, indicatorValues := indicatorValues * 100]
  #
  #
  # countryTaxIndicators[, weightedIndicator := exportShare * indicatorValues * 100 ]
  #
  # taxIndicators = data.table(aggregate(data = countryTaxIndicators,
  #                       weightedIndicator ~ timeLine, sum))
  # setnames(quotaIndicators, "indicatorValues", "measuredQuota")
  # setnames(taxIndicators, "weightedIndicator", "measuredTax")
  library(ggplot2)
  # NOTE(review): this ggplot object is built but never print()ed; inside a
  # function it is not auto-printed, so this line has no visible effect.
  ggplot(data=globalIndicators,
         aes(x=timeLine, y=indicatorValue, colour=policymeasure_name)) +
    geom_line()
  # Append the combined quotas-and-bans columns to the wide tables.
  indicatorDataWide = merge(indicatorDataWide, quotaBans, by = "timeLine", all.x = TRUE)
  indicatorDataNoPolicyWide = merge(indicatorDataNoPolicyWide, quotaBansNoPWeight, by = "timeLine", all.x = TRUE)
  # Optionally persist the four tables, with frequency-specific file names.
  if(save == TRUE){
    if(frequency == "daily"){
      saveRDS(indicatorDataWide, "Data/indicatorPolicyWeighted.RDA")
      saveRDS(indicatorDataNoPolicyWide, "Data/indicatorNoPolicyWeighted.RDA")
      saveRDS(policyCount, "Data/policyCount.RDA")
      saveRDS(countryIndicators, "Data/countryIndicators.RDA")
    }else{
      saveRDS(indicatorDataWide, "Data/indicatorPolicyWeightedMonthly.RDA")
      saveRDS(indicatorDataNoPolicyWide, "Data/indicatorNoPolicyWeightedMonthly.RDA")
      saveRDS(policyCount, "Data/policyCountMonthly.RDA")
      saveRDS(countryIndicators, "Data/countryIndicatorsMonthly.RDA")
    }
  }
  # Select the return value; any unrecognised `type` returns all four tables.
  if(type == "policyTradeWeight"){
    return(indicatorDataWide)
  }else if(type == "tradeWeight"){
    return(indicatorDataNoPolicyWide)
  }else if(type == "count"){
    return(policyCount)
  }else if(type == "countryIndicators"){
    return(countryIndicators)
  }else{
    exportRestrictionsIndicatorsList = list(indicatorDataWide, indicatorDataNoPolicyWide, policyCount, countryIndicators)
    return(exportRestrictionsIndicatorsList)
  }
}
|
#' Transform data to be vector of rank or value difference across defined time difference
#'
#' @param group1.1 is meant to be a dataframe with 2 columns. The columns should be the toxicity in a location and the count of the population of interest in
#' that location. Column names are not important, but columns must be ordered with concentration in the first column and population counts in the second column.
#' 1.1 should be the first group at the first time.
#' @param group1.2 should be the same format, but the first group at the second time
#' @param group2.1 should be the same format, but the second group at the first time
#' @param group2.2 should be the same format, but the second group at the second time
#' @param type defines how the function calculates the difference, with the options being "rank" and "value". The "rank" option calculates the difference in
#' percentile for a specific number. The "value" option calculates the difference in toxicity at a specific percentile.
#' @param n is an optional parameter, with a default of 20. This defines at how many points the function calculates the differences.
#' @return Returns a dataframe with two columns. The first is either 'percentile' or 'value' depending on the type chosen, and the second is the difference between
#' groups over the selected time period at those input values. Returns NA (with a warning) when `type` is not recognised.
#' @examples getTimeChangeData(t1990[, c("lconc", "hispanic")], t2010[, c("lconc", "hispanic")], t1990[, c("lconc", "white")], t2010[c("lconc", "white")], "value")
#' @export
getTimeChangeData = function(group1.1, group1.2, group2.1, group2.2, type, n = 20) {
  if (type == "value") {
    # Evaluate the value difference at n evenly spaced percentiles in [0, 0.98].
    percentile = seq(0, 0.98, length.out = n)
    difference = sapply(percentile, timeDifference, group1.1, group1.2, group2.1, group2.2, type)
    return(data.frame(percentile, difference))
  }
  else if (type == "rank") {
    # Evaluate the rank difference at n evenly spaced concentration values
    # spanning the pooled range of all four groups.
    tru_min = min(group1.1[, 1], group1.2[, 1], group2.1[, 1], group2.2[, 1])
    tru_max = max(group1.1[, 1], group1.2[, 1], group2.1[, 1], group2.2[, 1])
    value = seq(tru_min, tru_max, length.out = n)
    difference = sapply(value, timeDifference, group1.1, group1.2, group2.1, group2.2, type)
    return(data.frame(value, difference))
  }
  else {
    # Signal a proper condition instead of print()ing to stdout; keep the
    # original NA return so existing callers still work.
    warning("Variable 'type' must be either 'rank' or 'value'.", call. = FALSE)
    return(NA)
  }
}
|
/R/getTimeChangeData.R
|
no_license
|
amd112/rseiAnalysis
|
R
| false
| false
| 2,199
|
r
|
#' Transform data to be vector of rank or value difference across defined time difference
#'
#' @param group1.1 is meant to be a dataframe with 2 columns. The columns should be the toxicity in a location and the count of the population of interest in
#' that location. Column names are not important, but columns must be ordered with concentration in the first column and population counts in the second column.
#' 1.1 should be the first group at the first time.
#' @param group1.2 should be the same format, but the first group at the second time
#' @param group2.1 should be the same format, but the second group at the first time
#' @param group2.2 should be the same format, but the second group at the second time
#' @param type defines how the function calculates the difference, with the options being "rank" and "value". The "rank" option calculates the difference in
#' percentile for a specific number. The "value" option calculates the difference in toxicity at a specific percentile.
#' @param n is an optional parameter, with a default of 20. This defines at how many points the function calculates the differences.
#' @return Returns a dataframe with two columns. The first is either 'percentile' or 'value' depending on the type chosen, and the second is the difference between
#' groups over the selected time period at those input values. Returns NA (with a warning) when `type` is not recognised.
#' @examples getTimeChangeData(t1990[, c("lconc", "hispanic")], t2010[, c("lconc", "hispanic")], t1990[, c("lconc", "white")], t2010[c("lconc", "white")], "value")
#' @export
getTimeChangeData = function(group1.1, group1.2, group2.1, group2.2, type, n = 20) {
  if (type == "value") {
    # Evaluate the value difference at n evenly spaced percentiles in [0, 0.98].
    percentile = seq(0, 0.98, length.out = n)
    difference = sapply(percentile, timeDifference, group1.1, group1.2, group2.1, group2.2, type)
    return(data.frame(percentile, difference))
  }
  else if (type == "rank") {
    # Evaluate the rank difference at n evenly spaced concentration values
    # spanning the pooled range of all four groups.
    tru_min = min(group1.1[, 1], group1.2[, 1], group2.1[, 1], group2.2[, 1])
    tru_max = max(group1.1[, 1], group1.2[, 1], group2.1[, 1], group2.2[, 1])
    value = seq(tru_min, tru_max, length.out = n)
    difference = sapply(value, timeDifference, group1.1, group1.2, group2.1, group2.2, type)
    return(data.frame(value, difference))
  }
  else {
    # Signal a proper condition instead of print()ing to stdout; keep the
    # original NA return so existing callers still work.
    warning("Variable 'type' must be either 'rank' or 'value'.", call. = FALSE)
    return(NA)
  }
}
|
#' ADEPT Similarity Matrix Computation
#'
#' Compute ADEPT similarity matrix between a time-series \code{x} and a collection
#' of scaled pattern templates.
#'
#' @param x A numeric vector. A time-series \code{x}.
#' @param template.scaled A list of lists of numeric vectors, as returned by
#' \code{scaleTemplate}. Each element of
#' \code{template.scaled}
#' is a list of pattern templates interpolated to a particular vector length.
#' Number of elements in the \code{template.scaled} corresponds to the
#' number of unique template length values used in segmentation.
#' @param similarity.measure A character scalar. Statistic
#' used in similarity matrix computation; one of the following:
#' \itemize{
#' \item "cov" - for covariance,
#' \item "cor" - for correlation.
#' }
#'
#' @return A numeric matrix. Contains values of similarity between a time-series \code{x}
#' and scaled pattern templates.
#' \itemize{
#' \item Number of rows equals \code{template.scaled} length,
#' number of columns equals \code{x} length.
#' \item A particular matrix row consists of similarity statistic
#' between \code{x} and a pattern rescaled to a particular vector length.
#' Precisely, each row's element is a maximum out of similarity values
#' computed for each distinct template used in segmentation.
#' }
#'
#' @seealso \code{\link{scaleTemplate}}
#'
#' @export
#' @import runstats
#'
#' @examples
#' ## Simulate data
#' par(mfrow = c(1,1))
#' x0 <- sin(seq(0, 2 * pi * 100, length.out = 10000))
#' x <- x0 + rnorm(1000, sd = 0.1)
#' template <- list(x0[1:500])
#' template.vl <- seq(300, 700, by = 50)
#'
#' ## Rescale pattern
#' template.scaled <- scaleTemplate(template, template.vl)
#'
#' ## Compute ADEPT similarity matrix
#' out <- similarityMatrix(x, template.scaled, "cov")
#'
#' ## Visualize
#' par(mfrow = c(1,1))
#' image(t(out),
#' main = "ADEPT similarity matrix\nfor time-series x and scaled versions of pattern templates",
#' xlab = "Time index",
#' ylab = "Pattern vector length",
#' xaxt = "n", yaxt = "n")
#' xaxis <- c(1, seq(1000, length(x0), by = 1000))
#' yaxis <- template.vl
#' axis(1, at = xaxis/max(xaxis), labels = xaxis)
#' axis(2, at = (yaxis - min(yaxis))/(max(yaxis) - min(yaxis)), labels = yaxis)
#'
similarityMatrix <- function(x,
                             template.scaled,
                             similarity.measure){

  ## Fail fast on an unsupported statistic: without this check `switch()`
  ## silently returns NULL and the error only surfaces later, obscurely,
  ## inside `do.call(NULL, ...)`.
  similarity.measure <- match.arg(similarity.measure, c("cov", "cor"))

  runstat.func <- switch(similarity.measure,
                         "cov" = RunningCov,
                         "cor" = RunningCor)

  ## Outer lapply: iterate over pattern scales considered;
  ## each lapply iteration fills one row of the output similarity matrix.
  similarity.list <- lapply(template.scaled, function(template.scaled.i){

    ## Inner lapply: iterate over, possibly, multiple patterns;
    ## each lapply iteration returns a vector of running similarity values
    ## between signal \code{x} and one template at this scale.
    runstat.func.out0 <- lapply(template.scaled.i, function(template.scaled.ik){
      do.call(runstat.func, list(x = x, y = template.scaled.ik))
    })

    ## Element-wise maximum across all templates of the same scale.
    do.call(pmax, runstat.func.out0)
  })

  ## rbind list elements (which are vectors) into a matrix
  similarity.mat <- do.call(rbind, similarity.list)

  return(similarity.mat)
}
#' Template Index Matrix Computation
#'
#' Compute matrix of pattern templates yielding the highest similarity
#' between a time-series \code{x} and a collection
#' of scaled pattern templates.
#'
#' @param x A numeric vector. A time-series \code{x}.
#' @param template.scaled A list of lists of numeric vectors, as returned by
#' \code{scaleTemplate}. Each element of
#' \code{template.scaled}
#' is a list of pattern templates interpolated to a particular vector length.
#' Number of elements in the \code{template.scaled} corresponds to the
#' number of unique template length values used in segmentation.
#' @param similarity.measure A character scalar. Statistic
#' used in similarity matrix computation; one of the following:
#' \itemize{
#' \item "cov" - for covariance,
#' \item "cor" - for correlation.
#' }
#'
#' @return A numeric matrix. Represents the number of the pattern template
#' yielding the highest similarity
#' between a time-series \code{x} and a collection
#' of scaled pattern templates. Precisely, the number
#' is the order in which the particular pattern template was provided in
#' the \code{template} list in \code{segmentPattern}.
#'
#' @import runstats
#'
#' @noRd
#'
templateIdxMatrix <- function(x,
                              template.scaled,
                              similarity.measure){

  ## Fail fast on an unsupported statistic (consistent with
  ## `similarityMatrix`): without this check `switch()` silently returns
  ## NULL and the error only surfaces later inside `do.call(NULL, ...)`.
  similarity.measure <- match.arg(similarity.measure, c("cov", "cor"))

  runstat.func <- switch(similarity.measure,
                         "cov" = RunningCov,
                         "cor" = RunningCor)

  ## Outer lapply: iterate over pattern scales considered;
  ## each lapply iteration fills one row of the output matrix.
  templateIdx.list <- lapply(template.scaled, function(template.scaled.i){

    ## Inner lapply: one running-similarity vector per template at this scale.
    runstat.func.out0 <- lapply(template.scaled.i, function(template.scaled.ik){
      do.call(runstat.func, list(x = x, y = template.scaled.ik))
    })

    ## For every time point, record which template (by list position)
    ## achieved the highest similarity; ties go to the first template.
    max.col(t(do.call(rbind, runstat.func.out0)), ties.method = "first")
  })

  ## rbind list elements (which are vectors) into a matrix
  templateIdx.mat <- do.call(rbind, templateIdx.list)

  return(templateIdx.mat)
}
|
/R/similarityMatrix.R
|
no_license
|
oslerinhealth-releases/adept
|
R
| false
| false
| 5,729
|
r
|
#' ADEPT Similarity Matrix Computation
#'
#' Compute ADEPT similarity matrix between a time-series \code{x} and a collection
#' of scaled pattern templates.
#'
#' @param x A numeric vector. A time-series \code{x}.
#' @param template.scaled A list of lists of numeric vectors, as returned by
#' \code{scaleTemplate}. Each element of
#' \code{template.scaled}
#' is a list of pattern templates interpolated to a particular vector length.
#' Number of elements in the \code{template.scaled} corresponds to the
#' number of unique template length values used in segmentation.
#' @param similarity.measure A character scalar. Statistic
#' used in similarity matrix computation; one of the following:
#' \itemize{
#' \item "cov" - for covariance,
#' \item "cor" - for correlation.
#' }
#'
#' @return A numeric matrix. Contains values of similarity between a time-series \code{x}
#' and scaled pattern templates.
#' \itemize{
#' \item Number of rows equals \code{template.scaled} length,
#' number of columns equals \code{x} length.
#' \item A particular matrix row consists of similarity statistic
#' between \code{x} and a pattern rescaled to a particular vector length.
#' Precisely, each row's element is a maximum out of similarity values
#' computed for each distinct template used in segmentation.
#' }
#'
#' @seealso \code{\link{scaleTemplate}}
#'
#' @export
#' @import runstats
#'
#' @examples
#' ## Simulate data
#' par(mfrow = c(1,1))
#' x0 <- sin(seq(0, 2 * pi * 100, length.out = 10000))
#' x <- x0 + rnorm(1000, sd = 0.1)
#' template <- list(x0[1:500])
#' template.vl <- seq(300, 700, by = 50)
#'
#' ## Rescale pattern
#' template.scaled <- scaleTemplate(template, template.vl)
#'
#' ## Compute ADEPT similarity matrix
#' out <- similarityMatrix(x, template.scaled, "cov")
#'
#' ## Visualize
#' par(mfrow = c(1,1))
#' image(t(out),
#' main = "ADEPT similarity matrix\nfor time-series x and scaled versions of pattern templates",
#' xlab = "Time index",
#' ylab = "Pattern vector length",
#' xaxt = "n", yaxt = "n")
#' xaxis <- c(1, seq(1000, length(x0), by = 1000))
#' yaxis <- template.vl
#' axis(1, at = xaxis/max(xaxis), labels = xaxis)
#' axis(2, at = (yaxis - min(yaxis))/(max(yaxis) - min(yaxis)), labels = yaxis)
#'
similarityMatrix <- function(x,
                             template.scaled,
                             similarity.measure){

  ## Fail fast on an unsupported statistic: without this check `switch()`
  ## silently returns NULL and the error only surfaces later, obscurely,
  ## inside `do.call(NULL, ...)`.
  similarity.measure <- match.arg(similarity.measure, c("cov", "cor"))

  runstat.func <- switch(similarity.measure,
                         "cov" = RunningCov,
                         "cor" = RunningCor)

  ## Outer lapply: iterate over pattern scales considered;
  ## each lapply iteration fills one row of the output similarity matrix.
  similarity.list <- lapply(template.scaled, function(template.scaled.i){

    ## Inner lapply: iterate over, possibly, multiple patterns;
    ## each lapply iteration returns a vector of running similarity values
    ## between signal \code{x} and one template at this scale.
    runstat.func.out0 <- lapply(template.scaled.i, function(template.scaled.ik){
      do.call(runstat.func, list(x = x, y = template.scaled.ik))
    })

    ## Element-wise maximum across all templates of the same scale.
    do.call(pmax, runstat.func.out0)
  })

  ## rbind list elements (which are vectors) into a matrix
  similarity.mat <- do.call(rbind, similarity.list)

  return(similarity.mat)
}
#' Template Index Matrix Computation
#'
#' Compute matrix of pattern templates yielding the highest similarity
#' between a time-series \code{x} and a collection
#' of scaled pattern templates.
#'
#' @param x A numeric vector. A time-series \code{x}.
#' @param template.scaled A list of lists of numeric vectors, as returned by
#' \code{scaleTemplate}. Each element of
#' \code{template.scaled}
#' is a list of pattern templates interpolated to a particular vector length.
#' Number of elements in the \code{template.scaled} corresponds to the
#' number of unique template length values used in segmentation.
#' @param similarity.measure A character scalar. Statistic
#' used in similarity matrix computation; one of the following:
#' \itemize{
#' \item "cov" - for covariance,
#' \item "cor" - for correlation.
#' }
#'
#' @return A numeric matrix. Represents the number of the pattern template
#' yielding the highest similarity
#' between a time-series \code{x} and a collection
#' of scaled pattern templates. Precisely, the number
#' is the order in which the particular pattern template was provided in
#' the \code{template} list in \code{segmentPattern}.
#'
#' @import runstats
#'
#' @noRd
#'
templateIdxMatrix <- function(x,
                              template.scaled,
                              similarity.measure){

  ## Fail fast on an unsupported statistic (consistent with
  ## `similarityMatrix`): without this check `switch()` silently returns
  ## NULL and the error only surfaces later inside `do.call(NULL, ...)`.
  similarity.measure <- match.arg(similarity.measure, c("cov", "cor"))

  runstat.func <- switch(similarity.measure,
                         "cov" = RunningCov,
                         "cor" = RunningCor)

  ## Outer lapply: iterate over pattern scales considered;
  ## each lapply iteration fills one row of the output matrix.
  templateIdx.list <- lapply(template.scaled, function(template.scaled.i){

    ## Inner lapply: one running-similarity vector per template at this scale.
    runstat.func.out0 <- lapply(template.scaled.i, function(template.scaled.ik){
      do.call(runstat.func, list(x = x, y = template.scaled.ik))
    })

    ## For every time point, record which template (by list position)
    ## achieved the highest similarity; ties go to the first template.
    max.col(t(do.call(rbind, runstat.func.out0)), ties.method = "first")
  })

  ## rbind list elements (which are vectors) into a matrix
  templateIdx.mat <- do.call(rbind, templateIdx.list)

  return(templateIdx.mat)
}
|
BD_CalulateSenSpecNPVPPV<-function(ProbCalibStruct , prob_thresh){
  # Compute sensitivity, specificity, PPV and NPV for a probability threshold.
  # Column 1 of ProbCalibStruct holds predicted probabilities; column 2 holds
  # the true 0/1 labels.
  predicted_pos <- ProbCalibStruct[, 1] > prob_thresh
  actual_pos <- ProbCalibStruct[, 2] == 1
  actual_neg <- ProbCalibStruct[, 2] == 0
  # Confusion-matrix cell counts.
  true_pos <- sum(predicted_pos & actual_pos)
  true_neg <- sum(!predicted_pos & actual_neg)
  false_pos <- sum(predicted_pos & actual_neg)
  false_neg <- sum(!predicted_pos & actual_pos)
  # Standard diagnostic performance measures, returned as a named list.
  list(Sen = true_pos / sum(actual_pos),
       Spec = true_neg / sum(actual_neg),
       PPV = true_pos / (true_pos + false_pos),
       NPV = true_neg / (true_neg + false_neg))
}
|
/WaveformCode/BayesianDiscrepancySourceFunctions.R
|
no_license
|
BenLopez/UHSM_BHF
|
R
| false
| false
| 680
|
r
|
BD_CalulateSenSpecNPVPPV<-function(ProbCalibStruct , prob_thresh){
  # Compute sensitivity, specificity, PPV and NPV for a probability threshold.
  # Column 1 of ProbCalibStruct holds predicted probabilities; column 2 holds
  # the true 0/1 labels.
  predicted_pos <- ProbCalibStruct[, 1] > prob_thresh
  actual_pos <- ProbCalibStruct[, 2] == 1
  actual_neg <- ProbCalibStruct[, 2] == 0
  # Confusion-matrix cell counts.
  true_pos <- sum(predicted_pos & actual_pos)
  true_neg <- sum(!predicted_pos & actual_neg)
  false_pos <- sum(predicted_pos & actual_neg)
  false_neg <- sum(!predicted_pos & actual_pos)
  # Standard diagnostic performance measures, returned as a named list.
  list(Sen = true_pos / sum(actual_pos),
       Spec = true_neg / sum(actual_neg),
       PPV = true_pos / (true_pos + false_pos),
       NPV = true_neg / (true_neg + false_neg))
}
|
## Time-series clustering of per-user play time (PT):
## load daily PT data, build per-user moving-average trends, cluster users with
## a correlation-based distance, visualise each cluster, then profile clusters
## against BigQuery tables.
setwd('D:\\git_R\\papers\\Discovering_Play_Patterns')
options(scipen = 999, digits=21)
########################################################################################################
library(parallelDist) # https://www.rdocumentation.org/packages/parallelDist/versions/0.1.1/topics/parDist
source("corFuncPtr.R") # if a package-installation prompt appears automatically, just install the package
source('df_prepro.R')
source('group_vis.R')
library(data.table) # read CSV file
library(dplyr)
library(reshape)
library(forecast) # moving-average (MA) computation
library(ggplot2) # for vis
library(RColorBrewer) # for vis
library(gridExtra) # for vis
library(bigrquery) # used to profile each cluster against BigQuery data
########################################################################################################
# Read the data.
# nid : user id
# rn : sequence index (10-minute buckets, daily buckets, etc.)
# pt : play-time value for that rn
data<- fread('SZ_daily_PT_sum_201909.csv', integer64 = 'numeric')
colnames(data)<- c('nid', 'rn', 'pt')
# Hyper parameter
max_rn <- max(data$rn)
MA <- 3
# Load the preprocessing result.
data<- df_prepro(data,seq_length = max_rn) # source('df_prepro.R')
# Additional preprocessing step --> drop users whose sum(pt) = 0 over rn >= 2.
# This usually filters out 30 ~ 50% of users.
nid_pt_sum<-data %>% filter(rn > 1) %>% group_by(nid) %>% summarise(sum_pt = sum(pt))
nid_pt_sum<- nid_pt_sum %>% filter(sum_pt==0)
data<- data %>% filter( !(nid %in% nid_pt_sum$nid ))
# Build the COR matrix:
# one row per user, holding that user's trend data.
nid_list<- as.character(unique(data$nid))
COR<- matrix(0, nrow = length(nid_list), ncol = max_rn - MA +1 )
rownames(COR)<- nid_list
# Trend computation.
# Every nid (user) was expanded to a full sequence of rows above, so index
# ranges of size max_rn can be generated per nid as below.
start_list<- seq(1, nrow(data), by = max_rn)
end_list<- seq(1+max_rn-1, nrow(data)+max_rn-1, by = max_rn)
temp_list<- vector(mode ='list', length = length(nid_list))
for(i in 1:length(nid_list)){
  temp_list[[i]]<- start_list[i] : end_list[i]
}
for(i in 1:length(nid_list)){
  qq <- forecast::ma(data[temp_list[[i]],'pt'], order = MA)
  qq <- qq[!is.na(qq)]
  COR[i,]<-qq
}
# Compute the COR + trend distance matrix.
d<- parDist(COR, method="custom", func = corFuncPtr)
d[is.na(d)]<-0
# Clustering.
# Hierarchical clustering using Ward Linkage
hc1 <- fastcluster::hclust(d, method = "ward.D" )
# Plot the obtained dendrogram
# (not recommended when the data set is large)
# plot(hc1, cex = 0.01, hang = -1, main= 'SZ 2019-10 가입유저 - 가입 후 7일 PT', label = FALSE)
# rect.hclust(hc1, k = 6, border = 2:8)
# Cut tree into 6 groups
sub_grp <- cutree(hc1, k = 6)
# Number of members in each cluster
table(sub_grp)
# Visualisation.
casted_data<- cast(data, nid ~ rn)
pt_matrix<- as.matrix(casted_data[, -1])
vis_group_1<- group_vis(data, group_num = 1)
#grid.arrange(vis_group_1)
vis_group_2<- group_vis(data, group_num = 2)
#grid.arrange(vis_group_2)
vis_group_3<- group_vis(data, group_num = 3)
#grid.arrange(vis_group_3)
vis_group_4<- group_vis(data, group_num = 4)
#grid.arrange(vis_group_4)
vis_group_5<- group_vis(data, group_num = 5)
#grid.arrange(vis_group_5)
vis_group_6<- group_vis(data, group_num = 6)
#grid.arrange(vis_group_6)
grid.arrange(vis_group_1, vis_group_2, vis_group_3, vis_group_4, vis_group_5, vis_group_6)
# Profile user characteristics per cluster.
# Provide authentication through the JSON service account key
path="D:/데이터분석/SZ/lgsz-0718-5728f5afdf4f.json"
bq_auth(path)
# Store the project id
projectid="lgsz-0718"
# Set your query
# NOTE(review): `grp_nid` is not defined at this point in the script (it is only
# assigned inside `bigquery_sql` below), so building these queries at the top
# level relies on a leftover object from an earlier session -- verify before
# running the script end to end.
level_sql <- paste0("
with sample as (
SELECT nid, max_pl
FROM sz_dw.f_user_map
where date_diff_reg = 0
and nid in ('", paste( grp_nid, collapse = "','" ), "')"
, ")
select 1.0 * sum(lv) / count(distinct nid) as avg_pl
from sample as A
LEFT JOIN sz_dw.dim_hero_lv as B
ON A.max_pl = B.lv_hero
"
)
# Run `sql` once per cluster number and stack the downloaded results.
# NOTE(review): `grp_nid` is recomputed for every group but never substituted
# into `sql`, so each iteration executes the identical query -- presumably the
# query was meant to be rebuilt per group; confirm before relying on this.
bigquery_sql<- function(sql, projectid, num_group){
  result<- data.frame()
  for( num in seq_len(num_group) ){
    grp_nid<- names( sub_grp[ which(sub_grp == num)] )
    # Run the query and store the data in a dataframe
    tb <- bq_project_query(query=sql,x=projectid)
    df <- bq_table_download(tb)
    result<- rbind(result, df)
  }
  return(result)
}
#### Per-group payment amounts (first-week revenue per paying user).
sql <- paste0("
with sample as (
SELECT nid, sum(daily_revenue) as daily_revenue
FROM sz_dw.f_user_map
where date_diff_reg < 7 and daily_revenue > 0
and nid in ('", paste( grp_nid, collapse = "','" ), "')
group by nid"
, ")
select 1.0 * sum(daily_revenue) / count(distinct nid) as avg_revenue, count(distinct nid) as pu
from sample
"
)
# Run the query and store the data in a dataframe
tb <- bq_project_query(query=sql,x=projectid)
df <- bq_table_download(tb)
print(df)
|
/Discovering_Play_Patterns/time_series_cluster.R
|
no_license
|
eat-toast/papers
|
R
| false
| false
| 5,333
|
r
|
## Time-series clustering of per-user play time (PT):
## load daily PT data, build per-user moving-average trends, cluster users with
## a correlation-based distance, visualise each cluster, then profile clusters
## against BigQuery tables.
setwd('D:\\git_R\\papers\\Discovering_Play_Patterns')
options(scipen = 999, digits=21)
########################################################################################################
library(parallelDist) # https://www.rdocumentation.org/packages/parallelDist/versions/0.1.1/topics/parDist
source("corFuncPtr.R") # if a package-installation prompt appears automatically, just install the package
source('df_prepro.R')
source('group_vis.R')
library(data.table) # read CSV file
library(dplyr)
library(reshape)
library(forecast) # moving-average (MA) computation
library(ggplot2) # for vis
library(RColorBrewer) # for vis
library(gridExtra) # for vis
library(bigrquery) # used to profile each cluster against BigQuery data
########################################################################################################
# Read the data.
# nid : user id
# rn : sequence index (10-minute buckets, daily buckets, etc.)
# pt : play-time value for that rn
data<- fread('SZ_daily_PT_sum_201909.csv', integer64 = 'numeric')
colnames(data)<- c('nid', 'rn', 'pt')
# Hyper parameter
max_rn <- max(data$rn)
MA <- 3
# Load the preprocessing result.
data<- df_prepro(data,seq_length = max_rn) # source('df_prepro.R')
# Additional preprocessing step --> drop users whose sum(pt) = 0 over rn >= 2.
# This usually filters out 30 ~ 50% of users.
nid_pt_sum<-data %>% filter(rn > 1) %>% group_by(nid) %>% summarise(sum_pt = sum(pt))
nid_pt_sum<- nid_pt_sum %>% filter(sum_pt==0)
data<- data %>% filter( !(nid %in% nid_pt_sum$nid ))
# Build the COR matrix:
# one row per user, holding that user's trend data.
nid_list<- as.character(unique(data$nid))
COR<- matrix(0, nrow = length(nid_list), ncol = max_rn - MA +1 )
rownames(COR)<- nid_list
# Trend computation.
# Every nid (user) was expanded to a full sequence of rows above, so index
# ranges of size max_rn can be generated per nid as below.
start_list<- seq(1, nrow(data), by = max_rn)
end_list<- seq(1+max_rn-1, nrow(data)+max_rn-1, by = max_rn)
temp_list<- vector(mode ='list', length = length(nid_list))
for(i in 1:length(nid_list)){
  temp_list[[i]]<- start_list[i] : end_list[i]
}
for(i in 1:length(nid_list)){
  qq <- forecast::ma(data[temp_list[[i]],'pt'], order = MA)
  qq <- qq[!is.na(qq)]
  COR[i,]<-qq
}
# Compute the COR + trend distance matrix.
d<- parDist(COR, method="custom", func = corFuncPtr)
d[is.na(d)]<-0
# Clustering.
# Hierarchical clustering using Ward Linkage
hc1 <- fastcluster::hclust(d, method = "ward.D" )
# Plot the obtained dendrogram
# (not recommended when the data set is large)
# plot(hc1, cex = 0.01, hang = -1, main= 'SZ 2019-10 가입유저 - 가입 후 7일 PT', label = FALSE)
# rect.hclust(hc1, k = 6, border = 2:8)
# Cut tree into 6 groups
sub_grp <- cutree(hc1, k = 6)
# Number of members in each cluster
table(sub_grp)
# Visualisation.
casted_data<- cast(data, nid ~ rn)
pt_matrix<- as.matrix(casted_data[, -1])
vis_group_1<- group_vis(data, group_num = 1)
#grid.arrange(vis_group_1)
vis_group_2<- group_vis(data, group_num = 2)
#grid.arrange(vis_group_2)
vis_group_3<- group_vis(data, group_num = 3)
#grid.arrange(vis_group_3)
vis_group_4<- group_vis(data, group_num = 4)
#grid.arrange(vis_group_4)
vis_group_5<- group_vis(data, group_num = 5)
#grid.arrange(vis_group_5)
vis_group_6<- group_vis(data, group_num = 6)
#grid.arrange(vis_group_6)
grid.arrange(vis_group_1, vis_group_2, vis_group_3, vis_group_4, vis_group_5, vis_group_6)
# Profile user characteristics per cluster.
# Provide authentication through the JSON service account key
path="D:/데이터분석/SZ/lgsz-0718-5728f5afdf4f.json"
bq_auth(path)
# Store the project id
projectid="lgsz-0718"
# Set your query
# NOTE(review): `grp_nid` is not defined at this point in the script (it is only
# assigned inside `bigquery_sql` below), so building these queries at the top
# level relies on a leftover object from an earlier session -- verify before
# running the script end to end.
level_sql <- paste0("
with sample as (
SELECT nid, max_pl
FROM sz_dw.f_user_map
where date_diff_reg = 0
and nid in ('", paste( grp_nid, collapse = "','" ), "')"
, ")
select 1.0 * sum(lv) / count(distinct nid) as avg_pl
from sample as A
LEFT JOIN sz_dw.dim_hero_lv as B
ON A.max_pl = B.lv_hero
"
)
# Run `sql` once per cluster number and stack the downloaded results.
# NOTE(review): `grp_nid` is recomputed for every group but never substituted
# into `sql`, so each iteration executes the identical query -- presumably the
# query was meant to be rebuilt per group; confirm before relying on this.
bigquery_sql<- function(sql, projectid, num_group){
  result<- data.frame()
  for( num in seq_len(num_group) ){
    grp_nid<- names( sub_grp[ which(sub_grp == num)] )
    # Run the query and store the data in a dataframe
    tb <- bq_project_query(query=sql,x=projectid)
    df <- bq_table_download(tb)
    result<- rbind(result, df)
  }
  return(result)
}
#### Per-group payment amounts (first-week revenue per paying user).
sql <- paste0("
with sample as (
SELECT nid, sum(daily_revenue) as daily_revenue
FROM sz_dw.f_user_map
where date_diff_reg < 7 and daily_revenue > 0
and nid in ('", paste( grp_nid, collapse = "','" ), "')
group by nid"
, ")
select 1.0 * sum(daily_revenue) / count(distinct nid) as avg_revenue, count(distinct nid) as pu
from sample
"
)
# Run the query and store the data in a dataframe
tb <- bq_project_query(query=sql,x=projectid)
df <- bq_table_download(tb)
print(df)
|
setwd("~/Documents/UCD/BA Prac/ReligionStudy")
require(gutenbergr) # For downloads of The King James Version Bible (#10) and The Tao Te Ching (#216)
bible <- gutenberg_download(10)
names(bible) <- c("doc","text")
bible$doc <- "The King James Bible"
tao <- gutenberg_download(216)
names(tao) <- c("doc","text")
tao$doc<- "The Tao Te Ching"
system(paste('"/Users/Avi/Documents/UCD/BA Prac/ReligionStudy/pdftotext"','"gita.pdf"'), wait=FALSE)
text <- readLines("gita.txt")
gita <- data_frame(doc = "The Bhagavad Gita", text)
system(paste('"/Users/Avi/Documents/UCD/BA Prac/ReligionStudy/pdftotext"','"quran.pdf"'), wait=FALSE)
text <- readLines("quran.txt")
quran <- data_frame(doc = "The Quran", text)
holybooks <- rbind(bible,tao,gita,quran)
holybooks <- holybooks[holybooks$text !="",]
holybooks$text <- gsub('[[:punct:]]|[[:digit:]]','',holybooks$text)
rm(bible,gita,quran,tao)
save(list = ls(),file = "holybooks.Rdata")
# Start here if the dataset can be loaded -------------------------------
require(dplyr)
require(tidytext)
require(ggplot2)
dev.off()
load("holybooks.Rdata")
clean_hb <- holybooks %>%
group_by(doc) %>%
mutate(linenumber = row_number()) %>%
ungroup()
wordwise <- clean_hb %>%
unnest_tokens(word, text)
wordwise<- filter(wordwise, nchar(wordwise$word)>2)
wordwise<- wordwise %>%
group_by(doc) %>%
mutate(idx = round(100*(linenumber/max(linenumber)),0))
data(stop_words)
cleanwords <- wordwise %>%
anti_join(stop_words)
cleanwords %>%
count(word, sort = TRUE) %>%
filter(n > 1000) %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n)) +
geom_bar(stat = "identity") +
xlab(NULL) +
coord_flip()
# Sentiment flow ----------------------------------------------------------
require(tidyr)
hb_sentiments <- wordwise %>%
inner_join(get_sentiments("bing")) %>%
count(doc, index = idx, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(sentiment = positive - negative)
hb_sentiments<- hb_sentiments %>%
group_by(doc) %>%
mutate(centeredsentiment = as.numeric(scale(sentiment)))
ggplot(hb_sentiments, aes(index, sentiment, fill=as.factor(sentiment>0))) +
geom_bar(stat = "identity", show.legend = FALSE) +
facet_wrap(~doc, ncol = 1) +
scale_x_continuous(label=function(x){return(paste0(x, "%"))})+ scale_fill_manual("",values = c("#ddaa00","#ff9933","#009000","brown")) +
labs(x= "Time Trajectory of the document\n(Section Percentile)",
y= "Sentiments\n(scales & centered)",
title = "Flow of sentiments")
ggplot(hb_sentiments, aes(index, centeredsentiment, fill=as.factor(sentiment>0))) +
geom_bar(stat = "identity", show.legend = FALSE) +
geom_segment(mapping=aes(x=0, y=0, xend=105, yend=0), arrow=arrow(angle = 30,length = unit(0.10, "inches"), ends = "last", type = "closed"), size=0.05, color="black") +
facet_wrap(~doc, ncol = 1) +
scale_x_continuous(label=function(x){return(paste0(x, "%"))})+ scale_fill_manual("",values = c("brown","#ddaa00")) +
labs(x= expression("Time Trajectory of the document (Section in Percentile)" %->% ""),
y= "Sentiments\n(scaled & centered)",
title = "Flow of sentiments") +
theme_bw() +
theme(legend.position="none",
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_line(colour = "#f9f9f9"),
panel.grid.minor = element_blank(),
plot.title=element_text(hjust=0.5,face="bold"),
strip.text.x = element_text(size=11,hjust=0.05,face="bold"),
strip.background = element_blank())
# Word Cloud drill down on sentiment flow ---------------------------------
require(wordcloud)
require(reshape2)
# Largest positive sentiment word cloud
cleanwords %>%
filter(doc=="The King James Bible") %>%
filter(idx=="53") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown","#ddaa00"),random.order=FALSE,rot.per=0,max.words = 100)
# Largest negative sentiment word cloud
cleanwords %>%
filter(doc=="The King James Bible") %>%
filter(idx=="67") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown","#ddaa00"),random.order=FALSE,rot.per=0,max.words = 100)
# Largest negative centered sentiment word cloud
cleanwords %>%
filter(doc=="The Tao Te Ching") %>%
filter(idx=="92") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown","#ddaa00"),random.order=FALSE,rot.per=0,max.words = 100)
# Largest positive centered sentiment word cloud
cleanwords %>%
filter(doc=="The Bhagavad Gita") %>%
filter(idx=="33") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown","#ddaa00"),random.order=FALSE,rot.per=0,max.words = 100)
# Percentage of negative and positive sentiments --------------------------
bingnegative <- get_sentiments("bing") %>%
filter(sentiment == "negative")
bingpositive <- get_sentiments("bing") %>%
filter(sentiment == "positive")
wordcounts <- cleanwords %>%
group_by(doc) %>%
summarize(words = n())
negwordcount<-cleanwords %>%
semi_join(bingnegative) %>%
group_by(doc) %>%
summarize(negativewords = n()) %>%
left_join(wordcounts, by = c("doc")) %>%
mutate(neg_words_percent = round(100*negativewords/words,2)) %>%
ungroup
poswordcount<-cleanwords %>%
semi_join(bingpositive) %>%
group_by(doc) %>%
summarize(positivewords = n()) %>%
left_join(wordcounts, by = c("doc")) %>%
mutate(pos_words_percent = round(100*positivewords/words,2)) %>%
ungroup
all_sentiments<-merge(negwordcount,poswordcount)
ggplot(melt(all_sentiments[,c(1,4,6)]),
aes(x=doc, value, fill=variable, width = 0.25)) +
geom_bar(stat = "identity",position = position_dodge(width=0.3))+
labs(x= "",y= "Percentage of words")+
ggtitle("Percentage of Total Negative and Positive word sentiments") +
scale_fill_manual("",values = c("brown","#ddaa00"),labels= c("Negative\nSentiments","Positive\nSentiments")) +
scale_y_continuous(label=function(y){return(paste0(y, "%"))})+
theme_bw() +
theme(plot.title=element_text(hjust=0.5,face="bold"),
axis.text.x = element_text(face="bold",size=10),
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
# Top 20 sentiments -------------------------------------------------------
# Count each (document, word) pair, keep only Bing-lexicon words, and attach
# each document's total sentiment-word count.
doc_words <- wordwise %>%
  group_by(doc,word) %>%
  summarize(count=length(word)) %>%
  inner_join(get_sentiments("bing"), by = c(word = "word"))
total_words<- doc_words %>%
  group_by(doc) %>%
  summarize(total = sum(count))
doc_words <- left_join(doc_words,total_words)
# Per document, keep the 20 words with the largest absolute contribution.
# Negative-sentiment counts are negated so they plot below zero; `order`
# gives every bar a unique axis position so each facet sorts independently.
plotthis<-doc_words %>%
  count(doc,sentiment, word, wt = count, sort = TRUE) %>%
  mutate(n = ifelse(sentiment == "negative", -n, n)) %>%
  group_by(doc) %>%
  top_n(n=20,wt=abs(n)) %>%
  arrange(doc,n) %>%
  ungroup () %>%
  mutate(order = row_number())
# Diverging horizontal bar chart, one facet per document.
# NOTE(review): the original layered facet_wrap/coord_flip/theme_bw twice and
# set xlab/ylab that labs() then overrode; only the last of each layer took
# effect, so the redundant calls are removed — the rendered plot is unchanged.
ggplot(plotthis, aes(order, n, fill = sentiment)) +
  geom_bar(stat = "identity") +
  # Map the numeric bar positions back onto the words they represent.
  scale_x_continuous(
    breaks = plotthis$order,
    labels = plotthis$word,
    expand = c(0,0)) +
  labs(y="Contribution to sentiment",x=NULL,title = "Top 20 sentiments in each text") +
  coord_flip() +
  facet_wrap(~doc,scales = "free",ncol=1)+
  scale_fill_manual("",values = c("brown","#ddaa00"),labels= c("Negative\nSentiments","Positive\nSentiments"))+
  theme_bw() +
  theme(legend.position="none",
        panel.border = element_blank(),
        panel.background = element_blank(),
        panel.grid.major = element_line(colour = "#f9f9f9"),
        panel.grid.minor = element_blank(),
        plot.title=element_text(hjust=0.5,face="bold"),
        strip.text.x = element_text(size=11,hjust=0,face="bold"),
        strip.background = element_blank())
# Common Sentiments -------------------------------------------------------------
require(ggradar)
require(gridExtra)
commonnegwords<-doc_words %>%
group_by(doc) %>%
mutate(countpercent=count/sum(count)) %>%
ungroup() %>%
group_by(word) %>%
mutate(wordimp= sum(countpercent)) %>%
filter(sentiment=="negative") %>%
group_by(doc) %>%
top_n(n=10,wt=wordimp) %>%
arrange(doc,wordimp) %>%
select(doc,word,countpercent) %>%
dcast(doc~word)
commonnegwords[is.na(commonnegwords)]<-0
x<-ggradar(commonnegwords, grid.min = 0,
grid.mid = 0.015,
grid.max = 0.03,
axis.label.offset = 1.1,
axis.label.size = 4,
grid.label.size = 0,
group.line.width = 0.8,
group.point.size = 1.5,
background.circle.colour = "#ffffff",
legend.text.size = 9,
plot.legend = FALSE,
plot.title = "Common negative sentiments")+
theme(legend.position = "bottom",
plot.title=element_text(hjust=0.5,face = "bold"))+
scale_colour_manual(values = rep(c("#ffbf00","darkkhaki","#009000","cadetblue3"), 100))
commonposwords<-doc_words %>%
group_by(doc) %>%
mutate(countpercent=count/sum(count)) %>%
ungroup() %>%
group_by(word) %>%
mutate(wordimp= sum(countpercent)) %>%
filter(sentiment=="positive") %>%
group_by(doc) %>%
top_n(n=10,wt=wordimp) %>%
arrange(doc,wordimp) %>%
select(doc,word,countpercent) %>%
dcast(doc~word)
commonposwords[is.na(commonposwords)]<-0
y<-ggradar(commonposwords, grid.min = 0,
grid.mid = 0.025,
grid.max = 0.05,
axis.label.offset = 1.1,
axis.label.size = 4,
grid.label.size = 0,
group.line.width = 0.8,
group.point.size = 1.5,
background.circle.colour = "#ffffff",
legend.text.size = 9,
plot.legend = FALSE,
plot.title = "Common positive sentiments")+
theme(legend.position = "bottom",
plot.title=element_text(hjust=0.5,face = "bold"),
axis.title = element_text(face = "bold"))+
scale_colour_manual(values = rep(c("#ffbf00","darkkhaki","#009000","cadetblue3"), 100))
tmp <- arrangeGrob(x + theme(legend.position = "none"), y + theme(legend.position = "none"), layout_matrix = matrix(c(1, 2), nrow = 2))
g <- ggplotGrob(y + theme(legend.position="right"))$grobs
legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
grid.arrange(tmp, legend,ncol=2, widths=c(9,6))
grid.arrange(x,y,nrow=2)
# Deleted:-
# require(gridExtra)
#
# p<-doc_words %>%
# #inner_join(get_sentiments("bing")) %>%
# #count(doc,word, sentiment, sort = TRUE) %>%
# acast(word ~ doc, value.var = "count",sum) %>%
# data.frame()
#
# #p$total<-rowSums(p)
# p$word<-row.names(p)
#
#
# p$The.Bhagavad.Gita<- ifelse(p$The.Bhagavad.Gita!=0,p$The.Bhagavad.Gita/sum(p$The.Bhagavad.Gita),0)
# p$The.King.James.Bible<-ifelse(p$The.King.James.Bible!=0,p$The.King.James.Bible/sum(p$The.King.James.Bible),0)
# p$The.Quran<-ifelse(p$The.Quran!=0,p$The.Quran/sum(p$The.Quran),0)
# p$The.Tao.Te.Ching<-ifelse(p$The.Tao.Te.Ching!=0,p$The.Tao.Te.Ching/sum(p$The.Tao.Te.Ching),0)
# #p$totalratio<-p$total/sum(p$total)
#
# #p$sumratio<-p$The.Bhagavad.Gita+p$The.King.James.Bible+p$The.Quran+p$The.Tao.Te.Ching
#
# p<-melt(p)
#
# p<-p %>%
# inner_join(get_sentiments("bing"))
#
# x<-p %>%
# filter(sentiment=="negative") %>%
# group_by(word) %>%
# mutate(l=sum(value)) %>%
# ungroup() %>%
# top_n(n=50) %>%
# mutate(word = reorder(word, l)) %>%
# ggplot(aes(word,value,fill=variable)) +
# geom_bar(stat="identity",position = position_dodge(width=0.8),show.legend = F) +
# coord_flip()+
# scale_fill_manual("",values = c("#ffbf00","darkkhaki","#009000","cadetblue3"),labels = c("The Bhagavad GIta","The King James Bible","The Quran","The Tao Te Ching"))+
# labs(x="Word",y="Weight", title = "Common Negative sentiments")+
# theme(plot.title=element_text(hjust=0.5,face = "bold"),
# panel.border = element_blank(),
# panel.background = element_blank(),
# panel.grid.major = element_line(colour = "#f9f9f9"),
# panel.grid.minor = element_blank(),
# strip.text.x = element_text(size=11,hjust=0.05,face="bold"),
# strip.background = element_blank(),
# axis.title = element_text(face = "bold"))
#
# y<-p %>%
# filter(sentiment=="positive") %>%
# group_by(word) %>%
# mutate(l=sum(value)) %>%
# ungroup() %>%
# top_n(n=50) %>%
# mutate(word = reorder(word, l)) %>%
# ggplot(aes(word,value,fill=variable)) +
# geom_bar(stat="identity",position = position_dodge(width=0.8)) +
# coord_flip()+
# scale_fill_manual("",values = c("#ffbf00","darkkhaki","#009000","cadetblue3"),labels = c("The Bhagavad GIta","The King James Bible","The Quran","The Tao Te Ching"))+
# labs(x="",y="Weight", title = "Common Positive sentiments")+
# theme(plot.title=element_text(hjust=0.5,face = "bold"),
# panel.border = element_blank(),
# panel.background = element_blank(),
# panel.grid.major = element_line(colour = "#f9f9f9"),
# panel.grid.minor = element_blank(),
# strip.text.x = element_text(size=11,hjust=0.05,face="bold"),
# strip.background = element_blank(),
# axis.title = element_text(face = "bold"))
#
# grid.arrange(x,y, widths=3:4)
# Top 20 Unique Words - TF-IDF --------------------------------------------
# Per-document word counts plus each document's total word count.
doc_words2<- cleanwords %>%
count(doc, word, sort = TRUE) %>%
ungroup()
doc_words2 <- left_join(doc_words2,
doc_words2 %>%
group_by(doc) %>%
summarize(total = sum(n)))
# Quick diagnostic: term-frequency distribution per document (Zipf-like).
ggplot(doc_words2, aes(n/total, fill = doc)) +
geom_histogram(show.legend = FALSE,bins=70) +
#xlim(NA, 0.03) +
facet_wrap(~doc, ncol = 2, scales = "free_y")
# Score each word by tf-idf within its document.
doc_wordstfidf <- doc_words2 %>%
bind_tf_idf(word, doc, n)
# doc_wordstfidf[2] is the word column: duplicated() flags words appearing in
# more than one document, and the anti_join removes ALL rows for those words,
# leaving only words unique to a single text.
doc_wordstfidf <- anti_join(doc_wordstfidf , doc_wordstfidf [duplicated(doc_wordstfidf[2]),], by="word")
# Order the word factor by descending tf-idf so bars plot in rank order.
plot_tfidf <- doc_wordstfidf %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word))))
# Keep the 20 highest-scoring unique words per document
# (top_n with no `wt` uses the last column, tf_idf).
plot_this2 <- plot_tfidf %>%
group_by(doc) %>%
top_n(20) %>%
ungroup
# Horizontal bar chart of tf-idf, one free-scaled facet per document.
ggplot(plot_this2, aes(word, tf_idf, fill = doc)) +
geom_bar(stat = "identity", show.legend = FALSE) +
labs(x = NULL, y = "tf-idf") +
facet_wrap(~doc, ncol = 2, scales = "free") +
coord_flip()+
scale_fill_manual("",values = c("#ffbf00","darkkhaki","#009000","cadetblue3")) +
labs(y= "TF-IDF",title = "Top 20 unique words in each text")+
theme(plot.title=element_text(hjust=0.5,face = "bold"),
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_line(colour = "#f9f9f9"),
panel.grid.minor = element_blank(),
strip.text.x = element_text(size=11,hjust=0.05,face="bold"),
strip.background = element_blank(),
axis.title = element_text(face = "bold"))
# Bigram analysis ---------------------------------------------------------
require(igraph)
require(ggraph)
require(gridExtra)
hb_bigrams <- holybooks %>%
unnest_tokens(bigram, text, token = "ngrams", n = 2)
hb_bigrams<- filter(hb_bigrams, nchar(hb_bigrams$bigram)>2)
bigrams_separated <- hb_bigrams %>%
separate(bigram, c("word1", "word2"), sep = " ")
bigrams_filtered <- bigrams_separated %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word)
bigram_counts <- bigrams_filtered %>%
count(doc,word1, word2, sort = TRUE)
bigram_counts<- bigram_counts[,c(2,3,4,1)]
bigram_counts_gr<- bigram_counts %>%
group_by(doc) %>%
top_n(50,wt=n)%>%
filter (n>2) %>%
ungroup()
set_graph_style()
# Draw a directed bigram network for one document.
#   df        - bigram counts with columns word1, word2, n, doc
#   doc       - document name used both to subset `df` and as the plot title
#   colorname - node colour for this document's graph
# Returns a ggraph plot object (edges weighted by bigram count `n`).
printgraph<- function(df,doc,colorname){
set.seed(123)
# Build the graph from (word1, word2, n) edges of this document only.
bigram_graph <- df[df$doc==doc,] %>%
graph_from_data_frame()
# Reshape the edge list so word1 and word2 stack into one "word" column;
# the first occurrence of each word determines the doc assigned to its
# vertex. NOTE(review): this relies on graph_from_data_frame() creating
# vertices in first-appearance order matching unique(tt$word) — confirm.
gh<-df[df$doc==doc,]
names(gh)<-c("word","word","n","doc")
tt<- rbind(gh[,c(1,4)],gh[,c(2,4)])
pk<-tt[match(unique(tt$word), tt$word),]
V(bigram_graph)$class<-pk$doc
# Closed arrowheads to show bigram direction.
a <- grid::arrow(type = "closed", length = unit(.10, "inches"))
# Force-directed ("fr") layout; edge transparency encodes bigram frequency.
p<- ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n),arrow = a) +
geom_node_point(size = 1.5,colour = colorname) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
ggtitle(doc) +
#th_foreground(foreground = 'grey80', border = F)+
theme(legend.position="none",
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
plot.margin=unit(c(0.2,0.5,0.7,0.5), "cm"))
return (p)
}
# One network per text, then stack all four vertically.
g2<-printgraph(bigram_counts_gr,"The King James Bible",colorname="darkkhaki")
g1<-printgraph(bigram_counts_gr,"The Bhagavad Gita",colorname="#ffbf00")
g3<-printgraph(bigram_counts_gr,"The Quran",colorname="#009000")
g4<-printgraph(bigram_counts_gr,"The Tao Te Ching",colorname="cadetblue3")
grid.arrange(g1,g2,g3,g4,ncol=1)
|
/Holy_books_analysis.R
|
no_license
|
Nashavi/ReligionStudy
|
R
| false
| false
| 17,392
|
r
|
setwd("~/Documents/UCD/BA Prac/ReligionStudy")
require(gutenbergr) # For downloads of The King James Version Bible (#10) and The Tao Te Ching (#216)
bible <- gutenberg_download(10)
names(bible) <- c("doc","text")
bible$doc <- "The King James Bible"
tao <- gutenberg_download(216)
names(tao) <- c("doc","text")
tao$doc<- "The Tao Te Ching"
system(paste('"/Users/Avi/Documents/UCD/BA Prac/ReligionStudy/pdftotext"','"gita.pdf"'), wait=FALSE)
text <- readLines("gita.txt")
gita <- data_frame(doc = "The Bhagavad Gita", text)
system(paste('"/Users/Avi/Documents/UCD/BA Prac/ReligionStudy/pdftotext"','"quran.pdf"'), wait=FALSE)
text <- readLines("quran.txt")
quran <- data_frame(doc = "The Quran", text)
holybooks <- rbind(bible,tao,gita,quran)
holybooks <- holybooks[holybooks$text !="",]
holybooks$text <- gsub('[[:punct:]]|[[:digit:]]','',holybooks$text)
rm(bible,gita,quran,tao)
save(list = ls(),file = "holybooks.Rdata")
# Start here if the dataset can be loaded -------------------------------
require(dplyr)
require(tidytext)
require(ggplot2)
dev.off()
load("holybooks.Rdata")
clean_hb <- holybooks %>%
group_by(doc) %>%
mutate(linenumber = row_number()) %>%
ungroup()
wordwise <- clean_hb %>%
unnest_tokens(word, text)
wordwise<- filter(wordwise, nchar(wordwise$word)>2)
wordwise<- wordwise %>%
group_by(doc) %>%
mutate(idx = round(100*(linenumber/max(linenumber)),0))
data(stop_words)
cleanwords <- wordwise %>%
anti_join(stop_words)
cleanwords %>%
count(word, sort = TRUE) %>%
filter(n > 1000) %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n)) +
geom_bar(stat = "identity") +
xlab(NULL) +
coord_flip()
# Sentiment flow ----------------------------------------------------------
require(tidyr)
hb_sentiments <- wordwise %>%
inner_join(get_sentiments("bing")) %>%
count(doc, index = idx, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(sentiment = positive - negative)
hb_sentiments<- hb_sentiments %>%
group_by(doc) %>%
mutate(centeredsentiment = as.numeric(scale(sentiment)))
ggplot(hb_sentiments, aes(index, sentiment, fill=as.factor(sentiment>0))) +
geom_bar(stat = "identity", show.legend = FALSE) +
facet_wrap(~doc, ncol = 1) +
scale_x_continuous(label=function(x){return(paste0(x, "%"))})+ scale_fill_manual("",values = c("#ddaa00","#ff9933","#009000","brown")) +
labs(x= "Time Trajectory of the document\n(Section Percentile)",
y= "Sentiments\n(scales & centered)",
title = "Flow of sentiments")
ggplot(hb_sentiments, aes(index, centeredsentiment, fill=as.factor(sentiment>0))) +
geom_bar(stat = "identity", show.legend = FALSE) +
geom_segment(mapping=aes(x=0, y=0, xend=105, yend=0), arrow=arrow(angle = 30,length = unit(0.10, "inches"), ends = "last", type = "closed"), size=0.05, color="black") +
facet_wrap(~doc, ncol = 1) +
scale_x_continuous(label=function(x){return(paste0(x, "%"))})+ scale_fill_manual("",values = c("brown","#ddaa00")) +
labs(x= expression("Time Trajectory of the document (Section in Percentile)" %->% ""),
y= "Sentiments\n(scaled & centered)",
title = "Flow of sentiments") +
theme_bw() +
theme(legend.position="none",
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_line(colour = "#f9f9f9"),
panel.grid.minor = element_blank(),
plot.title=element_text(hjust=0.5,face="bold"),
strip.text.x = element_text(size=11,hjust=0.05,face="bold"),
strip.background = element_blank())
# Word Cloud drill down on sentiment flow ---------------------------------
require(wordcloud)
require(reshape2)
# Largest positive sentiment word cloud
cleanwords %>%
filter(doc=="The King James Bible") %>%
filter(idx=="53") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown","#ddaa00"),random.order=FALSE,rot.per=0,max.words = 100)
# Largest negative sentiment word cloud
cleanwords %>%
filter(doc=="The King James Bible") %>%
filter(idx=="67") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown","#ddaa00"),random.order=FALSE,rot.per=0,max.words = 100)
# Largest negative centered sentiment word cloud
cleanwords %>%
filter(doc=="The Tao Te Ching") %>%
filter(idx=="92") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown","#ddaa00"),random.order=FALSE,rot.per=0,max.words = 100)
# Largest positive centered sentiment word cloud
cleanwords %>%
filter(doc=="The Bhagavad Gita") %>%
filter(idx=="33") %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown","#ddaa00"),random.order=FALSE,rot.per=0,max.words = 100)
# Percentage of negative and positive sentiments --------------------------
bingnegative <- get_sentiments("bing") %>%
filter(sentiment == "negative")
bingpositive <- get_sentiments("bing") %>%
filter(sentiment == "positive")
wordcounts <- cleanwords %>%
group_by(doc) %>%
summarize(words = n())
negwordcount<-cleanwords %>%
semi_join(bingnegative) %>%
group_by(doc) %>%
summarize(negativewords = n()) %>%
left_join(wordcounts, by = c("doc")) %>%
mutate(neg_words_percent = round(100*negativewords/words,2)) %>%
ungroup
poswordcount<-cleanwords %>%
semi_join(bingpositive) %>%
group_by(doc) %>%
summarize(positivewords = n()) %>%
left_join(wordcounts, by = c("doc")) %>%
mutate(pos_words_percent = round(100*positivewords/words,2)) %>%
ungroup
all_sentiments<-merge(negwordcount,poswordcount)
ggplot(melt(all_sentiments[,c(1,4,6)]),
aes(x=doc, value, fill=variable, width = 0.25)) +
geom_bar(stat = "identity",position = position_dodge(width=0.3))+
labs(x= "",y= "Percentage of words")+
ggtitle("Percentage of Total Negative and Positive word sentiments") +
scale_fill_manual("",values = c("brown","#ddaa00"),labels= c("Negative\nSentiments","Positive\nSentiments")) +
scale_y_continuous(label=function(y){return(paste0(y, "%"))})+
theme_bw() +
theme(plot.title=element_text(hjust=0.5,face="bold"),
axis.text.x = element_text(face="bold",size=10),
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
# Top 25 sentiments -------------------------------------------------------
doc_words <- wordwise %>%
group_by(doc,word) %>%
summarize(count=length(word)) %>%
inner_join(get_sentiments("bing"), by = c(word = "word"))
total_words<- doc_words %>%
group_by(doc) %>%
summarize(total = sum(count))
doc_words <- left_join(doc_words,total_words)
plotthis<-doc_words %>%
#filter(document =="Bible") %>%
count(doc,sentiment, word, wt = count, sort = TRUE) %>%
#ungroup() %>%
#ungroup() %>%
#group_by(doc) %>%
#top_n(n=25,wt=n) %>%
#ungroup() %>%
mutate(n = ifelse(sentiment == "negative", -n, n)) %>%
group_by(doc) %>%
top_n(n=20,wt=abs(n)) %>%
arrange(doc,n) %>%
ungroup () %>%
mutate(order = row_number())
ggplot(plotthis, aes(order, n, fill = sentiment)) +
geom_bar(stat = "identity") +
facet_wrap(~ doc, scales = "free") +
xlab("Words preceded by negation") +
ylab("Sentiment score * # of occurrences") +
theme_bw() +
coord_flip() +
# Add categories to axis
scale_x_continuous(
breaks = plotthis$order,
labels = plotthis$word,
expand = c(0,0)) +
labs(y="Contribution to sentiment",x=NULL,title = "Top 20 sentiments in each text") +
coord_flip() +
facet_wrap(~doc,scales = "free",ncol=1)+
scale_fill_manual("",values = c("brown","#ddaa00"),labels= c("Negative\nSentiments","Positive\nSentiments"))+
theme_bw() +
theme(legend.position="none",
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_line(colour = "#f9f9f9"),
panel.grid.minor = element_blank(),
plot.title=element_text(hjust=0.5,face="bold"),
strip.text.x = element_text(size=11,hjust=0,face="bold"),
strip.background = element_blank())
# Common Sentiments -------------------------------------------------------------
require(ggradar)
require(gridExtra)
commonnegwords<-doc_words %>%
group_by(doc) %>%
mutate(countpercent=count/sum(count)) %>%
ungroup() %>%
group_by(word) %>%
mutate(wordimp= sum(countpercent)) %>%
filter(sentiment=="negative") %>%
group_by(doc) %>%
top_n(n=10,wt=wordimp) %>%
arrange(doc,wordimp) %>%
select(doc,word,countpercent) %>%
dcast(doc~word)
commonnegwords[is.na(commonnegwords)]<-0
x<-ggradar(commonnegwords, grid.min = 0,
grid.mid = 0.015,
grid.max = 0.03,
axis.label.offset = 1.1,
axis.label.size = 4,
grid.label.size = 0,
group.line.width = 0.8,
group.point.size = 1.5,
background.circle.colour = "#ffffff",
legend.text.size = 9,
plot.legend = FALSE,
plot.title = "Common negative sentiments")+
theme(legend.position = "bottom",
plot.title=element_text(hjust=0.5,face = "bold"))+
scale_colour_manual(values = rep(c("#ffbf00","darkkhaki","#009000","cadetblue3"), 100))
commonposwords<-doc_words %>%
group_by(doc) %>%
mutate(countpercent=count/sum(count)) %>%
ungroup() %>%
group_by(word) %>%
mutate(wordimp= sum(countpercent)) %>%
filter(sentiment=="positive") %>%
group_by(doc) %>%
top_n(n=10,wt=wordimp) %>%
arrange(doc,wordimp) %>%
select(doc,word,countpercent) %>%
dcast(doc~word)
commonposwords[is.na(commonposwords)]<-0
y<-ggradar(commonposwords, grid.min = 0,
grid.mid = 0.025,
grid.max = 0.05,
axis.label.offset = 1.1,
axis.label.size = 4,
grid.label.size = 0,
group.line.width = 0.8,
group.point.size = 1.5,
background.circle.colour = "#ffffff",
legend.text.size = 9,
plot.legend = FALSE,
plot.title = "Common positive sentiments")+
theme(legend.position = "bottom",
plot.title=element_text(hjust=0.5,face = "bold"),
axis.title = element_text(face = "bold"))+
scale_colour_manual(values = rep(c("#ffbf00","darkkhaki","#009000","cadetblue3"), 100))
tmp <- arrangeGrob(x + theme(legend.position = "none"), y + theme(legend.position = "none"), layout_matrix = matrix(c(1, 2), nrow = 2))
g <- ggplotGrob(y + theme(legend.position="right"))$grobs
legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
grid.arrange(tmp, legend,ncol=2, widths=c(9,6))
grid.arrange(x,y,nrow=2)
# Deleted:-
# require(gridExtra)
#
# p<-doc_words %>%
# #inner_join(get_sentiments("bing")) %>%
# #count(doc,word, sentiment, sort = TRUE) %>%
# acast(word ~ doc, value.var = "count",sum) %>%
# data.frame()
#
# #p$total<-rowSums(p)
# p$word<-row.names(p)
#
#
# p$The.Bhagavad.Gita<- ifelse(p$The.Bhagavad.Gita!=0,p$The.Bhagavad.Gita/sum(p$The.Bhagavad.Gita),0)
# p$The.King.James.Bible<-ifelse(p$The.King.James.Bible!=0,p$The.King.James.Bible/sum(p$The.King.James.Bible),0)
# p$The.Quran<-ifelse(p$The.Quran!=0,p$The.Quran/sum(p$The.Quran),0)
# p$The.Tao.Te.Ching<-ifelse(p$The.Tao.Te.Ching!=0,p$The.Tao.Te.Ching/sum(p$The.Tao.Te.Ching),0)
# #p$totalratio<-p$total/sum(p$total)
#
# #p$sumratio<-p$The.Bhagavad.Gita+p$The.King.James.Bible+p$The.Quran+p$The.Tao.Te.Ching
#
# p<-melt(p)
#
# p<-p %>%
# inner_join(get_sentiments("bing"))
#
# x<-p %>%
# filter(sentiment=="negative") %>%
# group_by(word) %>%
# mutate(l=sum(value)) %>%
# ungroup() %>%
# top_n(n=50) %>%
# mutate(word = reorder(word, l)) %>%
# ggplot(aes(word,value,fill=variable)) +
# geom_bar(stat="identity",position = position_dodge(width=0.8),show.legend = F) +
# coord_flip()+
# scale_fill_manual("",values = c("#ffbf00","darkkhaki","#009000","cadetblue3"),labels = c("The Bhagavad GIta","The King James Bible","The Quran","The Tao Te Ching"))+
# labs(x="Word",y="Weight", title = "Common Negative sentiments")+
# theme(plot.title=element_text(hjust=0.5,face = "bold"),
# panel.border = element_blank(),
# panel.background = element_blank(),
# panel.grid.major = element_line(colour = "#f9f9f9"),
# panel.grid.minor = element_blank(),
# strip.text.x = element_text(size=11,hjust=0.05,face="bold"),
# strip.background = element_blank(),
# axis.title = element_text(face = "bold"))
#
# y<-p %>%
# filter(sentiment=="positive") %>%
# group_by(word) %>%
# mutate(l=sum(value)) %>%
# ungroup() %>%
# top_n(n=50) %>%
# mutate(word = reorder(word, l)) %>%
# ggplot(aes(word,value,fill=variable)) +
# geom_bar(stat="identity",position = position_dodge(width=0.8)) +
# coord_flip()+
# scale_fill_manual("",values = c("#ffbf00","darkkhaki","#009000","cadetblue3"),labels = c("The Bhagavad GIta","The King James Bible","The Quran","The Tao Te Ching"))+
# labs(x="",y="Weight", title = "Common Positive sentiments")+
# theme(plot.title=element_text(hjust=0.5,face = "bold"),
# panel.border = element_blank(),
# panel.background = element_blank(),
# panel.grid.major = element_line(colour = "#f9f9f9"),
# panel.grid.minor = element_blank(),
# strip.text.x = element_text(size=11,hjust=0.05,face="bold"),
# strip.background = element_blank(),
# axis.title = element_text(face = "bold"))
#
# grid.arrange(x,y, widths=3:4)
# Top 20 Unqiue Words - TF-IDF --------------------------------------------
doc_words2<- cleanwords %>%
count(doc, word, sort = TRUE) %>%
ungroup()
doc_words2 <- left_join(doc_words2,
doc_words2 %>%
group_by(doc) %>%
summarize(total = sum(n)))
ggplot(doc_words2, aes(n/total, fill = doc)) +
geom_histogram(show.legend = FALSE,bins=70) +
#xlim(NA, 0.03) +
facet_wrap(~doc, ncol = 2, scales = "free_y")
doc_wordstfidf <- doc_words2 %>%
bind_tf_idf(word, doc, n)
doc_wordstfidf <- anti_join(doc_wordstfidf , doc_wordstfidf [duplicated(doc_wordstfidf[2]),], by="word")
plot_tfidf <- doc_wordstfidf %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word))))
plot_this2 <- plot_tfidf %>%
group_by(doc) %>%
top_n(20) %>%
ungroup
ggplot(plot_this2, aes(word, tf_idf, fill = doc)) +
geom_bar(stat = "identity", show.legend = FALSE) +
labs(x = NULL, y = "tf-idf") +
facet_wrap(~doc, ncol = 2, scales = "free") +
coord_flip()+
scale_fill_manual("",values = c("#ffbf00","darkkhaki","#009000","cadetblue3")) +
labs(y= "TF-IDF",title = "Top 20 unique words in each text")+
theme(plot.title=element_text(hjust=0.5,face = "bold"),
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_line(colour = "#f9f9f9"),
panel.grid.minor = element_blank(),
strip.text.x = element_text(size=11,hjust=0.05,face="bold"),
strip.background = element_blank(),
axis.title = element_text(face = "bold"))
# Bigram analysis ---------------------------------------------------------
require(igraph)
require(ggraph)
require(gridExtra)
hb_bigrams <- holybooks %>%
unnest_tokens(bigram, text, token = "ngrams", n = 2)
hb_bigrams<- filter(hb_bigrams, nchar(hb_bigrams$bigram)>2)
bigrams_separated <- hb_bigrams %>%
separate(bigram, c("word1", "word2"), sep = " ")
bigrams_filtered <- bigrams_separated %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word)
bigram_counts <- bigrams_filtered %>%
count(doc,word1, word2, sort = TRUE)
bigram_counts<- bigram_counts[,c(2,3,4,1)]
bigram_counts_gr<- bigram_counts %>%
group_by(doc) %>%
top_n(50,wt=n)%>%
filter (n>2) %>%
ungroup()
set_graph_style()
printgraph<- function(df,doc,colorname){
set.seed(123)
bigram_graph <- df[df$doc==doc,] %>%
graph_from_data_frame()
gh<-df[df$doc==doc,]
names(gh)<-c("word","word","n","doc")
tt<- rbind(gh[,c(1,4)],gh[,c(2,4)])
pk<-tt[match(unique(tt$word), tt$word),]
V(bigram_graph)$class<-pk$doc
a <- grid::arrow(type = "closed", length = unit(.10, "inches"))
p<- ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n),arrow = a) +
geom_node_point(size = 1.5,colour = colorname) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
ggtitle(doc) +
#th_foreground(foreground = 'grey80', border = F)+
theme(legend.position="none",
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
plot.margin=unit(c(0.2,0.5,0.7,0.5), "cm"))
return (p)
}
g2<-printgraph(bigram_counts_gr,"The King James Bible",colorname="darkkhaki")
g1<-printgraph(bigram_counts_gr,"The Bhagavad Gita",colorname="#ffbf00")
g3<-printgraph(bigram_counts_gr,"The Quran",colorname="#009000")
g4<-printgraph(bigram_counts_gr,"The Tao Te Ching",colorname="cadetblue3")
grid.arrange(g1,g2,g3,g4,ncol=1)
|
library(tidyverse)
library(ggpubr)
library(ggprism)
# Previous cluster assignments, renamed so the old labels can coexist with
# the updated ones after the join below.
old_cluster <- readRDS("clusters_old.rds") %>% dplyr::rename(AssignedClusterOld = AssignedCluster)
# Kinase-ranked phosphosite table; AccMod is a composite "Accession:MasterMod"
# key, and old cluster labels are attached by GeneMod.
tt_rank <- readRDS("data/tt_kinaseRank_updated_clust.rds") %>% mutate(AccMod = paste(Accession, MasterMod, sep = ":")) %>%
left_join(old_cluster %>% dplyr::select(-MasterMod), by = c("GeneMod"))
locateCyMotifs <- function(df){
  # Annotate each row with the positions and sequences of Cy ("RxL") motifs
  # found in its amino-acid sequence. The input data frame must carry the
  # sequence in a column named "AAseq". Two columns are added:
  #   cy_location - ";"-separated midpoint (mean of start/end) of each match
  #   cy_motif    - ";"-separated matched motif text
  # Returns the annotated data frame as a tibble.
  out <- df
  # Start/end positions of every motif match, one matrix per sequence.
  match_pos <- str_locate_all(pattern = "[R].[L]", out$AAseq) # Edit this part to change to classical RxL
  # Midpoint of each match, collapsed into a single ";"-separated string.
  midpoints <- lapply(match_pos, rowMeans)
  out$cy_location <- vapply(midpoints, function(p) paste(p, collapse = ";"), character(1))
  # The matched motif 3-mers themselves, collapsed the same way.
  match_txt <- str_extract_all(pattern = "[R].[L]", out$AAseq) # Edit this part to change to classical RxL
  out$cy_motif <- vapply(match_txt, function(s) paste(s, collapse = ";"), character(1))
  as_tibble(out)
}
#find Cy motif locations for every phosphosite row
cy <- tt_rank %>% select(GeneMod, MasterMod, AAseq, location) %>%
locateCyMotifs()
# For each row, find the Cy-motif positions immediately flanking the
# phosphorylation site. Expects columns: cy_location (";"-separated motif
# midpoints), cy_motif (";"-separated motif text) and location (site
# position). Adds: merged_location(_filtered), siteIndexList, CyOnSite,
# siteIndex, leftIndex, rightIndex, location_left/right, cy_left/right.
closesCyMotif <- function(df){
# Small helpers: index of value b in vector a, and element b of vector a.
myfunc <- function(a,b){
which(a == b)
}
myfunc2 <- function(a,b){
a[b]}
temp <- df
# Append the site position to the motif-position string, then sort all
# positions so the site sits between its flanking motifs.
c1 <- paste(temp$cy_location, temp$location, sep = ";")
c2 <- strsplit(c1,split=';', fixed=TRUE)
# NOTE(review): as.integer() on fractional midpoints like "25.5" yields NA
# with a coercion warning — confirm cy_location values here are integral.
c3 <- rapply(c2, function(x) sort(as.integer(x)), how = "list")
# Collapse the sorted positions back into one ";"-separated string.
c4 <- rapply(c3, function(x) paste(x, collapse=";"))
temp$merged_location <- c4
# A motif whose middle S/T/Y coincides with the detected phosphosite makes
# the site position appear twice in the merged string; record all indices.
c5 <- mapply(myfunc, strsplit(c4,split=';', fixed=TRUE),temp$location)
is.na(c5) <- lengths(c5) == 0
temp$siteIndexList <- c5
# Two matches means the Cy motif lies directly on the phosphosite.
temp$CyOnSite <- ifelse(lengths(temp$siteIndexList) == 2, "YES", "NO")
# De-duplicate the merged positions so the on-site motif/site pair
# collapses to a single entry.
c6 <- strsplit(temp$merged_location,split=';', fixed=TRUE)
c7 <- rapply(c6, function(x)unique(x), how = "list")
c8 <- rapply(c7,function(x) paste(x, collapse = ";"))
temp$merged_location_filtered <- c8
# Index of the site within the de-duplicated position list; NA if absent.
c9 <- mapply(myfunc, strsplit(c8,split=';', fixed=TRUE),temp$location)
is.na(c9) <- lengths(c9) == 0
temp$siteIndex <- unlist(c9)
# Neighbouring entries are the nearest motif up- and downstream of the site.
temp$leftIndex <- temp$siteIndex - 1
temp$rightIndex <- temp$siteIndex + 1
# Flanking motif positions; out-of-bounds lookups become NA.
temp$location_left <- mapply(myfunc2, strsplit(temp$merged_location_filtered,split=';', fixed=TRUE), temp$leftIndex)
temp$location_left[lengths(temp$location_left) == 0] <- NA
temp$location_left <- as.numeric(unlist(temp$location_left))
temp$location_right <- mapply(myfunc2, strsplit(temp$merged_location_filtered,split=';', fixed=TRUE), temp$rightIndex)
temp$location_right[lengths(temp$location_right) == 0] <- NA
temp$location_right <- as.numeric(unlist(temp$location_right))
# Flanking motif sequences, looked up by the same left/right indices;
# out-of-bounds lookups become NA.
temp$cy_left <- mapply(myfunc2, strsplit(temp$cy_motif,split=';', fixed=TRUE), temp$leftIndex)
temp$cy_left[lengths(temp$cy_left) == 0] <- NA
temp$cy_left <- unlist(temp$cy_left)
temp$cy_right <- mapply(myfunc2, strsplit(temp$cy_motif,split=';', fixed=TRUE), temp$rightIndex)
temp$cy_right[lengths(temp$cy_right) == 0] <- NA
temp$cy_right <- unlist(temp$cy_right)
temp
}
#Find the closest Cy motif to each phosphorylation site
cy2 <- closesCyMotif(cy) %>% distinct()
# Known CDK1/CDK2 substrate sites (by annotated enzyme symbol).
# NOTE(review): this `cdk` is immediately overwritten by the next assignment
# and is therefore dead code — kept only as a record of the alternative
# "known sites" selection.
cdk <- tt_rank %>%
filter(grepl("CDK1|CDK2", enzyme_genesymbol)) %>%
select(AssignedCluster, GeneMod, MasterMod, AAseq, location) %>%
distinct() %>%
left_join(cy2, by = c("GeneMod", "MasterMod", "AAseq", "location"))
# Predicted CDK sites: score above the per-kinase threshold, significant at
# 90 min, and in old clusters 7 or 8.
cdk <- tt_rank %>%
filter(CDK1 > .563 | CDK2 > .374 | CDK5 > .452, FDR_90min < 0.05, AssignedClusterOld %in% c(7,8)) %>%
select(AssignedClusterOld, GeneMod, MasterMod, AAseq, location) %>%
distinct() %>%
left_join(cy2, by = c("GeneMod", "MasterMod", "AAseq", "location"))
# Absolute distances from the site to its flanking Cy motifs; relabel the
# two clusters as Fast (7) / Slow (8) for plotting.
new_cdk <- cdk %>%
mutate(loc_left = abs(location_left - location),
loc_right = abs(location_right - location)) %>%
filter(AssignedClusterOld %in% c(7,8)) %>%
mutate(AssignedCluster = ifelse(AssignedClusterOld == 7, "Fast", "Slow")) %>%
select(-AAseq)
# Per-cluster summary: how many sites have a motif within 100 aa, plus the
# median and MAD of the upstream distance.
cy_stats <- new_cdk %>% group_by(AssignedClusterOld) %>%
summarise(notFound = sum(loc_left > 100, na.rm =T),
Found = sum(loc_left < 100, na.rm =T),
med = median(loc_left, na.rm = T),
er = mad(loc_left, na.rm = T))
cy_stats
# Histogram of downstream Cy-motif distances (15-80 aa window), normalised
# to overall frequency and split by kinetic cluster.
# NOTE(review): the original layered ylim()/xlim() after scale_*_continuous();
# each later scale silently replaced the earlier one, discarding the intended
# limits and guide. The limits are merged into the scale calls here so both
# the prism guides and the limits take effect.
ggplot(new_cdk %>% filter(loc_right< 80 & loc_right > 15), aes(x = loc_right, fill = AssignedCluster)) +
  #geom_density(size = 1.5) +
  geom_histogram(aes(y = stat(count) / sum(count)), bins = 10, position = "dodge") +
  theme_prism() +
  xlab("Cy motif distance") +ylab("Frequency") +
  theme(axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        legend.position = "top") +
  scale_y_continuous(guide = "prism_offset", position = "left", limits = c(0, .3)) +
  # Decreasing limits render the axis reversed (100 on the left, 0 right).
  scale_x_continuous(guide = "prism_offset", limits = c(100, 0)) +
  scale_fill_manual(values = hcl.colors(n=3, palette = "viridis"))
# Empirical CDF of upstream Cy-motif distances (< 100 aa), by cluster.
# The stray ylim(c(0,.3)) (which would clip a probability axis) and the
# unused fill scale (this plot maps colour, not fill) were copy-paste
# leftovers already overridden/ignored in the original, and are dropped.
ggplot(new_cdk %>% filter(loc_left < 100), aes(x = loc_left, color = AssignedCluster)) +
  stat_ecdf(size =2) +
  scale_color_manual(values = hcl.colors(n=3, "viridis")) +
  theme_prism() +
  xlab("Cy motif distance") +ylab("Probability") +
  theme(axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        legend.position = "top") +
  scale_y_continuous(guide = "prism_offset", position = "left") +
  scale_x_continuous(guide = "prism_offset", limits = c(0, 100))
## Testing out the new approach
# theme(axis.text.x = element_text(size = 12),
# axis.text.y = element_text(size = 12),
# legend.position = "top") +
# scale_color_manual(values = hcl.colors(n=3, palette = "viridis")) +
# ggpubr::labs_pubr() +
# xlim(c(100,0)) #+
# scale_y_continuous(position = "right")
# Computes, for every row, the signed offsets of all Cy motifs relative to the
# phosphorylation site (motif position minus site position).
# Expects a data frame with a `cy_location` column (";"-separated motif
# positions) and a numeric `location` column (the phosphosite position).
# Rows without any motif get a placeholder position of 0, so their offset
# becomes -location.
# Returns the input with an extra `fromSite` character column holding the
# ";"-separated signed distances.
closesCyMotif2 <- function(df){
  out <- df
  # "24;100;150" -> c(24L, 100L, 150L); rows without motifs become integer(0)
  motif_positions <- lapply(strsplit(out$cy_location ,split=';', fixed=TRUE), as.integer)
  # Replace empty entries with 0 so the subtraction below never fails
  motif_positions[lengths(motif_positions) == 0] <- 0
  # Signed distance of every motif from the phosphosite, row by row
  offsets <- vector("list", length(out$location))
  for (row_idx in seq_along(out$location)) {
    offsets[[row_idx]] <- motif_positions[[row_idx]] - out$location[row_idx]
  }
  # Collapse each row's distances back into one ";"-separated string
  out$fromSite <- vapply(offsets, function(d) paste(d, collapse = ";"), character(1))
  out
}
# Signed Cy-motif offsets for every detected phosphosite
cy3 <- closesCyMotif2(cy)
# Keeps only the Cy-motif offsets that lie within `d` amino acids of the
# modification site.
#   df - data frame produced by closesCyMotif2 (needs a `fromSite` column)
#   d  - maximum absolute distance (in aa) for a motif to be retained
# Returns `df` with an extra `fromSiteFiltered` character column of the
# ";"-separated offsets that passed the cutoff.
specificDistance <- function(df, d) {
  out <- df
  # ";"-separated offset strings -> integer vectors
  offset_list <- lapply(strsplit(out$fromSite ,split=';', fixed=TRUE), as.integer)
  # Drop motifs farther than `d` from the phosphorylation site
  kept <- lapply(offset_list, function(offsets) offsets[abs(offsets) <= d])
  # Collapse back to one ";"-separated string per row
  out$fromSiteFiltered <- vapply(kept, function(offsets) paste(offsets, collapse = ";"), character(1))
  out
}
# Keep only motifs within 100 aa of the site
cy4 <- specificDistance(cy3, 100)
# Merge with dataset metadata (cluster labels, kinase scores, FDRs)
temp_tt <- tt_rank %>% select(AssignedCluster,AssignedClusterOld, K1:K3, CDK1, CDK2, CDK5, FDR_90min, FDR_20min)
data <- tibble(cbind(cy4, temp_tt))
# One row per (site, motif offset); keep predicted CDK sites in clusters 7/8
# and label the four (sign x cluster) groups A-D for plotting
df_1 <- data %>%
  distinct() %>%
  filter(!is.na(fromSiteFiltered)) %>%
  separate_rows(fromSiteFiltered, sep = ";", convert = TRUE) %>%
  filter(abs(fromSiteFiltered) > 10) %>%
  #filter(grepl("CDK1|CDK2", enzyme_genesymbol), AssignedCluster %in% c(3,8))
  #filter(grepl("CDK", K1) | grepl("CDK", K2) | grepl("CDK", K3), FDR_90min < 0.05, AssignedCluster %in% c(3,8))
  filter(CDK1 > .563 | CDK2 > .374 | CDK5 > .452, FDR_90min < 0.05, AssignedClusterOld %in% c(7,8)) %>%
  mutate(newgroups = case_when(fromSiteFiltered < 0 & AssignedClusterOld == 7 ~ "A",
                               fromSiteFiltered > 0 & AssignedClusterOld == 7 ~ "B",
                               fromSiteFiltered < 0 & AssignedClusterOld == 8 ~ "C",
                               fromSiteFiltered > 0 & AssignedClusterOld == 8 ~ "D"))
# Two colours (one per cluster), each repeated for the left/right groups
mycol = c(rep(hcl.colors(n = 10, "darkmint")[1],2),
          rep(hcl.colors(n = 10, "darkmint")[8],2))
ggplot(df_1, aes(x = fromSiteFiltered, color = as.character(newgroups))) +
  geom_density(size = 1.5) +
  theme_prism(border = F) +
  scale_y_continuous(guide = "prism_offset") +
  xlab("Distance, aa") + ylab("Density") +
  scale_color_manual(values = mycol) +
  theme(legend.position = "none")
df_1
ggplot(df_1, aes(x = fromSiteFiltered, fill = as.character(AssignedClusterOld))) +
  geom_histogram(bins = 20, position = "dodge") +
  xlab("Distance, aa") + ylab("Density")
df_2 <- df_1 %>% group_by(GeneMod, AssignedClusterOld) %>% count(fromSiteFiltered)
ggplot(df_2, aes(y = n, x = fromSiteFiltered, group = AssignedClusterOld, color = as.character(AssignedClusterOld))) +
  geom_point(size = 3)
ggplot(df_1, aes(x = fromSiteFiltered, fill = AssignedClusterOld)) +
  #geom_density(size = 1.5) +
  geom_histogram(aes(y = stat(count) / sum(count)), bins = 20, position ="dodge") +
  theme_prism() +
  xlab("Cy motif distance") +ylab("Frequency") +
  theme(axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        legend.position = "top") +
  ylim(c(0,.3)) +
  scale_y_continuous(guide = "prism_offset", position = "left") +
  scale_x_continuous(guide = "prism_offset") +
  scale_fill_manual(values = hcl.colors(n=3, palette = "viridis")) +
  xlim(c(100,0))
# NOTE(review): `hist1` and `myhist` are not defined in this file -- these two
# lines presumably come from an interactive session; confirm before running.
test <- ggplot_build(hist1)$data[[1]]
#
test2 <- ggplot_build(myhist)$data[[1]]
# Per-cluster totals used to turn bin counts into frequencies
totals <- df_1 %>% group_by(AssignedClusterOld) %>% count() %>% pull(n)
test3 <- df_1 %>% group_by(AssignedClusterOld) %>%
  mutate(bins = cut_interval(fromSiteFiltered, n = 20, width = 10)) %>%
  count(bins) %>% mutate(freq = case_when(AssignedClusterOld == 7 ~ n/totals[1],
                                          AssignedClusterOld == 8 ~ n/totals[2]))
# Bootstrapping for frequency uncertainty: resample offsets with replacement,
# re-bin, and record per-bin frequency in a 20 x N matrix per cluster
N = 1000
temp_a <- matrix(nrow = 20, ncol = 1000)
temp_b <- matrix(nrow = 20, ncol = 1000)
a <- df_1 %>% filter(AssignedClusterOld == 7) %>% pull(fromSiteFiltered)
b <- df_1 %>% filter(AssignedClusterOld == 8) %>% pull(fromSiteFiltered)
# Full-size resampling with replacement
for(i in 1:N){
  #Generate integer for sampling
  #First sampling vector
  s1 <- sample.int(length(a),length(a), replace = TRUE)
  #Extract the motifs and add to list
  temp <- as.numeric(table(cut_interval(a[s1], n = 20, width = 10)))/totals[1]
  temp_a[,i] <- temp
}
for(i in 1:N){
  #Generate integer for sampling
  #First sampling vector
  s1 <- sample.int(length(b),length(b), replace = TRUE)
  #Extract the motifs and add to list
  temp <- as.numeric(table(cut_interval(b[s1], n = 20, width = 10)))/totals[2]
  temp_b[,i] <- temp
}
# Bootstrap mean and standard deviation per bin, plus plotting groups so left
# and right sides of each cluster draw as separate lines
freq_df <- data.frame( freq = c( apply(temp_a, 1, mean), apply(temp_b, 1, mean)),
                       er = c(apply(temp_a, 1, sd), apply(temp_b, 1, sd)),
                       AssignedClusterOld = c(rep("Fast", 20), rep("Slow", 20)),
                       bins = rep(c(-100,-90,-80,-70,-60,-50,-40,-30,-20,-10,10,20,30,40,50,60,70,80,90,100),2)) %>%
  mutate(mygroup = case_when(AssignedClusterOld == "Fast" & bins > 0 ~ "A",
                             AssignedClusterOld == "Fast" & bins < 0 ~ "B",
                             AssignedClusterOld == "Slow" & bins > 0 ~ "C",
                             AssignedClusterOld == "Slow" & bins < 0 ~ "D"))
ggplot(subset(freq_df, abs(bins) != 10), aes( x= bins, y = freq, color = AssignedClusterOld, group = mygroup)) +
  geom_point(size = 2) +
  geom_linerange(aes(ymin = freq -er, ymax = freq + er), size = 1) +
  geom_line(size = 1) +
  theme_prism(border = F) +
  scale_y_continuous(guide = "prism_offset") +
  scale_color_manual(values = hcl.colors(n=6, "viridis")[c(1,4)])+
  xlab("Distance, aa") + ylab("Frequency") +
  theme(legend.position = "top")
ggplot(test3, aes(x = bins, y = n, color = AssignedClusterOld, group = AssignedClusterOld)) +
  geom_point() +
  geom_line()
sum(is.na(df_1$fromSiteFiltered))
test3
### Adding disorder and conservation prediction to select only one Cy motif per site (run the CyDistance.R script first to set up the dataframe)
# Attach the substrate accession so motifs can be keyed per protein
cy4 <- cy4 %>% mutate(substrate = tt_rank$Accession)
c1 <- cy4 %>% separate_rows(cy_location, sep = ";") %>% pull(cy_location)
cy5 <- cy4 %>% separate_rows(fromSite, sep = ";") %>% mutate(cy_loc = c1, cyAcc = paste(substrate, c1, sep = "_"))
# Add the conservation and disorder prediction for each Cy motif
# NOTE(review): `dis_cons` is not defined in this file -- presumably loaded by
# CyDistance.R as mentioned above; confirm before running standalone.
test <- list()
for (i in unique(cy5$substrate)){
  prot <- dis_cons[[i]] # extract substrate information for specific substrate
  # Average disorder/conservation over the 3-residue window centred on the motif midpoint
  temp_dis <- c(); temp_cons <- c(); temp_aa <- c()
  for (j in cy5$cy_loc[cy5$substrate == i]){
    j <- as.numeric(j)
    temp_dis <- c(temp_dis, mean(as.numeric(prot$property[c(j-1,j,j+1)])))
    temp_cons <- c(temp_cons,mean(as.numeric(prot$conservation_level[c(j-1,j,j+1)])))
    temp_aa <- c(temp_aa, paste(prot$AminoAcid[c(j-1,j,j+1)], collapse = ""))
  }
  temp_info <- data.frame(substrate = i, cy_loc = cy5$cy_loc[cy5$substrate == i],dis = temp_dis, cons = temp_cons, aa = temp_aa)
  test[[i]] <- temp_info
}
test2 <- bind_rows(test)
cy6 <- cy5 %>%
  left_join(test2, by = c("substrate", "cy_loc")) %>%
  mutate(cy_id = paste(substrate, cy_loc, sep = "_"),
         fromSite = as.numeric(fromSite)) %>%
  distinct()
# Filtering to motifs in the range between 10 and 100 AA downstream of the site
cdkSites <- tt_rank %>% filter(CDK1 > .563 | CDK2 > .374 | CDK5 > .452, FDR_90min < 0.05, AssignedClusterOld %in% c(7,8)) %>%
  select(GeneMod,MasterMod, AssignedClusterOld) %>% distinct()
cy7 <- cy6 %>% filter(fromSite < 100 & fromSite > 10) %>%
  left_join(cdkSites, by = c("GeneMod", "MasterMod")) %>% filter(!is.na(AssignedClusterOld))
table(cy7$AssignedClusterOld)
# Per site, keep the single most conserved / most disordered flanking motif
cy_cons <- cy7 %>% group_by(GeneMod, MasterMod) %>% filter(cons == max(cons))
cy_dis <- cy7 %>% group_by(GeneMod, MasterMod) %>% filter(dis == max(dis))
ggplot(cy_dis, aes(x = fromSite, fill = as.character(AssignedClusterOld))) +
  geom_histogram(bins = 20, position = "dodge")
#geom_density()
|
/cymotif_anaysis.R
|
no_license
|
NotValdemaras/WEE1-phosphoproteomics
|
R
| false
| false
| 17,593
|
r
|
library(tidyverse)
library(ggpubr)
library(ggprism)
# Cluster assignments from the previous analysis round, renamed to avoid clashing
# with the updated AssignedCluster column
old_cluster <- readRDS("clusters_old.rds") %>% dplyr::rename(AssignedClusterOld = AssignedCluster)
# Kinase-ranked phosphosite table, keyed by Accession:MasterMod and joined with
# the old cluster labels
tt_rank <- readRDS("data/tt_kinaseRank_updated_clust.rds") %>% mutate(AccMod = paste(Accession, MasterMod, sep = ":")) %>%
  left_join(old_cluster %>% dplyr::select(-MasterMod), by = c("GeneMod"))
# Finds all Cy motifs (matched by the regex [R].[L]) in each amino acid
# sequence and records, per row, the motif midpoints and the matched motif
# strings as ";"-separated values.  The sequence column must be named "AAseq".
# Returns the input as a tibble with two extra columns: cy_location, cy_motif.
locateCyMotifs <- function(df){
  out <- df
  # Matrix of (start, end) positions per sequence; a motif's location is the
  # midpoint (start + end) / 2
  match_pos <- str_locate_all(pattern = "[R].[L]", out$AAseq) # Edit this part to change to classical RxL
  midpoints <- lapply(match_pos, function(m) apply(m, 1, mean))
  out$cy_location <- rapply(midpoints, function(x) paste(x, collapse = ";"))
  # The matched motif strings themselves, in the same order as the locations
  matched <- str_extract_all(pattern = "[R].[L]", out$AAseq) # Edit this part to change to classical RxL
  out$cy_motif <- rapply(matched, function(x) paste(x, collapse = ";"))
  as_tibble(out)
}
# Find Cy motif locations for every phosphosite entry
cy <- tt_rank %>% select(GeneMod, MasterMod, AAseq, location) %>%
  locateCyMotifs()
# For each phosphosite, locates the Cy motifs immediately flanking the site.
# The site position is merged into the sorted list of motif midpoints; the
# neighbours of the site's index in that merged list are the closest motifs
# to the left and to the right.  Adds columns: merged_location(_filtered),
# CyOnSite, siteIndexList, siteIndex, location_left/right, cy_left/cy_right.
closesCyMotif <- function(df){
  # Helper: index (indices) of value b inside vector a
  myfunc <- function(a,b){
    which(a == b)
  }
  # Helper: element of a at position b (NA-safe via downstream handling)
  myfunc2 <- function(a,b){
    a[b]}
  temp <- df
  #merge site location to the cy_locations
  c1 <- paste(temp$cy_location, temp$location, sep = ";")
  #Split the locations
  c2 <- strsplit(c1,split=';', fixed=TRUE)
  #convert to integers and sort
  c3 <- rapply(c2, function(x) sort(as.integer(x)), how = "list")
  #collapse back into one string
  c4 <- rapply(c3, function(x) paste(x, collapse=";"))
  temp$merged_location <- c4
  # Sometimes a motif with S/T/Y in its middle position sits directly on top of
  # the detected phosphosite -- in that case which() returns two indices
  c5 <- mapply(myfunc, strsplit(c4,split=';', fixed=TRUE),temp$location)
  is.na(c5) <- lengths(c5) == 0
  temp$siteIndexList <- c5
  # Marking the "on top" sites (motif midpoint equals the site position)
  temp$CyOnSite <- ifelse(lengths(temp$siteIndexList) == 2, "YES", "NO")
  # Filtering the merged_location to remove the motif/site duplication
  #Split the merged_locations
  c6 <- strsplit(temp$merged_location,split=';', fixed=TRUE)
  #Remove the non-unique elements
  c7 <- rapply(c6, function(x)unique(x), how = "list")
  #Collapsing back into one string
  c8 <- rapply(c7,function(x) paste(x, collapse = ";"))
  temp$merged_location_filtered <- c8
  # Position of the site inside the de-duplicated merged list
  c9 <- mapply(myfunc, strsplit(c8,split=';', fixed=TRUE),temp$location)
  is.na(c9) <- lengths(c9) == 0
  temp$siteIndex <- unlist(c9)
  temp$leftIndex <- temp$siteIndex - 1
  temp$rightIndex <- temp$siteIndex + 1
  # Flanking motif locations; out-of-bound indices (site at either end) become NA
  temp$location_left <- mapply(myfunc2, strsplit(temp$merged_location_filtered,split=';', fixed=TRUE), temp$leftIndex)
  temp$location_left[lengths(temp$location_left) == 0] <- NA
  temp$location_left <- as.numeric(unlist(temp$location_left))
  temp$location_right <- mapply(myfunc2, strsplit(temp$merged_location_filtered,split=';', fixed=TRUE), temp$rightIndex)
  temp$location_right[lengths(temp$location_right) == 0] <- NA
  temp$location_right <- as.numeric(unlist(temp$location_right))
  # Flanking motif sequences, indexed the same way as the locations
  temp$cy_left <- mapply(myfunc2, strsplit(temp$cy_motif,split=';', fixed=TRUE), temp$leftIndex)
  temp$cy_left[lengths(temp$cy_left) == 0] <- NA
  temp$cy_left <- unlist(temp$cy_left)
  temp$cy_right <- mapply(myfunc2, strsplit(temp$cy_motif,split=';', fixed=TRUE), temp$rightIndex)
  temp$cy_right[lengths(temp$cy_right) == 0] <- NA
  temp$cy_right <- unlist(temp$cy_right)
  temp
}
# Find the closest Cy motifs flanking each phosphorylation site
cy2 <- closesCyMotif(cy) %>% distinct()
# Extracting known CDK sites (literature-annotated CDK1/CDK2 substrates)
# NOTE(review): this `cdk` is immediately overwritten by the predicted-site
# version below -- keep only one of the two blocks active.
cdk <- tt_rank %>%
  filter(grepl("CDK1|CDK2", enzyme_genesymbol)) %>%
  select(AssignedCluster, GeneMod, MasterMod, AAseq, location) %>%
  distinct() %>%
  left_join(cy2, by = c("GeneMod", "MasterMod", "AAseq", "location"))
# Extracting predicted CDK sites (score cutoffs per kinase, significant at 90 min)
cdk <- tt_rank %>%
  filter(CDK1 > .563 | CDK2 > .374 | CDK5 > .452, FDR_90min < 0.05, AssignedClusterOld %in% c(7,8)) %>%
  select(AssignedClusterOld, GeneMod, MasterMod, AAseq, location) %>%
  distinct() %>%
  left_join(cy2, by = c("GeneMod", "MasterMod", "AAseq", "location"))
# Transforming the data for plotting: absolute motif-to-site distances for the
# clusters of interest (7 = "Fast", 8 = "Slow")
new_cdk <- cdk %>%
  mutate(loc_left = abs(location_left - location),
         loc_right = abs(location_right - location)) %>%
  filter(AssignedClusterOld %in% c(7,8)) %>%
  mutate(AssignedCluster = ifelse(AssignedClusterOld == 7, "Fast", "Slow")) %>%
  select(-AAseq)
# Calculate some summary statistics per cluster: counts of sites with/without
# a Cy motif within 100 aa, plus median/MAD of the left-side distance
cy_stats <- new_cdk %>% group_by(AssignedClusterOld) %>%
  summarise(notFound = sum(loc_left > 100, na.rm =T),
            Found = sum(loc_left < 100, na.rm =T),
            med = median(loc_left, na.rm = T),
            er = mad(loc_left, na.rm = T))
cy_stats
# Frequency histogram of right-side motif distances (15-80 aa window)
ggplot(new_cdk %>% filter(loc_right< 80 & loc_right > 15), aes(x = loc_right, fill = AssignedCluster)) +
  #geom_density(size = 1.5) +
  geom_histogram(aes(y = stat(count) / sum(count)), bins = 10, position = "dodge") +
  theme_prism() +
  xlab("Cy motif distance") +ylab("Frequency") +
  theme(axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        legend.position = "top") +
  ylim(c(0,.3)) +
  scale_y_continuous(guide = "prism_offset", position = "left") +
  scale_x_continuous(guide = "prism_offset") +
  scale_fill_manual(values = hcl.colors(n=3, palette = "viridis")) +
  xlim(c(100,0))
# Empirical CDF of left-side motif distances (< 100 aa) per cluster
ggplot(new_cdk %>% filter(loc_left < 100), aes(x = loc_left, color = AssignedCluster)) +
  stat_ecdf(size =2) +
  scale_color_manual(values = hcl.colors(n=3, "viridis")) +
  theme_prism() +
  xlab("Cy motif distance") +ylab("Probability") +
  theme(axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        legend.position = "top") +
  ylim(c(0,.3)) +
  scale_y_continuous(guide = "prism_offset", position = "left") +
  scale_x_continuous(guide = "prism_offset") +
  scale_fill_manual(values = hcl.colors(n=3, palette = "viridis")) +
  xlim(c(0,100))
## Testing out the new approach (kept for reference)
# theme(axis.text.x = element_text(size = 12),
#       axis.text.y = element_text(size = 12),
#       legend.position = "top") +
#   scale_color_manual(values = hcl.colors(n=3, palette = "viridis")) +
#   ggpubr::labs_pubr() +
#   xlim(c(100,0)) #+
#   scale_y_continuous(position = "right")
# Computes, for every row, the signed offsets of all Cy motifs relative to the
# phosphorylation site (motif position minus site position).
# Expects a data frame with a `cy_location` column (";"-separated motif
# positions) and a numeric `location` column (the phosphosite position).
# Rows without any motif get a placeholder position of 0, so their offset
# becomes -location.
# Returns the input with an extra `fromSite` character column holding the
# ";"-separated signed distances.
closesCyMotif2 <- function(df){
  out <- df
  # "24;100;150" -> c(24L, 100L, 150L); rows without motifs become integer(0)
  motif_positions <- lapply(strsplit(out$cy_location ,split=';', fixed=TRUE), as.integer)
  # Replace empty entries with 0 so the subtraction below never fails
  motif_positions[lengths(motif_positions) == 0] <- 0
  # Signed distance of every motif from the phosphosite, row by row
  offsets <- vector("list", length(out$location))
  for (row_idx in seq_along(out$location)) {
    offsets[[row_idx]] <- motif_positions[[row_idx]] - out$location[row_idx]
  }
  # Collapse each row's distances back into one ";"-separated string
  out$fromSite <- vapply(offsets, function(d) paste(d, collapse = ";"), character(1))
  out
}
# Signed Cy-motif offsets for every detected phosphosite
cy3 <- closesCyMotif2(cy)
# Keeps only the Cy-motif offsets that lie within `d` amino acids of the
# modification site.
#   df - data frame produced by closesCyMotif2 (needs a `fromSite` column)
#   d  - maximum absolute distance (in aa) for a motif to be retained
# Returns `df` with an extra `fromSiteFiltered` character column of the
# ";"-separated offsets that passed the cutoff.
specificDistance <- function(df, d) {
  out <- df
  # ";"-separated offset strings -> integer vectors
  offset_list <- lapply(strsplit(out$fromSite ,split=';', fixed=TRUE), as.integer)
  # Drop motifs farther than `d` from the phosphorylation site
  kept <- lapply(offset_list, function(offsets) offsets[abs(offsets) <= d])
  # Collapse back to one ";"-separated string per row
  out$fromSiteFiltered <- vapply(kept, function(offsets) paste(offsets, collapse = ";"), character(1))
  out
}
# Keep only motifs within 100 aa of the site
cy4 <- specificDistance(cy3, 100)
# Merge with dataset metadata (cluster labels, kinase scores, FDRs)
temp_tt <- tt_rank %>% select(AssignedCluster,AssignedClusterOld, K1:K3, CDK1, CDK2, CDK5, FDR_90min, FDR_20min)
data <- tibble(cbind(cy4, temp_tt))
# One row per (site, motif offset); keep predicted CDK sites in clusters 7/8
# and label the four (sign x cluster) groups A-D for plotting
df_1 <- data %>%
  distinct() %>%
  filter(!is.na(fromSiteFiltered)) %>%
  separate_rows(fromSiteFiltered, sep = ";", convert = TRUE) %>%
  filter(abs(fromSiteFiltered) > 10) %>%
  #filter(grepl("CDK1|CDK2", enzyme_genesymbol), AssignedCluster %in% c(3,8))
  #filter(grepl("CDK", K1) | grepl("CDK", K2) | grepl("CDK", K3), FDR_90min < 0.05, AssignedCluster %in% c(3,8))
  filter(CDK1 > .563 | CDK2 > .374 | CDK5 > .452, FDR_90min < 0.05, AssignedClusterOld %in% c(7,8)) %>%
  mutate(newgroups = case_when(fromSiteFiltered < 0 & AssignedClusterOld == 7 ~ "A",
                               fromSiteFiltered > 0 & AssignedClusterOld == 7 ~ "B",
                               fromSiteFiltered < 0 & AssignedClusterOld == 8 ~ "C",
                               fromSiteFiltered > 0 & AssignedClusterOld == 8 ~ "D"))
# Two colours (one per cluster), each repeated for the left/right groups
mycol = c(rep(hcl.colors(n = 10, "darkmint")[1],2),
          rep(hcl.colors(n = 10, "darkmint")[8],2))
ggplot(df_1, aes(x = fromSiteFiltered, color = as.character(newgroups))) +
  geom_density(size = 1.5) +
  theme_prism(border = F) +
  scale_y_continuous(guide = "prism_offset") +
  xlab("Distance, aa") + ylab("Density") +
  scale_color_manual(values = mycol) +
  theme(legend.position = "none")
df_1
ggplot(df_1, aes(x = fromSiteFiltered, fill = as.character(AssignedClusterOld))) +
  geom_histogram(bins = 20, position = "dodge") +
  xlab("Distance, aa") + ylab("Density")
df_2 <- df_1 %>% group_by(GeneMod, AssignedClusterOld) %>% count(fromSiteFiltered)
ggplot(df_2, aes(y = n, x = fromSiteFiltered, group = AssignedClusterOld, color = as.character(AssignedClusterOld))) +
  geom_point(size = 3)
ggplot(df_1, aes(x = fromSiteFiltered, fill = AssignedClusterOld)) +
  #geom_density(size = 1.5) +
  geom_histogram(aes(y = stat(count) / sum(count)), bins = 20, position ="dodge") +
  theme_prism() +
  xlab("Cy motif distance") +ylab("Frequency") +
  theme(axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        legend.position = "top") +
  ylim(c(0,.3)) +
  scale_y_continuous(guide = "prism_offset", position = "left") +
  scale_x_continuous(guide = "prism_offset") +
  scale_fill_manual(values = hcl.colors(n=3, palette = "viridis")) +
  xlim(c(100,0))
# NOTE(review): `hist1` and `myhist` are not defined in this file -- these two
# lines presumably come from an interactive session; confirm before running.
test <- ggplot_build(hist1)$data[[1]]
#
test2 <- ggplot_build(myhist)$data[[1]]
# Per-cluster totals used to turn bin counts into frequencies
totals <- df_1 %>% group_by(AssignedClusterOld) %>% count() %>% pull(n)
test3 <- df_1 %>% group_by(AssignedClusterOld) %>%
  mutate(bins = cut_interval(fromSiteFiltered, n = 20, width = 10)) %>%
  count(bins) %>% mutate(freq = case_when(AssignedClusterOld == 7 ~ n/totals[1],
                                          AssignedClusterOld == 8 ~ n/totals[2]))
# Bootstrapping for frequency uncertainty: resample offsets with replacement,
# re-bin, and record per-bin frequency in a 20 x N matrix per cluster
N = 1000
temp_a <- matrix(nrow = 20, ncol = 1000)
temp_b <- matrix(nrow = 20, ncol = 1000)
a <- df_1 %>% filter(AssignedClusterOld == 7) %>% pull(fromSiteFiltered)
b <- df_1 %>% filter(AssignedClusterOld == 8) %>% pull(fromSiteFiltered)
# Full-size resampling with replacement
for(i in 1:N){
  #Generate integer for sampling
  #First sampling vector
  s1 <- sample.int(length(a),length(a), replace = TRUE)
  #Extract the motifs and add to list
  temp <- as.numeric(table(cut_interval(a[s1], n = 20, width = 10)))/totals[1]
  temp_a[,i] <- temp
}
for(i in 1:N){
  #Generate integer for sampling
  #First sampling vector
  s1 <- sample.int(length(b),length(b), replace = TRUE)
  #Extract the motifs and add to list
  temp <- as.numeric(table(cut_interval(b[s1], n = 20, width = 10)))/totals[2]
  temp_b[,i] <- temp
}
# Bootstrap mean and standard deviation per bin, plus plotting groups so left
# and right sides of each cluster draw as separate lines
freq_df <- data.frame( freq = c( apply(temp_a, 1, mean), apply(temp_b, 1, mean)),
                       er = c(apply(temp_a, 1, sd), apply(temp_b, 1, sd)),
                       AssignedClusterOld = c(rep("Fast", 20), rep("Slow", 20)),
                       bins = rep(c(-100,-90,-80,-70,-60,-50,-40,-30,-20,-10,10,20,30,40,50,60,70,80,90,100),2)) %>%
  mutate(mygroup = case_when(AssignedClusterOld == "Fast" & bins > 0 ~ "A",
                             AssignedClusterOld == "Fast" & bins < 0 ~ "B",
                             AssignedClusterOld == "Slow" & bins > 0 ~ "C",
                             AssignedClusterOld == "Slow" & bins < 0 ~ "D"))
ggplot(subset(freq_df, abs(bins) != 10), aes( x= bins, y = freq, color = AssignedClusterOld, group = mygroup)) +
  geom_point(size = 2) +
  geom_linerange(aes(ymin = freq -er, ymax = freq + er), size = 1) +
  geom_line(size = 1) +
  theme_prism(border = F) +
  scale_y_continuous(guide = "prism_offset") +
  scale_color_manual(values = hcl.colors(n=6, "viridis")[c(1,4)])+
  xlab("Distance, aa") + ylab("Frequency") +
  theme(legend.position = "top")
ggplot(test3, aes(x = bins, y = n, color = AssignedClusterOld, group = AssignedClusterOld)) +
  geom_point() +
  geom_line()
sum(is.na(df_1$fromSiteFiltered))
test3
### Adding disorder and conservation prediction to select only one Cy motif per site (run the CyDistance.R script first to set up the dataframe)
# Attach the substrate accession so motifs can be keyed per protein
cy4 <- cy4 %>% mutate(substrate = tt_rank$Accession)
c1 <- cy4 %>% separate_rows(cy_location, sep = ";") %>% pull(cy_location)
cy5 <- cy4 %>% separate_rows(fromSite, sep = ";") %>% mutate(cy_loc = c1, cyAcc = paste(substrate, c1, sep = "_"))
# Add the conservation and disorder prediction for each Cy motif
# NOTE(review): `dis_cons` is not defined in this file -- presumably loaded by
# CyDistance.R as mentioned above; confirm before running standalone.
test <- list()
for (i in unique(cy5$substrate)){
  prot <- dis_cons[[i]] # extract substrate information for specific substrate
  # Average disorder/conservation over the 3-residue window centred on the motif midpoint
  temp_dis <- c(); temp_cons <- c(); temp_aa <- c()
  for (j in cy5$cy_loc[cy5$substrate == i]){
    j <- as.numeric(j)
    temp_dis <- c(temp_dis, mean(as.numeric(prot$property[c(j-1,j,j+1)])))
    temp_cons <- c(temp_cons,mean(as.numeric(prot$conservation_level[c(j-1,j,j+1)])))
    temp_aa <- c(temp_aa, paste(prot$AminoAcid[c(j-1,j,j+1)], collapse = ""))
  }
  temp_info <- data.frame(substrate = i, cy_loc = cy5$cy_loc[cy5$substrate == i],dis = temp_dis, cons = temp_cons, aa = temp_aa)
  test[[i]] <- temp_info
}
test2 <- bind_rows(test)
cy6 <- cy5 %>%
  left_join(test2, by = c("substrate", "cy_loc")) %>%
  mutate(cy_id = paste(substrate, cy_loc, sep = "_"),
         fromSite = as.numeric(fromSite)) %>%
  distinct()
# Filtering to motifs in the range between 10 and 100 AA downstream of the site
cdkSites <- tt_rank %>% filter(CDK1 > .563 | CDK2 > .374 | CDK5 > .452, FDR_90min < 0.05, AssignedClusterOld %in% c(7,8)) %>%
  select(GeneMod,MasterMod, AssignedClusterOld) %>% distinct()
cy7 <- cy6 %>% filter(fromSite < 100 & fromSite > 10) %>%
  left_join(cdkSites, by = c("GeneMod", "MasterMod")) %>% filter(!is.na(AssignedClusterOld))
table(cy7$AssignedClusterOld)
# Per site, keep the single most conserved / most disordered flanking motif
cy_cons <- cy7 %>% group_by(GeneMod, MasterMod) %>% filter(cons == max(cons))
cy_dis <- cy7 %>% group_by(GeneMod, MasterMod) %>% filter(dis == max(dis))
ggplot(cy_dis, aes(x = fromSite, fill = as.character(AssignedClusterOld))) +
  geom_histogram(bins = 20, position = "dodge")
#geom_density()
|
### quality checking:
# check format of columns (id, variable, value)
# all ids have the same variables
# within an id, the same length is required for each variable;
# it can be forced to the intersection, but by default it is a stop
# no missing days inbetween the date range
# no NAs
#sanity check, all end dates are the same
#sanity check if mutistep, check early if it is thet same as horizon, or just ignore horizon
# SEPARATE THE EMBEDDING FUNCTIONS TO EXPOSE THEM AS STANDALONE FUNCTIONALITY,
# FOR EXAMPLE FOR IN-SAMPLE LOSS EXPERIMENTS
# MATCH NAMES TO AVOID ERRORS WITH ARRANGING OF IDS AND VARIABLES AND DATES
# check params_tbl is valid
library(readr)
library(tidyr)
library(dplyr)
library(lubridate)
library(ggplot2)
# Time-delay embedding of a series: each row holds `lag + step_ahead`
# consecutive values, most recent first (stats::embed ordering).
# Errors when the series is too short to produce at least two rows.
embed_lag = function(x, lag, step_ahead) {
  window <- lag + step_ahead
  stopifnot(length(x) > window)
  stats::embed(x, window)
}
# Column indices of the forecast targets inside the multivariate embedding
# matrix.  Each variable occupies a contiguous block of `lag + step_ahead`
# columns and the first `step_ahead` columns of every block are the targets
# (e.g. 3 variables, lag 17, step_ahead 3 -> 1,2,3, 21,22,23, 41,42,43).
#
# num_variables - number of variables per series
# lag           - number of lagged predictor columns per variable
# step_ahead    - number of target (future) columns per variable
# Returns a numeric vector of length num_variables * step_ahead, usable as a
# column index into the embedding matrix.
calc_tgt_cols_in_multivar_embed = function(num_variables, lag, step_ahead) {
  block_width <- lag + step_ahead
  # vapply (rather than sapply) keeps the result shape type-stable even for
  # degenerate inputs such as num_variables == 1
  ind_mat <- vapply(seq_len(num_variables), function(i) {
    1:step_ahead + (i - 1) * block_width
  }, numeric(step_ahead))
  as.vector(ind_mat)
}
#dset has the format id, variable, value
# Builds the lagged design matrix X, the multi-step target matrix Y, and the
# final-observation matrix X_last (one row per id, used for forecasting) for a
# panel of multivariate series.  Each series is NA-padded at the end with
# step_ahead - 1 values so shorter horizons keep all observations; those NA
# targets are filtered out per column later in fit_fastlm.
get_mvar_emb = function(dset, lag, step_ahead) {
  #get the ids and last date
  id_var_date = dset %>%
    arrange(id) %>%
    group_by(id,variable) %>%
    summarise(date=max(date), .groups="drop") %>%
    ungroup()
  #get the variable names for constructing the output
  var_names = id_var_date %>% select(variable) %>% unique() %>% arrange()
  num_variables = length(var_names$variable)
  if (0) { ##deprecated code, other branch is the optimized one
    #multivar AND multisep embedding from a tibble
    emb_list = dset %>% arrange(id) %>% group_by(id) %>%
      arrange(date, variable) %>%
      group_map( function(.x, .y) {
        Xvar = .x %>% group_by(variable) %>%
          group_map( function (.xvar, .yvar) { #embed each variable for each id and column join
            tseries = c(.xvar$value, rep(NA, step_ahead-1)) #pad with NA for the multi-step, so shorter horizon dont miss observations
            Xvar = embed_lag(tseries, lag, step_ahead)
          })
        do.call(cbind, Xvar)
      })
    X = do.call(rbind, emb_list)
    ###!!!! TO DO: do the fast embedding, not the rbind, but precreating X and Y matrices and filling them
    last_rows = sapply(emb_list, nrow) #get the position of the last observation in each series, it is the one used for forecasting
    #get the columns that will be used for forecasting, removing the last lag because the first is not the target
    last_cols = as.vector( sapply(1:num_variables, function(i) {(step_ahead - 1 + 1:lag) + (i-1)*(lag+step_ahead)}) )
    X_last = X[cumsum(last_rows), last_cols, drop=FALSE] #save the last observation, useful for forecasting
    #!!!!!clear memory just in case it gets rough
    #rm(emb_list);gc()
    y_cols = calc_tgt_cols_in_multivar_embed(num_variables, lag, step_ahead)
    Y = X[, y_cols, drop=FALSE]
    X = X[, -y_cols, drop=FALSE]
  } else {
    # One padded numeric vector per (id, variable), grouped by id
    emb_list = dset %>% arrange(id) %>% group_by(id) %>%
      arrange(date, variable) %>%
      group_map( function(.X, .y) {
        .X %>% group_by(variable) %>%
          group_map( function (.xvar, .yvar) { #embed each variable for each id and column join
            c(.xvar$value, rep(NA, step_ahead-1)) #pad with NA for the multi-step, so shorter horizon dont miss observations
          })
      })
    lengths <- sapply(emb_list, function (x) length(x[[1]]))
    # Rows produced per series: a padded series of length len yields
    # len - (lag + step_ahead) + 1 embedding rows.
    # BUGFIX: the previous `lengths - lag` over-allocated step_ahead - 1 rows
    # per series when step_ahead > 1, leaving all-zero rows at the bottom of X
    # and Y (zero, not NA, targets that contaminate the regression).  For the
    # common step_ahead == 1 case both formulas coincide.
    lengths <- lengths - lag - step_ahead + 1
    #!!!!maybe precalc the size of the matrix and use hard drive for very large matrices
    X = matrix(0, nrow=sum(lengths), ncol=(lag) * num_variables)
    Y = matrix(0, nrow=sum(lengths), ncol=step_ahead*num_variables)
    X_last = matrix(0, nrow=length(emb_list), ncol=lag*num_variables)
    row_count = 1
    for (i in 1:length(emb_list)) {
      for (j in 1:length(emb_list[[i]])) {
        emb <- embed(emb_list[[i]][[j]], lag + step_ahead)
        X[row_count:(row_count + nrow(emb)-1), (j-1)*lag + 1:lag] <- emb[,-(1:step_ahead), drop=FALSE]
        Y[row_count:(row_count + nrow(emb)-1) , (j-1)*step_ahead + 1:step_ahead] <- emb[,1:step_ahead, drop=FALSE]
        X_last[i, (j-1)*lag + 1:lag] = emb[nrow(emb), (step_ahead - 1) + 1:lag, drop=FALSE]
      }
      # all variables of an id share the same length, so `emb` from the last
      # variable gives the correct row advance for the whole id
      row_count <- row_count + nrow(emb)
    }
  }
  colnames(Y) <- paste(rep(var_names$variable, each=step_ahead), "_hor_", step_ahead:1, sep="")
  colnames(X) <- colnames(X_last) <- paste(rep(var_names$variable, each=lag), "_lag_", 1:lag, sep="")
  list(X=X, Y=Y, id_var_date=id_var_date, X_last = X_last, lag=lag, step_ahead=step_ahead,
       num_variables = num_variables,
       var_names = var_names)
}
# Fits one OLS regression per target column of Y (all step_ahead horizons of
# all variables) on the shared design matrix X via RcppArmadillo's fast solver.
# Rows whose target is NA (the multi-step padding) are dropped per column so
# shorter horizons keep all their observations.
# Returns the coefficient matrix: one row per predictor column of X, one
# column per target column of Y (dimnames carried over accordingly).
fit_fastlm = function(X, Y, lag, step_ahead, num_variables ) {
  n_targets <- step_ahead * num_variables
  coef_cols <- lapply(seq_len(n_targets), function(col_idx) {
    keep <- !is.na(Y[, col_idx]) # drop the NA-masked padding rows for this horizon
    RcppArmadillo::fastLmPure(X[keep, , drop = FALSE],
                              Y[keep, col_idx])$coefficients
  })
  COEF <- do.call(cbind, coef_cols)
  COEF <- as.matrix(COEF)
  rownames(COEF) <- colnames(X)
  colnames(COEF) <- colnames(Y)
  COEF
}
# Produces forecasts from a fitted coefficient matrix.
# step_ahead == 1: recursive forecasting -- the one-step model is applied
#   `horizon` times, feeding each prediction back into the lag-1 slots.
# step_ahead > 1: direct multi-step forecasting; requires step_ahead == horizon.
# Column layout mirrors the embedding: for each variable, the LAST of its
# `horizon` columns is the first step ahead (time runs right to left).
forec_mvar <- function(COEF, X_last, horizon, lag, step_ahead, num_variables) {
  if (step_ahead == 1) { #recursive forec
    preds <- X_last %*% COEF
    if (horizon > 1) {
      all_preds <- matrix(0, nrow(preds), horizon * ncol(preds))
      # last column of each variable's block holds the first-step forecast
      first_step_cols <- seq(horizon, horizon * num_variables, horizon)
      all_preds[, first_step_cols] <- preds
      # lag-1 slot of each variable inside the rolling input window
      lag1_cols <- seq(1, lag * num_variables, lag)
      feed <- preds
      window <- X_last
      for (h in 2:horizon) {
        # shift every lag one position older, then inject the newest forecast
        window[, 2:ncol(window)] <- window[, 1:(ncol(window) - 1), drop = FALSE]
        window[, lag1_cols] <- feed
        feed <- window %*% COEF
        all_preds[, first_step_cols - h + 1] <- feed
      }
      preds <- all_preds
    }
  } else { #multi step ahead
    if (step_ahead != horizon) {
      stop(paste("ERROR: Multi-step and Horizon dont match! Cannot do multi-step:", step_ahead, "with this model for horizon:", horizon))
    }
    preds <- X_last %*% COEF
  }
  preds
}
# Converts the prediction matrix (one row per id, horizon * num_variables
# columns) into a long tibble with columns id, variable, date, value.
# The embedding reverses time order, so dates are assigned horizon:1 days
# after each id's last observed date.
preds_to_tibble <- function(preds, horizon, id_var_date, var_names, num_variables) {
  id_last_date = id_var_date %>% select(id, date) %>% distinct()
  # seq_along (not 1:length) so an empty id table yields an empty result
  # instead of an error
  pred_dset = lapply(seq_along(id_last_date$id),
                     function (i) tibble(id=id_last_date$id[i],
                                         variable=rep(var_names$variable, each=horizon),
                                         date = id_last_date$date[i] + rep(horizon:1, num_variables), #!dates are reversed because embed reverses
                                         value=preds[i,]))
  # return the bound tibble visibly (the old trailing assignment returned it
  # invisibly)
  bind_rows(pred_dset) %>% arrange(id, date, variable)
}
# End-to-end fit-and-forecast for one multivariate panel: build the embedding,
# fit one OLS per target, forecast `horizon` steps, and return the predictions
# as a long tibble (id, variable, date, value).
predict_multivar_tbl <- function(dset, step_ahead, lag, horizon) {
  emb <- get_mvar_emb(dset, lag, step_ahead)
  coef_mat <- fit_fastlm(emb$X, emb$Y, emb$lag, emb$step_ahead, emb$num_variables)
  pred_mat <- forec_mvar(coef_mat, emb$X_last, horizon = horizon,
                         lag = emb$lag,
                         step_ahead = emb$step_ahead, num_variables = emb$num_variables)
  preds_to_tibble(pred_mat, horizon = horizon,
                  id_var_date = emb$id_var_date,
                  var_names = emb$var_names,
                  num_variables = emb$num_variables)
}
# Univariate mode: each variable is forecast independently by running
# predict_multivar_tbl on a single-variable slice.
predict_univar_tbl <- function(dset, step_ahead, lag, horizon) {
  dset %>% group_by(variable) %>% group_modify(
    function (.x, .y)
      # group_modify strips the grouping column from .x, so `variable` is
      # re-attached before the call and dropped again afterwards (group_modify
      # restores it as the key column)
      predict_multivar_tbl(mutate(.x, variable=.y$variable), step_ahead=step_ahead,
                           lag=lag, horizon=horizon) %>% select(-variable) ) %>%
    select(id, variable, date, value) %>% ungroup()
}
# MASE-style scale per (id, variable): mean absolute one-step difference.
# Returns a table with columns id, variable, scale_type, scale.
maselike_normtable <- function(dset) {
  by_series <- group_by(dset, id, variable)
  summarize(by_series,
            scale_type = "mase",
            scale = mean(abs(diff(value))),
            .groups = "drop")
}
# Maximum-value scale per (id, variable).
# Returns a table with columns id, variable, scale_type, scale.
max_normtable <- function(dset) {
  by_series <- group_by(dset, id, variable)
  summarize(by_series, scale_type = "max", scale = max(value), .groups = "drop")
}
# Largest absolute one-step change per (id, variable), used as a scale.
# Returns a table with columns id, variable, scale_type, scale.
diffmax_normtable <- function(dset) {
  by_series <- group_by(dset, id, variable)
  summarize(by_series, scale_type = "diffmax", scale = max(abs(diff(value))), .groups = "drop")
}
# Divides each (id, variable) series by its precomputed scale, then strips the
# helper columns so the output has the original shape.
norm_by_table <- function(dset, table) {
  joined <- left_join(dset, table, by = c("id", "variable"))
  scaled <- joined %>%
    group_by(id, variable) %>%
    mutate(value = value / scale) %>%
    ungroup()
  select(scaled, -scale, -scale_type)
}
# Inverse of norm_by_table: multiplies each series back by its stored scale
# and strips the helper columns.
denorm_by_table <- function(dset, table) {
  joined <- left_join(dset, table, by = c("id", "variable"))
  rescaled <- joined %>%
    group_by(id, variable) %>%
    mutate(value = value * scale) %>%
    ungroup()
  select(rescaled, -scale, -scale_type)
}
# Shifts every value in the dataset by a constant base level.
flatbase_transform <- function(dset, base) {
  shifted <- dset
  shifted$value <- shifted$value + base
  shifted
}
# First-differences each (id, variable) series; a 0 is prepended before
# differencing so the first observation is kept as-is.
diff_transform <- function(dset) {
  by_series <- group_by(dset, id, variable)
  ungroup(mutate(by_series, value = diff(c(0, value))))
}
# Inverse of diff_transform: cumulative sum restores the original levels.
dediff_transform <- function(dset) {
  by_series <- group_by(dset, id, variable)
  ungroup(mutate(by_series, value = cumsum(value)))
}
# Rolling-origin evaluation: for every hyper-parameter row of params_tbl and
# every forecast origin in forec_dates, train on data up to the origin
# (normalised per series via the configured norm table), forecast `horizon`
# steps, denormalise, and collect everything into one long tibble tagged with
# forec_date and the parameter set.
#   params_tbl columns used: multi_var, lag, horizon, norm_type (function
#   name), multi_step, id_set (list column of ids to include).
rollorig_predict = function(dset, forec_dates, params_tbl) {
  max_normtable; #these are needed for parallelization, so they know the functions can be used
  maselike_normtable;
  diffmax_normtable;
  results_rollorig = NULL
  for (i in 1:nrow(params_tbl)) {
    param_set = params_tbl[i,]
    multi_var = param_set$multi_var
    lag = param_set$lag
    horizon = param_set$horizon
    norm_f = get(param_set$norm_type) # look up the normalisation builder by name
    multi_step = param_set$multi_step
    id_set = unlist(param_set$id_set)
    # direct multi-step trains one regression per horizon; otherwise a single
    # one-step model is applied recursively
    if (multi_step) {
      step_ahead = horizon
    } else {
      step_ahead = 1
    }
    # BUGFIX: ifelse() cannot select between functions -- it rep()s its
    # branches and errors on closures -- so pick the predictor with if/else
    predict_f = if (multi_var) predict_multivar_tbl else predict_univar_tbl
    for (forec_date in forec_dates) {
      forec_date = as_date(forec_date) # for() strips the Date class; restore it
      train_set = dset %>% filter(date <= forec_date, id %in% id_set)
      norm_table = train_set %>% norm_f()
      forec_dset = train_set %>%
        norm_by_table(norm_table) %>%
        predict_f(step_ahead=step_ahead,
                  lag=lag,
                  horizon=horizon)
      forec_dset = forec_dset %>% denorm_by_table(norm_table)
      results_rollorig = bind_rows(results_rollorig, tibble(forec_date = forec_date,
                                                            param_set,
                                                            forec_dset))
    }
  }
  results_rollorig
}
# Parallel wrapper around rollorig_predict: one future per parameter row,
# results row-bound at the end.  Indices are mapped instead of rows because
# the id_set column holds lists, which do not survive row-wise splitting.
future_rollorig_predict = function(dset, forec_dates, params_tbl) {
  furrr::future_map(1:nrow(params_tbl), function(indi) { #we use indices to simplify: the id_set col are lists
    param_set = tibble(params_tbl[indi,])
    rollorig_predict(dset, forec_dates, param_set)},
    .options = furrr::furrr_options(seed = 123)) %>% bind_rows()
}
#!!!! TO DO !!!!
#spain: add italy covariates
#byrow normalization
#general model classes, external features
#efficient bigdata, wide format, chunking, keras
#unifying frequency
#Join forecasts with ground truth on (id, variable, date) and append MAE and
#MAPE columns; the MAPE denominator is shifted by +1 to avoid division by 0.
add_forec_errors <- function(dset, ground_truth) {
  dset %>% inner_join(ground_truth, by=c("id", "variable", "date"),
                      suffix=c("", "_true")) %>%
    mutate(MAE = (abs(value - value_true)),
           MAPE = (abs(value - value_true) / (abs(value_true)+1)) )
}
#weekly aggregation for the covid forecast hub and others
#sum, week starting in sundays, remove incomplete weeks
#Each day is floored to its week's Sunday and the group is labelled by +6
#days (the week-ending Saturday); weeks with fewer than 7 days are dropped.
epiweek_aggreg = function(dset) {
  dset %>%
    group_by(id, variable, date = floor_date(date, "week", week_start = 7)+6) %>%
    filter(n() ==7) %>%
    summarize(value = sum(value), .groups="drop") %>%
    ungroup()
}
#Left-pad each (id, variable) series with zero values from start_date up to
#the day before its first observation, so all series share a common start.
pad_zeros = function(dset, start_date) {
  dset %>% group_by(id,variable) %>%
    group_modify( function (.x, .y) {
      min_date = min(.x$date)
      if (min_date > start_date) {
        date_range = seq(as_date(start_date), min_date-1, by="days")
        .x = tibble(date=as_date(date_range),
                    value=0) %>% bind_rows( .x)
      }
      .x
    }) %>% ungroup()
}
#Heuristic anomaly cleaning for daily count series, applied per
#(id, variable): negatives are zeroed, one-day spikes larger than the
#surrounding 7-day totals are replaced by a triangular weighted average,
#and large day-to-day swings are partially redistributed between days.
covid_clean_anomalies = function(dset) {
  clean_x = function(x) {
    #set negatives to zero
    x[x < 0] = 0
    #remove one day peaks; x[i- 1:7] / x[i +1:7] are the 7 days before/after
    #NOTE(review): for series shorter than ~16 points, 8:(length(x)-8) counts
    #downwards and the loop touches unintended indices -- confirm inputs are
    #always long enough.
    for (i in 8:(length(x)-8)) {
      is_peak = x[i] > sum(x[i- 1:7]) && x[i] > sum(x[i +1:7])
      if (is_peak) {
        weights = c(1:8, 7:1)
        weights = weights / sum(weights)
        x[i] = sum(x[i + (-7):7] * weights)
      }
    }
    #remove big swing days: move 25% of a jump back/forward one day
    for (i in 1:(length(x)-1)) {
      is_peak = x[i +1] > 5*x[i]
      if (is_peak) {
        x[i] = x[i] + x[i+1]*0.25
        x[i+1] = 0.75*x[i+1]
      } else {
        is_low = x[i +1] < 0.2*x[i]
        if (is_low) {
          x[i+1] = x[i+1] + x[i]*0.25
          x[i] = 0.75*x[i]
        }
      }
    }
    x
  }
  dset %>%
    group_by(id, variable) %>%
    mutate(value = clean_x(value)) %>%
    ungroup()
}
#function that does forecasting on the given set of dates
#for the hyperparameter combination given (lag order, normalization type, dataset (external series))
#then the errors are calculated against a ground truth
#and a forecast combination of the best univariate and multivariate are calculated
#the output are the forecast at the dates given
#the summary of the best hyperparam for each variable and their error (according to MAE)
#and the forecast for only the best for each variable, at the later date given as input
#See the comment block above for the overall contract.
future_val_pred = function(train_dset, forec_dates, ground_truth_dset, params_tbl) {
  #(quick/dirty) code to randomly access the params table, to better share the load among parallel workers
  #http://www.cookbook-r.com/Numbers/Saving_the_state_of_the_random_number_generator/
  #shuffle the rows with a fixed seed, then restore the caller's RNG state
  if (exists(".Random.seed", .GlobalEnv)) {
    oldseed <- .GlobalEnv$.Random.seed
  } else {
    oldseed <- NULL
  }
  set.seed(1234)
  shuff_params_tbl = params_tbl[sample(nrow(params_tbl)),]
  if (!is.null(oldseed)) {
    .GlobalEnv$.Random.seed <- oldseed
  } else {
    rm(".Random.seed", envir = .GlobalEnv)
  }
  ro_forec_dset = future_rollorig_predict(train_dset, forec_dates, shuff_params_tbl)
  #errors on the validation origins only
  errs = ro_forec_dset %>%
    add_forec_errors(ground_truth_dset) %>%
    filter(forec_date %in% forec_dates)
  #best per variable and multi/uni variate
  best_unimulti = errs %>% group_by(lag, multi_var, multi_step,
                                    horizon, norm_type, id_set, variable) %>%
    summarize(avg_MAE = mean(MAE), .groups="drop") %>%
    group_by(variable, multi_var) %>% top_n(1, -avg_MAE) %>% ungroup()
  #forecast combination of the best uni-multi, per variable
  best_combi_forec = ro_forec_dset %>%
    inner_join(best_unimulti, by=c("lag", "multi_var", "multi_step",
                                   "horizon", "norm_type",
                                   "id_set", "variable")) %>%
    select(-avg_MAE) %>%
    group_by(forec_date, id, variable, date) %>%
    summarize(value = mean(value), .groups="drop") %>%
    mutate(combi=TRUE) %>% ungroup()
  #error of the best uni-multi combination
  best_combi_err = best_combi_forec %>%
    add_forec_errors(ground_truth_dset) %>%
    group_by(variable) %>% summarize(avg_MAE = mean(MAE))
  #per variable: keep whichever of {best single model, combination} wins on MAE
  best_final = best_unimulti %>%
    mutate(combi=FALSE) %>%
    bind_rows( best_combi_err %>% mutate(combi=TRUE) ) %>%
    group_by(variable) %>%
    top_n(1, -avg_MAE) %>%
    ungroup()
  all_ro_forec_dset = ro_forec_dset %>% mutate(combi=FALSE) %>%
    bind_rows(best_combi_forec)
  best_forec = all_ro_forec_dset %>%
    inner_join(best_final, by=c("lag", "multi_var",
                                "multi_step", "horizon",
                                "norm_type", "id_set",
                                "variable", "combi")) %>%
    select(-avg_MAE)
  #keep only the most recent forecast origin per id
  best_forec = best_forec %>% group_by(id) %>%
    filter(forec_date == max(forec_date)) %>% ungroup()
  #return the table of best models
  #the best forecasts for the last date
  #all forecasts including the older
  list(rollor_forec = all_ro_forec_dset,
       best_table = best_final,
       best_forec = best_forec)
}
|
/R/tidy_global_ar.R
|
no_license
|
pmontman/covid19forec
|
R
| false
| false
| 16,071
|
r
|
### quality checking:
# check format of columns (id, variable, value)
# all ids have the same variables
# within a id, the same length is required for each variable,
# it can be forced to the intersection, but by default it is a stop
# no missing days inbetween the date range
# no NAs
#sanity check, all end dates are the same
#sanity check if multi-step: check early that it is the same as horizon, or just ignore horizon
# SEPARATE THE EMBEDDING FUNCTIONS TO EXPOSE THEM AS STANDALONE FUNCTIONALITY,
# FOR EXAMPLE FOR IN-SAMPLE LOSS EXPERIMENTS
# MATCH NAMES TO AVOID ERRORS WITH ARRANGING OF IDS AND VARIABLES AND DATES
#check params_tbl is valid
library(readr)
library(tidyr)
library(dplyr)
library(lubridate)
library(ggplot2)
#Embed a series with a window of `lag` past values plus `step_ahead` targets.
#Errors when the series is too short to produce at least one full window.
embed_lag = function(x, lag, step_ahead) {
  window_size = lag + step_ahead
  stopifnot(length(x) > window_size)
  stats::embed(x, window_size)
}
#Column indices of the target (step-ahead) positions inside the multivariate
#embedding matrix: each variable occupies a block of lag + step_ahead columns,
#and the first step_ahead columns of each block are the targets.
calc_tgt_cols_in_multivar_embed = function(num_variables, lag, step_ahead) {
  block_width = lag + step_ahead
  block_offsets = (seq_len(num_variables) - 1) * block_width
  unlist(lapply(block_offsets, function(off) off + seq_len(step_ahead)))
}
#dset has the format id, variable, value
#Build the multivariate (and possibly multi-step) embedding matrices from a
#long-format tibble (id, variable, date, value).
#Returns a list with:
#  X       -- lagged regressors (lag columns per variable, newest lag first)
#  Y       -- targets (step_ahead columns per variable, ordered hor step_ahead:1)
#  X_last  -- the most recent lag window per id, used to produce forecasts
#  id_var_date, var_names, lag, step_ahead, num_variables -- bookkeeping
get_mvar_emb = function(dset, lag, step_ahead) {
  #get the ids and last date
  id_var_date = dset %>%
    arrange(id) %>%
    group_by(id,variable) %>%
    summarise(date=max(date), .groups="drop") %>%
    ungroup()
  #get the variable names for constructing the output
  var_names = id_var_date %>% select(variable) %>% unique() %>% arrange()
  num_variables = length(var_names$variable)
  if (0) { ##deprecated code, other branch is the optimized one
    #multivar AND multisep embedding from a tibble
    emb_list = dset %>% arrange(id) %>% group_by(id) %>%
      arrange(date, variable) %>%
      group_map( function(.x, .y) {
        Xvar = .x %>% group_by(variable) %>%
          group_map( function (.xvar, .yvar) { #embed each variable for each id and column join
            tseries = c(.xvar$value, rep(NA, step_ahead-1)) #pad with NA for the multi-step, so shorter horizon dont miss observations
            Xvar = embed_lag(tseries, lag, step_ahead)
          })
        do.call(cbind, Xvar)
      })
    X = do.call(rbind, emb_list)
    ###!!!! TO DO: do the fast embedding, not the rbind, but precreating X and Y matrices and filling them
    last_rows = sapply(emb_list, nrow) #get the position of the last observation in each series, it is the one used for forecasting
    #get the columns that will be used for forecasting, removing the last lag because the first is not the target
    last_cols = as.vector( sapply(1:num_variables, function(i) {(step_ahead - 1 + 1:lag) + (i-1)*(lag+step_ahead)}) )
    X_last = X[cumsum(last_rows), last_cols, drop=FALSE] #save the last observation, useful for forecasting
    #!!!!!clear memory just in case it gets rough
    #rm(emb_list);gc()
    y_cols = calc_tgt_cols_in_multivar_embed(num_variables, lag, step_ahead)
    Y = X[, y_cols, drop=FALSE]
    X = X[, -y_cols, drop=FALSE]
  } else {
    #optimized branch: first collect the raw (NA-padded) series per id/variable,
    #then fill preallocated X/Y/X_last matrices in place
    emb_list = dset %>% arrange(id) %>% group_by(id) %>%
      arrange(date, variable) %>%
      group_map( function(.X, .y) {
        .X %>% group_by(variable) %>%
          group_map( function (.xvar, .yvar) { #embed each variable for each id and column join
            c(.xvar$value, rep(NA, step_ahead-1)) #pad with NA for the multi-step, so shorter horizon dont miss observations
          })
      })
    lengths <- sapply(emb_list, function (x) length(x[[1]]))
    lengths <- lengths - lag
    #!!!!maybe precalc the size of the matrix and use hard drive for very large matrices
    X = matrix(0, nrow=sum(lengths), ncol=(lag) * num_variables)
    Y = matrix(0, nrow=sum(lengths), ncol=step_ahead*num_variables)
    X_last = matrix(0, nrow=length(emb_list), ncol=lag*num_variables)
    row_count = 1
    for (i in 1:length(emb_list)) {
      for (j in 1:length(emb_list[[i]])) {
        emb <- embed(emb_list[[i]][[j]], lag + step_ahead)
        X[row_count:(row_count + nrow(emb)-1), (j-1)*lag + 1:lag] <- emb[,-(1:step_ahead), drop=FALSE]
        Y[row_count:(row_count + nrow(emb)-1) , (j-1)*step_ahead + 1:step_ahead] <- emb[,1:step_ahead, drop=FALSE]
        X_last[i, (j-1)*lag + 1:lag] = emb[nrow(emb), (step_ahead - 1) + 1:lag, drop=FALSE]
      }
      #all variables within an id share one length, so advancing once per id is correct
      row_count <- row_count + nrow(emb)
    }
  }
  colnames(Y) <- paste(rep(var_names$variable, each=step_ahead), "_hor_", step_ahead:1, sep="")
  colnames(X) <- colnames(X_last) <- paste(rep(var_names$variable, each=lag), "_lag_", 1:lag, sep="")
  list(X=X, Y=Y, id_var_date=id_var_date, X_last = X_last, lag=lag, step_ahead=step_ahead,
       num_variables = num_variables,
       var_names = var_names)
}
#Fit one OLS regression per target column of Y on the shared regressor
#matrix X (via RcppArmadillo::fastLmPure, no intercept) and return the
#coefficients as a (ncol(X) x ncol(Y)) matrix with dimnames carried over.
fit_fastlm = function(X, Y, lag, step_ahead, num_variables ) {
  COEF = do.call(cbind, lapply(1:(step_ahead*num_variables), function(i) {
    valid_rows = !is.na(Y[,i]) #for a given step ahead, remove the special masked row, no to lose info for shorter horizons
    RcppArmadillo::fastLmPure(X[valid_rows, , drop=FALSE],
                              Y[valid_rows,i])$coefficients
  }))
  COEF <- as.matrix(COEF)
  rownames(COEF) <- colnames(X)
  colnames(COEF) <- colnames(Y)
  COEF
}
#get predictions
#Produce forecasts from a fitted coefficient matrix.
#step_ahead == 1: recursive one-step forecasting rolled out to `horizon`;
#otherwise a direct multi-step model, which requires step_ahead == horizon.
#Result columns are ordered horizon..1 per variable (newest last), matching
#the reversed ordering produced by stats::embed().
forec_mvar <- function(COEF, X_last, horizon, lag, step_ahead, num_variables) {
  if (step_ahead != 1) {
    #direct multi-step model: one regression output per horizon step
    if (step_ahead != horizon) {
      stop(paste("ERROR: Multi-step and Horizon dont match! Cannot do multi-step:", step_ahead, "with this model for horizon:", horizon))
    }
    return(X_last %*% COEF)
  }
  #recursive forecasting: roll the one-step model forward
  one_step <- X_last %*% COEF
  if (horizon == 1) {
    return(one_step)
  }
  all_preds <- matrix(0, nrow(one_step), horizon * ncol(one_step))
  write_cols <- seq(horizon, horizon * num_variables, horizon)
  all_preds[, write_cols] <- one_step
  latest <- one_step
  lag1_cols <- seq(1, lag * num_variables, lag)
  lag_state <- X_last
  for (step in 2:horizon) {
    #shift every lag one position older, then insert the newest prediction
    lag_state[, 2:ncol(lag_state)] <- lag_state[, 1:(ncol(lag_state) - 1), drop = FALSE]
    lag_state[, lag1_cols] <- latest
    latest <- lag_state %*% COEF
    all_preds[, write_cols - step + 1] <- latest
  }
  all_preds
}
#Convert the forecast matrix (one row per id, columns ordered variable-major
#and horizon-reversed) into a tidy tibble (id, variable, date, value).
#The final assignment is also the function's return value.
preds_to_tibble <- function(preds, horizon, id_var_date, var_names, num_variables) {
  id_last_date = id_var_date %>% select(id, date) %>% distinct()
  #turn them into a tibble
  pred_dset = lapply(1:length(id_last_date$id),
                     function (i) tibble(id=id_last_date$id[i],
                                         variable=rep(var_names$variable, each=horizon),
                                         date = id_last_date$date[i] + rep(horizon:1, num_variables), #!dates are reversed because embed reverses
                                         value=preds[i,]))
  pred_dset = bind_rows(pred_dset) %>% arrange(id, date, variable)
}
#Fit one pooled linear AR model across all series/variables and return its
#forecasts as a tidy tibble (id, variable, date, value).
predict_multivar_tbl <- function(dset, step_ahead, lag, horizon) {
  emb <- get_mvar_emb(dset, lag, step_ahead)
  coef_mat <- fit_fastlm(emb$X, emb$Y, emb$lag, emb$step_ahead, emb$num_variables)
  forecasts <- forec_mvar(coef_mat, emb$X_last,
                          horizon = horizon, lag = emb$lag,
                          step_ahead = emb$step_ahead,
                          num_variables = emb$num_variables)
  preds_to_tibble(forecasts,
                  horizon = horizon,
                  id_var_date = emb$id_var_date,
                  var_names = emb$var_names,
                  num_variables = emb$num_variables)
}
#Univariate counterpart of predict_multivar_tbl: each variable is modelled
#separately by calling the multivariate routine on a single-variable slice
#(the variable column is re-added inside the slice because group_modify
#strips the grouping column).
predict_univar_tbl <- function(dset, step_ahead, lag, horizon) {
  dset %>% group_by(variable) %>% group_modify(
    function (.x, .y)
      predict_multivar_tbl(mutate(.x, variable=.y$variable), step_ahead=step_ahead,
                           lag=lag, horizon=horizon) %>% select(-variable) ) %>%
    select(id, variable, date, value) %>% ungroup()
}
#Per (id, variable) normalization scales, MASE-like: mean absolute first
#difference of the series.
maselike_normtable <- function(dset) {
  dset %>% group_by(id, variable) %>%
    summarize(scale_type="mase",
              scale=mean(abs(diff(value))),
              .groups="drop")
}
#Per (id, variable) normalization scales: series maximum.
max_normtable <- function(dset) {
  dset %>% group_by(id, variable) %>%
    summarize(scale_type="max",
              scale=max(value), .groups="drop")
}
#Per (id, variable) normalization scales: maximum absolute first difference.
diffmax_normtable <- function(dset) {
  dset %>% group_by(id, variable) %>%
    summarize(scale_type="diffmax",
              scale=max(abs(diff(value))), .groups="drop")
}
#Divide each (id, variable) series by its scale from a *_normtable() table;
#the scale columns are dropped again before returning.
norm_by_table <- function(dset, table) {
  dset %>% left_join(table, by=c("id", "variable")) %>%
    group_by(id,variable) %>% mutate(value = value / scale) %>%
    ungroup() %>% select(-scale, -scale_type)
}
#Inverse of norm_by_table(): multiply each series back by its stored scale.
denorm_by_table <- function(dset, table) {
  dset %>% left_join(table, by=c("id", "variable")) %>%
    group_by(id,variable) %>% mutate(value = value * scale) %>%
    ungroup() %>% select(-scale, -scale_type)
}
#Shift every value in the dataset by a constant base and return the result.
flatbase_transform <- function(dset, base) {
  shifted <- dset
  shifted$value <- shifted$value + base
  shifted
}
#First-difference each (id, variable) series; a 0 is prepended so the output
#keeps the same length (the first diff equals the first observation).
diff_transform <- function(dset) {
  dset %>% group_by(id, variable) %>%
    mutate(value = diff(c(0, value))) %>%
    ungroup()
}
#Inverse of diff_transform: cumulative sum per (id, variable) series.
dediff_transform <- function(dset) {
  dset %>% group_by(id, variable) %>%
    mutate(value = cumsum(value)) %>%
    ungroup()
}
#Rolling-origin evaluation: for every hyperparameter row of params_tbl and
#every forecast origin in forec_dates, fit on data up to the origin,
#normalize, forecast, de-normalize, and stack everything into one tibble.
#Interface unchanged.
rollorig_predict = function(dset, forec_dates, params_tbl) {
  max_normtable; #these are needed for parallelization, so they know the functions can be used
  maselike_normtable;
  diffmax_normtable;
  results_rollorig = NULL
  for (i in seq_len(nrow(params_tbl))) {
    param_set = params_tbl[i,]
    multi_var = param_set$multi_var
    lag = param_set$lag
    horizon = param_set$horizon
    norm_f = get(param_set$norm_type)
    multi_step = param_set$multi_step
    id_set = unlist(param_set$id_set)
    #multi-step models predict the whole horizon at once; otherwise recurse 1-step
    if (multi_step) {
      step_ahead = horizon
    } else {
      step_ahead = 1
    }
    #BUGFIX: ifelse() cannot select between closures (rep() on a closure
    #errors at run time); pick the prediction function with a plain if/else.
    predict_fun = if (multi_var) predict_multivar_tbl else predict_univar_tbl
    for (forec_date in forec_dates) {
      forec_date = as_date(forec_date) #for() strips the Date class
      train_set = dset %>% filter(date <= forec_date, id %in% id_set)
      norm_table = train_set %>% norm_f()
      forec_dset = train_set %>%
        norm_by_table(norm_table) %>%
        predict_fun(step_ahead=step_ahead, lag=lag, horizon=horizon)
      forec_dset = forec_dset %>% denorm_by_table(norm_table)
      results_rollorig = bind_rows(results_rollorig, tibble(forec_date = forec_date,
                                                            param_set,
                                                            forec_dset))
    }
  }
  results_rollorig
}
#Parallel rollorig_predict: one furrr task per hyperparameter row.
#Row indices are mapped (not the rows themselves) because the id_set column
#holds list elements; the worker seed is fixed for reproducibility.
future_rollorig_predict = function(dset, forec_dates, params_tbl) {
  per_row = furrr::future_map(
    1:nrow(params_tbl),
    function(row_i) {
      one_param_row = tibble(params_tbl[row_i,])
      rollorig_predict(dset, forec_dates, one_param_row)
    },
    .options = furrr::furrr_options(seed = 123))
  bind_rows(per_row)
}
#!!!! TO DO !!!!
#spain: add italy covariates
#byrow normalization
#general model classes, external features
#efficient bigdata, wide format, chunking, keras
#unifying frequency
#Join forecasts with ground truth on (id, variable, date) and append MAE and
#MAPE columns; the MAPE denominator is shifted by +1 to avoid division by 0.
add_forec_errors <- function(dset, ground_truth) {
  dset %>% inner_join(ground_truth, by=c("id", "variable", "date"),
                      suffix=c("", "_true")) %>%
    mutate(MAE = (abs(value - value_true)),
           MAPE = (abs(value - value_true) / (abs(value_true)+1)) )
}
#weekly aggregation for the covid forecast hub and others
#sum, week starting in sundays, remove incomplete weeks
#Each day is floored to its week's Sunday and the group is labelled by +6
#days (the week-ending Saturday); weeks with fewer than 7 days are dropped.
epiweek_aggreg = function(dset) {
  dset %>%
    group_by(id, variable, date = floor_date(date, "week", week_start = 7)+6) %>%
    filter(n() ==7) %>%
    summarize(value = sum(value), .groups="drop") %>%
    ungroup()
}
#Left-pad each (id, variable) series with zero values from start_date up to
#the day before its first observation, so all series share a common start.
pad_zeros = function(dset, start_date) {
  dset %>% group_by(id,variable) %>%
    group_modify( function (.x, .y) {
      min_date = min(.x$date)
      if (min_date > start_date) {
        date_range = seq(as_date(start_date), min_date-1, by="days")
        .x = tibble(date=as_date(date_range),
                    value=0) %>% bind_rows( .x)
      }
      .x
    }) %>% ungroup()
}
#Heuristic anomaly cleaning for daily count series, applied per
#(id, variable): negatives are zeroed, one-day spikes larger than the
#surrounding 7-day totals are replaced by a triangular weighted average,
#and large day-to-day swings are partially redistributed between days.
covid_clean_anomalies = function(dset) {
  clean_x = function(x) {
    #set negatives to zero
    x[x < 0] = 0
    #remove one day peaks; x[i- 1:7] / x[i +1:7] are the 7 days before/after
    #NOTE(review): for series shorter than ~16 points, 8:(length(x)-8) counts
    #downwards and the loop touches unintended indices -- confirm inputs are
    #always long enough.
    for (i in 8:(length(x)-8)) {
      is_peak = x[i] > sum(x[i- 1:7]) && x[i] > sum(x[i +1:7])
      if (is_peak) {
        weights = c(1:8, 7:1)
        weights = weights / sum(weights)
        x[i] = sum(x[i + (-7):7] * weights)
      }
    }
    #remove big swing days: move 25% of a jump back/forward one day
    for (i in 1:(length(x)-1)) {
      is_peak = x[i +1] > 5*x[i]
      if (is_peak) {
        x[i] = x[i] + x[i+1]*0.25
        x[i+1] = 0.75*x[i+1]
      } else {
        is_low = x[i +1] < 0.2*x[i]
        if (is_low) {
          x[i+1] = x[i+1] + x[i]*0.25
          x[i] = 0.75*x[i]
        }
      }
    }
    x
  }
  dset %>%
    group_by(id, variable) %>%
    mutate(value = clean_x(value)) %>%
    ungroup()
}
#function that does forecasting on the given set of dates
#for the hyperparameter combination given (lag order, normalization type, dataset (external series))
#then the errors are calculated against a ground truth
#and a forecast combination of the best univariate and multivariate are calculated
#the output are the forecast at the dates given
#the summary of the best hyperparam for each variable and their error (according to MAE)
#and the forecast for only the best for each variable, at the later date given as input
#See the comment block above for the overall contract.
future_val_pred = function(train_dset, forec_dates, ground_truth_dset, params_tbl) {
  #(quick/dirty) code to randomly access the params table, to better share the load among parallel workers
  #http://www.cookbook-r.com/Numbers/Saving_the_state_of_the_random_number_generator/
  #shuffle the rows with a fixed seed, then restore the caller's RNG state
  if (exists(".Random.seed", .GlobalEnv)) {
    oldseed <- .GlobalEnv$.Random.seed
  } else {
    oldseed <- NULL
  }
  set.seed(1234)
  shuff_params_tbl = params_tbl[sample(nrow(params_tbl)),]
  if (!is.null(oldseed)) {
    .GlobalEnv$.Random.seed <- oldseed
  } else {
    rm(".Random.seed", envir = .GlobalEnv)
  }
  ro_forec_dset = future_rollorig_predict(train_dset, forec_dates, shuff_params_tbl)
  #errors on the validation origins only
  errs = ro_forec_dset %>%
    add_forec_errors(ground_truth_dset) %>%
    filter(forec_date %in% forec_dates)
  #best per variable and multi/uni variate
  best_unimulti = errs %>% group_by(lag, multi_var, multi_step,
                                    horizon, norm_type, id_set, variable) %>%
    summarize(avg_MAE = mean(MAE), .groups="drop") %>%
    group_by(variable, multi_var) %>% top_n(1, -avg_MAE) %>% ungroup()
  #forecast combination of the best uni-multi, per variable
  best_combi_forec = ro_forec_dset %>%
    inner_join(best_unimulti, by=c("lag", "multi_var", "multi_step",
                                   "horizon", "norm_type",
                                   "id_set", "variable")) %>%
    select(-avg_MAE) %>%
    group_by(forec_date, id, variable, date) %>%
    summarize(value = mean(value), .groups="drop") %>%
    mutate(combi=TRUE) %>% ungroup()
  #error of the best uni-multi combination
  best_combi_err = best_combi_forec %>%
    add_forec_errors(ground_truth_dset) %>%
    group_by(variable) %>% summarize(avg_MAE = mean(MAE))
  #per variable: keep whichever of {best single model, combination} wins on MAE
  best_final = best_unimulti %>%
    mutate(combi=FALSE) %>%
    bind_rows( best_combi_err %>% mutate(combi=TRUE) ) %>%
    group_by(variable) %>%
    top_n(1, -avg_MAE) %>%
    ungroup()
  all_ro_forec_dset = ro_forec_dset %>% mutate(combi=FALSE) %>%
    bind_rows(best_combi_forec)
  best_forec = all_ro_forec_dset %>%
    inner_join(best_final, by=c("lag", "multi_var",
                                "multi_step", "horizon",
                                "norm_type", "id_set",
                                "variable", "combi")) %>%
    select(-avg_MAE)
  #keep only the most recent forecast origin per id
  best_forec = best_forec %>% group_by(id) %>%
    filter(forec_date == max(forec_date)) %>% ungroup()
  #return the table of best models
  #the best forecasts for the last date
  #all forecasts including the older
  list(rollor_forec = all_ro_forec_dset,
       best_table = best_final,
       best_forec = best_forec)
}
|
library(jsonlite)
library(dplyr)
library(ggplot2)
# read in business data
filename <- "data/yelp_dataset_challenge_academic_dataset/yelp_academic_dataset_business.json"
business_json <- lapply(readLines(filename), fromJSON)
city_state <- factor(paste(sapply(business_json, '[[', 'city'), ", ",sapply(business_json, '[[', 'state'), sep=""))
stars <- sapply(business_json, '[[', 'stars')
review_count <- sapply(business_json, '[[', 'review_count')
biz_name <- sapply(business_json, '[[', 'name')
biz_name_length <- nchar(biz_name)
biz_category <- sapply(sapply(business_json, '[[', 'categories'), paste, collapse=";")
business_id <- factor(sapply(business_json, '[[', 'business_id'))
biz_df <- data_frame(business_id, biz_name, biz_name_length,stars, review_count, biz_category, city_state)
# read in review data
filename <- "data/yelp_dataset_challenge_academic_dataset/yelp_academic_dataset_review.json"
review_df <- stream_in(file(filename), pagesize = 10000)
# keep sub-3-star reviews of Health-related businesses
# BUGFIX: the filter previously tested review_df$biz_category, a column that
# does not exist in review_df (it comes from biz_df), so grepl() received
# NULL; filter on the joined column instead.
bad_reviews <-
  right_join(select(biz_df, business_id, biz_category), review_df) %>%
  filter(grepl("Health", biz_category)) %>%
  select(-votes, -type) %>%
  filter(stars < 3)
save(bad_reviews, file = "data/badreviews.rds")
|
/prep.R
|
no_license
|
vpnagraj/yelp-academic
|
R
| false
| false
| 1,221
|
r
|
library(jsonlite)
library(dplyr)
library(ggplot2)
# read in business data
filename <- "data/yelp_dataset_challenge_academic_dataset/yelp_academic_dataset_business.json"
business_json <- lapply(readLines(filename), fromJSON)
city_state <- factor(paste(sapply(business_json, '[[', 'city'), ", ",sapply(business_json, '[[', 'state'), sep=""))
stars <- sapply(business_json, '[[', 'stars')
review_count <- sapply(business_json, '[[', 'review_count')
biz_name <- sapply(business_json, '[[', 'name')
biz_name_length <- nchar(biz_name)
biz_category <- sapply(sapply(business_json, '[[', 'categories'), paste, collapse=";")
business_id <- factor(sapply(business_json, '[[', 'business_id'))
biz_df <- data_frame(business_id, biz_name, biz_name_length,stars, review_count, biz_category, city_state)
# read in review data
filename <- "data/yelp_dataset_challenge_academic_dataset/yelp_academic_dataset_review.json"
review_df <- stream_in(file(filename), pagesize = 10000)
# keep sub-3-star reviews of Health-related businesses
# BUGFIX: the filter previously tested review_df$biz_category, a column that
# does not exist in review_df (it comes from biz_df), so grepl() received
# NULL; filter on the joined column instead.
bad_reviews <-
  right_join(select(biz_df, business_id, biz_category), review_df) %>%
  filter(grepl("Health", biz_category)) %>%
  select(-votes, -type) %>%
  filter(stars < 3)
save(bad_reviews, file = "data/badreviews.rds")
|
/man/SK.nest.Rd
|
no_license
|
klainfo/ScottKnott
|
R
| false
| false
| 6,261
|
rd
| ||
# Source file for prior and likelihood functions. Not needed separately.
#Log-prior for the 14-element parameter vector `theta` (after toOptimize()
#re-expands it): observation variance, tempo/acc/stress means, tempo
#variance, and three transition-probability blocks.
logprior <- function(theta, samp_mean=132){
  #complete each probability block so it sums to 1 (Dirichlet support)
  p1s = c(theta[c(8,9)], 1-sum(theta[c(8,9,12)]), theta[12])
  p2s = c(theta[10], 1-sum(theta[c(10,13)]), theta[13])
  p3s = c(theta[11], 1-sum(theta[c(11,14)]), theta[14])
  sig2eps = dgamma(theta[1], shape=40, scale=10, log = TRUE)
  mu1 = dgamma(theta[2], samp_mean^2/100, scale=100/samp_mean, log = TRUE)
  mu2 = dgamma(-theta[3], 15, scale=2/3, log = TRUE)
  mu3 = dgamma(-theta[4], 20, scale=2, log = TRUE)
  sig2tempo = dgamma(theta[5], shape=40, scale=10, log=TRUE)
  #sig2acc = dgamma(theta[6], shape=1, scale=1, log=TRUE)
  #sig2stress = dgamma(theta[7], shape=1, scale=1, log=TRUE)
  #NOTE(review): ddirichlet() returns a density, not a log-density, yet it is
  #summed below with log-densities; this looks like it should be
  #log(ddirichlet(...)) -- confirm against the intended posterior.
  p1 = ddirichlet(p1s, alpha=c(85,5,8,2))
  p22 = ddirichlet(p2s, alpha=c(10,1,4))
  p31 = ddirichlet(p3s, alpha=c(5,7,3))
  lp = sum(sig2eps, mu1, mu2, mu3,
           sig2tempo, #sig2acc, sig2stress,
           p1, p22, p31)
  lp
}
#Parameter vector at the prior means/modes, in the order used by rprior():
#(sig2eps, mu1, mu2, mu3, sig2tempo, p11, p12, p22, p31, p13, p21, p32).
#The sig2acc/sig2stress entries are omitted, matching rprior()'s output.
prior_means <- function(samp_mean = 132) {
  scale_and_location = c(400, samp_mean, -10, -40, 400) #1, 1,
  transition_probs = c(.85, 1/20, 10/15, 5/15, 1/50, 4/15, 3/15)
  c(scale_and_location, transition_probs)
}
#Draw n parameter vectors from the prior; returns an n x 12 matrix whose
#columns match prior_means()/init().
rprior <- function(n, samp_mean=132){
  sig2eps = rgamma(n, shape=40, scale=10)
  mu1 = rgamma(n, samp_mean^2/100, scale=100/samp_mean)
  mu2 = -1*rgamma(n, 15, scale=2/3)
  mu3 = -1*rgamma(n, 20, scale=2)
  sig2tempo = rgamma(n, shape=40, scale=10)
  #NOTE(review): sig2acc/sig2stress are drawn but excluded from the output
  #below; removing the draws would shift the RNG stream, so they are kept.
  sig2acc = rgamma(n, shape=1, scale=1)
  sig2stress = rgamma(n, shape=1, scale=1)
  p1 = rdirichlet(n, alpha=c(85,5,8,2))
  p13 = p1[,4]
  p11 = p1[,1]
  p12 = p1[,2]
  p2 = rdirichlet(n, alpha=c(10,1,4))
  p22 = p2[,1]
  p21 = p2[,3]
  p3 = rdirichlet(n, alpha=c(5,7,3))
  p31 = p3[,1]
  p32 = p3[,3]
  cbind(sig2eps, mu1, mu2, mu3, sig2tempo, #sig2acc, sig2stress,
        p11, p12, p22, p31, p13, p21, p32)
}
#Build an initial parameter vector: the prior means by default, or a single
#random draw from the prior when noise > 0.
init <- function(samp_mean = 132, noise = 0) {
  if (noise > 0) {
    rprior(1, samp_mean)
  } else {
    prior_means(samp_mean)
  }
}
#Log-probability of a discrete state path under a transition matrix: the sum
#of log transition probabilities over consecutive state pairs. States are
#0-based; +1 converts them to 1-based matrix indices.
logStatesGivenParams <- function(states, transProbs) {
  from_states = states[1:(length(states) - 1)] + 1
  to_states = states[2:length(states)] + 1
  transition_idx = cbind(from_states, to_states)
  sum(log(transProbs[transition_idx]))
}
#Objective function for the optimizers: expands the 12-parameter theta to 14
#(sig2acc and sig2stress fixed at 1), runs the beam-search state decoder,
#and returns -loglik - logprior - log P(states | params).
#Returns `badvals` when the beam search fails early or all weights are NA.
toOptimize <- function(theta, yt, lt, Npart, samp_mean = 132, badvals=Inf){
  theta = c(theta[1:5],1,1,theta[6:12])
  pmats = musicModel(lt, theta[1], theta[2:4], theta[5:7], theta[8:14],
                     initialMean = c(samp_mean,0), # 132 is marked tempo, 0 is unused
                     initialVariance = c(400,10)) # sd of 20, 10 is unused
  beam = with(pmats,
              beamSearch(a0, P0, c(1,0,0,0,0,0,0,0,0,0,0),
                         dt, ct, Tt, Zt,
                         HHt, GGt, yt, transMat, Npart))
  if(beam$LastStep < length(lt)){
    cat('beam$LastStep < length(lt)\n')
    return(badvals)
  }
  if(all(is.na(beam$weights))){
    cat('all weights are NA\n')
    return(badvals)
  }
  #most likely state path among the surviving particles
  states = beam$paths[which.max(beam$weights),]
  negllike = getloglike(pmats, states, yt)# -log(P(y|params, states))
  sgp = -1 * logStatesGivenParams(states, pmats$transMat)
  logp = -1 * logprior(theta, samp_mean)
  obj = negllike + logp + sgp
  obj
}
# Cluster funs ------------------------------------------------------------
#Fit the model to a single performance: builds `ntries` starting points
#(the prior means plus ntries-1 random prior draws) and runs multistart()
#over them with both Nelder-Mead and SANN, returning the stacked results.
optimizer <- function(perf, lt, Npart=200, ntries = 5, samp_mean=132, badvals=1e8){
  yt = matrix(perf, nrow=1)
  if(is.null(samp_mean)) samp_mean = mean(yt)
  randos = NULL
  if(ntries > 1) randos = rprior(ntries-1, samp_mean)
  init_vals = rbind(prior_means(samp_mean), randos)
  out1 = multistart(init_vals, toOptimize, yt=yt, lt=lt, Npart=Npart,
                    badvals=badvals,samp_mean=samp_mean,
                    method='Nelder-Mead',
                    control=list(trace=0, maxit=5000, badval=badvals))
  out2 = multistart(init_vals, toOptimize, yt=yt, lt=lt, Npart=Npart,
                    badvals=badvals,samp_mean=samp_mean,
                    method='SANN',
                    control=list(trace=0, maxit=5000,badval=badvals))
  out = rbind.data.frame(out1, out2)
  out
}
|
/extras/my_model.R
|
no_license
|
dajmcdon/dpf
|
R
| false
| false
| 3,901
|
r
|
# Source file for prior and likelihood functions. Not needed separately.
#Log-prior for the 14-element theta. NOTE: this is a duplicate of the
#logprior definition that appears earlier in this file; see the notes there
#(in particular the un-logged ddirichlet() terms).
logprior <- function(theta, samp_mean=132){
  p1s = c(theta[c(8,9)], 1-sum(theta[c(8,9,12)]), theta[12])
  p2s = c(theta[10], 1-sum(theta[c(10,13)]), theta[13])
  p3s = c(theta[11], 1-sum(theta[c(11,14)]), theta[14])
  sig2eps = dgamma(theta[1], shape=40, scale=10, log = TRUE)
  mu1 = dgamma(theta[2], samp_mean^2/100, scale=100/samp_mean, log = TRUE)
  mu2 = dgamma(-theta[3], 15, scale=2/3, log = TRUE)
  mu3 = dgamma(-theta[4], 20, scale=2, log = TRUE)
  sig2tempo = dgamma(theta[5], shape=40, scale=10, log=TRUE)
  #sig2acc = dgamma(theta[6], shape=1, scale=1, log=TRUE)
  #sig2stress = dgamma(theta[7], shape=1, scale=1, log=TRUE)
  p1 = ddirichlet(p1s, alpha=c(85,5,8,2))
  p22 = ddirichlet(p2s, alpha=c(10,1,4))
  p31 = ddirichlet(p3s, alpha=c(5,7,3))
  lp = sum(sig2eps, mu1, mu2, mu3,
           sig2tempo, #sig2acc, sig2stress,
           p1, p22, p31)
  lp
}
#Parameter vector at the prior means/modes (duplicate of the earlier copy).
prior_means <- function(samp_mean=132){
  c(400, samp_mean, -10, -40, 400, #1, 1,
    .85, 1/20, 10/15, 5/15, 1/50, 4/15, 3/15)
}
#Draw n parameter vectors from the prior (duplicate of the earlier copy).
#sig2acc/sig2stress are drawn but excluded from the returned matrix.
rprior <- function(n, samp_mean=132){
  sig2eps = rgamma(n, shape=40, scale=10)
  mu1 = rgamma(n, samp_mean^2/100, scale=100/samp_mean)
  mu2 = -1*rgamma(n, 15, scale=2/3)
  mu3 = -1*rgamma(n, 20, scale=2)
  sig2tempo = rgamma(n, shape=40, scale=10)
  sig2acc = rgamma(n, shape=1, scale=1)
  sig2stress = rgamma(n, shape=1, scale=1)
  p1 = rdirichlet(n, alpha=c(85,5,8,2))
  p13 = p1[,4]
  p11 = p1[,1]
  p12 = p1[,2]
  p2 = rdirichlet(n, alpha=c(10,1,4))
  p22 = p2[,1]
  p21 = p2[,3]
  p3 = rdirichlet(n, alpha=c(5,7,3))
  p31 = p3[,1]
  p32 = p3[,3]
  cbind(sig2eps, mu1, mu2, mu3, sig2tempo, #sig2acc, sig2stress,
        p11, p12, p22, p31, p13, p21, p32)
}
#Initial parameter vector: prior means, or one prior draw when noise > 0.
init <- function(samp_mean=132, noise = 0){
  if(noise > 0){
    x = rprior(1, samp_mean)
  } else {
    x = prior_means(samp_mean)
  }
  x
}
#Log-probability of a 0-based state path under a transition matrix.
logStatesGivenParams <- function(states,transProbs){
  ind = cbind(states[1:(length(states)-1)], states[2:length(states)]) + 1
  return(sum(log(transProbs[ind])))
}
#Optimization objective (duplicate of the earlier toOptimize definition):
#-loglik - logprior - log P(states | params), or `badvals` on beam failure.
toOptimize <- function(theta, yt, lt, Npart, samp_mean = 132, badvals=Inf){
  theta = c(theta[1:5],1,1,theta[6:12])
  pmats = musicModel(lt, theta[1], theta[2:4], theta[5:7], theta[8:14],
                     initialMean = c(samp_mean,0), # 132 is marked tempo, 0 is unused
                     initialVariance = c(400,10)) # sd of 20, 10 is unused
  beam = with(pmats,
              beamSearch(a0, P0, c(1,0,0,0,0,0,0,0,0,0,0),
                         dt, ct, Tt, Zt,
                         HHt, GGt, yt, transMat, Npart))
  if(beam$LastStep < length(lt)){
    cat('beam$LastStep < length(lt)\n')
    return(badvals)
  }
  if(all(is.na(beam$weights))){
    cat('all weights are NA\n')
    return(badvals)
  }
  states = beam$paths[which.max(beam$weights),]
  negllike = getloglike(pmats, states, yt)# -log(P(y|params, states))
  sgp = -1 * logStatesGivenParams(states, pmats$transMat)
  logp = -1 * logprior(theta, samp_mean)
  obj = negllike + logp + sgp
  obj
}
# Cluster funs ------------------------------------------------------------
#Multistart fit of one performance (duplicate of the earlier optimizer).
optimizer <- function(perf, lt, Npart=200, ntries = 5, samp_mean=132, badvals=1e8){
  yt = matrix(perf, nrow=1)
  if(is.null(samp_mean)) samp_mean = mean(yt)
  randos = NULL
  if(ntries > 1) randos = rprior(ntries-1, samp_mean)
  init_vals = rbind(prior_means(samp_mean), randos)
  out1 = multistart(init_vals, toOptimize, yt=yt, lt=lt, Npart=Npart,
                    badvals=badvals,samp_mean=samp_mean,
                    method='Nelder-Mead',
                    control=list(trace=0, maxit=5000, badval=badvals))
  out2 = multistart(init_vals, toOptimize, yt=yt, lt=lt, Npart=Npart,
                    badvals=badvals,samp_mean=samp_mean,
                    method='SANN',
                    control=list(trace=0, maxit=5000,badval=badvals))
  out = rbind.data.frame(out1, out2)
  out
}
|
#'
#'Compute the mdesv indicator: the ratio of the standard deviation of the
#'simulated period to the standard deviation of the observed (real) data.
#'@param real the observed (real) data
#'@param simu the simulated data
#'@return the mdesv indicator, sd(simu) / sd(real)
mdesv <- function(real, simu) {
  sd(simu) / sd(real)
}
|
/storage/app/templates/1/rscript/main/funciones/mdesv.R
|
permissive
|
davidpachonc/maep-backend
|
R
| false
| false
| 337
|
r
|
#'
#'Function to compute the mdesv indicator, which compares the standard deviation of the simulated period against the real data
#'@param real the observed (real) data
#'@param simu the simulated data
#'@return rmdesv the mdesv indicator, sd(simu) / sd(real)
mdesv <- function(real,simu) {
  mreal<-sd(real)
  msimu <- sd(simu)
  rmdesv<-msimu/mreal
  return(rmdesv)
}
|
rm(list=ls())
setwd("Analysis")
source("Rcode/SQL/DatabaseHandler.R")
source("Rcode/SQL/MLmethodsQueries.R")
source("Rcode/SQL/BaseQueries.R")
#Build one feature row for a match: recent-history aggregates for the home
#team (columns prefixed A_) and the away team (prefixed B_), plus the class
#label and the match id.
#NOTE(review): `db` is not a parameter -- it is looked up in the calling /
#global environment; confirm a connection named `db` exists there.
dataJoiner = function(match, lastMatches) {
  homeData = sqlData(db, match$HomeTeamID, match$Date, lastMatches)
  awayData = sqlData(db, match$AwayTeamID, match$Date, lastMatches)
  names(homeData) = paste0("A_", names(homeData))
  names(awayData) = paste0("B_", names(awayData))
  return (cbind(Class = match$Class, MatchID = match$ID, homeData, awayData))
}
#Same as dataJoiner, but restricted to home-side/away-side statistics:
#the home team's features come from its home matches only, and vice versa.
#NOTE(review): `db` is resolved from the global environment here as well.
dataJoinerSide = function(match, lastMatches) {
  homeData = sqlSideData(db, match$HomeTeamID, match$Date, lastMatches, "HomeTeamID")
  awayData = sqlSideData(db, match$AwayTeamID, match$Date, lastMatches, "AwayTeamID")
  names(homeData) = paste0("A_", names(homeData))
  names(awayData) = paste0("B_", names(awayData))
  return (cbind(Class = match$Class, MatchID = match$ID, homeData, awayData))
}
#Build the modelling table for all matches by applying dataJoinerFunction to
#each row of matchData, then save the result (as an object named `data`) to
#`filename`.
#Improvements: rows are collected in a preallocated list and bound once at
#the end (rbind inside the loop was O(n^2) in copies), and seq_len() avoids
#iterating over c(1, 0) when matchData has zero rows. Interface and the
#saved output are unchanged.
buildData = function(matchData, lastMatches, dataJoinerFunction, filename) {
  print (Sys.time())
  rows = vector("list", nrow(matchData))
  for (i in seq_len(nrow(matchData))) {
    rows[[i]] = dataJoinerFunction(matchData[i,], lastMatches)
    if (i %% 1000 == 0) {
      print (i)
      print (Sys.time())
    }
  }
  data = if (length(rows) > 0) do.call(rbind, rows) else data.frame()
  #coerce every column (factors included) to numeric, as before
  data[,] <- as.numeric(as.matrix(data[,]))
  save(data, file = filename)
  print (Sys.time())
}
#Build and save all embedding datasets used in the experiments.
#NOTE(review): `db` here is local to buildSets, but dataJoiner and
#dataJoinerSide look `db` up in the global environment (they are defined at
#top level), so they will not see this local binding -- verify that a global
#`db` exists when this runs, or pass the connection explicitly.
buildSets = function() {
  db = getDatabase()
  matchData = sqlMatchesData(db)
  buildData(matchData, 5, dataJoinerSide, "data_joinerSide_5.Rdata")
  buildData(matchData, 10, dataJoiner, "data_joiner_10.Rdata")
  buildData(matchData, 10, dataJoinerSide, "data_joinerSide_10.Rdata")
  buildData(matchData, 20, dataJoiner, "data_joiner_20.Rdata")
  buildData(matchData, 15, dataJoinerSide, "data_joinerSide_15.Rdata")
  buildData(matchData, 30, dataJoiner, "data_joiner_30.Rdata")
  closeDatabase(db)
}
#Load a prebuilt dataset from `filename` (object `data`), keep rows where
#both teams have at least `minCount` past matches, and split into training
#rows, test rows (matches present in the betting-odds test set) and the
#matching odds table. The MatchID and count columns are dropped from the
#model matrices.
getDataset = function(filename, minCount) {
  db = getDatabase()
  testsetOdds = getBaseTestSetQuery(db)
  load(filename)
  data = subset(data, A_Count >= minCount & B_Count >= minCount)
  trainingData = data[!(data$MatchID %in% testsetOdds$MatchID), which(!names(data) %in% c("MatchID", "A_Count", "B_Count"))]
  testData = data[(data$MatchID %in% testsetOdds$MatchID), which(!names(data) %in% c("MatchID", "A_Count", "B_Count"))]
  testsetOdds = subset(testsetOdds, MatchID %in% data$MatchID)
  data = list(trainingData = trainingData, testData = testData, testsetOdds = testsetOdds)
  closeDatabase(db)
  return (data)
}
|
/Rcode/Other/datasetBuilder.R
|
no_license
|
kaidolepik/NBA
|
R
| false
| false
| 2,573
|
r
|
rm(list=ls())
setwd("Analysis")
source("Rcode/SQL/DatabaseHandler.R")
source("Rcode/SQL/MLmethodsQueries.R")
source("Rcode/SQL/BaseQueries.R")
#Duplicate of the dataJoiner/dataJoinerSide definitions earlier in this
#file: build A_/B_ prefixed feature rows for the home and away team.
#NOTE(review): both rely on a global `db` connection.
dataJoiner = function(match, lastMatches) {
  homeData = sqlData(db, match$HomeTeamID, match$Date, lastMatches)
  awayData = sqlData(db, match$AwayTeamID, match$Date, lastMatches)
  names(homeData) = paste0("A_", names(homeData))
  names(awayData) = paste0("B_", names(awayData))
  return (cbind(Class = match$Class, MatchID = match$ID, homeData, awayData))
}
dataJoinerSide = function(match, lastMatches) {
  homeData = sqlSideData(db, match$HomeTeamID, match$Date, lastMatches, "HomeTeamID")
  awayData = sqlSideData(db, match$AwayTeamID, match$Date, lastMatches, "AwayTeamID")
  names(homeData) = paste0("A_", names(homeData))
  names(awayData) = paste0("B_", names(awayData))
  return (cbind(Class = match$Class, MatchID = match$ID, homeData, awayData))
}
#Duplicate of buildData earlier in this file; same fixes applied: rows are
#collected in a preallocated list and bound once (rbind in the loop was
#O(n^2)), and seq_len() avoids iterating over c(1, 0) for empty matchData.
#Interface and saved output are unchanged.
buildData = function(matchData, lastMatches, dataJoinerFunction, filename) {
  print (Sys.time())
  rows = vector("list", nrow(matchData))
  for (i in seq_len(nrow(matchData))) {
    rows[[i]] = dataJoinerFunction(matchData[i,], lastMatches)
    if (i %% 1000 == 0) {
      print (i)
      print (Sys.time())
    }
  }
  data = if (length(rows) > 0) do.call(rbind, rows) else data.frame()
  #coerce every column (factors included) to numeric, as before
  data[,] <- as.numeric(as.matrix(data[,]))
  save(data, file = filename)
  print (Sys.time())
}
#Duplicates of buildSets/getDataset earlier in this file; see the notes
#there (in particular: the joiners resolve `db` globally, not from the
#local binding created inside buildSets).
buildSets = function() {
  db = getDatabase()
  matchData = sqlMatchesData(db)
  buildData(matchData, 5, dataJoinerSide, "data_joinerSide_5.Rdata")
  buildData(matchData, 10, dataJoiner, "data_joiner_10.Rdata")
  buildData(matchData, 10, dataJoinerSide, "data_joinerSide_10.Rdata")
  buildData(matchData, 20, dataJoiner, "data_joiner_20.Rdata")
  buildData(matchData, 15, dataJoinerSide, "data_joinerSide_15.Rdata")
  buildData(matchData, 30, dataJoiner, "data_joiner_30.Rdata")
  closeDatabase(db)
}
#Load a prebuilt dataset, filter by match-history counts and split into
#training data, test data and the matching betting-odds table.
getDataset = function(filename, minCount) {
  db = getDatabase()
  testsetOdds = getBaseTestSetQuery(db)
  load(filename)
  data = subset(data, A_Count >= minCount & B_Count >= minCount)
  trainingData = data[!(data$MatchID %in% testsetOdds$MatchID), which(!names(data) %in% c("MatchID", "A_Count", "B_Count"))]
  testData = data[(data$MatchID %in% testsetOdds$MatchID), which(!names(data) %in% c("MatchID", "A_Count", "B_Count"))]
  testsetOdds = subset(testsetOdds, MatchID %in% data$MatchID)
  data = list(trainingData = trainingData, testData = testData, testsetOdds = testsetOdds)
  closeDatabase(db)
  return (data)
}
|
\name{quantregTable}
\alias{quantregTable}
\title{
Quantile regression table.}
\description{
Produces a quantile regression table.
}
\usage{
quantregTable(x, digits = 2, significance="none")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
summary of quantile regression results as returned when calling summary on a quantile regression object.
}
\item{digits}{
number of digits to display.
}
\item{significance}{
factor that toggles whether, besides the standard error, significance should be made visible. Can be either "none" (nothing additional is displayed), "stars" (significance stars are displayed) or "bold" (significant values are bold when saved by the saveTable function).
}
}
\value{
A quantreg table.}
\examples{
#must have quantreg installed
library(quantreg)
data(stackloss)
y <- stack.loss
x <- stack.x
res <- summary(rq(y ~ x, tau=c(0.25, 0.5, 0.75)), se="boot")
quantregTable(res)
quantregTable(res, significance="stars")
tab <- quantregTable(res, significance="bold")
}
|
/man/quantregTable.Rd
|
no_license
|
cran/psytabs
|
R
| false
| false
| 1,063
|
rd
|
\name{quantregTable}
\alias{quantregTable}
\title{
Quantile regression table.}
\description{
Produces a quantile regression table.
}
\usage{
quantregTable(x, digits = 2, significance="none")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
summary for quantile regression results as returned when calling summary on an quantile regression object.
}
\item{digits}{
number of digits to display.
}
\item{significance}{
factor that toggles whether beside the standard error significance should be made visible. Can be either "none" nothing additional is displayed, "stars" significance stars are displayed and "bold" signicant values are bold when saved by the saveTable function.
}
}
\value{
A quantreg table.}
\examples{
#must have quantreg installed
library(quantreg)
data(stackloss)
y <- stack.loss
x <- stack.x
res <- summary(rq(y ~ x, tau=c(0.25, 0.5, 0.75)), se="boot")
quantregTable(res)
quantregTable(res, significance="stars")
tab <- quantregTable(res, significance="bold")
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Main.R
\name{sensitivity}
\alias{sensitivity}
\title{sensitivity}
\usage{
sensitivity(actuals, predictedScores, threshold = 0.5)
}
\arguments{
\item{actuals}{The actual binary flags for the response variable. It can take values of either 1 or 0, where 1 represents the 'Good' or 'Events' while 0 represents 'Bad' or 'Non-Events'.}
\item{predictedScores}{The prediction probability scores for each observation.}
\item{threshold}{If predicted value is above the threshold, it will be considered as an event (1), else it will be a non-event (0). Defaults to 0.5.}
}
\value{
The sensitivity of the given binary response actuals and predicted probability scores, that is, the number of observations with the event AND predicted to have the event divided by the number of observations with the event.
}
\description{
Calculate the sensitivity for a given logit model.
}
\details{
For a given binary response actuals and predicted probability scores, sensitivity is defined as the number of observations with the event AND predicted to have the event divided by the number of observations with the event. It can be used as an indicator to gauge how sensitive your model is in detecting the occurrence of events, especially when you are not so concerned about predicting the non-events as true.
}
\examples{
data('ActualsAndScores')
sensitivity(actuals=ActualsAndScores$Actuals, predictedScores=ActualsAndScores$PredictedScores)
}
\author{
Selva Prabhakaran \email{selva86@gmail.com}
}
|
/man/sensitivity.Rd
|
no_license
|
selva86/car2
|
R
| false
| false
| 1,564
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Main.R
\name{sensitivity}
\alias{sensitivity}
\title{sensitivity}
\usage{
sensitivity(actuals, predictedScores, threshold = 0.5)
}
\arguments{
\item{actuals}{The actual binary flags for the response variable. It can take values of either 1 or 0, where 1 represents the 'Good' or 'Events' while 0 represents 'Bad' or 'Non-Events'.}
\item{predictedScores}{The prediction probability scores for each observation.}
\item{threshold}{If predicted value is above the threshold, it will be considered as an event (1), else it will be a non-event (0). Defaults to 0.5.}
}
\value{
The sensitivity of the given binary response actuals and predicted probability scores, which is, the number of observations with the event AND predicted to have the event divided by the nummber of observations with the event.
}
\description{
Calculate the sensitivity for a given logit model.
}
\details{
For a given binary response actuals and predicted probability scores, sensitivity is defined as number of observations with the event AND predicted to have the event divided by the number of observations with the event. It can be used as an indicator to gauge how sensitive is your model in detecting the occurence of events, especially when you are not so concerned about predicting the non-events as true.
}
\examples{
data('ActualsAndScores')
sensitivity(actuals=ActualsAndScores$Actuals, predictedScores=ActualsAndScores$PredictedScores)
}
\author{
Selva Prabhakaran \email{selva86@gmail.com}
}
|
# Camila Kosma 24/05/2020
#
# Exploratory analysis and regression-tree modelling of the red and white
# wine quality datasets (UCI "winequality" data, mirrored on data.world).
library(leaps)
library(ggplot2)
library(reshape2)
library(MASS)
library(ggcorrplot)
library(plotmo)
library(rpart)
library(rpart.plot)
library(tidyverse)
library(corrplot)
library(gridExtra)
library(GGally)
library(knitr)
# Get the datasets from URL (requires an internet connection).
red_data <- read.csv("https://query.data.world/s/uwnbkixzuarlyb7kvtighli2okv7kn")
white_data <- read.csv("https://query.data.world/s/nhgaqjqexeir27sqdc7ordwmuaikhu")
# Data preparation: inspect structure, first rows and summary statistics.
str(red_data)
str(white_data)
head(red_data)
head(white_data)
summary(red_data)
summary(white_data)
sapply(red_data, sd)
sapply(white_data, sd)
# Standardise the predictor columns (drop the first column).
# BUG FIX: the original reused one variable `df` for both wines, so the
# red scaled data was silently overwritten; keep them separate.
red_scaled <- scale(red_data[-1])
head(red_scaled)
white_scaled <- scale(white_data[-1])
head(white_scaled)
# Histogram of quality in a first place
hist(red_data$quality)
hist(white_data$quality)
# Training / test split and regression trees.
# BUG FIX: the original hard-coded rows 1:3750 / 3751:4898 for BOTH data
# sets, but the red-wine set has far fewer rows (1599), so the red test
# set was filled with NA rows. Split each data set by its own size.
train_fraction <- 0.75
n_red_train <- floor(train_fraction * nrow(red_data))
red_wine_train <- red_data[seq_len(n_red_train), ]
red_wine_test <- red_data[(n_red_train + 1):nrow(red_data), ]
n_white_train <- floor(train_fraction * nrow(white_data))
white_wine_train <- white_data[seq_len(n_white_train), ]
white_wine_test <- white_data[(n_white_train + 1):nrow(white_data), ]
red_m.rpart <- rpart(quality ~ ., data = red_wine_train)
red_m.rpart
white_m.rpart <- rpart(quality ~ ., data = white_wine_train)
white_m.rpart
rpart.plot(red_m.rpart, digits = 3)
rpart.plot(white_m.rpart, digits = 3)
rpart.plot(red_m.rpart, digits = 4, fallen.leaves = TRUE, type = 3, extra = 101)
rpart.plot(white_m.rpart, digits = 4, fallen.leaves = TRUE, type = 3, extra = 101)
red_p.rpart <- predict(red_m.rpart, red_wine_test)
summary(red_p.rpart)
white_p.rpart <- predict(white_m.rpart, white_wine_test)
summary(white_p.rpart)
# Boxplots of the eleven physico-chemical wine attributes.
red_oldpar <- par(mfrow = c(2, 6))
for (i in seq_len(11)) {
  boxplot(red_data[[i]])
  mtext(names(red_data)[i], cex = 0.8, side = 1, line = 2)
}
par(red_oldpar)
white_oldpar <- par(mfrow = c(2, 6))
for (i in seq_len(11)) {
  boxplot(white_data[[i]])
  mtext(names(white_data)[i], cex = 0.8, side = 1, line = 2)
}
par(white_oldpar)
corrplot(cor(red_data), type = "upper", method = "ellipse", tl.cex = 0.9)
# Classification of quality into a three-level taste factor.
hist(red_data$quality)
hist(white_data$quality)
red_data$taste <- ifelse(red_data$quality < 6, 'poor', 'high')
red_data$taste[red_data$quality == 6] <- 'standard'
red_data$taste <- as.factor(red_data$taste)
table(red_data$taste)
white_data$taste <- ifelse(white_data$quality < 6, 'poor', 'high')
white_data$taste[white_data$quality == 6] <- 'standard'
white_data$taste <- as.factor(white_data$taste)
table(white_data$taste)
# Correlation matrix.
# BUG FIX: cor() errors on the factor column `taste` added above, so
# restrict the correlation matrix to numeric columns.
corrplot(cor(red_data[sapply(red_data, is.numeric)]),
         type = "upper", method = "ellipse", tl.cex = 0.9)
# Relation between two variables.
# BUG FIX: the original mapped columns `Acidity` and `Sulphur`, which do
# not exist in this data (its labels even referenced Phenols/Flavanoids
# from a different dataset). Use real wine-quality columns instead.
# NOTE(review): column names assumed to follow the standard winequality
# schema (fixed.acidity, sulphates, ...) -- confirm against str(red_data).
ggplot(red_data, aes(x = fixed.acidity, y = sulphates)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE) +
  labs(title = "Wine Attributes",
       subtitle = "Relationship between fixed acidity and sulphates") +
  theme_bw()
|
/Wine Analysis R.R
|
no_license
|
CKosma/pbd-ck
|
R
| false
| false
| 2,718
|
r
|
#Camila Kosma 24/05/2020
library(leaps)
library(ggplot2)
library(reshape2)
library(MASS)
library(ggcorrplot)
library(plotmo)
library(rpart)
library(rpart.plot)
library(tidyverse)
library(corrplot)
library(gridExtra)
library(GGally)
library(knitr)
#Get the dataset from URL
red_data <- read.csv("https://query.data.world/s/uwnbkixzuarlyb7kvtighli2okv7kn")
white_data <- read.csv("https://query.data.world/s/nhgaqjqexeir27sqdc7ordwmuaikhu")
#Data Preparation
str(red_data)
str(white_data)
head(red_data)
head(white_data)
summary(red_data)
summary(white_data)
sapply(red_data, sd)
sapply(white_data, sd)
df <- scale(red_data[-1])
head(df)
df <- scale(white_data[-1])
head(df)
#Histogram of Quality in a first place
hist(red_data$quality)
hist(white_data$quality)
#Training, Test set and graphics outputs
red_wine_train <- red_data[1:3750, ]
red_wine_test <- red_data[3751:4898, ]
white_wine_train <- white_data[1:3750, ]
white_wine_test <- white_data[3751:4898, ]
red_m.rpart <- rpart(quality ~ ., data = red_wine_train)
red_m.rpart
white_m.rpart <- rpart(quality ~ ., data = white_wine_train)
white_m.rpart
rpart.plot(red_m.rpart, digits = 3)
rpart.plot(white_m.rpart, digits = 3)
rpart.plot(red_m.rpart, digits = 4, fallen.leaves = TRUE, type = 3, extra = 101)
rpart.plot(white_m.rpart, digits = 4, fallen.leaves = TRUE, type = 3, extra = 101)
red_p.rpart <- predict(red_m.rpart, red_wine_test)
summary(red_p.rpart)
white_p.rpart <- predict(white_m.rpart, white_wine_test)
summary(white_p.rpart)
#Boxplot Wine Attributes
red_oldpar = par(mfrow = c(2,6))
for ( i in 1:11 ) {
boxplot(red_data[[i]])
mtext(names(red_data)[i], cex = 0.8, side = 1, line = 2)
}
par(red_oldpar)
white_oldpar = par(mfrow = c(2,6))
for ( i in 1:11 ) {
boxplot(white_data[[i]])
mtext(names(white_data)[i], cex = 0.8, side = 1, line = 2)
}
par(white_oldpar)
corrplot(cor(red_data), type="upper", method="ellipse", tl.cex=0.9)
#Classification of Quality
hist(red_data$quality)
hist(white_data$quality)
red_data$taste <- ifelse(red_data$quality < 6, 'poor', 'high')
red_data$taste[red_data$quality == 6] <- 'standard'
red_data$taste <- as.factor(red_data$taste)
table(red_data$taste)
white_data$taste <- ifelse(white_data$quality < 6, 'poor', 'high')
white_data$taste[white_data$quality == 6] <- 'standard'
white_data$taste <- as.factor(white_data$taste)
table(white_data$taste)
#Correlation Matrix
corrplot(cor(red_data), type="upper", method="ellipse", tl.cex=0.9)
#Relation between 2 variables
ggplot(red_data, aes(x=Acidity, y=Sulphur)) +
geom_point() +
geom_smooth(method="lm", se=FALSE) +
labs(title="Wines Attributes",
subtitle="Relationship between Phenols and Flavanoids") +
theme_bw()
|
#####
#Simulated Annealing
#####
library(GenSA)
# Try Rastrgin function (The objective function value for global minimum
# is 0 with all components of par are 0.)
# Rastrigin benchmark function: 10*n + sum(x_i^2 - 10*cos(2*pi*x_i)).
# Global minimum is 0, attained at the origin. Works for any length of x.
Rastrigin <- function(x) {
  n <- length(x)
  10 * n + sum(x^2 - 10 * cos(2 * pi * x))
}
# Perform the search on a 30 dimensions rastrigin function. Rastrigin
# function with dimension 30 is known as the most
# difficult optimization problem according to "Yao X, Liu Y, Lin G (1999).
# \Evolutionary Programming Made Faster."
# IEEE Transactions on Evolutionary Computation, 3(2), 82-102.
# GenSA will stop after finding the targeted function value 0 with
# absolute tolerance 1e-13
# Run 1: search until the known global minimum (0) is reached within `tol`.
# set.seed() must directly precede GenSA() -- the result depends on RNG state.
set.seed(1234) # The user can use any seed.
dimension <- 30
global.min <- 0
tol <- 1e-13
# Box constraints: the Rastrigin function is conventionally studied on
# [-5.12, 5.12] in every dimension.
lower <- rep(-5.12, dimension)
upper <- rep(5.12, dimension)
out <- GenSA(lower = lower, upper = upper, fn = Rastrigin,
control=list(threshold.stop=global.min+tol,verbose=TRUE))
out[c("value","par","counts")]
summary(out)
head(out$trace)
# GenSA will stop after running for about 2 seconds
# Note: The time for solving this problem by GenSA may vary
# depending on the computer used.
# Run 2: same problem, but with a wall-clock budget (max.time, in seconds)
# instead of a target objective value.
set.seed(1234) # The user can use any seed.
dimension <- 30
global.min <- 0
tol <- 1e-13
lower <- rep(-5.12, dimension)
upper <- rep(5.12, dimension)
out <- GenSA(lower = lower, upper = upper, fn = Rastrigin,
control=list(max.time=2))
out[c("value","par","counts")]
|
/Optimization/GenSATools.R
|
no_license
|
anhnguyendepocen/RCodeResearch
|
R
| false
| false
| 1,404
|
r
|
#####
#Simulated Annealing
#####
library(GenSA)
# Try Rastrgin function (The objective function value for global minimum
# is 0 with all components of par are 0.)
Rastrigin <- function(x) {
sum(x^2 - 10 * cos(2 * pi * x)) + 10 * length(x)
}
# Perform the search on a 30 dimensions rastrigin function. Rastrigin
# function with dimension 30 is known as the most
# difficult optimization problem according to "Yao X, Liu Y, Lin G (1999).
# \Evolutionary Programming Made Faster."
# IEEE Transactions on Evolutionary Computation, 3(2), 82-102.
# GenSA will stop after finding the targeted function value 0 with
# absolute tolerance 1e-13
set.seed(1234) # The user can use any seed.
dimension <- 30
global.min <- 0
tol <- 1e-13
lower <- rep(-5.12, dimension)
upper <- rep(5.12, dimension)
out <- GenSA(lower = lower, upper = upper, fn = Rastrigin,
control=list(threshold.stop=global.min+tol,verbose=TRUE))
out[c("value","par","counts")]
summary(out)
head(out$trace)
# GenSA will stop after running for about 2 seconds
# Note: The time for solving this problem by GenSA may vary
# depending on the computer used.
set.seed(1234) # The user can use any seed.
dimension <- 30
global.min <- 0
tol <- 1e-13
lower <- rep(-5.12, dimension)
upper <- rep(5.12, dimension)
out <- GenSA(lower = lower, upper = upper, fn = Rastrigin,
control=list(max.time=2))
out[c("value","par","counts")]
|
## This file contains two functions, makeCacheMatrix and cacheSolve.
## The first, makeCacheMatrix, takes as an argument a matrix (x) and creates a list
## of four function objects. The second, cacheSolve, takes the list resulting from
## makeCacheMatrix and computes or returns the inverse of x.
## makeCacheMatrix: argument is a matrix object. The function returns a list of
## four functions. They are (1) set: takes a matrix as an argument and assigns it
## to x, (2) get: returns its argument (x) when called, (3) setInverse: takes the
## inverse matrix as an argument and sets it (I), and (4) getInverse: returns I.
## Wrap a matrix in a list of closures that can cache its inverse.
## Returns a list with set/get (the matrix) and setInverse/getInverse
## (the cached inverse, NULL until computed). Replacing the matrix via
## set() invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix invalidates the cached inverse
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    inv <<- inverse
  }
  getInverse <- function() {
    inv
  }
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## cacheSolve takes a list created by makeCacheMatrix and pulls the inverse matrix
## using the getInverse() function. If inverse (I) is NOT null, it returns I from
## the cache. Otherwise it gets the original matrix (x) using the get() function,
## computes the inverse with solve() and assigns it to the list using the
## setInverse() function.
## Return the inverse of the special "matrix" x (built by makeCacheMatrix),
## computing it with solve() only when no cached value exists. Extra
## arguments are forwarded to solve(). A cache hit prints a message.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)  # store for future calls
    return(cached)
  }
  message("getting cached data")
  cached
}
|
/cachematrix.R
|
no_license
|
nrfrank/ProgrammingAssignment2
|
R
| false
| false
| 1,526
|
r
|
## This file contains two functions, makeCacheMatrix and cacheSolve.
## The first, makeCacheMatrix, takes as an argument a matrix (x) and creates a list
## of four function objects. The second, cacheSolve, takes the list resulting from
## makeCacheMatrix and computes or returns the inverse of x.
## makeCacheMatrix: argument is a matrix object. The function returns a list of
## four functions. They are (1) set: takes a matrix as an argument and assigns it
## to x, (2) get: returns its argument (x) when called, (3) setInverse: takes the
## inverse matrix as an argument and sets it (I), and (4) getInverse: returns I.
makeCacheMatrix <- function(x = matrix()) {
I <- NULL
set <- function(y) {
x <<- y
I <<- NULL
}
get <- function() x
setInverse <- function(inverse) I <<- inverse
getInverse <- function() I
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve takes a list created by makeCacheMatrix and pulls the inverse matrix
## using the getInverse() function. If inverse (I) is NOT null, it returns I from
## the cache. Otherwise it gets the original matrix (x) using the get() function,
## computes the inverse with solve() and assigns it to the list using the
## setInverse() function.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
I <- x$getInverse()
if(!is.null(I)) {
message("getting cached data")
return(I)
}
data <- x$get()
I <- solve(data, ...)
x$setInverse(I)
I
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/homogenize.R
\name{homogenize}
\alias{homogenize}
\alias{dehomogenize}
\alias{is.homogeneous}
\alias{homogeneous_components}
\title{Homogenize a polynomial}
\usage{
homogenize(x, indeterminate = "t")
dehomogenize(x, indeterminate = "t")
is.homogeneous(x)
homogeneous_components(x)
}
\arguments{
\item{x}{an mpoly object, see \code{\link[=mpoly]{mpoly()}}}
\item{indeterminate}{name of homogenization}
}
\value{
a (de/homogenized) mpoly or an mpolyList
}
\description{
Homogenize a polynomial.
}
\examples{
x <- mp("x^4 + y + 2 x y^2 - 3 z")
is.homogeneous(x)
(xh <- homogenize(x))
is.homogeneous(xh)
homogeneous_components(x)
homogenize(x, "o")
xh <- homogenize(x)
dehomogenize(xh) # assumes indeterminate = "t"
plug(xh, "t", 1) # same effect, but dehomogenize is faster
# the functions are vectorized
(ps <- mp(c("x + y^2", "x + y^3")))
(psh <- homogenize(ps))
dehomogenize(psh)
# demonstrating a leading property of homogeneous polynomials
library(magrittr)
p <- mp("x^2 + 2 x + 3")
(ph <- homogenize(p, "y"))
lambda <- 3
(d <- totaldeg(p))
ph \%>\%
plug("x", lambda*mp("x")) \%>\%
plug("y", lambda*mp("y"))
lambda^d * ph
}
|
/man/homogenize.Rd
|
no_license
|
dkahle/mpoly
|
R
| false
| true
| 1,223
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/homogenize.R
\name{homogenize}
\alias{homogenize}
\alias{dehomogenize}
\alias{is.homogeneous}
\alias{homogeneous_components}
\title{Homogenize a polynomial}
\usage{
homogenize(x, indeterminate = "t")
dehomogenize(x, indeterminate = "t")
is.homogeneous(x)
homogeneous_components(x)
}
\arguments{
\item{x}{an mpoly object, see \code{\link[=mpoly]{mpoly()}}}
\item{indeterminate}{name of homogenization}
}
\value{
a (de/homogenized) mpoly or an mpolyList
}
\description{
Homogenize a polynomial.
}
\examples{
x <- mp("x^4 + y + 2 x y^2 - 3 z")
is.homogeneous(x)
(xh <- homogenize(x))
is.homogeneous(xh)
homogeneous_components(x)
homogenize(x, "o")
xh <- homogenize(x)
dehomogenize(xh) # assumes indeterminate = "t"
plug(xh, "t", 1) # same effect, but dehomogenize is faster
# the functions are vectorized
(ps <- mp(c("x + y^2", "x + y^3")))
(psh <- homogenize(ps))
dehomogenize(psh)
# demonstrating a leading property of homogeneous polynomials
library(magrittr)
p <- mp("x^2 + 2 x + 3")
(ph <- homogenize(p, "y"))
lambda <- 3
(d <- totaldeg(p))
ph \%>\%
plug("x", lambda*mp("x")) \%>\%
plug("y", lambda*mp("y"))
lambda^d * ph
}
|
#' Header function for optimization routines
#'
#' Create some output to the screen and a text file that summarizes the problem you are trying to solve.
#'
#' @inheritParams RS_opt
#' @inheritParams evaluate.fim
#' @inheritParams blockexp
#' @inheritParams Doptim
#' @inheritParams create.poped.database
#' @inheritParams RS_opt_gen
#' @param name The name used for the output file. Combined with \code{name_header} and \code{iter}.
#' If \code{""} then output is to the screen.
#' @param iter The last number in the name printed to the output file, combined with \code{name}.
#' @param name_header The initial portion of the file name.
#' @param file_path The path to where the file should be created.
#' @param header_flag Should the header text be printed out?
#' @param ... Additional arguments passed to further functions.
#'
#' @family Helper
#' @return fn A file handle (or \code{''} if \code{name=''})
#' @example tests/testthat/examples_fcn_doc/warfarin_optimize.R
#' @example tests/testthat/examples_fcn_doc/examples_blockheader.R
#' @keywords internal
#' @export
## Function translated using 'matlab.to.r()'
## Then manually adjusted to make work
## Author: Andrew Hooker
blockheader <- function(poped.db,name="Default",iter=NULL,
e_flag=!(poped.db$settings$d_switch),opt_xt=poped.db$settings$optsw[2],
opt_a=poped.db$settings$optsw[4],opt_x=poped.db$settings$optsw[3],
opt_samps=poped.db$settings$optsw[1],opt_inds=poped.db$settings$optsw[5],
fmf=0,dmf=0,bpop=NULL,d=NULL,docc=NULL,sigma=NULL,
name_header=poped.db$settings$strOutputFileName,
file_path=poped.db$settings$strOutputFilePath,
out_file=NULL,compute_inv=TRUE,
trflag=TRUE,
header_flag=TRUE,
...)
{
# BLOCKHEADER_2
# filename to write to is
# poped.db$settings$strOutputFilePath,poped.db$settings$strOutputFileName,NAME,iter,poped.db$settings$strOutputFileExtension
# if((bDiscreteOpt)){
#     tmpfile=sprintf('%s_Discrete_%g%s',poped.db$settings$strOutputFileName,iter,poped.db$settings$strOutputFileExtension)
# } else {
#     tmpfile=sprintf('%s_RS_SG_%g%s',poped.db$settings$strOutputFileName,iter,poped.db$settings$strOutputFileExtension)
# }
#tmpfile=sprintf('%s_%s_%g%s',poped.db$settings$strOutputFileName,name,iter,poped.db$settings$strOutputFileExtension)
# Tracing disabled: nothing to print, return an empty "handle".
if(!trflag) return('')
# Resolve the output target: an explicit file/connection (out_file), a
# file name built from name_header/name/iter under the output path, or
# '' meaning "write to the console only".
if(!is.null(out_file)){
fn <- out_file
if(!any(class(fn)=="file") && (fn!='')){
fn=file(fn,'w')
if(fn==-1){
stop(sprintf('output file could not be opened'))
}
}
} else if(name!=""){
tmpfile <- name_header
if(name!="Default") tmpfile=paste(tmpfile,"_",name,sep="")
if(!is.null(iter)) tmpfile=paste(tmpfile,"_",iter,sep="")
tmpfile=paste(tmpfile,".txt",sep="")
#tmpfile=sprintf('%s_%s.txt',name_header,name)
#if(!is.null(iter)) tmpfile=sprintf('%s_%s_%g.txt',name_header,name,iter)
tmpfile = fullfile(poped.db$settings$strOutputFilePath,tmpfile)
fn=file(tmpfile,'w')
if((fn==-1)){
stop(sprintf('output file could not be opened'))
}
} else {
fn <- ''
# filename=readline("File to open for output: ")
# fn = file(filename, 'w')
# if((fn == -1)){
#    stop(sprintf('output file could not be opened'))
# }
}
# Caller only wants the handle, not the header text.
if(!header_flag) return(fn)
#tic()
# Start the global PopED timer -- presumably stopped by a matching toc()
# elsewhere (TODO confirm).
tic(name=".poped_total_time")
# -------------- LOG FILE: initial status
if(name=="RS"){
alg_name <- "Adaptive Random Search"
if(fn!="") fprintf(fn,'PopED Optimization Results for the %s Algorithm \n\n',alg_name)
} else {
if(fn!="") fprintf(fn,'PopED Results \n\n')
}
if(fn!="") fprintf(fn,'        ')
if(fn!="") fprintf(fn,datestr_poped(poped.db$settings$Engine$Type))
if(fn!="") fprintf(fn,'\n\n')
if(fn!="" || trflag>1) blockexp(fn,poped.db,
e_flag=e_flag,opt_xt=opt_xt,
opt_a=opt_a,opt_x=opt_x,
opt_samps=opt_samps,opt_inds=opt_inds)
# Initial design evaluation: OFV, efficiency criterion and (if an FIM is
# supplied) expected parameter uncertainty.
# NOTE(review): fprintf() called without a connection appears to write to
# the console (PopED helper) -- verify against PopED's fprintf definition.
if(dmf!=0 || fmf != 0){
fprintf(fn,paste0("===============================================================================\n",
"Initial design evaluation\n"))
if(fn!="") fprintf(paste0("===============================================================================\n",
"Initial design evaluation\n"))
}
if(dmf!=0) fprintf(fn,'\nInitial OFV = %g\n',dmf)
if(dmf!=0 && fn!="") fprintf('\nInitial OFV = %g\n',dmf)
if(dmf!=0 && (fn!="" || trflag>1)){
output <- get_unfixed_params(poped.db)
npar <- length(output$all)
fprintf(fn,'\nEfficiency criterion [usually defined as OFV^(1/npar)]  = %g\n',
ofv_criterion(dmf,npar,poped.db))
if(fn!=""){
fprintf('\nEfficiency criterion [usually defined as OFV^(1/npar)]  = %g\n',
ofv_criterion(dmf,npar,poped.db))
}
}
# With an FIM available, report expected parameter %RSE from inv(FIM).
if(is.matrix(fmf) && compute_inv){
param_vars=diag_matlab(inv(fmf))
returnArgs <-  get_cv(param_vars,bpop,d,docc,sigma,poped.db)
params <- returnArgs[[1]]
param_cvs <- returnArgs[[2]]
#fprintf(fn,'\nEfficiency criterion [usually defined as OFV^(1/npar)]  = %g\n',dmf^(1/length(params)))
#fprintf(fn,'\nEfficiency criterion [usually defined as OFV^(1/npar)]  = %g\n',
#        ofv_criterion(dmf,length(params),poped.db))
parnam <- get_parnam(poped.db)
fprintf(fn,'\nInitial design expected parameter \nrelative standard error (%sRSE)\n','%')
if(fn!="") fprintf('\nInitial design expected parameter \nrelative standard error (%sRSE)\n','%')
df <- data.frame("Parameter"=parnam,"Values"=params, "RSE_0"=t(param_cvs*100))
print(df,digits=3, print.gap=3,row.names=F)
if(fn!="") capture.output(print(df,digits=3, print.gap=3,row.names=F),file=fn)
fprintf('\n')
if(fn!="") fprintf(fn,'\n')
}
# Remaining header sections: optimization-method settings, other settings
# and which design variables are being optimized.
if(fn!="" || trflag>1) blockopt(fn,poped.db,opt_method=name)
if(fn!="" || trflag>1) blockother(fn,poped.db,d_switch=!e_flag)
if(fn!="" || trflag) blockoptwrt(fn,poped.db$settings$optsw, opt_xt=opt_xt,
opt_a=opt_a,opt_x=opt_x,
opt_samps=opt_samps,opt_inds=opt_inds)
#fprintf('\n')
#if(fn!="") fprintf(fn,'\n')
return( fn)
}
|
/PopED/R/blockheader.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 6,517
|
r
|
#' Header function for optimization routines
#'
#' Create some output to the screen and a text file that summarizes the problem you are tying to solve.
#'
#' @inheritParams RS_opt
#' @inheritParams evaluate.fim
#' @inheritParams blockexp
#' @inheritParams Doptim
#' @inheritParams create.poped.database
#' @inheritParams RS_opt_gen
#' @param name The name used for the output file. Combined with \code{name_header} and \code{iter}.
#' If \code{""} then output is to the screen.
#' @param iter The last number in the name printed to the output file, combined with \code{name}.
#' @param name_header The initial portion of the file name.
#' @param file_path The path to where the file should be created.
#' @param header_flag Should the header text be printed out?
#' @param ... Additional arguments passed to further functions.
#'
#' @family Helper
#' @return fn A file handle (or \code{''} if \code{name=''})
#' @example tests/testthat/examples_fcn_doc/warfarin_optimize.R
#' @example tests/testthat/examples_fcn_doc/examples_blockheader.R
#' @keywords internal
#' @export
## Function translated using 'matlab.to.r()'
## Then manually adjusted to make work
## Author: Andrew Hooker
blockheader <- function(poped.db,name="Default",iter=NULL,
e_flag=!(poped.db$settings$d_switch),opt_xt=poped.db$settings$optsw[2],
opt_a=poped.db$settings$optsw[4],opt_x=poped.db$settings$optsw[3],
opt_samps=poped.db$settings$optsw[1],opt_inds=poped.db$settings$optsw[5],
fmf=0,dmf=0,bpop=NULL,d=NULL,docc=NULL,sigma=NULL,
name_header=poped.db$settings$strOutputFileName,
file_path=poped.db$settings$strOutputFilePath,
out_file=NULL,compute_inv=TRUE,
trflag=TRUE,
header_flag=TRUE,
...)
{
# BLOCKHEADER_2
# filename to write to is
# poped.db$settings$strOutputFilePath,poped.db$settings$strOutputFileName,NAME,iter,poped.db$settings$strOutputFileExtension
# if((bDiscreteOpt)){
# tmpfile=sprintf('%s_Discrete_%g%s',poped.db$settings$strOutputFileName,iter,poped.db$settings$strOutputFileExtension)
# } else {
# tmpfile=sprintf('%s_RS_SG_%g%s',poped.db$settings$strOutputFileName,iter,poped.db$settings$strOutputFileExtension)
# }
#tmpfile=sprintf('%s_%s_%g%s',poped.db$settings$strOutputFileName,name,iter,poped.db$settings$strOutputFileExtension)
if(!trflag) return('')
if(!is.null(out_file)){
fn <- out_file
if(!any(class(fn)=="file") && (fn!='')){
fn=file(fn,'w')
if(fn==-1){
stop(sprintf('output file could not be opened'))
}
}
} else if(name!=""){
tmpfile <- name_header
if(name!="Default") tmpfile=paste(tmpfile,"_",name,sep="")
if(!is.null(iter)) tmpfile=paste(tmpfile,"_",iter,sep="")
tmpfile=paste(tmpfile,".txt",sep="")
#tmpfile=sprintf('%s_%s.txt',name_header,name)
#if(!is.null(iter)) tmpfile=sprintf('%s_%s_%g.txt',name_header,name,iter)
tmpfile = fullfile(poped.db$settings$strOutputFilePath,tmpfile)
fn=file(tmpfile,'w')
if((fn==-1)){
stop(sprintf('output file could not be opened'))
}
} else {
fn <- ''
# filename=readline("File to open for output: ")
# fn = file(filename, 'w')
# if((fn == -1)){
# stop(sprintf('output file could not be opened'))
# }
}
if(!header_flag) return(fn)
#tic()
tic(name=".poped_total_time")
# -------------- LOG FILE: initial status
if(name=="RS"){
alg_name <- "Adaptive Random Search"
if(fn!="") fprintf(fn,'PopED Optimization Results for the %s Algorithm \n\n',alg_name)
} else {
if(fn!="") fprintf(fn,'PopED Results \n\n')
}
if(fn!="") fprintf(fn,' ')
if(fn!="") fprintf(fn,datestr_poped(poped.db$settings$Engine$Type))
if(fn!="") fprintf(fn,'\n\n')
if(fn!="" || trflag>1) blockexp(fn,poped.db,
e_flag=e_flag,opt_xt=opt_xt,
opt_a=opt_a,opt_x=opt_x,
opt_samps=opt_samps,opt_inds=opt_inds)
if(dmf!=0 || fmf != 0){
fprintf(fn,paste0("===============================================================================\n",
"Initial design evaluation\n"))
if(fn!="") fprintf(paste0("===============================================================================\n",
"Initial design evaluation\n"))
}
if(dmf!=0) fprintf(fn,'\nInitial OFV = %g\n',dmf)
if(dmf!=0 && fn!="") fprintf('\nInitial OFV = %g\n',dmf)
if(dmf!=0 && (fn!="" || trflag>1)){
output <- get_unfixed_params(poped.db)
npar <- length(output$all)
fprintf(fn,'\nEfficiency criterion [usually defined as OFV^(1/npar)] = %g\n',
ofv_criterion(dmf,npar,poped.db))
if(fn!=""){
fprintf('\nEfficiency criterion [usually defined as OFV^(1/npar)] = %g\n',
ofv_criterion(dmf,npar,poped.db))
}
}
if(is.matrix(fmf) && compute_inv){
param_vars=diag_matlab(inv(fmf))
returnArgs <- get_cv(param_vars,bpop,d,docc,sigma,poped.db)
params <- returnArgs[[1]]
param_cvs <- returnArgs[[2]]
#fprintf(fn,'\nEfficiency criterion [usually defined as OFV^(1/npar)] = %g\n',dmf^(1/length(params)))
#fprintf(fn,'\nEfficiency criterion [usually defined as OFV^(1/npar)] = %g\n',
# ofv_criterion(dmf,length(params),poped.db))
parnam <- get_parnam(poped.db)
fprintf(fn,'\nInitial design expected parameter \nrelative standard error (%sRSE)\n','%')
if(fn!="") fprintf('\nInitial design expected parameter \nrelative standard error (%sRSE)\n','%')
df <- data.frame("Parameter"=parnam,"Values"=params,"RSE_0"=t(param_cvs*100))
print(df,digits=3, print.gap=3,row.names=F)
if(fn!="") capture.output(print(df,digits=3, print.gap=3,row.names=F),file=fn)
fprintf('\n')
if(fn!="") fprintf(fn,'\n')
}
if(fn!="" || trflag>1) blockopt(fn,poped.db,opt_method=name)
if(fn!="" || trflag>1) blockother(fn,poped.db,d_switch=!e_flag)
if(fn!="" || trflag) blockoptwrt(fn,poped.db$settings$optsw, opt_xt=opt_xt,
opt_a=opt_a,opt_x=opt_x,
opt_samps=opt_samps,opt_inds=opt_inds)
#fprintf('\n')
#if(fn!="") fprintf(fn,'\n')
return( fn)
}
|
#' Print Brief Details of a bootstrap correction for a high-risk zone
#'
#' Prints a very brief description of the bootstrap correction for a high-risk zone.
#'
#' A very brief description of the bootstrap correction x for a high-risk zone is printed.
#' This is a method for the generic function \code{\link[base]{print}}.
#'
#' @param x bootstrap correction of a high-risk zone (object of class "\code{bootcorr}")
#' @param ... ignored
#' @return The input \code{x}, invisibly (standard contract for print methods).
#' @method print bootcorr
#' @export
#' @seealso \code{\link[base]{print}}, \code{\link{summary.bootcorr}}
print.bootcorr <- function(x, ...){
  # Report the corrected cutoff (alpha) found by the bootstrap procedure.
  cat("resulting value for alpha (cutoff):", x$alphastar, " \n")
  # Print methods conventionally return their argument invisibly so the
  # object can be assigned or piped without re-printing.
  invisible(x)
}
#' Summary of the bootstrap correction for a high-risk zone
#'
#' Prints a useful summary of the bootstrap correction for a high-risk zone.
#'
#' A useful summary of the bootstrap correction for a high-risk zone is printed:
#' the final corrected cutoff, then every candidate cutoff that was tested
#' together with the number of iterations spent on it and the final number of
#' points outside the zone. This is a method for the generic function
#' \code{\link[base]{summary}}.
#'
#' @param object bootstrap correction for a high-risk zone (object of class "\code{bootcorr}")
#' @param ... ignored
#' @return The input \code{object}, invisibly.
#' @method summary bootcorr
#' @export
#' @seealso \code{\link[base]{summary}}, \code{\link{print.bootcorr}}, \code{\link{plot.bootcorr}}
summary.bootcorr <- function(object, ...){
  cat("resulting value for alpha (cutoff):", object$alphastar, " \n \n")
  cat("values for alpha (cutoff) which were tested: \n \n")
  # object$course holds one row per bootstrap iteration; column k indexes
  # which candidate cutoff was under test during that iteration.
  numtest <- max(object$course$k)
  for (j in seq_len(numtest)) {
    # Rows belonging to the j-th tested cutoff (drop = FALSE keeps a data frame).
    dataj <- object$course[object$course$k == j, , drop = FALSE]
    cat(max(dataj$alphastar), " (", max(dataj$i), " iterations, final numout: ",
        max(dataj$numout), ") \n", sep = "")
  }
  invisible(object)
}
#' Visualize the bootstrap correction for a high-risk zone.
#'
#' Plot a visualization of the bootstrap correction for a high-risk zone.
#' The different values tested for alpha are plotted.
#'
#' This is the plot method for the class \code{bootcorr}.
#'
#' @param x bootstrap correction for a high-risk zone (object of class "\code{bootcorr}")
#' @param ... extra arguments passed to the generic \code{\link[graphics]{plot}} function.
#' @method plot bootcorr
#' @importFrom graphics points
#' @export plot bootcorr
#' @seealso \code{\link[graphics]{plot}}, \code{\link{print.bootcorr}}, \code{\link{summary.bootcorr}}
plot.bootcorr <- function(x, ...){
  # One row of x$course per bootstrap iteration; number them in order.
  trace <- x$course
  trace$step <- seq_len(nrow(trace))
  n_steps <- nrow(trace)
  # Step curve of the candidate cutoff values over the iterations.
  plot(trace$step, trace$alphastar, type = "s", xlab = "iteration", ylab = "cutoff", ...)
  # Mark the last (accepted) cutoff with a filled circle.
  points(trace$step[n_steps], trace$alphastar[n_steps], pch = 16, ...)
}
|
/highriskzone/R/genericbootcorr.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,700
|
r
|
#' Print Brief Details of a bootstrap correction for a high-risk zone
#'
#' Prints a very brief description of the bootstrap correction for a high-risk zone.
#'
#' A very brief description of the bootstrap correction x for a high-risk zone is printed.
#' This is a method for the generic function \code{\link[base]{print}}.
#'
#' @param x bootstrap correction of a high-risk zone (object of class "\code{bootcorr}")
#' @param ... ignored
#' @return The input \code{x}, invisibly (standard contract for print methods).
#' @method print bootcorr
#' @export
#' @seealso \code{\link[base]{print}}, \code{\link{summary.bootcorr}}
print.bootcorr <- function(x, ...){
  # Report the corrected cutoff (alpha) found by the bootstrap procedure.
  cat("resulting value for alpha (cutoff):", x$alphastar, " \n")
  # Print methods conventionally return their argument invisibly so the
  # object can be assigned or piped without re-printing.
  invisible(x)
}
#' Summary of the bootstrap correction for a high-risk zone
#'
#' Prints a useful summary of the bootstrap correction for a high-risk zone.
#'
#' A useful summary of the bootstrap correction for a high-risk zone is printed:
#' the final corrected cutoff, then every candidate cutoff that was tested
#' together with the number of iterations spent on it and the final number of
#' points outside the zone. This is a method for the generic function
#' \code{\link[base]{summary}}.
#'
#' @param object bootstrap correction for a high-risk zone (object of class "\code{bootcorr}")
#' @param ... ignored
#' @return The input \code{object}, invisibly.
#' @method summary bootcorr
#' @export
#' @seealso \code{\link[base]{summary}}, \code{\link{print.bootcorr}}, \code{\link{plot.bootcorr}}
summary.bootcorr <- function(object, ...){
  cat("resulting value for alpha (cutoff):", object$alphastar, " \n \n")
  cat("values for alpha (cutoff) which were tested: \n \n")
  # object$course holds one row per bootstrap iteration; column k indexes
  # which candidate cutoff was under test during that iteration.
  numtest <- max(object$course$k)
  for (j in seq_len(numtest)) {
    # Rows belonging to the j-th tested cutoff (drop = FALSE keeps a data frame).
    dataj <- object$course[object$course$k == j, , drop = FALSE]
    cat(max(dataj$alphastar), " (", max(dataj$i), " iterations, final numout: ",
        max(dataj$numout), ") \n", sep = "")
  }
  invisible(object)
}
#' Visualize the bootstrap correction for a high-risk zone.
#'
#' Plot a visualization of the bootstrap correction for a high-risk zone.
#' The different values tested for alpha are plotted.
#'
#' This is the plot method for the class \code{bootcorr}.
#'
#' @param x bootstrap correction for a high-risk zone (object of class "\code{bootcorr}")
#' @param ... extra arguments passed to the generic \code{\link[graphics]{plot}} function.
#' @method plot bootcorr
#' @importFrom graphics points
#' @export plot bootcorr
#' @seealso \code{\link[graphics]{plot}}, \code{\link{print.bootcorr}}, \code{\link{summary.bootcorr}}
plot.bootcorr <- function(x, ...){
  # One row of x$course per bootstrap iteration; number them in order.
  trace <- x$course
  trace$step <- seq_len(nrow(trace))
  n_steps <- nrow(trace)
  # Step curve of the candidate cutoff values over the iterations.
  plot(trace$step, trace$alphastar, type = "s", xlab = "iteration", ylab = "cutoff", ...)
  # Mark the last (accepted) cutoff with a filled circle.
  points(trace$step[n_steps], trace$alphastar[n_steps], pch = 16, ...)
}
|
# Auto-extracted example script for QGglmm::QGvcov (taken from the package's
# Rd examples).  Requires the QGglmm package; kept verbatim because the exact
# call forms are the documentation being demonstrated.
library(QGglmm)
### Name: QGvcov
### Title: Compute the phenotypic variance-covariance matrix on the
###   observed / expected scale
### Aliases: QGvcov
### ** Examples
## Example using a bivariate model (Binary trait/Gaussian trait)
# Parameters
mu <- c(0, 1)
P <- diag(c(1, 4))
# Note: no phenotypic, nor genetic correlations, hence should be equal to univariate case!
# Setting up the link functions
# Note that since the use of "cubature" to compute the integrals,
# the functions must use a matrix as input and yield a matrix as output,
# each row corresponding to a trait
inv.links <- function(mat) {matrix(c(pnorm(mat[1, ]), mat[2, ]), nrow = 2, byrow = TRUE)}
# Setting up the distribution variance functions
var.funcs <- function(mat) {matrix(c(pnorm(mat[1, ]) * (1 - pnorm(mat[1, ])), 0 * mat[2, ]),
                                   nrow = 2,
                                   byrow = TRUE)}
# The first row is p * (1 - p) (variance of a binomial)
# The second row is 0 because no extra distribution is assumed for a Gaussian trait
# Computing the multivariate mean on observed scale
# Phenotypic VCV matrix on observed scale
QGvcov(mu = mu, vcov = P, link.inv = inv.links, var.func = var.funcs)
# Phenotypic VCV matrix on the expected scale
QGvcov(mu = mu, vcov = P, link.inv = inv.links, var.func = var.funcs, exp.scale = TRUE)
QGvar.exp(mu = 0, var = 1, link.inv = pnorm) # Same variance on the expected scale
QGvar.exp(mu = 0, var = 1, link.inv = pnorm) +
  QGvar.dist(mu = 0, var = 1, var.func = function(x){pnorm(x) * (1 - pnorm(x))})
# Same variance on the observed scale
# Reminder: the results are the same here because we have no correlation between the two traits
/data/genthat_extracted_code/QGglmm/examples/QGvcov.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,705
|
r
|
# Auto-extracted example script for QGglmm::QGvcov (taken from the package's
# Rd examples).  Requires the QGglmm package; kept verbatim because the exact
# call forms are the documentation being demonstrated.
library(QGglmm)
### Name: QGvcov
### Title: Compute the phenotypic variance-covariance matrix on the
###   observed / expected scale
### Aliases: QGvcov
### ** Examples
## Example using a bivariate model (Binary trait/Gaussian trait)
# Parameters
mu <- c(0, 1)
P <- diag(c(1, 4))
# Note: no phenotypic, nor genetic correlations, hence should be equal to univariate case!
# Setting up the link functions
# Note that since the use of "cubature" to compute the integrals,
# the functions must use a matrix as input and yield a matrix as output,
# each row corresponding to a trait
inv.links <- function(mat) {matrix(c(pnorm(mat[1, ]), mat[2, ]), nrow = 2, byrow = TRUE)}
# Setting up the distribution variance functions
var.funcs <- function(mat) {matrix(c(pnorm(mat[1, ]) * (1 - pnorm(mat[1, ])), 0 * mat[2, ]),
                                   nrow = 2,
                                   byrow = TRUE)}
# The first row is p * (1 - p) (variance of a binomial)
# The second row is 0 because no extra distribution is assumed for a Gaussian trait
# Computing the multivariate mean on observed scale
# Phenotypic VCV matrix on observed scale
QGvcov(mu = mu, vcov = P, link.inv = inv.links, var.func = var.funcs)
# Phenotypic VCV matrix on the expected scale
QGvcov(mu = mu, vcov = P, link.inv = inv.links, var.func = var.funcs, exp.scale = TRUE)
QGvar.exp(mu = 0, var = 1, link.inv = pnorm) # Same variance on the expected scale
QGvar.exp(mu = 0, var = 1, link.inv = pnorm) +
  QGvar.dist(mu = 0, var = 1, var.func = function(x){pnorm(x) * (1 - pnorm(x))})
# Same variance on the observed scale
# Reminder: the results are the same here because we have no correlation between the two traits
## Read the data
inputFile <- "household_power_consumption.txt"
consumption <- read.table(inputFile, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
## Combine the Date and Time columns into POSIX timestamps
stamps <- paste(consumption[, 1], consumption[, 2])
stamps <- strptime(stamps, "%d/%m/%Y %H:%M:%S")
## Keep only observations from 2007-02-01 up to (and including) 2007-02-03 00:00
keep <- stamps >= "2007-02-01" & stamps <= "2007-02-03"
subData <- cbind(data.frame(DateTime = stamps[keep]), consumption[keep, 3:9])
## Drop rows whose timestamp is NA (unparsed dates produce NA-indexed rows)
subData <- subData[!is.na(subData$DateTime), ]
## Measurement columns arrive as character; coerce each to numeric
for (col in 2:8) {
  subData[, col] <- as.numeric(subData[, col])
}
## plot3: the three sub-metering series with a legend, written to a 480x480 PNG
png("plot3.png", width = 480, height = 480)
plot(subData$DateTime, subData$Sub_metering_1, type="l", xaxt="n", xlab = "", ylab = "Energy sub metering")
axis.POSIXct(1, at=seq(min(subData$DateTime), max(subData$DateTime), by="days"), format = "%a")
lines(subData$DateTime, subData$Sub_metering_2, col = "Red")
lines(subData$DateTime, subData$Sub_metering_3, col = "Blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
/plot3.R
|
no_license
|
gongshan0521/ExploratoryGraphProject
|
R
| false
| false
| 1,161
|
r
|
## Read the data
inputFile <- "household_power_consumption.txt"
consumption <- read.table(inputFile, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
## Combine the Date and Time columns into POSIX timestamps
stamps <- paste(consumption[, 1], consumption[, 2])
stamps <- strptime(stamps, "%d/%m/%Y %H:%M:%S")
## Keep only observations from 2007-02-01 up to (and including) 2007-02-03 00:00
keep <- stamps >= "2007-02-01" & stamps <= "2007-02-03"
subData <- cbind(data.frame(DateTime = stamps[keep]), consumption[keep, 3:9])
## Drop rows whose timestamp is NA (unparsed dates produce NA-indexed rows)
subData <- subData[!is.na(subData$DateTime), ]
## Measurement columns arrive as character; coerce each to numeric
for (col in 2:8) {
  subData[, col] <- as.numeric(subData[, col])
}
## plot3: the three sub-metering series with a legend, written to a 480x480 PNG
png("plot3.png", width = 480, height = 480)
plot(subData$DateTime, subData$Sub_metering_1, type="l", xaxt="n", xlab = "", ylab = "Energy sub metering")
axis.POSIXct(1, at=seq(min(subData$DateTime), max(subData$DateTime), by="days"), format = "%a")
lines(subData$DateTime, subData$Sub_metering_2, col = "Red")
lines(subData$DateTime, subData$Sub_metering_3, col = "Blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
library(LSAfun)
# NOTE: You will need to change this filepath to wherever
# your TASA space is
load("/Users/Alice/Desktop/Code/r_scripts/dyad_lsa/TASA.rda")
setwd("/Users/Alice/Desktop/Code/r_scripts/dyad_data/dyads/")
# Set up initial variables
# NOTE: All files in the current working directory
# must be dyad files in the expected format
all_dyad_filenames <- list.files()
total_dyads_count <- length(all_dyad_filenames)
dyad_ids <- vector(mode = "character", length = total_dyads_count)
# Use NA (not 0) for dyads that get skipped: 0 is a legitimate cosine
# similarity, so a 0 placeholder would be indistinguishable from a real result.
dyad_cosine_sims <- rep(NA_real_, total_dyads_count)
# seq_len() (rather than 1:total_dyads_count) is safe when the directory is
# empty: 1:0 would iterate over c(1, 0) and fail on a nonexistent file.
for (i in seq_len(total_dyads_count)) {
  # Read in CSV and split it into each user's data
  filename <- all_dyad_filenames[i]
  chat <- read.csv(filename)
  chat$ID <- as.factor(chat$ID)
  splitchat <- split(chat, chat$ID)
  # Add this dyad's ID to the results vector
  dyad_ids[i] <- chat$dyad[1]
  # Drop server messages; only human users count toward the dyad
  userchats = splitchat[!(names(splitchat) %in% c("SERVER"))]
  # Skip if the dyad had fewer than 2 (human) users
  if (length(names(userchats)) < 2) {
    warning(paste(filename, "had fewer than 2 users. Skipping dyad."))
    next
  }
  # Concatenate each user's text into one big string
  id1 = names(userchats)[1]
  id2 = names(userchats)[2]
  user1text <- paste(userchats[[id1]]$text, collapse = " ")
  user2text <- paste(userchats[[id2]]$text, collapse = " ")
  if (user1text == "") {
    warning(paste("User", id1, "of", filename, "had no text. Skipping dyad."))
  }
  else if (user2text == "") {
    warning(paste("User", id2, "of", filename, "had no text. Skipping dyad."))
  }
  else {
    # Calculate text similarity in the TASA semantic space
    cosine_sim = costring(user1text, user2text, TASA, TRUE)
    # Add cosine sim to the results vector
    dyad_cosine_sims[i] <- cosine_sim
  }
}
# Write results to file (skipped dyads appear as NA in cosine_sim)
dyads_and_cosine_sims <- data.frame("dyad" = dyad_ids,
                                    "cosine_sim" = dyad_cosine_sims,
                                    stringsAsFactors = FALSE)
results_filename <- paste("dyad_cosine_similarities_", as.integer(Sys.time()), ".csv", sep = "")
setwd("~/Desktop/")
write.csv(dyads_and_cosine_sims, file = results_filename, row.names = FALSE)
|
/dyad_lsa.r
|
no_license
|
ichthala/dyad_lsa
|
R
| false
| false
| 2,158
|
r
|
library(LSAfun)
# NOTE: You will need to change this filepath to wherever
# your TASA space is
load("/Users/Alice/Desktop/Code/r_scripts/dyad_lsa/TASA.rda")
setwd("/Users/Alice/Desktop/Code/r_scripts/dyad_data/dyads/")
# Set up initial variables
# NOTE: All files in the current working directory
# must be dyad files in the expected format
all_dyad_filenames <- list.files()
total_dyads_count <- length(all_dyad_filenames)
dyad_ids <- vector(mode = "character", length = total_dyads_count)
# Use NA (not 0) for dyads that get skipped: 0 is a legitimate cosine
# similarity, so a 0 placeholder would be indistinguishable from a real result.
dyad_cosine_sims <- rep(NA_real_, total_dyads_count)
# seq_len() (rather than 1:total_dyads_count) is safe when the directory is
# empty: 1:0 would iterate over c(1, 0) and fail on a nonexistent file.
for (i in seq_len(total_dyads_count)) {
  # Read in CSV and split it into each user's data
  filename <- all_dyad_filenames[i]
  chat <- read.csv(filename)
  chat$ID <- as.factor(chat$ID)
  splitchat <- split(chat, chat$ID)
  # Add this dyad's ID to the results vector
  dyad_ids[i] <- chat$dyad[1]
  # Drop server messages; only human users count toward the dyad
  userchats = splitchat[!(names(splitchat) %in% c("SERVER"))]
  # Skip if the dyad had fewer than 2 (human) users
  if (length(names(userchats)) < 2) {
    warning(paste(filename, "had fewer than 2 users. Skipping dyad."))
    next
  }
  # Concatenate each user's text into one big string
  id1 = names(userchats)[1]
  id2 = names(userchats)[2]
  user1text <- paste(userchats[[id1]]$text, collapse = " ")
  user2text <- paste(userchats[[id2]]$text, collapse = " ")
  if (user1text == "") {
    warning(paste("User", id1, "of", filename, "had no text. Skipping dyad."))
  }
  else if (user2text == "") {
    warning(paste("User", id2, "of", filename, "had no text. Skipping dyad."))
  }
  else {
    # Calculate text similarity in the TASA semantic space
    cosine_sim = costring(user1text, user2text, TASA, TRUE)
    # Add cosine sim to the results vector
    dyad_cosine_sims[i] <- cosine_sim
  }
}
# Write results to file (skipped dyads appear as NA in cosine_sim)
dyads_and_cosine_sims <- data.frame("dyad" = dyad_ids,
                                    "cosine_sim" = dyad_cosine_sims,
                                    stringsAsFactors = FALSE)
results_filename <- paste("dyad_cosine_similarities_", as.integer(Sys.time()), ".csv", sep = "")
setwd("~/Desktop/")
write.csv(dyads_and_cosine_sims, file = results_filename, row.names = FALSE)
|
#' \code{chart_eia_steo}
#' @description Supply Demand Balance from EIA Short Term Energy Outlook.
#' @param key Your private EIA API token.
#' @param from Date as character "2020-07-01". Default to all dates available.
#' @param market "globalOil" only currently implemented.
#' @param fig.title Defaults to "EIA STEO Global Liquids SD Balance".
#' @param fig.units Defaults to "million barrels per day"
#' @param legend.pos Defaults to list(x = 0.4, y = 0.53)
#' @param output "chart" for plotly object or "data" for dataframe.
#' @return A plotly object or a dataframe
#' @export chart_eia_steo
#' @author Philippe Cote
#' @examples
#' \dontrun{
#' chart_eia_steo(key = EIAkey, market = "globalOil")
#' }
chart_eia_steo <- function(market = "globalOil",
                           key = "your EIA.gov API key",
                           from = "2018-07-01",
                           fig.title = "EIA STEO Global Liquids SD Balance",
                           fig.units = "million barrels per day",
                           legend.pos = list(x = 0.4, y = 0.53),
                           output = "chart") {
  # Only the global liquids market is implemented: any other `market` value
  # falls through every branch and the function returns NULL invisibly.
  if (market == "globalOil") {
    # STEO monthly series fetched from the EIA API via RTL::eia2tidy:
    # non-OPEC supply, OPEC supply, world demand and world stock change.
    eia_df <- tibble::tribble(~ticker, ~name,
                              "STEO.PAPR_NONOPEC.M", "SupplyNOPEC",
                              "STEO.PAPR_OPEC.M", "SupplyOPEC",
                              "STEO.PATC_WORLD.M", "Demand",
                              "STEO.T3_STCHANGE_WORLD.M", "Inv_Change") %>%
      dplyr::mutate(key = key) %>%
      dplyr::mutate(df = purrr::pmap(list(ticker,key,name),.f=RTL::eia2tidy)) %>%
      # Reshape to one column per series, one row per month.
      dplyr::select(df) %>% tidyr::unnest(df) %>%
      tidyr::pivot_wider(id_cols = date, names_from = series, values_from = value) %>%
      # Total supply = OPEC + non-OPEC; the stock-change sign is flipped so
      # that inventory draws show up as positive values.
      dplyr::transmute(date, Supply = SupplyNOPEC + SupplyOPEC, Demand,
                       Inv_Change = Inv_Change * -1) %>%
      stats::na.omit()
    if (!is.null(from)) {eia_df <- eia_df %>% dplyr::filter(date >= from)}
    if (output == "data") {return(eia_df)} else {
      # Two stacked panels sharing the x axis: Supply/Demand (group 1) on
      # top and the inventory change (group 2) below.
      out <- eia_df %>%
        tidyr::pivot_longer(-date,names_to = "series",values_to = "value") %>%
        dplyr::mutate(group = dplyr::case_when(series == "Inv_Change" ~ 2,TRUE ~ 1)) %>%
        split(.$group) %>%
        lapply(function(d) plotly::plot_ly(d, x = ~date, y = ~value,
                                           color = ~series, colors = c("red","black","blue"),
                                           type = c("scatter"), mode = "lines")) %>%
        plotly::subplot(nrows = NROW(.), shareX = TRUE) %>%
        plotly::layout(title = list(text = fig.title, x = 0),
                       xaxis = list(title = " "),
                       yaxis = list(title = fig.units ),
                       legend = legend.pos)
      return(out)
    }
  }
}
# chart_eia_steo(market = "globalOil",
#                key = EIAkey,
#                from = "2000-07-01",
#                fig.title = "EIA STEO Global Liquids SD Balance",
#                fig.units = "million barrels per day",
#                legend.pos = list(x = 0.4, y = 0.53),
#                output = "chart")
|
/R/chart_eia_steo.R
|
no_license
|
fmair/RTL
|
R
| false
| false
| 3,169
|
r
|
#' \code{chart_eia_steo}
#' @description Supply Demand Balance from EIA Short Term Energy Outlook.
#' @param key Your private EIA API token.
#' @param from Date as character "2020-07-01". Default to all dates available.
#' @param market "globalOil" only currently implemented.
#' @param fig.title Defaults to "EIA STEO Global Liquids SD Balance".
#' @param fig.units Defaults to "million barrels per day"
#' @param legend.pos Defaults to list(x = 0.4, y = 0.53)
#' @param output "chart" for plotly object or "data" for dataframe.
#' @return A plotly object or a dataframe
#' @export chart_eia_steo
#' @author Philippe Cote
#' @examples
#' \dontrun{
#' chart_eia_steo(key = EIAkey, market = "globalOil")
#' }
chart_eia_steo <- function(market = "globalOil",
                           key = "your EIA.gov API key",
                           from = "2018-07-01",
                           fig.title = "EIA STEO Global Liquids SD Balance",
                           fig.units = "million barrels per day",
                           legend.pos = list(x = 0.4, y = 0.53),
                           output = "chart") {
  # Only the global liquids market is implemented: any other `market` value
  # falls through every branch and the function returns NULL invisibly.
  if (market == "globalOil") {
    # STEO monthly series fetched from the EIA API via RTL::eia2tidy:
    # non-OPEC supply, OPEC supply, world demand and world stock change.
    eia_df <- tibble::tribble(~ticker, ~name,
                              "STEO.PAPR_NONOPEC.M", "SupplyNOPEC",
                              "STEO.PAPR_OPEC.M", "SupplyOPEC",
                              "STEO.PATC_WORLD.M", "Demand",
                              "STEO.T3_STCHANGE_WORLD.M", "Inv_Change") %>%
      dplyr::mutate(key = key) %>%
      dplyr::mutate(df = purrr::pmap(list(ticker,key,name),.f=RTL::eia2tidy)) %>%
      # Reshape to one column per series, one row per month.
      dplyr::select(df) %>% tidyr::unnest(df) %>%
      tidyr::pivot_wider(id_cols = date, names_from = series, values_from = value) %>%
      # Total supply = OPEC + non-OPEC; the stock-change sign is flipped so
      # that inventory draws show up as positive values.
      dplyr::transmute(date, Supply = SupplyNOPEC + SupplyOPEC, Demand,
                       Inv_Change = Inv_Change * -1) %>%
      stats::na.omit()
    if (!is.null(from)) {eia_df <- eia_df %>% dplyr::filter(date >= from)}
    if (output == "data") {return(eia_df)} else {
      # Two stacked panels sharing the x axis: Supply/Demand (group 1) on
      # top and the inventory change (group 2) below.
      out <- eia_df %>%
        tidyr::pivot_longer(-date,names_to = "series",values_to = "value") %>%
        dplyr::mutate(group = dplyr::case_when(series == "Inv_Change" ~ 2,TRUE ~ 1)) %>%
        split(.$group) %>%
        lapply(function(d) plotly::plot_ly(d, x = ~date, y = ~value,
                                           color = ~series, colors = c("red","black","blue"),
                                           type = c("scatter"), mode = "lines")) %>%
        plotly::subplot(nrows = NROW(.), shareX = TRUE) %>%
        plotly::layout(title = list(text = fig.title, x = 0),
                       xaxis = list(title = " "),
                       yaxis = list(title = fig.units ),
                       legend = legend.pos)
      return(out)
    }
  }
}
# chart_eia_steo(market = "globalOil",
#                key = EIAkey,
#                from = "2000-07-01",
#                fig.title = "EIA STEO Global Liquids SD Balance",
#                fig.units = "million barrels per day",
#                legend.pos = list(x = 0.4, y = 0.53),
#                output = "chart")
|
# Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.10.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#
# NOTE(review): this file is swagger-codegen generated; manual edits will be
# lost on regeneration, so only explanatory comments are added here.
#' BulkDownloadLinks Class
#'
#' @field name
#' @field url
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
BulkDownloadLinks <- R6::R6Class(
  'BulkDownloadLinks',
  public = list(
    `name` = NA,
    `url` = NA,
    # Both fields are optional; fields omitted at construction keep their NA default.
    initialize = function(`name`, `url`){
      if (!missing(`name`)) {
        self$`name` <- `name`
      }
      if (!missing(`url`)) {
        self$`url` <- `url`
      }
    },
    # Build a list representation in which each non-NULL field is serialized:
    # lists of R6 objects are serialized element-wise via their own toJSON(),
    # anything else goes through jsonlite::toJSON with scalars unboxed.
    toJSON = function() {
      BulkDownloadLinksObject <- list()
      if (!is.null(self$`name`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`name`) && ((length(self$`name`) == 0) || ((length(self$`name`) != 0 && R6::is.R6(self$`name`[[1]]))))) {
          BulkDownloadLinksObject[['name']] <- lapply(self$`name`, function(x) x$toJSON())
        } else {
          BulkDownloadLinksObject[['name']] <- jsonlite::toJSON(self$`name`, auto_unbox = TRUE)
        }
      }
      if (!is.null(self$`url`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`url`) && ((length(self$`url`) == 0) || ((length(self$`url`) != 0 && R6::is.R6(self$`url`[[1]]))))) {
          BulkDownloadLinksObject[['url']] <- lapply(self$`url`, function(x) x$toJSON())
        } else {
          BulkDownloadLinksObject[['url']] <- jsonlite::toJSON(self$`url`, auto_unbox = TRUE)
        }
      }
      BulkDownloadLinksObject
    },
    # Populate fields from a JSON string; fields absent from the JSON are
    # left untouched (contrast with setFromList(), which resets them to NA).
    fromJSON = function(BulkDownloadLinksJson) {
      BulkDownloadLinksObject <- jsonlite::fromJSON(BulkDownloadLinksJson)
      if (!is.null(BulkDownloadLinksObject$`name`)) {
        self$`name` <- BulkDownloadLinksObject$`name`
      }
      if (!is.null(BulkDownloadLinksObject$`url`)) {
        self$`url` <- BulkDownloadLinksObject$`url`
      }
    },
    # Pretty-printed JSON string of the toJSON() representation.
    toJSONString = function() {
      jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
    },
    # Parse a JSON string and overwrite ALL fields via setFromList().
    fromJSONString = function(BulkDownloadLinksJson) {
      BulkDownloadLinksObject <- jsonlite::fromJSON(BulkDownloadLinksJson, simplifyDataFrame = FALSE)
      self$setFromList(BulkDownloadLinksObject)
    },
    # Overwrite every field from a plain named list; fields absent from the
    # list are reset to NA.
    setFromList = function(listObject) {
      if (!is.null(listObject$`name`)) {
        self$`name` <- listObject$`name`
      }
      else {
        self$`name` <- NA
      }
      if (!is.null(listObject$`url`)) {
        self$`url` <- listObject$`url`
      }
      else {
        self$`url` <- NA
      }
    },
    # Plain named-list snapshot of the current field values.
    getAsList = function() {
      listObject = list()
      listObject[["name"]] <- self$`name`
      listObject[["url"]] <- self$`url`
      return(listObject)
    }
  )
)
|
/R/BulkDownloadLinks.r
|
no_license
|
Aggarch/r-sdk
|
R
| false
| false
| 3,228
|
r
|
# Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.10.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#
# NOTE(review): this file is swagger-codegen generated; manual edits will be
# lost on regeneration, so only explanatory comments are added here.
#' BulkDownloadLinks Class
#'
#' @field name
#' @field url
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
BulkDownloadLinks <- R6::R6Class(
  'BulkDownloadLinks',
  public = list(
    `name` = NA,
    `url` = NA,
    # Both fields are optional; fields omitted at construction keep their NA default.
    initialize = function(`name`, `url`){
      if (!missing(`name`)) {
        self$`name` <- `name`
      }
      if (!missing(`url`)) {
        self$`url` <- `url`
      }
    },
    # Build a list representation in which each non-NULL field is serialized:
    # lists of R6 objects are serialized element-wise via their own toJSON(),
    # anything else goes through jsonlite::toJSON with scalars unboxed.
    toJSON = function() {
      BulkDownloadLinksObject <- list()
      if (!is.null(self$`name`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`name`) && ((length(self$`name`) == 0) || ((length(self$`name`) != 0 && R6::is.R6(self$`name`[[1]]))))) {
          BulkDownloadLinksObject[['name']] <- lapply(self$`name`, function(x) x$toJSON())
        } else {
          BulkDownloadLinksObject[['name']] <- jsonlite::toJSON(self$`name`, auto_unbox = TRUE)
        }
      }
      if (!is.null(self$`url`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`url`) && ((length(self$`url`) == 0) || ((length(self$`url`) != 0 && R6::is.R6(self$`url`[[1]]))))) {
          BulkDownloadLinksObject[['url']] <- lapply(self$`url`, function(x) x$toJSON())
        } else {
          BulkDownloadLinksObject[['url']] <- jsonlite::toJSON(self$`url`, auto_unbox = TRUE)
        }
      }
      BulkDownloadLinksObject
    },
    # Populate fields from a JSON string; fields absent from the JSON are
    # left untouched (contrast with setFromList(), which resets them to NA).
    fromJSON = function(BulkDownloadLinksJson) {
      BulkDownloadLinksObject <- jsonlite::fromJSON(BulkDownloadLinksJson)
      if (!is.null(BulkDownloadLinksObject$`name`)) {
        self$`name` <- BulkDownloadLinksObject$`name`
      }
      if (!is.null(BulkDownloadLinksObject$`url`)) {
        self$`url` <- BulkDownloadLinksObject$`url`
      }
    },
    # Pretty-printed JSON string of the toJSON() representation.
    toJSONString = function() {
      jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
    },
    # Parse a JSON string and overwrite ALL fields via setFromList().
    fromJSONString = function(BulkDownloadLinksJson) {
      BulkDownloadLinksObject <- jsonlite::fromJSON(BulkDownloadLinksJson, simplifyDataFrame = FALSE)
      self$setFromList(BulkDownloadLinksObject)
    },
    # Overwrite every field from a plain named list; fields absent from the
    # list are reset to NA.
    setFromList = function(listObject) {
      if (!is.null(listObject$`name`)) {
        self$`name` <- listObject$`name`
      }
      else {
        self$`name` <- NA
      }
      if (!is.null(listObject$`url`)) {
        self$`url` <- listObject$`url`
      }
      else {
        self$`url` <- NA
      }
    },
    # Plain named-list snapshot of the current field values.
    getAsList = function() {
      listObject = list()
      listObject[["name"]] <- self$`name`
      listObject[["url"]] <- self$`url`
      return(listObject)
    }
  )
)
|
library(tidyverse)
library(NMF)
# One row per regular-season game: season, winning/losing team IDs and scores.
reg_stats_compact <- read_csv("data/DataFiles/RegularSeasonCompactResults.csv")
# Duplicate every game from both teams' perspectives so each directed
# (TeamID, OTeamID) pair is observed, then factorize the per-season
# score-share matrix with rank-1 NMF.
tmp <- bind_rows(
  reg_stats_compact %>%
    select(Season,
           TeamID = WTeamID, OTeamID = LTeamID,
           Score = WScore,
           OScore = LScore),
  reg_stats_compact %>%
    select(Season,
           TeamID = LTeamID, OTeamID = WTeamID,
           Score = LScore,
           OScore = WScore)
) %>%
  group_by(Season, TeamID, OTeamID) %>%
  # Mean share of the total points scored by TeamID against OTeamID (in [0, 1]).
  summarise(Score = mean(Score / (OScore + Score))) %>%
  group_by(Season) %>%
  nest() %>%
  # Build one TeamID x OTeamID matrix per season; fill = 0.5 treats pairs
  # that never met as an even split.
  mutate(data = map(data, ~ .x %>%
                      spread(OTeamID, Score, fill = 0.5) %>%
                      as.data.frame() %>%
                      column_to_rownames(var = "TeamID") %>%
                      as.matrix()),
         # Rank-1 NMF with ICA seeding; mat = basis %*% coef is the rank-1
         # reconstruction of the score-share matrix.
         nmf = map(data, ~ nmf(.x, rank = 1, seed = "ica")),
         coef = map(nmf, ~ coef(.x)),
         basis = map(nmf, ~ basis(.x)),
         mat = map2(coef, basis, ~ .y %*% .x))
# Long-format reconstruction: one (Season, team1, team2, rate) row per pair.
tmp %>%
  mutate(data = map(mat, ~ .x %>%
                      as.data.frame() %>%
                      rownames_to_column(var = "team1") %>%
                      as_tibble %>%
                      gather(team2, rate, -team1))) %>%
  select(Season, data) %>%
  unnest() %>%
  write_csv("data/processed/nmf.csv")
# Per-team coefficient (column factor) of the rank-1 decomposition.
tmp %>%
  select(Season, coef) %>%
  mutate(coef = map(coef, ~ tibble(coef = c(.x),
                                   TeamID = colnames(.x)))) %>%
  unnest() %>%
  write_csv("data/processed/coef_nmf.csv")
# Per-team basis (row factor) of the rank-1 decomposition.
tmp %>%
  select(Season, basis) %>%
  mutate(basis = map(basis, ~ tibble(basis = c(.x),
                                     TeamID = rownames(.x)))) %>%
  unnest() %>%
  write_csv("data/processed/basis_nmf.csv")
rm(tmp);gc()
|
/src/processed/nmf.R
|
no_license
|
kur0cky/NCAA2019
|
R
| false
| false
| 1,760
|
r
|
library(tidyverse)
library(NMF)
# One row per regular-season game: season, winning/losing team IDs and scores.
reg_stats_compact <- read_csv("data/DataFiles/RegularSeasonCompactResults.csv")
# Duplicate every game from both teams' perspectives so each directed
# (TeamID, OTeamID) pair is observed, then factorize the per-season
# score-share matrix with rank-1 NMF.
tmp <- bind_rows(
  reg_stats_compact %>%
    select(Season,
           TeamID = WTeamID, OTeamID = LTeamID,
           Score = WScore,
           OScore = LScore),
  reg_stats_compact %>%
    select(Season,
           TeamID = LTeamID, OTeamID = WTeamID,
           Score = LScore,
           OScore = WScore)
) %>%
  group_by(Season, TeamID, OTeamID) %>%
  # Mean share of the total points scored by TeamID against OTeamID (in [0, 1]).
  summarise(Score = mean(Score / (OScore + Score))) %>%
  group_by(Season) %>%
  nest() %>%
  # Build one TeamID x OTeamID matrix per season; fill = 0.5 treats pairs
  # that never met as an even split.
  mutate(data = map(data, ~ .x %>%
                      spread(OTeamID, Score, fill = 0.5) %>%
                      as.data.frame() %>%
                      column_to_rownames(var = "TeamID") %>%
                      as.matrix()),
         # Rank-1 NMF with ICA seeding; mat = basis %*% coef is the rank-1
         # reconstruction of the score-share matrix.
         nmf = map(data, ~ nmf(.x, rank = 1, seed = "ica")),
         coef = map(nmf, ~ coef(.x)),
         basis = map(nmf, ~ basis(.x)),
         mat = map2(coef, basis, ~ .y %*% .x))
# Long-format reconstruction: one (Season, team1, team2, rate) row per pair.
tmp %>%
  mutate(data = map(mat, ~ .x %>%
                      as.data.frame() %>%
                      rownames_to_column(var = "team1") %>%
                      as_tibble %>%
                      gather(team2, rate, -team1))) %>%
  select(Season, data) %>%
  unnest() %>%
  write_csv("data/processed/nmf.csv")
# Per-team coefficient (column factor) of the rank-1 decomposition.
tmp %>%
  select(Season, coef) %>%
  mutate(coef = map(coef, ~ tibble(coef = c(.x),
                                   TeamID = colnames(.x)))) %>%
  unnest() %>%
  write_csv("data/processed/coef_nmf.csv")
# Per-team basis (row factor) of the rank-1 decomposition.
tmp %>%
  select(Season, basis) %>%
  mutate(basis = map(basis, ~ tibble(basis = c(.x),
                                     TeamID = rownames(.x)))) %>%
  unnest() %>%
  write_csv("data/processed/basis_nmf.csv")
rm(tmp);gc()
|
# ASReml
#R
# GBLUP variance-component models for sugar kelp field trials, fitted with
# the proprietary asreml-R package.  Paths and the license key below are
# site-specific and will need to be changed on another machine.
library(asreml)
asreml.license.activate()
#enter this code CDEA-HECC-CDAH-FIED
setwd("/local/workdir/mh865/ASreml")
source("is.symmetric.matrix.R")
source("is.square.matrix.R")
source("is.positive.definite.R")
FileDir<-"/local/workdir/mh865/GCA_SCA/"
# Plot-level phenotypes (dataNHpiBoth_C) and the combined genomic
# relationship matrix (outCovComb4_dipOrder) are loaded from .rdata files.
load(paste0(FileDir,"OneTime1920/data/","dataNHpi_withChk_3_sets_PhotoScore23.rdata")) ## Plot
load(paste0(FileDir,"OneTime1920/data/","outCovComb_dip_0116_2021.Rdata"))
data<-droplevels(dataNHpiBoth_C)
data$Trait<-data$dryWgtPerM
data$Year<-as.factor(data$Year)
data$popChk<-as.factor(data$popChk)
data$line<-as.factor(data$line)
data$block<-as.factor(data$block)
# Subset the GRM to the crosses that appear in the phenotype data.
Trait_grm<-outCovComb4_dipOrder[rownames(outCovComb4_dipOrder)%in%as.character(data$Crosses),colnames(outCovComb4_dipOrder)%in%as.character(data$Crosses)]
### Adding the checks into the grm, all 1s in diagonal,all 0s for others
data[data$popChk=="ES",]$Crosses
droplevels(data[data$popChk=="ES",])$Crosses
ChkCross<-unique(droplevels(data[!data$popChk=="ES",])$Crosses) # 33 plots of checks, 4 unique ones
# Pad the GRM with the check entries: unrelated (0) to everything else and
# to each other, unit diagonal among themselves.
Col0<-matrix(0,nrow=nrow(Trait_grm),ncol=length(ChkCross))
colnames(Col0)<-ChkCross
Trait_grm2<-cbind(Trait_grm,Col0)
Row0<-matrix(0,nrow=length(ChkCross),ncol=ncol(Trait_grm))
Chk1<-diag(x=1,nrow=length(ChkCross),ncol=length(ChkCross))
Row0_Chk1<-cbind(Row0,Chk1)
rownames(Row0_Chk1)<-ChkCross
Trait_grm3<-rbind(Trait_grm2,Row0_Chk1)
# 2019 phenotypes are rescaled by sqrt(10) -- presumably to put the two
# years on a comparable scale; TODO confirm the rationale with the author.
data$Trait2<-ifelse(data$Year==2019,data$Trait*sqrt(10),data$Trait) # Yr1 phenotype * sqrt(10)
data1<-droplevels(data[data$Year==2019,])
data2<-droplevels(data[data$Year==2020,])
data1_grm<-Trait_grm3[rownames(Trait_grm3)%in%as.character(data1$Crosses),colnames(Trait_grm3)%in%as.character(data1$Crosses)]
data2_grm<-Trait_grm3[rownames(Trait_grm3)%in%as.character(data2$Crosses),colnames(Trait_grm3)%in%as.character(data2$Crosses)]
# Two-year model with an unstructured Year covariance for the genetic term,
# plus single-year models for comparison.
modBoth <- asreml(Trait2 ~ Year+line+block+popChk,
                  random= ~ us(Year):vm(Crosses,Trait_grm3),
                  data = data, maxiter=100, trace=TRUE)
mod1 <- asreml(Trait ~ line+block+popChk,
               random= ~ vm(Crosses,data1_grm),
               data = data1, maxiter=100, trace=TRUE)
mod2 <- asreml(Trait ~ line+block+popChk,
               random= ~ vm(Crosses,data2_grm),
               data = data2, maxiter=100, trace=TRUE)
save(data,Trait_grm,Trait_grm3,file="dataNHpiBoth_C_for_ASReml.rdata")
summary(mod1)$varcomp
#covarianceA_B/sqrt(varianceA*varianceB)
### Multi-Trait BLUP
#1. Make Ainverse
ls()
source("/Users/maohuang/Desktop/Kelp/SugarKelpBreeding/TraitAnalyses201003/Code_10032020/is.positive.definite.R")
source("/Users/maohuang/Desktop/Kelp/SugarKelpBreeding/TraitAnalyses201003/Code_10032020/is.square.matrix.R")
source("/Users/maohuang/Desktop/Kelp/SugarKelpBreeding/TraitAnalyses201003/Code_10032020/is.symmetric.matrix.R")
# mat2sparse <- function (X, rowNames = dimnames(X)[[1]])
# {
#   which <- (X != 0 & lower.tri(X, diag = TRUE))
#   df <- data.frame(row = t(row(X))[t(which)], col = t(col(X))[t(which)],
#                    val = t(X)[t(which)])
#   if (is.null(rowNames))
#     rowNames <- as.character(1:nrow(X))
#   attr(df, "rowNames") <- rowNames
#   df
# }
# Trait<-"DwPM"
######
# Add sqrt(10)-scaled 2019 values for each trait of interest as new
# "<trait>_sqrt10" columns.
for (t in c("dryWgtPerM","AshFDwPM")){
  dataNH$Trait<-ifelse(dataNH$Year==2019,dataNH[,t]*sqrt(10),dataNH[,t]) # Yr1 phenotype * sqrt(10)
  colnames(dataNH)[colnames(dataNH)=="Trait"]<-paste0(t,"_sqrt10")
}
Y<-dataNH
is.positive.definite(outCovComb4_dipOrder)
outCovComb4_dipOrder[1:4,1:5]
Amat<-outCovComb4_dipOrder[rownames(outCovComb4_dipOrder)%in%as.character(Y$Crosses),colnames(outCovComb4_dipOrder)%in%as.character(Y$Crosses)]
snpRelMat<-Amat
# Invert the relationship matrix (small ridge on the diagonal for numerical
# stability) and flag it as an inverse for asreml's vm().
Gsnp=solve(snpRelMat+diag(1e-6,length(snpRelMat[,1]),length(snpRelMat[,1])))
# Map elements in the relationship matrix to the phenotypes
#rownames(Gsnp)=levels(dataf$variety)
#colnames(Gsnp)=levels(dataf$variety)
attr(Gsnp, "INVERSE")=TRUE
# running two different GxE models
# Multi-trait GBLUP: unstructured trait covariance for both the genetic
# term and the residuals.
modMTM <- asreml(cbind(wetWgtPerM,percDryWgt,dryWgtPerM,densityBlades) ~ trait,
                 random= ~ us(trait):vm(Crosses,Gsnp),
                 residual = ~id(units):us(trait),
                 data = Y, maxiter=50, trace=TRUE)
####### Older way of package syntax???
#N<-nrow(Amat)
# AHAT.inv<-solve(Amat)
# AHAT.inv[1:5,1:5]
# det(AHAT.inv)
#AHAT.inv.sparse<-mat2sparse(AHAT.inv) #lower diag sparse matrix
# colnames(AHAT.inv.sparse)<-c('Row','Column','Ainverse')
# head(AHAT.inv.sparse)
# write.table(AHAT.inv.sparse,file=paste0(Trait,"_Amat-Inv-sparse.txt"))
# #2. Make dummy pedi_file
# peddummy<-matrix(ncol=3,nrow=N)
# colnames(peddummy)<-c("Individual","Female","Male")
# peddummy[,1]<-rownames(Amat)
# peddummy[,2]<-rep(0,length=N)
# peddummy[,3]<-rep(0,length=N)
# peddummy<-as.data.frame(peddummy)
# rownames(peddummy)<-rownames(Amat)
# write.table(peddummy,file=paste(Trait,"_peddummy.txt",sep=""))
#
# gmatrix<-data.frame(AHAT.inv.sparse)
# ainvped<-ainverse(peddummy)
# attr(gmatrix,"rowNames")<-attr(ainvped,"rowNames")
# Y$Genot<-as.factor(Y$Crosses) ###
# MultiTrait<-asreml(fixed=cbind(wetWgtPlot,wetWgtPerM,percDryWgt,dryWgtPerM,AshFreedryWgtPerM,densityBlades)~trait+line+block+popChk+Year,
#                    residual=~id(units):us(trait),
#                    random=~vm(Crosses,Trait_grm3),
#                    workspace=128e06,na.action=na.method(y="include"),
#                    data=data)
|
/OneTimePrediction_tst/code/ASreml_Genetic_Cor.R
|
no_license
|
MaoHuang2020/GCA_SCA
|
R
| false
| false
| 5,422
|
r
|
# ASReml
#R
# --- Setup: ASReml-R license, working directory, matrix-check helpers -------
library(asreml)
asreml.license.activate()
#enter this code CDEA-HECC-CDAH-FIED
setwd("/local/workdir/mh865/ASreml")
source("is.symmetric.matrix.R")
source("is.square.matrix.R")
source("is.positive.definite.R")
# --- Load plot-level phenotypes and the combined relationship matrix --------
# The .rdata files provide dataNHpiBoth_C (phenotypes) and
# outCovComb4_dipOrder (diploid relationship matrix) used below.
FileDir<-"/local/workdir/mh865/GCA_SCA/"
load(paste0(FileDir,"OneTime1920/data/","dataNHpi_withChk_3_sets_PhotoScore23.rdata")) ## Plot
load(paste0(FileDir,"OneTime1920/data/","outCovComb_dip_0116_2021.Rdata"))
data<-droplevels(dataNHpiBoth_C)
# Analyze dry weight per meter; design variables coded as factors for asreml.
data$Trait<-data$dryWgtPerM
data$Year<-as.factor(data$Year)
data$popChk<-as.factor(data$popChk)
data$line<-as.factor(data$line)
data$block<-as.factor(data$block)
# Subset the relationship matrix to the crosses present in the phenotype data.
Trait_grm<-outCovComb4_dipOrder[rownames(outCovComb4_dipOrder)%in%as.character(data$Crosses),colnames(outCovComb4_dipOrder)%in%as.character(data$Crosses)]
### Adding the checks into the grm, all 1s in diagonal,all 0s for others
# NOTE(review): the next two lines only print the check crosses to the
# console; nothing is stored from them.
data[data$popChk=="ES",]$Crosses
droplevels(data[data$popChk=="ES",])$Crosses
ChkCross<-unique(droplevels(data[!data$popChk=="ES",])$Crosses) # 33 plots of checks, 4 unique ones
# Pad the GRM with one row/column per check entry: zeros off-diagonal and an
# identity block on the diagonal (checks treated as unrelated).
Col0<-matrix(0,nrow=nrow(Trait_grm),ncol=length(ChkCross))
colnames(Col0)<-ChkCross
Trait_grm2<-cbind(Trait_grm,Col0)
Row0<-matrix(0,nrow=length(ChkCross),ncol=ncol(Trait_grm))
Chk1<-diag(x=1,nrow=length(ChkCross),ncol=length(ChkCross))
Row0_Chk1<-cbind(Row0,Chk1)
rownames(Row0_Chk1)<-ChkCross
Trait_grm3<-rbind(Trait_grm2,Row0_Chk1)
# Put 2019 phenotypes on the same scale as 2020 by multiplying by sqrt(10).
data$Trait2<-ifelse(data$Year==2019,data$Trait*sqrt(10),data$Trait) # Yr1 phenotype * sqrt(10)
data1<-droplevels(data[data$Year==2019,])
data2<-droplevels(data[data$Year==2020,])
data1_grm<-Trait_grm3[rownames(Trait_grm3)%in%as.character(data1$Crosses),colnames(Trait_grm3)%in%as.character(data1$Crosses)]
data2_grm<-Trait_grm3[rownames(Trait_grm3)%in%as.character(data2$Crosses),colnames(Trait_grm3)%in%as.character(data2$Crosses)]
# Joint model over both years: us(Year) fits an unstructured year-by-year
# genetic (co)variance, i.e. the between-year genetic covariance of the trait.
modBoth <- asreml(Trait2 ~ Year+line+block+popChk,
                  random= ~ us(Year):vm(Crosses,Trait_grm3),
                  data = data, maxiter=100, trace=TRUE)
# Single-year genomic models for comparison with the joint fit.
mod1 <- asreml(Trait ~ line+block+popChk,
               random= ~ vm(Crosses,data1_grm),
               data = data1, maxiter=100, trace=TRUE)
mod2 <- asreml(Trait ~ line+block+popChk,
               random= ~ vm(Crosses,data2_grm),
               data = data2, maxiter=100, trace=TRUE)
save(data,Trait_grm,Trait_grm3,file="dataNHpiBoth_C_for_ASReml.rdata")
# Variance components of the 2019 model; the genetic correlation would be
# computed from the joint model as:
#covarianceA_B/sqrt(varianceA*varianceB)
summary(mod1)$varcomp
### Multi-Trait BLUP
#1. Make Ainverse
ls()
# Local copies of the matrix-property helpers (duplicates of those sourced in
# the setup section above).
source("/Users/maohuang/Desktop/Kelp/SugarKelpBreeding/TraitAnalyses201003/Code_10032020/is.positive.definite.R")
source("/Users/maohuang/Desktop/Kelp/SugarKelpBreeding/TraitAnalyses201003/Code_10032020/is.square.matrix.R")
source("/Users/maohuang/Desktop/Kelp/SugarKelpBreeding/TraitAnalyses201003/Code_10032020/is.symmetric.matrix.R")
# mat2sparse <- function (X, rowNames = dimnames(X)[[1]])
# {
#   which <- (X != 0 & lower.tri(X, diag = TRUE))
#   df <- data.frame(row = t(row(X))[t(which)], col = t(col(X))[t(which)],
#     val = t(X)[t(which)])
#   if (is.null(rowNames))
#     rowNames <- as.character(1:nrow(X))
#   attr(df, "rowNames") <- rowNames
#   df
# }
# Trait<-"DwPM"
######
# Rescale the 2019 records of each trait by sqrt(10), storing the result in a
# new "<trait>_sqrt10" column.
# NOTE(review): dataNH is not created in this script; presumably it comes from
# one of the loaded .rdata files -- confirm before rerunning.
for (t in c("dryWgtPerM","AshFDwPM")){
dataNH$Trait<-ifelse(dataNH$Year==2019,dataNH[,t]*sqrt(10),dataNH[,t]) # Yr1 phenotype * sqrt(10)
colnames(dataNH)[colnames(dataNH)=="Trait"]<-paste0(t,"_sqrt10")
}
Y<-dataNH
# Sanity checks on the relationship matrix before inverting it.
is.positive.definite(outCovComb4_dipOrder)
outCovComb4_dipOrder[1:4,1:5]
Amat<-outCovComb4_dipOrder[rownames(outCovComb4_dipOrder)%in%as.character(Y$Crosses),colnames(outCovComb4_dipOrder)%in%as.character(Y$Crosses)]
snpRelMat<-Amat
# Invert the (ridge-stabilized) relationship matrix; the small diagonal
# addition guards against numerical singularity.
Gsnp=solve(snpRelMat+diag(1e-6,length(snpRelMat[,1]),length(snpRelMat[,1])))
# Map elements in the relationship matrix to the phenotypes
#rownames(Gsnp)=levels(dataf$variety)
#colnames(Gsnp)=levels(dataf$variety)
# Tell asreml that Gsnp is already an inverse (skips re-inversion in vm()).
attr(Gsnp, "INVERSE")=TRUE
# running two different GxE models
# Multi-trait model: us(trait) fits the unstructured genetic covariance among
# the four traits; the residual has an unstructured trait covariance per unit.
modMTM <- asreml(cbind(wetWgtPerM,percDryWgt,dryWgtPerM,densityBlades) ~ trait,
                 random= ~ us(trait):vm(Crosses,Gsnp),
                 residual = ~id(units):us(trait),
                 data = Y, maxiter=50, trace=TRUE)
####### Older way of package syntax???
#N<-nrow(Amat)
# AHAT.inv<-solve(Amat)
# AHAT.inv[1:5,1:5]
# det(AHAT.inv)
#AHAT.inv.sparse<-mat2sparse(AHAT.inv) #lower diag sparse matrix
# colnames(AHAT.inv.sparse)<-c('Row','Column','Ainverse')
# head(AHAT.inv.sparse)
# write.table(AHAT.inv.sparse,file=paste0(Trait,"_Amat-Inv-sparse.txt"))
# #2. Make dummy pedi_file
# peddummy<-matrix(ncol=3,nrow=N)
# colnames(peddummy)<-c("Individual","Female","Male")
# peddummy[,1]<-rownames(Amat)
# peddummy[,2]<-rep(0,length=N)
# peddummy[,3]<-rep(0,length=N)
# peddummy<-as.data.frame(peddummy)
# rownames(peddummy)<-rownames(Amat)
# write.table(peddummy,file=paste(Trait,"_peddummy.txt",sep=""))
#
# gmatrix<-data.frame(AHAT.inv.sparse)
# ainvped<-ainverse(peddummy)
# attr(gmatrix,"rowNames")<-attr(ainvped,"rowNames")
# Y$Genot<-as.factor(Y$Crosses) ###
# MultiTrait<-asreml(fixed=cbind(wetWgtPlot,wetWgtPerM,percDryWgt,dryWgtPerM,AshFreedryWgtPerM,densityBlades)~trait+line+block+popChk+Year,
# residual=~id(units):us(trait),
# random=~vm(Crosses,Trait_grm3),
# workspace=128e06,na.action=na.method(y="include"),
# data=data)
|
\name{scale_colour_gradient}
\alias{scale_color_continuous}
\alias{scale_color_gradient}
\alias{scale_colour_continuous}
\alias{scale_colour_gradient}
\alias{scale_fill_continuous}
\alias{scale_fill_gradient}
\title{Smooth gradient between two colours}
\usage{
scale_colour_gradient(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_fill_gradient(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_colour_continuous(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_fill_continuous(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_color_continuous(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_color_gradient(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
}
\arguments{
\item{...}{Other arguments passed on to
\code{\link{continuous_scale}} to control name, limits,
breaks, labels and so forth.}
\item{na.value}{Colour to use for missing values}
\item{low}{colour for low end of gradient.}
\item{high}{colour for high end of gradient.}
\item{space}{colour space in which to calculate gradient.
"Lab" usually best unless gradient goes through white.}
}
\description{
Default colours are generated with \pkg{munsell} and
\code{mnsl(c("2.5PB 2/4", "2.5PB 7/10"))}. Generally, for
continuous colour scales you want to keep hue constant,
but vary chroma and luminance. The \pkg{munsell} package
makes this easy to do using the Munsell colour system.
}
\examples{
# It's hard to see, but look for the bright yellow dot
# in the bottom right hand corner
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
(d <- qplot(x, y, data=dsub, colour=z))
# That one point throws our entire scale off. We could
# remove it, or manually tweak the limits of the scale
# Tweak scale limits. Any points outside these limits will not be
# plotted, and will not affect the calculation of statistics, etc
d + scale_colour_gradient(limits=c(3, 10))
d + scale_colour_gradient(limits=c(3, 4))
# Setting the limits manually is also useful when producing
# multiple plots that need to be comparable
# Alternatively we could try transforming the scale:
d + scale_colour_gradient(trans = "log")
d + scale_colour_gradient(trans = "sqrt")
# Other more trivial manipulations, including changing the name
# of the scale and the colours.
d + scale_colour_gradient("Depth")
d + scale_colour_gradient(expression(Depth[mm]))
d + scale_colour_gradient(limits=c(3, 4), low="red")
d + scale_colour_gradient(limits=c(3, 4), low="red", high="white")
# Much slower
d + scale_colour_gradient(limits=c(3, 4), low="red", high="white", space="Lab")
d + scale_colour_gradient(limits=c(3, 4), space="Lab")
# scale_fill_continuous works similarly, but for fill colours
(h <- qplot(x - y, data=dsub, geom="histogram", binwidth=0.01, fill=..count..))
h + scale_fill_continuous(low="black", high="pink", limits=c(0,3100))
# Colour of missing values is controlled with na.value:
miss <- sample(c(NA, 1:5), nrow(mtcars), rep = T)
qplot(mpg, wt, data = mtcars, colour = miss)
qplot(mpg, wt, data = mtcars, colour = miss) +
scale_colour_gradient(na.value = "black")
}
\seealso{
\code{\link[scales]{seq_gradient_pal}} for details on
underlying palette
Other colour scales: \code{\link{scale_color_brewer}},
\code{\link{scale_color_discrete}},
\code{\link{scale_color_gradient2}},
\code{\link{scale_color_gradientn}},
\code{\link{scale_color_grey}},
\code{\link{scale_color_hue}},
\code{\link{scale_colour_brewer}},
\code{\link{scale_colour_discrete}},
\code{\link{scale_colour_gradient2}},
\code{\link{scale_colour_gradientn}},
\code{\link{scale_colour_grey}},
\code{\link{scale_colour_hue}},
\code{\link{scale_fill_brewer}},
\code{\link{scale_fill_discrete}},
\code{\link{scale_fill_gradient2}},
\code{\link{scale_fill_gradientn}},
\code{\link{scale_fill_grey}},
\code{\link{scale_fill_hue}}
}
|
/man/scale_gradient.Rd
|
no_license
|
djmurphy420/ggplot2
|
R
| false
| false
| 4,055
|
rd
|
\name{scale_colour_gradient}
\alias{scale_color_continuous}
\alias{scale_color_gradient}
\alias{scale_colour_continuous}
\alias{scale_colour_gradient}
\alias{scale_fill_continuous}
\alias{scale_fill_gradient}
\title{Smooth gradient between two colours}
\usage{
scale_colour_gradient(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_fill_gradient(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_colour_continuous(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_fill_continuous(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_color_continuous(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
scale_color_gradient(..., low = "#132B43",
high = "#56B1F7", space = "Lab", na.value = "grey50")
}
\arguments{
\item{...}{Other arguments passed on to
\code{\link{continuous_scale}} to control name, limits,
breaks, labels and so forth.}
\item{na.value}{Colour to use for missing values}
\item{low}{colour for low end of gradient.}
\item{high}{colour for high end of gradient.}
\item{space}{colour space in which to calculate gradient.
"Lab" usually best unless gradient goes through white.}
}
\description{
Default colours are generated with \pkg{munsell} and
\code{mnsl(c("2.5PB 2/4", "2.5PB 7/10"))}. Generally, for
continuous colour scales you want to keep hue constant,
but vary chroma and luminance. The \pkg{munsell} package
makes this easy to do using the Munsell colour system.
}
\examples{
# It's hard to see, but look for the bright yellow dot
# in the bottom right hand corner
dsub <- subset(diamonds, x > 5 & x < 6 & y > 5 & y < 6)
(d <- qplot(x, y, data=dsub, colour=z))
# That one point throws our entire scale off. We could
# remove it, or manually tweak the limits of the scale
# Tweak scale limits. Any points outside these limits will not be
# plotted, and will not affect the calculation of statistics, etc
d + scale_colour_gradient(limits=c(3, 10))
d + scale_colour_gradient(limits=c(3, 4))
# Setting the limits manually is also useful when producing
# multiple plots that need to be comparable
# Alternatively we could try transforming the scale:
d + scale_colour_gradient(trans = "log")
d + scale_colour_gradient(trans = "sqrt")
# Other more trivial manipulations, including changing the name
# of the scale and the colours.
d + scale_colour_gradient("Depth")
d + scale_colour_gradient(expression(Depth[mm]))
d + scale_colour_gradient(limits=c(3, 4), low="red")
d + scale_colour_gradient(limits=c(3, 4), low="red", high="white")
# Much slower
d + scale_colour_gradient(limits=c(3, 4), low="red", high="white", space="Lab")
d + scale_colour_gradient(limits=c(3, 4), space="Lab")
# scale_fill_continuous works similarly, but for fill colours
(h <- qplot(x - y, data=dsub, geom="histogram", binwidth=0.01, fill=..count..))
h + scale_fill_continuous(low="black", high="pink", limits=c(0,3100))
# Colour of missing values is controlled with na.value:
miss <- sample(c(NA, 1:5), nrow(mtcars), rep = T)
qplot(mpg, wt, data = mtcars, colour = miss)
qplot(mpg, wt, data = mtcars, colour = miss) +
scale_colour_gradient(na.value = "black")
}
\seealso{
\code{\link[scales]{seq_gradient_pal}} for details on
underlying palette
Other colour scales: \code{\link{scale_color_brewer}},
\code{\link{scale_color_discrete}},
\code{\link{scale_color_gradient2}},
\code{\link{scale_color_gradientn}},
\code{\link{scale_color_grey}},
\code{\link{scale_color_hue}},
\code{\link{scale_colour_brewer}},
\code{\link{scale_colour_discrete}},
\code{\link{scale_colour_gradient2}},
\code{\link{scale_colour_gradientn}},
\code{\link{scale_colour_grey}},
\code{\link{scale_colour_hue}},
\code{\link{scale_fill_brewer}},
\code{\link{scale_fill_discrete}},
\code{\link{scale_fill_gradient2}},
\code{\link{scale_fill_gradientn}},
\code{\link{scale_fill_grey}},
\code{\link{scale_fill_hue}}
}
|
#' @title Tidy Catdesc
#' @description Uses FactoMineR::catdesc function to describe the categories of one factor by categorical variables and/or by quantitative variables
#' @param df data frame to analyse
#' @param cluster cluster column name
#'
#' @examples
#'
#' library(FactoMineR)
#' data(iris)
#' # Principal Component Analysis:
#' res.pca <- PCA(iris[,1:4], graph=FALSE)
#' # Clustering, auto nb of clusters:
#' hc <- HCPC(res.pca, nb.clust=-1)
#'
#' hc$data.clust %>% tidy_catdesc(., clust)
#' hc %>% tidy_catdesc()
#' @return list object
#' @export
tidy_catdesc <- function(df, cluster){
  # Scope the stringsAsFactors change to this call only: restore the caller's
  # option on exit instead of permanently mutating global state.
  # (Also: use TRUE/FALSE, never T/F, which are reassignable.)
  old_opts <- options(stringsAsFactors = FALSE)
  on.exit(options(old_opts), add = TRUE)
  load_pkg(c("purrr", "dplyr", 'FactoMineR'))
  # Capture the cluster column lazily; a missing argument becomes a NULL
  # sentinel that is checked below for non-HCPC input.
  if(missing(cluster)){
    cluster_col <- NULL
  } else {
    cluster_col <- enquo(cluster)
  }
  # An HCPC result carries its own cluster column ("clust"); for a plain data
  # frame the caller must name the cluster column explicitly.
  if(inherits(df, "HCPC")){
    df <- df$data.clust %>%
      data.frame %>%
      select(clust, everything())
  } else {
    if(is.null(cluster_col)) stop("Please provide cluster column name for analysis")
    df <- df %>%
      select(!!cluster_col, everything())
  }
  # catdes() requires the grouping variable to be a factor in column 1.
  df[,1] <- factor(df %>% pull(1))
  df <- df %>% data.frame
  res_catdes <- catdes(df, 1)
  quali <- res_catdes['category'][[1]]
  quanti <- res_catdes['quanti'][[1]]
  # Flatten the per-cluster result lists into tidy data frames, tagging each
  # row with its cluster, the described variable, and the variable type.
  if(!is.null(quali))
  {
    quali <- quali %>%
      purrr::compact() %>%
      Map(data.frame, cluster = names(.), .) %>%
      map(~.x %>% data.frame %>% tibble::rownames_to_column("variable")) %>%
      reduce(rbind) %>%
      mutate(type_variable = "qualitative")
  } else { quali <- data.frame(Message = "No qualitative variables were used")}
  if(!is.null(quanti)){
    quanti <- quanti %>%
      purrr::compact() %>%
      Map(data.frame, cluster = names(.), .) %>%
      map(~.x %>% data.frame %>% tibble::rownames_to_column("variable")) %>%
      reduce(rbind) %>%
      mutate(type_variable = "quantitative")
  } else { quanti <- data.frame(Message = "No quantitative variables were used")}
  list(quali, quanti)
}
|
/R/tidy_catdesc.R
|
permissive
|
HanjoStudy/quotidieR
|
R
| false
| false
| 1,997
|
r
|
#' @title Tidy Catdesc
#' @description Uses FactoMineR::catdesc function to describe the categories of one factor by categorical variables and/or by quantitative variables
#' @param df data frame to analyse
#' @param cluster cluster column name
#'
#' @examples
#'
#' library(FactoMineR)
#' data(iris)
#' # Principal Component Analysis:
#' res.pca <- PCA(iris[,1:4], graph=FALSE)
#' # Clustering, auto nb of clusters:
#' hc <- HCPC(res.pca, nb.clust=-1)
#'
#' hc$data.clust %>% tidy_catdesc(., clust)
#' hc %>% tidy_catdesc()
#' @return list object
#' @export
tidy_catdesc <- function(df, cluster){
  # NOTE(review): this mutates the global option for the rest of the session
  # (no restore), and T/F should be spelled TRUE/FALSE.
  options(stringsAsFactors = F)
  load_pkg(c("purrr", "dplyr", 'FactoMineR'))
  # Capture the cluster column lazily; missing => NULL sentinel checked below.
  if(missing(cluster)){
    cluster_col <- NULL
  } else {
    cluster_col <- enquo(cluster)
  }
  # An HCPC result carries its own cluster column ("clust"); a plain data
  # frame needs the cluster column named by the caller.
  if(class(df)[1] %in% c("HCPC")){
    df <- df$data.clust %>%
      data.frame %>%
      select(clust, everything())
  } else {
    if(is.null(cluster_col)) stop("Please provide cluster column name for analysis")
    df <- df %>%
      select(!!cluster_col, everything())
  }
  # catdes() requires the grouping variable as a factor in column 1.
  df[,1] <- factor(df %>% pull(1))
  df <- df %>% data.frame
  res_catdes <- catdes(df, 1)
  quali <- res_catdes['category'][[1]]
  quanti <- res_catdes['quanti'][[1]]
  # Flatten the per-cluster result lists into tidy data frames, tagging each
  # row with its cluster, the described variable, and the variable type.
  if(!is.null(quali))
  {
    quali <- quali %>%
      purrr::compact() %>%
      Map(data.frame, cluster = names(.), .) %>%
      map(~.x %>% data.frame %>% tibble::rownames_to_column("variable")) %>%
      reduce(rbind) %>%
      mutate(type_variable = "qualitative")
  } else { quali <- data.frame(Message = "No qualitative variables were used")}
  if(!is.null(quanti)){
    quanti <- quanti %>%
      purrr::compact() %>%
      Map(data.frame, cluster = names(.), .) %>%
      map(~.x %>% data.frame %>% tibble::rownames_to_column("variable")) %>%
      reduce(rbind) %>%
      mutate(type_variable = "quantitative")
  } else { quanti <- data.frame(Message = "No quantitative variables were used")}
  # Returns list(qualitative results, quantitative results); either element is
  # a one-column message data frame when that variable type was absent.
  list(quali, quanti)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LossFunctions.R
\name{loss_MAE_hess}
\alias{loss_MAE_hess}
\title{Mean Absolute Error (hessian function)}
\usage{
loss_MAE_hess(y_pred, y_true)
}
\arguments{
\item{y_pred}{The \code{predictions}.}
\item{y_true}{The \code{labels}.}
}
\value{
The hessian of the Absolute Error per value.
}
\description{
This function computes the Mean Absolute Error loss (MAE) hessian per value provided \code{y_pred} and \code{y_true}.
}
\details{
Supposing: \eqn{x = preds - labels}
Loss Formula : \eqn{abs(x)}
Gradient Formula : \eqn{sign(x)}
Hessian Formula : \eqn{0}
}
|
/man/loss_MAE_hess.Rd
|
no_license
|
BruceZhaoR/Laurae
|
R
| false
| true
| 639
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LossFunctions.R
\name{loss_MAE_hess}
\alias{loss_MAE_hess}
\title{Mean Absolute Error (hessian function)}
\usage{
loss_MAE_hess(y_pred, y_true)
}
\arguments{
\item{y_pred}{The \code{predictions}.}
\item{y_true}{The \code{labels}.}
}
\value{
The hessian of the Absolute Error per value.
}
\description{
This function computes the Mean Absolute Error loss (MAE) hessian per value provided \code{y_pred} and \code{y_true}.
}
\details{
Supposing: \eqn{x = preds - labels}
Loss Formula : \eqn{abs(x)}
Gradient Formula : \eqn{sign(x)}
Hessian Formula : \eqn{0}
}
|
# Urn model: 2 red beads and 3 blue beads.
beads <- rep(c("red", "blue"), times = c(2,3)) # create an urn with 2 red, 3 blue
beads # view beads object
sample(beads, 1) # sample 1 bead at random
# FIX: mean() on a character vector returns NA with a warning; compute the
# proportion of blue beads instead (theoretical P(blue) = 3/5 = 0.6).
mean(beads == "blue")
B <- 10000 # number of times to draw 1 bead
events <- replicate(B, sample(beads, 1)) # draw 1 bead, B times
tab <- table(events) # make a table of outcome counts
tab # view count table
prop.table(tab) # view table of outcome proportions
# Second experiment: urn with 3 cyan, 5 magenta, and 7 yellow beads.
rolls<-rep(c("cyan", "magenta", "yellow"), times =c(3,5,7))
B<-100000
events<-replicate(B,sample(rolls,1)) # B single-bead draws with replacement
tabs<-table(events)
prop.table(tabs) # empirical proportions approximate 3/15, 5/15, 7/15
|
/probability.R
|
no_license
|
mafis103/RFiles
|
R
| false
| false
| 569
|
r
|
# Urn model: 2 red beads and 3 blue beads.
beads <- rep(c("red", "blue"), times = c(2,3)) # create an urn with 2 red, 3 blue
beads # view beads object
sample(beads, 1) # sample 1 bead at random
# NOTE(review): mean() of a character vector is NA with a warning; the intent
# was presumably a proportion such as mean(beads == "blue") -- confirm.
mean(beads)
B <- 10000 # number of times to draw 1 bead
events <- replicate(B, sample(beads, 1)) # draw 1 bead, B times
tab <- table(events) # make a table of outcome counts
tab # view count table
prop.table(tab) # view table of outcome proportions
# Second experiment: urn with 3 cyan, 5 magenta, and 7 yellow beads; note that
# B and events are reused (overwritten) here.
rolls<-rep(c("cyan", "magenta", "yellow"), times =c(3,5,7))
B<-100000
events<-replicate(B,sample(rolls,1))
tabs<-table(events)
prop.table(tabs) # empirical proportions approximate 3/15, 5/15, 7/15
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{calc_zipstat_over_duration}
\alias{calc_zipstat_over_duration}
\title{Calculate the ZIP window statistic over all durations, for a given zone.}
\usage{
calc_zipstat_over_duration(duration, p, mu, y, maxdur, tol = 0.01)
}
\arguments{
\item{duration}{An integer vector.}
\item{p}{A numeric vector of the given/estimated excess zero probabilities
corresponding to each count.}
\item{mu}{A numeric vector of the given/estimated Poisson expected value
parameters corresponding to each count. Of same length as \code{p}.}
\item{y}{An integer vector of the observed counts, of same length as
\code{p}.}
\item{maxdur}{An integer giving the maximum duration considered; the
statistic is computed for durations 1 through \code{maxdur}.}
\item{tol}{A scalar between 0 and 1. It is the absolute tolerance criterion
for the estimate of the excess zero indicator; convergence is reached when
two successive elements in the sequence of estimates have an absolute
difference less than \code{tol}.}
}
\value{
A list with two elements:
\describe{
\item{duration}{Vector of integers from 1 to \code{maxdur}.}
\item{statistic}{Numeric vector containing the ZIP statistics corresponding
to each duration, for the given spatial zone.}
}
}
\description{
This function calculates the zero-inflated Poisson statistic for a given
spatial zone, for all durations considered.
}
\keyword{internal}
|
/man/calc_zipstat_over_duration.Rd
|
no_license
|
rfsaldanha/scanstatistics
|
R
| false
| true
| 1,351
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{calc_zipstat_over_duration}
\alias{calc_zipstat_over_duration}
\title{Calculate the ZIP window statistic over all durations, for a given zone.}
\usage{
calc_zipstat_over_duration(duration, p, mu, y, maxdur, tol = 0.01)
}
\arguments{
\item{duration}{An integer vector.}
\item{p}{A numeric vector of the given/estimated excess zero probabilities
corresponding to each count.}
\item{mu}{A numeric vector of the given/estimated Poisson expected value
parameters corresponding to each count. Of same length as \code{p}.}
\item{y}{An integer vector of the observed counts, of same length as
\code{p}.}
\item{maxdur}{An integer giving the maximum duration considered; the
statistic is computed for durations 1 through \code{maxdur}.}
\item{tol}{A scalar between 0 and 1. It is the absolute tolerance criterion
for the estimate of the excess zero indicator; convergence is reached when
two successive elements in the sequence of estimates have an absolute
difference less than \code{tol}.}
}
\value{
A list with two elements:
\describe{
\item{duration}{Vector of integers from 1 to \code{maxdur}.}
\item{statistic}{Numeric vector containing the ZIP statistics corresponding
to each duration, for the given spatial zone.}
}
}
\description{
This function calculates the zero-inflated Poisson statistic for a given
spatial zone, for all durations considered.
}
\keyword{internal}
|
#' Calculate the thermal conductivity of air, W/(m K).
#'
#' Computes the thermal conductivity of dry air at a given temperature as
#' (cp + 1.25 * R_air) * viscosity(Tk).
#'
#' @param Tk Air temperature in Kelvin (numeric).
#'
#' @return Thermal conductivity of air, W/(m K).
#'
#' @author Ana Casanueva (05.01.2017).
#' @details Reference: BSL, page 257. Relies on \code{viscosity()}, defined
#'   elsewhere in this package, for the dynamic viscosity of air at \code{Tk}.
##############################################################################
thermal_cond <- function(Tk){
  m.air <- 28.97    # molar mass of dry air, g/mol
  r.gas <- 8314.34  # universal gas constant, J/(kmol K)
  r.air <- r.gas / m.air  # specific gas constant of dry air
  cp <- 1003.5 # heat capacity at constant pressure of dry air, J/(kg K)
  # Calculate the thermal conductivity of air, W/(m K)
  therm.con <- (cp + 1.25 * r.air) * viscosity(Tk)
  return(therm.con)
}
|
/R/thermal_cond.R
|
no_license
|
jonasbhend/HeatStress
|
R
| false
| false
| 673
|
r
|
#' Calculate the thermal conductivity of air, W/(m K).
#'
#' Calculate the thermal conductivity of air, W/(m K).
#'
#' @param Tk: value of air temperature in Kelvin.
#'
#' @return Thermal conductivity of air, W/(m K).
#'
#' @author Ana Casanueva (05.01.2017).
#' @details Reference: BSL, page 257.
##############################################################################
thermal_cond <- function(Tk){
  # Specific gas constant of dry air, J/(kg K): universal gas constant
  # (8314.34 J/(kmol K)) divided by the molar mass of dry air (28.97 g/mol).
  air_gas_const <- 8314.34 / 28.97
  heat_cap_p <- 1003.5 # heat capacity at constant pressure of dry air
  # Thermal conductivity of air, W/(m K); viscosity() is defined elsewhere
  # in the package. Reference: BSL, page 257.
  (heat_cap_p + 1.25 * air_gas_const) * viscosity(Tk)
}
|
#' @title A checkFunction
#'
#' @description A \code{\link{checkFunction}} to be called from
#' \code{\link{check}} for identifying numeric variables that have
#' been misclassified as categorical.
#'
#' @param v A character, factor, or labelled variable to check.
#'
#' @param nVals An integer determining how many unique values a variable must have
#' before it can potentially be determined to be a misclassified numeric variable.
#' The default is \code{12}.
#'
#' @param ... Not in use.
#'
#' @return A \code{\link{checkResult}} with three entires:
#' \code{$problem} (a logical indicating the variable is suspected to be
#' a misclassified numeric variable), \code{$message} (if a problem was found,
#' the following message: "Note: The variable consists exclusively of numbers and takes
#' a lot of different values. Is it perhaps a misclassified numeric variable?",
#' otherwise "") and \code{$problemValues} (always \code{NULL}).
#'
#' @details A categorical variable is suspected to be a misclassified
#' numeric variable if it has the following two properties: First,
#' it should consist exclusively of numbers (possibly including signs
#' and decimals points). Secondly, it must have at least \code{nVals} unique values.
#' The default values of \code{nVals} is 12, which means that
#' e.g. variables including answers on a scale from 0-10 will
#' not be recognized as misclassified numerics.
#'
#' @seealso \code{\link{check}}, \code{\link{allCheckFunctions}},
#' \code{\link{checkFunction}}, \code{\link{checkResult}}
#'
#' @examples
#' #Positive and negative numbers, saved as characters
#' identifyNums(c(as.character(-9:9)))
#'
#' #An ordinary character variable
#' identifyNums(c("a", "b", "c", "d", "e.f", "-a", 1:100))
#'
#'
#' @importFrom stats na.omit
#' @importFrom haven as_factor
#' @export
identifyNums <- function(v, nVals = 12, ...) {
  res <- list(problem = FALSE, message = "", problemValues = NULL)
  # as_factor leaves factors alone and turns character/labelled input into a
  # factor; then coerce to character, dropping missing entries.
  vals <- as.character(na.omit(haven::as_factor(v)))
  # Only variables with at least nVals distinct values can be flagged.
  if (length(unique(vals)) >= nVals) {
    # Empty strings would survive the stripping below as "numeric-looking";
    # mark them with a letter so they count as non-numeric.
    vals[vals == ""] <- "a"
    # Strip one leading sign, then all decimal points, then all digits: a
    # purely numeric-looking value reduces to the empty string.
    stripped <- gsub("[[:digit:]]", "",
                     gsub("\\.{1}", "",
                          gsub("^-{1}", "", vals)))
    if (sum(nchar(stripped)) == 0) {
      res$problem <- TRUE
      res$message <- "Note: The variable consists exclusively of numbers and takes a lot of different values. Is it perhaps a misclassified numeric variable?"
    }
  }
  checkResult(res)
}
#' @include checkFunction.R
identifyNums <- checkFunction(identifyNums,
"Identify misclassified numeric or integer variables",
c("character", "factor", "labelled"))
|
/R/identifyNums.R
|
no_license
|
epijim/dataMaid
|
R
| false
| false
| 2,866
|
r
|
#' @title A checkFunction
#'
#' @description A \code{\link{checkFunction}} to be called from
#' \code{\link{check}} for identifying numeric variables that have
#' been misclassified as categorical.
#'
#' @param v A character, factor, or labelled variable to check.
#'
#' @param nVals An integer determining how many unique values a variable must have
#' before it can potentially be determined to be a misclassified numeric variable.
#' The default is \code{12}.
#'
#' @param ... Not in use.
#'
#' @return A \code{\link{checkResult}} with three entires:
#' \code{$problem} (a logical indicating the variable is suspected to be
#' a misclassified numeric variable), \code{$message} (if a problem was found,
#' the following message: "Note: The variable consists exclusively of numbers and takes
#' a lot of different values. Is it perhaps a misclassified numeric variable?",
#' otherwise "") and \code{$problemValues} (always \code{NULL}).
#'
#' @details A categorical variable is suspected to be a misclassified
#' numeric variable if it has the following two properties: First,
#' it should consist exclusively of numbers (possibly including signs
#' and decimals points). Secondly, it must have at least \code{nVals} unique values.
#' The default values of \code{nVals} is 12, which means that
#' e.g. variables including answers on a scale from 0-10 will
#' not be recognized as misclassified numerics.
#'
#' @seealso \code{\link{check}}, \code{\link{allCheckFunctions}},
#' \code{\link{checkFunction}}, \code{\link{checkResult}}
#'
#' @examples
#' #Positive and negative numbers, saved as characters
#' identifyNums(c(as.character(-9:9)))
#'
#' #An ordinary character variable
#' identifyNums(c("a", "b", "c", "d", "e.f", "-a", 1:100))
#'
#'
#' @importFrom stats na.omit
#' @importFrom haven as_factor
#' @export
identifyNums <- function(v, nVals = 12, ...) {
  # Default result: no problem found.
  out <- list(problem = FALSE, message = "", problemValues = NULL)
  #note: as_factor does nothing to factor variables and makes
  #char and labelled variables into factors
  v <- as.character(na.omit(haven::as_factor(v)))
  # Too few distinct values: cannot be a misclassified numeric by definition.
  if (length(unique(v)) < nVals) {
    return(checkResult(out))
  }
  v[v==""] <- "a" #make sure v contains no empty strings
  v <- gsub("^-{1}", "", v) #remove one leading sign (prefixed -)
  v <- gsub("\\.{1}", "", v) #remove decimal points (gsub removes ALL dots, so e.g. "1.2.3" is also treated as numeric-looking)
  v <- gsub("[[:digit:]]", "", v) #replace numbers with empty strings
  # If nothing but signs/dots/digits was present, every value is now empty.
  if (sum(nchar(v)) == 0) {
    out$problem <- TRUE
    out$message <- "Note: The variable consists exclusively of numbers and takes a lot of different values. Is it perhaps a misclassified numeric variable?"
  }
  checkResult(out)
}
#' @include checkFunction.R
# Wrap identifyNums as a checkFunction object so check() can dispatch to it.
# The second argument is the human-readable description shown in reports and
# the third lists the variable classes this check accepts.
identifyNums <- checkFunction(identifyNums,
                              "Identify misclassified numeric or integer variables",
                              c("character", "factor", "labelled"))
|
library(shiny)
library(tidyverse)
library(scales)
library(bslib)
library(rsconnect)
library(shinythemes)
library(plotly)
library(shinyWidgets)
# Load the Pew ATP Wave 56 survey data and drop the open-ended, technical,
# and weighting columns that are not used anywhere in the app.
raw_data <- read_csv("data.csv") %>%
  select(
    -QKEY, -INTERVIEW_START_W56, -INTERVIEW_END_W56, -DEVICE_TYPE_W56, -SAMPLE_W56, -FORM_W56, -WHYDATE10YRHARDOE_M1_W56, -WHYDATE10YRHARDOE_M2_W56, -WHYDATE10YRHARDOE_M3_W56, -WHYDATE10YRHARD_TECH_W56, -WHYDATE10YREASYOE_M1_W56, -WHYDATE10YREASYOE_M2_W56, -WHYDATE10YREASYOE_M3_W56, -WHYDATE10YREASY_TECH_W56, -ONIMPACTPOSOE_M1_W56, -ONIMPACTPOSOE_M2_W56,
    -ONIMPACTPOSOE_M3_W56, -ONIMPACTNEGOE_M1_W56, -ONIMPACTNEGOE_M2_W56,
    -ONIMPACTNEGOE_M3_W56, -F_ACSWEB, -F_VOLSUM, -WEIGHT_W56_ATPONLY, -WEIGHT_W56, -F_ATTEND
  )
# Get variable names from the original dataset.
# names() returns the column names directly; the previous dput() call printed
# a deparsed copy to the console as a side effect before invisibly returning
# the same value -- pure console noise in a deployed Shiny app.
var_names <- names(raw_data)
# Convert the variable names to a one-column matrix so the column is named
# "var_names" when combined into the lookup_questions data frame below.
var_names <- cbind(var_names)
# Get questions from questionnaire corresponding to each variable
# NOTE: the i-th question below is paired positionally with the i-th column of
# raw_data (via data.frame(var_names, questions) at the bottom), so this
# vector must stay in the same order and have the same length as
# names(raw_data) after the select() above. Some wording was shortened or
# clarified relative to the original questionnaire.
questions <- c(
  "Marital Status",
  "Current Committed Relationship Status",
  "Have you ever been in a committed romantic relationship?",
  "Are you currently casually dating anyone?",
  "What they are seeking in their dating/romantic life",
  "Whether \"Just like being single\" is a reason why not seeking relationship/dating",
  "Whether \"Have more important priorities right now\" is a reason why not seeking relationship/dating",
  "Whether \"Feel like I am too old to date\" is a reason why not seeking relationship/dating",
  "Whether \"Have health problems that make it difficult to date\" is a reason why not seeking relationship/dating",
  "Whether \"Haven’t had luck with dating or relationships in the past\" is a reason why not seeking relationship/dating",
  "Whether \"Too busy\" is a reason why not seeking relationship/dating",
  "Whether \"Feel like no one would be interested in dating me\" is a reason why not seeking relationship/dating",
  "Whether \"Not ready to date after losing my spouse (if widowed) or ending a relationship\" is a reason why not seeking relationship/dating",
  "How long have you been in your current romantic relationship?",
  "Overall, would you say that things in your relationship are going...",
  "Overall, would you say that things in your dating life are going...",
  "Have you ever used an online dating site or dating app?",
  "Are you currently using an online dating site or dating app?",
  "How did you first meet your spouse or partner?",
  "Where online did you first meet your spouse or partner?",
  "Compared to 10 years ago, for most people, do you think dating is...",
  "Is giving a hug acceptable on a first date?",
  "Is kissing acceptable on a first date?",
  "Is having sex acceptable on a first date?",
  "Is sex between unmarried adults who are in a committed relationship acceptable?",
  "Is having an open relationship- that is, a committed relationship where both people agree that it is acceptable to date or have sex with other people acceptable?",
  "Is casual sex between consenting adults who are not in a committed relationship acceptable?",
  "Is two consenting adults exchanging sexually explicit images of themselves acceptable?",
  "Is kissing someone on a date without asking permission first acceptable?",
  "Have you ever searched for information online about someone you were romantically interested in?",
  "Regardless of whether you would do it yourself, do you think it’s ever acceptable for someone to look through their significant other’s cellphone without their knowledge?",
  "If you decided after a first date that you didn’t want to go out with that person again, what is the most likely way you would let them know?",
  "Is it acceptable to break up with someone you're casually dating in person?",
  "Is it acceptable to break up with someone you're casually dating through a phone call?",
  "Is it acceptable to break up with someone you're casually dating through email?",
  "Is it acceptable to break up with someone you're casually dating through a private message on a social media site?",
  "Is it acceptable to break up with someone you're casually dating through a text message?",
  "Is it acceptable to break up with someone you're in a committed relationship with in person?",
  "Is it acceptable to break up with someone you're in a committed relationship with through a phone call?",
  "Is it acceptable to break up with someone you're in a committed relationship with through email?",
  "Is it acceptable to break up with someone you're in a committed relationship with through a private message on a social media site?",
  "Is it acceptable to break up with someone you're in a committed relationship with through a text message?",
  "Do you think the increased focus on sexual harassment and assault over the last few years has made it easier or harder for MEN to know how to interact with someone they’re on a date with?",
  "Do you think the increased focus on sexual harassment and assault over the last few years has made it easier or harder for WOMEN to know how to interact with someone they’re on a date with?",
  "Overall, what type of effect would you say online dating sites and dating apps have had on dating and relationships?",
  "Compared to relationships that begin in person, in general, do you think relationships where people first meet through an online dating site or dating app are…",
  "In general, how safe do you think online dating sites and dating apps are as a way to meet people?",
  "How common is people being harassed or bullied on online dating sites and dating apps?",
  "How common is people receiving sexually explicit messages or images they did not ask for on online dating sites and dating apps?",
  "How common is people lying about themselves to appear more desirable on online dating sites and dating apps?",
  "How common are privacy violations, such as data breaches or identity theft on online dating sites and dating apps?",
  "How common is people setting up fake accounts in order to scam others on online dating sites and dating apps?",
  "Would you ever consider being in a committed relationship with someone who is of a different religion than you?",
  "Would you ever consider being in a committed relationship with someone who is of a different race or ethnicity than you?",
  "Would you ever consider being in a committed relationship with someone who has a significant amount of debt?",
  "Would you ever consider being in a committed relationship with someone who is raising children from another relationship?",
  "Would you ever consider being in a committed relationship with someone who lives far away from you?",
  "Would you ever consider being in a committed relationship with someone who is a Republican?",
  "Would you ever consider being in a committed relationship with someone who is a Democrat?",
  "Would you ever consider being in a committed relationship with someone who makes significantly more money than you?",
  "Would you ever consider being in a committed relationship with someone who makes significantly less money than you?",
  "Would you ever consider being in a committed relationship with someone who voted for Donald Trump?",
  "Would you ever consider being in a committed relationship with someone who voted for Hillary Clinton?",
  "Would you ever consider being in a committed relationship with someone who is 10 years older than you?",
  "Would you ever consider being in a committed relationship with someone who is 10 years younger than you?",
  "How much pressure, if any, do you feel from family members to be in a committed relationship?",
  "How much pressure, if any, do you feel from your friends to be in a committed relationship?",
  "How much pressure, if any, do you feel from society to be in a committed relationship?",
  "In the past year, how easy or difficult has it been for you to find people to date?",
  "It has been difficult for you to find people to date because... there is a limited number of people in my area for me to date",
  "It has been difficult for you to find people to date because... it's hard for me to find someone who meets my expectations",
  "It has been difficult for you to find people to date because... it's hard to find someone who's looking for the same type of relationship as me",
  "It has been difficult for you to find people to date because... it's hard for me to approach people",
  "It has been difficult for you to find people to date because... people aren't interested in dating me",
  "It has been difficult for you to find people to date because... I'm too busy",
  "Overall, would you say your OWN personal experiences with online dating sites or dating apps have been…",
  "In general in the past year, has using online dating sites or dating apps made you feel more confident or insecure?",
  "In general in the past year, has using online dating sites or dating apps made you feel more optimistic or pessimistic?",
  "In general in the past year, has using online dating sites or dating apps made you feel more hopeful or frustrated?",
  "Have you ever gone on a date with someone you met through an online dating site or dating app?",
  "Have you ever been in a committed relationship or married someone you first met through an online dating site or dating app?",
  "Have you ever come across the online dating profile of someone you already know offline?",
  "How important is it to you that online profiles included hobbies and interests?",
  "How important is it to you that online profiles included political affiliation?",
  "How important is it to you that online profiles included religious beliefs?",
  "How important is it to you that online profiles included occupation?",
  "How important is it to you that online profiles included racial or ethnic background?",
  "How important is it to you that online profiles included height?",
  "How important is it to you that online profiles included if they have children?",
  "How important is it to you that online profiles included type of relationship they're looking for?",
  "How important is it to you that online profiles included photos of themselves?",
  "How easy or difficult was it for you to find people on online dating sites or dating apps who you were physically attracted to?",
  "How easy or difficult was it for you to find people on online dating sites or dating apps who shared your hobbies and interests?",
  "How easy or difficult was it for you to find people on online dating sites or dating apps who were looking for the same kind of relationship as you?",
  "How easy or difficult was it for you to find people on online dating sites or dating apps who seemed like someone you would want to meet in person?",
  "How would you characterize the number of messages you have received on dating sites/apps?",
  "How would you characterize the number of messages you have received from people you were interested in on dating sites/apps?",
  "How well, if at all, do you feel you understand why online dating sites or dating apps present certain people as potential matches for you?",
  "How concerned are you, if at all, about how much data online dating sites or dating apps collect about you?",
  "Do you ever use social media sites, like Facebook, Twitter, or Instagram?",
  "How often, if ever, do you see people posting things about their romantic relationships on social media?",
  "In general, do the posts you see on social media about other people’s romantic relationships make you feel better or worse about your own relationships?",
  "In general, do the posts you see on social media about other people’s romantic relationships make you feel better or worse about your own dating life?",
  "Have you ever used social media to check up on someone that you used to date or be in a relationship with?",
  "Have you ever used social media to share or discuss things about your relationship or dating life?",
  "As far as you know, does your spouse or partner have a cellphone?",
  "As far as you know, does your spouse or partner use social media sites?",
  "As far as you know, does your spouse or partner play video games on a computer, game console or cellphone?",
  "How important, if at all, is social media to you personally when it comes to keeping up with what's going on your spouse's or partner's life?",
  "How important, if at all, is social media to you personally when it comes to showing how much you care about your spouse or partner?",
  "Have you ever felt jealous or unsure about your relationship because of the way your current spouse or partner interacts with other people on social media?",
  "How often, if ever, do you feel as if your spouse or partner is distracted by their cellphone when you are trying to have a conversation with them?",
  "How often, if ever, are you bothered by the amount of time your spouse or partner spends on their cellphone?",
  "How often, if ever, are you bothered by the amount of time your spouse or partner spends on social media sites?",
  "How often, if ever, are you bothered by the amount of time your spouse or partner spends playing video games?",
  "Have you ever given your spouse or partner the password or passcode to your email account?",
  "Have you ever given your spouse or partner the password or passcode to any of your social media accounts?",
  "Have you ever given your spouse or partner the password or passcode to your cellphone?",
  "Have you ever looked through your current spouse's or partner's cellphone without their knowledge?",
  "Have you ever heard of ghosting?",
  "Have you ever heard of breadcrumbing?",
  "Have you ever heard of phubbing?",
  "Have you ever heard of catfishing?",
  "Have you ever heard of friends with benefits?",
  "Have you ever had someone you’ve gone out with suddenly stop answering your phone calls or messages without explanation (sometimes called “ghosting”)?",
  "Has someone you were dating or on a date with ever pressured you for sex?",
  "Has someone you were dating or on a date with ever touched you in a way that made you feel uncomfortable?",
  "Has someone you were dating or on a date with ever sent you sexually explicit images that you didn't ask for?",
  "As far as you know, has someone you were dating or been on a date with ever spread rumors about your sexual history?",
  "As far as you know, has someone you were dating or been on a date with ever shared a sexually explicit image of you without your consent?",
  "As far as you know, has someone you were dating or been on a date with ever publically shared your contact information or address without your permission?",
  "Thinking about your own personal experiences, has someone ever called you an offensive name ON AN ONLINE DATING SITE OR DATING APP?",
  "Thinking about your own personal experiences, has someone ever threatened to physically harm you ON AN ONLINE DATING SITE OR DATING APP?",
  "Thinking about your own personal experiences, has someone ever sent you a sexually explicit message or image you didn’t ask for ON AN ONLINE DATING SITE OR DATING APP?",
  "Thinking about your own personal experiences, has someone ever continued to contact you after you said you were not interested ON AN ONLINE DATING SITE OR DATING APP?",
  "What sex is your spouse or partner?",
  "Sexual orientation",
  "Whether or not live in a metropolitan area",
  "Region of the US they reside in",
  "Type of region they reside in",
  "Age category",
  "Sex",
  "Education",
  "Education (expanded)",
  "Race/Ethnicity",
  "Place of birth",
  "Citizenship Status",
  "Marital Status",
  "Religion",
  "Whether born-again or evangelical Christian",
  "Political Party (Democrat/Republican/Independent)",
  "Political Party (Democrat/Republican dichotimized)",
  "Political Party (Dem/Lean Dem or Rep/Lean Rep dichotomized)",
  "Income",
  "Income (trichotomized)",
  "Voting registration status",
  "Voting registration status (trichotomized)",
  "Political ideology"
)
# Convert questions to single col
# (one-column matrix, so data.frame() below names the column "questions")
questions <- cbind(questions)
# Combine variables and questions into df
# lookup_questions maps each human-readable question (shown in the UI
# dropdowns) back to its raw_data column name; server code filters on
# `questions` and pulls `var_names`.
lookup_questions <- data.frame(var_names, questions)
## code copied and modified from https://mastering-shiny.org/basic-ui.html (selectInput, plotOutput
## idea/syntax)
## and https://mastering-shiny.org/action-layout.html (titlePanel, sidebarLayout, sidebarPanel,
## mainPanel idea/syntax)
## and https://mastering-shiny.org/action-layout.html (tabPanel idea/syntax)
## and https://shiny.rstudio.com/articles/layout-guide.html (navbarPage idea/syntax and
## fluidRow, column idea and syntax)
## and https://shiny.rstudio.com/reference/shiny/1.6.0/textOutput.html (textOutput idea/syntax)
## and https://campus.datacamp.com/courses/case-studies-building-web-applications-with-shiny-in-r/shiny-review?ex=3
## (strong("text") idea and syntax)
ui <- fluidPage(
  ## https://stackoverflow.com/questions/47743789/change-the-default-error-message-in-shiny
  # Hide Shiny's default error text and show a custom explanation instead:
  # some question pairs are mutually exclusive (conditional survey routing),
  # so a two-way plot of them has no rows and would otherwise surface a raw
  # error to the user.
  tags$head(tags$style(".shiny-output-error{visibility: hidden}")),
  tags$head(tags$style(".shiny-output-error:after{content: 'Both of these questions are conditional, and no one was asked both of the questions. Please select a different combination of variables.'; visibility: visible}")),
  theme = shinytheme("flatly"),
  navbarPage(
    "CSC/SDS 235 Final Project: Michelle, Lauryn, Grace",
    # Tab 1: user-driven exploration (single-variable bar chart or
    # two-variable heatmap, chosen via plotType).
    tabPanel(
      "Interactive Dashboard",
      fluidRow(
        column(12,
               h3("Explore the Data!"),
               textOutput("howtousetext"),
               br())
      ),
      sidebarLayout(
        sidebarPanel(
          prettyRadioButtons(inputId = "plotType", label = "Plot Type", c(Bar = "bar", Heatmap = "count"), selected = "bar"),
          selectInput(inputId = "variable1", label = "Choose a first variable", selected = "Current Committed Relationship Status", lookup_questions$questions),
          # Only show the second variable selector when the plot type is a
          # two-way heatmap. NOTE: the trailing comma that used to follow
          # selectInput() here was removed -- base R errors on empty trailing
          # arguments in `...` ("argument is empty").
          conditionalPanel(
            condition = "input.plotType == 'count'",
            selectInput(inputId = "variable2", label = "Choose a second variable", selected = "Region of the US they reside in", lookup_questions$questions)
          ),
          textOutput("disclaimer_text")
        ),
        # One output panel per plot type; only the matching one is visible.
        mainPanel(
          conditionalPanel(
            condition = "input.plotType == 'bar'",
            plotlyOutput("plotbar"),
            br(),
            textOutput("numparticipantsasked"),
            br(),
            textOutput("numparticipantsaskedexplan")
          ),
          conditionalPanel(
            condition = "input.plotType == 'count'",
            plotlyOutput("heatmap"),
            br(),
            textOutput("heatmaptextboxone"),
            textOutput("heatmaptextboxtwo"),
            br(),
            textOutput("countexplaintwo")
          )
        )
      )
    ),
    # Tab 2: static write-up with pre-built plots and narrative text.
    tabPanel(
      "Static Data Analysis",
      fluidRow(
        column(
          12,
          h3("About Our Project"),
          htmlOutput("aboutprojtext"),
          br(),
          h3("Characterizing the Sample"),
          textOutput("characterizing_sample_text"),
          br(),
          # Demographic plots laid out two per row, centered with spacer
          # columns of width 1 on each side.
          fluidRow(
            column(1),
            column(5,
                   plotlyOutput("characterizingsamplemstatus"),
                   br(),
                   textOutput("partaskedmstatus"),
                   br(),
                   br()),
            column(5,
                   plotlyOutput("characterizingsampleorientation"),
                   br(),
                   textOutput("partansweredorientationi"),
                   br(),
                   br()),
            column(1)
          ),
          fluidRow(
            column(1),
            column(5,
                   plotlyOutput("characterizingsampleideology"),
                   br(),
                   textOutput("partansweredpoliticalideo"),
                   br(),
                   br()),
            column(5,
                   plotlyOutput("characterizingsamplerace"),
                   br(),
                   textOutput("partanswerrace"),
                   br(),
                   br()),
            column(1)
          ),
          fluidRow(
            column(1),
            column(5,
                   plotlyOutput("characterizingsampleage"),
                   br(),
                   textOutput("partanswerage")),
            column(5,
                   plotlyOutput("characterizingsampleeduc"),
                   br(),
                   textOutput("partanswereduc")),
            column(1)
          )
        )),
      fluidRow(
        column(
          12,
          h3("Interesting Findings"),
          textOutput("overallinterestingfindingstext"),
          br(),
          fluidRow(column(3),
                   column(6,
                          plotlyOutput("thingsindatinglife"),
                          br(),
                          textOutput("partanswerthings"),
                          br(),
                          textOutput("thingsdaatinglifetext"),
                          br(),
                          br()),
                   column(3)
          ),
          fluidRow(
            column(6,
                   plotlyOutput("datinglifebyage"),
                   br()),
            column(6,
                   plotlyOutput("datinglifebysex"),
                   br())
          ),
          fluidRow(column(3),
                   column(6,
                          textOutput("summarydatinglife"),
                          br(),
                          br()
                   ),
                   column(3)
          ),
          fluidRow(column(3),
                   column(6,
                          plotlyOutput("tenyears"),
                          br(),
                          textOutput("partanswer10years"),
                          br(),
                          textOutput("tenyearstext"),
                          br(),
                          br()),
                   column(3)
          ),
          fluidRow(column(6,
                          plotlyOutput("tenyearsbyage"),
                          br()),
                   column(6,
                          plotlyOutput("tenyearsbysex"),
                          br())
          ),
          fluidRow(column(3),
                   column(6,
                          textOutput("summarytenyears"),
                          br(),
                          br()
                   ),
                   column(3)
          ),
          fluidRow(column(3),
                   column(6,
                          plotlyOutput("feelings_by_sex"),
                          br(),
                          textOutput("partanswerfeelings"),
                          br(),
                          textOutput("onlinenegfeelingstext"),
                          br(),
                          br()
                   ),
                   column(3)
          ),
          fluidRow(column(3),
                   column(6,
                          plotlyOutput("bullingharass"),
                          br(),
                          textOutput("partanswerbullyharass"),
                          br(),
                          textOutput("bullingharasstext"),
                          br(),
                          br()
                   ),
                   column(3)
          ),
          fluidRow(
            column(2),
            column(6,
                   plotlyOutput("effectofonline"),
                   br(),
                   textOutput("effectofonlinetext"),
                   br(),
                   br()
            ),
            column(2,
                   textOutput("summaryonline"),
                   br(),
                   br()),
            column(2)
          ),
          fluidRow(
            column(2),
            column(6,
                   plotlyOutput("jealousy_by_sex"),
                   br(),
                   textOutput("jealousypartaskedtext"),
                   br(),
                   br()
            ),
            column(2,
                   textOutput("textbtwo"),
                   br(),
                   br()),
            column(2)
          ),
          fluidRow(column(6,
                          plotlyOutput("botheredbycell"),
                          br(),
                          textOutput("cellphoneonepartanswer"),
                          br()),
                   column(6,
                          plotlyOutput("distractedbycell"),
                          br(),
                          textOutput("cellphonetwopartanswer"),
                          br())
          ),
          fluidRow(column(2),
                   column(8,
                          textOutput("cellphonetext"),
                          br(),
                          textOutput("finaltext")),
                   column(2))
        )
      ),
      # Footer with citations, separated by a horizontal rule.
      ## footer hr() from https://stackoverflow.com/questions/30205034/shiny-layout-how-to-add-footer-disclaimer/38241035
      fluidRow(
        column(
          12,
          hr(),
          h4("References"),
          htmlOutput("citations_textone"),
          htmlOutput("citations_texttwo"),
          br()
        )
      )
    )
  )
)
## code copied and modified from https://mastering-shiny.org/basic-app.html and
# https://mastering-shiny.org/basic-ui.html
server <- function(input, output, session) {
output$plotbar <- renderPlotly({
g <- ggplot((raw_data %>%
## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
filter(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])))) %>%
group_by_(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
) %>%
mutate(
pct = n/sum(n)
)), aes(
x = get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])),
y = n,
text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
)) +
geom_col() +
xlab(str_wrap(input$variable1)) +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
scale_y_continuous("Number of Participants", expand = c(0,0)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
#Attempt to fix margin - does not work
#axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)))
#This does not work either:
axis.title.x = element_text(vjust = 1))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "text")
})
output$numparticipantsasked <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]))))
) %>%
pull(num_not_na[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]))))
) %>%
pull(n[1])
text <- paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants were asked this question."), sep = " ")
})
output$numparticipantsaskedexplan <- renderText(
"Whether or not participants
were asked certain questions was often conditional on previous
responses. For example, only those who are married were not asked
whether they were in a committed relationship."
)
output$countexplaintwo <- renderText(
"Whether or not participants
were asked certain questions was often conditional on previous
responses. For example, only those who are married were not asked
whether they were in a committed relationship."
)
# Attempt to build heatmap
output$heatmap <- renderPlotly({
if(input$variable1 != input$variable2){
g <- raw_data %>%
filter(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])))) %>%
filter(!is.na(get(lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])))) %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by_(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]),
lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])
) %>%
summarize(
n = n()
) %>%
ggplot(aes(
x = get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])),
y = get(lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])),
fill = n
)) +
geom_tile() +
## HTML color codes from https://htmlcolorcodes.com/
## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
scale_fill_gradient(low = "#FFFFFF", high = "#000773", na.value = "#8E8E8E") +
xlab(str_wrap(input$variable1)) +
ylab(str_wrap(input$variable2)) +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "fill")
}
else{
g <- raw_data %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by_(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n()
) %>%
ggplot(aes(
x = get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])),
y = n
)) +
geom_col() +
xlab(str_wrap(input$variable1)) +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "y")
}
})
output$heatmaptextboxone <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]))))
) %>%
pull(num_not_na[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]))))
) %>%
pull(n[1])
text <- paste(paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants were asked"), sep = " "), str_wrap(input$variable1))
})
output$heatmaptextboxtwo <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1]))))
) %>%
pull(num_not_na[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1]))))
) %>%
pull(n[1])
text <- paste(paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants were asked"), sep = " "), str_wrap(input$variable2))
})
output$disclaimer_text <- renderText(
"Disclaimer: Some question text was changed for clarity or conciseness"
)
output$citations_textone <- renderUI(HTML(
"Vogels, E. A., & Anderson, M. (2020, May 8).
Dating and Relationships in the Digital Age. Pew Research Center. <a href ='https://www.pewresearch.org/internet/2020/05/08/dating-and-relationships-in-the-digital-age/'>Link to the data</a>."
))
output$citations_texttwo <- renderUI(HTML(
"Pew Research Center. (2019).
Pew Research Center’s American Trends Panel Wave 56 Methodology Report.
Downloaded as metadata alongside the data from <a href ='https://www.pewresearch.org/internet/2020/05/08/dating-and-relationships-in-the-digital-age/'>this link</a>"
))
#Static plots for Interesting Findings
output$feelings_by_sex <- renderPlotly({
raw_data$ONFEEL.c_W56 <- factor(raw_data$ONFEEL.c_W56,levels = c("Frustrated", "Neither", "Hopeful", "Refused"))
g <- raw_data %>%
filter(F_SEX != "Refused") %>%
filter(ONFEEL.c_W56 != "Refused") %>%
filter(!is.na(F_SEX) & !is.na(ONFEEL.c_W56)) %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by_(
lookup_questions %>%
filter(questions == "In general in the past year, has using online dating sites or dating apps made you feel more hopeful or frustrated?") %>%
pull(var_names[1]),
lookup_questions %>%
filter(questions == "Sex") %>%
pull(var_names[1])
) %>%
summarize(
n = n()
) %>%
ggplot(aes(x = ONFEEL.c_W56, y =F_SEX, fill = n)) +
geom_tile() +
ggtitle("Feelings After Using Online Dating, Sorted by Sex") +
xlab(str_wrap("In general in the past year, has using online dating sites or dating apps made you feel more hopeful or frustrated?"))+
ylab("Sex")+
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))+
scale_fill_gradient(low = "#FFFFFF", high = "#004D71", na.value = "#8E8E8E")
ggplotly(g, tooltip = "fill")
})
output$jealousy_by_sex <- renderPlotly({
raw_data$SNSFEEL_W56 <- factor(raw_data$SNSFEEL_W56,levels = c("Yes, have felt this way", "No, have never felt this way", "Refused"))
g <- raw_data %>%
filter(F_SEX != "Refused" & SNSFEEL_W56 != "Refused") %>%
filter(!is.na(F_SEX) & !is.na(SNSFEEL_W56)) %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by_(
lookup_questions %>%
filter(questions == "Have you ever felt jealous or unsure about your relationship because of the way your current spouse or partner interacts with other people on social media?") %>%
pull(var_names[1]),
lookup_questions %>%
filter(questions == "Sex") %>%
pull(var_names[1])
) %>%
summarize(
n = n()
) %>%
ggplot(aes(x = SNSFEEL_W56, y =F_SEX, fill = n)) +
geom_tile() +
ggtitle("Jealousy in Relationships and Social Media, Sorted by Sex") +
xlab(str_wrap("Have you ever felt jealous or unsure about your relationship because of the way your current spouse or partner interacts with other people on social media?"))+
ylab("Sex")+
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))+
scale_fill_gradient(low = "#FFFFFF", high = "#DD7405", na.value = "#8E8E8E")
ggplotly(g, tooltip = "fill")
})
  # About-page blurb; rendered as HTML so the <a> tag becomes a link.
  output$aboutprojtext<- renderUI(HTML("This application provides an analysis of and means to
                                       interact with data from the 2019 Pew Research Center survey on the
                                       intersection between romantic relationships and technology. The set of participants recruited for the survey, part of the American Trends Panel, were designed to serve as a representative sample of the US (Pew Research Center, 2019).
                                       Download the dataset with a Pew Research Center account and view their
                                       analysis <a href ='https://www.pewresearch.org/internet/2020/05/08/dating-and-relationships-in-the-digital-age/'>here</a> (Vogels & Anderson, 2020)."))
  # Commentary under the online-dating feelings heat map.
  output$onlinenegfeelingstext <- renderText("Now, we will turn specifically to online dating, as these apps and sites are a major component of modern dating. Here, we notice that females tend to experience more negative feelings regarding online dating. For people who used online dating, more females felt pessimistic (41% of all females asked this question) than males (35%).")
  # Commentary under the social-media jealousy heat map.
  output$textbtwo <- renderText("We thought that, perhaps jealousy and insecurity inflicted by social media
                                play a role here. We note that more females in committed relationships reported feeling insecure because of their partner's social media use (29%) than males (15%). However, both of these
                                proportions are relatively low, so social media jealousy may not account for dissatisfaction with dating.")
  # One-line demographic summary of the sample shown above the bar charts.
  output$characterizing_sample_text <- renderText("This sample is largely married (40%), straight (68%), politically moderate (36%) or liberal (27%), non-Hispanic white (69%), and ages 30-64 (64%) with a college degree or higher (46%).")
output$characterizingsamplemstatus <- renderPlotly({
g <- raw_data %>%
filter(MARITAL_W56 != "Refused") %>%
filter(!is.na(MARITAL_W56)) %>%
group_by(MARITAL_W56) %>%
summarize(
n = n()
) %>%
mutate(
pct = n/sum(n)
) %>%
# reorder from https://sebastiansauer.github.io/ordering-bars/
ggplot(aes(x = reorder(MARITAL_W56, -n), y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
geom_col() +
xlab("Marital Status") +
ylab("Number of Participants") +
ggtitle("Number of Participants by Marital Status") +
scale_y_continuous(expand = c(0,0)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
## tooltip = "text" with text specified above idea from https://plotly.com/ggplot2/interactive-tooltip/
ggplotly(g, tooltip = 'text')
})
output$characterizingsampleorientation <- renderPlotly({
g <- raw_data %>%
filter(ORIENTATIONMOD_W56 != "Refused") %>%
filter(!is.na(ORIENTATIONMOD_W56)) %>%
group_by(ORIENTATIONMOD_W56) %>%
summarize(
n = n()
) %>%
mutate(
pct = n/sum(n)
) %>%
# reorder from https://sebastiansauer.github.io/ordering-bars/
ggplot(aes(x = reorder(ORIENTATIONMOD_W56, -n), y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
geom_col() +
xlab("Sexual Orientation") +
ggtitle("Number of Participants by Sexual Orientation") +
ylab("Number of Participants") +
scale_y_continuous(expand = c(0,0)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
ggplotly(g, tooltip = 'text')
})
output$characterizingsampleideology <- renderPlotly({
## reorder from https://sebastiansauer.github.io/ordering-bars/
raw_data$F_IDEO <- factor(raw_data$F_IDEO,levels = c("Very conservative", "Conservative", "Moderate", "Liberal", "Very liberal", "Refused"))
g <- raw_data %>%
filter(F_IDEO != "Refused") %>%
filter(!is.na(F_IDEO)) %>%
group_by(F_IDEO) %>%
summarize(
n = n()
) %>%
mutate(
pct = n/sum(n)
) %>%
ggplot(aes(x = F_IDEO, y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
geom_col() +
xlab("Political Ideology") +
ggtitle("Number of Participants by Political Ideology") +
ylab("Number of Participants") +
scale_y_continuous(expand = c(0,0)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
ggplotly(g, tooltip = 'text')
})
output$characterizingsamplerace <- renderPlotly({
g <- raw_data %>%
filter(F_RACETHN != "Refused") %>%
filter(!is.na(F_RACETHN)) %>%
group_by(F_RACETHN) %>%
summarize(
n = n()
) %>%
mutate(
pct = n/sum(n)
) %>%
ggplot(aes(x = F_RACETHN, y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
geom_col() +
xlab("Race/Ethnicity") +
ggtitle("Number of Participants by Race/Ethnicity") +
ylab("Number of Participants") +
scale_y_continuous(expand = c(0,0)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
ggplotly(g, tooltip = 'text')
})
output$characterizingsampleage <- renderPlotly({
g <- raw_data %>%
filter(F_AGECAT != "DK/REF") %>%
filter(!is.na(F_AGECAT)) %>%
group_by(F_AGECAT) %>%
summarize(
n = n()
) %>%
mutate(
pct = n/sum(n)
) %>%
ggplot(aes(x = F_AGECAT, y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
geom_col() +
xlab("Age") +
ggtitle("Number of Participants by Age") +
ylab("Number of Participants") +
scale_y_continuous(expand = c(0,0)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
ggplotly(g, tooltip = 'text')
})
output$characterizingsampleeduc <- renderPlotly({
## reorder from https://sebastiansauer.github.io/ordering-bars/
raw_data$F_EDUCCAT <- factor(raw_data$F_EDUCCAT,levels = c("H.S. graduate or less", "Some College", "College graduate+", "Don't know/Refused"))
g <- raw_data %>%
filter(F_EDUCCAT != "Don't know/Refused") %>%
filter(!is.na(F_EDUCCAT)) %>%
group_by(F_EDUCCAT) %>%
summarize(
n = n()
) %>%
mutate(
pct = n/sum(n)
) %>%
ggplot(aes(x = F_EDUCCAT, y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
geom_col() +
xlab("Education Level") +
ggtitle("Number of Participants by Education") +
ylab("Number of Participants") +
scale_y_continuous(expand = c(0,0)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
ggplotly(g, tooltip = 'text')
})
output$thingsindatinglife <- renderPlotly({
raw_data$FAMSURV19DATING_W56 <- factor(raw_data$FAMSURV19DATING_W56,levels = c("Not at all well", "Not too well", "Fairly well", "Very well", "Refused"))
g <- ggplot((raw_data %>%
## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
filter(!is.na(FAMSURV19DATING_W56)) %>%
filter(FAMSURV19DATING_W56 != "Refused") %>%
group_by(FAMSURV19DATING_W56) %>%
summarize(
n = n(),
) %>%
mutate(
pct = n/sum(n)
)), aes(
x = FAMSURV19DATING_W56,
y = n,
text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
)) +
geom_col() +
ggtitle("How Participants' Dating Lives are Going") +
xlab("Overall, would you say that things in your dating life are going...") +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
scale_y_continuous("Number of Participants", expand = c(0,0)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
#Attempt to fix margin - does not work
#axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)))
#This does not work either:
axis.title.x = element_text(vjust = 1))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "text")
})
  # Commentary under the dating-life bar chart. (Output id typo
  # "daatinglife" is referenced by the UI, so it must stay as-is.)
  output$thingsdaatinglifetext <- renderText("The majority of participants (70%) asked this question said that things in their dating
                                              life are going not at all well or not too well. This highlights the trouble
                                              many are facing with modern dating, whether participants' problems are technology related or not.")
output$tenyears <- renderPlotly({
raw_data$DATE10YR_W56 <- factor(raw_data$DATE10YR_W56,levels = c("Harder today", "About the same", "Easier today", "Refused"))
g <- ggplot((raw_data %>%
## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
filter(!is.na(DATE10YR_W56)) %>%
filter(DATE10YR_W56 != "Refused") %>%
group_by(DATE10YR_W56) %>%
summarize(
n = n(),
) %>%
mutate(
pct = n/sum(n)
)), aes(
x = DATE10YR_W56,
y = n,
text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
)) +
geom_col() +
xlab("Compared to 10 years ago, for most people, do you think dating is...") +
ggtitle("Difficulty of Dating Now Compared to the Past") +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
scale_y_continuous("Number of Participants", expand = c(0,0)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
#Attempt to fix margin - does not work
#axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)))
#This does not work either:
axis.title.x = element_text(vjust = 1))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "text")
})
  # Commentary under the ten-years comparison bar chart.
  output$tenyearstext <- renderText("Participants also believe that dating has gotten more difficult over time.
                                     A plurality of respondents said that dating is harder today than it was 10 years
                                     ago (48%), while only 18% think that dating is easier today.")
output$overallinterestingfindingstext <- renderText("Many, but not all, participants expressed struggles or dissatisfaction
with modern dating. On this survey, which was collected before the COVID-19 pandemic,
participants identifying as male and female, and across ages, reported difficulties. Many also reported
feeling frustrated with online dating, and noted the prevalence of bullying and harassment. As a disclaimer, please note that this survey and
the following analysis binary sex as a proxy for gender identity. This is a
flawed and incomplete measure of gender.")
output$datinglifebysex <- renderPlotly({
raw_data$FAMSURV19DATING_W56 <- factor(raw_data$FAMSURV19DATING_W56,levels = c("Not at all well", "Not too well", "Fairly well", "Very well", "Refused"))
g <- raw_data %>%
filter(!is.na(FAMSURV19DATING_W56)) %>%
filter(!is.na(F_SEX)) %>%
filter(F_SEX != "Refused") %>%
filter(FAMSURV19DATING_W56 != "Refused") %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by(FAMSURV19DATING_W56,
F_SEX
) %>%
summarize(
n = n()
) %>%
ggplot(aes(
x = FAMSURV19DATING_W56,
y = F_SEX,
fill = n
)) +
geom_tile() +
ggtitle("How Participants' Dating Lives are Going, Sorted by Sex") +
## HTML color codes from https://htmlcolorcodes.com/
## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
scale_fill_gradient(low = "#FFFFFF", high = "#000773", na.value = "#8E8E8E", limits = c(0,300)) +
xlab("Overall, would you say that things in your dating life are going...") +
ylab("Sex") +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "fill")
})
output$datinglifebyage <- renderPlotly({
raw_data$FAMSURV19DATING_W56 <- factor(raw_data$FAMSURV19DATING_W56,levels = c("Not at all well", "Not too well", "Fairly well", "Very well", "Refused"))
g <- raw_data %>%
filter(!is.na(FAMSURV19DATING_W56)) %>%
filter(!is.na(F_AGECAT)) %>%
filter(F_AGECAT != "DK/REF") %>%
filter(FAMSURV19DATING_W56 != "Refused") %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by(FAMSURV19DATING_W56,
F_AGECAT
) %>%
summarize(
n = n()
) %>%
ggplot(aes(
x = FAMSURV19DATING_W56,
y = F_AGECAT,
fill = n
)) +
geom_tile() +
ggtitle("How Participants' Dating Lives are Going, Sorted by Age") +
## HTML color codes from https://htmlcolorcodes.com/
## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
scale_fill_gradient(low = "#FFFFFF", high = "#000773", na.value = "#8E8E8E", limits = c(0,300)) +
xlab("Overall, would you say that things in your dating life are going...") +
ylab("Age") +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "fill")
})
  # Commentary under the two dating-life heat maps.
  output$summarydatinglife <- renderText("Findings about most people having some trouble with their dating life
                                          are found across ages and sexes.")
output$tenyearsbysex <- renderPlotly({
raw_data$DATE10YR_W56 <- factor(raw_data$DATE10YR_W56,levels = c("Harder today", "About the same", "Easier today", "Refused"))
g <- raw_data %>%
filter(!is.na(DATE10YR_W56)) %>%
filter(!is.na(F_SEX)) %>%
filter(F_SEX != "Refused") %>%
filter(DATE10YR_W56 != "Refused") %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by(DATE10YR_W56,
F_SEX
) %>%
summarize(
n = n()
) %>%
ggplot(aes(
x = DATE10YR_W56,
y = F_SEX,
fill = n
)) +
geom_tile() +
ggtitle("Difficulty of Dating Now Compared to the Past, Sorted by Sex") +
## HTML color codes from https://htmlcolorcodes.com/
## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
scale_fill_gradient(low = "#FFFFFF", high = "#035B00", na.value = "#8E8E8E", limits = c(0,1400)) +
xlab("Compared to 10 years ago, for most people, do you think dating is...") +
ylab("Sex") +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "fill")
})
output$tenyearsbyage <- renderPlotly({
raw_data$DATE10YR_W56 <- factor(raw_data$DATE10YR_W56,levels = c("Harder today", "About the same", "Easier today", "Refused"))
g <- raw_data %>%
filter(!is.na(DATE10YR_W56)) %>%
filter(!is.na(F_AGECAT)) %>%
filter(F_AGECAT != "DK/REF") %>%
filter(DATE10YR_W56 != "Refused") %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by(DATE10YR_W56,
F_AGECAT
) %>%
summarize(
n = n()
) %>%
ggplot(aes(
x = DATE10YR_W56,
y = F_AGECAT,
fill = n
)) +
geom_tile() +
ggtitle("Difficulty of Dating Now Compared to the Past, Sorted by Age") +
## HTML color codes from https://htmlcolorcodes.com/
## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
scale_fill_gradient(low = "#FFFFFF", high = "#035B00", na.value = "#8E8E8E", limits = c(0,1400)) +
xlab("Compared to 10 years ago, for most people, do you think dating is...") +
ylab("Age") +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "fill")
})
  # Commentary under the two ten-years heat maps.
  output$summarytenyears <- renderText("Those who identify as female seem to be more pessimistic about current dating conditions than those identifying as
                                        male, and younger people (under the age of 49) seem to be somewhat more pessimistic than those
                                        over the age of 50.")
output$effectofonline <- renderPlotly({
raw_data$ONIMPACT_W56 <- factor(raw_data$ONIMPACT_W56,levels = c("Mostly negative effect", "Neither positive or negative effect", "Mostly positive effect", "Refused"))
g <- ggplot((raw_data %>%
## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
filter(ONIMPACT_W56 != "Refused") %>%
filter(!is.na(ONIMPACT_W56)) %>%
group_by(ONIMPACT_W56) %>%
summarize(
n = n(),
) %>%
mutate(
pct = n/sum(n)
)), aes(
x = ONIMPACT_W56,
y = n,
text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
)) +
geom_col() +
ggtitle("Perceived Effect of Online Dating") +
xlab(str_wrap("Overall, what type of effect would you say online dating sites and dating apps have had on dating and relationships?")) +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
scale_y_continuous("Number of Participants", expand = c(0,0)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
#Attempt to fix margin - does not work
#axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)))
#This does not work either:
axis.title.x = element_text(vjust = 1))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "text")
})
output$summaryonline <- renderText("Despite these findings about pesimission with regard to
recent dating in general, people's current dating lives, and the downsides of online dating,
the participants were mixed on whether online dating has improved
or worsened dating in general. A plurality of participants (49%)
said that online dating had neither effect. This brings up the question,
if online dating is not what is making modern dating more difficult, then what is?")
output$bullingharass <- renderPlotly({
raw_data$ONPROBLEM.a_W56 <- factor(raw_data$ONPROBLEM.a_W56,levels = c("Not at all common", "Not too common", "Somewhat common", "Very common", "Refused"))
g <- ggplot((raw_data %>%
## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
filter(!is.na(ONPROBLEM.a_W56)) %>%
filter(ONPROBLEM.a_W56 != "Refused") %>%
group_by(ONPROBLEM.a_W56) %>%
summarize(
n = n(),
) %>%
mutate(
pct = n/sum(n)
)), aes(
x = ONPROBLEM.a_W56,
y = n,
text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
)) +
geom_col() +
ggtitle("Perceived Prevalence of Bullying/Harassment in Online Dating") +
xlab(str_wrap("How common is people being harassed or bullied on online dating sites and dating apps?")) +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
scale_y_continuous("Number of Participants", expand = c(0,0)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
#Attempt to fix margin - does not work
#axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)))
#This does not work either:
axis.title.x = element_text(vjust = 1))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "text")
})
  # Commentary under the harassment-prevalence chart. (Output id typo
  # "bulling" is referenced by the UI, so it must stay as-is.)
  output$bullingharasstext <- renderText("Bullying and harassment appears to be a problem on online dating apps and
                                          websites. 61% of participants who answered said that this was somewhat or very common,
                                          a concerning statistic.")
output$botheredbycell <- renderPlotly({
raw_data$PARTNERSCREEN.a_W56 <- factor(raw_data$PARTNERSCREEN.a_W56,levels = c("Never", "Rarely", "Sometimes", "Often", "Refused"))
g <- ggplot((raw_data %>%
## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
filter(!is.na(PARTNERSCREEN.a_W56)) %>%
filter(PARTNERSCREEN.a_W56 != "Refused") %>%
group_by(PARTNERSCREEN.a_W56) %>%
summarize(
n = n(),
) %>%
mutate(
pct = n/sum(n)
)), aes(
x = PARTNERSCREEN.a_W56,
y = n,
text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
)) +
geom_col() +
ggtitle("Cell Phone Time Bother in Relationships") +
xlab(str_wrap("How often, if ever, are you bothered by the amount of time your spouse or partner spends on their cellphone?")) +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
scale_y_continuous("Number of Participants", expand = c(0,0), limits = c(0,1100)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "text")
})
output$distractedbycell <- renderPlotly({
raw_data$PARTNERDISTRACT_W56 <- factor(raw_data$PARTNERDISTRACT_W56,levels = c("Never", "Rarely", "Sometimes", "Often", "Refused"))
g <- ggplot((raw_data %>%
## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
filter(!is.na(PARTNERDISTRACT_W56)) %>%
filter(PARTNERDISTRACT_W56 != "Refused") %>%
group_by(PARTNERDISTRACT_W56) %>%
summarize(
n = n(),
) %>%
mutate(
pct = n/sum(n)
)), aes(
x = PARTNERDISTRACT_W56,
y = n,
text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
)) +
geom_col() +
ggtitle("Frequency of Distraction by Cell Phone in Relationships") +
xlab(str_wrap("How often, if ever, do you feel as if your spouse or partner is distracted by their cellphone when you are trying to have a conversation with them?")) +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
scale_y_continuous("Number of Participants", expand = c(0,0), limits = c(0,1100)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "text")
})
output$cellphonetext <- renderText("Another possibility for why participants are dissatisfied with dating is that
we constantly use our cell phones. 40% of participants said that they were sometimess or often
bothered by the amount of time that their spouse or partner spent on their cell phone, highlighting
how others' behavior in technology can put a strain on relationships. Perhaps more notably, a majority of the sample (54%)
said that they sometimes or often feel that their partners are distracted by a cell phone while
they want to have a conversation. This high level of distraction has the potential to make casual dating
and committed relationships alike difficult.")
output$finaltext <- renderText("Use the Interactive Dashboard to explore more reasons why some are dissatisfied, and learn why some are happy with modern technology in dating and relationships.")
output$partaskedmstatus <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == "Marital Status") %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_na = sum(is.na(get(lookup_questions %>%
filter(questions == "Marital Status") %>%
pull(var_names[1])))),
num_refused = sum((get(lookup_questions %>%
filter(questions == "Marital Status") %>%
pull(var_names[1]))) == "Refused"),
num_asked = n - (num_na + num_refused)
) %>%
pull(num_asked[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == "Marital Status") %>%
pull(var_names[1])) %>%
summarize(
n = n()
) %>%
pull(n[1])
text <- paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants answered this question."), sep = " ")
})
output$partansweredorientationi <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == "Sexual orientation") %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_na = sum(is.na(get(lookup_questions %>%
filter(questions == "Sexual orientation") %>%
pull(var_names[1])))),
num_refused = sum((get(lookup_questions %>%
filter(questions == "Sexual orientation") %>%
pull(var_names[1]))) == "Refused"),
num_asked = n - (num_na + num_refused)
) %>%
pull(num_asked[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == "Sexual orientation") %>%
pull(var_names[1])) %>%
summarize(
n = n()
) %>%
pull(n[1])
text <- paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants answered this question."), sep = " ")
})
output$partansweredpoliticalideo <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == "Political ideology") %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_na = sum(is.na(get(lookup_questions %>%
filter(questions == "Political ideology") %>%
pull(var_names[1])))),
num_refused = sum((get(lookup_questions %>%
filter(questions == "Political ideology") %>%
pull(var_names[1]))) == "Refused"),
num_asked = n - (num_na + num_refused)
) %>%
pull(num_asked[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == "Political ideology") %>%
pull(var_names[1])) %>%
summarize(
n = n()
) %>%
pull(n[1])
text <- paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants answered this question."), sep = " ")
})
output$partanswerrace <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == "Race/Ethnicity") %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_na = sum(is.na(get(lookup_questions %>%
filter(questions == "Race/Ethnicity") %>%
pull(var_names[1])))),
num_refused = sum((get(lookup_questions %>%
filter(questions == "Race/Ethnicity") %>%
pull(var_names[1]))) == "Refused"),
num_asked = n - (num_na + num_refused)
) %>%
pull(num_asked[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == "Race/Ethnicity") %>%
pull(var_names[1])) %>%
summarize(
n = n()
) %>%
pull(n[1])
text <- paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants answered this question."), sep = " ")
})
# Note text for the Age category demographic plot: reports how many
# participants gave a substantive answer (neither NA nor "DK/REF") out of
# the full sample.
output$partanswerage <- renderText({
  # Raw-data column name for the Age category question; assumes the
  # question text matches exactly one lookup row.
  age_var <- lookup_questions %>%
    filter(questions == "Age category") %>%
    pull(var_names)
  total_asked <- raw_data %>%
    summarize(
      n = n(),
      ## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
      num_na = sum(is.na(.data[[age_var]])),
      # na.rm = TRUE is required: with NAs present the comparison yields
      # NA and num_asked would silently become NA.
      num_refused = sum(.data[[age_var]] == "DK/REF", na.rm = TRUE),
      num_asked = n - (num_na + num_refused)
    ) %>%
    pull(num_asked)
  # Denominator: every participant in the sample.
  total_people <- nrow(raw_data)
  paste("Note:",
        paste(
          paste(total_asked, total_people, sep = "/"),
          "participants answered this question."), sep = " ")
})
# Note text for the Education demographic plot: reports how many
# participants gave a substantive answer (neither NA nor
# "Don't know/Refused") out of the full sample.
output$partanswereduc <- renderText({
  # Raw-data column name for the Education question; exact string match,
  # so "Education (expanded)" is not picked up.
  educ_var <- lookup_questions %>%
    filter(questions == "Education") %>%
    pull(var_names)
  total_asked <- raw_data %>%
    summarize(
      n = n(),
      ## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
      num_na = sum(is.na(.data[[educ_var]])),
      # na.rm = TRUE is required: with NAs present the comparison yields
      # NA and num_asked would silently become NA.
      num_refused = sum(.data[[educ_var]] == "Don't know/Refused", na.rm = TRUE),
      num_asked = n - (num_na + num_refused)
    ) %>%
    pull(num_asked)
  # Denominator: every participant in the sample.
  total_people <- nrow(raw_data)
  paste("Note:",
        paste(
          paste(total_asked, total_people, sep = "/"),
          "participants answered this question."), sep = " ")
})
# Note text for the "dating compared to 10 years ago" plot: reports how
# many participants gave a substantive answer (neither NA nor "Refused").
output$partanswer10years <- renderText({
  total_asked <- raw_data %>%
    summarize(
      n = n(),
      ## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
      num_na = sum(is.na(DATE10YR_W56)),
      # na.rm = TRUE guards against the whole sum collapsing to NA when
      # the column contains missing values (which num_na shows it can).
      num_refused = sum(DATE10YR_W56 == "Refused", na.rm = TRUE),
      num_asked = n - (num_na + num_refused)
    ) %>%
    pull(num_asked)
  # Denominator: every participant in the sample.
  total_people <- nrow(raw_data)
  paste("Note:",
        paste(
          paste(total_asked, total_people, sep = "/"),
          "participants answered this question."), sep = " ")
})
# Note text for the FAMSURV19DATING_W56 plot: counts participants who gave
# a substantive answer (neither missing nor "Refused") out of the sample.
output$partanswerthings <- renderText({
  responses <- raw_data$FAMSURV19DATING_W56
  # Explicit refusals; na.rm mirrors the original's filter(), which drops NAs.
  n_refused <- sum(responses == "Refused", na.rm = TRUE)
  # Substantive answers = non-missing responses minus refusals.
  n_answered <- sum(!is.na(responses)) - n_refused
  n_total <- length(responses)
  paste("Note:",
        paste(
          paste(n_answered, n_total, sep = "/"),
          "participants answered this question."), sep = " ")
})
# Note text for the ONFEEL.c_W56 plot: counts participants who gave a
# substantive answer (neither missing nor "Refused") out of the sample.
output$partanswerfeelings <- renderText({
  responses <- raw_data$ONFEEL.c_W56
  # Explicit refusals; na.rm mirrors the original's filter(), which drops NAs.
  n_refused <- sum(responses == "Refused", na.rm = TRUE)
  # Substantive answers = non-missing responses minus refusals.
  n_answered <- sum(!is.na(responses)) - n_refused
  n_total <- length(responses)
  paste("Note:",
        paste(
          paste(n_answered, n_total, sep = "/"),
          "participants answered this question."), sep = " ")
})
# Note text for the ONPROBLEM.a_W56 plot: counts participants who gave a
# substantive answer (neither missing nor "Refused") out of the sample.
output$partanswerbullyharass <- renderText({
  responses <- raw_data$ONPROBLEM.a_W56
  # Explicit refusals; na.rm mirrors the original's filter(), which drops NAs.
  n_refused <- sum(responses == "Refused", na.rm = TRUE)
  # Substantive answers = non-missing responses minus refusals.
  n_answered <- sum(!is.na(responses)) - n_refused
  n_total <- length(responses)
  paste("Note:",
        paste(
          paste(n_answered, n_total, sep = "/"),
          "participants answered this question."), sep = " ")
})
# Note text for the ONIMPACT_W56 plot: counts participants who gave a
# substantive answer (neither missing nor "Refused") out of the sample.
output$effectofonlinetext <- renderText({
  responses <- raw_data$ONIMPACT_W56
  # Explicit refusals; na.rm mirrors the original's filter(), which drops NAs.
  n_refused <- sum(responses == "Refused", na.rm = TRUE)
  # Substantive answers = non-missing responses minus refusals.
  n_answered <- sum(!is.na(responses)) - n_refused
  n_total <- length(responses)
  paste("Note:",
        paste(
          paste(n_answered, n_total, sep = "/"),
          "participants answered this question."), sep = " ")
})
# Note text for the SNSFEEL_W56 plot: counts participants who gave a
# substantive answer (neither missing nor "Refused") out of the sample.
output$jealousypartaskedtext <- renderText({
  responses <- raw_data$SNSFEEL_W56
  # Explicit refusals; na.rm mirrors the original's filter(), which drops NAs.
  n_refused <- sum(responses == "Refused", na.rm = TRUE)
  # Substantive answers = non-missing responses minus refusals.
  n_answered <- sum(!is.na(responses)) - n_refused
  n_total <- length(responses)
  paste("Note:",
        paste(
          paste(n_answered, n_total, sep = "/"),
          "participants answered this question."), sep = " ")
})
# Note text for the PARTNERSCREEN.a_W56 plot: counts participants who gave
# a substantive answer (neither missing nor "Refused") out of the sample.
output$cellphoneonepartanswer <- renderText({
  responses <- raw_data$PARTNERSCREEN.a_W56
  # Explicit refusals; na.rm mirrors the original's filter(), which drops NAs.
  n_refused <- sum(responses == "Refused", na.rm = TRUE)
  # Substantive answers = non-missing responses minus refusals.
  n_answered <- sum(!is.na(responses)) - n_refused
  n_total <- length(responses)
  paste("Note:",
        paste(
          paste(n_answered, n_total, sep = "/"),
          "participants answered this question."), sep = " ")
})
# Note text for the PARTNERDISTRACT_W56 plot: counts participants who gave
# a substantive answer (neither missing nor "Refused") out of the sample.
output$cellphonetwopartanswer <- renderText({
  responses <- raw_data$PARTNERDISTRACT_W56
  # Explicit refusals; na.rm mirrors the original's filter(), which drops NAs.
  n_refused <- sum(responses == "Refused", na.rm = TRUE)
  # Substantive answers = non-missing responses minus refusals.
  n_answered <- sum(!is.na(responses)) - n_refused
  n_total <- length(responses)
  paste("Note:",
        paste(
          paste(n_answered, n_total, sep = "/"),
          "participants answered this question."), sep = " ")
})
output$howtousetext <- renderText("Start by selecting a plot type. If you would like to visualize one variable, select \"bar\"; if you would
like to select two variables, select \"heatmap\". Then, choose the variable(s) you would like to visualize from the
dropdown menu(s) below.")
}
# Run the application
# Wires the fluidPage layout (ui) to the reactive logic (server) and
# starts the Shiny app.
shinyApp(ui = ui, server = server)
|
/final_project_app/app.R
|
no_license
|
mflesaker/SDS235-FP
|
R
| false
| false
| 86,397
|
r
|
library(shiny)
library(tidyverse)
library(scales)
library(bslib)
library(rsconnect)
library(shinythemes)
library(plotly)
library(shinyWidgets)
raw_data <- read_csv("data.csv") %>%
select(
-QKEY, -INTERVIEW_START_W56, -INTERVIEW_END_W56, -DEVICE_TYPE_W56, -SAMPLE_W56, -FORM_W56, -WHYDATE10YRHARDOE_M1_W56, -WHYDATE10YRHARDOE_M2_W56, -WHYDATE10YRHARDOE_M3_W56, -WHYDATE10YRHARD_TECH_W56, -WHYDATE10YREASYOE_M1_W56, -WHYDATE10YREASYOE_M2_W56, -WHYDATE10YREASYOE_M3_W56, -WHYDATE10YREASY_TECH_W56, -ONIMPACTPOSOE_M1_W56, -ONIMPACTPOSOE_M2_W56,
-ONIMPACTPOSOE_M3_W56, -ONIMPACTNEGOE_M1_W56, -ONIMPACTNEGOE_M2_W56,
-ONIMPACTNEGOE_M3_W56, -F_ACSWEB, -F_VOLSUM, -WEIGHT_W56_ATPONLY, -WEIGHT_W56, -F_ATTEND
)
# Get variable names from original dataset.
# names() already returns the character vector we need; the original
# dput(names(raw_data)) call only added console noise, since dput() prints
# its argument's deparsed form as a side effect and then returns it.
var_names <- names(raw_data)
# Convert variable names to a single-column matrix (column name "var_names")
# so data.frame(var_names, questions) below yields the expected columns.
var_names <- cbind(var_names)
# Get questions from questionnaire corresponding to each variable
questions <- c(
"Marital Status",
"Current Committed Relationship Status",
"Have you ever been in a committed romantic relationship?",
"Are you currently casually dating anyone?",
"What they are seeking in their dating/romantic life",
"Whether \"Just like being single\" is a reason why not seeking relationship/dating",
"Whether \"Have more important priorities right now\" is a reason why not seeking relationship/dating",
"Whether \"Feel like I am too old to date\" is a reason why not seeking relationship/dating",
"Whether \"Have health problems that make it difficult to date\" is a reason why not seeking relationship/dating",
"Whether \"Haven’t had luck with dating or relationships in the past\" is a reason why not seeking relationship/dating",
"Whether \"Too busy\" is a reason why not seeking relationship/dating",
"Whether \"Feel like no one would be interested in dating me\" is a reason why not seeking relationship/dating",
"Whether \"Not ready to date after losing my spouse (if widowed) or ending a relationship\" is a reason why not seeking relationship/dating",
"How long have you been in your current romantic relationship?",
"Overall, would you say that things in your relationship are going...",
"Overall, would you say that things in your dating life are going...",
"Have you ever used an online dating site or dating app?",
"Are you currently using an online dating site or dating app?",
"How did you first meet your spouse or partner?",
"Where online did you first meet your spouse or partner?",
"Compared to 10 years ago, for most people, do you think dating is...",
"Is giving a hug acceptable on a first date?",
"Is kissing acceptable on a first date?",
"Is having sex acceptable on a first date?",
"Is sex between unmarried adults who are in a committed relationship acceptable?",
"Is having an open relationship- that is, a committed relationship where both people agree that it is acceptable to date or have sex with other people acceptable?",
"Is casual sex between consenting adults who are not in a committed relationship acceptable?",
"Is two consenting adults exchanging sexually explicit images of themselves acceptable?",
"Is kissing someone on a date without asking permission first acceptable?",
"Have you ever searched for information online about someone you were romantically interested in?",
"Regardless of whether you would do it yourself, do you think it’s ever acceptable for someone to look through their significant other’s cellphone without their knowledge?",
"If you decided after a first date that you didn’t want to go out with that person again, what is the most likely way you would let them know?",
"Is it acceptable to break up with someone you're casually dating in person?",
"Is it acceptable to break up with someone you're casually dating through a phone call?",
"Is it acceptable to break up with someone you're casually dating through email?",
"Is it acceptable to break up with someone you're casually dating through a private message on a social media site?",
"Is it acceptable to break up with someone you're casually dating through a text message?",
"Is it acceptable to break up with someone you're in a committed relationship with in person?",
"Is it acceptable to break up with someone you're in a committed relationship with through a phone call?",
"Is it acceptable to break up with someone you're in a committed relationship with through email?",
"Is it acceptable to break up with someone you're in a committed relationship with through a private message on a social media site?",
"Is it acceptable to break up with someone you're in a committed relationship with through a text message?",
"Do you think the increased focus on sexual harassment and assault over the last few years has made it easier or harder for MEN to know how to interact with someone they’re on a date with?",
"Do you think the increased focus on sexual harassment and assault over the last few years has made it easier or harder for WOMEN to know how to interact with someone they’re on a date with?",
"Overall, what type of effect would you say online dating sites and dating apps have had on dating and relationships?",
"Compared to relationships that begin in person, in general, do you think relationships where people first meet through an online dating site or dating app are…",
"In general, how safe do you think online dating sites and dating apps are as a way to meet people?",
"How common is people being harassed or bullied on online dating sites and dating apps?",
"How common is people receiving sexually explicit messages or images they did not ask for on online dating sites and dating apps?",
"How common is people lying about themselves to appear more desirable on online dating sites and dating apps?",
"How common are privacy violations, such as data breaches or identity theft on online dating sites and dating apps?",
"How common is people setting up fake accounts in order to scam others on online dating sites and dating apps?",
"Would you ever consider being in a committed relationship with someone who is of a different religion than you?",
"Would you ever consider being in a committed relationship with someone who is of a different race or ethnicity than you?",
"Would you ever consider being in a committed relationship with someone who has a significant amount of debt?",
"Would you ever consider being in a committed relationship with someone who is raising children from another relationship?",
"Would you ever consider being in a committed relationship with someone who lives far away from you?",
"Would you ever consider being in a committed relationship with someone who is a Republican?",
"Would you ever consider being in a committed relationship with someone who is a Democrat?",
"Would you ever consider being in a committed relationship with someone who makes significantly more money than you?",
"Would you ever consider being in a committed relationship with someone who makes significantly less money than you?",
"Would you ever consider being in a committed relationship with someone who voted for Donald Trump?",
"Would you ever consider being in a committed relationship with someone who voted for Hillary Clinton?",
"Would you ever consider being in a committed relationship with someone who is 10 years older than you?",
"Would you ever consider being in a committed relationship with someone who is 10 years younger than you?",
"How much pressure, if any, do you feel from family members to be in a committed relationship?",
"How much pressure, if any, do you feel from your friends to be in a committed relationship?",
"How much pressure, if any, do you feel from society to be in a committed relationship?",
"In the past year, how easy or difficult has it been for you to find people to date?",
"It has been difficult for you to find people to date because... there is a limited number of people in my area for me to date",
"It has been difficult for you to find people to date because... it's hard for me to find someone who meets my expectations",
"It has been difficult for you to find people to date because... it's hard to find someone who's looking for the same type of relationship as me",
"It has been difficult for you to find people to date because... it's hard for me to approach people",
"It has been difficult for you to find people to date because... people aren't interested in dating me",
"It has been difficult for you to find people to date because... I'm too busy",
"Overall, would you say your OWN personal experiences with online dating sites or dating apps have been…",
"In general in the past year, has using online dating sites or dating apps made you feel more confident or insecure?",
"In general in the past year, has using online dating sites or dating apps made you feel more optimistic or pessimistic?",
"In general in the past year, has using online dating sites or dating apps made you feel more hopeful or frustrated?",
"Have you ever gone on a date with someone you met through an online dating site or dating app?",
"Have you ever been in a committed relationship or married someone you first met through an online dating site or dating app?",
"Have you ever come across the online dating profile of someone you already know offline?",
"How important is it to you that online profiles included hobbies and interests?",
"How important is it to you that online profiles included political affiliation?",
"How important is it to you that online profiles included religious beliefs?",
"How important is it to you that online profiles included occupation?",
"How important is it to you that online profiles included racial or ethnic background?",
"How important is it to you that online profiles included height?",
"How important is it to you that online profiles included if they have children?",
"How important is it to you that online profiles included type of relationship they're looking for?",
"How important is it to you that online profiles included photos of themselves?",
"How easy or difficult was it for you to find people on online dating sites or dating apps who you were physically attracted to?",
"How easy or difficult was it for you to find people on online dating sites or dating apps who shared your hobbies and interests?",
"How easy or difficult was it for you to find people on online dating sites or dating apps who were looking for the same kind of relationship as you?",
"How easy or difficult was it for you to find people on online dating sites or dating apps who seemed like someone you would want to meet in person?",
"How would you characterize the number of messages you have received on dating sites/apps?",
"How would you characterize the number of messages you have received from people you were interested in on dating sites/apps?",
"How well, if at all, do you feel you understand why online dating sites or dating apps present certain people as potential matches for you?",
"How concerned are you, if at all, about how much data online dating sites or dating apps collect about you?",
"Do you ever use social media sites, like Facebook, Twitter, or Instagram?",
"How often, if ever, do you see people posting things about their romantic relationships on social media?",
"In general, do the posts you see on social media about other people’s romantic relationships make you feel better or worse about your own relationships?",
"In general, do the posts you see on social media about other people’s romantic relationships make you feel better or worse about your own dating life?",
"Have you ever used social media to check up on someone that you used to date or be in a relationship with?",
"Have you ever used social media to share or discuss things about your relationship or dating life?",
"As far as you know, does your spouse or partner have a cellphone?",
"As far as you know, does your spouse or partner use social media sites?",
"As far as you know, does your spouse or partner play video games on a computer, game console or cellphone?",
"How important, if at all, is social media to you personally when it comes to keeping up with what's going on your spouse's or partner's life?",
"How important, if at all, is social media to you personally when it comes to showing how much you care about your spouse or partner?",
"Have you ever felt jealous or unsure about your relationship because of the way your current spouse or partner interacts with other people on social media?",
"How often, if ever, do you feel as if your spouse or partner is distracted by their cellphone when you are trying to have a conversation with them?",
"How often, if ever, are you bothered by the amount of time your spouse or partner spends on their cellphone?",
"How often, if ever, are you bothered by the amount of time your spouse or partner spends on social media sites?",
"How often, if ever, are you bothered by the amount of time your spouse or partner spends playing video games?",
"Have you ever given your spouse or partner the password or passcode to your email account?",
"Have you ever given your spouse or partner the password or passcode to any of your social media accounts?",
"Have you ever given your spouse or partner the password or passcode to your cellphone?",
"Have you ever looked through your current spouse's or partner's cellphone without their knowledge?",
"Have you ever heard of ghosting?",
"Have you ever heard of breadcrumbing?",
"Have you ever heard of phubbing?",
"Have you ever heard of catfishing?",
"Have you ever heard of friends with benefits?",
"Have you ever had someone you’ve gone out with suddenly stop answering your phone calls or messages without explanation (sometimes called “ghosting”)?",
"Has someone you were dating or on a date with ever pressured you for sex?",
"Has someone you were dating or on a date with ever touched you in a way that made you feel uncomfortable?",
"Has someone you were dating or on a date with ever sent you sexually explicit images that you didn't ask for?",
"As far as you know, has someone you were dating or been on a date with ever spread rumors about your sexual history?",
"As far as you know, has someone you were dating or been on a date with ever shared a sexually explicit image of you without your consent?",
"As far as you know, has someone you were dating or been on a date with ever publically shared your contact information or address without your permission?",
"Thinking about your own personal experiences, has someone ever called you an offensive name ON AN ONLINE DATING SITE OR DATING APP?",
"Thinking about your own personal experiences, has someone ever threatened to physically harm you ON AN ONLINE DATING SITE OR DATING APP?",
"Thinking about your own personal experiences, has someone ever sent you a sexually explicit message or image you didn’t ask for ON AN ONLINE DATING SITE OR DATING APP?",
"Thinking about your own personal experiences, has someone ever continued to contact you after you said you were not interested ON AN ONLINE DATING SITE OR DATING APP?",
"What sex is your spouse or partner?",
"Sexual orientation",
"Whether or not live in a metropolitan area",
"Region of the US they reside in",
"Type of region they reside in",
"Age category",
"Sex",
"Education",
"Education (expanded)",
"Race/Ethnicity",
"Place of birth",
"Citizenship Status",
"Marital Status",
"Religion",
"Whether born-again or evangelical Christian",
"Political Party (Democrat/Republican/Independent)",
"Political Party (Democrat/Republican dichotimized)",
"Political Party (Dem/Lean Dem or Rep/Lean Rep dichotomized)",
"Income",
"Income (trichotomized)",
"Voting registration status",
"Voting registration status (trichotomized)",
"Political ideology"
)
# Convert questions to single col
# (a one-column matrix whose column is named "questions", mirroring the
# var_names matrix built above)
questions <- cbind(questions)
# Combine variables and questions into df
# lookup_questions maps each human-readable question string to its raw-data
# column name; the server blocks filter on `questions` and pull `var_names`.
lookup_questions <- data.frame(var_names, questions)
## code copied and modified from https://mastering-shiny.org/basic-ui.html (selectInput, plotOutput
## idea/syntax)
## and https://mastering-shiny.org/action-layout.html (titlePanel, sidebarLayout, sidebarPanel,
## mainPanel idea/syntax)
## and https://mastering-shiny.org/action-layout.html (tabPanel idea/syntax)
## and https://shiny.rstudio.com/articles/layout-guide.html (navbarPage idea/syntax and
## fluidRow, column idea and syntax)
## and https://shiny.rstudio.com/reference/shiny/1.6.0/textOutput.html (textOutput idea/syntax)
## and https://campus.datacamp.com/courses/case-studies-building-web-applications-with-shiny-in-r/shiny-review?ex=3
## (strong("text") idea and syntax)
ui <- fluidPage(
## https://stackoverflow.com/questions/47743789/change-the-default-error-message-in-shiny
tags$head(tags$style(".shiny-output-error{visibility: hidden}")),
tags$head(tags$style(".shiny-output-error:after{content: 'Both of these questions are conditional, and no one was asked both of the questions. Please select a different combination of variables.'; visibility: visible}")),
theme = shinytheme("flatly"),
navbarPage(
"CSC/SDS 235 Final Project: Michelle, Lauryn, Grace",
tabPanel(
"Interactive Dashboard",
fluidRow(
column(12,
## h3 from https://shiny.rstudio.com/tutorial/written-tutorial/lesson2/
h3("Explore the Data!"),
textOutput("howtousetext"),
## https://shiny.rstudio.com/tutorial/written-tutorial/lesson2/ br idea
br())
),
sidebarLayout(
sidebarPanel(
## https://shiny.rstudio.com/reference/shiny/latest/radioButtons.html
prettyRadioButtons(inputId = "plotType", label = "Plot Type", c(Bar = "bar", Heatmap = "count"), selected = "bar"),
## selected = idea and syntax from https://shiny.rstudio.com/reference/shiny/0.12.2/selectInput.html
selectInput(inputId = "variable1", label = "Choose a first variable", selected = "Current Committed Relationship Status", lookup_questions$questions),
## code for this conditional panel is directly copied and pasted from
## the example at https://shiny.rstudio.com/reference/shiny/1.3.0/conditionalPanel.html -----------
# Only show this panel if the plot type is a two-way count
conditionalPanel(
condition = "input.plotType == 'count'",
selectInput(inputId = "variable2", label = "Choose a second variable", selected = "Region of the US they reside in", lookup_questions$questions),
),
textOutput("disclaimer_text")
),
### ---------------------------------------------------------------------------------------
## the conditional plot code is based on the conditional panel code above
mainPanel(
conditionalPanel(
condition = "input.plotType == 'bar'",
# plotlyOutput and renderPlotly
# from https://stackoverflow.com/questions/57085342/renderplotly-does-not-work-despite-not-having-any-errors
plotlyOutput("plotbar"),
br(),
textOutput("numparticipantsasked"),
br(),
textOutput("numparticipantsaskedexplan")
),
conditionalPanel(
condition = "input.plotType == 'count'",
plotlyOutput("heatmap"),
br(),
textOutput("heatmaptextboxone"),
textOutput("heatmaptextboxtwo"),
br(),
textOutput("countexplaintwo")
)
)
)
),
tabPanel(
"Static Data Analysis",
fluidRow(
column(
12,
h3("About Our Project"),
htmlOutput("aboutprojtext"),
## https://shiny.rstudio.com/tutorial/written-tutorial/lesson2/ br idea
br(),
h3("Characterizing the Sample"),
textOutput("characterizing_sample_text"),
br(),
fluidRow(
column(1),
column(5,
plotlyOutput("characterizingsamplemstatus"),
br(),
textOutput("partaskedmstatus"),
br(),
br()),
column(5,
plotlyOutput("characterizingsampleorientation"),
br(),
textOutput("partansweredorientationi"),
br(),
br()
),
column(1),
),
fluidRow(
column(1),
column(5,
plotlyOutput("characterizingsampleideology"),
br(),
textOutput("partansweredpoliticalideo"),
br(),
br()),
column(5,
plotlyOutput("characterizingsamplerace"),
br(),
textOutput("partanswerrace"),
br(),
br()),
column(1)
),
fluidRow(
column(1),
column(5,
plotlyOutput("characterizingsampleage"),
br(),
textOutput("partanswerage")),
column(5,
plotlyOutput("characterizingsampleeduc"),
br(),
textOutput("partanswereduc")),
column(1)
)
)),
fluidRow(
column(
12,
h3("Interesting Findings"),
textOutput("overallinterestingfindingstext"),
br(),
fluidRow(column(3),
column(6,
plotlyOutput("thingsindatinglife"),
br(),
textOutput("partanswerthings"),
br(),
textOutput("thingsdaatinglifetext"),
br(),
br()),
column(3)
),
fluidRow(
column(6,
plotlyOutput("datinglifebyage"),
br()),
column(6,
plotlyOutput("datinglifebysex"),
br())
),
fluidRow(column(3),
column(6,
textOutput("summarydatinglife"),
br(),
br()
),
column(3)
),
fluidRow(column(3),
column(6,
plotlyOutput("tenyears"),
br(),
textOutput("partanswer10years"),
br(),
textOutput("tenyearstext"),
br(),
br()),
column(3)
),
fluidRow(column(6,
plotlyOutput("tenyearsbyage"),
br()),
column(6,
plotlyOutput("tenyearsbysex"),
br())
),
fluidRow(column(3),
column(6,
textOutput("summarytenyears"),
br(),
br()
),
column(3)
),
fluidRow(column(3),
column(6,
plotlyOutput("feelings_by_sex"),
br(),
textOutput("partanswerfeelings"),
br(),
textOutput("onlinenegfeelingstext"),
br(),
br()
),
column(3)
),
fluidRow(column(3),
column(6,
plotlyOutput("bullingharass"),
br(),
textOutput("partanswerbullyharass"),
br(),
textOutput("bullingharasstext"),
br(),
br()
),
column(3)
),
fluidRow(
column(2),
column(6,
plotlyOutput("effectofonline"),
br(),
textOutput("effectofonlinetext"),
br(),
br()
),
column(2,
textOutput("summaryonline"),
br(),
br()),
column(2)
),
fluidRow(
column(2),
column(6,
plotlyOutput("jealousy_by_sex"),
br(),
textOutput("jealousypartaskedtext"),
br(),
br()
),
column(2,
textOutput("textbtwo"),
br(),
br()),
column(2)
),
fluidRow(column(6,
plotlyOutput("botheredbycell"),
br(),
textOutput("cellphoneonepartanswer"),
br()),
column(6,
plotlyOutput("distractedbycell"),
br(),
textOutput("cellphonetwopartanswer"),
br())
),
fluidRow(column(2),
column(8,
textOutput("cellphonetext"),
br(),
textOutput("finaltext")),
column(2))
)
),
fluidRow(
column(
12,
## footer hr() from https://stackoverflow.com/questions/30205034/shiny-layout-how-to-add-footer-disclaimer/38241035
hr(),
h4("References"),
htmlOutput("citations_textone"),
htmlOutput("citations_texttwo"),
br()
)
)
)
)
)
## code copied and modified from https://mastering-shiny.org/basic-app.html and
# https://mastering-shiny.org/basic-ui.html
server <- function(input, output, session) {
# Interactive bar chart for the single-variable view: one bar per response
# category of the selected question, with count and percentage in the
# plotly hover tooltip.
output$plotbar <- renderPlotly({
  # Raw-data column name for the selected question; assumes the question
  # text matches exactly one lookup row — TODO confirm for duplicated
  # labels such as "Marital Status".
  var1 <- lookup_questions %>%
    filter(questions == input$variable1) %>%
    pull(var_names)
  counts <- raw_data %>%
    ## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
    filter(!is.na(.data[[var1]])) %>%
    # group_by_() is deprecated; the .data pronoun is the supported way to
    # group by a column whose name is held in a character variable.
    group_by(.data[[var1]]) %>%
    summarize(n = n()) %>%
    mutate(pct = n / sum(n))
  g <- ggplot(counts, aes(
    x = .data[[var1]],
    y = n,
    # Tooltip text: raw count plus percentage rounded to whole percent.
    text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
  )) +
    geom_col() +
    xlab(str_wrap(input$variable1)) +
    ## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
    scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
    scale_y_continuous("Number of Participants", expand = c(0,0)) +
    theme(panel.background = element_blank(), axis.ticks = element_blank(),
          axis.line = element_line(color = "black"),
          axis.title.x = element_text(vjust = 1))
  ## tooltip from
  ## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
  ## and
  ## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
  ggplotly(g, tooltip = "text")
})
output$numparticipantsasked <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]))))
) %>%
pull(num_not_na[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]))))
) %>%
pull(n[1])
text <- paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants were asked this question."), sep = " ")
})
  # Static caption explaining why the count of participants asked can be
  # smaller than the full sample: survey routing skipped questions
  # conditionally on earlier answers.
  output$numparticipantsaskedexplan <- renderText(
    "Whether or not participants
    were asked certain questions was often conditional on previous
    responses. For example, only those who are married were not asked
    whether they were in a committed relationship."
  )
  # Same explanatory caption as numparticipantsaskedexplan, rendered under a
  # second output id so it can appear in a different UI panel (Shiny outputs
  # cannot be reused across panels).
  output$countexplaintwo <- renderText(
    "Whether or not participants
    were asked certain questions was often conditional on previous
    responses. For example, only those who are married were not asked
    whether they were in a committed relationship."
  )
# Attempt to build heatmap
output$heatmap <- renderPlotly({
if(input$variable1 != input$variable2){
g <- raw_data %>%
filter(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])))) %>%
filter(!is.na(get(lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])))) %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by_(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]),
lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])
) %>%
summarize(
n = n()
) %>%
ggplot(aes(
x = get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])),
y = get(lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])),
fill = n
)) +
geom_tile() +
## HTML color codes from https://htmlcolorcodes.com/
## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
scale_fill_gradient(low = "#FFFFFF", high = "#000773", na.value = "#8E8E8E") +
xlab(str_wrap(input$variable1)) +
ylab(str_wrap(input$variable2)) +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "fill")
}
else{
g <- raw_data %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by_(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n()
) %>%
ggplot(aes(
x = get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])),
y = n
)) +
geom_col() +
xlab(str_wrap(input$variable1)) +
## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
## tooltip from
## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
## and
## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
ggplotly(g, tooltip = "y")
}
})
output$heatmaptextboxone <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]))))
) %>%
pull(num_not_na[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable1) %>%
pull(var_names[1]))))
) %>%
pull(n[1])
text <- paste(paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants were asked"), sep = " "), str_wrap(input$variable1))
})
output$heatmaptextboxtwo <- renderText({
total_asked <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1]))))
) %>%
pull(num_not_na[1])
total_people <- raw_data %>%
select(
lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1])) %>%
summarize(
n = n(),
## count NAs https://stackoverflow.com/questions/44290704/count-non-na-values-by-group
num_not_na = sum(!is.na(get(lookup_questions %>%
filter(questions == input$variable2) %>%
pull(var_names[1]))))
) %>%
pull(n[1])
text <- paste(paste("Note:",
paste(
paste(total_asked, total_people, sep = "/"),
"participants were asked"), sep = " "), str_wrap(input$variable2))
})
output$disclaimer_text <- renderText(
"Disclaimer: Some question text was changed for clarity or conciseness"
)
  # Citation for the survey report, rendered as HTML so the data link works.
  output$citations_textone <- renderUI(HTML(
    "Vogels, E. A., & Anderson, M. (2020, May 8).
    Dating and Relationships in the Digital Age. Pew Research Center. <a href ='https://www.pewresearch.org/internet/2020/05/08/dating-and-relationships-in-the-digital-age/'>Link to the data</a>."
  ))
  # Citation for the survey methodology report, rendered as HTML for the link.
  output$citations_texttwo <- renderUI(HTML(
    "Pew Research Center. (2019).
    Pew Research Center’s American Trends Panel Wave 56 Methodology Report.
    Downloaded as metadata alongside the data from <a href ='https://www.pewresearch.org/internet/2020/05/08/dating-and-relationships-in-the-digital-age/'>this link</a>"
  ))
#Static plots for Interesting Findings
output$feelings_by_sex <- renderPlotly({
raw_data$ONFEEL.c_W56 <- factor(raw_data$ONFEEL.c_W56,levels = c("Frustrated", "Neither", "Hopeful", "Refused"))
g <- raw_data %>%
filter(F_SEX != "Refused") %>%
filter(ONFEEL.c_W56 != "Refused") %>%
filter(!is.na(F_SEX) & !is.na(ONFEEL.c_W56)) %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by_(
lookup_questions %>%
filter(questions == "In general in the past year, has using online dating sites or dating apps made you feel more hopeful or frustrated?") %>%
pull(var_names[1]),
lookup_questions %>%
filter(questions == "Sex") %>%
pull(var_names[1])
) %>%
summarize(
n = n()
) %>%
ggplot(aes(x = ONFEEL.c_W56, y =F_SEX, fill = n)) +
geom_tile() +
ggtitle("Feelings After Using Online Dating, Sorted by Sex") +
xlab(str_wrap("In general in the past year, has using online dating sites or dating apps made you feel more hopeful or frustrated?"))+
ylab("Sex")+
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))+
scale_fill_gradient(low = "#FFFFFF", high = "#004D71", na.value = "#8E8E8E")
ggplotly(g, tooltip = "fill")
})
output$jealousy_by_sex <- renderPlotly({
raw_data$SNSFEEL_W56 <- factor(raw_data$SNSFEEL_W56,levels = c("Yes, have felt this way", "No, have never felt this way", "Refused"))
g <- raw_data %>%
filter(F_SEX != "Refused" & SNSFEEL_W56 != "Refused") %>%
filter(!is.na(F_SEX) & !is.na(SNSFEEL_W56)) %>%
### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
group_by_(
lookup_questions %>%
filter(questions == "Have you ever felt jealous or unsure about your relationship because of the way your current spouse or partner interacts with other people on social media?") %>%
pull(var_names[1]),
lookup_questions %>%
filter(questions == "Sex") %>%
pull(var_names[1])
) %>%
summarize(
n = n()
) %>%
ggplot(aes(x = SNSFEEL_W56, y =F_SEX, fill = n)) +
geom_tile() +
ggtitle("Jealousy in Relationships and Social Media, Sorted by Sex") +
xlab(str_wrap("Have you ever felt jealous or unsure about your relationship because of the way your current spouse or partner interacts with other people on social media?"))+
ylab("Sex")+
scale_x_discrete(labels = function(x) str_wrap(x, width = 10))+
scale_fill_gradient(low = "#FFFFFF", high = "#DD7405", na.value = "#8E8E8E")
ggplotly(g, tooltip = "fill")
})
  # "About this project" blurb, rendered as HTML so the dataset link works.
  output$aboutprojtext<- renderUI(HTML("This application provides an analysis of and means to
                                 interact with data from the 2019 Pew Research Center survey on the
                                 intersection between romantic relationships and technology. The set of participants recruited for the survey, part of the American Trends Panel, were designed to serve as a representative sample of the US (Pew Research Center, 2019).
                                 Download the dataset with a Pew Research Center account and view their
                                 analysis <a href ='https://www.pewresearch.org/internet/2020/05/08/dating-and-relationships-in-the-digital-age/'>here</a> (Vogels & Anderson, 2020)."))
  # Narrative caption: females reported more pessimism about online dating
  # than males (41% vs 35% among those asked).
  output$onlinenegfeelingstext <- renderText("Now, we will turn specifically to online dating, as these apps and sites are a major component of modern dating. Here, we notice that females tend to experience more negative feelings regarding online dating. For people who used online dating, more females felt pessimistic (41% of all females asked this question) than males (35%).")
  # Narrative caption discussing social-media jealousy by sex (29% vs 15%).
  output$textbtwo <- renderText("We thought that, perhaps jealousy and insecurity inflicted by social media
                                play a role here. We note that more females in committed relationships reported feeling insecure because of their partner's social media use (29%) than males (15%). However, both of these
                                proportions are relatively low, so social media jealousy may not account for dissatisfaction with dating.")
  # Summary of the sample's demographic composition for the sample tab.
  output$characterizing_sample_text <- renderText("This sample is largely married (40%), straight (68%), politically moderate (36%) or liberal (27%), non-Hispanic white (69%), and ages 30-64 (64%) with a college degree or higher (46%).")
output$characterizingsamplemstatus <- renderPlotly({
g <- raw_data %>%
filter(MARITAL_W56 != "Refused") %>%
filter(!is.na(MARITAL_W56)) %>%
group_by(MARITAL_W56) %>%
summarize(
n = n()
) %>%
mutate(
pct = n/sum(n)
) %>%
# reorder from https://sebastiansauer.github.io/ordering-bars/
ggplot(aes(x = reorder(MARITAL_W56, -n), y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
geom_col() +
xlab("Marital Status") +
ylab("Number of Participants") +
ggtitle("Number of Participants by Marital Status") +
scale_y_continuous(expand = c(0,0)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
## tooltip = "text" with text specified above idea from https://plotly.com/ggplot2/interactive-tooltip/
ggplotly(g, tooltip = 'text')
})
output$characterizingsampleorientation <- renderPlotly({
g <- raw_data %>%
filter(ORIENTATIONMOD_W56 != "Refused") %>%
filter(!is.na(ORIENTATIONMOD_W56)) %>%
group_by(ORIENTATIONMOD_W56) %>%
summarize(
n = n()
) %>%
mutate(
pct = n/sum(n)
) %>%
# reorder from https://sebastiansauer.github.io/ordering-bars/
ggplot(aes(x = reorder(ORIENTATIONMOD_W56, -n), y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
geom_col() +
xlab("Sexual Orientation") +
ggtitle("Number of Participants by Sexual Orientation") +
ylab("Number of Participants") +
scale_y_continuous(expand = c(0,0)) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
theme(panel.background = element_blank(), axis.ticks = element_blank(),
axis.line = element_line(color = "black"),
axis.title.x = element_text(vjust = 1))
ggplotly(g, tooltip = 'text')
})
  # Bar chart of participants by political ideology, ordered from very
  # conservative to very liberal via the factor levels below. "Refused" and
  # NA responses are dropped.
  output$characterizingsampleideology <- renderPlotly({
    ## reorder from https://sebastiansauer.github.io/ordering-bars/
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$F_IDEO <- factor(raw_data$F_IDEO,levels = c("Very conservative", "Conservative", "Moderate", "Liberal", "Very liberal", "Refused"))
    g <- raw_data %>%
      filter(F_IDEO != "Refused") %>%
      filter(!is.na(F_IDEO)) %>%
      group_by(F_IDEO) %>%
      summarize(
        n = n()
      ) %>%
      mutate(
        # Share of the plotted total, used only for hover text.
        pct = n/sum(n)
      ) %>%
      ggplot(aes(x = F_IDEO, y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
      geom_col() +
      xlab("Political Ideology") +
      ggtitle("Number of Participants by Political Ideology") +
      ylab("Number of Participants") +
      scale_y_continuous(expand = c(0,0)) +
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
      theme(panel.background = element_blank(), axis.ticks = element_blank(),
            axis.line = element_line(color = "black"),
            axis.title.x = element_text(vjust = 1))
    ggplotly(g, tooltip = 'text')
  })
  # Bar chart of participants by race/ethnicity. "Refused" and NA responses
  # are dropped; hover text shows each category's share of the plotted total.
  output$characterizingsamplerace <- renderPlotly({
    g <- raw_data %>%
      filter(F_RACETHN != "Refused") %>%
      filter(!is.na(F_RACETHN)) %>%
      group_by(F_RACETHN) %>%
      summarize(
        n = n()
      ) %>%
      mutate(
        # Share of the plotted total, used only for hover text.
        pct = n/sum(n)
      ) %>%
      ggplot(aes(x = F_RACETHN, y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
      geom_col() +
      xlab("Race/Ethnicity") +
      ggtitle("Number of Participants by Race/Ethnicity") +
      ylab("Number of Participants") +
      scale_y_continuous(expand = c(0,0)) +
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
      theme(panel.background = element_blank(), axis.ticks = element_blank(),
            axis.line = element_line(color = "black"),
            axis.title.x = element_text(vjust = 1))
    ggplotly(g, tooltip = 'text')
  })
  # Bar chart of participants by age category. "DK/REF" and NA responses are
  # dropped; hover text shows each category's share of the plotted total.
  output$characterizingsampleage <- renderPlotly({
    g <- raw_data %>%
      filter(F_AGECAT != "DK/REF") %>%
      filter(!is.na(F_AGECAT)) %>%
      group_by(F_AGECAT) %>%
      summarize(
        n = n()
      ) %>%
      mutate(
        # Share of the plotted total, used only for hover text.
        pct = n/sum(n)
      ) %>%
      ggplot(aes(x = F_AGECAT, y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
      geom_col() +
      xlab("Age") +
      ggtitle("Number of Participants by Age") +
      ylab("Number of Participants") +
      scale_y_continuous(expand = c(0,0)) +
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
      theme(panel.background = element_blank(), axis.ticks = element_blank(),
            axis.line = element_line(color = "black"),
            axis.title.x = element_text(vjust = 1))
    ggplotly(g, tooltip = 'text')
  })
  # Bar chart of participants by education level, ordered from least to most
  # education via the factor levels below. "Don't know/Refused" and NA
  # responses are dropped.
  output$characterizingsampleeduc <- renderPlotly({
    ## reorder from https://sebastiansauer.github.io/ordering-bars/
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$F_EDUCCAT <- factor(raw_data$F_EDUCCAT,levels = c("H.S. graduate or less", "Some College", "College graduate+", "Don't know/Refused"))
    g <- raw_data %>%
      filter(F_EDUCCAT != "Don't know/Refused") %>%
      filter(!is.na(F_EDUCCAT)) %>%
      group_by(F_EDUCCAT) %>%
      summarize(
        n = n()
      ) %>%
      mutate(
        # Share of the plotted total, used only for hover text.
        pct = n/sum(n)
      ) %>%
      ggplot(aes(x = F_EDUCCAT, y = n, text = paste("Percent of Total:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "))) +
      geom_col() +
      xlab("Education Level") +
      ggtitle("Number of Participants by Education") +
      ylab("Number of Participants") +
      scale_y_continuous(expand = c(0,0)) +
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
      theme(panel.background = element_blank(), axis.ticks = element_blank(),
            axis.line = element_line(color = "black"),
            axis.title.x = element_text(vjust = 1))
    ggplotly(g, tooltip = 'text')
  })
  # Bar chart of how participants' dating lives are going (FAMSURV19DATING),
  # ordered worst-to-best via the factor levels below. "Refused" and NA
  # responses are dropped; hover text shows count and percentage.
  output$thingsindatinglife <- renderPlotly({
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$FAMSURV19DATING_W56 <- factor(raw_data$FAMSURV19DATING_W56,levels = c("Not at all well", "Not too well", "Fairly well", "Very well", "Refused"))
    g <- ggplot((raw_data %>%
                   ## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
                   filter(!is.na(FAMSURV19DATING_W56)) %>%
                   filter(FAMSURV19DATING_W56 != "Refused") %>%
                   group_by(FAMSURV19DATING_W56) %>%
                   summarize(
                     n = n(),
                   ) %>%
                   mutate(
                     pct = n/sum(n)
                   )), aes(
                     x = FAMSURV19DATING_W56,
                     y = n,
                     text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
                   )) +
      geom_col() +
      ggtitle("How Participants' Dating Lives are Going") +
      xlab("Overall, would you say that things in your dating life are going...") +
      ## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
      scale_y_continuous("Number of Participants", expand = c(0,0)) +
      theme(panel.background = element_blank(), axis.ticks = element_blank(),
            axis.line = element_line(color = "black"),
            #Attempt to fix margin - does not work
            #axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)))
            #This does not work either:
            axis.title.x = element_text(vjust = 1))
    ## tooltip from
    ## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
    ## and
    ## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
    ggplotly(g, tooltip = "text")
  })
  # Caption for the dating-life bar chart (70% reported things going poorly).
  # NOTE(review): output id has a typo ("daating") but renaming it would
  # break the matching UI textOutput() call, so it is kept as-is.
  output$thingsdaatinglifetext <- renderText("The majority of participants (70%) asked this question said that things in their dating
                              life are going not at all well or not too well. This highlights the trouble
                              many are facing with modern dating, whether participants' problems are technology related or not.")
  # Bar chart of perceived dating difficulty vs. 10 years ago (DATE10YR),
  # ordered harder-to-easier via the factor levels below. "Refused" and NA
  # responses are dropped; hover text shows count and percentage.
  output$tenyears <- renderPlotly({
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$DATE10YR_W56 <- factor(raw_data$DATE10YR_W56,levels = c("Harder today", "About the same", "Easier today", "Refused"))
    g <- ggplot((raw_data %>%
                   ## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
                   filter(!is.na(DATE10YR_W56)) %>%
                   filter(DATE10YR_W56 != "Refused") %>%
                   group_by(DATE10YR_W56) %>%
                   summarize(
                     n = n(),
                   ) %>%
                   mutate(
                     pct = n/sum(n)
                   )), aes(
                     x = DATE10YR_W56,
                     y = n,
                     text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
                   )) +
      geom_col() +
      xlab("Compared to 10 years ago, for most people, do you think dating is...") +
      ggtitle("Difficulty of Dating Now Compared to the Past") +
      ## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
      scale_y_continuous("Number of Participants", expand = c(0,0)) +
      theme(panel.background = element_blank(), axis.ticks = element_blank(),
            axis.line = element_line(color = "black"),
            #Attempt to fix margin - does not work
            #axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)))
            #This does not work either:
            axis.title.x = element_text(vjust = 1))
    ## tooltip from
    ## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
    ## and
    ## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
    ggplotly(g, tooltip = "text")
  })
  # Caption for the ten-years comparison chart (48% say harder, 18% easier).
  output$tenyearstext <- renderText("Participants also believe that dating has gotten more difficult over time.
                                    A plurality of respondents said that dating is harder today than it was 10 years
                                    ago (48%), while only 18% think that dating is easier today.")
output$overallinterestingfindingstext <- renderText("Many, but not all, participants expressed struggles or dissatisfaction
with modern dating. On this survey, which was collected before the COVID-19 pandemic,
participants identifying as male and female, and across ages, reported difficulties. Many also reported
feeling frustrated with online dating, and noted the prevalence of bullying and harassment. As a disclaimer, please note that this survey and
the following analysis binary sex as a proxy for gender identity. This is a
flawed and incomplete measure of gender.")
  # Heatmap: dating-life assessment (FAMSURV19DATING) crossed with sex.
  # "Refused" responses on either variable are excluded. The fill scale uses
  # limits = c(0, 300), the same limits as the companion by-age plot, so the
  # two plots' colors are directly comparable.
  output$datinglifebysex <- renderPlotly({
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$FAMSURV19DATING_W56 <- factor(raw_data$FAMSURV19DATING_W56,levels = c("Not at all well", "Not too well", "Fairly well", "Very well", "Refused"))
    g <- raw_data %>%
      filter(!is.na(FAMSURV19DATING_W56)) %>%
      filter(!is.na(F_SEX)) %>%
      filter(F_SEX != "Refused") %>%
      filter(FAMSURV19DATING_W56 != "Refused") %>%
      ### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
      group_by(FAMSURV19DATING_W56,
               F_SEX
      ) %>%
      summarize(
        n = n()
      ) %>%
      ggplot(aes(
        x = FAMSURV19DATING_W56,
        y = F_SEX,
        fill = n
      )) +
      geom_tile() +
      ggtitle("How Participants' Dating Lives are Going, Sorted by Sex") +
      ## HTML color codes from https://htmlcolorcodes.com/
      ## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
      scale_fill_gradient(low = "#FFFFFF", high = "#000773", na.value = "#8E8E8E", limits = c(0,300)) +
      xlab("Overall, would you say that things in your dating life are going...") +
      ylab("Sex") +
      ## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
    ## tooltip from
    ## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
    ## and
    ## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
    ggplotly(g, tooltip = "fill")
  })
  # Heatmap: dating-life assessment (FAMSURV19DATING) crossed with age
  # category. "Refused"/"DK/REF" responses are excluded. The fill scale uses
  # limits = c(0, 300), the same limits as the companion by-sex plot, so the
  # two plots' colors are directly comparable.
  output$datinglifebyage <- renderPlotly({
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$FAMSURV19DATING_W56 <- factor(raw_data$FAMSURV19DATING_W56,levels = c("Not at all well", "Not too well", "Fairly well", "Very well", "Refused"))
    g <- raw_data %>%
      filter(!is.na(FAMSURV19DATING_W56)) %>%
      filter(!is.na(F_AGECAT)) %>%
      filter(F_AGECAT != "DK/REF") %>%
      filter(FAMSURV19DATING_W56 != "Refused") %>%
      ### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
      group_by(FAMSURV19DATING_W56,
               F_AGECAT
      ) %>%
      summarize(
        n = n()
      ) %>%
      ggplot(aes(
        x = FAMSURV19DATING_W56,
        y = F_AGECAT,
        fill = n
      )) +
      geom_tile() +
      ggtitle("How Participants' Dating Lives are Going, Sorted by Age") +
      ## HTML color codes from https://htmlcolorcodes.com/
      ## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
      scale_fill_gradient(low = "#FFFFFF", high = "#000773", na.value = "#8E8E8E", limits = c(0,300)) +
      xlab("Overall, would you say that things in your dating life are going...") +
      ylab("Age") +
      ## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
    ## tooltip from
    ## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
    ## and
    ## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
    ggplotly(g, tooltip = "fill")
  })
  # Caption summarizing the two dating-life heatmaps above.
  output$summarydatinglife <- renderText("Findings about most people having some trouble with their dating life
                                         are found across ages and sexes.")
  # Heatmap: perceived dating difficulty vs. 10 years ago crossed with sex.
  # "Refused" responses on either variable are excluded. The fill scale uses
  # limits = c(0, 1400), the same limits as the companion by-age plot, so the
  # two plots' colors are directly comparable.
  output$tenyearsbysex <- renderPlotly({
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$DATE10YR_W56 <- factor(raw_data$DATE10YR_W56,levels = c("Harder today", "About the same", "Easier today", "Refused"))
    g <- raw_data %>%
      filter(!is.na(DATE10YR_W56)) %>%
      filter(!is.na(F_SEX)) %>%
      filter(F_SEX != "Refused") %>%
      filter(DATE10YR_W56 != "Refused") %>%
      ### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
      group_by(DATE10YR_W56,
               F_SEX
      ) %>%
      summarize(
        n = n()
      ) %>%
      ggplot(aes(
        x = DATE10YR_W56,
        y = F_SEX,
        fill = n
      )) +
      geom_tile() +
      ggtitle("Difficulty of Dating Now Compared to the Past, Sorted by Sex") +
      ## HTML color codes from https://htmlcolorcodes.com/
      ## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
      scale_fill_gradient(low = "#FFFFFF", high = "#035B00", na.value = "#8E8E8E", limits = c(0,1400)) +
      xlab("Compared to 10 years ago, for most people, do you think dating is...") +
      ylab("Sex") +
      ## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
    ## tooltip from
    ## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
    ## and
    ## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
    ggplotly(g, tooltip = "fill")
  })
  # Heatmap: perceived dating difficulty vs. 10 years ago crossed with age
  # category. "Refused"/"DK/REF" responses are excluded. The fill scale uses
  # limits = c(0, 1400), the same limits as the companion by-sex plot, so the
  # two plots' colors are directly comparable.
  output$tenyearsbyage <- renderPlotly({
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$DATE10YR_W56 <- factor(raw_data$DATE10YR_W56,levels = c("Harder today", "About the same", "Easier today", "Refused"))
    g <- raw_data %>%
      filter(!is.na(DATE10YR_W56)) %>%
      filter(!is.na(F_AGECAT)) %>%
      filter(F_AGECAT != "DK/REF") %>%
      filter(DATE10YR_W56 != "Refused") %>%
      ### group_by_ from https://stackoverflow.com/questions/54482025/call-input-in-shiny-for-a-group-by-function
      group_by(DATE10YR_W56,
               F_AGECAT
      ) %>%
      summarize(
        n = n()
      ) %>%
      ggplot(aes(
        x = DATE10YR_W56,
        y = F_AGECAT,
        fill = n
      )) +
      geom_tile() +
      ggtitle("Difficulty of Dating Now Compared to the Past, Sorted by Age") +
      ## HTML color codes from https://htmlcolorcodes.com/
      ## scale fill gradient idea and syntax from https://ggplot2.tidyverse.org/reference/scale_gradient.html
      scale_fill_gradient(low = "#FFFFFF", high = "#035B00", na.value = "#8E8E8E", limits = c(0,1400)) +
      xlab("Compared to 10 years ago, for most people, do you think dating is...") +
      ylab("Age") +
      ## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10))
    ## tooltip from
    ## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
    ## and
    ## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
    ggplotly(g, tooltip = "fill")
  })
  # Caption summarizing the two ten-years heatmaps above.
  output$summarytenyears <- renderText("Those who identify as female seem to be more pessimistic about current dating conditions than those identifying as
                                       male, and younger people (under the age of 49) seem to be somewhat more pessimistic than those
                                       over the age of 50.")
  # Bar chart of the perceived overall effect of online dating (ONIMPACT),
  # ordered negative-to-positive via the factor levels below. "Refused" and
  # NA responses are dropped; hover text shows count and percentage.
  output$effectofonline <- renderPlotly({
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$ONIMPACT_W56 <- factor(raw_data$ONIMPACT_W56,levels = c("Mostly negative effect", "Neither positive or negative effect", "Mostly positive effect", "Refused"))
    g <- ggplot((raw_data %>%
                   ## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
                   filter(ONIMPACT_W56 != "Refused") %>%
                   filter(!is.na(ONIMPACT_W56)) %>%
                   group_by(ONIMPACT_W56) %>%
                   summarize(
                     n = n(),
                   ) %>%
                   mutate(
                     pct = n/sum(n)
                   )), aes(
                     x = ONIMPACT_W56,
                     y = n,
                     text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
                   )) +
      geom_col() +
      ggtitle("Perceived Effect of Online Dating") +
      xlab(str_wrap("Overall, what type of effect would you say online dating sites and dating apps have had on dating and relationships?")) +
      ## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
      scale_y_continuous("Number of Participants", expand = c(0,0)) +
      theme(panel.background = element_blank(), axis.ticks = element_blank(),
            axis.line = element_line(color = "black"),
            #Attempt to fix margin - does not work
            #axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)))
            #This does not work either:
            axis.title.x = element_text(vjust = 1))
    ## tooltip from
    ## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
    ## and
    ## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
    ggplotly(g, tooltip = "text")
  })
output$summaryonline <- renderText("Despite these findings about pesimission with regard to
recent dating in general, people's current dating lives, and the downsides of online dating,
the participants were mixed on whether online dating has improved
or worsened dating in general. A plurality of participants (49%)
said that online dating had neither effect. This brings up the question,
if online dating is not what is making modern dating more difficult, then what is?")
  # Bar chart of perceived prevalence of bullying/harassment in online dating
  # (ONPROBLEM.a), ordered least-to-most common via the factor levels below.
  # "Refused" and NA responses are dropped.
  # NOTE(review): output id has a typo ("bulling") but renaming it would
  # break the matching UI plotlyOutput() call, so it is kept as-is.
  output$bullingharass <- renderPlotly({
    # Assigns to a copy of raw_data local to this render expression only.
    raw_data$ONPROBLEM.a_W56 <- factor(raw_data$ONPROBLEM.a_W56,levels = c("Not at all common", "Not too common", "Somewhat common", "Very common", "Refused"))
    g <- ggplot((raw_data %>%
                   ## remove NAs https://www.edureka.co/community/634/how-to-remove-na-values-with-dplyr-filter
                   filter(!is.na(ONPROBLEM.a_W56)) %>%
                   filter(ONPROBLEM.a_W56 != "Refused") %>%
                   group_by(ONPROBLEM.a_W56) %>%
                   summarize(
                     n = n(),
                   ) %>%
                   mutate(
                     pct = n/sum(n)
                   )), aes(
                     x = ONPROBLEM.a_W56,
                     y = n,
                     text = paste(paste("Number of Participants:", n, sep = " "), paste("Percentage:", paste(100*round(pct, digits = 2), "%", sep = ""), sep = " "), sep = "<br>")
                   )) +
      geom_col() +
      ggtitle("Perceived Prevalence of Bullying/Harassment in Online Dating") +
      xlab(str_wrap("How common is people being harassed or bullied on online dating sites and dating apps?")) +
      ## Wrapping axis ticks https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2
      scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
      scale_y_continuous("Number of Participants", expand = c(0,0)) +
      theme(panel.background = element_blank(), axis.ticks = element_blank(),
            axis.line = element_line(color = "black"),
            #Attempt to fix margin - does not work
            #axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 20, l = 0)))
            #This does not work either:
            axis.title.x = element_text(vjust = 1))
    ## tooltip from
    ## https://stackoverflow.com/questions/40598011/how-to-customize-hover-information-in-ggplotly-object/40598524
    ## and
    ## https://www.rdocumentation.org/packages/plotly/versions/4.9.3/topics/ggplotly
    ggplotly(g, tooltip = "text")
  })
# Static interpretation of the harassment chart: a 61% majority of those who
# answered said bullying/harassment is at least somewhat common online.
output$bullingharasstext <- renderText("Bullying and harassment appears to be a problem on online dating apps and
                                       websites. 61% of participants who answered said that this was somewhat or very common,
                                       a concerning statistic.")
output$botheredbycell <- renderPlotly({
  # Order response options from least to most frequent bother.
  raw_data$PARTNERSCREEN.a_W56 <- factor(raw_data$PARTNERSCREEN.a_W56,
                                         levels = c("Never", "Rarely", "Sometimes", "Often", "Refused"))
  # Tally substantive answers (drop NAs and refusals) and compute shares.
  tallies <- raw_data %>%
    filter(!is.na(PARTNERSCREEN.a_W56)) %>%
    filter(PARTNERSCREEN.a_W56 != "Refused") %>%
    group_by(PARTNERSCREEN.a_W56) %>%
    summarize(n = n()) %>%
    mutate(pct = n/sum(n))
  # Hover label shown by plotly for each bar: count plus percentage.
  hover <- paste(paste("Number of Participants:", tallies$n, sep = " "),
                 paste("Percentage:", paste(100*round(tallies$pct, digits = 2), "%", sep = ""), sep = " "),
                 sep = "<br>")
  plt <- ggplot(tallies, aes(x = PARTNERSCREEN.a_W56, y = n, text = hover)) +
    geom_col() +
    ggtitle("Cell Phone Time Bother in Relationships") +
    xlab(str_wrap("How often, if ever, are you bothered by the amount of time your spouse or partner spends on their cellphone?")) +
    # Wrap long category labels onto several short lines.
    scale_x_discrete(labels = function(lbl) str_wrap(lbl, width = 10)) +
    scale_y_continuous("Number of Participants", expand = c(0,0), limits = c(0,1100)) +
    theme(panel.background = element_blank(), axis.ticks = element_blank(),
          axis.line = element_line(color = "black"),
          axis.title.x = element_text(vjust = 1))
  # Convert to an interactive widget; show only the custom tooltip text.
  ggplotly(plt, tooltip = "text")
})
output$distractedbycell <- renderPlotly({
  # Order response options from least to most frequent distraction.
  raw_data$PARTNERDISTRACT_W56 <- factor(raw_data$PARTNERDISTRACT_W56,
                                         levels = c("Never", "Rarely", "Sometimes", "Often", "Refused"))
  # Tally substantive answers (drop NAs and refusals) and compute shares.
  tallies <- raw_data %>%
    filter(!is.na(PARTNERDISTRACT_W56)) %>%
    filter(PARTNERDISTRACT_W56 != "Refused") %>%
    group_by(PARTNERDISTRACT_W56) %>%
    summarize(n = n()) %>%
    mutate(pct = n/sum(n))
  # Hover label shown by plotly for each bar: count plus percentage.
  hover <- paste(paste("Number of Participants:", tallies$n, sep = " "),
                 paste("Percentage:", paste(100*round(tallies$pct, digits = 2), "%", sep = ""), sep = " "),
                 sep = "<br>")
  plt <- ggplot(tallies, aes(x = PARTNERDISTRACT_W56, y = n, text = hover)) +
    geom_col() +
    ggtitle("Frequency of Distraction by Cell Phone in Relationships") +
    xlab(str_wrap("How often, if ever, do you feel as if your spouse or partner is distracted by their cellphone when you are trying to have a conversation with them?")) +
    # Wrap long category labels onto several short lines.
    scale_x_discrete(labels = function(lbl) str_wrap(lbl, width = 10)) +
    scale_y_continuous("Number of Participants", expand = c(0,0), limits = c(0,1100)) +
    theme(panel.background = element_blank(), axis.ticks = element_blank(),
          axis.line = element_line(color = "black"),
          axis.title.x = element_text(vjust = 1))
  # Convert to an interactive widget; show only the custom tooltip text.
  ggplotly(plt, tooltip = "text")
})
# Interpretation of the two cell-phone charts above.
# BUG FIX: corrects the typo "sometimess" -> "sometimes" in the displayed text.
output$cellphonetext <- renderText("Another possibility for why participants are dissatisfied with dating is that
                                   we constantly use our cell phones. 40% of participants said that they were sometimes or often
                                   bothered by the amount of time that their spouse or partner spent on their cell phone, highlighting
                                   how others' behavior in technology can put a strain on relationships. Perhaps more notably, a majority of the sample (54%)
                                   said that they sometimes or often feel that their partners are distracted by a cell phone while
                                   they want to have a conversation. This high level of distraction has the potential to make casual dating
                                   and committed relationships alike difficult.")
# Closing prompt directing users to the interactive dashboard tab.
output$finaltext <- renderText("Use the Interactive Dashboard to explore more reasons why some are dissatisfied, and learn why some are happy with modern technology in dating and relationships.")
# ---------------------------------------------------------------------------
# "N/M participants answered this question" footnotes.
#
# Every note below follows the same recipe: count participants whose answer
# to one survey variable is substantive (neither NA nor a refusal) and report
# it against the full sample size.  The original per-output copies repeated
# that logic fourteen times and, for the lookup-based questions, had an NA
# bug: sum(col == "Refused") without na.rm = TRUE returns NA whenever the
# column contains NAs, producing notes like "NA/4860 participants ...".
# The shared helper below counts refusals NA-safely; the rendered strings
# are otherwise byte-identical to the originals.
#
# answered_note(): build the footnote string for one response vector.
#   responses     -- raw answer vector (character or factor) for one variable
#   refused_label -- refusal category for that question ("Refused",
#                    "DK/REF", or "Don't know/Refused" depending on wording)
answered_note <- function(responses, refused_label = "Refused") {
  total_people <- length(responses)
  # The !is.na() guard makes the comparison FALSE (not NA) for missing answers.
  total_asked <- sum(!is.na(responses) & responses != refused_label)
  paste("Note:",
        paste(
          paste(total_asked, total_people, sep = "/"),
          "participants answered this question."), sep = " ")
}
# question_responses(): map a human-readable question label (as stored in
# lookup_questions$questions) to its raw_data column and return that column.
question_responses <- function(question_label) {
  var_name <- lookup_questions %>%
    filter(questions == question_label) %>%
    pull(var_names[1])
  raw_data[[var_name]]
}
# --- Questions resolved through the lookup table ---------------------------
output$partaskedmstatus <- renderText(
  answered_note(question_responses("Marital Status")))
output$partansweredorientationi <- renderText(
  answered_note(question_responses("Sexual orientation")))
output$partansweredpoliticalideo <- renderText(
  answered_note(question_responses("Political ideology")))
output$partanswerrace <- renderText(
  answered_note(question_responses("Race/Ethnicity")))
# Age and education use different refusal labels in the source data.
output$partanswerage <- renderText(
  answered_note(question_responses("Age category"), refused_label = "DK/REF"))
output$partanswereduc <- renderText(
  answered_note(question_responses("Education"),
                refused_label = "Don't know/Refused"))
# --- Questions referenced directly by their raw_data column names ----------
output$partanswer10years <- renderText(
  answered_note(raw_data$DATE10YR_W56))
output$partanswerthings <- renderText(
  answered_note(raw_data$FAMSURV19DATING_W56))
output$partanswerfeelings <- renderText(
  answered_note(raw_data$ONFEEL.c_W56))
output$partanswerbullyharass <- renderText(
  answered_note(raw_data$ONPROBLEM.a_W56))
output$effectofonlinetext <- renderText(
  answered_note(raw_data$ONIMPACT_W56))
output$jealousypartaskedtext <- renderText(
  answered_note(raw_data$SNSFEEL_W56))
output$cellphoneonepartanswer <- renderText(
  answered_note(raw_data$PARTNERSCREEN.a_W56))
output$cellphonetwopartanswer <- renderText(
  answered_note(raw_data$PARTNERDISTRACT_W56))
# Usage instructions for the interactive dashboard: pick a plot type
# ("bar" = one variable, "heatmap" = two), then pick the variable(s).
output$howtousetext <- renderText("Start by selecting a plot type. If you would like to visualize one variable, select \"bar\"; if you would
                                  like to select two variables, select \"heatmap\". Then, choose the variable(s) you would like to visualize from the
                                  dropdown menu(s) below.")
}
# Run the application, combining the UI definition and server logic above.
shinyApp(ui = ui, server = server)
|
library(data.table)
library(ggplot2)
library(grid)
library(gridExtra)
library(ggpubr)
library(ggplotify)
library(viridis)
# ---- Load mutation counts split by replication-timing (RT) bin --------------
data <- read.csv("/Users/mar/BIO/PROJECTS/APOBEC/NONBDNA/Denek3/united_RT_X0.txt",sep='\t',header = TRUE)
data <- data.table(data)
# Column naming: trg*/cnt* appear to be target and mutation counts, In/Out
# inside/outside the non-B DNA structure -- TODO confirm against pipeline docs.
setnames(data,c("cancer","structure","isAPOBEC","sample","trgIn","cntIn","trgOut","cntOut","trgOutOld","cntOutOld","sign","RTbin"))
# signBinary = 1 when `sign` falls in either 5% tail; remaining rows
# (including NA signs) are set to 0 by the second assignment.
data[sign < 5 | sign > 95, signBinary:=1]
data[is.na(signBinary), signBinary := 0]
data$signBinary <- as.factor(data$signBinary)
# Per-row densities inside/outside the structure and their log-ratio.
data[, ratioIn := (cntIn/trgIn)]
data[, ratioOut := (cntOut/trgOut)]
data[, ratio := log((cntIn/trgIn)/(cntOut/trgOut))]
# ---- Same counts, aggregated over all RT bins -------------------------------
dataAll <- read.csv("/Users/mar/BIO/PROJECTS/APOBEC/NONBDNA/Denek3/united_X0.txt",sep='\t',header = TRUE)
dataAll <- data.table(dataAll)
setnames(dataAll,c("cancer","structure","isAPOBEC","sample","trgIn","cntIn","trgOut","cntOut","trgOutOld","cntOutOld","sign"))
dataAll[sign < 5 | sign > 95, signBinary:=1]
dataAll[is.na(signBinary), signBinary := 0]
dataAll$signBinary <- as.factor(dataAll$signBinary)
dataAll[, ratioIn := (cntIn/trgIn)]
dataAll[, ratioOut := (cntOut/trgOut)]
dataAll[, ratio := log((cntIn/trgIn)/(cntOut/trgOut))]
# ---- Per-sample APOBEC enrichment scores ------------------------------------
activity <- read.csv("/Users/mar/BIO/PROJECTS/PCAWG_APOBEC/PCAWG_enrichment_6cancers.txt",sep='\t',header=FALSE,strip.white =TRUE)
activity <- data.table(activity)
setnames(activity,c("project","sample","enrichment"))
# Left joins: keep all mutation rows, even samples without an enrichment score.
data <- merge(data,activity,by="sample",all.x = TRUE)
dataAll <- merge(dataAll,activity,by="sample",all.x = TRUE)
# The aggregated table gets pseudo-bin 7 so it can be stacked with the
# per-bin table (the plot's color scale later supplies 8 colors, 0..7).
dataAll[,RTbin := 7]
dataPlot <- rbind(data,dataAll)
structures <- unique(data$structure)
cancers <- unique(data$cancer)
# Plot only APOBEC-context rows; RTbin becomes a discrete color factor.
dataPlot <- dataPlot[isAPOBEC == 1]
dataPlot$RTbin <- as.factor(dataPlot$RTbin)
# One figure per (structure, cancer): log density ratio vs. APOBEC
# enrichment, colored by replication-timing bin (7 = all bins pooled).
for(s in structures)
{
  for(c in cancers)
  {
    dt <- dataPlot[cancer==c & structure == s]
    # Drop infinite log-ratios (zero mutation count inside or outside).
    dt <- dt[!is.infinite(ratio)]
    p <- ggplot(dt,aes(x=enrichment,y=ratio,color=RTbin)) + geom_point(size=0.5) +
      scale_shape_manual(values=c(15, 16)) +
      # Reference line: equal mutation density inside and outside.
      geom_hline(yintercept=0,color=rgb(243,94,90,maxColorValue = 255)) +
      stat_smooth(method = "lm", se=F,formula = y ~ log(x)) +
      scale_color_manual(values=c(rgb(204,123,177,maxColorValue = 255),
                                  rgb(244,155,73,maxColorValue = 255),
                                  rgb(157,209,184,maxColorValue = 255),
                                  rgb(72,193,241,maxColorValue = 255),
                                  rgb(246,234,92,maxColorValue = 255),
                                  rgb(107,181,58,maxColorValue = 255),
                                  rgb(186,182,220,maxColorValue = 255),
                                  rgb(233,72,126,maxColorValue = 255))) +
      theme(panel.background = element_blank(),
            plot.title = element_text(size=8),
            axis.title = element_blank(),
            axis.line = element_line(color="black"),
            panel.grid.major = element_line(size = rel(0.5), colour='grey92'))
    # BUG FIX: pass the plot explicitly. ggsave() defaults to last_plot(),
    # which only tracks *printed* plots; `p` is merely assigned above, so
    # the previous figure (or nothing) would otherwise be saved.
    ggsave(paste0("/Users/mar/BIO/PROJECTS/APOBEC/NONBDNA/pics/fig5/bbHideInfinite/",s,"_",c,".tiff"),
           plot = p, units="mm",width=200,height=120,dpi=300)
  }
}
# NOTE(review): `plots` and `i` are initialized but never used in the visible
# script -- candidates for removal if no later code appends to them.
plots <- list()
i <- 1
# One figure per (structure, cancer, APOBEC-context, RT-bin) combination.
for(s in structures)
{
  for(c in cancers)
  {
    for(a in c(0,1))
    {
      for(rt in 0:6)
      {
        dt <- data[cancer==c & isAPOBEC == a & structure == s & RTbin == rt]
        p <- ggplot(dt,aes(x=enrichment,y=ratio)) + geom_point(color=rgb(38,120,178,maxColorValue = 255),aes(shape=signBinary)) +
          scale_shape_manual(values=c(15, 16)) +
          geom_hline(yintercept=0,color=rgb(243,94,90,maxColorValue = 255)) +
          stat_smooth(method = "lm", col = rgb(140,210,185,maxColorValue = 255),se=F,formula = y ~ log(x)) +
          theme(panel.background = element_blank(),
                plot.title = element_text(size=8),
                axis.title = element_blank(),
                legend.position = "none",
                axis.line = element_line(color="black"),
                panel.grid.major = element_line(size = rel(0.5), colour='grey92'))
        # BUG FIX: pass the plot explicitly. ggsave() defaults to
        # last_plot(), which only tracks *printed* plots; `p` is merely
        # assigned here, so the wrong (or no) figure would be saved.
        ggsave(paste0("/Users/mar/BIO/PROJECTS/APOBEC/NONBDNA/pics/fig5/b/",s,"_",c,"_",a,"_",rt,".tiff"),
               plot = p, units="mm",width=100,height=60,dpi=300)
      }
    }
  }
}
|
/fig3b.R
|
no_license
|
mkazanov/nonbdna
|
R
| false
| false
| 5,185
|
r
|
library(data.table)
library(ggplot2)
library(grid)
library(gridExtra)
library(ggpubr)
library(ggplotify)
library(viridis)
# ---- Load mutation counts split by replication-timing (RT) bin --------------
data <- read.csv("/Users/mar/BIO/PROJECTS/APOBEC/NONBDNA/Denek3/united_RT_X0.txt",sep='\t',header = TRUE)
data <- data.table(data)
# Column naming: trg*/cnt* appear to be target and mutation counts, In/Out
# inside/outside the non-B DNA structure -- TODO confirm against pipeline docs.
setnames(data,c("cancer","structure","isAPOBEC","sample","trgIn","cntIn","trgOut","cntOut","trgOutOld","cntOutOld","sign","RTbin"))
# signBinary = 1 when `sign` falls in either 5% tail; remaining rows
# (including NA signs) are set to 0 by the second assignment.
data[sign < 5 | sign > 95, signBinary:=1]
data[is.na(signBinary), signBinary := 0]
data$signBinary <- as.factor(data$signBinary)
# Per-row densities inside/outside the structure and their log-ratio.
data[, ratioIn := (cntIn/trgIn)]
data[, ratioOut := (cntOut/trgOut)]
data[, ratio := log((cntIn/trgIn)/(cntOut/trgOut))]
# ---- Same counts, aggregated over all RT bins -------------------------------
dataAll <- read.csv("/Users/mar/BIO/PROJECTS/APOBEC/NONBDNA/Denek3/united_X0.txt",sep='\t',header = TRUE)
dataAll <- data.table(dataAll)
setnames(dataAll,c("cancer","structure","isAPOBEC","sample","trgIn","cntIn","trgOut","cntOut","trgOutOld","cntOutOld","sign"))
dataAll[sign < 5 | sign > 95, signBinary:=1]
dataAll[is.na(signBinary), signBinary := 0]
dataAll$signBinary <- as.factor(dataAll$signBinary)
dataAll[, ratioIn := (cntIn/trgIn)]
dataAll[, ratioOut := (cntOut/trgOut)]
dataAll[, ratio := log((cntIn/trgIn)/(cntOut/trgOut))]
# ---- Per-sample APOBEC enrichment scores ------------------------------------
activity <- read.csv("/Users/mar/BIO/PROJECTS/PCAWG_APOBEC/PCAWG_enrichment_6cancers.txt",sep='\t',header=FALSE,strip.white =TRUE)
activity <- data.table(activity)
setnames(activity,c("project","sample","enrichment"))
# Left joins: keep all mutation rows, even samples without an enrichment score.
data <- merge(data,activity,by="sample",all.x = TRUE)
dataAll <- merge(dataAll,activity,by="sample",all.x = TRUE)
# The aggregated table gets pseudo-bin 7 so it can be stacked with the
# per-bin table (the plot's color scale later supplies 8 colors, 0..7).
dataAll[,RTbin := 7]
dataPlot <- rbind(data,dataAll)
structures <- unique(data$structure)
cancers <- unique(data$cancer)
# Plot only APOBEC-context rows; RTbin becomes a discrete color factor.
dataPlot <- dataPlot[isAPOBEC == 1]
dataPlot$RTbin <- as.factor(dataPlot$RTbin)
# One figure per (structure, cancer): log density ratio vs. APOBEC
# enrichment, colored by replication-timing bin (7 = all bins pooled).
for(s in structures)
{
  for(c in cancers)
  {
    dt <- dataPlot[cancer==c & structure == s]
    # Drop infinite log-ratios (zero mutation count inside or outside).
    dt <- dt[!is.infinite(ratio)]
    p <- ggplot(dt,aes(x=enrichment,y=ratio,color=RTbin)) + geom_point(size=0.5) +
      scale_shape_manual(values=c(15, 16)) +
      # Reference line: equal mutation density inside and outside.
      geom_hline(yintercept=0,color=rgb(243,94,90,maxColorValue = 255)) +
      stat_smooth(method = "lm", se=F,formula = y ~ log(x)) +
      scale_color_manual(values=c(rgb(204,123,177,maxColorValue = 255),
                                  rgb(244,155,73,maxColorValue = 255),
                                  rgb(157,209,184,maxColorValue = 255),
                                  rgb(72,193,241,maxColorValue = 255),
                                  rgb(246,234,92,maxColorValue = 255),
                                  rgb(107,181,58,maxColorValue = 255),
                                  rgb(186,182,220,maxColorValue = 255),
                                  rgb(233,72,126,maxColorValue = 255))) +
      theme(panel.background = element_blank(),
            plot.title = element_text(size=8),
            axis.title = element_blank(),
            axis.line = element_line(color="black"),
            panel.grid.major = element_line(size = rel(0.5), colour='grey92'))
    # BUG FIX: pass the plot explicitly. ggsave() defaults to last_plot(),
    # which only tracks *printed* plots; `p` is merely assigned above, so
    # the previous figure (or nothing) would otherwise be saved.
    ggsave(paste0("/Users/mar/BIO/PROJECTS/APOBEC/NONBDNA/pics/fig5/bbHideInfinite/",s,"_",c,".tiff"),
           plot = p, units="mm",width=200,height=120,dpi=300)
  }
}
# NOTE(review): `plots` and `i` are initialized but never used in the visible
# script -- candidates for removal if no later code appends to them.
plots <- list()
i <- 1
# One figure per (structure, cancer, APOBEC-context, RT-bin) combination.
for(s in structures)
{
  for(c in cancers)
  {
    for(a in c(0,1))
    {
      for(rt in 0:6)
      {
        dt <- data[cancer==c & isAPOBEC == a & structure == s & RTbin == rt]
        p <- ggplot(dt,aes(x=enrichment,y=ratio)) + geom_point(color=rgb(38,120,178,maxColorValue = 255),aes(shape=signBinary)) +
          scale_shape_manual(values=c(15, 16)) +
          geom_hline(yintercept=0,color=rgb(243,94,90,maxColorValue = 255)) +
          stat_smooth(method = "lm", col = rgb(140,210,185,maxColorValue = 255),se=F,formula = y ~ log(x)) +
          theme(panel.background = element_blank(),
                plot.title = element_text(size=8),
                axis.title = element_blank(),
                legend.position = "none",
                axis.line = element_line(color="black"),
                panel.grid.major = element_line(size = rel(0.5), colour='grey92'))
        # BUG FIX: pass the plot explicitly. ggsave() defaults to
        # last_plot(), which only tracks *printed* plots; `p` is merely
        # assigned here, so the wrong (or no) figure would be saved.
        ggsave(paste0("/Users/mar/BIO/PROJECTS/APOBEC/NONBDNA/pics/fig5/b/",s,"_",c,"_",a,"_",rt,".tiff"),
               plot = p, units="mm",width=100,height=60,dpi=300)
      }
    }
  }
}
|
# Tests for google_form_decode(): each fixture CSV contains three
# base64-encoded copies of one submission, so decoding should yield the
# corresponding data frame stacked three times.
context("Test google_form_decode()")
# Expected decoded content of the plain-ASCII fixture.
correct_responses <- data.frame(
  user = rep("sean", 6),
  course_name = rep("Google Forms Course", 6),
  lesson_name = rep("Lesson 1", 6),
  question_number = rep(2:3, 3),
  correct = rep(TRUE, 6),
  attempt = rep(1, 6),
  skipped = rep(FALSE, 6),
  datetime = c(1465226419.39813, 1465226423.01385, 1465226839.61722,
               1465226846.03171, 1465226867.85347, 1465226895.93299),
  stringsAsFactors = FALSE
)
# Expected decoded content of the non-ASCII fixture (diacritics, Greek,
# Cyrillic) used to exercise character-encoding handling.
diacritics_greek_cyrillic <- data.frame(
  user = rep("Sëãń Çroøšż", 6),
  course_name = rep("Στατιστική", 6),
  lesson_name = rep("Введение", 6),
  question_number = rep(2:3, 3),
  correct = rep(TRUE, 6),
  attempt = rep(1, 6),
  skipped = rep(FALSE, 6),
  datetime = c(1465226419.39813, 1465226423.01385, 1465226839.61722,
               1465226846.03171, 1465226867.85347, 1465226895.93299),
  stringsAsFactors = FALSE
)
# Locate the fixture CSVs shipped with the installed package.
cr_path <- system.file(file.path("test", "correct_responses.csv"),
                       package = "swirlify")
dgc_path <- system.file(file.path("test", "diacritics_greek_cyrillic.csv"),
                        package = "swirlify")
# Decode both fixtures once; the test blocks below compare against them.
cr <- google_form_decode(cr_path)
dgc <- google_form_decode(dgc_path)
test_that("Google Forms can be Properly Decoded.", {
  # The fixture holds three identical encoded submissions, so the decoded
  # result is three stacked copies of the expected data frame.
  expected <- do.call(rbind, list(correct_responses,
                                  correct_responses,
                                  correct_responses))
  expect_equal(cr, expected)
})
test_that("Google Forms with diacritics can be Properly Decoded.", {
  # Skipped on Windows, where these code points are locale-sensitive.
  skip_on_os("windows")
  expected <- do.call(rbind, list(diacritics_greek_cyrillic,
                                  diacritics_greek_cyrillic,
                                  diacritics_greek_cyrillic))
  expect_equal(dgc, expected)
})
# # Google form encode
# library(base64enc)
# library(tibble)
# library(readr)
#
# cr_file <- tempfile()
# dgc_file <- tempfile()
#
# write.csv(correct_responses, file = cr_file, row.names = FALSE)
# write.csv(diacritics_greek_cyrillic, file = dgc_file, row.names = FALSE)
#
# encoded_cr <- base64encode(cr_file)
# encoded_dgc <- base64encode(dgc_file)
#
# write_csv(
# tribble(
# ~Timestamp, ~Submission,
# "2016/06/06 11:21:49 AM AST", encoded_cr,
# "2016/06/06 11:27:29 AM AST", encoded_cr,
# "2016/06/06 11:28:18 AM AST", encoded_cr
# ), "inst/test/correct_responses.csv"
# )
#
# write_csv(
# tribble(
# ~Timestamp, ~Submission,
# "2016/06/06 11:21:49 AM AST", encoded_dgc,
# "2016/06/06 11:27:29 AM AST", encoded_dgc,
# "2016/06/06 11:28:18 AM AST", encoded_dgc
# ), "inst/test/diacritics_greek_cyrillic.csv"
# )
|
/tests/testthat/test_google_form_decode.R
|
no_license
|
MeganLBecker/swirlify
|
R
| false
| false
| 2,525
|
r
|
# Tests for google_form_decode(): each fixture CSV contains three
# base64-encoded copies of one submission, so decoding should yield the
# corresponding data frame stacked three times.
context("Test google_form_decode()")
# Expected decoded content of the plain-ASCII fixture.
correct_responses <- data.frame(
  user = rep("sean", 6),
  course_name = rep("Google Forms Course", 6),
  lesson_name = rep("Lesson 1", 6),
  question_number = rep(2:3, 3),
  correct = rep(TRUE, 6),
  attempt = rep(1, 6),
  skipped = rep(FALSE, 6),
  datetime = c(1465226419.39813, 1465226423.01385, 1465226839.61722,
               1465226846.03171, 1465226867.85347, 1465226895.93299),
  stringsAsFactors = FALSE
)
# Expected decoded content of the non-ASCII fixture (diacritics, Greek,
# Cyrillic) used to exercise character-encoding handling.
diacritics_greek_cyrillic <- data.frame(
  user = rep("Sëãń Çroøšż", 6),
  course_name = rep("Στατιστική", 6),
  lesson_name = rep("Введение", 6),
  question_number = rep(2:3, 3),
  correct = rep(TRUE, 6),
  attempt = rep(1, 6),
  skipped = rep(FALSE, 6),
  datetime = c(1465226419.39813, 1465226423.01385, 1465226839.61722,
               1465226846.03171, 1465226867.85347, 1465226895.93299),
  stringsAsFactors = FALSE
)
# Locate the fixture CSVs shipped with the installed package.
cr_path <- system.file(file.path("test", "correct_responses.csv"),
                       package = "swirlify")
dgc_path <- system.file(file.path("test", "diacritics_greek_cyrillic.csv"),
                        package = "swirlify")
# Decode both fixtures once; the test blocks below compare against them.
cr <- google_form_decode(cr_path)
dgc <- google_form_decode(dgc_path)
test_that("Google Forms can be Properly Decoded.", {
  # The fixture holds three identical encoded submissions, so the decoded
  # result is three stacked copies of the expected data frame.
  expected <- do.call(rbind, list(correct_responses,
                                  correct_responses,
                                  correct_responses))
  expect_equal(cr, expected)
})
test_that("Google Forms with diacritics can be Properly Decoded.", {
  # Skipped on Windows, where these code points are locale-sensitive.
  skip_on_os("windows")
  expected <- do.call(rbind, list(diacritics_greek_cyrillic,
                                  diacritics_greek_cyrillic,
                                  diacritics_greek_cyrillic))
  expect_equal(dgc, expected)
})
# # Google form encode
# library(base64enc)
# library(tibble)
# library(readr)
#
# cr_file <- tempfile()
# dgc_file <- tempfile()
#
# write.csv(correct_responses, file = cr_file, row.names = FALSE)
# write.csv(diacritics_greek_cyrillic, file = dgc_file, row.names = FALSE)
#
# encoded_cr <- base64encode(cr_file)
# encoded_dgc <- base64encode(dgc_file)
#
# write_csv(
# tribble(
# ~Timestamp, ~Submission,
# "2016/06/06 11:21:49 AM AST", encoded_cr,
# "2016/06/06 11:27:29 AM AST", encoded_cr,
# "2016/06/06 11:28:18 AM AST", encoded_cr
# ), "inst/test/correct_responses.csv"
# )
#
# write_csv(
# tribble(
# ~Timestamp, ~Submission,
# "2016/06/06 11:21:49 AM AST", encoded_dgc,
# "2016/06/06 11:27:29 AM AST", encoded_dgc,
# "2016/06/06 11:28:18 AM AST", encoded_dgc
# ), "inst/test/diacritics_greek_cyrillic.csv"
# )
|
# Use Siham's Goat data and load it into GenABEL for QTL scanning, an example for Uwe
#
# copyright (c) 2016-2020 - Brockmann group - HU Berlin, Danny Arends
# last modified Jul, 2016
# first written Jul, 2016
# NOTE(review): GenABEL has been archived on CRAN; installing it may require
# an archived source package. Uncomment the next line if it is not installed.
#install.packages("GenABEL")
library(GenABEL)
# Set the working directory to where the data is stored (use / instead of \ on Windows).
# NOTE(review): setwd() makes the script machine-specific; relative paths would be more portable.
setwd("E:/UWE")
# Load the raw SNP genotype matrix; "--" and empty cells are treated as missing calls.
snpdata <- read.csv("Ziegen_HU-Berlin_Matrix.txt", sep="\t", skip = 9, header=TRUE, check.names=FALSE, row.names=1, na.strings=c("NA", "", "--"))
# SNP annotation table (alleles, chromosome, position, ...).
snpinfo <- read.table("snpinfo.txt", sep="\t", na.strings=c("", "NA", "N.D."))
snpdata <- snpdata[rownames(snpinfo), ] # Take only the SNPs for which we have information (2000 here)
snpdata <- snpdata[,-which(colnames(snpdata) == "DN 2")] # Throw away the duplicate individual because it confuses STRUCTURE
# Load the phenotype data for the samples
samples <- read.table("sampleinfo.txt", sep="\t")
# Load the fixed (corrected) sampling-location data
locations <- read.table("Sample_SNP_location_fixed.txt", sep="\t", header=TRUE, row.names=1)
samples <- cbind(samples, locations[rownames(samples),])
# Short location label = text before the first "_" in the Location column.
samples <- cbind(samples, locationShort = as.character(unlist(lapply(strsplit(as.character(samples[,"Location"]), "_"), "[",1))))
rownames(samples) <- gsub(" ", "", rownames(samples)) # Sample names cannot contain spaces
gendata <- cbind(snpinfo[rownames(snpdata),c("Chr", "Position")], snpdata) # Add location information to the genotype data
colnames(gendata) <- gsub(" ", "", colnames(gendata)) # Sample names cannot contain spaces
# Phenotype/covariate table: sex and age are required by GenABEL; averagemilk
# is the phenotype scanned below. sex is set to 0 for every sample here.
phenocovs <- cbind(id = rownames(samples), sex = rep(0, nrow(samples)), age = samples[,"Age"], averagemilk = samples[,"Averagemilk"])
# Write the genotypes (missing calls encoded as "00")
write.table(cbind(id = rownames(gendata), gendata), file="genable.input", sep="\t", quote=FALSE, na="00", row.names=FALSE)
# Write the phenotypes
write.table(phenocovs, file="genable.pheno", sep="\t", quote=FALSE)
# Convert the genotypes to GenABEL's binary format (Illumina-style input)
convert.snp.illumina("genable.input", "genable.encoded", strand = "+")
# Load the converted genotype data, and the phenotype data
gwadata <- load.gwaa.data(phenofile = "genable.pheno", genofile = "genable.encoded", force = TRUE, makemap = FALSE, sort = TRUE, id = "id")
# Scan for a QTL, adjusted for sex and age (CRSNP is the current SNP in the model)
res <- scan.glm("averagemilk ~ sex + age + CRSNP", data=gwadata)
|
/Uwe/example_genable.R
|
no_license
|
DannyArends/HU-Berlin
|
R
| false
| false
| 2,773
|
r
|
# Use Siham's Goat data and load it into GenABEL for QTL scanning, an example for Uwe
#
# copyright (c) 2016-2020 - Brockmann group - HU Berlin, Danny Arends
# last modified Jul, 2016
# first written Jul, 2016
# Uncomment the following line if you do not have GenABEL installed in R
#install.packages("GenABEL")
library(GenABEL)
# Set the working directory to where the data is stored, replace \ by / in windows
setwd("E:/UWE")
# Load the raw SNP genotype matrix (SNPs in rows, samples in columns; "--" and empty cells are missing)
snpdata <- read.csv("Ziegen_HU-Berlin_Matrix.txt", sep="\t", skip = 9, header=TRUE, check.names=FALSE, row.names=1, na.strings=c("NA", "", "--"))
# SNP annotation (alleles, chromosome, position, etc.)
snpinfo <- read.table("snpinfo.txt", sep="\t", na.strings=c("", "NA", "N.D."))
snpdata <- snpdata[rownames(snpinfo), ] # Take only the SNPs for which we have information (2000 here)
snpdata <- snpdata[,-which(colnames(snpdata) == "DN 2")] # Throw away the duplicate individual because it confuses STRUCTURE
# Load the phenotype data for the samples
samples <- read.table("sampleinfo.txt", sep="\t")
# Load the corrected sampling locations (keyed by sample ID)
locations <- read.table("Sample_SNP_location_fixed.txt", sep="\t", header=TRUE, row.names=1) # Fixed sample-location table
samples <- cbind(samples, locations[rownames(samples),]) # Attach the location columns to the sample table
# Short location label = text before the first "_" in the Location column
samples <- cbind(samples, locationShort = as.character(unlist(lapply(strsplit(as.character(samples[,"Location"]), "_"), "[",1))))
rownames(samples) <- gsub(" ", "", rownames(samples)) # Sample names cannot contain spaces
gendata <- cbind(snpinfo[rownames(snpdata),c("Chr", "Position")], snpdata) # Add map (Chr/Position) information to the genotype data
colnames(gendata) <- gsub(" ", "", colnames(gendata)) # Sample names cannot contain spaces
# Phenotype/covariate table: GenABEL requires id, sex and age; averagemilk is the trait scanned below.
# NOTE(review): sex is hard-coded to 0 for every sample -- confirm sex data is truly unavailable.
phenocovs <- cbind(id = rownames(samples), sex = rep(0, nrow(samples)), age = samples[,"Age"], averagemilk = samples[,"Averagemilk"])
# Write the genotypes (missing genotypes are encoded as "00")
write.table(cbind(id = rownames(gendata), gendata), file="genable.input", sep="\t", quote=FALSE, na="00", row.names=FALSE)
# Write the phenotypes
write.table(phenocovs, file="genable.pheno", sep="\t", quote=FALSE)
# Convert to the GenABEL binary format using the Illumina-style converter
convert.snp.illumina("genable.input", "genable.encoded", strand = "+")
# Load the converted genotype data together with the phenotype data
gwadata <- load.gwaa.data(phenofile = "genable.pheno", genofile = "genable.encoded", force = TRUE, makemap = FALSE, sort = TRUE, id = "id")
# Scan for a QTL, adjusted for sex and age (CRSNP is the current SNP in the model)
res <- scan.glm("averagemilk ~ sex + age + CRSNP", data=gwadata)
|
\name{DOPE}
\alias{DOPE}
\title{Generate a distribution of possible effects.}
\description{Wrapper for parallel \link[DOPE]{simfun}. Takes a linear regression model fit by \link[stats]{lm} and returns information on the distribution of possible effects. Currently implemented in both R and C++. The C++ version is faster, while the R version is easier for the expected user base to read and modify as needed.}
\usage{
DOPE(mod,nsims=10000,language="cpp",n.cores=1,buff=sqrt(.Machine$double.eps))
}
\arguments{
\item{mod}{
A linear regression model fit by \link[stats]{lm}.
}
\item{nsims}{
numeric. How many draws to take?
}
\item{language}{
character, either "cpp" or "R" determining which implementation to use.
}
\item{n.cores}{
numeric. How many cores should the simulation be run on?
}
\item{buff}{
numeric. A buffer to avoid numeric positive non-definiteness.
}
}
\value{
Returns a data.frame of nsims + 1 rows, with the last row containing the input coefficients. Includes intercept, regressor coefficients, control function coefficient, and R-squared as columns.
}
\examples{
set.seed(8675309)
x_vars <- 5
n_obs <- 1000
corm <- RandomCormCPP(nvars = x_vars)
X_mat <- MASS::mvrnorm(n_obs, rep(0,x_vars), Sigma = corm, empirical = TRUE)
betas <- 1:x_vars
y <- X_mat %*% betas + rnorm(n_obs, 0, 1)
dat <- data.frame(y,X_mat)
mod <- lm(y ~ ., data=dat)
dope <- DOPE(mod, nsims = 3000, n.cores = parallel::detectCores())
}
|
/man/DOPE.Rd
|
no_license
|
christophercschwarz/DOPE
|
R
| false
| false
| 1,520
|
rd
|
\name{DOPE}
\alias{DOPE}
\title{Generate a distribution of possible effects.}
\description{Wrapper for parallel \link[DOPE]{simfun}. Takes a linear regression model fit by \link[stats]{lm} and returns information on the distribution of possible effects. Currently implemented in both R and C++. The C++ version is faster, while the R version is easier for the expected user base to read and modify as needed.}
\usage{
DOPE(mod,nsims=10000,language="cpp",n.cores=1,buff=sqrt(.Machine$double.eps))
}
\arguments{
\item{mod}{
A linear regression model fit by \link[stats]{lm}.
}
\item{nsims}{
numeric. How many draws to take?
}
\item{language}{
character, either "cpp" or "R" determining which implementation to use.
}
\item{n.cores}{
numeric. How many cores should the simulation be run on?
}
\item{buff}{
numeric. A buffer to avoid numeric positive non-definiteness.
}
}
\value{
Returns a data.frame of nsims + 1 rows, with the last row containing the input coefficients. Includes intercept, regressor coefficients, control function coefficient, and R-squared as columns.
}
\examples{
set.seed(8675309)
x_vars <- 5
n_obs <- 1000
corm <- RandomCormCPP(nvars = x_vars)
X_mat <- MASS::mvrnorm(n_obs, rep(0,x_vars), Sigma = corm, empirical = TRUE)
betas <- 1:x_vars
y <- X_mat %*% betas + rnorm(n_obs, 0, 1)
dat <- data.frame(y,X_mat)
mod <- lm(y ~ ., data=dat)
dope <- DOPE(mod, nsims = 3000, n.cores = parallel::detectCores())
}
|
library(shiny)

# Input controls: weight and height, applied only when Submit is pressed.
# NOTE(review): height is labelled "ft" but ranges 20-300 with default 100,
# which looks like cm; confirm against server.R.
inputs <- sidebarPanel(
  h5('Enter your weight and height in lb/ft'),
  numericInput('weight', 'Weight lb',50, min = 50, max = 200, step = 5),
  numericInput('height', 'Height ft',100, min = 20, max = 300, step = 5),
  submitButton('Submit')
)

# Output area: BMI explanation list plus the echoed inputs and server results.
outputs <- mainPanel(
  h5('Body Mass Index (BMI) is a calculation the medical world uses as an indicator of overall health. The general classification is: '),
  tags$ul(
    tags$li("Underweight: < 18.5"),
    tags$li("Normal weight: 18.5 - 24.9"),
    tags$li("Overweight: 25 - 29.9"),
    tags$li("Obese: >= 30"),
    tags$li("Extremely Obese: >= 40")
  ),
  h5('Your weight'),
  verbatimTextOutput("weight"),
  h5('Your height'),
  verbatimTextOutput("height"),
  h5('Your Calculated BMI '),
  verbatimTextOutput("bmi"),
  h5('Your BMI is in the following classification'),
  verbatimTextOutput("label")
)

# Assemble the classic sidebar layout; same widget tree as before.
shinyUI(pageWithSidebar(headerPanel("BMI Calculator"), inputs, outputs))
|
/UI.R
|
no_license
|
racheljiling/bmi-code
|
R
| false
| false
| 1,041
|
r
|
library(shiny)
# UI definition for a simple BMI calculator app.
shinyUI(
  pageWithSidebar(
    # Application title
    headerPanel("BMI Calculator"),
    # Inputs: weight and height, applied only when Submit is pressed.
    # NOTE(review): height is labelled "ft" but ranges 20-300 with default
    # 100, which looks like cm; confirm against server.R.
    sidebarPanel(
      h5('Enter your weight and height in lb/ft'),
      numericInput('weight', 'Weight lb',50, min = 50, max = 200, step = 5),
      numericInput('height', 'Height ft',100, min = 20, max = 300, step = 5),
      submitButton('Submit')
    ),
    # Outputs rendered by the server: echoed inputs, computed BMI, class label.
    mainPanel(
      h5('Body Mass Index (BMI) is a calculation the medical world uses as an indicator of overall health. The general classification is: '),
      tags$ul(
        tags$li("Underweight: < 18.5"),
        tags$li("Normal weight: 18.5 - 24.9"),
        tags$li("Overweight: 25 - 29.9"),
        tags$li("Obese: >= 30"),
        tags$li("Extremely Obese: >= 40")
      ),
      h5('Your weight'),
      verbatimTextOutput("weight"),
      h5('Your height'),
      verbatimTextOutput("height"),
      h5('Your Calculated BMI '),
      verbatimTextOutput("bmi"),
      h5('Your BMI is in the following classification'),
      verbatimTextOutput("label")
    )
  )
)
|
## The following R code is to create two functions, which are:
## 1. makeCacheMatrix : This function takes a matrix as an input. It will cache the inverse of a matrix returned from the function below.
## 2. cacheSolve : This function will take the output from makeCacheMatrix. It shall first find if an inverse of the matrix exist else it shall compute the inverse using solve function inside makeCacheMatrix.
## Wrap matrix `x` in a caching container.
##
## Returns a list of four closures that share this call's environment:
##   set(y)                - store a new matrix and drop any cached inverse
##   get()                 - return the stored matrix
##   setMatrixInversion(m) - cache a computed inverse
##   getInverseMatrix()    - return the cached inverse, or NULL if none yet
## No inversion is performed here; see cacheSolve.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # a new matrix invalidates the cached inverse
  }
  get <- function() x
  # Parameter renamed from `solve` to `inverse`: the old name shadowed
  # base::solve() inside this closure and was misleading. Positional calls
  # (as used by cacheSolve) are unaffected.
  setMatrixInversion <- function(inverse) m <<- inverse
  getInverseMatrix <- function() m
  list(set = set, get = get,
       setMatrixInversion = setMatrixInversion,
       getInverseMatrix = getInverseMatrix)
}
## Retrieve the inverse of the matrix wrapped by `x` (a makeCacheMatrix
## object). A previously cached inverse is reused; otherwise the inverse is
## computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getInverseMatrix()
  if (is.null(cached)) {
    # Cache miss: compute the inverse once and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setMatrixInversion(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
CosterwellKhyriem/ProgrammingAssignment2
|
R
| false
| false
| 1,412
|
r
|
## The following R code is to create two functions, which are:
## 1. makeCacheMatrix : This function takes a matrix as an input. It will cache the inverse of a matrix returned from the function below.
## 2. cacheSolve : This function will take the output from makeCacheMatrix. It shall first find if an inverse of the matrix exist else it shall compute the inverse using solve function inside makeCacheMatrix.
## Takes a matrix `x` and returns a caching wrapper: a list of closures that
## can store the matrix, retrieve it, and store/retrieve its inverse once a
## caller (see cacheSolve) has computed it. No inversion is performed here.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL until cacheSolve stores one
  set <- function(y) {
    x <<- y
    m <<- NULL  # replacing the matrix invalidates the cached inverse
  }
  get <- function() x
  # NOTE(review): the parameter name `solve` shadows base::solve() inside
  # this closure; consider renaming it for clarity.
  setMatrixInversion <- function(solve) m <<- solve
  getInverseMatrix <- function() m
  list(set = set, get = get,
       setMatrixInversion = setMatrixInversion,
       getInverseMatrix = getInverseMatrix)
}
## Return the inverse of the matrix held by `x`, a wrapper produced by
## makeCacheMatrix. A previously cached inverse is returned directly;
## otherwise the inverse is computed here with solve() and cached.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix stored in 'x'
  m <- x$getInverseMatrix()  # cached inverse, or NULL on first call
  if(!is.null(m)) {
    message("getting cached data")
    return(m)  # cache hit: skip recomputation
  }
  data <- x$get()
  m <- solve(data, ...)      # compute the inverse on a cache miss
  x$setMatrixInversion(m)    # store it for subsequent calls
  m
}
|
library(testthat)
library(gt)
test_check("gt")
|
/tests/testthat.R
|
permissive
|
rstudio/gt
|
R
| false
| false
| 48
|
r
|
library(testthat)
library(gt)
test_check("gt")
|
# Call TopDom TAD boundaries for all 12 pepper chromosomes (100 kb bins).
source('/home/yiliao/OLDISK/software/TopDom/TopDom_v0.0.2.R')
setwd("/home/yiliao/OLDISK/genome_assembly/hic_explorer/13_bnbc/100k/TopDom_bnbc")

# Chromosome labels exactly as they appear in the input file names.
# NOTE: "chr01" is lower-case in the files while Chr02..Chr12 are capitalised.
chromosomes <- c("chr01", sprintf("Chr%02d", 2:12))

# Run TopDom (window size 5) on each per-chromosome contact matrix,
# writing "<input>.output" alongside each input. Replaces 12 copy-pasted
# calls with one data-driven loop; call order and arguments are unchanged.
for (chr in chromosomes) {
  matrix_file <- sprintf("matrixHL2.%s.csv.TopDom.matrix", chr)
  TopDom(matrix.file = matrix_file, window.size = 5,
         outFile = paste0(matrix_file, ".output"))
}
|
/Bin/Rscript/LJHL2.topdom.r
|
no_license
|
yiliao1022/Pepper3Dgenome
|
R
| false
| false
| 1,597
|
r
|
# Call TopDom TAD boundaries for all 12 pepper chromosomes (100 kb bins).
source('/home/yiliao/OLDISK/software/TopDom/TopDom_v0.0.2.R')
setwd("/home/yiliao/OLDISK/genome_assembly/hic_explorer/13_bnbc/100k/TopDom_bnbc")

# Chromosome labels exactly as they appear in the input file names.
# NOTE: "chr01" is lower-case in the files while Chr02..Chr12 are capitalised.
chromosomes <- c("chr01", sprintf("Chr%02d", 2:12))

# Run TopDom (window size 5) on each per-chromosome contact matrix,
# writing "<input>.output" alongside each input. Replaces 12 copy-pasted
# calls with one data-driven loop; call order and arguments are unchanged.
for (chr in chromosomes) {
  matrix_file <- sprintf("matrixHL2.%s.csv.TopDom.matrix", chr)
  TopDom(matrix.file = matrix_file, window.size = 5,
         outFile = paste0(matrix_file, ".output"))
}
|
##' Bayes factors or posterior samples for binomial, geometric, or neg. binomial data.
##'
##' Given count data modeled as a binomial, geometric, or negative binomial random variable,
##' the Bayes factor provided by \code{proportionBF} tests the null hypothesis that
##' the probability of a success is \eqn{p_0}{p_0} (argument \code{p}). Specifically,
##' the Bayes factor compares two hypotheses: that the probability is \eqn{p_0}{p_0}, or
##' probability is not \eqn{p_0}{p_0}. Currently, the default alternative is that
##' \deqn{\lambda~logistic(\lambda_0,r)} where
##' \eqn{\lambda_0=logit(p_0)}{lambda_0=logit(p_0)} and
##' \eqn{\lambda=logit(p)}{lambda=logit(p)}. \eqn{r}{r} serves as a prior scale parameter.
##'
##' For the \code{rscale} argument, several named values are recognized:
##' "medium", "wide", and "ultrawide". These correspond
##' to \eqn{r} scale values of \eqn{1/2}{1/2}, \eqn{\sqrt{2}/2}{sqrt(2)/2}, and 1,
##' respectively.
##'
##' The Bayes factor is computed via Gaussian quadrature, and posterior
##' samples are drawn via independence Metropolis-Hastings.
##' @title Function for Bayesian analysis of proportions
##' @param y a vector of successes
##' @param N a vector of total number of observations
##' @param p the null value for the probability of a success to be tested against
##' @param rscale prior scale. A number of preset values can be given as
##' strings; see Details.
##' @param nullInterval optional vector of length 2 containing
##' lower and upper bounds of an interval hypothesis to test, in probability units
##' @param posterior if \code{TRUE}, return samples from the posterior instead
##' of Bayes factor
##' @param callback callback function for third-party interfaces
##' @param ... further arguments to be passed to or from methods.
##' @return If \code{posterior} is \code{FALSE}, an object of class
##' \code{BFBayesFactor} containing the computed model comparisons is
##' returned. If \code{nullInterval} is defined, then two Bayes factors will
##' be computed: The Bayes factor for the interval against the null hypothesis
##' that the probability is \eqn{p_0}{p0}, and the corresponding Bayes factor for
##' the complement of the interval.
##'
##' If \code{posterior} is \code{TRUE}, an object of class \code{BFmcmc},
##' containing MCMC samples from the posterior is returned.
##' @export
##' @keywords htest
##' @author Richard D. Morey (\email{richarddmorey@@gmail.com})
##' @examples
##' bf = proportionBF(y = 15, N = 25, p = .5)
##' bf
##' ## Sample from the corresponding posterior distribution
##' samples =proportionBF(y = 15, N = 25, p = .5, posterior = TRUE, iterations = 10000)
##' plot(samples[,"p"])
##' @seealso \code{\link{prop.test}}
proportionBF <- function(y, N, p, rscale = "medium", nullInterval = NULL, posterior=FALSE, callback = function(...) as.integer(0), ...)
{
  # Reject vector nulls before comparing: with length(p) > 1 the range test
  # below would error (R >= 4.3) or silently use only the first element.
  if( length(p) > 1 ) stop("Only a single null allowed (length(p) > 1).")
  # The null value must be a proper probability (strictly between 0 and 1).
  if (p >= 1 || p <= 0)
    stop('p must be between 0 and 1', call.=FALSE)
  if(!is.null(nullInterval)){
    # Scalar validity check: short-circuiting || instead of elementwise |.
    if(any(nullInterval<0) || any(nullInterval>1)) stop("nullInterval endpoints must be in [0,1].")
    nullInterval = range(nullInterval)
  }
  rscale = rpriorValues("proptest",,rscale)
  if( length(y) != length(N) ) stop("Length of y and N must be the same.")
  # Data sanity checks (|| instead of |: both operands are scalars from any()).
  if( any(y>N) || any(y < 0) ) stop("Invalid data (y>N or y<0).")
  if( any( c(y,N)%%1 != 0 ) ) stop("y and N must be integers.")
  hypNames = makePropHypothesisNames(rscale, nullInterval, p)
  # Alternative model: logit(p) ~ logistic(logit(p0), rscale).
  mod1 = BFproportion(type = "logistic",
                      identifier = list(formula = "p =/= p0", nullInterval = nullInterval, p0 = p),
                      prior=list(rscale=rscale, nullInterval = nullInterval, p0 = p),
                      shortName = hypNames$shortName,
                      longName = hypNames$longName
                      )
  data = data.frame(y = y, N = N)
  checkCallback(callback,as.integer(0))
  # Posterior sampling bypasses the Bayes factor computation entirely.
  if(posterior)
    return(posterior(mod1, data = data, callback = callback, ...))
  bf1 = compare(numerator = mod1, data = data)
  if(!is.null(nullInterval)){
    # Also build the complementary interval model so both Bayes factors
    # (interval vs null, complement vs null) are returned.
    mod2 = mod1
    attr(mod2@identifier$nullInterval, "complement") = TRUE
    attr(mod2@prior$nullInterval, "complement") = TRUE
    hypNames = makePropHypothesisNames(rscale, mod2@identifier$nullInterval,p)
    mod2@shortName = hypNames$shortName
    mod2@longName = hypNames$longName
    bf2 = compare(numerator = mod2, data = data)
    checkCallback(callback,as.integer(1000))
    return(c(bf1, bf2))
  }else{
    checkCallback(callback,as.integer(1000))
    return(c(bf1))
  }
}
|
/R/proportionBF.R
|
no_license
|
cran/BayesFactor
|
R
| false
| false
| 4,648
|
r
|
##' Bayes factors or posterior samples for binomial, geometric, or neg. binomial data.
##'
##' Given count data modeled as a binomial, geometric, or negative binomial random variable,
##' the Bayes factor provided by \code{proportionBF} tests the null hypothesis that
##' the probability of a success is \eqn{p_0}{p_0} (argument \code{p}). Specifically,
##' the Bayes factor compares two hypotheses: that the probability is \eqn{p_0}{p_0}, or
##' probability is not \eqn{p_0}{p_0}. Currently, the default alternative is that
##' \deqn{\lambda~logistic(\lambda_0,r)} where
##' \eqn{\lambda_0=logit(p_0)}{lambda_0=logit(p_0)} and
##' \eqn{\lambda=logit(p)}{lambda=logit(p)}. \eqn{r}{r} serves as a prior scale parameter.
##'
##' For the \code{rscale} argument, several named values are recognized:
##' "medium", "wide", and "ultrawide". These correspond
##' to \eqn{r} scale values of \eqn{1/2}{1/2}, \eqn{\sqrt{2}/2}{sqrt(2)/2}, and 1,
##' respectively.
##'
##' The Bayes factor is computed via Gaussian quadrature, and posterior
##' samples are drawn via independence Metropolis-Hastings.
##' @title Function for Bayesian analysis of proportions
##' @param y a vector of successes
##' @param N a vector of total number of observations
##' @param p the null value for the probability of a success to be tested against
##' @param rscale prior scale. A number of preset values can be given as
##' strings; see Details.
##' @param nullInterval optional vector of length 2 containing
##' lower and upper bounds of an interval hypothesis to test, in probability units
##' @param posterior if \code{TRUE}, return samples from the posterior instead
##' of Bayes factor
##' @param callback callback function for third-party interfaces
##' @param ... further arguments to be passed to or from methods.
##' @return If \code{posterior} is \code{FALSE}, an object of class
##' \code{BFBayesFactor} containing the computed model comparisons is
##' returned. If \code{nullInterval} is defined, then two Bayes factors will
##' be computed: The Bayes factor for the interval against the null hypothesis
##' that the probability is \eqn{p_0}{p0}, and the corresponding Bayes factor for
##' the compliment of the interval.
##'
##' If \code{posterior} is \code{TRUE}, an object of class \code{BFmcmc},
##' containing MCMC samples from the posterior is returned.
##' @export
##' @keywords htest
##' @author Richard D. Morey (\email{richarddmorey@@gmail.com})
##' @examples
##' bf = proportionBF(y = 15, N = 25, p = .5)
##' bf
##' ## Sample from the corresponding posterior distribution
##' samples =proportionBF(y = 15, N = 25, p = .5, posterior = TRUE, iterations = 10000)
##' plot(samples[,"p"])
##' @seealso \code{\link{prop.test}}
proportionBF <- function(y, N, p, rscale = "medium", nullInterval = NULL, posterior=FALSE, callback = function(...) as.integer(0), ...)
{
  # Reject vector nulls before comparing: with length(p) > 1 the range test
  # below would error (R >= 4.3) or silently use only the first element.
  if( length(p) > 1 ) stop("Only a single null allowed (length(p) > 1).")
  # The null value must be a proper probability (strictly between 0 and 1).
  if (p >= 1 || p <= 0)
    stop('p must be between 0 and 1', call.=FALSE)
  if(!is.null(nullInterval)){
    # Scalar validity check: short-circuiting || instead of elementwise |.
    if(any(nullInterval<0) || any(nullInterval>1)) stop("nullInterval endpoints must be in [0,1].")
    nullInterval = range(nullInterval)
  }
  rscale = rpriorValues("proptest",,rscale)
  if( length(y) != length(N) ) stop("Length of y and N must be the same.")
  # Data sanity checks (|| instead of |: both operands are scalars from any()).
  if( any(y>N) || any(y < 0) ) stop("Invalid data (y>N or y<0).")
  if( any( c(y,N)%%1 != 0 ) ) stop("y and N must be integers.")
  hypNames = makePropHypothesisNames(rscale, nullInterval, p)
  # Alternative model: logit(p) ~ logistic(logit(p0), rscale).
  mod1 = BFproportion(type = "logistic",
                      identifier = list(formula = "p =/= p0", nullInterval = nullInterval, p0 = p),
                      prior=list(rscale=rscale, nullInterval = nullInterval, p0 = p),
                      shortName = hypNames$shortName,
                      longName = hypNames$longName
                      )
  data = data.frame(y = y, N = N)
  checkCallback(callback,as.integer(0))
  # Posterior sampling bypasses the Bayes factor computation entirely.
  if(posterior)
    return(posterior(mod1, data = data, callback = callback, ...))
  bf1 = compare(numerator = mod1, data = data)
  if(!is.null(nullInterval)){
    # Also build the complementary interval model so both Bayes factors
    # (interval vs null, complement vs null) are returned.
    mod2 = mod1
    attr(mod2@identifier$nullInterval, "complement") = TRUE
    attr(mod2@prior$nullInterval, "complement") = TRUE
    hypNames = makePropHypothesisNames(rscale, mod2@identifier$nullInterval,p)
    mod2@shortName = hypNames$shortName
    mod2@longName = hypNames$longName
    bf2 = compare(numerator = mod2, data = data)
    checkCallback(callback,as.integer(1000))
    return(c(bf1, bf2))
  }else{
    checkCallback(callback,as.integer(1000))
    return(c(bf1))
  }
}
|
## Create a cache-enabled wrapper around matrix `x`.
## Returns a list of closures: set()/get() for the matrix itself and
## setinverse()/getinverse() for its cached inverse. The inverse itself is
## computed by cacheSolve(), not here.
makeCacheMatrix <- function(x = matrix()) {  # default fixed: was numeric(), but this wraps a matrix
  inv <- NULL  # cached inverse; NULL until cacheSolve stores one
  # Replace the wrapped matrix and invalidate any previously cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Return the wrapped matrix (found lexically in this environment).
  get <- function() x
  # Store a computed inverse. Parameter renamed from `solve`, which shadowed
  # base::solve() and made the code misleading; positional callers
  # (cacheSolve) are unaffected.
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  # Expose the four closures to callers.
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# Return the inverse of the matrix wrapped by `x`, which must be a list
# produced by makeCacheMatrix() (so that get/getinverse/setinverse exist).
cacheSolve <- function(x, ...) {
  # Fetch whatever inverse is currently cached inside the wrapper.
  inv <- x$getinverse()
  # Cache hit: a previous call already computed the inverse, so reuse it.
  if(!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Cache miss: the inverse IS computed here, via solve(), and then stored
  # back into the wrapper so later calls can reuse it. (The original comment
  # claiming the inverse was computed inside makeCacheMatrix was incorrect.)
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
KJRenaud/ProgrammingAssignment2-1
|
R
| false
| false
| 1,531
|
r
|
## Create a cache-enabled wrapper around matrix `x`: a list of closures for
## storing/retrieving the matrix and its cached inverse. The inverse itself
## is computed by cacheSolve(), not here.
## NOTE(review): the default `x = numeric()` is odd for a matrix wrapper;
## `matrix()` would match the intended use.
makeCacheMatrix <- function(x = numeric()) {
  #initializing the cached inverse as empty (NULL = not computed yet)
  inv <- NULL
  #sets x equal to y in the enclosing environment and
  #clears inv, invalidating any previously cached inverse
  set <- function(y) {
  x <<- y
  inv <<- NULL
  }
  #returns the wrapped matrix, found lexically in this environment
  get <- function() x
  #stores a computed inverse in the cache
  #NOTE(review): the parameter name `solve` shadows base::solve() here
  setinverse <- function(solve) inv <<- solve
  getinverse <- function() inv
  #returns the four closures as a list for callers such as cacheSolve
  list(set = set, get = get,
  setinverse = setinverse,
  getinverse = getinverse)
}
# Return the inverse of the matrix wrapped by `x`, which must be a list
# produced by makeCacheMatrix() (so that get/getinverse/setinverse exist).
cacheSolve <- function(x, ...) {
  #fetch whatever inverse is currently cached inside the wrapper
  inv <- x$getinverse()
  #cache hit: a previous call already computed the inverse, so reuse it
  #instead of recomputing
  if(!is.null(inv)) {
  message("getting cached data")
  return(inv)
  }
  #cache miss: the inverse IS computed here, via solve(), and then stored
  #back into the wrapper for reuse. (The original comment claiming the
  #inverse was computed inside makeCacheMatrix was incorrect.)
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_esri_features_by_ids.R
\name{get_esri_features_by_ids}
\alias{get_esri_features_by_ids}
\title{get features}
\usage{
get_esri_features_by_ids(
ids,
url = paste0("http://portal1.snirh.gov.br/ana/",
"rest/services/Esta\%C3\%A7\%C3\%B5es_da_", "Rede_Hidrometeorol\%C3\%B3gica_Nacional",
"_em_Opera\%C3\%A7\%C3\%A3o/MapServer/1"),
query_url = paste(url, "query", sep = "/"),
fields = c("*"),
token = "",
ssl = FALSE,
simplifyDataFrame = FALSE,
simplifyVector = FALSE,
full = FALSE
)
}
\arguments{
\item{ids}{Integer.}
\item{url}{Character.}
\item{query_url}{Character.}
\item{fields}{Character.}
\item{token}{Character.}
\item{ssl}{Logical, default = FALSE.}
\item{simplifyDataFrame}{Logical, default = FALSE.}
\item{simplifyVector}{Logical, default = FALSE.}
\item{full}{Logical, default = FALSE.}
}
\description{
get features.
}
\examples{
\dontrun{
# do not run
ids <- get_object_ids()
feat <- get_esri_features_by_ids(ids = ids)
}
}
\seealso{
\code{\link{layer_info}}
}
|
/man/get_esri_features_by_ids.Rd
|
no_license
|
ibarraespinosa/ana
|
R
| false
| true
| 1,086
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_esri_features_by_ids.R
\name{get_esri_features_by_ids}
\alias{get_esri_features_by_ids}
\title{get features}
\usage{
get_esri_features_by_ids(
ids,
url = paste0("http://portal1.snirh.gov.br/ana/",
"rest/services/Esta\%C3\%A7\%C3\%B5es_da_", "Rede_Hidrometeorol\%C3\%B3gica_Nacional",
"_em_Opera\%C3\%A7\%C3\%A3o/MapServer/1"),
query_url = paste(url, "query", sep = "/"),
fields = c("*"),
token = "",
ssl = FALSE,
simplifyDataFrame = FALSE,
simplifyVector = FALSE,
full = FALSE
)
}
\arguments{
\item{ids}{Integer.}
\item{url}{Character.}
\item{query_url}{Character.}
\item{fields}{Character.}
\item{token}{Character.}
\item{ssl}{Logical, default = FALSE.}
\item{simplifyDataFrame}{Logical, default = FALSE.}
\item{simplifyVector}{Logical, default = FALSE.}
\item{full}{Logical, default = FALSE.}
}
\description{
get features.
}
\examples{
\dontrun{
# do not run
ids <- get_object_ids()
feat <- get_esri_features_by_ids(ids = ids)
}
}
\seealso{
\code{\link{layer_info}}
}
|
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 1
# Merges the training and the test sets to create one data set.
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ***************************************************************************
# Step 1: Download Zip File to Raw Data Folder and Extract contents
# ***************************************************************************
# Download the project zip once into rawdata/ and extract it.
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if(!file.exists("rawdata")){dir.create("rawdata")}
if(!file.exists("rawdata/ProjectFiles.zip")){
  # mode="wb" keeps the zip binary-safe (downloading a zip without it
  # corrupts the file on Windows); arguments named for clarity.
  download.file(fileURL, destfile="rawdata/ProjectFiles.zip", method="curl", mode="wb")
  unzip(zipfile="rawdata/ProjectFiles.zip",exdir="rawdata/")
}
# ***************************************************************************
# Check Step 1a: Show that rawdata directory was created
# ***************************************************************************
# > list.dirs(path = ".", full.names = TRUE, recursive = FALSE)
#
# [1] "./.Rproj.user" "./data" "./rawdata"
# ***************************************************************************
# Check Step 1b: Show that download and extraction of zip file was successful
# ***************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = TRUE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "ProjectFiles.zip"
# [2] "UCI HAR Dataset/activity_labels.txt"
# [3] "UCI HAR Dataset/features_info.txt"
# [4] "UCI HAR Dataset/features.txt"
# [5] "UCI HAR Dataset/README.txt"
# [6] "UCI HAR Dataset/test/Inertial Signals/body_acc_x_test.txt"
# [7] "UCI HAR Dataset/test/Inertial Signals/body_acc_y_test.txt"
# [8] "UCI HAR Dataset/test/Inertial Signals/body_acc_z_test.txt"
# [9] "UCI HAR Dataset/test/Inertial Signals/body_gyro_x_test.txt"
# [10] "UCI HAR Dataset/test/Inertial Signals/body_gyro_y_test.txt"
# [11] "UCI HAR Dataset/test/Inertial Signals/body_gyro_z_test.txt"
# [12] "UCI HAR Dataset/test/Inertial Signals/total_acc_x_test.txt"
# [13] "UCI HAR Dataset/test/Inertial Signals/total_acc_y_test.txt"
# [14] "UCI HAR Dataset/test/Inertial Signals/total_acc_z_test.txt"
# [15] "UCI HAR Dataset/test/subject_test.txt"
# [16] "UCI HAR Dataset/test/X_test.txt"
# [17] "UCI HAR Dataset/test/y_test.txt"
# [18] "UCI HAR Dataset/train/Inertial Signals/body_acc_x_train.txt"
# [19] "UCI HAR Dataset/train/Inertial Signals/body_acc_y_train.txt"
# [20] "UCI HAR Dataset/train/Inertial Signals/body_acc_z_train.txt"
# [21] "UCI HAR Dataset/train/Inertial Signals/body_gyro_x_train.txt"
# [22] "UCI HAR Dataset/train/Inertial Signals/body_gyro_y_train.txt"
# [23] "UCI HAR Dataset/train/Inertial Signals/body_gyro_z_train.txt"
# [24] "UCI HAR Dataset/train/Inertial Signals/total_acc_x_train.txt"
# [25] "UCI HAR Dataset/train/Inertial Signals/total_acc_y_train.txt"
# [26] "UCI HAR Dataset/train/Inertial Signals/total_acc_z_train.txt"
# [27] "UCI HAR Dataset/train/subject_train.txt"
# [28] "UCI HAR Dataset/train/X_train.txt"
# [29] "UCI HAR Dataset/train/y_train.txt"
# ******************************************************************
# Step 2: Load Test & Train Datasets into R
# ******************************************************************
# Install data.table only if it is not already available, then load it.
if (!requireNamespace("data.table", quietly = TRUE)) install.packages("data.table")
library(data.table)
Test_subjectDF <- read.table("rawdata/UCI HAR Dataset/test/subject_test.txt")
Test_XDF <- read.table("rawdata/UCI HAR Dataset/test/X_test.txt")
# BUG FIX: the archive ships lower-case "y_test.txt"/"y_train.txt";
# the previous "Y_*.txt" paths fail on case-sensitive file systems.
Test_YDF <- read.table("rawdata/UCI HAR Dataset/test/y_test.txt")
Train_subjectDF <- read.table("rawdata/UCI HAR Dataset/train/subject_train.txt")
Train_XDF <- read.table("rawdata/UCI HAR Dataset/train/X_train.txt")
Train_YDF <- read.table("rawdata/UCI HAR Dataset/train/y_train.txt")
# ***************************************************************************
# Check Step 2: Show files imported and their dimensions are correct
# ***************************************************************************
# > dim(Test_subjectDF)
# [1] 2947 1
#
# > dim(Test_XDF)
# [1] 2947 561
#
# > dim(Test_YDF)
# [1] 2947 1
#
# > dim(Train_subjectDF)
# [1] 7352 1
#
# > dim(Train_XDF)
# [1] 7352 561
#
# > dim(Train_YDF)
# [1] 7352 1
# ***************************************************************************
# Step 3: Bind X,Y,Subject data frames into one data frame
# ***************************************************************************
# Stack test and train observations row-wise, then join measurements (X),
# activity codes (Y) and subject IDs column-wise into one raw data frame.
XDF <- rbind(Test_XDF,Train_XDF)                    # 561 feature columns
YDF <- rbind(Test_YDF,Train_YDF)                    # activity code, 1 column
subjectDF <- rbind(Test_subjectDF,Train_subjectDF)  # subject ID, 1 column
RawDF <- cbind(XDF,YDF,subjectDF)                   # 563 columns total
# ***************************************************************************
# Check Step 3: Show files merged and their dimensions are correct
# ***************************************************************************
# > dim(XDF)        # should be 2947 + 7352 = 10299 rows and var @ 561
# [1] 10299   561
#
# > dim(YDF)        # should be 2947 + 7352 = 10299 rows and var @ 1
# [1] 10299     1
#
# > dim(subjectDF)  # should be 2947 + 7352 = 10299 rows and var @ 1
# [1] 10299     1
#
# > dim(RawDF)      # should be 10299 rows and var 561 + 1 + 1 = 563
# [1] 10299 563
#
# ***************************************************************************
# Step 4: Write combined dataframe to rawdata folder as text file
# ***************************************************************************
# Persist the merged raw data (10299 x 563) so later requirements can start
# from this file instead of re-merging.
fwrite(RawDF,"rawdata/combined_rawdata.txt")
# ***************************************************************************
# Check Step 4: Show file was created
# ***************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "combined_rawdata.txt" "ProjectFiles.zip" "UCI HAR Dataset"
#
# ***************************************************************************
# Step 5a: Create New Features file for combined Raw Data
# ***************************************************************************
# Read the 561 feature names shipped with the data set, then append names
# for the two columns added in Step 3 (activity code, subject id) so the
# features table lines up 1:1 with the 563 columns of the combined data.
Features <- read.table("rawdata/UCI HAR Dataset/features.txt")
Additions <- data.frame(
  V1 = max(Features$V1) + 1:2,
  V2 = c("Activity_code", "Subject_ID")
)
RawDF_cb <- rbind(Features, Additions)
# ****************************************************************************
# Check Step 5a: Check Features and Additions data frames combined correctly.
# ****************************************************************************
# > dim(Features)
# [1] 561 2
#
# > dim(Additions)
# [1] 2 2
#
# > print(Additions)
# V1 V2
# 1 562 Activity_code
# 2 563 Subject_ID
#
# > dim(RawDF_cb)
# [1] 563 2
#
# > tail(RawDF_cb,4) #Dimension ok and last 2 rows added ok
# V1 V2
# 560 560 angle(Y,gravityMean)
# 561 561 angle(Z,gravityMean)
# 562 562 Activity_code
# 563 563 Subject_ID
# ****************************************************************************
# Step 5b: Export combined Features to file
# ****************************************************************************
# Persist the completed 563-row features table alongside the raw data.
fwrite(RawDF_cb,"rawdata/combined_features.txt")
# ****************************************************************************
# Check Step 5b: Export of combined Features file successful
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "combined_features.txt" "combined_rawdata.txt" "ProjectFiles.zip"
# [4] "UCI HAR Dataset"
#
# ****************************************************************************
# Step 5c: Export combined Features to file ready for cookbook.md list object
# ****************************************************************************
# CodeBook() (defined in CreateCodeBook.R) writes a variables table formatted
# for pasting into CodeBook.md; see that script for details.
source("CreateCodeBook.R")
CodeBook(RawDF_cb,"rawdata/cbImport.txt")
# ****************************************************************************
# Check Step 5c: Export of combined Features file ready successful
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "cbImport.txt" "combined_features.txt" "combined_rawdata.txt"
# [4] "ProjectFiles.zip" "UCI HAR Dataset"
# ***************************************************************************
# Step 6: Clear Global environment
# ***************************************************************************
# Reset the workspace so Requirement 2 starts from a clean slate.
# NOTE(review): rm(list = ls()) in scripts is generally discouraged; it is
# kept here because each requirement deliberately re-reads its inputs.
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 1
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 2
# Extract only the measurements on the mean and standard deviation
# for each measurement.
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ***************************************************************************
# Step 1a: Import combined RawData file and new Features file
# ***************************************************************************
# Re-load the combined raw data and its matching features table produced by
# Requirement 1.
library(data.table)
RawDF <- fread("rawdata/combined_rawdata.txt")
Features <- fread("rawdata/combined_features.txt")
# ***************************************************************************
# Check Step 1a: Show files and dimensions imported correctly
# ***************************************************************************
# > dim(RawDF)
# [1] 10299 563
# > dim(Features)
# [1] 563 2
# ***************************************************************************
# Step 1b: Assign new Features to Combined RawData variables
# ***************************************************************************
# Label all 563 columns of the combined data with their feature names, then
# drop the lookup table, which is no longer needed.
# NOTE(review): features.txt contains some duplicated feature names; the
# duplicate check in Step 2a below confirms none survive the mean/std filter.
names(RawDF) <- Features$V2
rm(Features)
# ***************************************************************************
# Check Step 1b: Check new variables assigned correctly
# ***************************************************************************
# > head(names(RawDF),3)
# [1] "tBodyAcc-mean()-X" "tBodyAcc-mean()-Y" "tBodyAcc-mean()-Z"
# > tail(names(RawDF),3)
# [1] "angle(Z,gravityMean)" "Activity_code" "Subject_ID"
# > dim(RawDF)
# [1] 10299 563
# ***************************************************************************
# Step 2a: Find variables with words 'mean' and 'std', and
# filter data frame and include "Subject_ID" and "Activity_code"
# ***************************************************************************
# Column positions to keep: the two id columns first, then every column whose
# name mentions "mean", then every column whose name mentions "std" — the
# per-pattern grep order matches the original column sequence.
MScolNums <- unlist(lapply(
  c("Subject_ID", "Activity_code", "mean", "std"),
  grep, x = names(RawDF)
))
MeanStdDF <- subset(RawDF, select = MScolNums)
# ***************************************************************************
# Check Step 2a: New Data frame with variables only with 'mean' and 'std',
# including "Subject_ID" and "Activity_code"
# ***************************************************************************
# > length(MScolNums)
# [1] 81
# > dim(MeanStdDF)
# [1] 10299 81
#
# The following will check that MeanStdDF only contains the required variables:
#
# > lenvars <- c(length(grep("std",names(MeanStdDF))),
# + length(grep("mean",names(MeanStdDF))),
# + length(grep("Subject_ID",names(MeanStdDF))),
# + length(grep("Activity_code",names(MeanStdDF))))
#
# > lenvars
# [1] 33 46 1 1
#
# > sum(lenvars)
# [1] 81
#
# The following will check if there are any duplicate variables:
# > sum(duplicated(names(MeanStdDF)))
# [1] 0
# ****************************************************************************
# Step 2b: Export Extracted data frame to file
# ****************************************************************************
# Persist the mean/std extract (10299 x 81) for Requirement 3.
fwrite(MeanStdDF,"rawdata/Extracted_Data.txt")
# ****************************************************************************
# Check Step 2b: Export of Extracted Data file successful
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "cbImport.txt" "combined_features.txt" "combined_rawdata.txt"
# [4] "Extracted_Data.txt" "ProjectFiles.zip" "UCI HAR Dataset"
#
# ****************************************************************************
# Step 3: Export variables of Extracted Data to file;
# ready for cookbook.md list object
# ****************************************************************************
# Build a (position, name) table of the extracted variables and hand it to
# CodeBook() (from CreateCodeBook.R) for the CodeBook.md import file.
x <- data.frame(V2 = seq.int(ncol(MeanStdDF)))   # column positions
y <- data.frame(V1 = names(MeanStdDF))           # column names
new <- cbind(x, y)                               # (position, name) pairs
source("CreateCodeBook.R")
CodeBook(new, "rawdata/ExtractedcbImport.txt")
# ****************************************************************************
# Check Step 3: Export of Extracted Data variables file successful for import
# to cookbook.md
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# [1] "cbImport.txt" "combined_features.txt" "combined_rawdata.txt"
# [4] "Extracted_Data.txt" "ExtractedcbImport.txt" "ProjectFiles.zip"
# [7] "UCI HAR Dataset"
# ***************************************************************************
# Step 4: Clear Global environment
# ***************************************************************************
# Clear the workspace before Requirement 3 re-reads its inputs from disk.
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 2
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 3
# Uses descriptive activity names to name the activities in the data set
#
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ***************************************************************************
# Step 1: Import extracted raw data & Activities Features files
# ***************************************************************************
# Load the mean/std extract from Requirement 2 and the activity code ->
# activity name lookup (6 rows: WALKING ... LAYING) shipped with the data.
library(data.table)
MeanStdDF <- fread("rawdata/Extracted_Data.txt")
Activities <- fread("rawdata/UCI HAR Dataset/activity_labels.txt")
# ***************************************************************************
# Check Step 1: Show data frames import successfully
# ***************************************************************************
# > dim(MeanStdDF)
# [1] 10299 81
# > dim(Activities)
# [1] 6 2
# ***************************************************************************
# Step 2: Update levels of activity codes
# ***************************************************************************
# Map each numeric activity code to its descriptive name. Passing the
# lookup table's codes and labels to factor() ties each code explicitly to
# its label, instead of relying on positional level assignment (the
# original `levels(...) = ...` also used `=` where `<-` is conventional).
MeanStdDF$Actvty_fctrs <- factor(MeanStdDF$Activity_code,
                                 levels = Activities$V1,
                                 labels = Activities$V2)
# Re-order so the new factor sits in position 3, right after the two id
# columns, followed by the measurement columns (ncol() instead of the
# hard-coded 82/81 keeps this correct if the extract ever changes width).
ReorderDF <- MeanStdDF[, c(1:2, ncol(MeanStdDF), 3:(ncol(MeanStdDF) - 1))]
# ***************************************************************************
# Check Step 2: Show that levels of activity codes updated correctly
# ***************************************************************************
# > dim(ReorderDF)
# [1] 10299 82
# > print(Activities)
# V1 V2
# 1: 1 WALKING
# 2: 2 WALKING_UPSTAIRS
# 3: 3 WALKING_DOWNSTAIRS
# 4: 4 SITTING
# 5: 5 STANDING
# 6: 6 LAYING
# > str(ReorderDF$Actvty_fctrs)
# Factor w/ 6 levels "WALKING","WALKING_UPSTAIRS",..: 5 5 5 5 5 5 5 5 5 5 ...
# > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==1),1)
# [1] WALKING
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# # > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==2),1)
# [1] WALKING_UPSTAIRS
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# # > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==3),1)
# [1] WALKING_DOWNSTAIRS
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==4),1)
# [1] SITTING
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==5),1)
# [1] STANDING
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==6),1)
# [1] LAYING
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# > str(ReorderDF)
# Classes ‘data.table’ and 'data.frame': 10299 obs. of 82 variables:
# $ Subject_ID : int 2 2 2 2 2 2 2 2 2 2 ...
# $ Activity_code : int 5 5 5 5 5 5 5 5 5 5 ...
# $ Actvty_fctrs : Factor w/ 6 levels "WALKING","WALKING_UPSTAIRS"...
# variables 4-81 not shown...
# ***************************************************************************
# Step 3: Export Activity updated raw data to file
# ***************************************************************************
# Persist the activity-labelled extract (10299 x 82) for Requirement 4.
fwrite(ReorderDF, "rawdata/ActivityUpdatedRawData.txt")
# ***************************************************************************
# Check Step 3: Updated raw data with Activity exported to file
# ***************************************************************************
# > fwrite(ReorderDF, "rawdata/ActivityUpdatedRawData.txt")
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "ActivityUpdatedRawData.txt" "cbImport.txt" "combined_features.txt"
# [4] "combined_rawdata.txt" "Extracted_Data.txt" "ExtractedcbImport.txt"
# [7] "ProjectFiles.zip" "UCI HAR Dataset"
# ****************************************************************************
# Step 4: Export variables of Updated Activity Data to file;
# ready for cookbook.md list object
# ****************************************************************************
# Build a (position, name) table of the activity-updated variables and hand
# it to CodeBook() (from CreateCodeBook.R) for the CodeBook.md import file.
x <- data.frame(V2 = seq.int(ncol(ReorderDF)))   # column positions
y <- data.frame(V1 = names(ReorderDF))           # column names
new <- cbind(x, y)                               # (position, name) pairs
source("CreateCodeBook.R")
CodeBook(new, "rawdata/UpdatedActvcbImport.txt")
# ****************************************************************************
# Check Step 4: Export of Activity Updated Data variables file successful for
# import to cookbook.md
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# [1] "ActivityUpdatedRawData.txt" "cbImport.txt" "combined_features.txt"
# [4] "combined_rawdata.txt" "Extracted_Data.txt" "ExtractedcbImport.txt"
# [7] "ProjectFiles.zip" "UCI HAR Dataset" "UpdatedActvcbImport.txt"
# ****************************************************************************
# Step 5: Clear Global Environment
# ****************************************************************************
# Clear the workspace before Requirement 4 re-reads its inputs from disk.
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 3
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 4
# Appropriately labels the data set with descriptive variable names.
#
# Rules: > Clean invalid characters like " ( ) - , "
# > Remove duplicates words in variables
# > Make sure there are no duplicate variables
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ****************************************************************************
# Step 1: Load Activity Raw Data file into R & output column names to variable
# ****************************************************************************
# Load the activity-labelled data from Requirement 3; its (still "dirty")
# column names are cleaned in Step 2 below.
library(data.table)
dirty <- fread("rawdata/ActivityUpdatedRawData.txt")
rawdataVars <- names(dirty)
# ****************************************************************************
# Check Step 1: Show data frame imported and new column names variable created.
# ****************************************************************************
# > dim(dirty)
# [1] 10299 82
# str(rawdataVars)
# chr [1:82] "Subject_ID" "Activity_code" "Actvty_fctrs" "tBodyAcc-mean()-X" ...
# ****************************************************************************
# Step 2: Cleanup column names & replace raw data frame variables with new
# list.
# ****************************************************************************
# source r script for RenderName function.
# See Rename.R in project folder or CodeBook.md
# RenderName() (defined in Rename.R) cleans one variable name: per the rules
# above it strips invalid characters such as ( ) - , and removes duplicated
# words.
source("Rename.R")
# vapply guarantees exactly one character result per input name, where
# unlist(lapply(...)) would silently flatten whatever came back;
# USE.NAMES = FALSE keeps the result unnamed like the original.
cleanedVars <- vapply(rawdataVars, RenderName, character(1), USE.NAMES = FALSE)
names(dirty) <- cleanedVars
# ****************************************************************************
# Check Step 2: Confirm no Duplicated variable names
# ****************************************************************************
# > sum(duplicated(s))
# [1] 0
# > match(s,rawdataVars)
# [1] 1 2 3 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA
# [28] NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA
# [55] NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA
# [82] NA
# ****************************************************************************
# Step 3: Export Tidy data. Export variables and Descriptions to file for
# import to cookbook.md list object
# ****************************************************************************
# source r script for AddDescn function.
# See addDescription.R in project folder or CodeBook.md
# AddDescn() (defined in addDescription.R) produces a description string for
# each cleaned variable name.
source('addDescription.R')
descriptions <- unlist(lapply(cleanedVars, AddDescn))
# Assemble (position, name, description) rows for the CodeBook.md import.
x <- data.frame(V2 = seq.int(ncol(dirty)))  # column positions
y <- data.frame(V1 = names(dirty))          # cleaned column names
z <- data.frame(V3 = descriptions)          # per-variable descriptions
new <- cbind(x, y, z)
source("CreateCodeBook.R")
CodeBook(new, "data/TidycbImport.txt")
# Persist the tidy (cleaned-name) data set.
fwrite(dirty, "data/TidyData.txt")
# ****************************************************************************
# Check Step 3: Show Export of Tidy Data variables file successful for
# import to cookbook.md
# ****************************************************************************
# > list.files(path = "data/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# [1] "TidycbImport.txt" "TidyData.txt"
# ****************************************************************************
# Step 4: Clear Global Environment
# ****************************************************************************
# Clear the workspace before Requirement 5 re-reads its inputs from disk.
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 4
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 5
# From the data set in step 4, creates a second, independent tidy data set
# with the average of each *variable* for each *activity* and each *subject*.
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ****************************************************************************
# Step 1: Load Tidy Data file into R
# ****************************************************************************
# Load the tidy data set from Requirement 4; dplyr supplies the grouping and
# summarising verbs used below.
library(data.table)
library(dplyr)
TidyData <- fread("data/TidyData.txt")
# ****************************************************************************
# Check Step 1: Show Tidy data frame imported successfully.
# ****************************************************************************
# > dim(TidyData)
# [1] 10299 82
#
# ****************************************************************************
# Step 2: Select relevant columns and group by Activity and Subject.
# Obtain average of all variables on grouped data.
# ****************************************************************************
# Group by activity and subject, then average every measurement column.
# tbl_df() and funs() are both deprecated in dplyr; as_tibble() and a plain
# function reference to mean are the supported equivalents and produce the
# same 180 x 81 result.
TDF <- as_tibble(TidyData)
DT <- TDF %>%
  # Keep the subject id and the activity factor plus all measurement
  # columns, dropping the now-redundant numeric Activity_code.
  select(Subject_ID, -Activity_code,
         Actvty_fctrs:FreqBodyGyroJerkMagStdev) %>%
  group_by(Actvty_fctrs, Subject_ID) %>%
  summarise_all(mean)
# ****************************************************************************
# Check Step 2: Show grouping and mean was applied to all columns.
# ****************************************************************************
# > dim(DT)
# [1] 180 81
# ****************************************************************************
# Step 3: Export Summarised Tidy data. Export variables and Descriptions to
# file for import to cookbook.md list object
# ****************************************************************************
# Add Grouped Average description to current column names
# Prefix every measurement column with "GrpdAvg" to record that its values
# are grouped averages; the two grouping columns keep their original names.
cols <- names(DT)
cols <- ifelse(cols %in% c("Actvty_fctrs", "Subject_ID"),
               cols, paste0("GrpdAvg", cols))
names(DT) <- cols
# AddDescn() (defined in addDescription.R) produces a description string for
# each variable name; see that script or CodeBook.md.
source('addDescription.R')
temp <- unlist(lapply(cols, AddDescn))
# Assemble (position, name, description, role) rows for CodeBook.md: the
# first two variables are the grouping keys, the rest are grouped averages.
x <- data.frame(V2 = seq.int(ncol(DT)))
y <- data.frame(V1 = names(DT))
z <- data.frame(V3 = temp)
a <- data.frame(V4 = c("Group_by 1", "Group_by 2",
                       rep("Calculation: Grouped Average", ncol(DT) - 2)))
new <- cbind(x, y, z, a)
source("CreateCodeBook.R")
CodeBook(new, "data/GroupedAvgcbImport.txt")
fwrite(DT, "data/GroupedAvgData.txt")
# write.table used as per Coursera submission instructions
write.table(DT, "data/GroupedAvgDataCourseraUpload.txt", row.names = FALSE)
# ****************************************************************************
# Check Step 3: Show Export of Grouped Average Tidy Data file successful.
# Show updated columns and description file ready for cookbook.md
# ****************************************************************************
# > list.files(path = "data/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# [1] "GroupedAvgcbImport.txt" "GroupedAvgData.txt" "TidycbImport.txt"
# [4] "TidyData.txt"
# ****************************************************************************
# Step 4: Clear Global Environment
# ****************************************************************************
# Final workspace reset; all outputs are already written to data/.
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 5
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
|
/W4_GCD_Project/run_analysis.R
|
no_license
|
wikusjvr3/W4GCD
|
R
| false
| false
| 29,731
|
r
|
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 1
# Merges the training and the test sets to create one data set.
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ***************************************************************************
# Step 1: Download Zip File to Raw Data Folder and Extract contents
# ***************************************************************************
# Download the project zip into rawdata/ (created if absent) and extract it.
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!dir.exists("rawdata")) {
  dir.create("rawdata")
}
if (!file.exists("rawdata/ProjectFiles.zip")) {
  # Named arguments avoid the positional "curl"; mode = "wb" keeps the zip
  # binary-safe if the download method ever changes.
  download.file(fileURL, destfile = "rawdata/ProjectFiles.zip",
                method = "curl", mode = "wb")
}
# Extract even when the zip already exists but was never unpacked (the
# original only unzipped on the same run that downloaded it).
if (!dir.exists("rawdata/UCI HAR Dataset")) {
  unzip(zipfile = "rawdata/ProjectFiles.zip", exdir = "rawdata/")
}
# ***************************************************************************
# Check Step 1a: Show that rawdata directory was created
# ***************************************************************************
# > list.dirs(path = ".", full.names = TRUE, recursive = FALSE)
#
# [1] "./.Rproj.user" "./data" "./rawdata"
# ***************************************************************************
# Check Step 1b: Show that download and extraction of zip file was successful
# ***************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = TRUE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "ProjectFiles.zip"
# [2] "UCI HAR Dataset/activity_labels.txt"
# [3] "UCI HAR Dataset/features_info.txt"
# [4] "UCI HAR Dataset/features.txt"
# [5] "UCI HAR Dataset/README.txt"
# [6] "UCI HAR Dataset/test/Inertial Signals/body_acc_x_test.txt"
# [7] "UCI HAR Dataset/test/Inertial Signals/body_acc_y_test.txt"
# [8] "UCI HAR Dataset/test/Inertial Signals/body_acc_z_test.txt"
# [9] "UCI HAR Dataset/test/Inertial Signals/body_gyro_x_test.txt"
# [10] "UCI HAR Dataset/test/Inertial Signals/body_gyro_y_test.txt"
# [11] "UCI HAR Dataset/test/Inertial Signals/body_gyro_z_test.txt"
# [12] "UCI HAR Dataset/test/Inertial Signals/total_acc_x_test.txt"
# [13] "UCI HAR Dataset/test/Inertial Signals/total_acc_y_test.txt"
# [14] "UCI HAR Dataset/test/Inertial Signals/total_acc_z_test.txt"
# [15] "UCI HAR Dataset/test/subject_test.txt"
# [16] "UCI HAR Dataset/test/X_test.txt"
# [17] "UCI HAR Dataset/test/y_test.txt"
# [18] "UCI HAR Dataset/train/Inertial Signals/body_acc_x_train.txt"
# [19] "UCI HAR Dataset/train/Inertial Signals/body_acc_y_train.txt"
# [20] "UCI HAR Dataset/train/Inertial Signals/body_acc_z_train.txt"
# [21] "UCI HAR Dataset/train/Inertial Signals/body_gyro_x_train.txt"
# [22] "UCI HAR Dataset/train/Inertial Signals/body_gyro_y_train.txt"
# [23] "UCI HAR Dataset/train/Inertial Signals/body_gyro_z_train.txt"
# [24] "UCI HAR Dataset/train/Inertial Signals/total_acc_x_train.txt"
# [25] "UCI HAR Dataset/train/Inertial Signals/total_acc_y_train.txt"
# [26] "UCI HAR Dataset/train/Inertial Signals/total_acc_z_train.txt"
# [27] "UCI HAR Dataset/train/subject_train.txt"
# [28] "UCI HAR Dataset/train/X_train.txt"
# [29] "UCI HAR Dataset/train/y_train.txt"
# ******************************************************************
# Step 2: Load Test & Train Datasets into R
# ******************************************************************
# Install data.table only when it is missing, so re-running the script does
# not re-install the package every time.
if (!requireNamespace("data.table", quietly = TRUE)) {
  install.packages("data.table")
}
library(data.table)
# Subject ids, feature matrix (X) and activity labels (y) for each split.
# NOTE: the archive ships "y_test.txt" / "y_train.txt" with a lower-case y
# (see the file listing above); the upper-case names only worked on
# case-insensitive file systems.
Test_subjectDF <- read.table("rawdata/UCI HAR Dataset/test/subject_test.txt")
Test_XDF <- read.table("rawdata/UCI HAR Dataset/test/X_test.txt")
Test_YDF <- read.table("rawdata/UCI HAR Dataset/test/y_test.txt")
Train_subjectDF <- read.table("rawdata/UCI HAR Dataset/train/subject_train.txt")
Train_XDF <- read.table("rawdata/UCI HAR Dataset/train/X_train.txt")
Train_YDF <- read.table("rawdata/UCI HAR Dataset/train/y_train.txt")
# ***************************************************************************
# Check Step 2: Show files imported and their dimensions are correct
# ***************************************************************************
# > dim(Test_subjectDF)
# [1] 2947 1
#
# > dim(Test_XDF)
# [1] 2947 561
#
# > dim(Test_YDF)
# [1] 2947 1
#
# > dim(Train_subjectDF)
# [1] 7352 1
#
# > dim(Train_XDF)
# [1] 7352 561
#
# > dim(Train_YDF)
# [1] 7352 1
# ***************************************************************************
# Step 3: Bind X,Y,Subject data frames into one data frame
# ***************************************************************************
# Stack the test split on top of the train split for each component:
# 2947 test rows + 7352 train rows = 10299 rows each.
XDF <- rbind(Test_XDF,Train_XDF)
YDF <- rbind(Test_YDF,Train_YDF)
subjectDF <- rbind(Test_subjectDF,Train_subjectDF)
# Column-bind the pieces: 561 feature columns + activity code + subject id.
RawDF <- cbind(XDF,YDF,subjectDF)
# ***************************************************************************
# Check Step 3: Show files merged and their dimensions are correct
# ***************************************************************************
# > dim(XDF)        # should be 2947 + 7352 = 10299 rows and var @ 561
# [1] 10299   561
#
# > dim(YDF)        # should be 2947 + 7352 = 10299 rows and var @ 1
# [1] 10299     1
#
# > dim(subjectDF)  # should be 2947 + 7352 = 10299 rows and var @ 1
# [1] 10299     1
#
# > dim(RawDF)      # should be 10299 rows and var 561 + 1 + 1 = 563
# [1] 10299 563
#
# ***************************************************************************
# Step 4: Write combined dataframe to rawdata folder as text file
# ***************************************************************************
# Persist the merged raw data (10299 x 563) so later requirements can start
# from this file instead of re-merging.
fwrite(RawDF,"rawdata/combined_rawdata.txt")
# ***************************************************************************
# Check Step 4: Show file was created
# ***************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "combined_rawdata.txt" "ProjectFiles.zip" "UCI HAR Dataset"
#
# ***************************************************************************
# Step 5a: Create New Features file for combined Raw Data
# ***************************************************************************
# Read the 561 feature names shipped with the data set, then append names
# for the two columns added in Step 3 (activity code, subject id) so the
# features table lines up 1:1 with the 563 columns of the combined data.
Features <- read.table("rawdata/UCI HAR Dataset/features.txt")
Additions <- data.frame(V1 = c(max(Features$V1)+1, max(Features$V1)+2),
                        V2 = c("Activity_code","Subject_ID"))
RawDF_cb <- rbind(Features,Additions)
# ****************************************************************************
# Check Step 5a: Check Features and Additions data frames combined correctly.
# ****************************************************************************
# > dim(Features)
# [1] 561 2
#
# > dim(Additions)
# [1] 2 2
#
# > print(Additions)
# V1 V2
# 1 562 Activity_code
# 2 563 Subject_ID
#
# > dim(RawDF_cb)
# [1] 563 2
#
# > tail(RawDF_cb,4) #Dimension ok and last 2 rows added ok
# V1 V2
# 560 560 angle(Y,gravityMean)
# 561 561 angle(Z,gravityMean)
# 562 562 Activity_code
# 563 563 Subject_ID
# ****************************************************************************
# Step 5b: Export combined Features to file
# ****************************************************************************
# Persist the completed 563-row features table alongside the raw data.
fwrite(RawDF_cb,"rawdata/combined_features.txt")
# ****************************************************************************
# Check Step 5b: Export of combined Features file successful
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "combined_features.txt" "combined_rawdata.txt" "ProjectFiles.zip"
# [4] "UCI HAR Dataset"
#
# ****************************************************************************
# Step 5c: Export combined Features to file ready for cookbook.md list object
# ****************************************************************************
# CodeBook() (defined in CreateCodeBook.R) writes a variables table formatted
# for pasting into CodeBook.md; see that script for details.
source("CreateCodeBook.R")
CodeBook(RawDF_cb,"rawdata/cbImport.txt")
# ****************************************************************************
# Check Step 5c: Export of combined Features file ready successful
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "cbImport.txt" "combined_features.txt" "combined_rawdata.txt"
# [4] "ProjectFiles.zip" "UCI HAR Dataset"
# ***************************************************************************
# Step 6: Clear Global environment
# ***************************************************************************
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 1
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 2
# Extract only the measurements on the mean and standard deviation
# for each measurement.
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ***************************************************************************
# Step 1a: Import combined RawData file and new Features file
# ***************************************************************************
library(data.table)
RawDF <- fread("rawdata/combined_rawdata.txt")
Features <- fread("rawdata/combined_features.txt")
# ***************************************************************************
# Check Step 1a: Show files and dimensions imported correctly
# ***************************************************************************
# > dim(RawDF)
# [1] 10299 563
# > dim(Features)
# [1] 563 2
# ***************************************************************************
# Step 1b: Assign new Features to Combined RawData variables
# ***************************************************************************
names(RawDF) <- Features$V2
rm(Features)
# ***************************************************************************
# Check Step 1b: Check new variables assigned correctly
# ***************************************************************************
# > head(names(RawDF),3)
# [1] "tBodyAcc-mean()-X" "tBodyAcc-mean()-Y" "tBodyAcc-mean()-Z"
# > tail(names(RawDF),3)
# [1] "angle(Z,gravityMean)" "Activity_code" "Subject_ID"
# > dim(RawDF)
# [1] 10299 563
# ***************************************************************************
# Step 2a: Find variables with words 'mean' and 'std', and
# filter data frame and include "Subject_ID" and "Activity_code"
# ***************************************************************************
# Column positions to extract, in this exact order: the two ID columns
# first, then every feature whose name contains "mean", then every feature
# whose name contains "std" (same order the original chained grep() calls
# produced).
wanted_patterns <- c("Subject_ID", "Activity_code", "mean", "std")
MScolNums <- unlist(lapply(wanted_patterns, grep, x = names(RawDF)))
MeanStdDF <- subset(RawDF, select = MScolNums)
# ***************************************************************************
# Check Step 2a: New Data frame with variables only with 'mean' and 'std',
# including "Subject_ID" and "Activity_code"
# ***************************************************************************
# > length(MScolNums)
# [1] 81
# > dim(MeanStdDF)
# [1] 10299 81
#
# The following will check that MeanStdDF only contains the required variables:
#
# > lenvars <- c(length(grep("std",names(MeanStdDF))),
# + length(grep("mean",names(MeanStdDF))),
# + length(grep("Subject_ID",names(MeanStdDF))),
# + length(grep("Activity_code",names(MeanStdDF))))
#
# > lenvars
# [1] 33 46 1 1
#
# > sum(lenvars)
# [1] 81
#
# The following will check if there are any duplicate variables:
# > sum(duplicated(names(MeanStdDF)))
# [1] 0
# ****************************************************************************
# Step 2b: Export Extracted data frame to file
# ****************************************************************************
fwrite(MeanStdDF,"rawdata/Extracted_Data.txt")
# ****************************************************************************
# Check Step 2b: Export of Extracted Data file successful
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "cbImport.txt" "combined_features.txt" "combined_rawdata.txt"
# [4] "Extracted_Data.txt" "ProjectFiles.zip" "UCI HAR Dataset"
#
# ****************************************************************************
# Step 3: Export variables of Extracted Data to file;
# ready for cookbook.md list object
# ****************************************************************************
# Assemble the codebook table in one step: V2 = column index, V1 = variable
# name (column order V2 then V1, as CodeBook() expects).
new <- data.frame(V2 = seq.int(ncol(MeanStdDF)),
                  V1 = names(MeanStdDF))
source("CreateCodeBook.R")
CodeBook(new, "rawdata/ExtractedcbImport.txt")
# ****************************************************************************
# Check Step 3: Export of Extracted Data variables file successful for import
# to cookbook.md
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# [1] "cbImport.txt" "combined_features.txt" "combined_rawdata.txt"
# [4] "Extracted_Data.txt" "ExtractedcbImport.txt" "ProjectFiles.zip"
# [7] "UCI HAR Dataset"
# ***************************************************************************
# Step 4: Clear Global environment
# ***************************************************************************
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 2
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 3
# Uses descriptive activity names to name the activities in the data set
#
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ***************************************************************************
# Step 1: Import extracted raw data & Activities Features files
# ***************************************************************************
library(data.table)
MeanStdDF <- fread("rawdata/Extracted_Data.txt")
Activities <- fread("rawdata/UCI HAR Dataset/activity_labels.txt")
# ***************************************************************************
# Check Step 1: Show data frames import successfully
# ***************************************************************************
# > dim(MeanStdDF)
# [1] 10299 81
# > dim(Activities)
# [1] 6 2
# ***************************************************************************
# Step 2: Update levels of activity codes
# ***************************************************************************
# Map the numeric activity codes onto descriptive labels. Passing the codes
# (Activities$V1) and labels (Activities$V2) explicitly is robust to the row
# order of the lookup table, unlike assigning levels() after the fact.
# (Also fixes the non-idiomatic `=` assignment on the levels line.)
MeanStdDF$Actvty_fctrs <- factor(MeanStdDF$Activity_code,
                                 levels = Activities$V1,
                                 labels = Activities$V2)
# Re-order data frame so that Activity factors variable is in position 3.
# Actvty_fctrs was appended as the last column, so compute the index instead
# of hard-coding 82; `with = FALSE` makes positional column selection on a
# data.table explicit.
n_cols <- ncol(MeanStdDF)
ReorderDF <- MeanStdDF[, c(1:2, n_cols, 3:(n_cols - 1)), with = FALSE]
# ***************************************************************************
# Check Step 2: Show that levels of activity codes updated correctly
# ***************************************************************************
# > dim(ReorderDF)
# [1] 10299 82
# > print(Activities)
# V1 V2
# 1: 1 WALKING
# 2: 2 WALKING_UPSTAIRS
# 3: 3 WALKING_DOWNSTAIRS
# 4: 4 SITTING
# 5: 5 STANDING
# 6: 6 LAYING
# > str(ReorderDF$Actvty_fctrs)
# Factor w/ 6 levels "WALKING","WALKING_UPSTAIRS",..: 5 5 5 5 5 5 5 5 5 5 ...
# > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==1),1)
# [1] WALKING
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# # > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==2),1)
# [1] WALKING_UPSTAIRS
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# # > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==3),1)
# [1] WALKING_DOWNSTAIRS
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==4),1)
# [1] SITTING
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==5),1)
# [1] STANDING
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# > head(subset(ReorderDF$Actvty_fctrs,ReorderDF$Activity_code==6),1)
# [1] LAYING
# Levels: WALKING WALKING_UPSTAIRS WALKING_DOWNSTAIRS SITTING STANDING LAYING
# > str(ReorderDF)
# Classes ‘data.table’ and 'data.frame': 10299 obs. of 82 variables:
# $ Subject_ID : int 2 2 2 2 2 2 2 2 2 2 ...
# $ Activity_code : int 5 5 5 5 5 5 5 5 5 5 ...
# $ Actvty_fctrs : Factor w/ 6 levels "WALKING","WALKING_UPSTAIRS"...
# variables 4-81 not shown...
# ***************************************************************************
# Step 3: Export Activity updated raw data to file
# ***************************************************************************
fwrite(ReorderDF, "rawdata/ActivityUpdatedRawData.txt")
# ***************************************************************************
# Check Step 3: Updated raw data with Activity exported to file
# ***************************************************************************
# > fwrite(MeanStdDF, "rawdata/ActivityUpdatedRawData.txt")
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#
# [1] "ActivityUpdatedRawData.txt" "cbImport.txt" "combined_features.txt"
# [4] "combined_rawdata.txt" "Extracted_Data.txt" "ExtractedcbImport.txt"
# [7] "ProjectFiles.zip" "UCI HAR Dataset"
# ****************************************************************************
# Step 4: Export variables of Updated Activity Data to file;
# ready for cookbook.md list object
# ****************************************************************************
y <- data.frame(V1 = names(ReorderDF))
x <- data.frame(V2 = seq.int(ncol(ReorderDF)))
new <- cbind(x,y)
source("CreateCodeBook.R")
CodeBook(new,"rawdata/UpdatedActvcbImport.txt")
# ****************************************************************************
# Check Step 4: Export of Activity Updated Data variables file successful for
# import to cookbook.md
# ****************************************************************************
# > list.files(path = "rawdata/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# [1] "ActivityUpdatedRawData.txt" "cbImport.txt" "combined_features.txt"
# [4] "combined_rawdata.txt" "Extracted_Data.txt" "ExtractedcbImport.txt"
# [7] "ProjectFiles.zip" "UCI HAR Dataset" "UpdatedActvcbImport.txt"
# ****************************************************************************
# Step 5: Clear Global Environment
# ****************************************************************************
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 3
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 4
# Appropriately labels the data set with descriptive variable names.
#
# Rules: > Clean invalid charachters like " ( ) - , "
# > Remove duplicates words in variables
# > Make sure there are no duplicate variables
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ****************************************************************************
# Step 1: Load Activity Raw Data file into R & output column names to variable
# ****************************************************************************
library(data.table)
dirty <- fread("rawdata/ActivityUpdatedRawData.txt")
rawdataVars <- names(dirty)
# ****************************************************************************
# Check Step 1: Show data frame imported and new column names variable created.
# ****************************************************************************
# > dim(dirty)
# [1] 10299 82
# str(rawdataVars)
# chr [1:82] "Subject_ID" "Activity_code" "Actvty_fctrs" "tBodyAcc-mean()-X" ...
# ****************************************************************************
# Step 2: Cleanup column names & replace raw data frame variables with new
# list.
# ****************************************************************************
# source r script for RenderName function.
# See Rename.R in project folder or Codebook.md
source("Rename.R")
cleanedVars <- unlist(lapply(rawdataVars,RenderName))
names(dirty) <- cleanedVars
# ****************************************************************************
# Check Step 2: Confirm no Duplicated variable names
# ****************************************************************************
# > sum(duplicated(cleanedVars))
# [1] 0
# > match(cleanedVars,rawdataVars)
# [1] 1 2 3 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA
# [28] NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA
# [55] NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA
# [82] NA
# ****************************************************************************
# Step 3: Export Tidy data. Export variables and Descriptions to file for
# import to cookbook.md list object
# ****************************************************************************
# Source the AddDescn function (see addDescription.R in the project folder
# or Codebook.md), then build the codebook table in one step:
# V2 = column index, V1 = variable name, V3 = description.
source("addDescription.R")
new <- data.frame(V2 = seq.int(ncol(dirty)),
                  V1 = names(dirty),
                  V3 = unlist(lapply(cleanedVars, AddDescn)))
source("CreateCodeBook.R")
CodeBook(new, "data/TidycbImport.txt")
fwrite(dirty, "data/TidyData.txt")
# ****************************************************************************
# Check Step 3: Show Export of Tidy Data variables file successful for
# import to cookbook.md
# ****************************************************************************
# > list.files(path = "data/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# [1] "TidycbImport.txt" "TidyData.txt"
# ****************************************************************************
# Step 4: Clear Global Environment
# ****************************************************************************
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 4
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# *************************---------ooo---------*****************************
#
# Start of Requirement 5
# From the data set in step 4, creates a second, independent tidy data set
# with the average of each *variable* for each *activity* and each *subject*.
#
# *************************---------ooo---------*****************************
# ---------------------------------------------------------------------------
# ****************************************************************************
# Step 1: Load Tidy Data file into R
# ****************************************************************************
library(data.table)
library(dplyr)
TidyData <- fread("data/TidyData.txt")
# ****************************************************************************
# Check Step 1: Show Tidy data frame imported successfully.
# ****************************************************************************
# > dim(TidyData)
# [1] 10299 82
#
# ****************************************************************************
# Step 2: Select relevant columns and group by Activity and Subject.
# Obtain average of all variables on grouped data.
# ****************************************************************************
# Keep the two grouping columns plus every measurement column, then average
# each measurement per (activity, subject) pair.
# Fixes vs. the original: tbl_df() and funs() are deprecated in dplyr
# (as_tibble() and a bare function replace them), and the `-Activity_code`
# term was a no-op inside an otherwise positive selection (the
# Actvty_fctrs:... range never included it), so it is dropped.
TDF <- as_tibble(TidyData)
DT <- TDF %>%
  select(Subject_ID, Actvty_fctrs:FreqBodyGyroJerkMagStdev) %>%
  group_by(Actvty_fctrs, Subject_ID) %>%
  summarise_all(mean)
# ****************************************************************************
# Check Step 2: Show grouping and mean was applied to all columns.
# ****************************************************************************
# > dim(DT)
# [1] 180 81
# ****************************************************************************
# Step 3: Export Summarised Tidy data. Export variables and Descriptions to
# file for import to cookbook.md list object
# ****************************************************************************
# Prefix every summarised measurement column with "GrpdAvg", leaving the two
# grouping/ID columns untouched (same result as the original
# prefix-everything-then-revert approach).
grouping_cols <- c("Actvty_fctrs", "Subject_ID")
cols <- names(DT)
cols <- ifelse(cols %in% grouping_cols, cols, paste0("GrpdAvg", cols))
names(DT) <- cols
# Source the AddDescn function (see addDescription.R in the project folder
# or Codebook.md), then build the codebook table in one step:
# V2 = column index, V1 = variable name, V3 = description,
# V4 = role (two group-by keys, then grouped-average calculations).
source("addDescription.R")
new <- data.frame(V2 = seq.int(ncol(DT)),
                  V1 = names(DT),
                  V3 = unlist(lapply(cols, AddDescn)),
                  V4 = c("Group_by 1", "Group_by 2",
                         rep("Calculation: Grouped Average", ncol(DT) - 2)))
source("CreateCodeBook.R")
CodeBook(new, "data/GroupedAvgcbImport.txt")
fwrite(DT, "data/GroupedAvgData.txt")
# write.table used as per Coursera instructions
write.table(DT, "data/GroupedAvgDataCourseraUpload.txt", row.names = FALSE)
# ****************************************************************************
# Check Step 3: Show Export of Grouped Average Tidy Data file successful.
# Show updated columns and description file ready for cookbook.md
# ****************************************************************************
# > list.files(path = "data/", pattern = NULL, all.files = FALSE,
# + full.names = FALSE, recursive = FALSE,
# + ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# [1] "GroupedAvgcbImport.txt" "GroupedAvgData.txt" "TidycbImport.txt"
# [4] "TidyData.txt"
# ****************************************************************************
# Step 4: Clear Global Environment
# ****************************************************************************
rm(list=ls(all=TRUE))
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# End of Requirement 5
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
|
library(WGCNA)
library(dplyr)
library(rstatix)
library(ggpubr)
library(ComplexHeatmap)
# Load the GSE4843 expression table and transpose so rows are samples and
# columns are genes, the orientation WGCNA expects.
data <- read.delim("Datasets/GSE4843.txt", row.names = 1)
data <- as.data.frame(t(data))
# Drop genes/samples flagged by WGCNA's quality filter (too many missing
# values or zero variance).
# Idiom fix: `<-` instead of `=` for assignment; stray `;` removed.
gsg <- goodSamplesGenes(data, verbose = 3)
data <- data[gsg$goodSamples, gsg$goodGenes]
#soft threshold - identified as 4
# Candidate soft-thresholding powers: 1..10, then 12, 14, ..., 20.
powers = c(c(1:10), seq(from = 12, to=20, by=2))
# Evaluate scale-free-topology fit (R^2) and mean connectivity per power.
sft = pickSoftThreshold(data, powerVector = powers, verbose = 3)
sizeGrWindow(9, 5)
# Two side-by-side panels: fit index (left) and mean connectivity (right).
par(mfrow = c(1,2))
cex1 = 0.9
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
     xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
     main = paste("Scale independence"))
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
     labels=powers,cex=cex1,col="red")
# Conventional R^2 cutoff line used to pick the power.
abline(h=0.90,col="red") #cutoff
plot(sft$fitIndices[,1], sft$fitIndices[,5],
     xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
     main = paste("Mean connectivity"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
#Module processing
# Topological-overlap dissimilarity at the power (4) chosen above.
disTOM <-(1-TOMsimilarityFromExpr(data, power = 4, corType = "pearson", TOMType = "unsigned")) #TOM dissimilarity matrix
# Average-linkage gene tree on the TOM dissimilarity.
tree <- hclust(as.dist(disTOM), method= "average")
col <- cutreeDynamic(dendro = tree,distM =disTOM, cutHeight = 0.995, #Adaptive pruning of dendrogram
                     deepSplit = 2, pamRespectsDendro = FALSE,
                     minClusterSize =100);
# Convert numeric module labels to the conventional WGCNA colour names.
col <- labels2colors(col)
sizeGrWindow(8,6)
plotDendroAndColors(tree, col, "Dynamic Tree Cut",
                    dendroLabels = FALSE, hang = 0.03,
                    addGuide = TRUE, guideHang = 0.05,
                    main = "Melanoma WGCNA")
#Module eigengenes and merging dissimilar modules
# One eigengene (first principal component) per detected module.
MEList = moduleEigengenes(data, colors = col)
MEs = MEList$eigengenes
# Dissimilarity between module eigengenes (1 - correlation).
MEDiss = 1-cor(MEs)
METree = hclust(as.dist(MEDiss), method = "average");
sizeGrWindow(7, 6)
plot(METree, main = "Clustering of module eigengenes",
     xlab = "", sub = "")
# Modules whose eigengenes correlate above 0.75 (dissimilarity < 0.25) are
# merged into a single module.
MEDissThres = 0.25
abline(h=MEDissThres, col = "red")
# NOTE(review): `merge` shadows base::merge for the rest of this script.
merge = mergeCloseModules(data, col, cutHeight = MEDissThres, verbose = 3)
mergedColors = merge$colors;
mergedMEs = merge$newMEs;
sizeGrWindow(12, 9)
# Bug fix: the output file name was missing its ".jpeg" extension — every
# other jpeg() call in this script includes one.
jpeg("Figures/Fig. 3/S3A_i_Dendrogram_WGCNA.jpeg")
plotDendroAndColors(tree, mergedColors,
                    c("Merged dynamic"),
                    dendroLabels = FALSE, hang = 0.03,
                    addGuide = TRUE, guideHang = 0.05)
dev.off()
#Assigning clusters to module eigengenes
# Samples listed in the cluster file form phenotype group 1 (plotted
# coral2); all remaining samples form group 2 (cyan3).
clus1 <- as.character(read.delim("Datasets/Clusters/GSE4843.txt")$x)
# Fixes vs. the original: %in% replaces the is.finite(match()) idiom, and
# the replace(cut, cut==T/F, ...) chain (reassignable T/F literals) becomes
# a single vectorized ifelse().
cut <- ifelse(rownames(data) %in% clus1, 1, 2)
ME1 <- mergedMEs[cut == 1, ]
ME2 <- mergedMEs[cut == 2, ]
# Tag each group with its plotting colour in a "cut" column (direct
# assignment replaces the cbind-then-rename dance).
ME1$cut <- "coral2"
ME2$cut <- "cyan3"
MEfin <- rbind(ME1, ME2)
n <- ncol(MEfin)
#Selecting relevant modules - Comparing expression between proliferative and invasive samples
names(MEfin) <- gsub("ME","",names(MEfin))
# Stack every module-eigengene column into long format. All columns except
# the final "cut" colour column are eigengenes; the original hard-coded
# 1:29, which silently breaks if the module merge yields a different count,
# so use the column count n computed above instead.
df <- data.frame(MEfin$cut, stack(MEfin[, seq_len(n - 1)]))
names(df)[1] <- c("cluster")
# Per-module two-sample t-test between the phenotype groups, with
# Bonferroni-adjusted p-values and significance symbols for plotting.
stat.test <- df %>%
  group_by(ind) %>%
  t_test(values ~ cluster) %>%
  adjust_pvalue(method = "bonferroni") %>%
  add_significance("p.adj", cutpoints = c(0, 1e-04, 0.001, 0.01, 1),
                   symbols = c( "***", "**", "*", "ns"))
stat.test <- stat.test %>%
  add_xy_position(x = "ind", dodge = 0.8)
jpeg("Figures/Fig. 3/S3A_ii_Eigengene_comparison.jpeg", width = 1000, height =500)
ggboxplot(
df, x = "ind", y = "values",
fill= "cluster", palette = c("#ee6a50", "#00cdcd"))+
xlab("Module")+
ylab("Eigengene")+
stat_pvalue_manual(stat.test,
label = "p.adj.signif" , tip.length = 0)+
border()+
theme(text = element_text(size=14), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1), legend.position = "right")+
scale_fill_discrete(name ="Phenotype", labels = c("Proliferative", "Invasive"))
dev.off()
jpeg("Figures/Fig. 3/S3A_iii_Eigengene_scatter.jpeg", width = 500, height =250)
plot(MEfin$yellow,MEfin$salmon, xlab = "Yellow eigengene", ylab = "Salmon eigengene", col= as.character(MEfin$cut), pch = 16,
ylim = c(-0.2,0.4))
dev.off()
#Heatmap and Module eigengene value
# The salmon (invasive) and yellow (proliferative) panels were produced by
# two identical copy-pasted blocks; factor the shared logic into a helper.
# module         : WGCNA module colour name (a column of MEfin)
# eigengene_file : output JPEG for the per-sample eigengene barplot
# heatmap_file   : output JPEG for the scaled module-expression heatmap
plot_module_panels <- function(module, eigengene_file, heatmap_file) {
  # z-score the module's genes per column, rows ordered to match MEfin.
  data_hm <- scale(data[, mergedColors == module])
  data_hm <- data_hm[rownames(MEfin), ]
  jpeg(eigengene_file, width = 500, height = 250)
  barplot(MEfin[, module], col = as.character(MEfin$cut),
          ylab = "Module eigengene")
  dev.off()
  jpeg(heatmap_file, width = 500, height = 250)
  # ComplexHeatmap objects only auto-draw at top level; inside a function
  # they must be rendered with an explicit draw().
  draw(Heatmap(t(data_hm), column_split = MEfin$cut, show_row_names = FALSE,
               show_row_dend = FALSE, show_column_dend = FALSE,
               show_column_names = FALSE, show_parent_dend_line = FALSE,
               cluster_rows = FALSE, cluster_columns = FALSE,
               column_title = NULL,
               heatmap_legend_param = list(title= c("Scale"))))
  dev.off()
}
#Salmon module
plot_module_panels("salmon",
                   "Figures/Fig. 3/3B_ii_inv_Eigengene.jpeg",
                   "Figures/Fig. 3/3B_ii_inv_Heatmap.jpeg")
#Yellow module
plot_module_panels("yellow",
                   "Figures/Fig. 3/3B_i_Pro_Eigengene.jpeg",
                   "Figures/Fig. 3/3B_i_Pro_Heatmap.jpeg")
|
/Figures/Fig. 3/Code/Fig3B_S3AB_WGCNA.R
|
no_license
|
csbBSSE/Melanoma
|
R
| false
| false
| 5,777
|
r
|
library(WGCNA)
library(dplyr)
library(rstatix)
library(ggpubr)
library(ComplexHeatmap)
# Load the GSE4843 expression table and transpose so rows are samples and
# columns are genes, the orientation WGCNA expects.
data <- read.delim("Datasets/GSE4843.txt", row.names = 1)
data <- as.data.frame(t(data))
# Drop genes/samples flagged by WGCNA's quality filter (too many missing
# values or zero variance).
# Idiom fix: `<-` instead of `=` for assignment; stray `;` removed.
gsg <- goodSamplesGenes(data, verbose = 3)
data <- data[gsg$goodSamples, gsg$goodGenes]
#soft threshold - identified as 4
powers = c(c(1:10), seq(from = 12, to=20, by=2))
sft = pickSoftThreshold(data, powerVector = powers, verbose = 3)
sizeGrWindow(9, 5)
par(mfrow = c(1,2))
cex1 = 0.9
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
main = paste("Scale independence"))
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels=powers,cex=cex1,col="red")
abline(h=0.90,col="red") #cutoff
plot(sft$fitIndices[,1], sft$fitIndices[,5],
xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("Mean connectivity"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
#Module processing
disTOM <-(1-TOMsimilarityFromExpr(data, power = 4, corType = "pearson", TOMType = "unsigned")) #TOM dissimilarity matrix
tree <- hclust(as.dist(disTOM), method= "average")
col <- cutreeDynamic(dendro = tree,distM =disTOM, cutHeight = 0.995, #Adaptive pruning of dendrogram
deepSplit = 2, pamRespectsDendro = FALSE,
minClusterSize =100);
col <- labels2colors(col)
sizeGrWindow(8,6)
plotDendroAndColors(tree, col, "Dynamic Tree Cut",
dendroLabels = FALSE, hang = 0.03,
addGuide = TRUE, guideHang = 0.05,
main = "Melanoma WGCNA")
#Module eigengenes and merging dissimilar modules
MEList = moduleEigengenes(data, colors = col)
MEs = MEList$eigengenes
MEDiss = 1-cor(MEs)
METree = hclust(as.dist(MEDiss), method = "average");
sizeGrWindow(7, 6)
plot(METree, main = "Clustering of module eigengenes",
xlab = "", sub = "")
MEDissThres = 0.25
abline(h=MEDissThres, col = "red")
merge = mergeCloseModules(data, col, cutHeight = MEDissThres, verbose = 3)
mergedColors = merge$colors;
mergedMEs = merge$newMEs;
sizeGrWindow(12, 9)
# Bug fix: the output file name was missing its ".jpeg" extension — every
# other jpeg() call in this script includes one.
jpeg("Figures/Fig. 3/S3A_i_Dendrogram_WGCNA.jpeg")
plotDendroAndColors(tree, mergedColors,
                    c("Merged dynamic"),
                    dendroLabels = FALSE, hang = 0.03,
                    addGuide = TRUE, guideHang = 0.05)
dev.off()
#Assigning clusters to module eigengenes
# Samples listed in the cluster file form phenotype group 1 (plotted
# coral2); all remaining samples form group 2 (cyan3).
clus1 <- as.character(read.delim("Datasets/Clusters/GSE4843.txt")$x)
# Fixes vs. the original: %in% replaces the is.finite(match()) idiom, and
# the replace(cut, cut==T/F, ...) chain (reassignable T/F literals) becomes
# a single vectorized ifelse().
cut <- ifelse(rownames(data) %in% clus1, 1, 2)
ME1 <- mergedMEs[cut == 1, ]
ME2 <- mergedMEs[cut == 2, ]
# Tag each group with its plotting colour in a "cut" column (direct
# assignment replaces the cbind-then-rename dance).
ME1$cut <- "coral2"
ME2$cut <- "cyan3"
MEfin <- rbind(ME1, ME2)
n <- ncol(MEfin)
#Selecting relevant modules - Comparing expression between proliferative and invasive samples
names(MEfin) <- gsub("ME","",names(MEfin))
# Stack every module-eigengene column into long format. All columns except
# the final "cut" colour column are eigengenes; the original hard-coded
# 1:29, which silently breaks if the module merge yields a different count,
# so use the column count n computed above instead.
df <- data.frame(MEfin$cut, stack(MEfin[, seq_len(n - 1)]))
names(df)[1] <- c("cluster")
# Per-module two-sample t-test between the phenotype groups, with
# Bonferroni-adjusted p-values and significance symbols for plotting.
stat.test <- df %>%
  group_by(ind) %>%
  t_test(values ~ cluster) %>%
  adjust_pvalue(method = "bonferroni") %>%
  add_significance("p.adj", cutpoints = c(0, 1e-04, 0.001, 0.01, 1),
                   symbols = c( "***", "**", "*", "ns"))
stat.test <- stat.test %>%
  add_xy_position(x = "ind", dodge = 0.8)
jpeg("Figures/Fig. 3/S3A_ii_Eigengene_comparison.jpeg", width = 1000, height =500)
ggboxplot(
df, x = "ind", y = "values",
fill= "cluster", palette = c("#ee6a50", "#00cdcd"))+
xlab("Module")+
ylab("Eigengene")+
stat_pvalue_manual(stat.test,
label = "p.adj.signif" , tip.length = 0)+
border()+
theme(text = element_text(size=14), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1), legend.position = "right")+
scale_fill_discrete(name ="Phenotype", labels = c("Proliferative", "Invasive"))
dev.off()
jpeg("Figures/Fig. 3/S3A_iii_Eigengene_scatter.jpeg", width = 500, height =250)
plot(MEfin$yellow,MEfin$salmon, xlab = "Yellow eigengene", ylab = "Salmon eigengene", col= as.character(MEfin$cut), pch = 16,
ylim = c(-0.2,0.4))
dev.off()
#Heatmap and Module eigengene value
# The salmon (invasive) and yellow (proliferative) panels were produced by
# two identical copy-pasted blocks; factor the shared logic into a helper.
# module         : WGCNA module colour name (a column of MEfin)
# eigengene_file : output JPEG for the per-sample eigengene barplot
# heatmap_file   : output JPEG for the scaled module-expression heatmap
plot_module_panels <- function(module, eigengene_file, heatmap_file) {
  # z-score the module's genes per column, rows ordered to match MEfin.
  data_hm <- scale(data[, mergedColors == module])
  data_hm <- data_hm[rownames(MEfin), ]
  jpeg(eigengene_file, width = 500, height = 250)
  barplot(MEfin[, module], col = as.character(MEfin$cut),
          ylab = "Module eigengene")
  dev.off()
  jpeg(heatmap_file, width = 500, height = 250)
  # ComplexHeatmap objects only auto-draw at top level; inside a function
  # they must be rendered with an explicit draw().
  draw(Heatmap(t(data_hm), column_split = MEfin$cut, show_row_names = FALSE,
               show_row_dend = FALSE, show_column_dend = FALSE,
               show_column_names = FALSE, show_parent_dend_line = FALSE,
               cluster_rows = FALSE, cluster_columns = FALSE,
               column_title = NULL,
               heatmap_legend_param = list(title= c("Scale"))))
  dev.off()
}
#Salmon module
plot_module_panels("salmon",
                   "Figures/Fig. 3/3B_ii_inv_Eigengene.jpeg",
                   "Figures/Fig. 3/3B_ii_inv_Heatmap.jpeg")
#Yellow module
plot_module_panels("yellow",
                   "Figures/Fig. 3/3B_i_Pro_Eigengene.jpeg",
                   "Figures/Fig. 3/3B_i_Pro_Heatmap.jpeg")
|
# Example script from the BaPreStoPro package documentation: build a
# "jumpDiffusion" S4 model object from user-supplied model functions and
# per-parameter prior densities (see ?"jumpDiffusion-class").
library(BaPreStoPro)
### Name: jumpDiffusion-class
### Title: S4 class of model informations for the jump diffusion process
### Aliases: jumpDiffusion-class
### ** Examples
# Model parameters: drift scale phi, jump-height scale theta, squared
# diffusion coefficient gamma2, and jump-intensity parameters xi.
parameter <- list(phi = 0.01, theta = 0.1, gamma2 = 0.01, xi = c(2, 0.2))
b.fun <- function(phi, t, y) phi * y  # drift function b(phi, t, y)
s.fun <- function(gamma2, t, y) sqrt(gamma2) * y  # diffusion coefficient
h.fun <- function(theta, t, y) theta * y  # jump-height function
Lambda <- function(t, xi) (t / xi[2])^xi[1]  # cumulative intensity of the jump process
# Prior densities, evaluated pointwise for each parameter.
priorDensity <- list(
phi = function(phi) 1,
theta = function(theta) dnorm(theta, 0.1, 0.001),
gamma2 = function(gamma2) dgamma(1/gamma2, 3, 0.01*2),
xi = function(xi) dgamma(xi, c(2, 0.2), 1)
)
start <- parameter  # start the sampler at the true parameter values
model <- set.to.class("jumpDiffusion", parameter, start = start,
b.fun = b.fun, s.fun = s.fun, h.fun = h.fun, Lambda = Lambda,
priorDensity = priorDensity)
|
/data/genthat_extracted_code/BaPreStoPro/examples/jumpDiffusion-class.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 807
|
r
|
library(BaPreStoPro)
### Name: jumpDiffusion-class
### Title: S4 class of model informations for the jump diffusion process
### Aliases: jumpDiffusion-class
### ** Examples
parameter <- list(phi = 0.01, theta = 0.1, gamma2 = 0.01, xi = c(2, 0.2))
b.fun <- function(phi, t, y) phi * y
s.fun <- function(gamma2, t, y) sqrt(gamma2) * y
h.fun <- function(theta, t, y) theta * y
Lambda <- function(t, xi) (t / xi[2])^xi[1]
priorDensity <- list(
phi = function(phi) 1,
theta = function(theta) dnorm(theta, 0.1, 0.001),
gamma2 = function(gamma2) dgamma(1/gamma2, 3, 0.01*2),
xi = function(xi) dgamma(xi, c(2, 0.2), 1)
)
start <- parameter
model <- set.to.class("jumpDiffusion", parameter, start = start,
b.fun = b.fun, s.fun = s.fun, h.fun = h.fun, Lambda = Lambda,
priorDensity = priorDensity)
|
# Control-chart example (Ross, Intro. to Probability and Statistics,
# Ex. 13.5a): Poisson-style control limits Xbar +/- 3*sqrt(Xbar),
# recomputed after discarding out-of-control observations.
X <- c(141, 162, 150, 111, 92, 74, 85, 95, 76, 68, 63, 74, 103, 81, 94, 68, 95, 81, 102, 73)
total <- sum(X)
num <- length(X)  # number of samples (20); was hard-coded before
Xbar <- mean(X)
lcl <- Xbar - 3*sqrt(Xbar)
ucl <- Xbar + 3*sqrt(Xbar)
cat("UCL is",ucl)
cat("LCL is",lcl)
# Drop every observation above the initial UCL, then recompute the limits.
for (i in seq_along(X)) {
  if (X[i] > ucl) {
    total <- total - X[i]
    num <- num - 1
  }
}
Xbar <- total/num
lcl <- Xbar - 3*sqrt(Xbar)
ucl <- Xbar + 3*sqrt(Xbar)
cat("After recomputation")
cat("UCL is",ucl)
cat("LCL is",lcl)
# Following the textbook, X[4] (= 111) is also discarded before the
# final recomputation.
total <- total - X[4]
num <- num - 1
cat("Xbar is",Xbar)
cat("X[4] is",X[4])  # BUG FIX: original printed ' is' with no label
Xbar <- total/num
lcl <- Xbar - 3*sqrt(Xbar)
ucl <- Xbar + 3*sqrt(Xbar)
cat("After second recomputation")
cat("UCL is",ucl)
cat("LCL is",lcl)
cat("It appears that the process is in control with mean",Xbar)
'The mean after the second recomputation is incoreectly calculated in the textbook. It should be
((17*84.41)-111 )/16 = 82.748 whereas the value given in the book is 82.56. The values of UCL and LCL
change accordingly.'
|
/Introduction_To_Probability_And_Statistics_For_Engineers_And_Scientists_by_Sheldon_M._Ross/CH13/EX13.5.a/Ex13_5a.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false
| false
| 973
|
r
|
# Control-chart example (Ross, Intro. to Probability and Statistics,
# Ex. 13.5a): Poisson-style control limits Xbar +/- 3*sqrt(Xbar),
# recomputed after discarding out-of-control observations.
X <- c(141, 162, 150, 111, 92, 74, 85, 95, 76, 68, 63, 74, 103, 81, 94, 68, 95, 81, 102, 73)
total <- sum(X)
num <- length(X)  # number of samples (20); was hard-coded before
Xbar <- mean(X)
lcl <- Xbar - 3*sqrt(Xbar)
ucl <- Xbar + 3*sqrt(Xbar)
cat("UCL is",ucl)
cat("LCL is",lcl)
# Drop every observation above the initial UCL, then recompute the limits.
for (i in seq_along(X)) {
  if (X[i] > ucl) {
    total <- total - X[i]
    num <- num - 1
  }
}
Xbar <- total/num
lcl <- Xbar - 3*sqrt(Xbar)
ucl <- Xbar + 3*sqrt(Xbar)
cat("After recomputation")
cat("UCL is",ucl)
cat("LCL is",lcl)
# Following the textbook, X[4] (= 111) is also discarded before the
# final recomputation.
total <- total - X[4]
num <- num - 1
cat("Xbar is",Xbar)
cat("X[4] is",X[4])  # BUG FIX: original printed ' is' with no label
Xbar <- total/num
lcl <- Xbar - 3*sqrt(Xbar)
ucl <- Xbar + 3*sqrt(Xbar)
cat("After second recomputation")
cat("UCL is",ucl)
cat("LCL is",lcl)
cat("It appears that the process is in control with mean",Xbar)
'The mean after the second recomputation is incoreectly calculated in the textbook. It should be
((17*84.41)-111 )/16 = 82.748 whereas the value given in the book is 82.56. The values of UCL and LCL
change accordingly.'
|
list.files("data_geo", pattern = "*.RData", full.names = T) %>% lapply(load, .GlobalEnv)
research_area <- sf::st_read("data_manually_prepared/research_area.shp")
load("data_analysis/regions.RData")
load("data_analysis/bronze1.RData")
bronze1_sf <- bronze1 %>% sf::st_as_sf(
coords = c("lon", "lat"),
crs = 4326
)
library(ggplot2)
library(sf)
xlimit <- c(-1600000, 1300000)
ylimit <- c(800000, 3800000)
hu <- ggplot() +
geom_sf(
data = land_outline,
fill = "white", colour = "black", size = 0.4
) +
geom_sf(
data = rivers,
fill = NA, colour = "black", size = 0.2
) +
geom_sf(
data = lakes,
fill = NA, colour = "black", size = 0.2
) +
geom_sf(
data = bronze1_sf,
mapping = aes(
color = burial_type,
shape = burial_construction,
size = burial_construction
),
show.legend = "point"
) +
theme_bw() +
coord_sf(
xlim = xlimit, ylim = ylimit,
crs = st_crs("+proj=aea +lat_1=43 +lat_2=62 +lat_0=30 +lon_0=10 +x_0=0 +y_0=0 +ellps=intl +units=m +no_defs")
) +
scale_shape_manual(
values = c(
"flat" = "\u268A",
"mound" = "\u25E0",
"unknown" = "\u2715"
)
) +
scale_size_manual(
values = c(
"flat" = 10,
"mound" = 10,
"unknown" = 5
)
) +
scale_color_manual(
values = c(
"cremation" = "#D55E00",
"inhumation" = "#0072B2",
"mound" = "#CC79A7",
"flat" = "#009E73",
"unknown" = "darkgrey"
)
) +
theme(
plot.title = element_text(size = 30, face = "bold"),
legend.position = "bottom",
legend.title = element_text(size = 20, face = "bold"),
axis.title = element_blank(),
axis.text = element_text(size = 15),
legend.text = element_text(size = 20),
panel.grid.major = element_line(colour = "black", size = 0.3)
) +
guides(
color = guide_legend(title = "Burial type", override.aes = list(size = 10), nrow = 2, byrow = TRUE),
shape = guide_legend(title = "Burial construction", override.aes = list(size = 10), nrow = 2, byrow = TRUE),
size = FALSE
)
hu %>%
ggsave(
"figures_plots/general_maps/general_map.jpeg",
plot = .,
device = "jpeg",
scale = 1,
dpi = 300,
width = 350, height = 360, units = "mm",
limitsize = F
)
|
/R/real_world_analysis/general_maps/general_map.R
|
no_license
|
nevrome/neomod_analysis
|
R
| false
| false
| 2,274
|
r
|
list.files("data_geo", pattern = "*.RData", full.names = T) %>% lapply(load, .GlobalEnv)
research_area <- sf::st_read("data_manually_prepared/research_area.shp")
load("data_analysis/regions.RData")
load("data_analysis/bronze1.RData")
bronze1_sf <- bronze1 %>% sf::st_as_sf(
coords = c("lon", "lat"),
crs = 4326
)
library(ggplot2)
library(sf)
xlimit <- c(-1600000, 1300000)
ylimit <- c(800000, 3800000)
hu <- ggplot() +
geom_sf(
data = land_outline,
fill = "white", colour = "black", size = 0.4
) +
geom_sf(
data = rivers,
fill = NA, colour = "black", size = 0.2
) +
geom_sf(
data = lakes,
fill = NA, colour = "black", size = 0.2
) +
geom_sf(
data = bronze1_sf,
mapping = aes(
color = burial_type,
shape = burial_construction,
size = burial_construction
),
show.legend = "point"
) +
theme_bw() +
coord_sf(
xlim = xlimit, ylim = ylimit,
crs = st_crs("+proj=aea +lat_1=43 +lat_2=62 +lat_0=30 +lon_0=10 +x_0=0 +y_0=0 +ellps=intl +units=m +no_defs")
) +
scale_shape_manual(
values = c(
"flat" = "\u268A",
"mound" = "\u25E0",
"unknown" = "\u2715"
)
) +
scale_size_manual(
values = c(
"flat" = 10,
"mound" = 10,
"unknown" = 5
)
) +
scale_color_manual(
values = c(
"cremation" = "#D55E00",
"inhumation" = "#0072B2",
"mound" = "#CC79A7",
"flat" = "#009E73",
"unknown" = "darkgrey"
)
) +
theme(
plot.title = element_text(size = 30, face = "bold"),
legend.position = "bottom",
legend.title = element_text(size = 20, face = "bold"),
axis.title = element_blank(),
axis.text = element_text(size = 15),
legend.text = element_text(size = 20),
panel.grid.major = element_line(colour = "black", size = 0.3)
) +
guides(
color = guide_legend(title = "Burial type", override.aes = list(size = 10), nrow = 2, byrow = TRUE),
shape = guide_legend(title = "Burial construction", override.aes = list(size = 10), nrow = 2, byrow = TRUE),
size = FALSE
)
hu %>%
ggsave(
"figures_plots/general_maps/general_map.jpeg",
plot = .,
device = "jpeg",
scale = 1,
dpi = 300,
width = 350, height = 360, units = "mm",
limitsize = F
)
|
# Write a per-module gene annotation table for each module directory.
#
# For every module directory under `results.dir`, reads the saved
# decomposition matrix (whose rownames are Entrez gene ids), looks up gene
# annotation via biomaRt, and writes Module.Gene.Info.csv into the module
# directory.
#
# Args:
#   results.dir: top-level results directory; module directories are
#                discovered with get.module.dir().
#   mart:        a biomaRt Mart object passed to getBM().
# Side effects: writes one CSV file per module directory.
writeModuleGenes <- function(results.dir, mart){
  module.dir <- get.module.dir(results.dir)
  for(i in seq_along(module.dir)){
    results.file <- paste0(module.dir[i], "/Module.Gene.Info.csv")
    # NOTE(review): the pattern matches any file containing "decomp";
    # assumes exactly one such file exists per module directory -- confirm.
    decomp.mat.file <- list.files(module.dir[i], pattern = "decomp", full.names = TRUE)
    if(length(decomp.mat.file) == 0){
      # BUG FIX: the original message indexed an undefined object
      # 'dir.table', which itself raised an "object not found" error
      # instead of the intended diagnostic. Report the scanned directory.
      stop(paste0("I could not find a decomp.mat.RData file in ", module.dir[i], ". Please make sure generate.triage.models() was run."))
    }
    decomp.mat <- readRDS(decomp.mat.file)
    gene.id <- rownames(decomp.mat)
    gene.info <- getBM(c("external_gene_name", "entrezgene_id", "chromosome_name", "start_position", "end_position"), "entrezgene_id", values = as.numeric(gene.id), mart = mart)
    # 'row.names' spelled in full (original relied on partial matching of
    # 'row.name').
    write.table(gene.info, results.file, quote = FALSE, sep = ",", row.names = FALSE)
  }
}
|
/code/raven/writeModuleGenes.R
|
no_license
|
MahoneyLab/HhsFunctionalRankings
|
R
| false
| false
| 853
|
r
|
# Write a per-module gene annotation table for each module directory.
#
# For every module directory under `results.dir`, reads the saved
# decomposition matrix (whose rownames are Entrez gene ids), looks up gene
# annotation via biomaRt, and writes Module.Gene.Info.csv into the module
# directory.
#
# Args:
#   results.dir: top-level results directory; module directories are
#                discovered with get.module.dir().
#   mart:        a biomaRt Mart object passed to getBM().
# Side effects: writes one CSV file per module directory.
writeModuleGenes <- function(results.dir, mart){
  module.dir <- get.module.dir(results.dir)
  for(i in seq_along(module.dir)){
    results.file <- paste0(module.dir[i], "/Module.Gene.Info.csv")
    # NOTE(review): the pattern matches any file containing "decomp";
    # assumes exactly one such file exists per module directory -- confirm.
    decomp.mat.file <- list.files(module.dir[i], pattern = "decomp", full.names = TRUE)
    if(length(decomp.mat.file) == 0){
      # BUG FIX: the original message indexed an undefined object
      # 'dir.table', which itself raised an "object not found" error
      # instead of the intended diagnostic. Report the scanned directory.
      stop(paste0("I could not find a decomp.mat.RData file in ", module.dir[i], ". Please make sure generate.triage.models() was run."))
    }
    decomp.mat <- readRDS(decomp.mat.file)
    gene.id <- rownames(decomp.mat)
    gene.info <- getBM(c("external_gene_name", "entrezgene_id", "chromosome_name", "start_position", "end_position"), "entrezgene_id", values = as.numeric(gene.id), mart = mart)
    # 'row.names' spelled in full (original relied on partial matching of
    # 'row.name').
    write.table(gene.info, results.file, quote = FALSE, sep = ",", row.names = FALSE)
  }
}
|
%% File Name: mice.impute.catpmm.Rd
%% File Version: 0.07
\name{mice.impute.catpmm}
\alias{mice.impute.catpmm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Imputation of a Categorical Variable Using Multivariate Predictive
Mean Matching
}
\description{
Imputes a categorical variable using multivariate predictive mean matching.
}
\usage{
mice.impute.catpmm(y, ry, x, donors=5, ridge=10^(-5), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{y}{
Incomplete data vector of length \code{n}
}
\item{ry}{
Vector of missing data pattern (\code{FALSE} -- missing,
\code{TRUE} -- observed)
}
\item{x}{
Matrix (\code{n} x \code{p}) of complete covariates.
}
\item{donors}{Number of donors used for random sampling of nearest
neighbors in imputation}
\item{ridge}{Numerical constant used for avoiding collinearity issues. Noise
is added to covariates.
}
\item{\dots}{
Further arguments to be passed
}
}
\details{
The categorical outcome variable is recoded as a vector of dummy variables.
A multivariate linear regression is specified for computing predicted values.
The L1 distance (i.e., sum of absolute deviations) is utilized for
predictive mean matching. Predictive mean matching for categorical variables
has been proposed by Meinfelder (2009) using a multinomial regression instead of
ordinary linear regression.
}
\value{
A vector of length \code{nmis=sum(!ry)} with imputed values.
}
\references{
Meinfelder, F. (2009). \emph{Analysis of Incomplete Survey Data - Multiple
Imputation via Bayesian Bootstrap Predictive Mean Matching}.
Dissertation thesis. University of Bamberg, Germany.
\url{https://fis.uni-bamberg.de/handle/uniba/213}
}
%\author{
%Alexander Robitzsch
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%See also the packages \pkg{hot.deck} and
%\pkg{HotDeckImputation}.
%}
\examples{
\dontrun{
#############################################################################
# EXAMPLE 1: Imputation of internet data
#############################################################################
data(data.internet, package="miceadds")
dat <- data.internet
#** empty imputation run, only to obtain defaults
imp0 <- mice::mice(dat, m=1, maxit=0)
method <- imp0$method
predmat <- imp0$predictorMatrix
#** define factor variable
dat1 <- dat
dat1[,1] <- as.factor(dat1[,1])
method[1] <- "catpmm"
#** impute with 'catpmm'
imp <- mice::mice(dat1, method=method, m=5)
summary(imp)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%% \keyword{mice imputation method}
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/mice.impute.catpmm.Rd
|
no_license
|
cran/miceadds
|
R
| false
| false
| 2,794
|
rd
|
%% File Name: mice.impute.catpmm.Rd
%% File Version: 0.07
\name{mice.impute.catpmm}
\alias{mice.impute.catpmm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Imputation of a Categorical Variable Using Multivariate Predictive
Mean Matching
}
\description{
Imputes a categorical variable using multivariate predictive mean matching.
}
\usage{
mice.impute.catpmm(y, ry, x, donors=5, ridge=10^(-5), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{y}{
Incomplete data vector of length \code{n}
}
\item{ry}{
Vector of missing data pattern (\code{FALSE} -- missing,
\code{TRUE} -- observed)
}
\item{x}{
Matrix (\code{n} x \code{p}) of complete covariates.
}
\item{donors}{Number of donors used for random sampling of nearest
neighbors in imputation}
\item{ridge}{Numerical constant used for avoiding collinearity issues. Noise
is added to covariates.
}
\item{\dots}{
Further arguments to be passed
}
}
\details{
The categorical outcome variable is recoded as a vector of dummy variables.
A multivariate linear regression is specified for computing predicted values.
The L1 distance (i.e., sum of absolute deviations) is utilized for
predictive mean matching. Predictive mean matching for categorical variables
has been proposed by Meinfelder (2009) using a multinomial regression instead of
ordinary linear regression.
}
\value{
A vector of length \code{nmis=sum(!ry)} with imputed values.
}
\references{
Meinfelder, F. (2009). \emph{Analysis of Incomplete Survey Data - Multiple
Imputation via Bayesian Bootstrap Predictive Mean Matching}.
Dissertation thesis. University of Bamberg, Germany.
\url{https://fis.uni-bamberg.de/handle/uniba/213}
}
%\author{
%Alexander Robitzsch
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%See also the packages \pkg{hot.deck} and
%\pkg{HotDeckImputation}.
%}
\examples{
\dontrun{
#############################################################################
# EXAMPLE 1: Imputation of internet data
#############################################################################
data(data.internet, package="miceadds")
dat <- data.internet
#** empty imputation run, only to obtain defaults
imp0 <- mice::mice(dat, m=1, maxit=0)
method <- imp0$method
predmat <- imp0$predictorMatrix
#** define factor variable
dat1 <- dat
dat1[,1] <- as.factor(dat1[,1])
method[1] <- "catpmm"
#** impute with 'catpmm'
imp <- mice::mice(dat1, method=method, m=5)
summary(imp)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%% \keyword{mice imputation method}
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
#Julian Ramirez-Villegas
#UoL / CCAFS / CIAT
#December 2011
#Modified by Carlos Navarro
# February 2016
stop("error")
src.dir <- "X:/ALPACAS/Plan_Regional_de_Cambio_Climatico_Orinoquia/01-datos_clima/_scripts"
source(paste(src.dir,"/01a_GHCND-GSOD-functions.R",sep=""))
#base dir
bDir <- "S:/observed/weather_station/gsod"; setwd(bDir)
gsodDir <- paste(bDir,"/organized-data",sep="")
# odir <- "D:/CIAT/Projects/col-cormacarena"
odir <- "D:/cenavarro/col-cormacarena"
reg <- "lat"
#gsod stations
stations.gsod <- read.csv(paste(gsodDir,"/ish-history.csv",sep=""))
stations.gsod$LON <- stations.gsod$LON/1000; stations.gsod$LAT <- stations.gsod$LAT/1000
stations.gsod$ELEV..1M. <- stations.gsod$ELEV..1M./10
#1. create extents
require(raster); require(maptools); require(rgdal)
#projection extents
reg.xt <- extent(-90,-30,-40,24)
#plot the extents (for reference -commented!)
#rs <- raster(); rs[] <- rnorm(1:ncell(rs))
#data(wrld_simpl)
#plot(rs,col=colorRampPalette(c("grey10","grey90"))(100)); plot(wrld_simpl,add=T,col='white')
#plot(waf.xt,add=T,col='red'); plot(eaf.xt,add=T,col='blue'); plot(igp.xt,add=T,col='orange')
#plot(afr.xt,add=T,col='black',lty=1); plot(sas.xt,add=T,col='black',lty=2)
#2. define working gridcell
# cellSize <- 1
#3. Make inventory of data (points / day / fit region)
#define initial and final year
yearSeries <- c(1980:2009)
#select stations within 3+degree of interpolation extents
gsod.reg <- stations.gsod[which(stations.gsod$LON>=(reg.xt@xmin-3) & stations.gsod$LON<=(reg.xt@xmax+3)
& stations.gsod$LAT>=(reg.xt@ymin-3) & stations.gsod$LAT<=(reg.xt@ymax+3)),]
# gsod.sas <- stations.gsod[which(stations.gsod$LON>=(sas.xt@xmin-3) & stations.gsod$LON<=(sas.xt@xmax+3)
# & stations.gsod$LAT>=(sas.xt@ymin-3) & stations.gsod$LAT<=(sas.xt@ymax+3)),]
#clean gsod stations of Africa
gsod.reg <- gsod.reg[-which(gsod.reg$LON == 0 | gsod.reg$LAT == 0),]
#do the snowfall stuff here
library(snowfall)
sfInit(parallel=T,cpus=20) #initiate cluster
#export functions
sfExport("convertGSOD")
sfExport("createDateGrid")
sfExport("leap")
#export variables
sfExport("bDir")
IDs <- paste("USAF",gsod.reg$USAF,"_WBAN",gsod.reg$WBAN,sep="")
count <- 1
for (yr in yearSeries) {
cat(yr,paste("(",count," out of ",length(yearSeries),")",sep=""),"\n")
gdir <- paste(gsodDir,"/",yr,sep="")
ogdir <- paste(odir,"/gsod-stations-",reg, "/daily", sep=""); if (!file.exists(ogdir)) {dir.create(ogdir)}
controlConvert <- function(i) { #define a new function
convertGSOD(i,yr,gdir,ogdir)
}
sfExport("yr"); sfExport("gdir"); sfExport("ogdir")
system.time(sfSapply(as.vector(IDs), controlConvert))
count <- count+1
}
##Join all years into one file per station, then build monthly and
##30-yr climatology summaries.
ogdir <- paste(odir,"/gsod-stations-",reg, "/daily", sep="")
st_ids <- paste(gsod.reg$USAF,"-",gsod.reg$WBAN,sep="")
ogdir_mth <- paste(odir,"/gsod-stations-",reg, "/monthly", sep=""); if (!file.exists(ogdir_mth)) {dir.create(ogdir_mth)}
ogdir_30yravg <- paste(odir,"/gsod-stations-",reg, "/30yr_averages", sep=""); if (!file.exists(ogdir_30yravg)) {dir.create(ogdir_30yravg)}
# BUG FIX: the original call passed the undefined name 'ogdir_30yr_avg'
# (extra underscore); the variable created above is 'ogdir_30yravg'.
getMthDataGSOD(ogdir, st_ids, ogdir_mth, ogdir_30yravg)
## Add coordinates (lon/lat/alt) to each climatology file
varList <- c("prec", "tmin", "tmax")
st_loc <- as.data.frame(cbind("id"=paste0(gsod.reg$USAF, "-", gsod.reg$WBAN),"Lon"=gsod.reg$LON, "Lat"=gsod.reg$LAT, "Alt"=gsod.reg$ELEV..1M.))
for (var in varList){
  clim <- read.csv(paste0(ogdir_30yravg, "/gsod_30yravg_", var, ".csv"), header=TRUE)
  clim <- na.omit(merge(st_loc, clim, by = "id", all = TRUE))
  write.csv(clim, paste0(ogdir_30yravg, "/gsod_30yravg_", var, ".csv"), row.names=FALSE)
}
|
/plan_regional_cc_orinoquia/00_wth_stations/01_GSOD-read.R
|
no_license
|
CIAT-DAPA/dapa-climate-change
|
R
| false
| false
| 3,684
|
r
|
#Julian Ramirez-Villegas
#UoL / CCAFS / CIAT
#December 2011
#Modified by Carlos Navarro
# February 2016
stop("error")
src.dir <- "X:/ALPACAS/Plan_Regional_de_Cambio_Climatico_Orinoquia/01-datos_clima/_scripts"
source(paste(src.dir,"/01a_GHCND-GSOD-functions.R",sep=""))
#base dir
bDir <- "S:/observed/weather_station/gsod"; setwd(bDir)
gsodDir <- paste(bDir,"/organized-data",sep="")
# odir <- "D:/CIAT/Projects/col-cormacarena"
odir <- "D:/cenavarro/col-cormacarena"
reg <- "lat"
#gsod stations
stations.gsod <- read.csv(paste(gsodDir,"/ish-history.csv",sep=""))
stations.gsod$LON <- stations.gsod$LON/1000; stations.gsod$LAT <- stations.gsod$LAT/1000
stations.gsod$ELEV..1M. <- stations.gsod$ELEV..1M./10
#1. create extents
require(raster); require(maptools); require(rgdal)
#projection extents
reg.xt <- extent(-90,-30,-40,24)
#plot the extents (for reference -commented!)
#rs <- raster(); rs[] <- rnorm(1:ncell(rs))
#data(wrld_simpl)
#plot(rs,col=colorRampPalette(c("grey10","grey90"))(100)); plot(wrld_simpl,add=T,col='white')
#plot(waf.xt,add=T,col='red'); plot(eaf.xt,add=T,col='blue'); plot(igp.xt,add=T,col='orange')
#plot(afr.xt,add=T,col='black',lty=1); plot(sas.xt,add=T,col='black',lty=2)
#2. define working gridcell
# cellSize <- 1
#3. Make inventory of data (points / day / fit region)
#define initial and final year
yearSeries <- c(1980:2009)
#select stations within 3+degree of interpolation extents
gsod.reg <- stations.gsod[which(stations.gsod$LON>=(reg.xt@xmin-3) & stations.gsod$LON<=(reg.xt@xmax+3)
& stations.gsod$LAT>=(reg.xt@ymin-3) & stations.gsod$LAT<=(reg.xt@ymax+3)),]
# gsod.sas <- stations.gsod[which(stations.gsod$LON>=(sas.xt@xmin-3) & stations.gsod$LON<=(sas.xt@xmax+3)
# & stations.gsod$LAT>=(sas.xt@ymin-3) & stations.gsod$LAT<=(sas.xt@ymax+3)),]
#clean gsod stations of Africa
gsod.reg <- gsod.reg[-which(gsod.reg$LON == 0 | gsod.reg$LAT == 0),]
#do the snowfall stuff here
library(snowfall)
sfInit(parallel=T,cpus=20) #initiate cluster
#export functions
sfExport("convertGSOD")
sfExport("createDateGrid")
sfExport("leap")
#export variables
sfExport("bDir")
IDs <- paste("USAF",gsod.reg$USAF,"_WBAN",gsod.reg$WBAN,sep="")
count <- 1
for (yr in yearSeries) {
cat(yr,paste("(",count," out of ",length(yearSeries),")",sep=""),"\n")
gdir <- paste(gsodDir,"/",yr,sep="")
ogdir <- paste(odir,"/gsod-stations-",reg, "/daily", sep=""); if (!file.exists(ogdir)) {dir.create(ogdir)}
controlConvert <- function(i) { #define a new function
convertGSOD(i,yr,gdir,ogdir)
}
sfExport("yr"); sfExport("gdir"); sfExport("ogdir")
system.time(sfSapply(as.vector(IDs), controlConvert))
count <- count+1
}
##Join all years into one file per station, then build monthly and
##30-yr climatology summaries.
ogdir <- paste(odir,"/gsod-stations-",reg, "/daily", sep="")
st_ids <- paste(gsod.reg$USAF,"-",gsod.reg$WBAN,sep="")
ogdir_mth <- paste(odir,"/gsod-stations-",reg, "/monthly", sep=""); if (!file.exists(ogdir_mth)) {dir.create(ogdir_mth)}
ogdir_30yravg <- paste(odir,"/gsod-stations-",reg, "/30yr_averages", sep=""); if (!file.exists(ogdir_30yravg)) {dir.create(ogdir_30yravg)}
# BUG FIX: the original call passed the undefined name 'ogdir_30yr_avg'
# (extra underscore); the variable created above is 'ogdir_30yravg'.
getMthDataGSOD(ogdir, st_ids, ogdir_mth, ogdir_30yravg)
## Add coordinates (lon/lat/alt) to each climatology file
varList <- c("prec", "tmin", "tmax")
st_loc <- as.data.frame(cbind("id"=paste0(gsod.reg$USAF, "-", gsod.reg$WBAN),"Lon"=gsod.reg$LON, "Lat"=gsod.reg$LAT, "Alt"=gsod.reg$ELEV..1M.))
for (var in varList){
  clim <- read.csv(paste0(ogdir_30yravg, "/gsod_30yravg_", var, ".csv"), header=TRUE)
  clim <- na.omit(merge(st_loc, clim, by = "id", all = TRUE))
  write.csv(clim, paste0(ogdir_30yravg, "/gsod_30yravg_", var, ".csv"), row.names=FALSE)
}
|
# Load required libraries
# NOTE(review): dplyr is attached last on purpose so its verbs mask the
# plyr versions of the same names.
library(plyr) # For splitting, applying and combining data
library(tidyr) # For cleaning and structuring data
library(lubridate) # For data manipulation
library(ggplot2) # For plotting
library(dplyr) # For data manipulation
# Load script to get data (defines the `flights` database table reference)
if (!exists("flights")) {
source("src/getData.R")
}
# Get data from the database
delaysData.year <- flights %>%
select(DayOfWeek, FlightDate, Carrier, OriginCityMarketID, Origin,
CRSDepTime, DepDelay, ArrDelay) %>% # Select variables of interest
filter(Year == 2015L) %>% # Get only data from 2015
collect() %>% # Collect the data from db
mutate(
DayOfWeek = factor(DayOfWeek, levels = c(1:7,9), # Factorise day of the week
labels =
c("Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
"Unknown"
)),
Month = substr(FlightDate, 6, 7), # Get month from FlightDate (YYYY-MM-DD)
DateNum = substr(FlightDate, 9, 10), # Get day number from FlightDate
CRSDepTime = round(as.numeric(CRSDepTime) / 100, 0), # Scheduled departure hour (HHMM -> HH)
DepDelay = ifelse(DepDelay < 0, 0, DepDelay), # Treat early departures as zero delay
ArrDelay = ifelse(ArrDelay < 0, 0, ArrDelay)) %>% # Treat early arrivals as zero delay
filter(CRSDepTime > 5 & CRSDepTime < 24) # Keep scheduled hours 6 through 23 (the 5 o'clock hour is excluded)
# Analyze arrival and departure delays as function of departure time
## Prepare data for plotting
plotData.depTime <- delaysData.year %>%
gather(DelayType, NewDelay, DepDelay:ArrDelay) %>% # Restructure data to long format
mutate(DelayType = ifelse(DelayType == "DepDelay", "Departure Delay", "Arrival Delay")) %>% # Rename values in DepDelay
group_by(CRSDepTime, DelayType) %>% # Group data by hour and delay type
dplyr::summarise(mu = mean(as.numeric(NewDelay), na.rm=TRUE), # Get averages and stdandard error
se = sqrt(var(as.numeric(NewDelay), na.rm=TRUE) / length(na.omit(as.numeric(NewDelay)))),
obs = length(na.omit(as.numeric(NewDelay))))
## Create plot
p <- ggplot(plotData.depTime,
aes(x = CRSDepTime, y = mu, min = mu-se, max = mu+se,
group = DelayType, color = DelayType)) +
geom_line() +
geom_point() +
geom_errorbar(width = .33) +
scale_x_continuous(breaks = seq(6,23)) +
labs(x = "Hour of Day",
y = "Average Delay (Minutes)",
title = "Flight Delays by Departure Time") +
theme(legend.position = "bottom") +
scale_color_discrete(name = "Delay Type")
## Save plot
ggsave(p, file = "img/year/Flight_Delays_By_Hour_DelayType.pdf",dpi = 300)
# Analyze every day of the year
## Prepare data for plotting
plotData.days <- delaysData.year %>%
group_by(Month, DateNum) %>% # Group data by month and day of month
dplyr::summarise(mu = median(as.numeric(DepDelay), na.rm=TRUE), # Get averages and stdandard error
se = sqrt(var(as.numeric(DepDelay), na.rm=TRUE) / length(na.omit(as.numeric(DepDelay)))),
obs = length(na.omit(as.numeric(DepDelay))))
## Create plot
p <- ggplot(plotData.days, aes(x = DateNum, y = mu, min = mu-se, max = mu+se, group = Month)) +
geom_line() +
geom_point() +
scale_y_continuous(breaks = c(0,10)) +
coord_cartesian(ylim = c(-4,16)) +
labs(x = "Day of month",
y = "Median Departure Delay (Minutes)",
title = "Median Flight Delays by Departure Date") +
theme(legend.position = "bottom") +
facet_grid(Month ~.) +
theme_bw()
## Save plot
ggsave(p, file = "img/year/Flight_Delays_By_Departure_Date.pdf", dpi = 300)
# Analyze the 10 busiest airports
# Source: http://en.wikipedia.org/wiki/List_of_the_busiest_airports_in_the_United_States
# Section: Busiest_US_airports_by_total_passenger_boardings
## Prepare data for plotting
plotData.airports <- delaysData.year %>%
filter(Origin %in% c( # Get only data for the 10 busiest airports
"ATL",
"LAX",
"ORD",
"DFW",
"JFK",
"DEN",
"SFO",
"CLT",
"LAS",
"PHX"
)) %>%
group_by(CRSDepTime, Origin) %>% # Group by hour and origin
dplyr::summarise(mu = mean(as.numeric(DepDelay), na.rm = TRUE), # Get averages and stdandard error
se = sqrt(var(as.numeric(DepDelay), na.rm = TRUE) / length(na.omit(as.numeric(DepDelay)))),
obs = length(na.omit(as.numeric(DepDelay)))) %>%
mutate(mu = ifelse((mu - 0 < .001), NA, mu), # Correct negative values to NA
Origin = factor(Origin, levels=c( # Factorize Origin
"ATL",
"LAX",
"ORD",
"DFW",
"JFK",
"DEN",
"SFO",
"CLT",
"LAS",
"PHX")))
## Create plot
### Top 5 airports
p <- ggplot(subset(plotData.airports, as.numeric(Origin) <= 5),
aes(x = CRSDepTime, y = mu, min = mu-se, max = mu+se,
group = Origin, color = Origin, shape = Origin)) +
geom_line() +
geom_point() +
scale_x_continuous(breaks=seq(5,23)) +
labs(x = "Hour of Day",
y = "Average Departure Delay (Minutes)",
title = "Top Five Most Popular Airports") +
theme(legend.position = "bottom") +
scale_color_discrete(name = "Airport") +
scale_shape_discrete(name = "Airport")
## Save plot
ggsave(p, file = "img/year/Flight_Delays_By_Hour_Airport_Top5.pdf",dpi = 300)
### Top 6-10 airports
p <- ggplot(subset(plotData.airports, as.numeric(Origin) >5),
aes(x = CRSDepTime, y = mu, min = mu-se, max = mu+se,
group = Origin, color = Origin, shape = Origin)) +
geom_line() +
geom_point() +
scale_x_continuous(breaks = seq(5,23)) +
labs(x="Hour of Day",y="Average Departure Delay (Minutes)",title="Airports Six through Ten") +
theme(legend.position = "bottom") +
scale_color_discrete(name = "Airport") +
scale_shape_discrete(name = "Airport")
## Save plot
ggsave(p,file = "img/year/Flight_Delays_By_Hour_Airport_6to10.pdf", dpi = 300)
# Analyze 95% and 75% quantiles
## Prepare data for plotting
plotData.quantile = delaysData.year %>%
group_by(CRSDepTime) %>% # Group data by hour
dplyr::summarise(Quantile_95 = quantile(as.numeric(DepDelay), .95, na.rm = TRUE), # Calculate quantiles
Quantile_75 = quantile(as.numeric(DepDelay), .75, na.rm = TRUE),
obs = length(na.omit(as.numeric(DepDelay)))) %>%
gather(variable, value, Quantile_75:Quantile_95) %>% # Restructure data to long format
mutate(variable = factor(variable, levels = c("Quantile_95", "Quantile_75"))) # Factorize variable
## Create plot
p <- ggplot(plotData.quantile, aes(x = CRSDepTime, y = value,
group = variable, color = variable)) +
geom_line() +
scale_x_continuous(breaks = seq(5,23)) +
labs(x = "Hour of Day",
y = "Departure Delay (Minutes)",
title = "95th and 75th Percentiles of Departure Delays") +
scale_color_discrete(name = "Quantile") +
theme(legend.position = "bottom")
## Save plot
ggsave(p, file = "img/year/Flight_Delays_By_Hour_95th.pdf", dpi = 300)
|
/src/analyzeYear.R
|
no_license
|
dkalisch/flight_delays
|
R
| false
| false
| 7,268
|
r
|
# Load required libraries
library(plyr) # For spliting, applying and combining data
library(tidyr) # For cleaning and structuring data
library(lubridate) # For data manipulation
library(ggplot2) # For plotting
library(dplyr) # For data manipulation
# Load script to get data
if (!exists("flights")) {
source("src/getData.R")
}
# Get data from the database
delaysData.year <- flights %>%
select(DayOfWeek, FlightDate, Carrier, OriginCityMarketID, Origin,
CRSDepTime, DepDelay, ArrDelay) %>% # Select variables of interest
filter(Year == 2015L) %>% # Get only data from 2015
collect() %>% # Collect the data from db
mutate(
DayOfWeek = factor(DayOfWeek, levels = c(1:7,9), # Factorise day of the week
labels =
c("Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
"Unknown"
)),
Month = substr(FlightDate, 6, 7), # Get month from FlightDate
DateNum = substr(FlightDate, 9, 10), # Get day number from FlightDate
CRSDepTime = round(as.numeric(CRSDepTime) / 100, 0), # Get the hour of the departure
DepDelay = ifelse(DepDelay < 0, 0, DepDelay), # Remove early departures (0)
ArrDelay = ifelse(ArrDelay < 0, 0, ArrDelay)) %>% # Remove early arrivals (0)
filter(CRSDepTime > 5 & CRSDepTime < 24) # Get only data between 5am and 11:59pm (24h time)
# Analyze arrival and departure delays as function of departure time
## Prepare data for plotting
plotData.depTime <- delaysData.year %>%
gather(DelayType, NewDelay, DepDelay:ArrDelay) %>% # Restructure data to long format
mutate(DelayType = ifelse(DelayType == "DepDelay", "Departure Delay", "Arrival Delay")) %>% # Rename values in DepDelay
group_by(CRSDepTime, DelayType) %>% # Group data by hour and delay type
dplyr::summarise(mu = mean(as.numeric(NewDelay), na.rm=TRUE), # Get averages and stdandard error
se = sqrt(var(as.numeric(NewDelay), na.rm=TRUE) / length(na.omit(as.numeric(NewDelay)))),
obs = length(na.omit(as.numeric(NewDelay))))
## Create plot
p <- ggplot(plotData.depTime,
aes(x = CRSDepTime, y = mu, min = mu-se, max = mu+se,
group = DelayType, color = DelayType)) +
geom_line() +
geom_point() +
geom_errorbar(width = .33) +
scale_x_continuous(breaks = seq(6,23)) +
labs(x = "Hour of Day",
y = "Average Delay (Minutes)",
title = "Flight Delays by Departure Time") +
theme(legend.position = "bottom") +
scale_color_discrete(name = "Delay Type")
## Save plot
ggsave(p, file = "img/year/Flight_Delays_By_Hour_DelayType.pdf",dpi = 300)
# ---------------------------------------------------------------------------
# Analysis 2: median departure delay for every day of the year, one facet
# per month.
# ---------------------------------------------------------------------------
## Prepare data for plotting
plotData.days <- delaysData.year %>%
  group_by(Month, DateNum) %>%                               # Group data by month and day of month
  dplyr::summarise(mu = median(as.numeric(DepDelay), na.rm=TRUE), # Median delay, standard error, and obs count
                   se = sqrt(var(as.numeric(DepDelay), na.rm=TRUE) / length(na.omit(as.numeric(DepDelay)))),
                   obs = length(na.omit(as.numeric(DepDelay))))
## Create plot
# BUGFIX: theme_bw() is a complete theme and overrides any theme()
# modification made before it, so the original ordering silently discarded
# legend.position. Apply the complete theme first, tweaks afterwards.
# Also renamed the unknown `min`/`max` aesthetics to ymin/ymax (no errorbar
# geom is drawn here, but the old names only produced warnings).
p <- ggplot(plotData.days, aes(x = DateNum, y = mu, ymin = mu-se, ymax = mu+se, group = Month)) +
  geom_line() +
  geom_point() +
  scale_y_continuous(breaks = c(0,10)) +
  coord_cartesian(ylim = c(-4,16)) +
  labs(x = "Day of month",
       y = "Median Departure Delay (Minutes)",
       title = "Median Flight Delays by Departure Date") +
  facet_grid(Month ~.) +
  theme_bw() +
  theme(legend.position = "bottom")
## Save plot
ggsave(p, file = "img/year/Flight_Delays_By_Departure_Date.pdf", dpi = 300)
# ---------------------------------------------------------------------------
# Analysis 3: hourly mean departure delay for the ten busiest US airports,
# plotted as two panels (top five, airports six through ten).
# Source: http://en.wikipedia.org/wiki/List_of_the_busiest_airports_in_the_United_States
# Section: Busiest_US_airports_by_total_passenger_boardings
# ---------------------------------------------------------------------------
## Prepare data for plotting
plotData.airports <- delaysData.year %>%
  filter(Origin %in% c(                    # Keep only the 10 busiest airports
    "ATL",
    "LAX",
    "ORD",
    "DFW",
    "JFK",
    "DEN",
    "SFO",
    "CLT",
    "LAS",
    "PHX"
  )) %>%
  group_by(CRSDepTime, Origin) %>%         # Group by scheduled hour and origin airport
  dplyr::summarise(mu = mean(as.numeric(DepDelay), na.rm = TRUE), # Mean delay, standard error, and obs count
                   se = sqrt(var(as.numeric(DepDelay), na.rm = TRUE) / length(na.omit(as.numeric(DepDelay)))),
                   obs = length(na.omit(as.numeric(DepDelay)))) %>%
  # NOTE(review): this blanks mu whenever mu < 0.001, i.e. near-zero means
  # as well as negative ones; the original comment claimed "negative values
  # to NA" — confirm which behavior is intended.
  mutate(mu = ifelse((mu - 0 < .001), NA, mu),
         Origin = factor(Origin, levels=c( # Factorize Origin so panel split by rank works below
           "ATL",
           "LAX",
           "ORD",
           "DFW",
           "JFK",
           "DEN",
           "SFO",
           "CLT",
           "LAS",
           "PHX")))
## Create plot
# BUGFIX: renamed the unknown `min`/`max` aesthetics to ymin/ymax; the old
# names are not recognised by ggplot2 and only generated warnings.
### Top 5 airports
p <- ggplot(subset(plotData.airports, as.numeric(Origin) <= 5),
            aes(x = CRSDepTime, y = mu, ymin = mu-se, ymax = mu+se,
                group = Origin, color = Origin, shape = Origin)) +
  geom_line() +
  geom_point() +
  scale_x_continuous(breaks=seq(5,23)) +
  labs(x = "Hour of Day",
       y = "Average Departure Delay (Minutes)",
       title = "Top Five Most Popular Airports") +
  theme(legend.position = "bottom") +
  scale_color_discrete(name = "Airport") +
  scale_shape_discrete(name = "Airport")
## Save plot
ggsave(p, file = "img/year/Flight_Delays_By_Hour_Airport_Top5.pdf",dpi = 300)
### Top 6-10 airports
p <- ggplot(subset(plotData.airports, as.numeric(Origin) >5),
            aes(x = CRSDepTime, y = mu, ymin = mu-se, ymax = mu+se,
                group = Origin, color = Origin, shape = Origin)) +
  geom_line() +
  geom_point() +
  scale_x_continuous(breaks = seq(5,23)) +
  labs(x="Hour of Day",y="Average Departure Delay (Minutes)",title="Airports Six through Ten") +
  theme(legend.position = "bottom") +
  scale_color_discrete(name = "Airport") +
  scale_shape_discrete(name = "Airport")
## Save plot
ggsave(p,file = "img/year/Flight_Delays_By_Hour_Airport_6to10.pdf", dpi = 300)
# ---------------------------------------------------------------------------
# Analysis 4: 95th and 75th percentile departure delays per scheduled hour.
# ---------------------------------------------------------------------------
## Summarise the two percentiles for every scheduled departure hour
quantile_by_hour <- delaysData.year %>%
  group_by(CRSDepTime) %>%
  dplyr::summarise(Quantile_95 = quantile(as.numeric(DepDelay), .95, na.rm = TRUE),
                   Quantile_75 = quantile(as.numeric(DepDelay), .75, na.rm = TRUE),
                   obs = length(na.omit(as.numeric(DepDelay)))) %>%
  gather(variable, value, Quantile_75:Quantile_95) %>%
  mutate(variable = factor(variable, levels = c("Quantile_95", "Quantile_75")))
## Build the line plot, one colored line per percentile
quantile_plot <- ggplot(quantile_by_hour,
                        aes(x = CRSDepTime, y = value,
                            group = variable, color = variable)) +
  geom_line() +
  scale_x_continuous(breaks = seq(5, 23)) +
  labs(x = "Hour of Day",
       y = "Departure Delay (Minutes)",
       title = "95th and 75th Percentiles of Departure Delays") +
  scale_color_discrete(name = "Quantile") +
  theme(legend.position = "bottom")
## Write the figure to disk
ggsave(quantile_plot, file = "img/year/Flight_Delays_By_Hour_95th.pdf", dpi = 300)
|
#' The RowTable class
#'
#' The RowTable is a virtual class where each row in the \linkS4class{SummarizedExperiment} is represented by no more than one row in a \code{\link{datatable}} widget.
#' In panels of this class, single and multiple selections can only be transmitted on the features.
#'
#' @section Slot overview:
#' No new slots are added.
#' All slots provided in the \linkS4class{Table} parent class are available.
#'
#' @section Supported methods:
#' In the following code snippets, \code{x} is an instance of a \linkS4class{RowTable} class.
#' Refer to the documentation for each method for more details on the remaining arguments.
#'
#' For setting up data values:
#' \itemize{
#' \item \code{\link{.refineParameters}(x, se)} replaces \code{NA} values in \code{Selected} with the first row name of \code{se}.
#' This will also call the equivalent \linkS4class{Table} method.
#' }
#'
#' For defining the interface:
#' \itemize{
#' \item \code{\link{.hideInterface}(x, field)} returns a logical scalar indicating whether the interface element corresponding to \code{field} should be hidden.
#' This returns \code{TRUE} for column selection parameters (\code{"ColumnSelectionSource"} and \code{"ColumnSelectionRestrict"}),
#' otherwise it dispatches to the \linkS4class{Panel} method.
#' }
#'
#' For monitoring reactive expressions:
#' \itemize{
#' \item \code{\link{.createObservers}(x, se, input, session, pObjects, rObjects)} sets up observers to propagate changes in the \code{Selected} to linked plots.
#' This will also call the equivalent \linkS4class{Table} method.
#' }
#'
#' For controlling selections:
#' \itemize{
#' \item \code{\link{.multiSelectionDimension}(x)} returns \code{"row"} to indicate that a row selection is being transmitted.
#' \item \code{\link{.singleSelectionDimension}(x)} returns \code{"feature"} to indicate that a feature identity is being transmitted.
#' }
#'
#' For rendering output:
#' \itemize{
#' \item \code{\link{.showSelectionDetails}(x)} returns a HTML element generated by calling the function registered in \code{\link{iSEEOptions}$get("RowTable.select.details")} on \code{x[["Selected"]]}.
#' If no function is registered, \code{NULL} is returned.
#' }
#'
#' Unless explicitly specialized above, all methods from the parent classes \linkS4class{DotPlot} and \linkS4class{Panel} are also available.
#'
#' @section Subclass expectations:
#' Subclasses are expected to implement methods for:
#' \itemize{
#' \item \code{\link{.generateTable}}
#' \item \code{\link{.fullName}}
#' \item \code{\link{.panelColor}}
#' }
#'
#' The method for \code{\link{.generateTable}} should create a \code{tab} data.frame where each row corresponds to a row in the \linkS4class{SummarizedExperiment} object.
#'
#' @seealso
#' \linkS4class{Table}, for the immediate parent class that contains the actual slot definitions.
#'
#' @author Aaron Lun
#'
#' @docType methods
#' @aliases
#' initialize,RowTable-method
#' .refineParameters,RowTable-method
#' .defineInterface,RowTable-method
#' .createObservers,RowTable-method
#' .hideInterface,RowTable-method
#' .multiSelectionDimension,RowTable-method
#' .singleSelectionDimension,RowTable-method
#' .showSelectionDetails,RowTable-method
#' @name RowTable-class
NULL
#' @export
#' @importFrom methods callNextMethod
setMethod("initialize", "RowTable", function(.Object, ...) {
args <- list(...)
# Defensive measure to avoid problems with cyclic graphs
# that the user doesn't have permissions to change!
# Default the dynamic column-selection flag to FALSE, then forward the
# object plus the (possibly amended) arguments to the parent initializer.
args <- .emptyDefault(args, .selectColDynamic, FALSE)
do.call(callNextMethod, c(list(.Object), args))
})
#' @export
setMethod(".refineParameters", "RowTable", function(x, se) {
# Let the parent Table method refine the shared parameters first; a NULL
# result means refinement failed upstream and is propagated unchanged.
refined <- callNextMethod()
if (is.null(refined)) {
return(NULL)
}
# Replace a missing `Selected` value with the first row name of `se`.
.replaceMissingWithFirst(refined, .TableSelected, rownames(se))
})
#' @export
setMethod(".createObservers", "RowTable", function(x, se, input, session, pObjects, rObjects) {
# Set up the parent Table observers first, then add an observer that
# propagates changes in the selected row name to linked panels.
callNextMethod()
.create_dimname_propagation_observer(.getEncodedName(x), choices=rownames(se),
session=session, pObjects=pObjects, rObjects=rObjects)
})
#' @export
setMethod(".hideInterface", "RowTable", function(x, field) {
# Column-selection widgets are irrelevant for row-oriented tables, so the
# corresponding interface elements are always hidden; any other field is
# delegated to the parent method.
column_params <- c(.selectColSource, .selectColRestrict, .selectColDynamic)
if (field %in% column_params) {
return(TRUE)
}
callNextMethod()
})
# A RowTable always transmits multiple selections along the row dimension.
#' @export
setMethod(".multiSelectionDimension", "RowTable", function(x) "row")
# A single selection from a RowTable identifies an individual feature.
#' @export
setMethod(".singleSelectionDimension", "RowTable", function(x) "feature")
#' @export
setMethod(".showSelectionDetails", "RowTable", function(x) {
# Look up the user-registered detail renderer; when none is registered the
# bare `if` yields an invisible NULL, matching the documented contract.
details_fun <- iSEEOptions$get("RowTable.select.details")
if (!is.null(details_fun)) {
details_fun(slot(x, .TableSelected))
}
})
|
/R/family_RowTable.R
|
permissive
|
BadSeby/iSEE
|
R
| false
| false
| 4,702
|
r
|
#' The RowTable class
#'
#' The RowTable is a virtual class where each row in the \linkS4class{SummarizedExperiment} is represented by no more than one row in a \code{\link{datatable}} widget.
#' In panels of this class, single and multiple selections can only be transmitted on the features.
#'
#' @section Slot overview:
#' No new slots are added.
#' All slots provided in the \linkS4class{Table} parent class are available.
#'
#' @section Supported methods:
#' In the following code snippets, \code{x} is an instance of a \linkS4class{RowTable} class.
#' Refer to the documentation for each method for more details on the remaining arguments.
#'
#' For setting up data values:
#' \itemize{
#' \item \code{\link{.refineParameters}(x, se)} replaces \code{NA} values in \code{Selected} with the first row name of \code{se}.
#' This will also call the equivalent \linkS4class{Table} method.
#' }
#'
#' For defining the interface:
#' \itemize{
#' \item \code{\link{.hideInterface}(x, field)} returns a logical scalar indicating whether the interface element corresponding to \code{field} should be hidden.
#' This returns \code{TRUE} for column selection parameters (\code{"ColumnSelectionSource"} and \code{"ColumnSelectionRestrict"}),
#' otherwise it dispatches to the \linkS4class{Panel} method.
#' }
#'
#' For monitoring reactive expressions:
#' \itemize{
#' \item \code{\link{.createObservers}(x, se, input, session, pObjects, rObjects)} sets up observers to propagate changes in the \code{Selected} to linked plots.
#' This will also call the equivalent \linkS4class{Table} method.
#' }
#'
#' For controlling selections:
#' \itemize{
#' \item \code{\link{.multiSelectionDimension}(x)} returns \code{"row"} to indicate that a row selection is being transmitted.
#' \item \code{\link{.singleSelectionDimension}(x)} returns \code{"feature"} to indicate that a feature identity is being transmitted.
#' }
#'
#' For rendering output:
#' \itemize{
#' \item \code{\link{.showSelectionDetails}(x)} returns a HTML element generated by calling the function registered in \code{\link{iSEEOptions}$get("RowTable.select.details")} on \code{x[["Selected"]]}.
#' If no function is registered, \code{NULL} is returned.
#' }
#'
#' Unless explicitly specialized above, all methods from the parent classes \linkS4class{DotPlot} and \linkS4class{Panel} are also available.
#'
#' @section Subclass expectations:
#' Subclasses are expected to implement methods for:
#' \itemize{
#' \item \code{\link{.generateTable}}
#' \item \code{\link{.fullName}}
#' \item \code{\link{.panelColor}}
#' }
#'
#' The method for \code{\link{.generateTable}} should create a \code{tab} data.frame where each row corresponds to a row in the \linkS4class{SummarizedExperiment} object.
#'
#' @seealso
#' \linkS4class{Table}, for the immediate parent class that contains the actual slot definitions.
#'
#' @author Aaron Lun
#'
#' @docType methods
#' @aliases
#' initialize,RowTable-method
#' .refineParameters,RowTable-method
#' .defineInterface,RowTable-method
#' .createObservers,RowTable-method
#' .hideInterface,RowTable-method
#' .multiSelectionDimension,RowTable-method
#' .singleSelectionDimension,RowTable-method
#' .showSelectionDetails,RowTable-method
#' @name RowTable-class
NULL
#' @export
#' @importFrom methods callNextMethod
setMethod("initialize", "RowTable", function(.Object, ...) {
args <- list(...)
# Defensive measure to avoid problems with cyclic graphs
# that the user doesn't have permissions to change!
args <- .emptyDefault(args, .selectColDynamic, FALSE)
do.call(callNextMethod, c(list(.Object), args))
})
#' @export
setMethod(".refineParameters", "RowTable", function(x, se) {
x <- callNextMethod()
if (is.null(x)) {
return(NULL)
}
x <- .replaceMissingWithFirst(x, .TableSelected, rownames(se))
x
})
#' @export
setMethod(".createObservers", "RowTable", function(x, se, input, session, pObjects, rObjects) {
callNextMethod()
.create_dimname_propagation_observer(.getEncodedName(x), choices=rownames(se),
session=session, pObjects=pObjects, rObjects=rObjects)
})
#' @export
setMethod(".hideInterface", "RowTable", function(x, field) {
if (field %in% c(.selectColSource, .selectColRestrict, .selectColDynamic)) {
TRUE
} else {
callNextMethod()
}
})
#' @export
setMethod(".multiSelectionDimension", "RowTable", function(x) "row")
#' @export
setMethod(".singleSelectionDimension", "RowTable", function(x) "feature")
#' @export
setMethod(".showSelectionDetails", "RowTable", function(x) {
FUN <- iSEEOptions$get("RowTable.select.details")
if (!is.null(FUN)) {
FUN(slot(x, .TableSelected))
}
})
|
#' Quasi Minimal Residual Method
#'
#' The Quasi-Minimal Residual (QMR) method is another remedy for BiCG, which shows
#' rather irregular convergence behavior. It adapts BiCG to solve the reduced tridiagonal system
#' in a least-squares sense, and its convergence is known to be considerably smoother than that of BiCG.
#'
#' @param A an \eqn{(m\times n)} dense or sparse matrix. See also \code{\link[Matrix]{sparseMatrix}}.
#' @param B a vector of length \eqn{m} or an \eqn{(m\times k)} matrix (dense or sparse) for solving \eqn{k} systems simultaneously.
#' @param xinit a length-\eqn{n} vector for initial starting point. \code{NA} to start from a random initial point near 0.
#' @param reltol tolerance level for stopping iterations.
#' @param maxiter maximum number of iterations allowed.
#' @param preconditioner an \eqn{(n\times n)} preconditioning matrix; default is an identity matrix.
#' @param verbose a logical; \code{TRUE} to show progress of computation.
#'
#' @return a named list containing \describe{
#' \item{x}{solution; a vector of length \eqn{n} or a matrix of size \eqn{(n\times k)}.}
#' \item{iter}{the number of iterations required.}
#' \item{errors}{a vector of errors for stopping criterion.}
#' }
#'
#' @examples
#' ## Overdetermined System
#' A = matrix(rnorm(10*5),nrow=10)
#' x = rnorm(5)
#' b = A%*%x
#'
#' out1 = lsolve.cg(A,b)
#' out2 = lsolve.bicg(A,b)
#' out3 = lsolve.qmr(A,b)
#' matout = cbind(matrix(x),out1$x, out2$x, out3$x);
#' colnames(matout) = c("true x","CG result", "BiCG result", "QMR result")
#' print(matout)
#'
#' @references
#' \insertRef{freund_qmr:_1991}{Rlinsolve}
#'
#' @rdname krylov_QMR
#' @export
lsolve.qmr <- function(A, B, xinit = NA, reltol = 1e-5, maxiter = 1000,
                       preconditioner = diag(ncol(A)), verbose = TRUE) {
  ###########################################################################
  # Step 0. Initialization: validate values and normalise storage formats.
  if (verbose) {
    message("* lsolve.qmr : Initialized.")  # fixed typo in status message
  }
  if (any(is.na(A)) || any(is.infinite(A)) || any(is.na(B)) || any(is.infinite(B))) {
    stop("* lsolve.qmr : no NA or Inf values allowed.")
  }
  sparseformats = c("dgCMatrix", "dtCMatrix", "dsCMatrix")
  # BUGFIX: comparing class() output with %in% misbehaves when class()
  # returns more than one string; inherits() is the supported membership test.
  if (inherits(A, sparseformats) || inherits(B, sparseformats) || inherits(preconditioner, sparseformats)) {
    # Promote everything to sparse storage so downstream algebra is consistent.
    A = Matrix(A, sparse = TRUE)
    B = Matrix(B, sparse = TRUE)
    preconditioner = Matrix(preconditioner, sparse = TRUE)
    sparseflag = TRUE
  } else {
    A = matrix(A, nrow = nrow(A))
    if (is.vector(B)) {
      B = matrix(B)
    } else {
      B = matrix(B, nrow = nrow(B))
    }
    preconditioner = matrix(preconditioner, nrow = nrow(preconditioner))
    sparseflag = FALSE
  }
  # xinit: either the scalar-NA sentinel (random start) or a user vector.
  # BUGFIX: `if (is.na(xinit))` is invalid when a user supplies a vector
  # xinit (vector condition); test the length-1 sentinel explicitly.
  if (length(xinit) == 1 && is.na(xinit)) {
    xinit = matrix(rnorm(ncol(A)))
  } else {
    if (length(xinit) != ncol(A)) {
      stop("* lsolve.qmr : 'xinit' has invalid size.")
    }
    xinit = matrix(xinit)
  }
  ###########################################################################
  # Step 1. Preprocessing
  # 1-1. Neither NA nor Inf allowed.
  if (any(is.infinite(A)) || any(is.na(A)) || any(is.infinite(B)) || any(is.na(B))) {
    stop("* lsolve.qmr : no NA, Inf, -Inf values are allowed.")
  }
  # 1-2. Size argument: B must have as many rows/entries as A has rows.
  m = nrow(A)
  if (is.vector(B)) {
    mB = length(B)
    if (m != mB) {
      stop("* lsolve.qmr : a vector B should have a length of nrow(A).")
    }
  } else {
    mB = nrow(B)
    if (m != mB) {
      stop("* lsolve.qmr : an input matrix B should have the same number of rows from A.")
    }
  }
  if (is.vector(B)) {
    B = as.matrix(B)
  }
  # 1-3. Adjusting case: reduce an overdetermined system to the square
  # normal equations; underdetermined systems are not supported.
  if (m > ncol(A)) {         ## Case 1. Overdetermined
    B = t(A) %*% B
    A = t(A) %*% A
  } else if (m < ncol(A)) {  ## Case 2. Underdetermined
    stop("* lsolve.qmr : underdetermined case is not supported.")
  }
  # 1-4. Preconditioner: only valid for the (now) square case.
  # BUGFIX: all.equal() returns a character description when its arguments
  # differ, so `!all.equal(...)` raised an error instead of stopping with
  # the intended message; wrap with isTRUE().
  if (!isTRUE(all.equal(dim(A), dim(preconditioner)))) {
    stop("* lsolve.qmr : preconditioner must match the dimensions of A.")
  }
  if (verbose) { message("* lsolve.qmr : preprocessing finished ...") }
  ###########################################################################
  # Step 2. Main computation: solve one column of B at a time.
  ncolB = ncol(B)
  if (ncolB == 1) {
    if (sparseflag) {
      # The sparse solver expects the preconditioner's triangular LU factors.
      luM = lu(preconditioner)
      M1 = luM@L
      M2 = luM@U
    }
    if (!sparseflag) {
      vecB = as.vector(B)
      res = linsolve.qmr.single(A, vecB, xinit, reltol, maxiter, preconditioner)
    } else {
      vecB = B
      res = linsolve.qmr.single.sparse(A, vecB, xinit, reltol, maxiter, preconditioner, M1, M2)
    }
  } else {
    # Multiple right-hand sides: preallocate outputs and solve column-wise.
    x = array(0, c(ncol(A), ncolB))
    iter = array(0, c(1, ncolB))
    errors = list()
    if (sparseflag) {
      luM = lu(preconditioner)
      M1 = luM@L
      M2 = luM@U
    }
    for (i in seq_len(ncolB)) {
      if (!sparseflag) {
        vecB = as.vector(B[, i])
        tmpres = linsolve.qmr.single(A, vecB, xinit, reltol, maxiter, preconditioner)
      } else {
        vecB = Matrix(B[, i], sparse = TRUE)
        tmpres = linsolve.qmr.single.sparse(A, vecB, xinit, reltol, maxiter, preconditioner, M1, M2)
      }
      x[, i] = tmpres$x
      iter[i] = tmpres$iter
      errors[[i]] = tmpres$errors
      if (verbose) {
        message(paste("* lsolve.qmr : B's column.", i, "being processed.."))
      }
    }
    res = list("x" = x, "iter" = iter, "errors" = errors)
  }
  ###########################################################################
  # Step 3. Finalize: report the solver's termination flag, then drop it.
  if ("flag" %in% names(res)) {
    flagval = res$flag
    # Map each numeric flag onto its human-readable status message; this
    # replaces the original 8-branch if/else chain with a lookup table.
    flag_messages = c("0"  = "convergence well achieved.",
                      "1"  = "convergence not achieved within maxiter.",
                      "-1" = "breakdown due to degenerate 'rho' value.",
                      "-2" = "breakdown due to degenerate 'beta' value.",
                      "-3" = "breakdown due to degenerate 'gamma' value.",
                      "-4" = "breakdown due to degenerate 'delta' value.",
                      "-5" = "breakdown due to degenerate 'ep' value.",
                      "-6" = "breakdown due to degenerate 'xi' value.")
    key = as.character(flagval)
    if (verbose && key %in% names(flag_messages)) {
      message(paste("* lsolve.qmr :", flag_messages[[key]]))
    }
    res$flag = NULL
  }
  if (verbose) {
    message("* lsolve.qmr : computations finished.")
  }
  return(res)
}
|
/R/lsolve_QMR.R
|
no_license
|
harryprince/Rlinsolve
|
R
| false
| false
| 6,493
|
r
|
#' Quasi Minimal Residual Method
#'
#' Quasia-Minimal Resudial(QMR) method is another remedy of the BiCG which shows
#' rather irregular convergence behavior. It adapts to solve the reduced tridiagonal system
#' in a least squares sense and its convergence is known to be quite smoother than BiCG.
#'
#' @param A an \eqn{(m\times n)} dense or sparse matrix. See also \code{\link[Matrix]{sparseMatrix}}.
#' @param B a vector of length \eqn{m} or an \eqn{(m\times k)} matrix (dense or sparse) for solving \eqn{k} systems simultaneously.
#' @param xinit a length-\eqn{n} vector for initial starting point. \code{NA} to start from a random initial point near 0.
#' @param reltol tolerance level for stopping iterations.
#' @param maxiter maximum number of iterations allowed.
#' @param preconditioner an \eqn{(n\times n)} preconditioning matrix; default is an identity matrix.
#' @param verbose a logical; \code{TRUE} to show progress of computation.
#'
#' @return a named list containing \describe{
#' \item{x}{solution; a vector of length \eqn{n} or a matrix of size \eqn{(n\times k)}.}
#' \item{iter}{the number of iterations required.}
#' \item{errors}{a vector of errors for stopping criterion.}
#' }
#'
#' @examples
#' ## Overdetermined System
#' A = matrix(rnorm(10*5),nrow=10)
#' x = rnorm(5)
#' b = A%*%x
#'
#' out1 = lsolve.cg(A,b)
#' out2 = lsolve.bicg(A,b)
#' out3 = lsolve.qmr(A,b)
#' matout = cbind(matrix(x),out1$x, out2$x, out3$x);
#' colnames(matout) = c("true x","CG result", "BiCG result", "QMR result")
#' print(matout)
#'
#' @references
#' \insertRef{freund_qmr:_1991}{Rlinsolve}
#'
#' @rdname krylov_QMR
#' @export
lsolve.qmr <- function(A,B,xinit=NA,reltol=1e-5,maxiter=1000,
preconditioner=diag(ncol(A)),verbose=TRUE){
###########################################################################
# Step 0. Initialization
if (verbose){
message("* lsolve.qmr : Initialiszed.")
}
if (any(is.na(A))||any(is.infinite(A))||any(is.na(B))||any(is.infinite(B))){
stop("* lsolve.qmr : no NA or Inf values allowed.")
}
sparseformats = c("dgCMatrix","dtCMatrix","dsCMatrix")
if ((class(A)%in%sparseformats)||(class(B)%in%sparseformats)||(class(preconditioner)%in%sparseformats)){
A = Matrix(A,sparse=TRUE)
B = Matrix(B,sparse=TRUE)
preconditioner = Matrix(preconditioner,sparse=TRUE)
sparseflag = TRUE
} else {
A = matrix(A,nrow=nrow(A))
if (is.vector(B)){
B = matrix(B)
} else {
B = matrix(B,nrow=nrow(B))
}
preconditioner = matrix(preconditioner,nrow=nrow(preconditioner))
sparseflag = FALSE
}
# xinit
if (is.na(xinit)){
xinit = matrix(rnorm(ncol(A)))
} else {
if (length(xinit)!=ncol(A)){
stop("* lsolve.qmr : 'xinit' has invalid size.")
}
xinit = matrix(xinit)
}
###########################################################################
# Step 1. Preprocessing
# 1-1. Neither NA nor Inf allowed.
if (any(is.infinite(A))||any(is.na(A))||any(is.infinite(B))||any(is.na(B))){
stop("* lsolve.qmr : no NA, Inf, -Inf values are allowed.")
}
# 1-2. Size Argument
m = nrow(A)
if (is.vector(B)){
mB = length(B)
if (m!=mB){
stop("* lsolve.qmr : a vector B should have a length of nrow(A).")
}
} else {
mB = nrow(B)
if (m!=mB){
stop("* lsolve.qmr : an input matrix B should have the same number of rows from A.")
}
}
if (is.vector(B)){
B = as.matrix(B)
}
# 1-3. Adjusting Case
if (m > ncol(A)){ ## Case 1. Overdetermined
B = t(A)%*%B
A = t(A)%*%A
} else if (m < ncol(A)){ ## Case 2. Underdetermined
stop("* lsolve.qmr : underdetermined case is not supported.")
}
# 1-4. Preconditioner : only valid for square case
if (!all.equal(dim(A),dim(preconditioner))){
stop("* lsolve.qmr : Preconditioner is a size-matching.")
}
if (verbose){message("* lsolve.qmr : preprocessing finished ...")}
###########################################################################
# Step 2. Main Computation
ncolB = ncol(B)
if (ncolB==1){
if (sparseflag){
luM = lu(preconditioner)
M1 = luM@L
M2 = luM@U
}
if (!sparseflag){
vecB = as.vector(B)
res = linsolve.qmr.single(A,vecB,xinit,reltol,maxiter,preconditioner)
} else {
vecB = B
res = linsolve.qmr.single.sparse(A,vecB,xinit,reltol,maxiter,preconditioner,M1,M2)
}
} else {
x = array(0,c(ncol(A),ncolB))
iter = array(0,c(1,ncolB))
errors = list()
if (sparseflag){
luM = lu(preconditioner)
M1 = luM@L
M2 = luM@U
}
for (i in 1:ncolB){
if (!sparseflag){
vecB = as.vector(B[,i])
tmpres = linsolve.qmr.single(A,vecB,xinit,reltol,maxiter,preconditioner)
} else {
vecB = Matrix(B[,i],sparse=TRUE)
tmpres = linsolve.qmr.single.sparse(A,vecB,xinit,reltol,maxiter,preconditioner,M1,M2)
}
x[,i] = tmpres$x
iter[i] = tmpres$iter
errors[[i]] = tmpres$errors
if (verbose){
message(paste("* lsolve.qmr : B's column.",i,"being processed.."))
}
}
res = list("x"=x,"iter"=iter,"errors"=errors)
}
###########################################################################
# Step 3. Finalize
if ("flag" %in% names(res)){
flagval = res$flag
if (flagval==0){
if (verbose){
message("* lsolve.qmr : convergence well achieved.")
}
} else if (flagval==1){
if (verbose){
message("* lsolve.qmr : convergence not achieved within maxiter.")
}
} else if (flagval==-1){
if (verbose){
message("* lsolve.qmr : breakdown due to degenerate 'rho' value.")
}
} else if (flagval==-2){
if (verbose){
message("* lsolve.qmr : breakdown due to degenerate 'beta' value.")
}
} else if (flagval==-3){
if (verbose){
message("* lsolve.qmr : breakdown due to degenerate 'gamma' value.")
}
} else if (flagval==-4){
if (verbose){
message("* lsolve.qmr : breakdown due to degenerate 'delta' value.")
}
} else if (flagval==-5){
if (verbose){
message("* lsolve.qmr : breakdown due to degenerate 'ep' value.")
}
} else if (flagval==-6){
if (verbose){
message("* lsolve.qmr : breakdown due to degenerate 'xi' value.")
}
}
res$flag = NULL
}
if (verbose){
message("* lsolve.qmr : computations finished.")
}
return(res)
}
|
#--------------------------------------------------------------------------------------------------------------------------------------------
#In this script vegetation is prepared to estimate the spatial model
#--------------------------------------------------------------------------------------------------------------------------------------------
library(fields)
library(rstan)
library(stepps)
#-------------------------------------------------------------------------------------------------------------------
setwd('~/workflow_stepps_prediction/vegetation/')
help.fun.loc <- 'helper_funs/'
data.loc <- 'data/'
plot.loc <- 'plots/'
#-------------------------------------------------------------------------------------------------------------------------------------------
#load data that was used for calibration (is the same vegetation data)
#-------------------------------------------------------------------------------------------------------------------------------------------
# Read the public land survey composition counts and keep their grid coordinates.
pls.counts <- read.csv('data/wiki_outputs/plss_composition_alb_v0.9-10.csv')
veg_coords <- pls.counts[,c('x','y')]
#merge species into other hardwood and other conifer
#colnames(pls.counts)
# Translation table collapses rare taxa; first three columns are IDs/coords.
pls_table <- readr::read_csv('~/workflow_stepps_calibration/calibration/data/veg_trans_edited.csv')
pls_trans <- translate_taxa(pls.counts, pls_table ,id_cols = colnames(pls.counts)[1:3])
# Drop the ID/coordinate columns, keeping only taxon counts.
y <- pls_trans[,-c(1:3)]
#--------------------------------------------------------------------------------------------------------------------------------------------------
# extract coordinates of knots through k-means
# use k-means because it estimates a center of the coordinates
# NOTE(review): no set.seed() before kmeans, so knot placement is not
# reproducible between runs — confirm whether that is acceptable.
#-----------------------------------------------------------------------------------------------------------------------------------------------
clust_n <- 260
knot_coords = kmeans(veg_coords, clust_n)$centers
knot_coords = unname(knot_coords)
#--------------------------------------------------------------------------------------------------------------------
#plot coordinates and compute a distance matrix
#--------------------------------------------------------------------------------------------------------------------
plot(veg_coords,pch =15,cex = 0.25)
points(knot_coords,pch = 15,col='red')
distances <- stats::dist(knot_coords)
distances1 <- as.matrix(distances)
#find distance between nearest neighbours (smallest positive entry per column)
min.dist <- apply(distances1,2,function(x) min(x[x>0]))
# Summary of nearest-neighbour distances in kilometres.
s.m.d <- summary(min.dist/10^3)
#--------------------------------------------------------------------------------------------------------------------
# Distance distributions in kilometres.
hist(distances/10^3)
hist(min.dist/10^3)
#--------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------
#look at data from NEUS
#--------------------------------------------------------------------------------------------------------------------
library(fields)  # NOTE(review): fields is already attached at the top of the script
#-------------------------------------------------------------------------------------------------------------------------------------------
#load data that was used for calibration (is the same vegetation data)
#-------------------------------------------------------------------------------------------------------------------------------------------
# NOTE(review): assumes this .RData overwrites `veg_coords` with the NEUS
# coordinates — confirm the objects it provides.
load('~/stepps_data/elicitation_neus_certainty_median.RData')
#--------------------------------------------------------------------------------------------------------------------------------------------------
# extract coordinates of knots through k-means
# use k-means because it estimates a center of the coordinates
#-----------------------------------------------------------------------------------------------------------------------------------------------
clust_n <- 230
knot_coords_neus = kmeans(veg_coords, clust_n)$centers
knot_coords_neus = unname(knot_coords_neus)
#--------------------------------------------------------------------------------------------------------------------
#plot coordinates and compute a distance matrix
#--------------------------------------------------------------------------------------------------------------------
plot(veg_coords,pch =15,cex = 0.25)
points(knot_coords_neus,pch = 15,col='red')
distances.neus <- stats::dist(knot_coords_neus)
distances.neus1 <- as.matrix(distances.neus)
#find distance between nearest neighbours (smallest positive entry per column)
min.dist.neus <- apply(distances.neus1,2,function(x) min(x[x>0]))
s.m.d.neus <- summary(min.dist.neus/10^3)
# Distance distributions in kilometres.
hist(distances.neus/10^3)
hist(min.dist.neus/10^3)
#compare nearest-neighbour distance summaries: UMW (260 knots) vs NEUS (230 knots)
cbind(s.m.d,s.m.d.neus)
#230 knots seems to give about the same distribution...
|
/vegetation/R/comparison_umw_neus.R
|
no_license
|
mtrachs/stepps_prediction_MT
|
R
| false
| false
| 4,926
|
r
|
#--------------------------------------------------------------------------------------------------------------------------------------------
#In this script vegetation is prepared to estimate the spatial model
#--------------------------------------------------------------------------------------------------------------------------------------------
# Select spatial knot locations for the STEPPS vegetation prediction model.
# Knots are placed with k-means on the vegetation grid coordinates, and the
# resulting nearest-neighbour distance distribution for the Upper Midwest (UMW)
# is compared against the NEUS domain to pick a comparable number of knots.
#--------------------------------------------------------------------------------------------------------------------------------------------
library(fields)
library(rstan)
library(stepps)
#-------------------------------------------------------------------------------------------------------------------
# NOTE(review): setwd() makes this script non-portable; paths below assume this
# exact directory layout.
setwd('~/workflow_stepps_prediction/vegetation/')
help.fun.loc <- 'helper_funs/'
data.loc <- 'data/'
plot.loc <- 'plots/'
#-------------------------------------------------------------------------------------------------------------------------------------------
# load data that was used for calibration (is the same vegetation data)
#-------------------------------------------------------------------------------------------------------------------------------------------
pls.counts <- read.csv('data/wiki_outputs/plss_composition_alb_v0.9-10.csv')
# First two columns are the grid-cell coordinates.
veg_coords <- pls.counts[,c('x','y')]
# merge species into other hardwood and other conifer
# colnames(pls.counts)
pls_table <- readr::read_csv('~/workflow_stepps_calibration/calibration/data/veg_trans_edited.csv')
# translate_taxa() is from the stepps package; it collapses raw taxa into the
# groups defined in pls_table.
pls_trans <- translate_taxa(pls.counts, pls_table ,id_cols = colnames(pls.counts)[1:3])
# Taxon counts only (coordinates/id columns dropped). Not used again below.
y <- pls_trans[,-c(1:3)]
#--------------------------------------------------------------------------------------------------------------------------------------------------
# extract coordinates of knots through k-means
# use k-means because it estimates a center of the coordinates
# NOTE(review): kmeans() uses random starts; consider set.seed() for
# reproducible knot locations.
#-----------------------------------------------------------------------------------------------------------------------------------------------
clust_n <- 260
knot_coords = kmeans(veg_coords, clust_n)$centers
knot_coords = unname(knot_coords)
#--------------------------------------------------------------------------------------------------------------------
# plot coordinates and compute a distance matrix
#--------------------------------------------------------------------------------------------------------------------
plot(veg_coords,pch =15,cex = 0.25)
points(knot_coords,pch = 15,col='red')
distances <- stats::dist(knot_coords)
distances1 <- as.matrix(distances)
# find distance between nearest neighbours (smallest positive entry per column)
min.dist <- apply(distances1,2,function(x) min(x[x>0]))
# Summary of nearest-neighbour distances in km (coordinates assumed in metres).
s.m.d <- summary(min.dist/10^3)
#--------------------------------------------------------------------------------------------------------------------
hist(distances/10^3)
hist(min.dist/10^3)
#--------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------
# look at data from NEUS
#--------------------------------------------------------------------------------------------------------------------
library(fields)
#-------------------------------------------------------------------------------------------------------------------------------------------
# load data that was used for calibration (is the same vegetation data)
# NOTE(review): this load() is expected to overwrite veg_coords with the NEUS
# grid coordinates -- confirm the .RData contents.
#-------------------------------------------------------------------------------------------------------------------------------------------
load('~/stepps_data/elicitation_neus_certainty_median.RData')
#--------------------------------------------------------------------------------------------------------------------------------------------------
# extract coordinates of knots through k-means
# use k-means because it estimates a center of the coordinates
#-----------------------------------------------------------------------------------------------------------------------------------------------
clust_n <- 230
knot_coords_neus = kmeans(veg_coords, clust_n)$centers
knot_coords_neus = unname(knot_coords_neus)
#--------------------------------------------------------------------------------------------------------------------
# plot coordinates and compute a distance matrix
#--------------------------------------------------------------------------------------------------------------------
plot(veg_coords,pch =15,cex = 0.25)
points(knot_coords_neus,pch = 15,col='red')
distances.neus <- stats::dist(knot_coords_neus)
distances.neus1 <- as.matrix(distances.neus)
# find distance between nearest neighbours
min.dist.neus <- apply(distances.neus1,2,function(x) min(x[x>0]))
s.m.d.neus <- summary(min.dist.neus/10^3)
hist(distances.neus/10^3)
hist(min.dist.neus/10^3)
# compare distances umw and neus
cbind(s.m.d,s.m.d.neus)
# 230 knots seems to give about the same distribution...
#--------------------------------------------------------------------------------------------------------------------
# distances are slightly smaller in the NEUS use fewer knots?
|
# Simulation for the identity-case mutual-information estimators (fig 4b).
# Helper functions (mi_ident_case, eye, get_abe, run_simulations) are defined
# in sim3source.R.
source("info_theory_sims/sim3source.R")
####
## Identity case
####
allresults <- list()
## parallelization
mc.reps <- 1e4
mc.abe <- 1e2
abe.each <- 1e2
mcc <- 39
data.reps <- 75
## problem params
# Signal strengths: ss^2 ranges over 0..200 in steps of 5.
ss <- sqrt(seq(0, 200, by = 5))
# data.reps replicates x 10 estimator columns x one slice per signal strength.
ress <- array(0, dim = c(data.reps, 10, length(ss)))
p <- 10
# True mutual information for each signal strength (Monte Carlo, 1e5 draws).
mi_trues <- sapply(ss, function(s) mi_ident_case(p, s/sqrt(p), 1e5))
for (ii in 1:length(ss)) {
  mult <- ss[ii]/sqrt(p)
  # B = mult * I_p: identity coefficient matrix scaled to the target signal.
  Bmat <- mult * eye(p)
  (mi_true <- mi_ident_case(p, mult, 1e5))
  ## bayes LS
  k.each <- 20
  t1 <- proc.time()
  (est_ls <- get_abe(Bmat, k.each, abe.each, mc.abe, mcc))
  proc.time() - t1
  ## data params
  m.folds <- 1
  r.each <- 1000
  r.train <- floor(0.5 * r.each)
  (N = m.folds * k.each * r.each)
  # t1 <- proc.time()
  # run_simulation(Bmat, m.folds, k.each, r.each, r.train)
  # proc.time() - t1
  ## full-scale
  t1 <- proc.time()
  res <- run_simulations(Bmat, m.folds, k.each, r.each, r.train, mcc, data.reps)
  proc.time() - t1
  ress[, , ii] <- res
}
ress[, , 1]
# Plotting: overlay 10th-90th percentile bands of the estimators from two
# saved runs (fig4 provides columns 1:4, fig4b provides column 8).
load("info_theory_sims/fig4.Rdata", verbose = TRUE)
ress <- allresults[[1]]$ress
data.reps <- dim(ress)[1]
# li/ui index the 10th and 90th percentile order statistics.
li <- floor(0.1 * data.reps + 1); ui <- floor(0.9 * data.reps + 1)
# Median index (computed but not used below).
m_i <- floor(0.5 * data.reps)
lowers0 <- t(apply(ress[,c(1:4),], c(2, 3), function(v) sort(v)[li]))
uppers0 <- t(apply(ress[,c(1:4),], c(2, 3), function(v) sort(v)[ui]))
load("info_theory_sims/fig4b.Rdata", verbose = TRUE)
ress <- allresults[[1]]$ress
data.reps <- dim(ress)[1]
li <- floor(0.1 * data.reps + 1); ui <- floor(0.9 * data.reps + 1)
m_i <- floor(0.5 * data.reps)
lowers <- t(apply(ress[,c(1:4, 8),], c(2, 3), function(v) sort(v)[li]))
uppers <- t(apply(ress[,c(1:4, 8),], c(2, 3), function(v) sort(v)[ui]))
# Replace the first four estimator bands with the fig4 run's values.
lowers[, 1:4] <- lowers0
uppers[, 1:4] <- uppers0
plot(NA, NA, xlab = "I", ylab = expression(hat(I)), xlim = c(0, max(mi_trues)),
     ylim = c(0, max(uppers)))
cols <- c(hsv(h = 0:3/4, s = 0.9, v = 0.5), hsv(0, 0, 0))
# Filled percentile bands, one per estimator.
for (i in 1:5) {
  #lines(mi_trues, meds[, i], col = cols[i], lwd = 4)
  polygon(c(mi_trues, rev(mi_trues)), c(lowers[, i], rev(uppers[, i])), col = cols[i],
          border = cols[i])
}
# Re-draw the band outlines on top so overlapping fills do not hide them.
for (i in 1:5) {
  polygon(c(mi_trues, rev(mi_trues)), c(lowers[, i], rev(uppers[, i])), col = NA,
          border = cols[i], lwd = 3)
}
# Reference line: perfect estimation (I-hat = I).
abline(0, 1, lwd = 3, col = "white", lty = 2)
## save results
# packet <- list(ss=ss, m.folds = m.folds,
#                k.each = k.each, r.each = r.each, r.train = r.train,
#                mi_true, est_ls = est_ls, ress = ress,
#                mc.reps = mc.reps, mc.abe = mc.abe)
# allresults <- c(allresults, list(packet))
# save(allresults, file = 'info_theory_sims/fig4b.Rdata')
|
/info_theory_sims/sim3i_fig4b.R
|
no_license
|
snarles/fmri
|
R
| false
| false
| 2,638
|
r
|
# Simulation for the identity-case mutual-information estimators (fig 4b).
# Helper functions (mi_ident_case, eye, get_abe, run_simulations) are defined
# in sim3source.R.
source("info_theory_sims/sim3source.R")
####
## Identity case
####
allresults <- list()
## parallelization
mc.reps <- 1e4
mc.abe <- 1e2
abe.each <- 1e2
mcc <- 39
data.reps <- 75
## problem params
# Signal strengths: ss^2 ranges over 0..200 in steps of 5.
ss <- sqrt(seq(0, 200, by = 5))
# data.reps replicates x 10 estimator columns x one slice per signal strength.
ress <- array(0, dim = c(data.reps, 10, length(ss)))
p <- 10
# True mutual information for each signal strength (Monte Carlo, 1e5 draws).
mi_trues <- sapply(ss, function(s) mi_ident_case(p, s/sqrt(p), 1e5))
for (ii in 1:length(ss)) {
  mult <- ss[ii]/sqrt(p)
  # B = mult * I_p: identity coefficient matrix scaled to the target signal.
  Bmat <- mult * eye(p)
  (mi_true <- mi_ident_case(p, mult, 1e5))
  ## bayes LS
  k.each <- 20
  t1 <- proc.time()
  (est_ls <- get_abe(Bmat, k.each, abe.each, mc.abe, mcc))
  proc.time() - t1
  ## data params
  m.folds <- 1
  r.each <- 1000
  r.train <- floor(0.5 * r.each)
  (N = m.folds * k.each * r.each)
  # t1 <- proc.time()
  # run_simulation(Bmat, m.folds, k.each, r.each, r.train)
  # proc.time() - t1
  ## full-scale
  t1 <- proc.time()
  res <- run_simulations(Bmat, m.folds, k.each, r.each, r.train, mcc, data.reps)
  proc.time() - t1
  ress[, , ii] <- res
}
ress[, , 1]
# Plotting: overlay 10th-90th percentile bands of the estimators from two
# saved runs (fig4 provides columns 1:4, fig4b provides column 8).
load("info_theory_sims/fig4.Rdata", verbose = TRUE)
ress <- allresults[[1]]$ress
data.reps <- dim(ress)[1]
# li/ui index the 10th and 90th percentile order statistics.
li <- floor(0.1 * data.reps + 1); ui <- floor(0.9 * data.reps + 1)
# Median index (computed but not used below).
m_i <- floor(0.5 * data.reps)
lowers0 <- t(apply(ress[,c(1:4),], c(2, 3), function(v) sort(v)[li]))
uppers0 <- t(apply(ress[,c(1:4),], c(2, 3), function(v) sort(v)[ui]))
load("info_theory_sims/fig4b.Rdata", verbose = TRUE)
ress <- allresults[[1]]$ress
data.reps <- dim(ress)[1]
li <- floor(0.1 * data.reps + 1); ui <- floor(0.9 * data.reps + 1)
m_i <- floor(0.5 * data.reps)
lowers <- t(apply(ress[,c(1:4, 8),], c(2, 3), function(v) sort(v)[li]))
uppers <- t(apply(ress[,c(1:4, 8),], c(2, 3), function(v) sort(v)[ui]))
# Replace the first four estimator bands with the fig4 run's values.
lowers[, 1:4] <- lowers0
uppers[, 1:4] <- uppers0
plot(NA, NA, xlab = "I", ylab = expression(hat(I)), xlim = c(0, max(mi_trues)),
     ylim = c(0, max(uppers)))
cols <- c(hsv(h = 0:3/4, s = 0.9, v = 0.5), hsv(0, 0, 0))
# Filled percentile bands, one per estimator.
for (i in 1:5) {
  #lines(mi_trues, meds[, i], col = cols[i], lwd = 4)
  polygon(c(mi_trues, rev(mi_trues)), c(lowers[, i], rev(uppers[, i])), col = cols[i],
          border = cols[i])
}
# Re-draw the band outlines on top so overlapping fills do not hide them.
for (i in 1:5) {
  polygon(c(mi_trues, rev(mi_trues)), c(lowers[, i], rev(uppers[, i])), col = NA,
          border = cols[i], lwd = 3)
}
# Reference line: perfect estimation (I-hat = I).
abline(0, 1, lwd = 3, col = "white", lty = 2)
## save results
# packet <- list(ss=ss, m.folds = m.folds,
#                k.each = k.each, r.each = r.each, r.train = r.train,
#                mi_true, est_ls = est_ls, ress = ress,
#                mc.reps = mc.reps, mc.abe = mc.abe)
# allresults <- c(allresults, list(packet))
# save(allresults, file = 'info_theory_sims/fig4b.Rdata')
|
\name{sample_gamma_esem}
\alias{sample_gamma_esem}
\docType{data}
\title{
Replications of the estimated eta on x regression coefficient matrix
}
\description{
A list containing 200 replications of the estimated eta on x regression coefficient matrix provided by replication numbers 1 through 100 and 4701 through 4800 in Example 2 from Myers, Ahn, Lu, Celimli, and Zopluoglu (2016).
}
\usage{data(sample_gamma_esem)}
\format{
This dataset is a list that contains 200 replications of the estimated 4*1 eta on x regression coefficient matrix provided by replication numbers 1 through 100 and 4701 through 4800 in Example 2 from Myers, Ahn, Lu, Celimli, and Zopluoglu (2016).
}
|
/man/sample_gamma_esem.Rd
|
no_license
|
cran/REREFACT
|
R
| false
| false
| 674
|
rd
|
\name{sample_gamma_esem}
\alias{sample_gamma_esem}
\docType{data}
\title{
Replications of the estimated eta on x regression coefficient matrix
}
\description{
A list containing 200 replications of the estimated eta on x regression coefficient matrix provided by replication numbers 1 through 100 and 4701 through 4800 in Example 2 from Myers, Ahn, Lu, Celimli, and Zopluoglu (2016).
}
\usage{data(sample_gamma_esem)}
\format{
This dataset is a list that contains 200 replications of the estimated 4*1 eta on x regression coefficient matrix provided by replication numbers 1 through 100 and 4701 through 4800 in Example 2 from Myers, Ahn, Lu, Celimli, and Zopluoglu (2016).
}
|
#' Create a data frame of mean expression of genes per cluster
#'
#' This function takes an object of class iCellR and creates an average gene
#' expression for every cluster. The result is stored in the \code{clust.avg}
#' attribute of the returned object as a data frame with a \code{gene} column
#' followed by one \code{cluster_<i>} column per cluster (columns ordered by
#' name length then name, so cluster_2 precedes cluster_10).
#' @param x An object of class iCellR.
#' @return An object of class iCellR.
#' @examples
#' demo.obj <- clust.avg.exp(demo.obj)
#'
#' head(demo.obj@clust.avg)
#' @export
clust.avg.exp <- function (x = NULL) {
  # inherits() is the robust class test (works with subclasses and NULL).
  if (!inherits(x, "iCellR")) {
    stop("x should be an object of class iCellR")
  }
  # best.clust: one row per cell (rownames are cell IDs matching the column
  # names of main.data), with a "clusters" column giving the assignment.
  best.clust <- x@best.clust
  sampleCondition <- best.clust$clusters
  conditions <- sort(unique(sampleCondition))
  Table <- x@main.data
  # Accumulate per-cluster averages in a list instead of assigning dynamically
  # named objects and recovering them with ls(pattern = "cluster_") + mget(),
  # which could silently pick up unrelated objects named "cluster_*".
  datalist <- vector("list", length(conditions))
  for (k in seq_along(conditions)) {
    i <- conditions[k]
    # Cell IDs belonging to cluster i.
    IDs <- rownames(best.clust)[sampleCondition == i]
    # drop = FALSE keeps a one-cell cluster as a one-column data frame.
    # Without it, single-cell clusters collapsed to an unnamed vector, the
    # gene (row) names were lost, and the merge on "gene" below broke.
    DATA <- as.matrix(Table[, which(names(Table) %in% IDs), drop = FALSE])
    message(paste(" Averaging gene expression for cluster:",i,"..."))
    message(paste(" Averaging",dim(DATA)[2],"cells ..."))
    # Row-wise mean across the cells of this cluster.
    avg <- as.data.frame(apply(DATA, 1, mean))
    colnames(avg) <- paste("cluster", i, sep = "_")
    avg <- cbind(gene = rownames(avg), avg)
    rownames(avg) <- NULL
    datalist[[k]] <- avg
  }
  # Merge all per-cluster tables on the shared "gene" column.
  MeanExpForClusters <- Reduce(function(a, b) merge(a, b), datalist)
  # Order columns by name length, then name: gene, cluster_1, ..., cluster_10.
  MeanExpForClusters <- MeanExpForClusters[order(nchar(colnames(MeanExpForClusters)), colnames(MeanExpForClusters))]
  attributes(x)$clust.avg <- MeanExpForClusters
  message("All done!")
  return(x)
}
|
/R/F021.clust.avg.exp.R
|
no_license
|
yandgong307/iCellR
|
R
| false
| false
| 2,033
|
r
|
#' Create a data frame of mean expression of genes per cluster
#'
#' This function takes an object of class iCellR and creates an average gene
#' expression for every cluster. The result is stored in the \code{clust.avg}
#' attribute of the returned object as a data frame with a \code{gene} column
#' followed by one \code{cluster_<i>} column per cluster (columns ordered by
#' name length then name, so cluster_2 precedes cluster_10).
#' @param x An object of class iCellR.
#' @return An object of class iCellR.
#' @examples
#' demo.obj <- clust.avg.exp(demo.obj)
#'
#' head(demo.obj@clust.avg)
#' @export
clust.avg.exp <- function (x = NULL) {
  # inherits() is the robust class test (works with subclasses and NULL).
  if (!inherits(x, "iCellR")) {
    stop("x should be an object of class iCellR")
  }
  # best.clust: one row per cell (rownames are cell IDs matching the column
  # names of main.data), with a "clusters" column giving the assignment.
  best.clust <- x@best.clust
  sampleCondition <- best.clust$clusters
  conditions <- sort(unique(sampleCondition))
  Table <- x@main.data
  # Accumulate per-cluster averages in a list instead of assigning dynamically
  # named objects and recovering them with ls(pattern = "cluster_") + mget(),
  # which could silently pick up unrelated objects named "cluster_*".
  datalist <- vector("list", length(conditions))
  for (k in seq_along(conditions)) {
    i <- conditions[k]
    # Cell IDs belonging to cluster i.
    IDs <- rownames(best.clust)[sampleCondition == i]
    # drop = FALSE keeps a one-cell cluster as a one-column data frame.
    # Without it, single-cell clusters collapsed to an unnamed vector, the
    # gene (row) names were lost, and the merge on "gene" below broke.
    DATA <- as.matrix(Table[, which(names(Table) %in% IDs), drop = FALSE])
    message(paste(" Averaging gene expression for cluster:",i,"..."))
    message(paste(" Averaging",dim(DATA)[2],"cells ..."))
    # Row-wise mean across the cells of this cluster.
    avg <- as.data.frame(apply(DATA, 1, mean))
    colnames(avg) <- paste("cluster", i, sep = "_")
    avg <- cbind(gene = rownames(avg), avg)
    rownames(avg) <- NULL
    datalist[[k]] <- avg
  }
  # Merge all per-cluster tables on the shared "gene" column.
  MeanExpForClusters <- Reduce(function(a, b) merge(a, b), datalist)
  # Order columns by name length, then name: gene, cluster_1, ..., cluster_10.
  MeanExpForClusters <- MeanExpForClusters[order(nchar(colnames(MeanExpForClusters)), colnames(MeanExpForClusters))]
  attributes(x)$clust.avg <- MeanExpForClusters
  message("All done!")
  return(x)
}
|
# ----------------------------------------------------------------------------
# PROJECT
# Name: *
# Professor: *
# Author: Heather Low
# ----------------------------------------------------------------------------
# CODE
# Name: 1-
# Date: *
# Purpose: Wrangle checks and points.
# Input: "all_employeesInChecks.RData", "Co_checks.RData", "all_employeesInChecks.RData", "Co_checks.RData"
# Output: Co_checks_dt, Co_pts, Co_checks_pts
# ----------------------------------------------------------------------------
# Set-Up
# NOTE(review): proj-helpers.R is assumed to attach dplyr/lubridate/
# data.table/xts (mutate, dhours, as.data.table, align.time, ...) and to
# define load_proj_data() and make_processed_data_paths() -- confirm before
# running this script standalone.
source("./proj-helpers.R")
# ---------------------------------------------------------------------
################################# Checks ##############################
# Data
proj_data <- c("all_employeesInChecks.RData", "Co_checks.RData")
load_proj_data(proj_data)
# - unique, subset, merge employees and checks
all_employeesInChecks_1 <- unique(all_employeesInChecks)
rm("all_employeesInChecks")
Co_checks_1 <- unique(Co_checks)
Co_checks_1 <- subset(Co_checks_1, select = c("ID", "OpenDate", "PrintDate", "Unit_ID", "Total", "Gratuity", "PartySize", "SeatsServed"))
rm("Co_checks")
# Attach the employee who handled each check (check ID is the join key).
Co_checks_1a <- merge(Co_checks_1, all_employeesInChecks_1, by.x = c("ID"), by.y = c("Check_ID"))
rm(list = c("all_employeesInChecks_1", "Co_checks_1"))
sapply(Co_checks_1a,typeof) ##
############################### Date & Time
names(Co_checks_1a)[1] <- "Check_ID"
checks_dt <- Co_checks_1a
rm(Co_checks_1a)
############################### By Calendar Date
# Change time zone (from assumed GMT/UTC to EST): shift timestamps by -5h.
checks_dt$OpenDate <- as.POSIXct(checks_dt$OpenDate) - dhours(5)
checks_dt$CloseDate <- as.POSIXct(checks_dt$PrintDate) - dhours(5)
# CheckOpenTime (clock time as "HH:MM:SS" string, used for lexicographic
# comparisons below)
checks_dt <- mutate(checks_dt, CheckOpenTime = format(as.POSIXct(checks_dt$OpenDate), format = "%H:%M:%S"))
# CheckCloseTime
checks_dt <- mutate(checks_dt, CheckCloseTime = format(as.POSIXct(checks_dt$PrintDate), format = "%H:%M:%S"))
# Year
checks_dt$Year <-format(as.Date(checks_dt$OpenDate), format ="%Y", tz = "EST")
# Week (ISO week number)
checks_dt$Week <-format(as.Date(checks_dt$OpenDate), format ="%V", tz = "EST")
# WeekDay (abbreviated day name)
checks_dt <- mutate(checks_dt, WeekDay = format(as.POSIXct(checks_dt$OpenDate), format = "%a"))
# WeekDay_Numeric: 0 = Sunday ... 6 = Saturday
checks_dt$WeekDay_Numeric <- 0
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Sun"] <- 0
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Mon"] <- 1
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Tue"] <- 2
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Wed"] <- 3
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Thu"] <- 4
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Fri"] <- 5
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Sat"] <- 6
############################### By Shift Date
# ShiftDay_Numeric: checks opened before 06:00 belong to the previous day's
# shift (a restaurant "business day" rolls over at 6am, not midnight).
checks_dt$ShiftDay_Numeric <- 0
next_day <- "06:00:00"
checks_dt$ShiftDay_Numeric <- ifelse(checks_dt$CheckOpenTime < next_day, checks_dt$WeekDay_Numeric - 1, checks_dt$WeekDay_Numeric)
# - 1am Sat labeled 0 for Sun, -1 should be 6 for Sat
checks_dt$ShiftDay_Numeric[checks_dt$ShiftDay_Numeric == -1] <- 6
# ShiftDay
checks_dt$ShiftDay <- 0
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 0 ] <- "Sun"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 1 ] <- "Mon"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 2 ] <- "Tue"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 3 ] <- "Wed"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 4 ] <- "Thu"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 5 ] <- "Fri"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 6 ] <- "Sat"
# ShiftDate: calendar date of the shift; after-midnight checks are rolled
# back by one day to the shift they belong to.
names(checks_dt)[1] <- "Check_ID"
checks_dt$ShiftDate <- checks_dt$OpenDate
afterMidnightCheckID_list <- checks_dt$Check_ID[checks_dt$ShiftDay_Numeric < checks_dt$WeekDay_Numeric]
checks_dt_AfterMidnight <- subset(checks_dt, checks_dt$Check_ID %in% afterMidnightCheckID_list)
checks_dt_AfterMidnight$ShiftDate <- checks_dt_AfterMidnight$ShiftDate - ddays(1)
checks_dt$ShiftDate[match(checks_dt_AfterMidnight$Check_ID, checks_dt$Check_ID)] <- as.POSIXct(with_tz(strptime(checks_dt_AfterMidnight$ShiftDate, "%Y-%m-%d", tz = "EST"), tz = "EST"))
checks_dt <- mutate(checks_dt, ShiftDate = format(as.POSIXct(checks_dt$ShiftDate), format = "%Y-%m-%d"))
rm(afterMidnightCheckID_list, checks_dt_AfterMidnight, next_day)
############################### In Times
# In-time = earliest check open time per employee per shift date, used as a
# proxy for when the employee started work.
Co_inTimes <- checks_dt
Co_inTimes <- select(Co_inTimes, ShiftDate, OpenDate, Employee_ID)
Co_inTimes <- as.data.table(Co_inTimes)
setkey(Co_inTimes, ShiftDate)
Co_inTimes <- Co_inTimes[,head(OpenDate, n = 1), by = list(ShiftDate, Employee_ID)]
names(Co_inTimes)[3] <- "InTime"
# Round to half-hour (align.time() from xts aligns to the next 30-min boundary)
typeof(Co_inTimes$InTime) ##
class(Co_inTimes$InTime) ##
Co_inTimes$InTime[1]
Co_inTimes$InTime <- align.time(Co_inTimes$InTime, 30*60)
Co_inTimes$InTime <- format(Co_inTimes$InTime, format ="%H:%M", tz = "EST") # makes character
# Ad-hoc sanity checks on unusual in-times (t is discarded below).
t <- Co_inTimes[Co_inTimes$InTime < "09:00",]
t <- Co_inTimes[Co_inTimes$InTime > "18:30",]
t <- Co_inTimes[Co_inTimes$InTime > "20:30",]
# all these odd in-times less than *%
############################# Define Shift with In Times
# Use in-time to define shift
Co_inTimes$Shift <- 0
# 1pm cut off for counting to lunch in-time
Co_inTimes$Shift <- ifelse(Co_inTimes$InTime < "13:00", "Lunch", "Dinner")
# Add in-times to checks table
checks_dt <- merge(checks_dt, Co_inTimes, by = c("ShiftDate", "Employee_ID"))
# Label Double: started before 1pm (lunch) but also opened checks after 5pm.
checks_dt$Double <- 0
checks_dt$Double <- ifelse(checks_dt$InTime < "13:00" & checks_dt$CheckOpenTime > "17:00", TRUE, FALSE)
t <- checks_dt[checks_dt$Double == TRUE,]
# info
test <- as.data.table(checks_dt)
Co_freq_Dbl <- test[, .N ,by = checks_dt$Double]
# Double shifts: checks opened after 5pm count as the Dinner shift.
checks_dt$Shift[checks_dt$CheckOpenTime > "17:00" & checks_dt$Double == TRUE] <- "Dinner"
t <- checks_dt[checks_dt$Employee_ID == "7592" & checks_dt$Double == TRUE & checks_dt$ShiftDate == "2010-10-03",]
t <- checks_dt[checks_dt$Employee_ID == "7592" & checks_dt$ShiftDate == "2010-10-03",] # doesn't label lunch double, only dinner as a second shift, but shift labels correct
rm(test, Co_freq_Dbl, t)
# Label Date_Shift (e.g. "2010-10-03_Dinner"; later the join key with points)
checks_dt <- mutate(checks_dt, Date_Shift = paste(checks_dt$ShiftDate, checks_dt$Shift, sep = "_"))
# Label Day_Shift (e.g. "Sun_Dinner")
checks_dt <- mutate(checks_dt, Day_Shift = paste(checks_dt$ShiftDay, checks_dt$Shift, sep = "_"))
# Tidy columns
colnames(checks_dt)
head(checks_dt)
# NOTE(review): column reordering is by position; fragile if the column set
# above changes.
# t <- checks_dt[c(3, 1, 14, 15, 4, 5, 12, 13, 23, 19, 21, 20, 22, 24, 2, 6:10)]
checks_dt <- checks_dt[c(3, 1, 14, 15, 4, 5, 12, 13, 23, 19, 21, 20, 22, 24, 2, 6:10)]
Co_checks_dt <- checks_dt
rm(checks_dt)
# ---------------------------------------------------------------------
# Save checks with new date-time formats
save(Co_checks_dt, file = make_processed_data_paths("Co_checks_dt.RData"))
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
################################# Points ##############################
# Data
proj_data <- c("all_employees.RData", "theProjectData.RData")
load_proj_data(proj_data)
# Older upsell table lacks the normalized-point columns; pad with 0 so the
# two tables can be row-bound.
upsellSlices$NormalizedUserTotalPoints <- 0
upsellSlices$NormalizedSkillAveragePoints <- 0
upsellSlices$NormalizedSkillTotalPoints <- 0
all_upsellSlices <- rbind(upsellSlices, upsellSlices2)
rm(upsellSlices, upsellSlices2)
# Remove duplicates by UserContext_ID
# NOTE(review): this %in% unique(ID) filter keeps every row -- it does not
# actually deduplicate; verify intent (unique() on the whole table happens
# below instead).
upsellSlicesID_list <- unique(all_upsellSlices$ID)
all_upsellSlices <- subset(all_upsellSlices, all_upsellSlices$ID %in% upsellSlicesID_list)
# UpsellSlices- restrict to the four Co restaurant locations
locations <- c("34", "35", "36", "37")
Co_all_upsellSlices <- unique(subset(all_upsellSlices, all_upsellSlices$UnitID %in% locations))
names(Co_all_upsellSlices)[2] <- "UserContext_ID"
# Employees- Just Co Employees
Co_all_employees <- subset(all_employees, all_employees$Unit_ID %in% locations)
Co_all_employees$Unit_ID <- as.character(Co_all_employees$Unit_ID)
# Merge UpsellSlices and Employees to add Employee_ID
names(Co_all_upsellSlices)[1] <- "UpsellSlicesID"
Co_all_empUpsellSlices2 <- merge(Co_all_upsellSlices, Co_all_employees, by = "UserContext_ID") #
names(Co_all_empUpsellSlices2)[20] <- "Employee_ID"
# Employees- Remove rows with empty FirstName, look like duplicates, remove.
# NOTE(review): FirstName > 0 is a string comparison; "" > 0 is FALSE so empty
# names drop out, but confirm this is the intended test.
Co_all_empUpsellSlices2 <- subset(Co_all_empUpsellSlices2, Co_all_empUpsellSlices2$FirstName > 0)
# Dates
Co_all_empUpsellSlices2$ShiftDate <- as.POSIXct(Co_all_empUpsellSlices2$StartTime)
Co_all_empUpsellSlices2$ShiftDate <- format(Co_all_empUpsellSlices2$ShiftDate, "%Y-%m-%d")
Co_all_empUpsellSlices2$ShiftDate[1] ## "2014-06-08 EDT"
Co_points <- Co_all_empUpsellSlices2
colnames(Co_points)
Co_points <- select(Co_points, UpsellSlicesID, StartTime, ShiftDate, Employee_ID, UserTotalPoints, NormalizedUserTotalPoints, Unit_ID, FirstName, LastName)
Co_points <- unique(Co_points)
# Keep only rows with a real normalized score.
Co_points <- subset(Co_points, NormalizedUserTotalPoints != 0)
Co_points <- Co_points[order(Co_points$ShiftDate, Co_points$Employee_ID),]
# Adjust for time zone (same -5h shift as the checks table)
Co_points$StartTime <- as.POSIXct(Co_points$StartTime) - dhours(5)
# StartTime_Hr ("HH:MM" string for lexicographic comparisons)
Co_points <- mutate(Co_points, StartTime_Hr = format(as.POSIXct(Co_points$StartTime), format = "%H:%M"))
# WeekDay
Co_points <- mutate(Co_points, WeekDay = format(as.POSIXct(Co_points$StartTime), format = "%a"))
# WeekDay_Numeric: 0 = Sunday ... 6 = Saturday
Co_points$WeekDay_Numeric <- 0
Co_points$WeekDay_Numeric[Co_points$WeekDay == "Sun"] <- 0
Co_points$WeekDay_Numeric[Co_points$WeekDay == "Mon"] <- 1
Co_points$WeekDay_Numeric[Co_points$WeekDay == "Tue"] <- 2
Co_points$WeekDay_Numeric[Co_points$WeekDay == "Wed"] <- 3
Co_points$WeekDay_Numeric[Co_points$WeekDay == "Thu"] <- 4
Co_points$WeekDay_Numeric[Co_points$WeekDay == "Fri"] <- 5
Co_points$WeekDay_Numeric[Co_points$WeekDay == "Sat"] <- 6
############################### By Shift Date
# ShiftDay_Numeric: same 6am business-day rollover as for checks.
Co_points$ShiftDay_Numeric <- 0
next_day <- "06:00:00"
Co_points$ShiftDay_Numeric <- ifelse(Co_points$StartTime_Hr < next_day, Co_points$WeekDay_Numeric - 1, Co_points$WeekDay_Numeric)
Co_points$ShiftDay_Numeric[Co_points$ShiftDay_Numeric == -1] <- 6 # 1am Sat labeled 0 for Sun, -1 should be 6 for Sat
head(Co_points)
# ShiftDay
Co_points$ShiftDay <- 0
Co_points$ShiftDay[Co_points$ShiftDay_Numeric == 0 ] <- "Sun"
Co_points$ShiftDay[Co_points$ShiftDay_Numeric == 1 ] <- "Mon"
Co_points$ShiftDay[Co_points$ShiftDay_Numeric == 2 ] <- "Tue"
Co_points$ShiftDay[Co_points$ShiftDay_Numeric == 3 ] <- "Wed"
Co_points$ShiftDay[Co_points$ShiftDay_Numeric == 4 ] <- "Thu"
Co_points$ShiftDay[Co_points$ShiftDay_Numeric == 5 ] <- "Fri"
Co_points$ShiftDay[Co_points$ShiftDay_Numeric == 6 ] <- "Sat"
# ShiftDate: roll after-midnight slices back one day, mirroring the checks.
Co_points$Date_Shift <- Co_points$StartTime
afterMidnightUpsellSlicesID_list <- Co_points$UpsellSlicesID[Co_points$ShiftDay_Numeric < Co_points$WeekDay_Numeric]
Co_points_AfterMidnight <- subset(Co_points, Co_points$UpsellSlicesID %in% afterMidnightUpsellSlicesID_list)
Co_points_AfterMidnight$Date_Shift <- as.POSIXct(Co_points_AfterMidnight$Date_Shift) - ddays(1)
Co_points$Date_Shift[match(Co_points_AfterMidnight$UpsellSlicesID, Co_points$UpsellSlicesID)] <- as.POSIXct(Co_points_AfterMidnight$Date_Shift, "%Y-%m-%d", tz = "EST") #
rm(afterMidnightUpsellSlicesID_list, Co_points_AfterMidnight, next_day)
# Attach the employee's in-time / Shift label derived from the checks table.
# NOTE(review): Co_inTimes comes from the Checks section above -- this section
# cannot run on its own.
Co_points <- merge.data.frame(Co_points, Co_inTimes, by = c("ShiftDate", "Employee_ID"))
############################# Label Date_Shift
Co_points$Date_Shift <- strptime(Co_points$Date_Shift, format = "%Y-%m-%d")
Co_points <- mutate(Co_points, Date_Shift = paste(Co_points$Date_Shift, Co_points$Shift, sep = "_"))
Co_points_dt <- select(Co_points, UpsellSlicesID, ShiftDate, WeekDay, Shift, Employee_ID, FirstName, LastName, Date_Shift, StartTime, UserTotalPoints, NormalizedUserTotalPoints)
# ---------------------------------------------------------------------
# Save points with new date-time formats
# save_proj_data("Co_pts_dt")
save(Co_points_dt, file = make_processed_data_paths("Co_points_dt.RData"))
# ---------------------------------------------------------------------
# info
length(unique(Co_points_dt$Employee_ID))
length(unique(Co_checks_dt$Employee_ID))
# ---------------------------------------------------------------------
#################### Merge Checks and Points #########################
# Merge checks and points on employee and shift (full join keeps checks
# without points and points without checks).
x <- Co_checks_dt
y <- Co_points_dt
Co_checks_pts <- full_join(x, y, by = c("Employee_ID", "Date_Shift"))
colnames(Co_checks_pts)
# Edit names
names(Co_checks_pts)[2] <- "Shift_Date"
names(Co_checks_pts)[11] <- "Shift"
# NOTE(review): positional deletes -- after the first [22] <- NULL the
# remaining columns shift left, so [23] removes what was originally
# column 24. Verify these indices against colnames() output.
Co_checks_pts[22] <- NULL
colnames(Co_checks_pts)
Co_checks_pts[23] <- NULL
colnames(Co_checks_pts)
Co_checks_pts <- unique(Co_checks_pts) # same
# Remove any duplicate check ids that might be left over from merging
check_ids <- Co_checks_pts$Check_ID
check_ids <- unique(Co_checks_pts$Check_ID)
head(check_ids)
Co_checks_pts <-Co_checks_pts[!duplicated(Co_checks_pts$Check_ID),]
# info
length(unique(Co_checks_pts$Employee_ID))
# ---------------------------------------------------------------------
# Save merged checks and points tables
save(Co_checks_pts, file = make_processed_data_paths("Co_checks_pts.RData"))
# ---------------------------------------------------------------------
|
/1-clean.R
|
no_license
|
hl-py/restaurantData
|
R
| false
| false
| 13,073
|
r
|
# ----------------------------------------------------------------------------
# PROJECT
# Name: *
# Professor: *
# Author: Heather Low
# ----------------------------------------------------------------------------
# CODE
# Name: 1-
# Date: *
# Purpose: Wrangle checks and points.
# Input: "all_employeesInChecks.RData", "Co_checks.RData", "all_employeesInChecks.RData", "Co_checks.RData"
# Output: Co_checks_dt, Co_pts, Co_checks_pts
# ----------------------------------------------------------------------------
# Set-Up
source("./proj-helpers.R")
# ---------------------------------------------------------------------
################################# Checks ##############################
# Data
proj_data <- c("all_employeesInChecks.RData", "Co_checks.RData")
load_proj_data(proj_data)
# - unique, subset, merge employees and checks
all_employeesInChecks_1 <- unique(all_employeesInChecks)
rm("all_employeesInChecks")
Co_checks_1 <- unique(Co_checks)
Co_checks_1 <- subset(Co_checks_1, select = c("ID", "OpenDate", "PrintDate", "Unit_ID", "Total", "Gratuity", "PartySize", "SeatsServed"))
rm("Co_checks")
Co_checks_1a <- merge(Co_checks_1, all_employeesInChecks_1, by.x = c("ID"), by.y = c("Check_ID"))
rm(list = c("all_employeesInChecks_1", "Co_checks_1"))
sapply(Co_checks_1a,typeof) ##
############################### Date & Time
names(Co_checks_1a)[1] <- "Check_ID"
checks_dt <- Co_checks_1a
rm(Co_checks_1a)
############################### By Calander Date
# Change time zone (from assumed GMT/UTC to EST)
checks_dt$OpenDate <- as.POSIXct(checks_dt$OpenDate) - dhours(5)
checks_dt$CloseDate <- as.POSIXct(checks_dt$PrintDate) - dhours(5)
# CheckOpenTime
checks_dt <- mutate(checks_dt, CheckOpenTime = format(as.POSIXct(checks_dt$OpenDate), format = "%H:%M:%S"))
# CheckCloseTime
checks_dt <- mutate(checks_dt, CheckCloseTime = format(as.POSIXct(checks_dt$PrintDate), format = "%H:%M:%S"))
# Year
checks_dt$Year <-format(as.Date(checks_dt$OpenDate), format ="%Y", tz = "EST")
# Week
checks_dt$Week <-format(as.Date(checks_dt$OpenDate), format ="%V", tz = "EST")
# WeekDay
checks_dt <- mutate(checks_dt, WeekDay = format(as.POSIXct(checks_dt$OpenDate), format = "%a"))
# WeekDay_Numeric
checks_dt$WeekDay_Numeric <- 0
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Sun"] <- 0
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Mon"] <- 1
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Tue"] <- 2
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Wed"] <- 3
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Thu"] <- 4
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Fri"] <- 5
checks_dt$WeekDay_Numeric[checks_dt$WeekDay == "Sat"] <- 6
############################### By Shift Date
# ShiftDay_Numeric
checks_dt$ShiftDay_Numeric <- 0
next_day <- "06:00:00"
checks_dt$ShiftDay_Numeric <- ifelse(checks_dt$CheckOpenTime < next_day, checks_dt$WeekDay_Numeric - 1, checks_dt$WeekDay_Numeric)
# - 1am Sat labled 0 for Sun, -1 should be 6 for Sat
checks_dt$ShiftDay_Numeric[checks_dt$ShiftDay_Numeric == -1] <- 6
# ShiftDay
checks_dt$ShiftDay <- 0
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 0 ] <- "Sun"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 1 ] <- "Mon"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 2 ] <- "Tue"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 3 ] <- "Wed"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 4 ] <- "Thu"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 5 ] <- "Fri"
checks_dt$ShiftDay[checks_dt$ShiftDay_Numeric == 6 ] <- "Sat"
# ShiftDate
names(checks_dt)[1] <- "Check_ID"
checks_dt$ShiftDate <- checks_dt$OpenDate
afterMidnightCheckID_list <- checks_dt$Check_ID[checks_dt$ShiftDay_Numeric < checks_dt$WeekDay_Numeric]
checks_dt_AfterMidnight <- subset(checks_dt, checks_dt$Check_ID %in% afterMidnightCheckID_list)
checks_dt_AfterMidnight$ShiftDate <- checks_dt_AfterMidnight$ShiftDate - ddays(1)
checks_dt$ShiftDate[match(checks_dt_AfterMidnight$Check_ID, checks_dt$Check_ID)] <- as.POSIXct(with_tz(strptime(checks_dt_AfterMidnight$ShiftDate, "%Y-%m-%d", tz = "EST"), tz = "EST"))
checks_dt <- mutate(checks_dt, ShiftDate = format(as.POSIXct(checks_dt$ShiftDate), format = "%Y-%m-%d"))
rm(afterMidnightCheckID_list, checks_dt_AfterMidnight, next_day)
############################### In Times
Co_inTimes <- checks_dt
Co_inTimes <- select(Co_inTimes, ShiftDate, OpenDate, Employee_ID)
Co_inTimes <- as.data.table(Co_inTimes)
setkey(Co_inTimes, ShiftDate)
Co_inTimes <- Co_inTimes[,head(OpenDate, n = 1), by = list(ShiftDate, Employee_ID)]
names(Co_inTimes)[3] <- "InTime"
# Round to half-hour
typeof(Co_inTimes$InTime) ##
class(Co_inTimes$InTime) ##
Co_inTimes$InTime[1]
Co_inTimes$InTime <- align.time(Co_inTimes$InTime, 30*60)
Co_inTimes$InTime <- format(Co_inTimes$InTime, format ="%H:%M", tz = "EST") # makes character
t <- Co_inTimes[Co_inTimes$InTime < "09:00",]
t <- Co_inTimes[Co_inTimes$InTime > "18:30",]
t <- Co_inTimes[Co_inTimes$InTime > "20:30",]
# all these odd in-times less than *%
############################# Define Shift with In Times
# Use in-time to define shift
Co_inTimes$Shift <- 0
# 1pm cut off for counting to lunch in-time
Co_inTimes$Shift <- ifelse(Co_inTimes$InTime < "13:00", "Lunch", "Dinner")
# Add in-times to checks table
checks_dt <- merge(checks_dt, Co_inTimes, by = c("ShiftDate", "Employee_ID"))
# Label Double
# A "double" = employee clocked in before 1pm (lunch in-time) but also has
# checks opened after 5pm, i.e. worked both shifts that day.
checks_dt$Double <- 0
checks_dt$Double <- ifelse(checks_dt$InTime < "13:00" & checks_dt$CheckOpenTime > "17:00", TRUE, FALSE)
t <- checks_dt[checks_dt$Double == TRUE,]
# info
# Quick frequency count of doubles (scratch objects, removed below).
test <- as.data.table(checks_dt)
Co_freq_Dbl <- test[, .N ,by = checks_dt$Double]
# For doubles, re-label the after-5pm checks as Dinner; the in-time-based
# Shift column would otherwise have called them Lunch.
checks_dt$Shift[checks_dt$CheckOpenTime > "17:00" & checks_dt$Double == TRUE] <- "Dinner"
t <- checks_dt[checks_dt$Employee_ID == "7592" & checks_dt$Double == TRUE & checks_dt$ShiftDate == "2010-10-03",]
t <- checks_dt[checks_dt$Employee_ID == "7592" & checks_dt$ShiftDate == "2010-10-03",] # doesn't label lunch double, only dinner as a second shift, but shift labels correct
rm(test, Co_freq_Dbl, t)
# Label Date_Shift -- composite key used later to join checks with points.
checks_dt <- mutate(checks_dt, Date_Shift = paste(checks_dt$ShiftDate, checks_dt$Shift, sep = "_"))
# Label Day_Shift
checks_dt <- mutate(checks_dt, Day_Shift = paste(checks_dt$ShiftDay, checks_dt$Shift, sep = "_"))
# Tidy columns
colnames(checks_dt)
head(checks_dt)
# t <- checks_dt[c(3, 1, 14, 15, 4, 5, 12, 13, 23, 19, 21, 20, 22, 24, 2, 6:10)]
# NOTE(review): positional column reordering is fragile -- it breaks
# silently if any upstream step adds or removes a column; prefer names.
checks_dt <- checks_dt[c(3, 1, 14, 15, 4, 5, 12, 13, 23, 19, 21, 20, 22, 24, 2, 6:10)]
Co_checks_dt <- checks_dt
rm(checks_dt)
# ---------------------------------------------------------------------
# Save checks with new date-time formats
save(Co_checks_dt, file = make_processed_data_paths("Co_checks_dt.RData"))
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
################################# Points ##############################
# Data
proj_data <- c("all_employees.RData", "theProjectData.RData")
load_proj_data(proj_data)
upsellSlices$NormalizedUserTotalPoints <- 0
upsellSlices$NormalizedSkillAveragePoints <- 0
upsellSlices$NormalizedSkillTotalPoints <- 0
all_upsellSlices <- rbind(upsellSlices, upsellSlices2)
rm(upsellSlices, upsellSlices2)
# Remove duplicates by UserContext_ID
upsellSlicesID_list <- unique(all_upsellSlices$ID)
all_upsellSlices <- subset(all_upsellSlices, all_upsellSlices$ID %in% upsellSlicesID_list)
# UpsellSlices-
locations <- c("34", "35", "36", "37")
Co_all_upsellSlices <- unique(subset(all_upsellSlices, all_upsellSlices$UnitID %in% locations))
names(Co_all_upsellSlices)[2] <- "UserContext_ID"
# Employees- Just Co Employees
Co_all_employees <- subset(all_employees, all_employees$Unit_ID %in% locations)
Co_all_employees$Unit_ID <- as.character(Co_all_employees$Unit_ID)
# Merge UpsellSlices and Employees to add Employee_ID
names(Co_all_upsellSlices)[1] <- "UpsellSlicesID"
Co_all_empUpsellSlices2 <- merge(Co_all_upsellSlices, Co_all_employees, by = "UserContext_ID") #
names(Co_all_empUpsellSlices2)[20] <- "Employee_ID"
# Employees- Remove rows with empty FirstName, look like duplicates, remove.
Co_all_empUpsellSlices2 <- subset(Co_all_empUpsellSlices2, Co_all_empUpsellSlices2$FirstName > 0)
# Dates
Co_all_empUpsellSlices2$ShiftDate <- as.POSIXct(Co_all_empUpsellSlices2$StartTime)
Co_all_empUpsellSlices2$ShiftDate <- format(Co_all_empUpsellSlices2$ShiftDate, "%Y-%m-%d")
Co_all_empUpsellSlices2$ShiftDate[1] ## "2014-06-08 EDT"
Co_points <- Co_all_empUpsellSlices2
colnames(Co_points)
Co_points <- select(Co_points, UpsellSlicesID, StartTime, ShiftDate, Employee_ID, UserTotalPoints, NormalizedUserTotalPoints, Unit_ID, FirstName, LastName)
Co_points <- unique(Co_points)
Co_points <- subset(Co_points, NormalizedUserTotalPoints != 0)
Co_points <- Co_points[order(Co_points$ShiftDate, Co_points$Employee_ID),]
# Adjust for time zone
# NOTE(review): subtracting a fixed 5 hours converts UTC to EST only; it
# ignores daylight saving (EDT is UTC-4) -- confirm this is intended.
Co_points$StartTime <- as.POSIXct(Co_points$StartTime) - dhours(5)
# StartTime_Hr -- zero-padded "HH:MM" string used for shift-cutoff
# comparisons below.
Co_points <- mutate(Co_points, StartTime_Hr = format(as.POSIXct(Co_points$StartTime), format = "%H:%M"))
# WeekDay -- abbreviated day name ("Sun".."Sat"); locale-dependent.
Co_points <- mutate(Co_points, WeekDay = format(as.POSIXct(Co_points$StartTime), format = "%a"))
# WeekDay_Numeric: map the abbreviated weekday name to 0 (Sun) .. 6 (Sat)
# with a single vectorized match() lookup instead of seven masked
# assignments.
day_levels <- c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")
day_idx <- match(Co_points$WeekDay, day_levels)
# Unmatched / NA weekdays fall back to 0, matching the original's
# initialisation. NOTE(review): format(..., "%a") is locale-dependent;
# confirm the session locale yields English abbreviations.
Co_points$WeekDay_Numeric <- ifelse(is.na(day_idx), 0, day_idx - 1)
rm(day_levels, day_idx)
############################### By Shift Date
# ShiftDay_Numeric
Co_points$ShiftDay_Numeric <- 0
next_day <- "06:00:00"
Co_points$ShiftDay_Numeric <- ifelse(Co_points$StartTime_Hr < next_day, Co_points$WeekDay_Numeric - 1, Co_points$WeekDay_Numeric)
Co_points$ShiftDay_Numeric[Co_points$ShiftDay_Numeric == -1] <- 6 # 1am Sat labled 0 for Sun, -1 should be 6 for Sat
head(Co_points)
# ShiftDay: inverse mapping of ShiftDay_Numeric (0 = Sun .. 6 = Sat) via a
# single vectorized index instead of seven masked assignments.
# ShiftDay_Numeric is always 0..6 by construction above, so the +1 index
# is in range; an NA numeric would yield NA here -- TODO confirm no NAs.
Co_points$ShiftDay <- c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")[Co_points$ShiftDay_Numeric + 1]
# ShiftDate
Co_points$Date_Shift <- Co_points$StartTime
afterMidnightUpsellSlicesID_list <- Co_points$UpsellSlicesID[Co_points$ShiftDay_Numeric < Co_points$WeekDay_Numeric]
Co_points_AfterMidnight <- subset(Co_points, Co_points$UpsellSlicesID %in% afterMidnightUpsellSlicesID_list)
Co_points_AfterMidnight$Date_Shift <- as.POSIXct(Co_points_AfterMidnight$Date_Shift) - ddays(1)
Co_points$Date_Shift[match(Co_points_AfterMidnight$UpsellSlicesID, Co_points$UpsellSlicesID)] <- as.POSIXct(Co_points_AfterMidnight$Date_Shift, "%Y-%m-%d", tz = "EST") #
rm(afterMidnightUpsellSlicesID_list, Co_points_AfterMidnight, next_day)
Co_points <- merge.data.frame(Co_points, Co_inTimes, by = c("ShiftDate", "Employee_ID"))
############################# Label Date_Shift
Co_points$Date_Shift <- strptime(Co_points$Date_Shift, format = "%Y-%m-%d")
Co_points <- mutate(Co_points, Date_Shift = paste(Co_points$Date_Shift, Co_points$Shift, sep = "_"))
Co_points_dt <- select(Co_points, UpsellSlicesID, ShiftDate, WeekDay, Shift, Employee_ID, FirstName, LastName, Date_Shift, StartTime, UserTotalPoints, NormalizedUserTotalPoints)
# ---------------------------------------------------------------------
# Save points with new date-time formats
# save_proj_data("Co_pts_dt")
save(Co_points_dt, file = make_processed_data_paths("Co_points_dt.RData"))
# ---------------------------------------------------------------------
# info
length(unique(Co_points_dt$Employee_ID))
length(unique(Co_checks_dt$Employee_ID))
# ---------------------------------------------------------------------
#################### Merge Checks and Points ##########################
# Merge
# Full outer join so checks without points and points without checks are
# both retained; keyed on employee and the composite Date_Shift label.
x <- Co_checks_dt
y <- Co_points_dt
Co_checks_pts <- full_join(x, y, by = c("Employee_ID", "Date_Shift"))
colnames(Co_checks_pts)
# Edit names
# NOTE(review): renaming and dropping by position (columns 2, 11, 22, 23)
# is fragile -- it silently targets the wrong column if the join output
# ever changes shape; prefer renaming/dropping by name.
names(Co_checks_pts)[2] <- "Shift_Date"
names(Co_checks_pts)[11] <- "Shift"
Co_checks_pts[22] <- NULL
colnames(Co_checks_pts)
# After the first deletion, column 23 here is the original column 24.
Co_checks_pts[23] <- NULL
colnames(Co_checks_pts)
Co_checks_pts <- unique(Co_checks_pts) # same
# Remove any duplicate check ids that might be left over from merging
check_ids <- Co_checks_pts$Check_ID
check_ids <- unique(Co_checks_pts$Check_ID)
head(check_ids)
Co_checks_pts <-Co_checks_pts[!duplicated(Co_checks_pts$Check_ID),]
# info
length(unique(Co_checks_pts$Employee_ID))
# ---------------------------------------------------------------------
# Save merged checks and points tables
save(Co_checks_pts, file = make_processed_data_paths("Co_checks_pts.RData"))
# ---------------------------------------------------------------------
|
boston = read.csv("boston.csv")
#video2
str(boston)
plot(boston$LON, boston$LAT)
points(boston$LON[boston$CHAS==1], boston$LAT[boston$CHAS==1], col="blue", pch=19)
points(boston$LON[boston$TRACT==3531], boston$LAT[boston$TRACT==3531], col="red", pch=20)
summary(boston$NOX)
points(boston$LON[boston$NOX>=0.55], boston$LAT[boston$NOX>=0.55], col="green", pch=19)
plot(boston$LON, boston$LAT)
summary(boston$MEDV)
points(boston$LON[boston$MEDV>=21.2], boston$LAT[boston$MEDV>=21.2], col="red", pch=20)
#video3
plot(boston$LAT, boston$MEDV)
plot(boston$LON, boston$MEDV)
latlonlm = lm(MEDV ~ LAT + LON, data=boston)
summary(latlonlm)
plot(boston$LON, boston$LAT)
plot(boston$LON, boston$LAT)
points(boston$LON[boston$MEDV>=21.2], boston$LAT[boston$MEDV>=21.2], col="red", pch=20)
latlonlm$fitted.values
points(boston$LON[latlonlm$fitted.values >= 21.2], boston$LAT[latlonlm$fitted.values >= 21.2], col="blue", pch="$")
#video4
library(rpart)
library(rpart.plot)
latlontree = rpart(MEDV ~ LAT + LON, data=boston)
prp(latlontree)
plot(boston$LON, boston$LAT)
points(boston$LON[boston$MEDV>=21.2], boston$LAT[boston$MEDV>=21.2], col="red", pch=20)
fittedvalues = predict(latlontree)
# Overlay tracts whose fitted value is at/above the median price. Use the
# same ">= 21.2" mask for both coordinates: the original mixed ">" and
# ">=", which produces LON and LAT subsets of different lengths (mispaired
# points) whenever a fitted value equals exactly 21.2.
points(boston$LON[fittedvalues>=21.2], boston$LAT[fittedvalues>=21.2], col="blue", pch="$")
latlontree = rpart(MEDV ~ LAT + LON, data=boston, minbucket=50)
plot(latlontree)
text(latlontree)
plot(boston$LON, boston$LAT)
abline(v=-71.07)
text(latlontree)
plot(boston$LON, boston$LAT)
abline(v=-71.07)
abline(h=42.21)
abline(h=42.17)
points(boston$LON[boston$MEDV>=21.2], boston$LAT[boston$MEDV>=21.2], col="red", pch=20)
#Video5
library(caTools)
set.seed(123)
split = sample.split(boston$MEDV, SplitRatio=0.7)
train = subset(boston, split == TRUE)
test = subset(boston, split == FALSE)
linreg = lm(MEDV ~ LAT + LON + CRIM + ZN + INDUS + CHAS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO, data=train)
summary(linreg)
linreg.pred = predict(linreg, newdata=test)
linreg.sse = sum((linreg.pred - test$MEDV)^2)
linreg.sse
tree = rpart(MEDV ~ LAT + LON + CRIM + ZN + INDUS + CHAS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO, data=train)
prp(tree)
tree.pred = predict(tree, newdata=test)
tree.sse = sum((tree.pred - test$MEDV)^2)
tree.sse
#video7 cross validation
library(caret)
library(e1071)
tr.control = trainControl(method = "cv", number = 10)
cp.grid = expand.grid( .cp = (0:10)*0.001)
1*0.001
10*0.001
0:10
0:10 * 0.001
tr = train(MEDV ~ LAT + LON + CRIM + ZN + INDUS + CHAS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO, data = train, method = "rpart", trControl = tr.control, tuneGrid = cp.grid)
best.tree = tr$finalModel
prp(best.tree)
best.tree.pred = predict(best.tree, newdata=test)
best.tree.sse = sum((best.tree.pred - test$MEDV)^2)
best.tree.sse
|
/unit4/Unit4-Recitation-Boston.R
|
no_license
|
hmartineziii/15.071x
|
R
| false
| false
| 2,740
|
r
|
boston = read.csv("boston.csv")
#video2
str(boston)
plot(boston$LON, boston$LAT)
points(boston$LON[boston$CHAS==1], boston$LAT[boston$CHAS==1], col="blue", pch=19)
points(boston$LON[boston$TRACT==3531], boston$LAT[boston$TRACT==3531], col="red", pch=20)
summary(boston$NOX)
points(boston$LON[boston$NOX>=0.55], boston$LAT[boston$NOX>=0.55], col="green", pch=19)
plot(boston$LON, boston$LAT)
summary(boston$MEDV)
points(boston$LON[boston$MEDV>=21.2], boston$LAT[boston$MEDV>=21.2], col="red", pch=20)
#video3
plot(boston$LAT, boston$MEDV)
plot(boston$LON, boston$MEDV)
latlonlm = lm(MEDV ~ LAT + LON, data=boston)
summary(latlonlm)
plot(boston$LON, boston$LAT)
plot(boston$LON, boston$LAT)
points(boston$LON[boston$MEDV>=21.2], boston$LAT[boston$MEDV>=21.2], col="red", pch=20)
latlonlm$fitted.values
points(boston$LON[latlonlm$fitted.values >= 21.2], boston$LAT[latlonlm$fitted.values >= 21.2], col="blue", pch="$")
#video4
library(rpart)
library(rpart.plot)
latlontree = rpart(MEDV ~ LAT + LON, data=boston)
prp(latlontree)
plot(boston$LON, boston$LAT)
points(boston$LON[boston$MEDV>=21.2], boston$LAT[boston$MEDV>=21.2], col="red", pch=20)
fittedvalues = predict(latlontree)
# Same ">= 21.2" mask for both coordinates (the original mixed ">" and
# ">=", mispairing LON/LAT when a fitted value equals exactly 21.2).
points(boston$LON[fittedvalues>=21.2], boston$LAT[fittedvalues>=21.2], col="blue", pch="$")
latlontree = rpart(MEDV ~ LAT + LON, data=boston, minbucket=50)
plot(latlontree)
text(latlontree)
plot(boston$LON, boston$LAT)
abline(v=-71.07)
text(latlontree)
plot(boston$LON, boston$LAT)
abline(v=-71.07)
abline(h=42.21)
abline(h=42.17)
points(boston$LON[boston$MEDV>=21.2], boston$LAT[boston$MEDV>=21.2], col="red", pch=20)
#Video5
library(caTools)
set.seed(123)
split = sample.split(boston$MEDV, SplitRatio=0.7)
train = subset(boston, split == TRUE)
test = subset(boston, split == FALSE)
linreg = lm(MEDV ~ LAT + LON + CRIM + ZN + INDUS + CHAS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO, data=train)
summary(linreg)
linreg.pred = predict(linreg, newdata=test)
linreg.sse = sum((linreg.pred - test$MEDV)^2)
linreg.sse
tree = rpart(MEDV ~ LAT + LON + CRIM + ZN + INDUS + CHAS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO, data=train)
prp(tree)
tree.pred = predict(tree, newdata=test)
tree.sse = sum((tree.pred - test$MEDV)^2)
tree.sse
#video7 cross validation
library(caret)
library(e1071)
tr.control = trainControl(method = "cv", number = 10)
cp.grid = expand.grid( .cp = (0:10)*0.001)
1*0.001
10*0.001
0:10
0:10 * 0.001
tr = train(MEDV ~ LAT + LON + CRIM + ZN + INDUS + CHAS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO, data = train, method = "rpart", trControl = tr.control, tuneGrid = cp.grid)
best.tree = tr$finalModel
prp(best.tree)
best.tree.pred = predict(best.tree, newdata=test)
best.tree.sse = sum((best.tree.pred - test$MEDV)^2)
best.tree.sse
|
# Exercise 4: Creating and operating on vectors
# `names` holds my name plus the names of the two people next to me.
names <- c("Soobin", "Tejveer", "Emily")
# The colon operator builds the integer sequence 10 through 49.
n <- 10:49
# Report how many elements `n` contains.
length(n)
# `m` counts down from 10 to 1 (seq() with only from/to steps by -1).
m <- seq(from = 10, to = 1)
# Element-wise subtraction of `m` from `n`; the shorter `m` is recycled.
n - m
# `x` spans -5 to 10 in increments of 0.1.
x <- seq(from = -5, to = 10, by = 0.1)
# Apply sin() and cos() across every element of `x`.
sin.wave <- sin(x)
cos.wave <- cos(x)
# Combine the two waves: their product plus the sine component.
wave <- sin.wave * cos.wave + sin.wave
# Visualise the resulting waveform.
plot(wave)
|
/exercise-4/exercise.R
|
permissive
|
soobkwon/module7-vectors
|
R
| false
| false
| 1,039
|
r
|
# Exercise 4: Creating and operating on vectors
# Create a vector `names` that contains your name and the names of 2 people next to you.
names <- c("Soobin", "Tejveer", "Emily")
# Use the colon operator : to create a vector `n` of numbers from 10:49
n <- 10:49
# Use `length()` to get the number of elements in `n`
length(n)
# Create a vector `m` that contains the numbers 10 to 1. Hint: use the `seq()` function
m <- seq(10, 1)
# Subtract `m` FROM `n`. Note the recycling!
n - m
# Use the `seq()` function to produce a range of numbers from -5 to 10 in `.1` increments.
# Store it in a variable `x`
x <- seq(-5, 10, .1)
# Create a vector `sin.wave` by calling the `sin()` function on each element in `x`.
sin.wave <- sin(x)
# Create a vector `cos.wave` by calling the `cos()` function on each element in `x`.
cos.wave <- cos(x)
# Create a vector `wave` by multiplying `sin.wave` and `cos.wave` together, then adding `sin.wave`
wave <- (sin.wave * cos.wave) + sin.wave
# Use the `plot()` function to plot your `wave`!
plot(wave)
|
# Compute per-node statistics for a hierarchical clustering tree.
#
# myinput     -- a "dist" object, a numeric matrix of observations (rows),
#                or an already-built "hclust" object.
# mystat      -- character vector of statistics to return: any of "fldc",
#                "bldc", "fldcc", "slb", or "all".
# method      -- agglomeration method passed to hclust().
# metric      -- either a dist() method name, a correlation measure
#                ("pearson"/"kendall"/"spearman", using 1 - cor as the
#                distance), or "define.metric" to call a user-supplied
#                function of that name located via match.fun().
# metric.args -- list of extra arguments for the user-supplied metric.
#
# Returns the hclust merge table (index1, index2, height, clustersize)
# augmented with the requested per-node statistic columns.
TreeStat <-
function(myinput,mystat,method="complete",metric="euclidean",metric.args=list()){
	#index table
	# Build (or accept) the hclust tree depending on the input class.
	if(data.class(myinput)=="dist")hc<-hclust(myinput,method=method)
	if(data.class(myinput)=="matrix"){
		if(metric=="define.metric"){
			#define.metric<-match.fun(define.metric)
			# Look up the user-defined metric function by name, then call
			# it as define.metric(myinput, metric.args...).
			define.metric<-match.fun(metric)
			mymetric.args<-vector("list",length(metric.args)+1)
			mymetric.args[[1]]<-myinput
			if(length(mymetric.args)>1){mymetric.args[2:length(mymetric.args)]<-
			metric.args}
			mydis<-do.call(define.metric,mymetric.args)
			mydis<-data.matrix(mydis)
			#mydis<-define.metric(myinput,...)
			hc<-hclust(as.dist(mydis),method=method)
		}
		else{
			if(metric!="pearson"&metric!="kendall"&metric!="spearman"){
				hc<-hclust(dist(myinput,method=metric),method=method)
			}
			# Correlation metrics: distance = 1 - cor between rows.
			if(metric=="pearson"|metric=="kendall"|metric=="spearman"){
				hc<-hclust(as.dist(1-cor(t(myinput),method=metric,
				use="pairwise.complete.obs")),method=method)
			}
		}
	}
	if(data.class(myinput)=="hclust")hc<-myinput
	if(data.class(myinput)!="dist"&data.class(myinput)!="matrix"&
	data.class(myinput)!="hclust")stop("Inappropriate input data")
	# One row per internal node: the two merged children (negative index =
	# singleton leaf, positive = earlier merge row) and the merge height.
	indextable<-cbind(hc$merge,hc$height)
	dimnames(indextable)[[2]]<-c("index1","index2","height")
	#cluster size
	# Number of leaves under each node, propagated bottom-up: leaves count
	# 1; iterate until every node's left + right size is resolved.
	clustersize<-rep(NA,nrow(indextable))
	csleft<-rep(NA,nrow(indextable))
	csleft[indextable[,"index1"]<0]<-1
	csright<-rep(NA,nrow(indextable))
	csright[indextable[,"index2"]<0]<-1
	while(is.na(sum(clustersize))){
		clustersize<-csleft+csright
		csleft[indextable[,"index1"]>0]<-
		clustersize[indextable[indextable[,"index1"]>0,"index1"]]
		csright[indextable[,"index2"]>0]<-
		clustersize[indextable[indextable[,"index2"]>0,"index2"]]
	}
	#fldc
	# Relative height drop from parent (hp) to child (hc), (hp - hc) / hp,
	# stored on the child node. NOTE(review): the name `hc` is reused here
	# as a plain height vector -- the hclust object is no longer needed.
	fldc<-rep(0,nrow(indextable))
	hp<-indextable[indextable[,"index1"]>0,"height"]
	hc<-indextable[indextable[indextable[,"index1"]>0,"index1"],"height"]
	fldc[indextable[indextable[,"index1"]>0,"index1"]]<-(hp-hc)/hp
	hp<-indextable[indextable[,"index2"]>0,"height"]
	hc<-indextable[indextable[indextable[,"index2"]>0,"index2"],"height"]
	fldc[indextable[indextable[,"index2"]>0,"index2"]]<-(hp-hc)/hp
	#NaN values occur hc==hp==0
	fldc[is.na(fldc)]<-0
	fldc<-abs(fldc)
	#fldcc
	# Variant of fldc: (hp - (h_child1 - h_child2) / 2) / h_child, again
	# stored on each child node. (`fldcs` is assigned but never used.)
	fldcc<-rep(0,nrow(indextable))
	fldcs<-rep(0,nrow(indextable))
	hp<-indextable[,"height"]
	hc1<-rep(0,nrow(indextable))
	hc1[indextable[,"index1"]>0]<-
	indextable[indextable[indextable[,"index1"]>0,"index1"],"height"]
	hc2<-rep(0,nrow(indextable))
	hc2[indextable[,"index2"]>0]<-
	indextable[indextable[indextable[,"index2"]>0,"index2"],"height"]
	hdif<-hp-(hc1-hc2)/2
	fldcc[indextable[indextable[,"index1"]>0,"index1"]]<-
	hdif[indextable[,"index1"]>0]/hc1[indextable[,"index1"]>0]
	fldcc[indextable[indextable[,"index2"]>0,"index2"]]<-
	hdif[indextable[,"index2"]>0]/hc2[indextable[,"index2"]>0]
	fldcc[is.na(fldcc)]<-0
	#when children node has height 0
	# Division by a zero child height gives Inf; cap at a large finite value.
	fldcc[fldcc=="Inf"]<-1e5
	rm(hp,hdif,hc1,hc2)
	#bldc
	# Per-node statistic (2H - h_left - h_right) / (2H), with H the node's
	# own height and leaf children contributing height 0. (`sl`/`sr` hold
	# the children's cluster sizes but are not used afterwards.)
	bldc<-rep(0,nrow(indextable))
	hl<-rep(0,nrow(indextable))
	hr<-rep(0,nrow(indextable))
	sl<-rep(1,nrow(indextable))
	sr<-rep(1,nrow(indextable))
	hl[indextable[,"index1"]>0]<-
	indextable[indextable[indextable[,"index1"]>0,"index1"],"height"]
	hr[indextable[,"index2"]>0]<-
	indextable[indextable[indextable[,"index2"]>0,"index2"],"height"]
	sl[indextable[,"index1"]>0]<-
	clustersize[indextable[indextable[,"index1"]>0,"index1"]]
	sr[indextable[,"index2"]>0]<-
	clustersize[indextable[indextable[,"index2"]>0,"index2"]]
	bldc<-(2*indextable[,"height"]-hl-hr)/2/indextable[,"height"]
	#NaN values occur when node heightH==hl==hr
	bldc[is.na(bldc)]<-0
	# slb: the unnormalised numerator of bldc, 2H - h_left - h_right.
	slb<-2*indextable[,"height"]-hl-hr
	slb[is.na(slb)]<-0
	#output statistics
	# "all" returns every statistic except the trailing slb column;
	# otherwise return the four bookkeeping columns plus the requested
	# statistics (matched by name to column offsets 5..8).
	indextable<-cbind(indextable,clustersize,fldc,bldc,fldcc,slb)
	if(any(mystat=="all"))return(indextable[,-ncol(indextable)])
	if(!any(mystat=="all")){
	m<-4+match(mystat,c("fldc","bldc","fldcc","slb"))
	indextable<-indextable[,c(1:4,m)]
	return(indextable)
	}
}
|
/R/TreeStat.R
|
no_license
|
cran/TBEST
|
R
| false
| false
| 4,655
|
r
|
TreeStat <-
function(myinput,mystat,method="complete",metric="euclidean",metric.args=list()){
#index table
if(data.class(myinput)=="dist")hc<-hclust(myinput,method=method)
if(data.class(myinput)=="matrix"){
if(metric=="define.metric"){
#define.metric<-match.fun(define.metric)
define.metric<-match.fun(metric)
mymetric.args<-vector("list",length(metric.args)+1)
mymetric.args[[1]]<-myinput
if(length(mymetric.args)>1){mymetric.args[2:length(mymetric.args)]<-
metric.args}
mydis<-do.call(define.metric,mymetric.args)
mydis<-data.matrix(mydis)
#mydis<-define.metric(myinput,...)
hc<-hclust(as.dist(mydis),method=method)
}
else{
if(metric!="pearson"&metric!="kendall"&metric!="spearman"){
hc<-hclust(dist(myinput,method=metric),method=method)
}
if(metric=="pearson"|metric=="kendall"|metric=="spearman"){
hc<-hclust(as.dist(1-cor(t(myinput),method=metric,
use="pairwise.complete.obs")),method=method)
}
}
}
if(data.class(myinput)=="hclust")hc<-myinput
if(data.class(myinput)!="dist"&data.class(myinput)!="matrix"&
data.class(myinput)!="hclust")stop("Inappropriate input data")
indextable<-cbind(hc$merge,hc$height)
dimnames(indextable)[[2]]<-c("index1","index2","height")
#cluster size
clustersize<-rep(NA,nrow(indextable))
csleft<-rep(NA,nrow(indextable))
csleft[indextable[,"index1"]<0]<-1
csright<-rep(NA,nrow(indextable))
csright[indextable[,"index2"]<0]<-1
while(is.na(sum(clustersize))){
clustersize<-csleft+csright
csleft[indextable[,"index1"]>0]<-
clustersize[indextable[indextable[,"index1"]>0,"index1"]]
csright[indextable[,"index2"]>0]<-
clustersize[indextable[indextable[,"index2"]>0,"index2"]]
}
#fldc
fldc<-rep(0,nrow(indextable))
hp<-indextable[indextable[,"index1"]>0,"height"]
hc<-indextable[indextable[indextable[,"index1"]>0,"index1"],"height"]
fldc[indextable[indextable[,"index1"]>0,"index1"]]<-(hp-hc)/hp
hp<-indextable[indextable[,"index2"]>0,"height"]
hc<-indextable[indextable[indextable[,"index2"]>0,"index2"],"height"]
fldc[indextable[indextable[,"index2"]>0,"index2"]]<-(hp-hc)/hp
#NaN values occur hc==hp==0
fldc[is.na(fldc)]<-0
fldc<-abs(fldc)
#fldcc
fldcc<-rep(0,nrow(indextable))
fldcs<-rep(0,nrow(indextable))
hp<-indextable[,"height"]
hc1<-rep(0,nrow(indextable))
hc1[indextable[,"index1"]>0]<-
indextable[indextable[indextable[,"index1"]>0,"index1"],"height"]
hc2<-rep(0,nrow(indextable))
hc2[indextable[,"index2"]>0]<-
indextable[indextable[indextable[,"index2"]>0,"index2"],"height"]
hdif<-hp-(hc1-hc2)/2
fldcc[indextable[indextable[,"index1"]>0,"index1"]]<-
hdif[indextable[,"index1"]>0]/hc1[indextable[,"index1"]>0]
fldcc[indextable[indextable[,"index2"]>0,"index2"]]<-
hdif[indextable[,"index2"]>0]/hc2[indextable[,"index2"]>0]
fldcc[is.na(fldcc)]<-0
#when children node has height 0
fldcc[fldcc=="Inf"]<-1e5
rm(hp,hdif,hc1,hc2)
#bldc
bldc<-rep(0,nrow(indextable))
hl<-rep(0,nrow(indextable))
hr<-rep(0,nrow(indextable))
sl<-rep(1,nrow(indextable))
sr<-rep(1,nrow(indextable))
hl[indextable[,"index1"]>0]<-
indextable[indextable[indextable[,"index1"]>0,"index1"],"height"]
hr[indextable[,"index2"]>0]<-
indextable[indextable[indextable[,"index2"]>0,"index2"],"height"]
sl[indextable[,"index1"]>0]<-
clustersize[indextable[indextable[,"index1"]>0,"index1"]]
sr[indextable[,"index2"]>0]<-
clustersize[indextable[indextable[,"index2"]>0,"index2"]]
bldc<-(2*indextable[,"height"]-hl-hr)/2/indextable[,"height"]
#NaN values occur when node heightH==hl==hr
bldc[is.na(bldc)]<-0
slb<-2*indextable[,"height"]-hl-hr
slb[is.na(slb)]<-0
#output statistics
indextable<-cbind(indextable,clustersize,fldc,bldc,fldcc,slb)
if(any(mystat=="all"))return(indextable[,-ncol(indextable)])
if(!any(mystat=="all")){
m<-4+match(mystat,c("fldc","bldc","fldcc","slb"))
indextable<-indextable[,c(1:4,m)]
return(indextable)
}
}
|
#install.packages("alphavantager")
library(alphavantager)
library(shiny)
library(readr)
ui <- fluidPage(
textInput("Stock","US Stock Ticker ","MSFT"),
actionButton("go","Go"),
dataTableOutput("df"),
h3("Stock"),
# textOutput("text")
plotOutput("p1")
)
server <- function(input, output, session) {
  av_api_key("JF0AY4TAAGLRAH6B")
  # To speed up download, we use compact to download recent 100 days.
  # outputsize is default to "compact"
  df_res <- eventReactive(input$go, {
    Stock <- isolate(input$Stock)
    res <- av_get(Stock, av_fun = "TIME_SERIES_DAILY_ADJUSTED", outputsize = "compact")
    # Cache the downloaded series. The original called saveRDS(df_res(), ...)
    # from inside this reactive -- i.e. the reactive invoked itself
    # recursively and errored on the first click -- and made saveRDS's
    # return value the reactive's result. Save the plain data and return it.
    saveRDS(res, file = "data.Rds")
    res
  })
  # The UI declares dataTableOutput("df") but the original never rendered
  # it; wire it up so the downloaded table is shown.
  output$df <- renderDataTable(df_res())
  # Plot adjusted close over time for the fetched ticker.
  output$p1 <- renderPlot({
    plot(df_res()$timestamp, df_res()$adjusted_close)
    lines(df_res()$timestamp, df_res()$adjusted_close)
  })
}
shinyApp(ui, server)
|
/2020/Assignment-2020/Individual/FE8828-Ge Weitong/Assignment 2/Assignment 2_3.R
|
no_license
|
leafyoung/fe8828
|
R
| false
| false
| 960
|
r
|
#install.packages("alphavantager")
library(alphavantager)
library(shiny)
library(readr)
ui <- fluidPage(
textInput("Stock","US Stock Ticker ","MSFT"),
actionButton("go","Go"),
dataTableOutput("df"),
h3("Stock"),
# textOutput("text")
plotOutput("p1")
)
server <- function(input, output, session) {
av_api_key("JF0AY4TAAGLRAH6B")
# To speed up download, we use compact to download recent 100 days.
# outputsize is default to "compact"
df_res <- eventReactive(input$go, {
Stock<-isolate(input$Stock)
df_res <- av_get(Stock,av_fun = "TIME_SERIES_DAILY_ADJUSTED",outputsize="compact")
df_res
#is.na(df_res) # TRUE
saveRDS(df_res(),file= "data.Rds")
})
#
# Load data from a file into a new variable `new_var`
# plot
output$p1 <- renderPlot({
plot(df_res()$timestamp, df_res()$adjusted_close)
lines(df_res()$timestamp, df_res()$adjusted_close)
})
}
shinyApp(ui, server)
|
context("sp genomemaps")
con <- ba_db()$sweetpotatobase
test_that("Genomemaps are present", {
  res <- ba_genomemaps(con = con)
  # expect_that(..., is_true()) is deprecated in testthat; use
  # expect_true() directly.
  expect_true(nrow(res) == 1)
})
test_that("Vector output is transformed", {
  res <- ba_genomemaps(con = con, rclass = "vector")
  expect_true("tbl_df" %in% class(res))
})
|
/tests/sweetpotatobase/test_sp_genomemaps.R
|
no_license
|
ClayBirkett/brapi
|
R
| false
| false
| 331
|
r
|
context("sp genomemaps")
con <- ba_db()$sweetpotatobase
test_that("Genomemaps are present", {
res <- ba_genomemaps(con = con)
expect_that(nrow(res) == 1, is_true())
})
test_that("Vector output is transformed", {
res <- ba_genomemaps(con = con, rclass = "vector")
expect_that("tbl_df" %in% class(res), is_true())
})
|
###########################################################################################
# #
# #
# Produce Plot of Gene Filtering Stability #
# #
# #
###########################################################################################
library(cowplot)
# Load results from stability analysis
load("Clustering_Result_Genes_Stability.RData")
res_gen <- res
res_gen [[2]] <- res_gen[[12]]
res_gen <- res_gen[-12]
load("Assigned_Cell_Types_Dataset4.RData")
# Find intersection of all subsampled datasets
load("Clustering_Result_Dataset4.RData")
library(mclust)
library(NMI)
colnames(res)[76:86] <- c("ascend", "CellRanger", "CIDR", "countClust", "RaceID2", "RaceID", "RCA", "SC3", "scran", "Seurat","TSCAN")
algorithms <- tolower(names(res_gen))
index <- match(algorithms, tolower(colnames(res)))
comp_cluster<-function(tmp, tmp1){
comp<-mclust::adjustedRandIndex(tmp, tmp1)
return(comp)
}
res_comp<-matrix(NA, ncol=length(res_gen), nrow=5)
colnames(res_comp)<-names(res_gen)
for(i in 1:length(res_gen)){
tmp1 <- res[, index[i]]
res_tmp <- sapply(res_gen[[i]], function(x) try(comp_cluster(x, tmp1)))
try(res_comp[,i]<-res_tmp)
}
res_comp <- as.data.frame(res_comp)
ind <- apply(res_comp, 1, function(x) grep("Error", x))
ind <- sapply(1:3, function(x) cbind(x, ind[[x]]))
ind <- Reduce(rbind, ind)
res_comp[ind] <- NA
rownames(res_comp) <- c("10%", "20%", "30%", "40%", "50%")
res_comp <- apply(res_comp, 2, as.numeric)
library(reshape2)
res_comp <- melt(res_comp)
colnames(res_comp) <- c("Percentage", "Method", "ARI_comp")
res_comp$Percentage <- res_comp$Percentage*10
res_comp$Percentage <- as.factor(res_comp$Percentage)
# Reproduce ggplot2's default discrete colour palette: n evenly spaced
# hues around the HCL colour wheel at luminance 65 and chroma 100.
# seq_len(n) replaces the original 1:n so that n = 0 returns an empty
# palette instead of indexing with c(1, 0).
gg_color_hue <- function(n) {
  hues <- seq(15, 375, length = n + 1)
  hcl(h = hues, l = 65, c = 100)[seq_len(n)]
}
colors_gg<- gg_color_hue(12)[-11]
library(ggplot2)
gg1 <- ggplot(res_comp, aes(x=`Percentage`, y=`ARI_comp`, group=Method)) + geom_point(aes(color=Method)) +
theme_minimal() + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + scale_color_manual(values=colors_gg) + geom_line(aes(color=Method)) + guides(color=FALSE)
gg1 <- ggdraw(gg1) + draw_plot_label("a")
## compare to truth
comp_truth<-function(tmp, assigned_cell_types){
comp <- mclust::adjustedRandIndex(tmp, assigned_cell_types)
return(comp)
}
res_truth <- matrix(NA, ncol=length(res_gen), nrow=5)
colnames(res_truth)<-names(res_gen)
for(i in 1:length(res_gen)){
res_tmp <- sapply(res_gen[[i]], function(x) try(comp_truth(x, assigned_cell_types$Assigned_CellType)))
try(res_truth[,i]<-res_tmp)
}
ind <- apply(res_truth, 1, function(x) grep("Error", x))
ind <- sapply(1:3, function(x) cbind(x, ind[[x]]))
ind <- Reduce(rbind, ind)
res_truth[ind] <- NA
res_truth <- apply(res_truth, 2, as.numeric)
library(reshape2)
res_truth <- melt(res_truth)
colnames(res_truth) <- c("Percentage", "Method", "ARI_truth")
res_truth$Percentage <- res_truth$Percentage*10
res_truth$Percentage <- as.factor(res_truth$Percentage)
gg2 <- ggplot(res_truth, aes(x=`Percentage`, y=`ARI_truth`, group=Method)) + geom_point(aes(color=Method)) + theme_minimal() + theme(axis.text.x = element_text(angle = 45, hjust = 1))+ scale_color_manual(values=colors_gg) + geom_line(aes(color=Method))
gg2 <- ggdraw(gg2) + draw_plot_label("b")
gg <- grid.arrange(gg1, gg2, ncol=2, widths=4:5)
ggsave(gg, file="Figure_Gene_Stability.pdf", width=12, height=5)
|
/silverstandard/analysis/Figure6_Genes_Stability_R_3.5.0.R
|
no_license
|
SaskiaFreytag/cluster_benchmarking_code
|
R
| false
| false
| 3,809
|
r
|
###########################################################################################
# #
# #
# Produce Plot of Gene Filtering Stability #
# #
# #
###########################################################################################
library(cowplot)
# Load results from stability analysis
load("Clustering_Result_Genes_Stability.RData")
res_gen <- res
res_gen [[2]] <- res_gen[[12]]
res_gen <- res_gen[-12]
load("Assigned_Cell_Types_Dataset4.RData")
# Find intersection of all subsampled datasets
load("Clustering_Result_Dataset4.RData")
library(mclust)
library(NMI)
colnames(res)[76:86] <- c("ascend", "CellRanger", "CIDR", "countClust", "RaceID2", "RaceID", "RCA", "SC3", "scran", "Seurat","TSCAN")
algorithms <- tolower(names(res_gen))
index <- match(algorithms, tolower(colnames(res)))
comp_cluster<-function(tmp, tmp1){
comp<-mclust::adjustedRandIndex(tmp, tmp1)
return(comp)
}
res_comp<-matrix(NA, ncol=length(res_gen), nrow=5)
colnames(res_comp)<-names(res_gen)
for(i in 1:length(res_gen)){
tmp1 <- res[, index[i]]
res_tmp <- sapply(res_gen[[i]], function(x) try(comp_cluster(x, tmp1)))
try(res_comp[,i]<-res_tmp)
}
res_comp <- as.data.frame(res_comp)
ind <- apply(res_comp, 1, function(x) grep("Error", x))
ind <- sapply(1:3, function(x) cbind(x, ind[[x]]))
ind <- Reduce(rbind, ind)
res_comp[ind] <- NA
rownames(res_comp) <- c("10%", "20%", "30%", "40%", "50%")
res_comp <- apply(res_comp, 2, as.numeric)
library(reshape2)
res_comp <- melt(res_comp)
colnames(res_comp) <- c("Percentage", "Method", "ARI_comp")
res_comp$Percentage <- res_comp$Percentage*10
res_comp$Percentage <- as.factor(res_comp$Percentage)
gg_color_hue <- function(n) {
hues = seq(15, 375, length = n + 1)
hcl(h = hues, l = 65, c = 100)[1:n]
}
colors_gg<- gg_color_hue(12)[-11]
library(ggplot2)
gg1 <- ggplot(res_comp, aes(x=`Percentage`, y=`ARI_comp`, group=Method)) + geom_point(aes(color=Method)) +
theme_minimal() + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + scale_color_manual(values=colors_gg) + geom_line(aes(color=Method)) + guides(color=FALSE)
gg1 <- ggdraw(gg1) + draw_plot_label("a")
## compare to truth
comp_truth<-function(tmp, assigned_cell_types){
comp <- mclust::adjustedRandIndex(tmp, assigned_cell_types)
return(comp)
}
res_truth <- matrix(NA, ncol=length(res_gen), nrow=5)
colnames(res_truth)<-names(res_gen)
for(i in 1:length(res_gen)){
res_tmp <- sapply(res_gen[[i]], function(x) try(comp_truth(x, assigned_cell_types$Assigned_CellType)))
try(res_truth[,i]<-res_tmp)
}
ind <- apply(res_truth, 1, function(x) grep("Error", x))
ind <- sapply(1:3, function(x) cbind(x, ind[[x]]))
ind <- Reduce(rbind, ind)
res_truth[ind] <- NA
res_truth <- apply(res_truth, 2, as.numeric)
library(reshape2)
res_truth <- melt(res_truth)
colnames(res_truth) <- c("Percentage", "Method", "ARI_truth")
res_truth$Percentage <- res_truth$Percentage*10
res_truth$Percentage <- as.factor(res_truth$Percentage)
gg2 <- ggplot(res_truth, aes(x=`Percentage`, y=`ARI_truth`, group=Method)) + geom_point(aes(color=Method)) + theme_minimal() + theme(axis.text.x = element_text(angle = 45, hjust = 1))+ scale_color_manual(values=colors_gg) + geom_line(aes(color=Method))
gg2 <- ggdraw(gg2) + draw_plot_label("b")
gg <- grid.arrange(gg1, gg2, ncol=2, widths=4:5)
ggsave(gg, file="Figure_Gene_Stability.pdf", width=12, height=5)
|
# testing the new fs function example
# NOTE(review): setwd() with a machine-specific path makes this script
# non-portable; consider relative paths or here::here().
setwd("x:/vervoort/research/ecohydrology/2dmodelling")
rdir <- "x:/vervoort/research/rcode/ecohydrology/2dmodelling"
# Load flux, soil and vegetation helpers; these define RWU(), Rc_B(), U(),
# Soil() and Veg() used below.
source(paste(rdir,"20120724_FluxfunctionsforElise.R",sep="/"))
source(paste(rdir,"soilfunction.R",sep="/"))
source(paste(rdir,"vegfunction.R",sep="/"))
soilpar <- Soil("L Med Clay")
vegpar <- Veg(vtype="TreesDR", soilpar=soilpar)
DR <- vegpar$DR
# This is a key variable based on Vervoort and van der Zee (2012)
# NOTE(review): the seq() assignment below is immediately overwritten by
# c(0.25, 0.5) -- dead code, possibly kept as an alternative range.
fs <- seq(0.2,0.7,length=5)
fs <- c(0.25,0.5)
# Depths (cm, plotted on the y axis) and mean groundwater depth.
Z <- seq(700,300)
Zmean <- 500
plot(RWU(Z,Zmean,fs[1]),Z,type="l",xlab="Root water uptake function",ylab="depth (cm)",
xlim=c(0,8),lwd=2,cex.axis=1.2,cex.lab=1.2,font.lab=2)
# Overlay curves for the remaining fs values.
for (i in 2:length(fs)) {
lines(RWU(Z,Zmean,fs[i]),Z,lty=i,col=i,lwd=2)
}
# this shows that the new root function already includes the effect of anoxia
# if water goes above a certain Z value the root water uptake decreases
plot(Rc_B(Z,vegpar$c1,vegpar$Zr,Zmean,fs[1]),Z,type="l",xlab="Root water uptake function",ylab="depth (cm)",
xlim=c(0,8),lwd=2,cex.axis=1.2,cex.lab=1.2,font.lab=2)
for (i in 2:length(fs)) {
lines(Rc_B(Z,vegpar$c1,vegpar$Zr,Zmean,fs[i]),Z,lty=i,col=i,lwd=2)
}
vegpar$c1 <- 1.5
plot(U(z1=Z,z2=vegpar$Zr,c1=vegpar$c1),Z)
# NOTE(review): this legend is drawn on the U() plot directly above, not
# on the RWU/Rc_B plots it describes, and it lists 5 line styles while
# fs has length 2 after the reassignment -- confirm intent.
legend("topright",paste("fs =",fs),lty=1:5,col=1:5,lwd=2,cex=1.2)
|
/OlderScripts/20130621_fs_function_example.R
|
permissive
|
WillemVervoort/Ecohydr2D
|
R
| false
| false
| 1,321
|
r
|
# testing the new fs function example
setwd("x:/vervoort/research/ecohydrology/2dmodelling")
rdir <- "x:/vervoort/research/rcode/ecohydrology/2dmodelling"
source(paste(rdir,"20120724_FluxfunctionsforElise.R",sep="/"))
source(paste(rdir,"soilfunction.R",sep="/"))
source(paste(rdir,"vegfunction.R",sep="/"))
soilpar <- Soil("L Med Clay")
vegpar <- Veg(vtype="TreesDR", soilpar=soilpar)
DR <- vegpar$DR
# This is a key variable based on Vervoort and van der Zee (2012)
fs <- seq(0.2,0.7,length=5)
fs <- c(0.25,0.5)
Z <- seq(700,300)
Zmean <- 500
plot(RWU(Z,Zmean,fs[1]),Z,type="l",xlab="Root water uptake function",ylab="depth (cm)",
xlim=c(0,8),lwd=2,cex.axis=1.2,cex.lab=1.2,font.lab=2)
for (i in 2:length(fs)) {
lines(RWU(Z,Zmean,fs[i]),Z,lty=i,col=i,lwd=2)
}
# this shows that the new root function already includes the effect of anoxia
# if water goes above a certain Z value the root water uptake decreases
plot(Rc_B(Z,vegpar$c1,vegpar$Zr,Zmean,fs[1]),Z,type="l",xlab="Root water uptake function",ylab="depth (cm)",
xlim=c(0,8),lwd=2,cex.axis=1.2,cex.lab=1.2,font.lab=2)
for (i in 2:length(fs)) {
lines(Rc_B(Z,vegpar$c1,vegpar$Zr,Zmean,fs[i]),Z,lty=i,col=i,lwd=2)
}
vegpar$c1 <- 1.5
plot(U(z1=Z,z2=vegpar$Zr,c1=vegpar$c1),Z)
legend("topright",paste("fs =",fs),lty=1:5,col=1:5,lwd=2,cex=1.2)
|
## Skyline Rearrange and Compound Name Check
# Define custom file name for export; software.pattern, file.pattern and
# currentDate are assumed to be defined earlier in the pipeline.
# paste0() replaces the equivalent paste(..., sep = "").
csvFileName <- paste0("data_intermediate/", software.pattern, "_combined_",
                      file.pattern, "_", currentDate, ".csv")
# Map Skyline's "#N/A" placeholder entries to proper NA values; elements
# without the placeholder are returned unchanged. fixed = TRUE treats the
# pattern as a literal string (it contains no regex metacharacters, so
# the result is identical, just more explicit).
replace_nonvalues <- function(x) {
  gsub("#N/A", NA, x, fixed = TRUE)
}
# Replace original compound names with updated Standards names.
# Fetches the Ingalls Lab standards sheet over the network, joins it to
# `df` on the original compound name, and renames Precursor.Ion.Name to
# the current Compound_Name. The value of the pipeline assignment below
# is the function's return value (last evaluated expression).
# NOTE(review): full_join() joins by all common columns; verify the only
# shared column is Compound_Name_Original, or pass `by =` explicitly.
update_compound_names <- function(df) {
names.changed <- read.csv("https://raw.githubusercontent.com/IngallsLabUW/Ingalls_Standards/master/Ingalls_Lab_Standards.csv",
stringsAsFactors = FALSE, header = TRUE) %>%
select(Compound_Name, Compound_Name_Original) %>%
unique() %>%
full_join(df %>% rename(Compound_Name_Original = Precursor.Ion.Name)) %>%
filter(Compound_Name_Original %in% df$Precursor.Ion.Name) %>%
select(Precursor.Ion.Name = Compound_Name, everything(), -Compound_Name_Original)
}
# Identify positive and negative HILIC runs
if (runtype.pattern == "pos|neg") {
# Tag each run with its column/polarity before combining.
skyline.HILIC.pos <- skyline.HILIC.pos %>%
mutate(Column = "HILICpos")
skyline.HILIC.neg <- skyline.HILIC.neg %>%
mutate(Column = "HILICneg")
# Stack both polarities, keep the quantification columns, and convert
# Skyline's "#N/A" placeholders to NA.
combined.skyline <- skyline.HILIC.pos %>%
rbind(skyline.HILIC.neg) %>%
select(Replicate.Name, Precursor.Ion.Name, Retention.Time, Area, Background, Height, Mass.Error.PPM, Column) %>%
mutate_all(replace_nonvalues)
# Change variable classes
skyline.classes.changed <- ChangeClasses(combined.skyline, start.column = 3, end.column = 7)
# Fix old compound names
skyline.names.updated <- update_compound_names(skyline.classes.changed)
# Export rearranged dataframe
write.csv(skyline.names.updated, csvFileName, row.names = FALSE)
} else {
# Reverse-phase runs arrive in a single frame; only the placeholder
# cleanup differs, and the class columns shift by one position.
skyline.reversephase.nonvalues <- skyline.reversephase %>%
mutate_all(replace_nonvalues)
# Change variable classes
skyline.classes.changed <- ChangeClasses(skyline.reversephase.nonvalues, start.column = 4, end.column = 8)
# Fix old compound names
skyline.names.updated <- update_compound_names(skyline.classes.changed)
# Export rearranged dataframe
write.csv(skyline.names.updated, csvFileName, row.names = FALSE)
}
|
/src/Skyline_Rearrange.R
|
no_license
|
R-Lionheart/THAA_Test
|
R
| false
| false
| 2,223
|
r
|
## Skyline Rearrange and Compound Name Check
# Define custom file name for export
csvFileName <- paste("data_intermediate/", software.pattern, "_combined_", file.pattern, "_", currentDate, ".csv", sep = "")
# Function to remove syntactically incorrect values usually produced by Skyline
replace_nonvalues <- function(x) (gsub("#N/A", NA, x))
# Replace original compound names with updated Standards names
update_compound_names <- function(df) {
names.changed <- read.csv("https://raw.githubusercontent.com/IngallsLabUW/Ingalls_Standards/master/Ingalls_Lab_Standards.csv",
stringsAsFactors = FALSE, header = TRUE) %>%
select(Compound_Name, Compound_Name_Original) %>%
unique() %>%
full_join(df %>% rename(Compound_Name_Original = Precursor.Ion.Name)) %>%
filter(Compound_Name_Original %in% df$Precursor.Ion.Name) %>%
select(Precursor.Ion.Name = Compound_Name, everything(), -Compound_Name_Original)
}
# Identify positive and negative HILIC runs
if (runtype.pattern == "pos|neg") {
skyline.HILIC.pos <- skyline.HILIC.pos %>%
mutate(Column = "HILICpos")
skyline.HILIC.neg <- skyline.HILIC.neg %>%
mutate(Column = "HILICneg")
combined.skyline <- skyline.HILIC.pos %>%
rbind(skyline.HILIC.neg) %>%
select(Replicate.Name, Precursor.Ion.Name, Retention.Time, Area, Background, Height, Mass.Error.PPM, Column) %>%
mutate_all(replace_nonvalues)
# Change variable classes
skyline.classes.changed <- ChangeClasses(combined.skyline, start.column = 3, end.column = 7)
# Fix old compound names
skyline.names.updated <- update_compound_names(skyline.classes.changed)
# Export rearranged dataframe
write.csv(skyline.names.updated, csvFileName, row.names = FALSE)
} else {
skyline.reversephase.nonvalues <- skyline.reversephase %>%
mutate_all(replace_nonvalues)
# Change variable classes
skyline.classes.changed <- ChangeClasses(skyline.reversephase.nonvalues, start.column = 4, end.column = 8)
# Fix old compound naames
skyline.names.updated <- update_compound_names(skyline.classes.changed)
# Export rearranged dataframe
write.csv(skyline.names.updated, csvFileName, row.names = FALSE)
}
|
#' Drive concepts for motorcars.
#'
#' This many-valued context is adapted from an example of conceptual scaling.
#'
#' @format A many-valued context, stored as a data frame.
#' \describe{
#' \item{De}{drive efficiency empty}
#' \item{Dl}{drive efficiency loaded}
#' \item{R}{road holding/handling properties}
#' \item{S}{self-steering effect}
#' \item{E}{economy of space}
#' \item{C}{cost of construction}
#' \item{M}{maintainability}
#' }
#' @example inst/examples/ejem.r
#' @source Ganter, B. and Wille, R. (2005) Formal Concept Analysis: Mathematical
#' Foundations \url{http://www.springer.com/us/book/9783540627715}
"ejem"
|
/R/data.r
|
no_license
|
corybrunson/context
|
R
| false
| false
| 648
|
r
|
#' Drive concepts for motorcars.
#'
#' This many-valued context is adapted from an example of conceptual scaling.
#'
#' @format A many-valued context, stored as a data frame.
#' \describe{
#' \item{De}{drive efficiency empty}
#' \item{Dl}{drive efficiency loaded}
#' \item{R}{road holding/handling properties}
#' \item{S}{self-steering effect}
#' \item{E}{economy of space}
#' \item{C}{cost of construction}
#' \item{M}{maintainability}
#' }
#' @example inst/examples/ejem.r
#' @source Ganter, B. and Wille, R. (2005) Formal Concept Analysis: Mathematical
#' Foundations \url{http://www.springer.com/us/book/9783540627715}
"ejem"
|
# Standard testthat entry point: run every test under tests/testthat/ for
# the jaatha package (invoked by R CMD check).
library(testthat)
test_check("jaatha")
|
/jaatha/tests/testthat.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 39
|
r
|
library(testthat)
test_check("jaatha")
|
# the German Credit Data
# read comma separated file into memory
data<-read.csv("C:/Documents and Settings/MyDocuments/GermanCredit.csv")
#code to convert variable to factor
data$property <-as.factor(data$ property)
#code to convert to numeric
data$age <-as.numeric(data$age)
#code to convert to decimal
data$amount<-as.double(data$amount)
data$amount<-
as.factor(ifelse(data$amount<=2500,'0-2500',
ifelse(data$amount<=5000,'2600-5000','5000+')))
d = sort(sample(nrow(data), nrow(data)*.6))
#select training sample
train<-data[d,]
test<-data[-d,]
train<-subset(train,select=-default)
# Traditional Credit Scoring Using Logistic Regression in R
m<-glm(good_bad~.,data=train,family=binomial())
# for those interested in the step function one can use m<-
# step(m) for it
# I recommend against step due to well known issues with it
# choosing the optimal #variables out of sample
# load library
library(ROCR)
# score test data set
test$score<-predict(m,type='response',test)
pred<-prediction(test$score,test$good_bad)
perf <- performance(pred,"tpr","fpr")
plot(perf)
# Calculating KS Statistic
#this code builds on ROCR library by taking the max delt
#between cumulative bad and good rates being plotted by
#ROCR
max(attr(perf,'y.values')[[1]]-attr(perf,'x.values')[[1]])
# top3 variable affecting Credit Score Function
#get results of terms in regression
g<-predict(m,type='terms',test)
ftopk <- function(x, top = 3) {
  # Return the names of the `top` largest elements of a named vector,
  # joined with ";" (used to report the top drivers of a credit score).
  #
  # Args:
  #   x:   a named numeric vector (e.g. one row of regression terms).
  #   top: how many names to report; now capped at length(x) so short
  #        vectors no longer yield "NA" entries in the output.
  #
  # Returns: a single semicolon-separated string of names.
  top <- min(top, length(x))
  res <- names(x)[order(x, decreasing = TRUE)][seq_len(top)]
  paste(res, collapse = ";")
}
# Application of the function using the top 3 rows
topk=apply(g,1,ftopk,top=3)
#add reason list to scored tets sample
test<-cbind(test, topk)
# Cutting Edge techniques Available in R
#load tree package
library(rpart)
fit1<-rpart(good_bad~.,data=train)
plot(fit1)
text(fit1)
#test$t<-predict(fit1,type='class',test)
#score test data
test$tscore1<-predict(fit1,type='prob',test)
pred5<-prediction(test$tscore1[,2],test$good_bad)
perf5 <- performance(pred5,"tpr","fpr")
#build model using 90% 10% priors
#with smaller complexity parameter to allow more complex trees
# for tuning complexity vs. pruning see Thernau 1997
fit2<-
rpart(good_bad~.,data=train,parms=list(prior=c(.9,.1)),cp=.0002)
plot(fit2);text(fit2);
test$tscore2<-predict(fit2,type='prob',test)
pred6 <- prediction(test$tscore2[,2], test$good_bad)
perf6 <- performance(pred6, 'tpr', 'fpr')
#prints complexity and out of sample error
printcp(fit1)
#plots complexity vs. error
plotcp(fit1)
#prints complexity and out of sample error
printcp(fit2)
#plots complexity vs. error
plotcp(fit2)
plot(perf5,col='red',lty=1,
main='Tree vs Tree with Prior Prob');
plot(perf4, col='green',add=TRUE,lty=2);
legend(0.6,0.6,c('simple tree','tree with 90/10
prior'),col=c('red','green'),lwd=3)
#print rules for all classes
list.rules.rpart(fit1)
list.rules.rpart(fit2)
#custom function to only print rules for bad loans
listrules(fit1)
listrules(fit2)
# Bayesian Networks in Credit Scoring
#load library
library(deal)
#make copy of train
ksl<-train
#discrete cannot inherit from continuous so binary
#good/bad must be converted to numeric for deal package
ksl$good_bad<-as.numeric(train$good_bad)
#no missing values allowed so set any missing to 0
# ksl$history[is.na(ksl$history1)] <- 0
#drops empty factors
# ksl$property<-ksl$property[drop=TRUE]
ksl.nw <- network(ksl)
ksl.prior <- jointprior(ks.nw)
# The ban list is a matrix with two columns.
#banlist <- matrix(c(5,5,6,6,7,7,9,8,9,8,9,8,9,8),ncol=2)
## ban arrows towards Sex and Year
# NOTE: this is a computationally intensive procedure; if you know that
# certain variables should have no relationships, specify the arcs
# between variables to exclude in the banlist.
# (Transcription repair: comment continuations below had lost their "#"
# and the TRUE literal was split across lines, which broke parsing.)
ksl.nw <- learn(ksl.nw, ksl, ksl.prior)$nw
# This step appears expensive, so restart was reset from 2 to 1 and
# degree from 10 to 1.
result <- heuristic(ksl.nw, ksl, ksl.prior,
                    restart = 1, degree = 1, trace = TRUE)
# Keep the best-scoring network found by the heuristic search and persist it.
thebest <- result$nw[[1]]
savenet(thebest, "ksl.net")
# Print the network with conditional posteriors (missing closing
# parenthesis restored -- transcription artifact).
print(ksl.nw, condposterior = TRUE)
# In particular trying 80/20,
# 90/10, 60/40, 50/50 type priors seems to be a quick and effective hueristic approach to
# getting high performing trees.
#load tree package
library(rpart)
fit1<-rpart(good_bad~.,data=train)
plot(fit1);text(fit1);
#test$t<-predict(fit1,type='class',test)
#score test data
test$tscore1<-predict(fit1,type='prob',test)
pred5<-prediction(test$tscore1[,2],test$good_bad)
perf5 <- performance(pred5,"tpr","fpr")
#build model using 90% 10% priors
#with smaller complexity parameter to allow more complex trees
# for tuning complexity vs. pruning see Thernau 1997
fit2<-rpart(good_bad~.,data=train,parms=list(prior=c(.9,.1)),cp=.0002)
plot(fit2);text(fit2);
test$tscore2<-predict(fit2,type='prob',test)
pred6<-prediction(test$tscore2[,2],test$good_bad)
perf6<- performance(pred6,"tpr","fpr")
#prints complexity and out of sample error
printcp(fit1)
#plots complexity vs. error
plotcp(fit1)
#prints complexity and out of sample error
printcp(fit2)
#plots complexity vs. error
plotcp(fit2)
# Compare ROC Performance of Trees
plot(perf5,col='red',lty=1,main='Tree vs Tree with Prior Prob');
plot(perf4, col='green',add=TRUE,lty=2);
legend(0.6,0.6,c('simple tree','tree with 90/10 prior'),col=c('red','green'),lwd=3)
# Converting Trees to Rules
#print rules for all classes
list.rules.rpart(fit1)
list.rules.rpart(fit2)
#custom function to only print rules for bad loans
listrules(fit1)
listrules(fit2)
# Conditional inference Trees
#conditional inference trees corrects for known biases in chaid and cart
library(party)
cfit1<-ctree(good_bad~.,data=train)
plot(cfit1)
resultdfr <- as.data.frame(do.call("rbind", treeresponse(cfit1, newdata = test)))
test$tscore3<-resultdfr[,2]
pred9<-prediction(test$tscore3,test$good_bad)
perf9 <- performance(pred9,"tpr","fpr")
plot(perf5,col='red',lty=1,main='Tree vs Tree with Prior Prob vs Ctree');
plot(perf6, col='green',add=TRUE,lty=2);
plot(perf9, col='blue',add=TRUE,lty=3);
legend(0.6,0.6,c('simple tree','tree with 90/10
prior','Ctree'),col=c('red','green','blue'),lwd=3)
# Using Random Forests
library(randomForest)
# Bagged ensemble of 500 trees; keep.forest = TRUE so the fitted forest can
# score the test set. (Transcription repair: the `proximity` identifier was
# split across two lines, which broke parsing.)
arf <- randomForest(good_bad ~ ., data = train, importance = TRUE,
                    proximity = TRUE, ntree = 500, keep.forest = TRUE)
#plot variable importance
varImpPlot(arf)
testp4<-predict(arf,test,type='prob')[,2]
pred4<-prediction(testp4,test$good_bad)
perf4 <- performance(pred4,"tpr","fpr")
#plotting logistic results vs. random forest ROC
#plotting logistic results vs. random forest ROC
plot(perf,col='red',lty=1, main='ROC Logistic Vs. RF');
plot(perf4, col='blue',lty=2,add=TRUE);
legend(0.6,0.6,c('simple','RF'),col=c('red','blue'),lwd=3)
#plot variable importance
varImpPlot(arf)
library(party)
set.seed(42)
crf<-cforest(good_bad~.,control = cforest_unbiased(mtry = 2, ntree = 50), data=train)
varimp(crf)
# Model based on trial and error, guided by random-forest variable
# importance. (Transcription repair: the two lines above were bare prose
# and `binomial` was split mid-identifier, which broke parsing.)
m2 <- glm(good_bad ~ . + history:other + history:employed +
            checking:employed + checking:purpose,
          data = train, family = binomial())
test$score2<-predict(m2,type='response',test)
pred2<-prediction(test$score2,test$good_bad)
perf2 <- performance(pred2,"tpr","fpr")
plot(perf2)
#plotting logistic results vs. random forest ROC
plot(perf,col='red',lty=1, main='ROC Logistic Vs. RF');
plot(perf2, col='orange',lty=2,add=TRUE);
plot(perf4, col='blue',lty=3,add=TRUE);
legend(0.6,0.6,c('simple','logit w
interac','RF'),col=c('red','orange','blue'),lwd=3)
# Calculating Area under the Curve
# the following line computes the area under the curve for
# models
#simple model
performance(pred,"auc")
#random forest
performance(pred2,"auc")
#logit plus random forest interaction of affordability term
performance(pred4,"auc")
# Cross Validation
#load Data Analysis And Graphics Package for R (DAAG)
library(DAAG)
# Calculate accuracy over 100 random folds of data for the simple logit.
# (Transcription repair: comment continuations had lost their "#",
# leaving bare prose at top level.)
h <- CVbinary(obj = m, rand = NULL, nfolds = 100, print.details = TRUE)
# Calculate accuracy over 100 random folds of data for the logit with
# affordability interactions.
g <- CVbinary(obj = m2, rand = NULL, nfolds = 100, print.details = TRUE)
# Cutting Edge techniques: Party Package
# Model-based recursive partitioning: a logistic model in `afford` is fitted
# within partitions discovered over the remaining covariates.
# (Transcription repair: `property` was split mid-identifier in the formula
# and the plot title string contained a stray embedded newline.)
library(party)
model <- mob(good_bad ~ afford |
               amount + other + checking + duration + savings + marital +
               coapp + property + resident + amount,
             data = train, model = glinearModel, family = binomial())
plot(model)
# Score the hold-out sample and build its ROC curve.
test$mobscore <- predict(model, newdata = test, type = c("response"))
pred7 <- prediction(test$mobscore, test$good_bad)
perf7 <- performance(pred7, "tpr", "fpr")
# Compare the three tree-based models on one ROC plot.
plot(perf5, col = 'red', lty = 1,
     main = 'Tree vs Tree with Prior Prob vs. Model Based Tree with Glm')
plot(perf4, col = 'green', add = TRUE, lty = 2)
plot(perf7, col = 'orange', add = TRUE, lty = 3)
legend(0.6, 0.6,
       c('simple tree', 'tree with 90/10 prior', 'Model based tree with logit'),
       col = c('red', 'green', 'orange'), lwd = 3)
# Appendix of Useful Functions
# Elementwise division for building ratio/interaction terms: wherever the
# denominator is zero the (zero) denominator is returned instead of
# producing Inf/NaN.
div <- function(a, b) {
  ifelse(b == 0, b, a / b)
}
# Print every leaf node of an rpart tree as a human-readable rule, with
# its predicted class, coverage and class probability. Output goes to the
# console via cat(); nothing is returned.
list.rules.rpart <- function(model)
{
if (!inherits(model, "rpart")) stop("Not a legitimate rpart tree")
#
# Get some information.
#
frm <- model$frame
# NOTE(review): `names` shadows base::names() inside this function.
names <- row.names(frm)
ylevels <- attr(model, "ylevels")
ds.size <- model$frame[1,]$n
#
# Print each leaf node as a rule.
#
for (i in 1:nrow(frm))
{
if (frm[i,1] == "<leaf>")
{
# The following [,5] is hardwired - needs work!
# (yval2[,5] is presumably the fitted class probability -- TODO confirm
# against the rpart frame layout for this model.)
cat("\n")
cat(sprintf(" Rule number: %s ", names[i]))
cat(sprintf("[yval=%s cover=%d (%.0f%%) prob=%0.2f]\n",
ylevels[frm[i,]$yval], frm[i,]$n,
round(100*frm[i,]$n/ds.size), frm[i,]$yval2[,5]))
# path.rpart() reconstructs the split conditions leading to this leaf;
# the first element (the root "root") is dropped before printing.
pth <- path.rpart(model, nodes=as.numeric(names[i]),
print.it=FALSE)
cat(sprintf(" %s\n", unlist(pth)[-1]), sep="")
}
}
}
listrules <- function(model) {
  # Print, as human-readable rules, only the leaf nodes of an rpart tree
  # that classify cases as 'bad' (bad loans), including N/Y counts and the
  # class probability. Output goes to the console via cat(); no return value.
  # (Transcription repair: the stop() message and the sprintf format string
  # contained stray embedded newlines from a bad paste.)
  if (!inherits(model, "rpart")) stop("Not a legitimate rpart tree")
  #
  # Get some information.
  #
  frm <- model$frame
  names <- row.names(frm)
  ylevels <- attr(model, "ylevels")
  ds.size <- model$frame[1, ]$n
  #
  # Print each 'bad' leaf node as a rule.
  #
  for (i in 1:nrow(frm)) {
    if (frm[i, 1] == "<leaf>" & ylevels[frm[i, ]$yval] == 'bad') {
      # The following [,5] is hardwired - needs work!
      cat("\n")
      cat(sprintf(" Rule number: %s ", names[i]))
      cat(sprintf("[yval=%s cover=%d N=%.0f Y=%.0f (%.0f%%) prob=%0.2f]\n",
                  ylevels[frm[i, ]$yval], frm[i, ]$n,
                  formatC(frm[i, ]$yval2[, 2], format = "f", digits = 2),
                  formatC(frm[i, ]$n - frm[i, ]$yval2[, 2], format = "f",
                          digits = 2),
                  round(100 * frm[i, ]$n / ds.size), frm[i, ]$yval2[, 5]))
      # Reconstruct the split conditions leading to this leaf; drop the
      # root entry before printing.
      pth <- path.rpart(model, nodes = as.numeric(names[i]),
                        print.it = FALSE)
      cat(sprintf("   %s\n", unlist(pth)[-1]), sep = "")
    }
  }
}
|
/script/german credit data.r
|
no_license
|
goal1234/score
|
R
| false
| false
| 10,461
|
r
|
# the German Credit Data
# read comma separated file into memory
data<-read.csv("C:/Documents and Settings/MyDocuments/GermanCredit.csv")
#code to convert variable to factor
data$property <-as.factor(data$ property)
#code to convert to numeric
data$age <-as.numeric(data$age)
#code to convert to decimal
data$amount<-as.double(data$amount)
data$amount<-
as.factor(ifelse(data$amount<=2500,'0-2500',
ifelse(data$amount<=5000,'2600-5000','5000+')))
d = sort(sample(nrow(data), nrow(data)*.6))
#select training sample
train<-data[d,]
test<-data[-d,]
train<-subset(train,select=-default)
# Traditional Credit Scoring Using Logistic Regression in R
m<-glm(good_bad~.,data=train,family=binomial())
# for those interested in the step function one can use m<-
# step(m) for it
# I recommend against step due to well known issues with it
# choosing the optimal #variables out of sample
# load library
library(ROCR)
# score test data set
test$score<-predict(m,type='response',test)
pred<-prediction(test$score,test$good_bad)
perf <- performance(pred,"tpr","fpr")
plot(perf)
# Calculating KS Statistic
#this code builds on ROCR library by taking the max delt
#between cumulative bad and good rates being plotted by
#ROCR
max(attr(perf,'y.values')[[1]]-attr(perf,'x.values')[[1]])
# top3 variable affecting Credit Score Function
#get results of terms in regression
g<-predict(m,type='terms',test)
ftopk<- function(x,top=3){
res=names(x)[order(x, decreasing = TRUE)][1:top]
paste(res,collapse=";",sep="")
}
# Application of the function using the top 3 rows
topk=apply(g,1,ftopk,top=3)
#add reason list to scored tets sample
test<-cbind(test, topk)
# Cutting Edge techniques Available in R
#load tree package
library(rpart)
fit1<-rpart(good_bad~.,data=train)
plot(fit1)
text(fit1)
#test$t<-predict(fit1,type='class',test)
#score test data
test$tscore1<-predict(fit1,type='prob',test)
pred5<-prediction(test$tscore1[,2],test$good_bad)
perf5 <- performance(pred5,"tpr","fpr")
#build model using 90% 10% priors
#with smaller complexity parameter to allow more complex trees
# for tuning complexity vs. pruning see Thernau 1997
fit2<-
rpart(good_bad~.,data=train,parms=list(prior=c(.9,.1)),cp=.0002)
plot(fit2);text(fit2);
test$tscore2<-predict(fit2,type='prob',test)
pred6 <- prediction(test$tscore2[,2], test$good_bad)
perf6 <- performance(pred6, 'tpr', 'fpr')
#prints complexity and out of sample error
printcp(fit1)
#plots complexity vs. error
plotcp(fit1)
#prints complexity and out of sample error
printcp(fit2)
#plots complexity vs. error
plotcp(fit2)
plot(perf5,col='red',lty=1,
main='Tree vs Tree with Prior Prob');
plot(perf4, col='green',add=TRUE,lty=2);
legend(0.6,0.6,c('simple tree','tree with 90/10
prior'),col=c('red','green'),lwd=3)
#print rules for all classes
list.rules.rpart(fit1)
list.rules.rpart(fit2)
#custom function to only print rules for bad loans
listrules(fit1)
listrules(fit2)
# Bayesian Networks in Credit Scoring
#load library
library(deal)
#make copy of train
ksl<-train
#discrete cannot inherit from continuous so binary
#good/bad must be converted to numeric for deal package
ksl$good_bad<-as.numeric(train$good_bad)
#no missing values allowed so set any missing to 0
# ksl$history[is.na(ksl$history1)] <- 0
#drops empty factors
# ksl$property<-ksl$property[drop=TRUE]
ksl.nw <- network(ksl)
ksl.prior <- jointprior(ks.nw)
# the ban list is a matrix with two columns
#banlist <- matrix(c(5,5,6,6,7,7,9,8,9,8,9,8,9,8),ncol=2)
## ban arrows towards Sex and Year
# note this a computationally intensive procuredure and if
you know that certain variables should have not
relationships you should specify
# the arcs between variables to exclude in the banlist
ksl.nw <- learn(ksl.nw,ksl,ksl.prior)$nw
#this step appears expensive so reset restart from 2 to 1
and degree from 10 to 1
result <-
heuristic(ksl.nw,ksl,ksl.prior,restart=1,degree=1,trace=TRU
E)
thebest <- result$nw[[1]]
savenet(thebest, "ksl.net")
print(ksl.nw,condposterior=TRUE
# In particular trying 80/20,
# 90/10, 60/40, 50/50 type priors seems to be a quick and effective hueristic approach to
# getting high performing trees.
#load tree package
library(rpart)
fit1<-rpart(good_bad~.,data=train)
plot(fit1);text(fit1);
#test$t<-predict(fit1,type='class',test)
#score test data
test$tscore1<-predict(fit1,type='prob',test)
pred5<-prediction(test$tscore1[,2],test$good_bad)
perf5 <- performance(pred5,"tpr","fpr")
#build model using 90% 10% priors
#with smaller complexity parameter to allow more complex trees
# for tuning complexity vs. pruning see Thernau 1997
fit2<-rpart(good_bad~.,data=train,parms=list(prior=c(.9,.1)),cp=.0002)
plot(fit2);text(fit2);
test$tscore2<-predict(fit2,type='prob',test)
pred6<-prediction(test$tscore2[,2],test$good_bad)
perf6<- performance(pred6,"tpr","fpr")
#prints complexity and out of sample error
printcp(fit1)
#plots complexity vs. error
plotcp(fit1)
#prints complexity and out of sample error
printcp(fit2)
#plots complexity vs. error
plotcp(fit2)
# Compare ROC Performance of Trees
plot(perf5,col='red',lty=1,main='Tree vs Tree with Prior Prob');
plot(perf4, col='green',add=TRUE,lty=2);
legend(0.6,0.6,c('simple tree','tree with 90/10 prior'),col=c('red','green'),lwd=3)
# Converting Trees to Rules
#print rules for all classes
list.rules.rpart(fit1)
list.rules.rpart(fit2)
#custom function to only print rules for bad loans
listrules(fit1)
listrules(fit2)
# Conditional inference Trees
#conditional inference trees corrects for known biases in chaid and cart
library(party)
cfit1<-ctree(good_bad~.,data=train)
plot(cfit1)
resultdfr <- as.data.frame(do.call("rbind", treeresponse(cfit1, newdata = test)))
test$tscore3<-resultdfr[,2]
pred9<-prediction(test$tscore3,test$good_bad)
perf9 <- performance(pred9,"tpr","fpr")
plot(perf5,col='red',lty=1,main='Tree vs Tree with Prior Prob vs Ctree');
plot(perf6, col='green',add=TRUE,lty=2);
plot(perf9, col='blue',add=TRUE,lty=3);
legend(0.6,0.6,c('simple tree','tree with 90/10
prior','Ctree'),col=c('red','green','blue'),lwd=3)
# Using Random Forests
library(randomForest)
arf<-
randomForest(good_bad~.,data=train,importance=TRUE,proximit
y=TRUE,ntree=500, keep.forest=TRUE)
#plot variable importance
varImpPlot(arf)
testp4<-predict(arf,test,type='prob')[,2]
pred4<-prediction(testp4,test$good_bad)
perf4 <- performance(pred4,"tpr","fpr")
#plotting logistic results vs. random forest ROC
#plotting logistic results vs. random forest ROC
plot(perf,col='red',lty=1, main='ROC Logistic Vs. RF');
plot(perf4, col='blue',lty=2,add=TRUE);
legend(0.6,0.6,c('simple','RF'),col=c('red','blue'),lwd=3)
#plot variable importance
varImpPlot(arf)
library(party)
set.seed(42)
crf<-cforest(good_bad~.,control = cforest_unbiased(mtry = 2, ntree = 50), data=train)
varimp(crf)
model based on trial and error based on random forest
variable importance
m2<-glm(good_bad~.+history:other+history:employed
+checking:employed+checking:purpose,data=train,family=binom
ial())
test$score2<-predict(m2,type='response',test)
pred2<-prediction(test$score2,test$good_bad)
perf2 <- performance(pred2,"tpr","fpr")
plot(perf2)
#plotting logistic results vs. random forest ROC
plot(perf,col='red',lty=1, main='ROC Logistic Vs. RF');
plot(perf2, col='orange',lty=2,add=TRUE);
plot(perf4, col='blue',lty=3,add=TRUE);
legend(0.6,0.6,c('simple','logit w
interac','RF'),col=c('red','orange','blue'),lwd=3)
# Calculating Area under the Curve
# the following line computes the area under the curve for
# models
#simple model
performance(pred,"auc")
#random forest
performance(pred2,"auc")
#logit plus random forest interaction of affordability term
performance(pred4,"auc")
# Cross Validation
#load Data Analysis And Graphics Package for R (DAAG)
library(DAAG)
#calculate accuracy over 100 random folds of data for
simple logit
h<-CVbinary(obj=m, rand=NULL, nfolds=100,
print.details=TRUE)
#calculate accuracy over 100 random folds of data for logit
+affordability interactions
g<-CVbinary(obj=m2, rand=NULL, nfolds=100,
print.details=TRUE)
# Cutting Edge techniques: Party Package
#model based recursive paritioning
library(party)
model<-mob(good_bad~afford |
amount+other+checking+duration+savings+marital+coapp+proper
ty+resident+amount,data=train,
model=glinearModel,family=binomial())
plot(model)
test$mobscore<-predict(model, newdata = test, type =
c("response"))
pred7<-prediction(test$mobscore,test$good_bad)
perf7 <- performance(pred7,"tpr","fpr")
plot(perf5,col='red',lty=1,main='Tree vs Tree with Prior
Prob vs. Model Based Tree with Glm');
plot(perf4, col='green',add=TRUE,lty=2);
plot(perf7, col='orange',add=TRUE,lty=3);
legend(0.6,0.6,c('simple tree','tree with 90/10 prior',
'Model based tree with logit'),col=c('red','green','orange'),lwd=3)
# Appendix of Useful Functions
#function to divide to create interaction terms without divide by zero
div<-function(a,b) ifelse(b == 0, b, a/b)
list.rules.rpart <- function(model)
{
if (!inherits(model, "rpart")) stop("Not a legitimate rpart tree")
#
# Get some information.
#
frm <- model$frame
names <- row.names(frm)
ylevels <- attr(model, "ylevels")
ds.size <- model$frame[1,]$n
#
# Print each leaf node as a rule.
#
for (i in 1:nrow(frm))
{
if (frm[i,1] == "<leaf>")
{
# The following [,5] is hardwired - needs work!
cat("\n")
cat(sprintf(" Rule number: %s ", names[i]))
cat(sprintf("[yval=%s cover=%d (%.0f%%) prob=%0.2f]\n",
ylevels[frm[i,]$yval], frm[i,]$n,
round(100*frm[i,]$n/ds.size), frm[i,]$yval2[,5]))
pth <- path.rpart(model, nodes=as.numeric(names[i]),
print.it=FALSE)
cat(sprintf(" %s\n", unlist(pth)[-1]), sep="")
}
}
}
listrules<-function(model)
{
if (!inherits(model, "rpart")) stop("Not a legitimate
rpart tree")
#
# Get some information.
#
frm <- model$frame
names <- row.names(frm)
ylevels <- attr(model, "ylevels")
ds.size <- model$frame[1,]$n
#
# Print each leaf node as a rule.
#
for (i in 1:nrow(frm))
{
if (frm[i,1] == "<leaf>" & ylevels[frm[i,]$yval]=='bad')
{
# The following [,5] is hardwired - needs work!
cat("\n")
cat(sprintf(" Rule number: %s ", names[i]))
cat(sprintf("[yval=%s cover=%d N=%.0f Y=%.0f (%.0f%%)
prob=%0.2f]\n",
ylevels[frm[i,]$yval], frm[i,]$n,
formatC(frm[i,]$yval2[,2], format = "f", digits = 2),
formatC(frm[i,]$n-frm[i,]$yval2[,2], format = "f", digits
= 2),
round(100*frm[i,]$n/ds.size), frm[i,]
$yval2[,5]))
pth <- path.rpart(model, nodes=as.numeric(names[i]),
print.it=FALSE)
cat(sprintf(" %s\n", unlist(pth)[-1]), sep="")
}
}
}
|
context("Tidying data with tidy_pol")
# tidy_pol() is expected to split wide column names of the form
# "<primary><sep><secondary>" into primary/secondary grouping columns
# plus a `response` column holding the original values.
test_that("tidy_pol tidies a data set with separator", {
data <- tibble::tibble(
'Ctrl-Ctrl' = 1:3,
'M1-Ctrl'= 1:3,
'Ctrl-LPS' = 4:6,
'M1-LPS' = 11:13
)
# Same data, but with "." as separator (passed as an escaped regex).
data2 <- tibble::tibble(
'Ctrl.Ctrl' = 1:3,
'M1.Ctrl'= 1:3,
'Ctrl.LPS' = 4:6,
'M1.LPS' = 11:13
)
# Expected long layout: groups recycled over the stacked responses.
tidy <- tibble::tibble(
'primary' = rep(rep(c("Ctrl", "M1"), each=3), 2),
'secondary'= rep(c("Ctrl", "LPS"), each=6),
'response' = as.integer(c(1, 2, 3, 1, 2, 3, 4, 5, 6, 11, 12, 13))
)
expect_identical(tidy_pol(data), tidy)
expect_identical(tidy_pol(data2, sep="\\."), tidy)
})
test_that("tidy_pol produces error for non-numeric data", {
# A single character column should trigger the numeric-input check.
not_numeric <- tibble::tibble(
'Ctrl-Ctrl' = c("1", "2", "3"),
'M1-Ctrl'= 1:3,
'Ctrl-LPS' = 4:6,
'M1-LPS' = 11:13
)
expect_error(tidy_pol(not_numeric), "Input data must be numeric")
})
test_that("tidy_pol produces error if separator missing", {
# First column lacks the separator, so name parsing must fail loudly.
no_sep <- tibble::tibble(
'Ctrl' = 1:3,
'M1-Ctrl'= 1:3,
'Ctrl-LPS' = 4:6,
'M1-LPS' = 11:13
)
expect_error(tidy_pol(no_sep), "Column names must contain separator")
})
|
/tests/testthat/test_tidy_pol.R
|
no_license
|
ksedivyhaley/katehelpr
|
R
| false
| false
| 1,147
|
r
|
context("Tidying data with tidy_pol")
test_that("tidy_pol tidies a data set with separator", {
data <- tibble::tibble(
'Ctrl-Ctrl' = 1:3,
'M1-Ctrl'= 1:3,
'Ctrl-LPS' = 4:6,
'M1-LPS' = 11:13
)
data2 <- tibble::tibble(
'Ctrl.Ctrl' = 1:3,
'M1.Ctrl'= 1:3,
'Ctrl.LPS' = 4:6,
'M1.LPS' = 11:13
)
tidy <- tibble::tibble(
'primary' = rep(rep(c("Ctrl", "M1"), each=3), 2),
'secondary'= rep(c("Ctrl", "LPS"), each=6),
'response' = as.integer(c(1, 2, 3, 1, 2, 3, 4, 5, 6, 11, 12, 13))
)
expect_identical(tidy_pol(data), tidy)
expect_identical(tidy_pol(data2, sep="\\."), tidy)
})
test_that("tidy_pol produces error for non-numeric data", {
not_numeric <- tibble::tibble(
'Ctrl-Ctrl' = c("1", "2", "3"),
'M1-Ctrl'= 1:3,
'Ctrl-LPS' = 4:6,
'M1-LPS' = 11:13
)
expect_error(tidy_pol(not_numeric), "Input data must be numeric")
})
test_that("tidy_pol produces error if separator missing", {
no_sep <- tibble::tibble(
'Ctrl' = 1:3,
'M1-Ctrl'= 1:3,
'Ctrl-LPS' = 4:6,
'M1-LPS' = 11:13
)
expect_error(tidy_pol(no_sep), "Column names must contain separator")
})
|
#' summary
#' @description Generate a summary of the results.
#' @return The posterior mean and 95 percent credible intervals, n_eff, Rhat and WAIC.
#' @param object An object from \link{fit}.
#' @param RR Logical; if \code{TRUE} (default) include the relative
#' sensitivity/specificity summaries, otherwise the \code{RR} element is \code{NULL}.
#' @param SIndex Logical; if \code{TRUE} (default) include the superiority
#' index summaries, otherwise the \code{S} element is \code{NULL}.
#' @param digits An optional positive value to control the number of digits to print when printing numeric values.
#' @param ... other \link[rstan]{stan} options.
#' @examples
#'
#' \dontrun{
#'
#' fit1 <- fit(data=telomerase,
#'             SID = "ID",
#'             copula="fgm",
#'             iter = 400,
#'             warmup = 100,
#'             seed=1,
#'             cores=1)
#'
#' ss <- summary(fit1)
#'
#' }
#' @references {Watanabe S (2010). Asymptotic Equivalence of Bayes Cross Validation and Widely Applicable Information Criterion in Singular
#' Learning Theory. Journal of Machine Learning Research, 11, 3571-3594.}
#' @references {Vehtari A, Gelman A (2014). WAIC and Cross-validation in Stan. Unpublished, pp. 1-14.}
#' @export
#' @author Victoria N Nyaga
summary.nmadasfit <- function(object,
                              RR = TRUE,
                              SIndex = TRUE,
                              digits = 3,
                              ...){
  #=======================Extract Model Parameters ===================================#
  sm <- rstan::summary(object@fit, ...)

  # Pull mean/95% CrI/median plus convergence diagnostics for one
  # parameter block and attach readable test/parameter labels.
  obtainsummary <- function(par) {
    x <- data.frame(summary(object@fit, pars = par)$summary[, c("mean", "2.5%", "50%", "97.5%", "n_eff", "Rhat")])
    names(x) <- c("Mean", "Lower", "Median", "Upper", "n_eff", "Rhat")
    if (par != "S") {
      # "MU" holds absolute accuracy, "RR" relative accuracy; rows are
      # stacked as all sensitivities followed by all specificities.
      # (The label now depends on `par` itself rather than the outer RR
      # flag, which previously mislabelled nothing only by accident.)
      if (par == "MU") {
        param <- c("Sensitivity", "Specificity")
      } else {
        param <- c("RR.Sens", "RR.Spec")
      }
      x$Parameter <- rep(param, each = nrow(x) / 2)
      x$Test <- rep(object@labels, 2)
      x <- x[, c("Test", "Parameter", "Mean", "Lower", "Median", "Upper", "n_eff", "Rhat")]
      x <- x[order(x$Test), ]
    } else {
      x$Test <- object@labels
      x <- x[, c("Test", "Mean", "Lower", "Median", "Upper", "n_eff", "Rhat")]
    }
    row.names(x) <- NULL
    x
  }

  MU <- obtainsummary("MU")
  # Compute optional summaries only when requested. Previously
  # SIndex = FALSE crashed with "object 'S' not found", and RR = FALSE
  # returned the logical flag itself instead of a table; both now yield
  # NULL in the output list.
  RRsum <- if (RR) obtainsummary("RR") else NULL
  S <- if (SIndex) obtainsummary("S") else NULL

  # NOTE(review): `digits` is accepted but currently unused here;
  # presumably intended for the print method -- confirm.
  w <- waic(object@fit)

  out <- list(MU = MU,
              RR = RRsum,
              S = S,
              WAIC = w,
              allsm = sm)
  out
}
|
/R/summary.R
|
no_license
|
VNyaga/NMADAS
|
R
| false
| false
| 2,409
|
r
|
#' summary
#' @description Generate a summary of the results.
#' @return The posterior mean and 95 percent credible intervals, n_eff, Rhat and WAIC.
#' @param object An object from \link{fit}.
#' @param digits An optional positive value to control the number of digits to print when printing numeric values.
#' @param ... other \link[rstan]{stan} options.
#' @examples
#'
#' \dontrun{
#'
#' fit1 <- fit(data=telomerase,
#' SID = "ID",
#' copula="fgm",
#' iter = 400,
#' warmup = 100,
#' seed=1,
#' cores=1)
#'
#' ss <- summary(fit1)
#'
#' }
#' @references {Watanabe S (2010). Asymptotic Equivalence of Bayes Cross Validation and Widely Applicable Information Criterion in Singular
#' Learning Theory. Journal of Machine Learning Research, 11, 3571-3594.}
#' @references {Vehtari A, Gelman A (2014). WAIC and Cross-validation in Stan. Unpublished, pp. 1-14.}
#' @export
#' @author Victoria N Nyaga
summary.nmadasfit <- function(object,
RR = TRUE,
SIndex = TRUE,
digits=3,
...){
#=======================Extract Model Parameters ===================================#
sm <- rstan::summary(object@fit, ...)
#Obtain the summaries
obtainsummary <- function(par) {
x <- data.frame(summary(object@fit, pars=par)$summary[, c("mean", "2.5%", "50%", "97.5%", "n_eff", "Rhat")])
names(x) <- c("Mean", "Lower", "Median", "Upper", "n_eff", "Rhat")
if (par != "S"){
if (RR){
param <- c("RR.Sens", "RR.Spec")
}
if (par == "MU") {
param <- c("Sensitivity", "Specificity")
}
x$Parameter <- rep(param, each=nrow(x)/2)
x$Test <- rep(object@labels, 2)
x <- x[, c("Test", "Parameter", "Mean", "Lower", "Median", "Upper", "n_eff", "Rhat")]
x <- x[order(x$Test),]
}
else{
x$Test <- object@labels
x <- x[, c("Test", "Mean", "Lower", "Median", "Upper", "n_eff", "Rhat")]
}
row.names(x) <- NULL
x
}
MU <- obtainsummary("MU")
if (RR){
RR <- obtainsummary("RR")
}
if (SIndex) {
S <- obtainsummary("S")
}
w <- waic(object@fit)
out <- list(MU=MU,
RR = RR,
S = S,
WAIC=w,
allsm=sm)
out
}
|
"Refer to the previous question. Brain volume for adult women is about 1,100 cc for women with a standard deviation of 75 cc.
Consider the sample mean of 100 random adult women from this population.
What is the 95th percentile of the distribution of that sample mean?"

# Population parameters and sample size.
p <- 0.95   # percentile of interest
mu <- 1100  # population mean (cc)
sd <- 75    # population standard deviation (cc)
n <- 100    # sample size

# Standard error of the sample mean: sigma / sqrt(n).
sd_err <- sd / sqrt(n)

# 95th percentile of the sampling distribution of the sample mean.
qnorm(p, mu, sd_err, lower.tail = TRUE)
|
/qnorm_02.R
|
no_license
|
vcwild/statinference
|
R
| false
| false
| 367
|
r
|
"Refer to the previous question. Brain volume for adult women is about 1,100 cc for women with a standard deviation of 75 cc.
Consider the sample mean of 100 random adult women from this population.
What is the 95th percentile of the distribution of that sample mean?"
p = 0.95
mu = 1100
sd = 75
n = 100
# Standard error of the sample mean: sigma / sqrt(n) = 75 / 10 = 7.5.
sd_err = sd/sqrt(n)
# 95th percentile of N(mu, sd_err), roughly 1112.3 cc.
qnorm(p, mu, sd_err, lower.tail = TRUE)
|
# R Statistical System
# Run with: $ Rscript hello.R
# Parts adapted from tutorialspoint.com
# Getting help:
#   ?lm
#   ??lm      # fuzzy, verbose search
#   help(lm)
# Created: Mon 18 Apr 2016 10:46:02 (Bob Heckel)

# Assignment and auto-printing ----
s <- "hello world"
print(s)
s <- "hello world"
s

# The colon operator creates an integer sequence.
s2 <- 0:9
s2
s3 <- seq(0, 9, by = 0.5)
s3

mynumeric <- 12.34
mynumeric
myinteger <- 1234L
myinteger

myvector <- c("red", "yellow", "blue")
myvector
# yellow & blue
myvector2 <- myvector[c(2, 3)]
myvector2

mylist <- list(1234L, "algae")
print("mylist is")
mylist
soylentlist <- list(mylist, "soy", "people", TRUE)
print("my soylent list is")
soylentlist

myarray <- array(c("green", "yellow"), dim = c(3, 3, 4))
myarray

# Like Excel with R1C1 notation, no field names (like frames have)
mymatrix <- matrix(c("r1c1", "r1c2", "r1c3", "r2c2", "r2c2", "r2c3"),
                   nrow = 2, ncol = 3, byrow = TRUE)
mymatrix
cat("\n")
print("r2c2:")
mymatrix[2, 2]
cat("\n")
print("r2:")
mymatrix[2, ]

vapple_colors <- c("green", "green", "yellow", "red", "red", "red", "green")
# Factors de-duplicate values into levels.
myfactor <- factor(vapple_colors)
myfactor
print(nlevels(myfactor))

# A data frame is a list of equal-length vectors.
myframe <- data.frame(
  gender = c("Male", "Male", "Female"),
  height = c(152, 171.5, 165),
  age = c(42, 44, 69),
  mydt = as.Date(c("2016-01-01", "2016-02-01", "2016-04-16"))
)
# Add a column
myframe$dept <- c("IT", "Ops", "HR")
# To add a row make a new frame then newframe <- rbind(myframe, myframe2)
# Whole frame
myframe
# Details of frame
str(myframe)
# proc means
summary(myframe)
# Partial frame
print(data.frame(myframe$age, myframe$mydt))
cat("\n")

v1 <- c(2, 5.5, 6, 9)
v2 <- c(8, 2.5, 14, 9)
# Arithmetic and comparisons are vectorized.
print(v1 + v2)
print(v1 < v2)
print(v1 != v2)

v <- c("what", "is", "foo", "bar", "foo")
if ("Foo" %in% v) {
  print("one foo is found")
} else if ("foo" %in% v) {
  print("a 2nd foo is found")
} else {
  print("foo is not found")
}

v <- c("Hello", "do loop")
cnt <- 2
repeat {
  print(v)
  cnt <- cnt + 1
  if (cnt > 5) {
    break
  }
}

v <- c("Hello", "while loop")
cnt <- 2
while (cnt < 7) {
  print(v)
  cnt <- cnt + 1
}

v <- LETTERS[1:4]
for (letter in v) {
  print(letter)
}

# Standard deviation
print(sd(25:30))

new.function <- function(a, c = 42) {
  # Print the square of each index together with c.
  for (i in 1:a) {
    b <- i^2
    print(b)
    print(c)
  }
}
### new.function(3)
new.function(a = 3, c = 43)

# Concatenation
a <- "foo"
b <- "bar"
c <- "baz"
print(paste(a, b, c, sep = "-"))
cat("\n")

# Total number of digits displayed; the last digit is rounded off.
result <- format(23.123456789, digits = 9)
print(result)
# Display numbers in scientific notation
result <- format(c(6, 13.14521), scientific = TRUE)
print(result)
# The minimum number of digits to the right of the decimal point
result <- format(23.47, nsmall = 5)
print(result)
# format() treats everything as a string.
result <- format(42)
print(result)
# Left justify strings
result <- format("Hello", width = 8, justify = "l")
print(result)
cat("\n")

result <- substring("Extract", 5, 7)
print(result)
cat("\n")

print("ok")
cat("ok\n")
print(charToRaw("ABCabc"))
cat("\n")

v1 <- c(1, 2, 3, 4)
### v2 <- c(4, 5, 6, 7)
# Same as 4, 5, 4, 5 due to recycling
v2 <- c(4, 5)
vectoraddition <- v1 + v2
vectoraddition
vs <- sort(v1, decreasing = TRUE)
vs
cat("\n")

print
### help(print)

# Get library locations containing R packages
.libPaths()
# Get installed packages
library()
|
/misc/hello.R
|
permissive
|
bheckel/code
|
R
| false
| false
| 3,622
|
r
|
# R Statistical System
# $ Rscript hello.R
# Parts adapted from tutorialspoint.com
# help:
# ?lm
# ??lm # fuzzy verbose
# help(lm)
# Created: Mon 18 Apr 2016 10:46:02 (Bob Heckel)
s <- 'hello world'; print(s)
s <- 'hello world'
s
# Colon operator creates sequence
s2 <- 0:9
s2
s3 <- seq(0, 9, by=0.5)
s3
mynumeric <- 12.34
mynumeric
myinteger <- 1234L
myinteger
myvector <- c('red', 'yellow', 'blue')
myvector
# yellow & blue
myvector2 <- myvector[c(2,3)]
myvector2
mylist <- list(1234L, 'algae')
print('mylist is')
mylist
soylentlist <- list(mylist, 'soy','people', TRUE)
print('my soylent list is')
soylentlist
myarray <- array(c('green','yellow'),dim = c(3,3, 4))
myarray
# Like Excel with R1C1 notation, no field names (like frames have)
mymatrix <- matrix(c('r1c1','r1c2','r1c3', 'r2c2','r2c2','r2c3'), nrow=2, ncol=3, byrow=TRUE)
mymatrix
cat('\n')
print('r2c2:')
mymatrix[2,2]
cat('\n')
print('r2:')
mymatrix[2,]
vapple_colors <- c('green','green','yellow','red','red','red','green')
# De-dup
myfactor <- factor(vapple_colors)
myfactor
print(nlevels(myfactor))
# Frame is list of vectors of equal length
myframe <- data.frame(
gender = c('Male', 'Male', 'Female'),
height = c(152, 171.5, 165),
age = c(42, 44, 69),
mydt = as.Date(c('2016-01-01', '2016-02-01', '2016-04-16'))
)
# Add a column
myframe$dept <- c('IT', 'Ops', 'HR')
# To add a row make a new frame then newframe <- rbind(myframe, myframe2)
# Whole frame
myframe
# Details of frame
str(myframe)
# proc means
summary(myframe)
# Partial frame
print(data.frame(myframe$age, myframe$mydt))
cat('\n')
v1 <- c(2,5.5,6,9)
v2 <- c(8,2.5,14,9)
# Vector arithmetic
print(v1+v2)
print(v1<v2)
print(v1 != v2)
v <- c("what","is","foo","bar","foo")
if ( "Foo" %in% v ) {
print("one foo is found")
} else if ( "foo" %in% v ) {
print("a 2nd foo is found")
} else {
print("foo is not found")
}
v <- c("Hello","do loop")
cnt <- 2
repeat {
print(v)
cnt <- cnt+1
if (cnt > 5) {
break
}
}
v <- c("Hello","while loop")
cnt <- 2
while (cnt < 7) {
print(v)
cnt = cnt + 1
}
v <- LETTERS[1:4]
for ( i in v) {
print(i)
}
# Standard deviation
print(sd(25:30))
new.function <- function(a, c=42) {
for(i in 1:a) {
b <- i^2
print(b)
print(c)
}
}
###new.function(3)
new.function(a=3, c=43)
# Concatenation
a <- 'foo'
b <- 'bar'
c <- 'baz'
print(paste(a,b,c, sep = "-"))
cat('\n')
# Total number of digits displayed. Last digit rounded off
result <- format(23.123456789, digits = 9)
print(result)
# Display numbers in scientific notation
result <- format(c(6, 13.14521), scientific = TRUE)
print(result)
# The minimum number of digits to the right of the decimal point
result <- format(23.47, nsmall = 5)
print(result)
# Format treats everything as a string
result <- format(42)
print(result)
# Left justify strings
result <- format("Hello", width=8, justify="l")
print(result)
cat('\n')
result <- substring("Extract", 5, 7)
print(result)
cat('\n')
print('ok')
cat('ok\n')
print(charToRaw("ABCabc"))
cat('\n')
v1 <- c(1, 2, 3, 4)
###v2 <- c(4, 5, 6, 7)
# Same as 4, 5, 4, 5 due to recycling
v2 <- c(4, 5)
vectoraddition <- v1 + v2
vectoraddition
vs <- sort(v1, decreasing=TRUE)
vs
cat('\n')
print
###help(print)
# Get library locations containing R packages
.libPaths()
# Get installed packages
library()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/maximumSpendingForMinimumRuinTime.R
\name{maximumSpendingForMinimumRuinTime}
\alias{maximumSpendingForMinimumRuinTime}
\title{Maximum spending for a minimum time to ruin under stochastic returns}
\usage{
maximumSpendingForMinimumRuinTime(
wealth = 14000,
minumumRuinTime = 16,
mu = 0.03,
sigma = 0.08,
nScenarios = 200,
prob = 0.9,
seed = NULL
)
}
\arguments{
\item{wealth}{The wealth at retirement. Must be entered as a positive number}
\item{minumumRuinTime}{Minimum time to ruin. Must be entered as a positive integer}
\item{mu}{The expected real interest return per period. Default is 0.03. Must be entered as a decimal}
\item{sigma}{Volatility of the expected real interest return per period. Default is 0.08. Must be entered as a decimal}
\item{nScenarios}{The total number of scenarios to be generated. Default is 200 scenarios}
\item{prob}{Probability to exceed minimum time to ruin. Must be entered as decimal.}
\item{seed}{Integer vector, containing the random number generator (RNG) state for random number generation in R}
}
\description{
Calculates the maximum periodic spending from a given wealth such that the time to ruin exceeds a specified minimum with a given probability, under stochastic returns
}
\examples{
maximumSpendingForMinimumRuinTime(wealth=14000,minumumRuinTime = 16,mu=0.03,sigma=0.08,nScenarios=200, prob = 0.9, seed =NULL)
}
|
/man/maximumSpendingForMinimumRuinTime.Rd
|
no_license
|
eaoestergaard/UNPIE
|
R
| false
| true
| 1,382
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/maximumSpendingForMinimumRuinTime.R
\name{maximumSpendingForMinimumRuinTime}
\alias{maximumSpendingForMinimumRuinTime}
\title{Calculates scenarios of future value of annuity payments (fv) with stochastic returns}
\usage{
maximumSpendingForMinimumRuinTime(
wealth = 14000,
minumumRuinTime = 16,
mu = 0.03,
sigma = 0.08,
nScenarios = 200,
prob = 0.9,
seed = NULL
)
}
\arguments{
\item{wealth}{The wealth at retirement. Must be entered as a positive number}
\item{minumumRuinTime}{Minimum time to ruin. Must be entered as a positive integer}
\item{mu}{The expected interest real return per period. Default is zero. Must be entered as decimal}
\item{sigma}{Volatility of expected interest real return per period. Default is zero. Must be entered as decimal}
\item{nScenarios}{The total number of scenarios to be made. Default is one scenario}
\item{prob}{Probability to exceed minimum time to ruin. Must be entered as decimal.}
\item{seed}{Integer vector, containing the random number generator (RNG) state for random number generation in R}
}
\description{
Calculates scenarios of future value of annuity payments (fv) with stochastic returns
}
\examples{
maximumSpendingForMinimumRuinTime(wealth=14000,minumumRuinTime = 16,mu=0.03,sigma=0.08,nScenarios=200, prob = 0.9, seed =NULL)
}
|
library(tidyverse)
library(magrittr)
library(ggpubr)
library(igraph)
library(viridis)

EXPERIMENT_NAME <- "main_transmission_probability"
source(file.path("..", "helpers.R"))

# read and format config ---
arrow::read_feather(file.path("..", "..", "..", "experiments", EXPERIMENT_NAME, "configs.feather")) %>%
  mutate(
    Config = as.factor(Config),
    Nodes = as.numeric(Nodes),
    N0 = as.numeric(N0),
    K = as.numeric(K),
    AgentCount = as.numeric(AgentCount),
    TransmissionProb = as.numeric(TransmissionProb),
    TestProb = as.numeric(TestProb),
    QuarantineDuration = as.numeric(QuarantineDuration),
    BehaviorReductionFactor = as.numeric(BehaviorReductionFactor),
    BehaviorActivated = as.logical(BehaviorActivated),
    TickAuthorityRecommendation = as.numeric(TickAuthorityRecommendation),
    TickAuthorityPolicy = as.numeric(TickAuthorityPolicy),
    PublicSpaces = as.numeric(PublicSpaces),
    InitialInfectives = as.numeric(InitialInfectives),
    Iterations = as.numeric(Iterations)
  ) -> config

# read and format summary statistics ---
arrow::read_feather(file.path("..", "..", "..", "experiments", EXPERIMENT_NAME, "summary_statistics.feather")) %>%
  mutate(
    Config = as.factor(Config),
    Replicate = as.numeric(Replicate),
    BehaviorActivated = as.logical(BehaviorActivated),
    AgentCount = as.numeric(AgentCount),
    Nodes = as.numeric(Nodes),
    PublicSpaces = as.numeric(PublicSpaces),
    BehaviorReductionFactor = as.numeric(BehaviorReductionFactor),
    TickAuthorityRecommendation = as.numeric(TickAuthorityRecommendation),
    TickAuthorityPolicy = as.numeric(TickAuthorityPolicy),
    TransmissionProb = as.numeric(TransmissionProb),
    QuarantineDuration = as.numeric(QuarantineDuration),
    PeakInfectiveCount = as.numeric(PeakInfectiveCount),
    TickOfPeakInfectiveCount = as.numeric(TickOfPeakInfectiveCount),
    DurationOfEpidemic = as.numeric(DurationOfEpidemic),
    FractionStillSusceptible = as.numeric(FractionStillSusceptible),
    PeakInfectiveFraction = PeakInfectiveCount / AgentCount
  ) -> summary_statistics

# aggregate summary statistics ---
summary_statistics %>%
  group_by(Config, TransmissionProb, BehaviorActivated) %>%
  summarize(
    PeakInfectiveCountMean = mean(PeakInfectiveCount),
    PeakInfectiveCountSE = standard_error(PeakInfectiveCount),
    PeakInfectiveFractionMean = mean(PeakInfectiveFraction),
    PeakInfectiveFractionSE = standard_error(PeakInfectiveFraction),
    TickOfPeakInfectiveCountMean = mean(TickOfPeakInfectiveCount),
    TickOfPeakInfectiveCountSE = standard_error(TickOfPeakInfectiveCount),
    DurationOfEpidemicMean = mean(DurationOfEpidemic),
    DurationOfEpidemicSE = standard_error(DurationOfEpidemic),
    FractionStillSusceptibleMean = mean(FractionStillSusceptible),
    FractionStillSusceptibleSE = standard_error(FractionStillSusceptible)
  ) %>%
  ungroup() -> summary_statistics_aggregated

# read and format mdata ---
dir <- file.path("..", "..", "..", "experiments", EXPERIMENT_NAME)
archive_filename <- file.path(dir, "mdata.7z")
# NOTE(review): unpacking relies on a `7z` binary being available on PATH.
unpack_7z_command <- paste0("7z x ", archive_filename)
system(unpack_7z_command)

# Read one feather file per config into a preallocated list, then bind once
# (avoids growing a data frame with bind_rows inside the loop, which copies
# the accumulated frame on every iteration).
mdata_list <- vector("list", nrow(config))
for (config_key in seq_len(nrow(config))) {
  config_nr <- stringr::str_pad(config_key, 2, "left", "0")
  filename <- paste0("config_", config_nr, "_mdata.feather")
  df <- arrow::read_feather(filename)
  df$Config <- paste0("config_", config_nr)
  mdata_list[[config_key]] <- df
}
mdata <- dplyr::bind_rows(mdata_list)

# Remove the unpacked feather files portably (was: system("rm *.feather"),
# which only works where an `rm` shell command exists).
file.remove(list.files(pattern = "\\.feather$"))

mdata %<>%
  tibble() %>%
  mutate(
    Config = as.factor(Config),
    Replicate = as.numeric(Replicate),
    Step = as.numeric(Step),
    Day = floor(Step / 10),
    SCount = as.numeric(SCount),
    ECount = as.numeric(ECount),
    IuCount = as.numeric(IuCount),
    IdCount = as.numeric(IdCount),
    ICount = as.numeric(ICount),
    RCount = as.numeric(RCount),
    BCount = as.numeric(BCount),
    IdCumulative = as.numeric(IdCumulative),
    ICumulative = as.numeric(ICumulative),
    NewCasesReal = as.numeric(NewCasesReal),
    NewCasesDetected = as.numeric(NewCasesDetected)
  )

# pivot mdata long ---
mdata %>%
  pivot_longer(
    cols = c(
      SCount, ECount, IuCount, IdCount,
      ICount, RCount, BCount, IdCumulative,
      ICumulative, NewCasesReal, NewCasesDetected
    ),
    names_to = "Concept",
    values_to = "Value"
  ) %>%
  inner_join(config %>% select(Config, AgentCount), by = "Config") %>%
  mutate(Fraction = Value / AgentCount) %>%
  select(-AgentCount) %>%
  mutate(Concept = as.factor(Concept)) -> mdata_long

# summarize mdata ---
mdata %>%
  group_by(Step, Day, Config) %>%
  summarize(
    SCountMean = mean(SCount),
    SCountSE = standard_error(SCount),
    ECountMean = mean(ECount),
    ECountSE = standard_error(ECount),
    IuCountMean = mean(IuCount),
    IuCountSE = standard_error(IuCount),
    IdCountMean = mean(IdCount),
    IdCountSE = standard_error(IdCount),
    ICountMean = mean(ICount),
    ICountSE = standard_error(ICount),
    RCountMean = mean(RCount),
    RCountSE = standard_error(RCount),
    BCountMean = mean(BCount),
    BCountSE = standard_error(BCount),
    IdCumulativeMean = mean(IdCumulative),
    IdCumulativeSE = standard_error(IdCumulative),
    ICumulativeMean = mean(ICumulative),
    ICumulativeSE = standard_error(ICumulative),
    NewCasesRealMean = mean(NewCasesReal),
    NewCasesRealSE = standard_error(NewCasesReal),
    NewCasesDetectedMean = mean(NewCasesDetected),
    NewCasesDetectedSE = standard_error(NewCasesDetected)
  ) %>%
  ungroup() %>%
  mutate(
    SCountLower = SCountMean - SCountSE,
    SCountUpper = SCountMean + SCountSE,
    ECountLower = ECountMean - ECountSE,
    ECountUpper = ECountMean + ECountSE,
    IuCountLower = IuCountMean - IuCountSE,
    IuCountUpper = IuCountMean + IuCountSE,
    IdCountLower = IdCountMean - IdCountSE,
    IdCountUpper = IdCountMean + IdCountSE,
    ICountLower = ICountMean - ICountSE,
    ICountUpper = ICountMean + ICountSE,
    RCountLower = RCountMean - RCountSE,
    RCountUpper = RCountMean + RCountSE,
    BCountLower = BCountMean - BCountSE,
    BCountUpper = BCountMean + BCountSE,
    IdCumulativeLower = IdCumulativeMean - IdCumulativeSE,
    IdCumulativeUpper = IdCumulativeMean + IdCumulativeSE,
    ICumulativeLower = ICumulativeMean - ICumulativeSE,
    ICumulativeUpper = ICumulativeMean + ICumulativeSE,
    NewCasesRealLower = NewCasesRealMean - NewCasesRealSE,
    NewCasesRealUpper = NewCasesRealMean + NewCasesRealSE,
    NewCasesDetectedLower = NewCasesDetectedMean - NewCasesDetectedSE,
    NewCasesDetectedUpper = NewCasesDetectedMean + NewCasesDetectedSE
  ) %>%
  select(
    -c(
      SCountMean, SCountSE, ECountMean, ECountSE,
      IuCountMean, IuCountSE, IdCountMean, IdCountSE,
      ICountMean, ICountSE, RCountMean, RCountSE,
      BCountMean, BCountSE, IdCumulativeMean, IdCumulativeSE,
      ICumulativeMean, ICumulativeSE, NewCasesRealMean, NewCasesRealSE,
      NewCasesDetectedMean, NewCasesDetectedSE
    )
  ) -> mdata_summarized

# pivot summarized mdata long ---
mdata_summarized %>%
  pivot_longer(
    cols = c(
      SCountLower, SCountUpper, ECountLower, ECountUpper,
      IuCountLower, IuCountUpper, IdCountLower, IdCountUpper,
      ICountLower, ICountUpper, RCountLower, RCountUpper,
      BCountLower, BCountUpper, IdCumulativeLower, IdCumulativeUpper,
      ICumulativeLower, ICumulativeUpper, NewCasesRealLower, NewCasesRealUpper,
      NewCasesDetectedLower, NewCasesDetectedUpper
    ),
    names_to = "Concept",
    values_to = "Value"
  ) %>%
  inner_join(config %>% select(Config, AgentCount), by = "Config") %>%
  mutate(Class = sub("Lower|Upper", "", Concept)) %>%
  mutate(Fraction = Value / AgentCount) %>%
  select(-AgentCount) %>%
  mutate(Class = as.factor(Class)) -> mdata_summarized_long

# read and format summary agent states ---
arrow::read_feather(file.path("..", "..", "..", "experiments", EXPERIMENT_NAME, "summary_agent_states.feather")) %>%
  mutate(
    Config = as.factor(Config),
    Replicate = as.numeric(Replicate),
    Step = as.numeric(Step),
    Day = floor(Step / 10),
    FearMean = as.numeric(FearMean),
    FearSE = as.numeric(FearSE),
    FearSD = as.numeric(FearSD),
    FearMin = as.numeric(FearMin),
    FearMax = as.numeric(FearMax),
    SocialNormMean = as.numeric(SocialNormMean),
    SocialNormSE = as.numeric(SocialNormSE),
    SocialNormSD = as.numeric(SocialNormSD),
    SocialNormMin = as.numeric(SocialNormMin),
    SocialNormMax = as.numeric(SocialNormMax),
    BehaviorCount = as.numeric(BehaviorCount)
  ) -> summary_agent_states

# save and reset ---
save(
  list = c(
    "config", "mdata", "mdata_long", "mdata_summarized",
    "mdata_summarized_long", "summary_agent_states",
    "summary_statistics", "summary_statistics_aggregated"
  ),
  file = file.path("data.RData")
)
rm(archive_filename, config_key, config_nr, df, dir, filename, mdata_list, unpack_7z_command)
|
/analyses/scripts/main_transmission_probability/data_processing.R
|
permissive
|
JohannesNakayama/EpidemicModel.jl
|
R
| false
| false
| 8,922
|
r
|
library(tidyverse)
library(magrittr)
library(ggpubr)
library(igraph)
library(viridis)
EXPERIMENT_NAME <- "main_transmission_probability"
source(file.path("..", "helpers.R"))
# read and format config ---
arrow::read_feather(file.path("..", "..", "..", "experiments", EXPERIMENT_NAME, "configs.feather")) %>%
mutate(
Config = as.factor(Config),
Nodes = as.numeric(Nodes),
N0 = as.numeric(N0),
K = as.numeric(K),
AgentCount = as.numeric(AgentCount),
TransmissionProb = as.numeric(TransmissionProb),
TestProb = as.numeric(TestProb),
QuarantineDuration = as.numeric(QuarantineDuration),
BehaviorReductionFactor = as.numeric(BehaviorReductionFactor),
BehaviorActivated = as.logical(BehaviorActivated),
TickAuthorityRecommendation = as.numeric(TickAuthorityRecommendation),
TickAuthorityPolicy = as.numeric(TickAuthorityPolicy),
PublicSpaces = as.numeric(PublicSpaces),
InitialInfectives = as.numeric(InitialInfectives),
Iterations = as.numeric(Iterations)
) -> config
# read and format summary statistics ---
arrow::read_feather(file.path("..", "..", "..", "experiments", EXPERIMENT_NAME, "summary_statistics.feather")) %>%
mutate(
Config = as.factor(Config),
Replicate = as.numeric(Replicate),
BehaviorActivated = as.logical(BehaviorActivated),
AgentCount = as.numeric(AgentCount),
Nodes = as.numeric(Nodes),
PublicSpaces = as.numeric(PublicSpaces),
BehaviorReductionFactor = as.numeric(BehaviorReductionFactor),
TickAuthorityRecommendation = as.numeric(TickAuthorityRecommendation),
TickAuthorityPolicy = as.numeric(TickAuthorityPolicy),
TransmissionProb = as.numeric(TransmissionProb),
QuarantineDuration = as.numeric(QuarantineDuration),
PeakInfectiveCount = as.numeric(PeakInfectiveCount),
TickOfPeakInfectiveCount = as.numeric(TickOfPeakInfectiveCount),
DurationOfEpidemic = as.numeric(DurationOfEpidemic),
FractionStillSusceptible = as.numeric(FractionStillSusceptible),
PeakInfectiveFraction = PeakInfectiveCount / AgentCount
) -> summary_statistics
# aggregate summary statistics ---
summary_statistics %>%
group_by(Config, TransmissionProb, BehaviorActivated) %>%
summarize(
PeakInfectiveCountMean = mean(PeakInfectiveCount),
PeakInfectiveCountSE = standard_error(PeakInfectiveCount),
PeakInfectiveFractionMean = mean(PeakInfectiveFraction),
PeakInfectiveFractionSE = standard_error(PeakInfectiveFraction),
TickOfPeakInfectiveCountMean = mean(TickOfPeakInfectiveCount),
TickOfPeakInfectiveCountSE = standard_error(TickOfPeakInfectiveCount),
DurationOfEpidemicMean = mean(DurationOfEpidemic),
DurationOfEpidemicSE = standard_error(DurationOfEpidemic),
FractionStillSusceptibleMean = mean(FractionStillSusceptible),
FractionStillSusceptibleSE = standard_error(FractionStillSusceptible)
) %>%
ungroup() -> summary_statistics_aggregated
# read and format mdata ---
dir <- file.path("..", "..", "..", "experiments", EXPERIMENT_NAME)
archive_filename <- file.path(dir, "mdata.7z")
unpack_7z_command <- paste0("7z x ", archive_filename)
system(unpack_7z_command)
mdata <- data.frame()
for (config_key in 1:dim(config)[1]) {
config_nr <- stringr::str_pad(config_key, 2, "left", "0")
filename <- paste0("config_", config_nr, "_mdata.feather")
df <- arrow::read_feather(filename)
df$Config <- paste0("config_", config_nr)
mdata %<>% bind_rows(df)
}
system("rm *.feather")
mdata %<>%
tibble() %>%
mutate(
Config = as.factor(Config),
Replicate = as.numeric(Replicate),
Step = as.numeric(Step),
Day = floor(Step / 10),
SCount = as.numeric(SCount),
ECount = as.numeric(ECount),
IuCount = as.numeric(IuCount),
IdCount = as.numeric(IdCount),
ICount = as.numeric(ICount),
RCount = as.numeric(RCount),
BCount = as.numeric(BCount),
IdCumulative = as.numeric(IdCumulative),
ICumulative = as.numeric(ICumulative),
NewCasesReal = as.numeric(NewCasesReal),
NewCasesDetected = as.numeric(NewCasesDetected)
)
# pivot mdata long ---
mdata %>%
pivot_longer(
cols = c(
SCount, ECount, IuCount, IdCount,
ICount, RCount, BCount, IdCumulative,
ICumulative, NewCasesReal, NewCasesDetected
),
names_to = "Concept",
values_to = "Value"
) %>%
inner_join(config %>% select(Config, AgentCount), by = "Config") %>%
mutate(Fraction = Value / AgentCount) %>%
select(-AgentCount) %>%
mutate(Concept = as.factor(Concept)) -> mdata_long
# summarize mdata ---
mdata %>%
group_by(Step, Day, Config) %>%
summarize(
SCountMean = mean(SCount),
SCountSE = standard_error(SCount),
ECountMean = mean(ECount),
ECountSE = standard_error(ECount),
IuCountMean = mean(IuCount),
IuCountSE = standard_error(IuCount),
IdCountMean = mean(IdCount),
IdCountSE = standard_error(IdCount),
ICountMean = mean(ICount),
ICountSE = standard_error(ICount),
RCountMean = mean(RCount),
RCountSE = standard_error(RCount),
BCountMean = mean(BCount),
BCountSE = standard_error(BCount),
IdCumulativeMean = mean(IdCumulative),
IdCumulativeSE = standard_error(IdCumulative),
ICumulativeMean = mean(ICumulative),
ICumulativeSE = standard_error(ICumulative),
NewCasesRealMean = mean(NewCasesReal),
NewCasesRealSE = standard_error(NewCasesReal),
NewCasesDetectedMean = mean(NewCasesDetected),
NewCasesDetectedSE = standard_error(NewCasesDetected)
) %>%
ungroup() %>%
mutate(
SCountLower = SCountMean - SCountSE,
SCountUpper = SCountMean + SCountSE,
ECountLower = ECountMean - ECountSE,
ECountUpper = ECountMean + ECountSE,
IuCountLower = IuCountMean - IuCountSE,
IuCountUpper = IuCountMean + IuCountSE,
IdCountLower = IdCountMean - IdCountSE,
IdCountUpper = IdCountMean + IdCountSE,
ICountLower = ICountMean - ICountSE,
ICountUpper = ICountMean + ICountSE,
RCountLower = RCountMean - RCountSE,
RCountUpper = RCountMean + RCountSE,
BCountLower = BCountMean - BCountSE,
BCountUpper = BCountMean + BCountSE,
IdCumulativeLower = IdCumulativeMean - IdCumulativeSE,
IdCumulativeUpper = IdCumulativeMean + IdCumulativeSE,
ICumulativeLower = ICumulativeMean - ICumulativeSE,
ICumulativeUpper = ICumulativeMean + ICumulativeSE,
NewCasesRealLower = NewCasesRealMean - NewCasesRealSE,
NewCasesRealUpper = NewCasesRealMean + NewCasesRealSE,
NewCasesDetectedLower = NewCasesDetectedMean - NewCasesDetectedSE,
NewCasesDetectedUpper = NewCasesDetectedMean + NewCasesDetectedSE
) %>%
select(
-c(
SCountMean, SCountSE, ECountMean, ECountSE,
IuCountMean, IuCountSE, IdCountMean, IdCountSE,
ICountMean, ICountSE, RCountMean, RCountSE,
BCountMean, BCountSE, IdCumulativeMean, IdCumulativeSE,
ICumulativeMean, ICumulativeSE, NewCasesRealMean, NewCasesRealSE,
NewCasesDetectedMean, NewCasesDetectedSE
)
) -> mdata_summarized
# pivot summarized mdata long ---
mdata_summarized %>%
pivot_longer(
cols = c(
SCountLower, SCountUpper, ECountLower, ECountUpper,
IuCountLower, IuCountUpper, IdCountLower, IdCountUpper,
ICountLower, ICountUpper, RCountLower, RCountUpper,
BCountLower, BCountUpper, IdCumulativeLower, IdCumulativeUpper,
ICumulativeLower, ICumulativeUpper, NewCasesRealLower, NewCasesRealUpper,
NewCasesDetectedLower, NewCasesDetectedUpper
),
names_to = "Concept",
values_to = "Value"
) %>%
inner_join(config %>% select(Config, AgentCount), by = "Config") %>%
mutate(Class = sub("Lower|Upper", "", Concept)) %>%
mutate(Fraction = Value / AgentCount) %>%
select(-AgentCount) %>%
mutate(Class = as.factor(Class)) -> mdata_summarized_long
# read and format summary agent states ---
arrow::read_feather(file.path("..", "..", "..", "experiments", EXPERIMENT_NAME, "summary_agent_states.feather")) %>%
mutate(
Config = as.factor(Config),
Replicate = as.numeric(Replicate),
Step = as.numeric(Step),
Day = floor(Step / 10),
FearMean = as.numeric(FearMean),
FearSE = as.numeric(FearSE),
FearSD = as.numeric(FearSD),
FearMin = as.numeric(FearMin),
FearMax = as.numeric(FearMax),
SocialNormMean = as.numeric(SocialNormMean),
SocialNormSE = as.numeric(SocialNormSE),
SocialNormSD = as.numeric(SocialNormSD),
SocialNormMin = as.numeric(SocialNormMin),
SocialNormMax = as.numeric(SocialNormMax),
BehaviorCount = as.numeric(BehaviorCount)
) -> summary_agent_states
# save and reset ---
save(
list = c(
"config", "mdata", "mdata_long", "mdata_summarized",
"mdata_summarized_long", "summary_agent_states",
"summary_statistics", "summary_statistics_aggregated"
),
file = file.path("data.RData")
)
rm(archive_filename, config_key, config_nr, df, dir, filename, unpack_7z_command)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ResourceFiles.R
\name{getCohortsToDeriveTarget}
\alias{getCohortsToDeriveTarget}
\title{Get the cohorts to derive from the resource file}
\usage{
getCohortsToDeriveTarget()
}
\description{
Reads the settings in /inst/settings/CohortsToDeriveTarget.csv
}
|
/man/getCohortsToDeriveTarget.Rd
|
permissive
|
harryreyesnieva/HERACharacterization
|
R
| false
| true
| 332
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ResourceFiles.R
\name{getCohortsToDeriveTarget}
\alias{getCohortsToDeriveTarget}
\title{Get the cohorts to derive from the resource file}
\usage{
getCohortsToDeriveTarget()
}
\description{
Reads the settings in /inst/settings/CohortsToDeriveTarget.csv
}
|
library(influenceR)
### Name: bridging
### Title: Valente's Bridging vertex measure.
### Aliases: bridging
### ** Examples
# NOTE(review): the random graph depends on RNG state; call set.seed() first
# for a reproducible example -- TODO confirm intended.
ig.ex <- igraph::erdos.renyi.game(100, p.or.m=0.3) # generate an undirected 'igraph' object
bridging(ig.ex) # bridging scores for each node in the graph
|
/data/genthat_extracted_code/influenceR/examples/bridging.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 283
|
r
|
library(influenceR)
### Name: bridging
### Title: Valente's Bridging vertex measure.
### Aliases: bridging
### ** Examples
ig.ex <- igraph::erdos.renyi.game(100, p.or.m=0.3) # generate an undirected 'igraph' object
bridging(ig.ex) # bridging scores for each node in the graph
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/HomogeneousEnsemble.R
\name{getHomogeneousEnsembleModels}
\alias{getHomogeneousEnsembleModels}
\title{Returns the list of fitted models.}
\usage{
getHomogeneousEnsembleModels(model, learner.models = FALSE)
}
\arguments{
\item{model}{[\code{\link[mlr]{WrappedModel}}]\cr
Model produced by training a learner of homogeneous models.}
\item{learner.models}{[\code{logical(1)}]\cr
Return underlying R models or wrapped
mlr models (\code{\link[mlr]{WrappedModel}}).
Default is \code{FALSE}.}
}
\value{
[\code{list}].
}
\description{
Returns the list of fitted models.
}
|
/man/getHomogeneousEnsembleModels.Rd
|
no_license
|
dickoa/mlr
|
R
| false
| false
| 652
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/HomogeneousEnsemble.R
\name{getHomogeneousEnsembleModels}
\alias{getHomogeneousEnsembleModels}
\title{Returns the list of fitted models.}
\usage{
getHomogeneousEnsembleModels(model, learner.models = FALSE)
}
\arguments{
\item{model}{[\code{\link[mlr]{WrappedModel}}]\cr
Model produced by training a learner of homogeneous models.}
\item{learner.models}{[\code{logical(1)}]\cr
Return underlying R models or wrapped
mlr models (\code{\link[mlr]{WrappedModel}}).
Default is \code{FALSE}.}
}
\value{
[\code{list}].
}
\description{
Returns the list of fitted models.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DTRFunctions.R
\name{diurnal_temp_variation_sine}
\alias{diurnal_temp_variation_sine}
\title{Hourly Temperature Variation assuming a Sine Interpolation}
\usage{
diurnal_temp_variation_sine(T_max, T_min, t)
}
\arguments{
\item{T_max, T_min}{\code{numeric} maximum and minimum daily temperatures (C).}
\item{t}{\code{numeric} time for temperature estimate (hour).}
}
\value{
\code{numeric} temperature (C) at a specified hour.
}
\description{
The function estimates temperature for a specified hour using the sine interpolation in \insertCite{Campbell1998;textual}{TrenchR}.
}
\examples{
diurnal_temp_variation_sine(T_max = 30,
T_min = 10,
t = 11)
}
\references{
\insertAllCited{}
}
\seealso{
Other microclimate functions:
\code{\link{air_temp_profile_neutral}()},
\code{\link{air_temp_profile_segment}()},
\code{\link{air_temp_profile}()},
\code{\link{degree_days}()},
\code{\link{direct_solar_radiation}()},
\code{\link{diurnal_radiation_variation}()},
\code{\link{diurnal_temp_variation_sineexp}()},
\code{\link{diurnal_temp_variation_sinesqrt}()},
\code{\link{monthly_solar_radiation}()},
\code{\link{partition_solar_radiation}()},
\code{\link{proportion_diffuse_solar_radiation}()},
\code{\link{solar_radiation}()},
\code{\link{surface_roughness}()},
\code{\link{wind_speed_profile_neutral}()},
\code{\link{wind_speed_profile_segment}()}
}
\concept{microclimate functions}
|
/man/diurnal_temp_variation_sine.Rd
|
permissive
|
trenchproject/TrenchR
|
R
| false
| true
| 1,525
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DTRFunctions.R
\name{diurnal_temp_variation_sine}
\alias{diurnal_temp_variation_sine}
\title{Hourly Temperature Variation assuming a Sine Interpolation}
\usage{
diurnal_temp_variation_sine(T_max, T_min, t)
}
\arguments{
\item{T_max, T_min}{\code{numeric} maximum and minimum daily temperatures (C).}
\item{t}{\code{numeric} time for temperature estimate (hour).}
}
\value{
\code{numeric} temperature (C) at a specified hour.
}
\description{
The function estimates temperature for a specified hour using the sine interpolation in \insertCite{Campbell1998;textual}{TrenchR}.
}
\examples{
diurnal_temp_variation_sine(T_max = 30,
T_min = 10,
t = 11)
}
\references{
\insertAllCited{}
}
\seealso{
Other microclimate functions:
\code{\link{air_temp_profile_neutral}()},
\code{\link{air_temp_profile_segment}()},
\code{\link{air_temp_profile}()},
\code{\link{degree_days}()},
\code{\link{direct_solar_radiation}()},
\code{\link{diurnal_radiation_variation}()},
\code{\link{diurnal_temp_variation_sineexp}()},
\code{\link{diurnal_temp_variation_sinesqrt}()},
\code{\link{monthly_solar_radiation}()},
\code{\link{partition_solar_radiation}()},
\code{\link{proportion_diffuse_solar_radiation}()},
\code{\link{solar_radiation}()},
\code{\link{surface_roughness}()},
\code{\link{wind_speed_profile_neutral}()},
\code{\link{wind_speed_profile_segment}()}
}
\concept{microclimate functions}
|
/Endre løsmasser fra vektor til raster.r
|
no_license
|
NINAnor/stisykling
|
R
| false
| false
| 1,163
|
r
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.