content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{rep.col}
\alias{rep.col}
\title{replicate a column vector into a matrix}
\usage{
\method{rep}{col}(x, n)
}
\arguments{
\item{x}{the column vector to be replicated}
\item{n}{replicate the column vector n times}
}
\value{
a matrix of dimension \code{length(x)} by \code{n}
}
\description{
replicate a column vector into a matrix
}
\keyword{internal}
| /man/rep.col.Rd | no_license | cran/synfd | R | false | true | 470 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{rep.col}
\alias{rep.col}
\title{replicate a column vector into a matrix}
\usage{
\method{rep}{col}(x, n)
}
\arguments{
\item{x}{the column vector to be replicated}
\item{n}{replicate the column vector n times}
}
\value{
a matrix of dimension \code{length(x)} by \code{n}
}
\description{
replicate a column vector into a matrix
}
\keyword{internal}
|
## makeCacheMatrix: build a "cache-aware matrix" wrapper.
## Instead of recalculating the inverse of an identical matrix each time,
## callers can store the inverse once and read it back later.
## 'x' is the matrix to be wrapped (and eventually inverted).
## Returns a list of accessor closures sharing one environment:
##   get()                - return the stored matrix
##   set(y)               - replace the stored matrix, dropping any cached inverse
##   setInverse(inverted) - store a computed inverse in the cache
##   getInverse()         - return the cached inverse (NULL if not yet set)
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  list(
    set = function(y) {
      x <<- y
      inv_cache <<- NULL  # a new matrix invalidates the old inverse
    },
    get = function() x,
    setInverse = function(inverted) inv_cache <<- inverted,
    getInverse = function() inv_cache
  )
}
## cacheSolve: return the inverse of a matrix wrapped by makeCacheMatrix().
## If an inverse is already cached, reuse it (announcing the cache hit);
## otherwise compute it with solve(), store it in the cache, and return it.
## 'x'   - the wrapper list produced by makeCacheMatrix()
## '...' - extra arguments forwarded to solve()
## Returns a matrix that is the inverse of the wrapped matrix.
cacheSolve <- function(x, ...) {
  inverse <- x$getInverse()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setInverse(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
| /cachematrix.R | no_license | amedvedchuk/ProgrammingAssignment2 | R | false | false | 1,463 | r | ## Functions are used to cache inversion of matrix operation
## Cache-aware matrix wrapper: avoids re-solving the inverse of the same
## matrix over and over by letting callers stash the result once.
## Argument 'x': the matrix to wrap.
## Value: a list of four closures sharing one environment:
##   set(y)               -- swap in a new matrix, clearing the stale cache
##   get()                -- read the current matrix
##   setInverse(inverted) -- cache a freshly computed inverse
##   getInverse()         -- read the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  setter <- function(y) {
    x <<- y
    cached <<- NULL  # replacing the matrix invalidates the cached inverse
  }
  getter <- function() x
  list(set = setter,
       get = getter,
       setInverse = function(inverted) cached <<- inverted,
       getInverse = function() cached)
}
## Return the inverse of the matrix held in wrapper 'x' (a makeCacheMatrix
## list). A cache hit is announced and returned immediately; on a miss the
## inverse is computed via solve(), cached for next time, and returned.
## Additional arguments in '...' are passed straight through to solve().
cacheSolve <- function(x, ...) {
  hit <- x$getInverse()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  fresh <- solve(x$get(), ...)
  x$setInverse(fresh)
  fresh
}
|
# Pitch Analysis of Zach Greinke's 2015 season
# This analysis will cover pitch types, velocity over time, pitch selection for given counts,
# pitch selection early vs. late in games, locations, etc.. Focus will be on Zack Greinke's excellent
# 2015 season, and July in particular. Later, there are heat maps with contact rate across the plate,
# and average exit velocity.
# Work on pitch location is not found in this code, but can be found online
# at https://rmathis.shinyapps.io/pitchfxwebapp/ as an interactive app.
# Load basic packages, more added later as needed.
# readr is required for read_csv() below -- it is not attached by any of the
# other packages, so without it this script fails at the load step.
library(dplyr)
library(ggplot2)
library(tidyr)
library(lubridate)
library(readr)
# Load the data on Zack Greinke 2015 season (one row per pitch)
greinke <- read_csv("https://assets.datacamp.com/production/course_943/datasets/greinke2015.csv")
head(greinke)
dim(greinke)
# Check for missing data: count NAs in each column
colSums(apply(greinke, 2, FUN = is.na))
summary(greinke)
# Inspect the rows where break_angle or start_speed is missing
greinke[is.na(greinke$break_angle), ]
greinke[is.na(greinke$start_speed), ]
# Clean up the three pitches with NA data by dropping rows missing start_speed
greinke <- subset(greinke, subset = !is.na(greinke$start_speed))
# Check structure
str(greinke)
# Correct dates: parse month-day-year strings into Date objects
greinke$game_date <- mdy(greinke$game_date)
class(greinke$game_date)
# Separate the months: split game_date into year/month/day columns,
# keeping the original date column (remove = FALSE)
greinke <- separate(data = greinke, col = game_date, into = c("year", "month", "day"), sep = "-", remove = FALSE)
greinke$month <- as.numeric(greinke$month)
# Isolate the month of July with a two-level flag ("july" vs. "other")
greinke$july <- ifelse(greinke$month == 7, "july", "other")
# Check results
head(greinke)
summary(factor(greinke$july))
# Plot pitch speeds: overall distribution of release speed
summary(greinke$start_speed)
greinke %>%
ggplot(aes(x = start_speed)) +
geom_histogram(binwidth = 1)
# Clearly multi-modal: each pitch type occupies its own speed range
# Plot by pitch type
greinke %>%
ggplot(aes(x = start_speed)) +
geom_histogram() +
facet_wrap(~pitch_type)
# Plot speeds of main pitches in the same plot
# (FF = 4-seam, FT = 2-seam, SL = slider, CH = changeup, CU = curveball)
greinke %>%
filter(pitch_type %in% c("FF", "FT","SL", "CH", "CU")) %>%
ggplot(aes(x = start_speed, fill = pitch_type)) +
geom_bar(aes(color = pitch_type), position = "dodge", alpha = 0.4)
# Examine the mean pitch speed for each of the five main pitch types
greinke %>%
filter(pitch_type %in% c("FF", "FT","SL", "CH", "CU")) %>%
group_by(pitch_type) %>%
summarize(mean_speed = mean(start_speed))
# Examine the four seam fastball velocity in more detail, month by month
greinke %>%
filter(pitch_type == "FF") %>%
group_by(month) %>%
summarize(mean_velocity = mean(start_speed))
# Create a boxplot of 4-seam fastball speed by month
greinke %>%
filter(pitch_type == "FF") %>%
group_by(month) %>%
ggplot(aes(x = factor(month), y = start_speed)) +
geom_boxplot(aes(group = month)) +
labs(x = "Month", y = "Velocity (MPH)", title = "Greinke four-seam fastball speed by month")
# It looks like fastball velocity improved from the beginning of the season onward
# Let's examine the month of July more closely, when velocity began to peak
july_ff <- subset(x = greinke, subset = pitch_type == "FF" & month == 7)
other_ff <- subset(x = greinke, subset = pitch_type == "FF" & month != 7)
# Make a fastball speed histogram for other months
# (density scale so the two groups of different sizes are comparable)
hist(other_ff$start_speed,
col = "#00009950", freq = FALSE,
ylim = c(0, .35), xlab = "Velocity (mph)",
main = "Greinke 4-Seam Fastball Velocity")
# Overlay a histogram for July (translucent red) on the same axes
hist(july_ff$start_speed, add = TRUE, col = "#99000050", freq = FALSE)
# Draw vertical lines at the means of the two fastball histograms
abline(v = mean(other_ff$start_speed), col = "#00009950", lwd = 2)
abline(v = mean(july_ff$start_speed), col = "#99000050", lwd = 2)
# Table mean and median velocity by month
# (note: greinke here still contains all pitch types, not just the 4-seam)
monthAvg <- data.frame(tapply(X = greinke$start_speed, INDEX = greinke$month, FUN = mean))
monthAvg[[2]] <- tapply(X = greinke$start_speed, INDEX = greinke$month, FUN = median)
names(monthAvg) <- c("mean", "median")
monthAvg
# Look at the four-seam fastball velocity by game
greinke_ff <- subset(greinke, subset = pitch_type == "FF")
ff_dt <- data.frame(tapply(greinke_ff$start_speed, greinke_ff$game_date, mean))
head(ff_dt)
# tapply left the game dates as row names; move them into a proper Date column
ff_dt$game_date <- ymd(rownames(ff_dt))
colnames(ff_dt) <- c("start_speed", colnames(ff_dt)[-1])
row.names(ff_dt) <- NULL
head(ff_dt)
# Plot game-by-game average 4-seam fastball velocity as a line
plot(ff_dt$start_speed ~ ff_dt$game_date,
lwd = 4, type = "l", ylim = c(88, 95),
main = "Greinke 4-Seam Fastball Velocity",
xlab = "Date", ylab = "Velocity (MPH)")
# Add the individual pitches, jittered horizontally to reduce overplotting
points(greinke_ff$start_speed ~ jitter(as.numeric(greinke_ff$game_date)), pch = 16, col = "#99004450")
### Explore pitch mix in greater detail
# Start by removing the rare eephus (EP) pitches and intentional balls (IN)
greinke <- greinke[-c(which(greinke$pitch_type == "EP" | greinke$pitch_type == "IN")), ]
table(greinke$pitch_type, greinke$month)
# Column-wise proportions: pitch mix within each month
round(prop.table(table(greinke$pitch_type, greinke$month), margin = 2), 3)
# Specifically look at the proportion of pitches in July vs. all other months combined
type_prop <- round(prop.table(table(greinke$pitch_type, greinke$july), margin = 2), 3)
type_prop <- as.data.frame(type_prop)
type_prop <- spread(type_prop, Var2, Freq)
# Relative change in usage: (july - other) / other
type_prop$Difference <- (type_prop$july - type_prop$other) / type_prop$other
# Plot the change in pitch selection in the month of July
barplot(type_prop$Difference, names.arg = type_prop$Var1,
main = "Pitch Usage in July vs. Other Months",
ylab = "Percentage Change in July",
ylim = c(-0.3, 0.3))
# Explore the pitch usage across ball-strike counts
# Create a ball-strike count column, e.g. "0-2", "3-1"
greinke$bs_count <- paste(greinke$balls, greinke$strikes, sep = "-")
# Create bs_count_tab: counts per ball-strike count, July vs. other months
bs_count_tab <- table(greinke$bs_count, greinke$july)
bs_count_tab
# Create bs_month: column-wise proportions of each count
bs_month <- round(prop.table(bs_count_tab, margin = 2),3)
# Print bs_month
bs_month
# Relative change in each count's frequency in July vs. other months
diff_bs <- round((bs_month[ , 1] - bs_month[ , 2]) / bs_month[ , 2], 3)
# Create a bar plot of the changes
barplot(diff_bs, main = "Ball-Strike Count Rate in July vs. Other Months",
ylab = "Percentage Change in July", ylim = c(-0.15, 0.15), las = 2)
# Clearly there were more batter friendly counts in July
# Examine pitch selection within each ball-strike count
type_bs <- table(greinke$pitch_type, greinke$bs_count)
round(prop.table(type_bs, margin = 2), 3)
# Investigate if pitch selection changes late in game (late = inning 6 or later)
greinke$late <- ifelse(greinke$inning > 5, 1, 0)
late_table <- round(prop.table(table(greinke$pitch_type, factor(greinke$late)), margin = 2), 3)
late_table <- t(late_table)
rownames(late_table) <- c("Early", "Late")
# Plot early pitch selection against later pitch selection
barplot(late_table, beside = TRUE, col = c("red", "blue"),
main = "Early vs. Late In Game Pitch Selection",
ylab = "Pitch Selection Proportion",
legend = rownames(late_table))
# Investigate pitch location
# Average pitch height (pz is in feet; * 12 converts to inches)
# by batter side and pitch type
greinke %>% group_by(batter_stand, pitch_type) %>%
summarise(avg_pitch_height = mean(pz) * 12) %>%
spread(batter_stand, avg_pitch_height)
# Look at pitch height in July vs. other months (inches)
tapply(greinke$pz, greinke$july, mean) * 12
# Separate the data into left and right handed batters
greinke_lhb <- subset(greinke, batter_stand == "L")
greinke_rhb <- subset(greinke, batter_stand == "R")
# Compare the average horizontal position (inches) for LHB and RHB,
# July vs. other months
tapply(greinke_lhb$px, greinke_lhb$july, mean) * 12
tapply(greinke_rhb$px, greinke_rhb$july, mean) * 12
# Plot an empty pitch location window (catcher's view)
plot(x = c(-2, 2), y = c(0, 5), type = "n",
main = "Greinke Locational Zone Proportions",
xlab = "Horizontal Location (ft.; Catcher's View)",
ylab = "Vertical Location (ft.)")
# Add the grid lines
grid(lty = "solid", col = "black")
# Or we could do it with ggplot2: 0-2 pitches, point size mapped to speed,
# with the strike zone drawn as a rectangle
p <- greinke %>%
filter(bs_count == "0-2") %>%
ggplot(aes(x = px, y = pz, size = start_speed)) +
geom_point(aes(color = pitch_type), alpha = 0.6) +
annotate("rect", ymin = 1.5, ymax = 3.4, xmin = -0.83, xmax = 0.83, color = "blue", alpha = 0.2) +
labs(title = "Greinke Pitch Location on 0-2 Count",
x = "Horizontal Location (ft. from plate)",
y = "Vertical Location (ft.)",
color = "Pitch") +
facet_grid(~batter_stand)
# Use the plotly library to make the chart interactive
library(plotly)
ggplotly(p)
# Boxplot of pitch speed by pitch type.
# (The original piped through select(all), which errors: `all` is a base
# function, not a column. No column selection is needed for this plot.)
greinke %>%
  ggplot(aes(x = pitch_type, y = start_speed)) +
  geom_boxplot()
# Examine at bat results to determine if increased fastball velocity resulted in lower contact rate
greinke_ff$bs_count <- paste(greinke_ff$balls, greinke_ff$strikes, sep = "-")
# Create a vector of no-swing results
no_swing <- c("Ball", "Called Strike", "Ball In Dirt", "Hit By Pitch")
# Create a variable which is 1 if the batter took a hack, 0 otherwise
greinke_ff$batter_swing <- ifelse(greinke_ff$pitch_result %in% no_swing, 0, 1)
# Create a subset of fastball pitches the batter swung at
swing_ff <- subset(greinke_ff, greinke_ff$batter_swing == 1)
# Create a contact variable: 1 when the swing made contact, 0 on a whiff
no_contact <- c("Swinging Strike", "Missed Bunt")
swing_ff$contact <- ifelse(swing_ff$pitch_result %in% no_contact, 0, 1)
# Find the mean 4-seam fastball velocity
mean(swing_ff$start_speed)
# Bin the velocities into Slow (< 90.5), Medium ([90.5, 92.5)) and Fast (>= 92.5).
# Bug fix: the original used `> 92.5` for Fast, so pitches at exactly
# 92.5 mph fell through every branch and stayed NA.
swing_ff$velo_bin <- ifelse(swing_ff$start_speed < 90.5, "Slow", NA)
swing_ff$velo_bin <- ifelse(swing_ff$start_speed >= 90.5 & swing_ff$start_speed < 92.5, "Medium", swing_ff$velo_bin)
swing_ff$velo_bin <- ifelse(swing_ff$start_speed >= 92.5, "Fast", swing_ff$velo_bin)
# Aggregate contact rate by velocity bin
tapply(X = swing_ff$contact, INDEX = swing_ff$velo_bin, FUN = mean)
# Examine the contact rate across pitch types (all pitches, not just 4-seamers)
swing <- greinke[-which(greinke$pitch_result %in% no_swing), ]
table(swing$pitch_result)
# Create the contact column (1 = contact, 0 = whiff)
swing$contact <- ifelse(swing$pitch_result %in% no_contact, 0, 1)
# Contact rate by pitch type
swing %>%
group_by(pitch_type) %>%
summarize(contact_rate = mean(contact))
# Check the contact rate across speed terciles;
# thirds gives the tercile breakpoints for quantile()
thirds = c(0, 1/3, 2/3, 1)
nrow(swing)
# Apply the quantile function per pitch type (returns a list)
lapply(split(swing$start_speed, as.factor(swing$pitch_type)), FUN = quantile, probs = thirds)
# Could have used tapply for the same result
tapply(swing$start_speed, INDEX = swing$pitch_type, FUN = quantile, probs = thirds)
# To get a matrix instead of a list, loop over the pitch types
types <- unique(swing$pitch_type)
pitch_quantiles <- NULL
for(type in types){
pitch_quantiles <- cbind(pitch_quantiles, quantile(swing$start_speed[swing$pitch_type == type], probs = thirds))
}
# Clean up and print (one column of tercile breakpoints per pitch type)
colnames(pitch_quantiles) <- types
pitch_quantiles
# Trying a different way to bin pitch quantiles within the swing dataframe
# Assign each speed to a tercile bin (1 = slowest third, 3 = fastest third).
# `probs` defaults to the tercile breakpoints; the original read the global
# `thirds`, which made the function depend on hidden state. The default equals
# `thirds`, so existing one-argument calls behave identically.
bin_pitch_speed <- function(start_speed, probs = c(0, 1/3, 2/3, 1)){
as.integer(cut(start_speed, quantile(start_speed, probs = probs), include.lowest = TRUE))
}
# Test it on the curveballs
mean(bin_pitch_speed(swing$start_speed[swing$pitch_type == "CU"]))
# Apply it to make sure it works for all pitch types
tapply(swing$start_speed, INDEX = swing$pitch_type, FUN = bin_pitch_speed)
# Create a placeholder column
swing$velo_bin <- NA
# Loop over the pitch types and bin the velocities within each type
for(type in types){
swing$velo_bin[swing$pitch_type == type] <- bin_pitch_speed(swing$start_speed[swing$pitch_type == type])
}
# The same per-group binning, done with dplyr instead of a loop
swing <- swing %>%
group_by(pitch_type) %>%
mutate(velo_bin = bin_pitch_speed(start_speed))
# Check the contact rate by pitch type and binned velocity
swing %>%
group_by(pitch_type, velo_bin) %>%
summarize(contact_rate = mean(contact)) %>%
spread(velo_bin, contact_rate)
# Check for differences for right vs. left batters
swing %>%
group_by(batter_stand, pitch_type, velo_bin) %>%
summarize(contact_rate = mean(contact)) %>%
spread(velo_bin, contact_rate)
# How many pitches of each type were thrown with a 2-strike count
table(swing[swing$strikes == 2, "pitch_type"])
# Contact rate of each pitch type in a two-strike count
swing %>%
filter(strikes ==2) %>%
group_by(pitch_type) %>%
summarize(avg = mean(contact))
# Bin the pitch location data into a 4 (horizontal) x 5 (vertical) grid;
# x_bin runs 1..4 left-to-right, y_bin runs 1..5 bottom-to-top
pitch_bins <- greinke %>%
filter(px > -2 & px < 2 & pz > 0 & pz < 5) %>%
select(batter_stand, pitch_type, start_speed, px, pz) %>%
mutate(x_bin = as.numeric(cut(px, seq(-2, 2, 1), include.lowest = TRUE)),
y_bin = as.numeric(cut(pz, seq(0, 5, 1), include.lowest = TRUE)))
head(pitch_bins, 10)
# Create a table of counts of pitch locations
bin_tab <- table(pitch_bins$y_bin, pitch_bins$x_bin)
bin_tab
# Convert to a proportion table
pitch_prop <- round(prop.table(bin_tab), 3)
as.data.frame(pitch_prop)
# Convert to a data frame and plot the proportions at their grid positions
data.frame(pitch_prop) %>%
ggplot(aes(x = Var2, y = Var1, label = Freq)) +
geom_text(size = 10) +
annotate("rect", xmin = 1.5, xmax = 3.5, ymin = 1.5, ymax = 4.5, col = "blue", fill = 0) +
labs(x = "Pitch location from center of plate", y = "Pitch height from plate")
# Complete the whole process in one pipeline
# Select left batters only
pitch_bins %>%
filter(batter_stand == "L") %>%
select(y_bin, x_bin) %>%
table() %>%
prop.table() %>%
round(3) %>%
as.data.frame() %>%
ggplot(aes(x = x_bin, y = y_bin, label = Freq)) +
geom_text(size = 10) +
annotate("rect", xmin = 1.5, xmax = 3.5, ymin = 1.5, ymax = 4.5, col = "blue", fill = 0) +
labs(x = "Pitch location from center of plate", y = "Pitch height from plate") +
ggtitle("Left Batter View") +
theme_classic() +
# NOTE(review): labels c(-2, 1, 1, 2) look off for four bins spanning -2..2 ft;
# presumably c(-2, -1, 1, 2) was intended -- confirm before relying on the axis.
scale_x_discrete( labels = c(-2, 1, 1, 2))
# Make it easier to plot and analyze pitch locations by creating a
# pitch location grid of 20 cell centers (4 columns x 5 rows)
# Create vector px: bin-center x coordinates, repeated across the 5 rows
px <- rep(seq(-1.5, 1.5, 1), times = 5)
# Create vector pz: bin-center heights, one row of 4 zones at a time,
# starting at the TOP (pz = 4.5) and working down
pz <- rep(seq(4.5, 0.5, -1), each = 4)
# Create vector of zone numbers (1..20, left-to-right, top-to-bottom)
zone <- seq(1, 20, 1)
# Create locgrid for plotting: zone number -> cell-center coordinates
locgrid <- data.frame(zone = zone, px = px, pz = pz)
# Create a bin template mapping each zone number to its (x_bin, y_bin)
# grid cell. Zones are numbered left-to-right, top-to-bottom, so y_bin must
# DESCEND across zone rows: the top row (locgrid pz = 4.5) is y_bin 5
# (pz in (4, 5] per the cut() breaks). The original used seq(1, 5, 1),
# which mapped the bottom bin to the top row and vertically flipped every
# joined heat map.
bin_template <- data.frame(zone = zone,
x_bin = rep(seq(1, 4, 1), times = 5),
y_bin = rep(seq(5, 1, -1), each = 4))
# Join the zone number onto each binned pitch via its (x_bin, y_bin) cell.
# dplyr join functions take `by =`; the original passed `on =`, which is not
# a join argument, so the join specification was never applied explicitly.
pitch_bins <- pitch_bins %>% left_join(bin_template, by = c("x_bin", "y_bin"))
head(pitch_bins)
# Load plotting helpers: grid layouts and color palettes
library(gridExtra)
library(RColorBrewer)
# Generate a clean data frame with contact data for left and right handed
# batters, then assign a bin and replace the px and pz data with the grid
# coordinates. Fixes vs. the original: both joins used `on =`, which dplyr
# joins do not accept (the spec also contained a `y_bon` typo); use an
# explicit `by =` specification instead.
swings <- swing %>%
  filter(px > -2 & px < 2 & pz > 0 & pz < 5) %>%
  select(batter_stand, pitch_type, atbat_result, px, pz, balls, strikes, contact, batted_ball_velocity) %>%
  mutate(x_bin = as.numeric(cut(px, seq(-2, 2, 1), include.lowest = TRUE)),
         y_bin = as.numeric(cut(pz, seq(0, 5, 1), include.lowest = TRUE))) %>%
  left_join(bin_template, by = c("x_bin", "y_bin")) %>%
  select(batter_stand, pitch_type, atbat_result, balls, strikes, contact, batted_ball_velocity, x_bin, y_bin, zone) %>%
  left_join(locgrid, by = "zone")
head(swings)
# Use the swings data frame to plot contact-rate heat maps, faceted by
# batter side, with the tile centers at the locgrid coordinates
swings %>%
group_by(batter_stand, zone) %>%
mutate(contact_rate = mean(contact)) %>%
ungroup() %>%
ggplot(aes(x = px, y = pz)) +
geom_tile(aes(fill = contact_rate)) +
scale_fill_gradientn(name = "Contact Rate",
limits = c(0.5, 1),
breaks = seq(from = 0.5, to = 1, by = 0.1),
colors = c(brewer.pal(n = 7, name = "Reds"))) +
xlim(-2, 2) + ylim(0, 5) +
ggtitle("Contact Rates") +
labs(x = "Horizontal Location (ft.)", y = "Vertical Location (ft.)") +
geom_text(aes(x = px, y = pz, label = round(contact_rate, 3))) +
annotate("rect", xmin = -1, xmax = 1, ymin = 1, ymax = 4, col = "blue", fill = 0) +
facet_grid(~batter_stand)
# Explore batted ball exit velocity, averaged by at-bat result
tapply(swings$batted_ball_velocity, INDEX = swings$atbat_result, FUN = mean, na.rm = TRUE)
subset(swings, subset = contact == 1 & !is.na(batted_ball_velocity))
# Build a heat map of exit velocities
# (contacted balls with a measured exit speed only)
swings %>%
filter(contact == 1 & !is.na(batted_ball_velocity)) %>%
group_by(batter_stand, zone) %>%
mutate(exit_speed = mean(batted_ball_velocity)) %>%
ungroup() %>%
ggplot(aes(x = px, y = pz)) +
geom_tile(aes(fill = exit_speed)) +
scale_fill_gradientn(name = "Exit Speed",
limits = c(60, 100),
breaks = seq(from = 60, to = 100, by = 5),
colors = c(brewer.pal(n = 5, name = "Reds"))) +
facet_grid(~batter_stand) +
geom_text(aes(x = px, y = pz, label = round(exit_speed))) +
annotate("rect", xmin = -1, xmax = 1, ymin = 1, ymax = 4, col = "blue", fill = 0) +
ggtitle("Batted Ball Exit Velocity") +
labs(x = "Horizontal Position From Center of Plate", y = "Vertical Distance From Plate")
| /Analysis_Greinke_Pitches.R | no_license | RomeoAlphaYankee/DataScienceR | R | false | false | 16,711 | r | # Pitch Analysis of Zach Greinke's 2015 season
# This analysis will cover pitch types, velocity over time, pitch selection for given counts,
# pitch selection early vs. late in games, locations, etc.. Focus will be on Zack Greinke's excellent
# 2015 season, and July in particular. Later, there are heat maps with contact rate across the plate,
# and average exit velocity.
# Work on pitch location is not found in this code, but can be found online
# at https://rmathis.shinyapps.io/pitchfxwebapp/ as an interactive app.
# Load basic packages, more added later as needed.
# readr must be attached for read_csv() below (none of the other packages
# provide it, so the script would otherwise fail at the load step).
library(dplyr)
library(ggplot2)
library(tidyr)
library(lubridate)
library(readr)
# Load the data on Zack Greinke 2015 season (one row per pitch)
greinke <- read_csv("https://assets.datacamp.com/production/course_943/datasets/greinke2015.csv")
head(greinke)
dim(greinke)
# Check for missing data
colSums(apply(greinke, 2, FUN = is.na))
summary(greinke)
greinke[is.na(greinke$break_angle), ]
greinke[is.na(greinke$start_speed), ]
# Clean up the three pitches with NA data
greinke <- subset(greinke, subset = !is.na(greinke$start_speed))
# Check structure
str(greinke)
# Correct dates
greinke$game_date <- mdy(greinke$game_date)
class(greinke$game_date)
# Separate the months
greinke <- separate(data = greinke, col = game_date, into = c("year", "month", "day"), sep = "-", remove = FALSE)
greinke$month <- as.numeric(greinke$month)
# Isolate the month of July
greinke$july <- ifelse(greinke$month == 7, "july", "other")
# Check results
head(greinke)
summary(factor(greinke$july))
# Plot pitch speeds
summary(greinke$start_speed)
greinke %>%
ggplot(aes(x = start_speed)) +
geom_histogram(binwidth = 1)
# Clearly multi-modal
# Plot by pitch type
greinke %>%
ggplot(aes(x = start_speed)) +
geom_histogram() +
facet_wrap(~pitch_type)
# Plot speeds of main pitches in the same plot
greinke %>%
filter(pitch_type %in% c("FF", "FT","SL", "CH", "CU")) %>%
ggplot(aes(x = start_speed, fill = pitch_type)) +
geom_bar(aes(color = pitch_type), position = "dodge", alpha = 0.4)
# Examine the mean pitch speed
greinke %>%
filter(pitch_type %in% c("FF", "FT","SL", "CH", "CU")) %>%
group_by(pitch_type) %>%
summarize(mean_speed = mean(start_speed))
# Examine the four seam fastball velocity in more detail
greinke %>%
filter(pitch_type == "FF") %>%
group_by(month) %>%
summarize(mean_velocity = mean(start_speed))
# Create a boxplot by month
greinke %>%
filter(pitch_type == "FF") %>%
group_by(month) %>%
ggplot(aes(x = factor(month), y = start_speed)) +
geom_boxplot(aes(group = month)) +
labs(x = "Month", y = "Velocity (MPH)", title = "Greinke four-seam fastball speed by month")
# It looks like fastball velocity improved from the beginning of the season onward
# Lets examine the month of July more closely when veolocity began to peak
july_ff <- subset(x = greinke, subset = pitch_type == "FF" & month == 7)
other_ff <- subset(x = greinke, subset = pitch_type == "FF" & month != 7)
# Make a fastball speed histogram for other months
hist(other_ff$start_speed,
col = "#00009950", freq = FALSE,
ylim = c(0, .35), xlab = "Velocity (mph)",
main = "Greinke 4-Seam Fastball Velocity")
# Add a histogram for July
hist(july_ff$start_speed, add = TRUE, col = "#99000050", freq = FALSE)
# Draw vertical lines at the means of the two fastball histograms
abline(v = mean(other_ff$start_speed), col = "#00009950", lwd = 2)
abline(v = mean(july_ff$start_speed), col = "#99000050", lwd = 2)
# Table average four-seam fastball velocity by month
monthAvg <- data.frame(tapply(X = greinke$start_speed, INDEX = greinke$month, FUN = mean))
monthAvg[[2]] <- tapply(X = greinke$start_speed, INDEX = greinke$month, FUN = median)
names(monthAvg) <- c("mean", "median")
monthAvg
# Look at the four-seam fastball velocity by game
greinke_ff <- subset(greinke, subset = pitch_type == "FF")
ff_dt <- data.frame(tapply(greinke_ff$start_speed, greinke_ff$game_date, mean))
head(ff_dt)
ff_dt$game_date <- ymd(rownames(ff_dt))
colnames(ff_dt) <- c("start_speed", colnames(ff_dt)[-1])
row.names(ff_dt) <- NULL
head(ff_dt)
# Plot game-by-game 4-seam fastballs
plot(ff_dt$start_speed ~ ff_dt$game_date,
lwd = 4, type = "l", ylim = c(88, 95),
main = "Greinke 4-Seam Fastball Velocity",
xlab = "Date", ylab = "Velocity (MPH)")
# Add the individual pitches
points(greinke_ff$start_speed ~ jitter(as.numeric(greinke_ff$game_date)), pch = 16, col = "#99004450")
### Explore pitch mix in greater detail
# Lets start by removing the one or two eephus pitches and intentional balls
greinke <- greinke[-c(which(greinke$pitch_type == "EP" | greinke$pitch_type == "IN")), ]
table(greinke$pitch_type, greinke$month)
round(prop.table(table(greinke$pitch_type, greinke$month), margin = 2), 3)
# Specifically look at the proportion of pitches in July vs. all other months combined
type_prop <- round(prop.table(table(greinke$pitch_type, greinke$july), margin = 2), 3)
type_prop <- as.data.frame(type_prop)
type_prop <- spread(type_prop, Var2, Freq)
type_prop$Difference <- (type_prop$july - type_prop$other) / type_prop$other
# Plot the change in pitch selection in the month of July
barplot(type_prop$Difference, names.arg = type_prop$Var1,
main = "Pitch Usage in July vs. Other Months",
ylab = "Percentage Change in July",
ylim = c(-0.3, 0.3))
# Explore the pitch usage across ball-strike counts
# Create a ball-strike count column
greinke$bs_count <- paste(greinke$balls, greinke$strikes, sep = "-")
# Create bs_count_tab
bs_count_tab <- table(greinke$bs_count, greinke$july)
bs_count_tab
# Create bs_month
bs_month <- round(prop.table(bs_count_tab, margin = 2),3)
# Print bs_month
bs_month
diff_bs <- round((bs_month[ , 1] - bs_month[ , 2]) / bs_month[ , 2], 3)
# Create a bar plot of the changes
barplot(diff_bs, main = "Ball-Strike Count Rate in July vs. Other Months",
ylab = "Percentage Change in July", ylim = c(-0.15, 0.15), las = 2)
# Clearly there were more batter friendly counts in July
# Examine pitch selection
type_bs <- table(greinke$pitch_type, greinke$bs_count)
round(prop.table(type_bs, margin = 2), 3)
# Investigate if pitch selection changes late in game
greinke$late <- ifelse(greinke$inning > 5, 1, 0)
late_table <- round(prop.table(table(greinke$pitch_type, factor(greinke$late)), margin = 2), 3)
late_table <- t(late_table)
rownames(late_table) <- c("Early", "Late")
# Plot early pitch selection against later pitch selection
barplot(late_table, beside = TRUE, col = c("red", "blue"),
main = "Early vs. Late In Game Pitch Selection",
ylab = "Pitch Selection Proportion",
legend = rownames(late_table))
# Investigate pitch location
greinke %>% group_by(batter_stand, pitch_type) %>%
summarise(avg_pitch_height = mean(pz) * 12) %>%
spread(batter_stand, avg_pitch_height)
# Look at pitch height in July vs. other months
tapply(greinke$pz, greinke$july, mean) * 12
# Separate the data into left and right handed batters
greinke_lhb <- subset(greinke, batter_stand == "L")
greinke_rhb <- subset(greinke, batter_stand == "R")
# Compare the average horizontal position for RHB vs. LHB for the month of July and other months
tapply(greinke_lhb$px, greinke_lhb$july, mean) * 12
tapply(greinke_rhb$px, greinke_rhb$july, mean) * 12
# Plot pitch location window
plot(x = c(-2, 2), y = c(0, 5), type = "n",
main = "Greinke Locational Zone Proportions",
xlab = "Horizontal Location (ft.; Catcher's View)",
ylab = "Vertical Location (ft.)")
# Add the grid lines
grid(lty = "solid", col = "black")
# Or we could do it with ggplot2
p <- greinke %>%
filter(bs_count == "0-2") %>%
ggplot(aes(x = px, y = pz, size = start_speed)) +
geom_point(aes(color = pitch_type), alpha = 0.6) +
annotate("rect", ymin = 1.5, ymax = 3.4, xmin = -0.83, xmax = 0.83, color = "blue", alpha = 0.2) +
labs(title = "Greinke Pitch Location on 0-2 Count",
x = "Horizontal Location (ft. from plate)",
y = "Vertical Location (ft.)",
color = "Pitch") +
facet_grid(~batter_stand)
# Use the plotly library to make the chart interactive
library(plotly)
ggplotly(p)
# Boxplot of pitch speed by pitch type.
# (The original piped through select(all), which errors because `all` is a
# base function, not a column; no column selection is needed here.)
greinke %>%
  ggplot(aes(x = pitch_type, y = start_speed)) +
  geom_boxplot()
# Examine at bat results to determine if increased fastball velocity resulted in lower contact rate
greinke_ff$bs_count <- paste(greinke_ff$balls, greinke_ff$strikes, sep = "-")
# Create a vector of no swing results
no_swing <- c("Ball", "Called Strike", "Ball In Dirt", "Hit By Pitch")
# Create a variable which is TRUE if the batter took a hack
greinke_ff$batter_swing <- ifelse(greinke_ff$pitch_result %in% no_swing, 0, 1)
# Create a subset of fastball pitches for batter swings
swing_ff <- subset(greinke_ff, greinke_ff$batter_swing == 1)
# Create a contact variable
no_contact <- c("Swinging Strike", "Missed Bunt")
swing_ff$contact <- ifelse(swing_ff$pitch_result %in% no_contact, 0, 1)
# find the mean 4-seam fastball velocity
mean(swing_ff$start_speed)
# Bin the velocities into Slow (< 90.5), Medium ([90.5, 92.5)) and Fast (>= 92.5).
# Bug fix: the original used `> 92.5` for Fast, leaving speeds of exactly
# 92.5 mph as NA.
swing_ff$velo_bin <- ifelse(swing_ff$start_speed < 90.5, "Slow", NA)
swing_ff$velo_bin <- ifelse(swing_ff$start_speed >= 90.5 & swing_ff$start_speed < 92.5, "Medium", swing_ff$velo_bin)
swing_ff$velo_bin <- ifelse(swing_ff$start_speed >= 92.5, "Fast", swing_ff$velo_bin)
# Aggregate contact rate by velocity bin
tapply(X = swing_ff$contact, INDEX = swing_ff$velo_bin, FUN = mean)
# Examine the contact rate across pitch types
swing <- greinke[-which(greinke$pitch_result %in% no_swing), ]
table(swing$pitch_result)
# Create the contact column
swing$contact <- ifelse(swing$pitch_result %in% no_contact, 0, 1)
# contact rate by pitch type
swing %>%
group_by(pitch_type) %>%
summarize(contact_rate = mean(contact))
# Write a function to check the contact rate across quantiles
thirds = c(0, 1/3, 2/3, 1)
nrow(swing)
# Apply quantile function
lapply(split(swing$start_speed, as.factor(swing$pitch_type)), FUN = quantile, probs = thirds)
# Could have used tapply
tapply(swing$start_speed, INDEX = swing$pitch_type, FUN = quantile, probs = thirds)
# In order to have a dataframe instead of a list, write a for loop to function over the pitch types
types <- unique(swing$pitch_type)
pitch_quantiles <- NULL
for(type in types){
pitch_quantiles <- cbind(pitch_quantiles, quantile(swing$start_speed[swing$pitch_type == type], probs = thirds))
}
# Clean up and print
colnames(pitch_quantiles) <- types
pitch_quantiles
# Trying a different way to bin pitch quantiles within the swing dataframe
# Assign each speed to a tercile bin (1 = slowest third, 3 = fastest third).
# `probs` defaults to the tercile breakpoints; the original read the global
# `thirds`, a hidden-state dependency. The default equals `thirds`, so
# existing one-argument calls are unchanged.
bin_pitch_speed <- function(start_speed, probs = c(0, 1/3, 2/3, 1)){
as.integer(cut(start_speed, quantile(start_speed, probs = probs), include.lowest = TRUE))
}
# Test it
mean(bin_pitch_speed(swing$start_speed[swing$pitch_type == "CU"]))
# Apply it to make sure it works for all pitches
tapply(swing$start_speed, INDEX = swing$pitch_type, FUN = bin_pitch_speed)
# Create a dummy variable
swing$velo_bin <- NA
# Loop over the pitch types and bin the velocities
for(type in types){
swing$velo_bin[swing$pitch_type == type] <- bin_pitch_speed(swing$start_speed[swing$pitch_type == type])
}
# Maybe there was an easier way to do that with dplyr
swing <- swing %>%
group_by(pitch_type) %>%
mutate(velo_bin = bin_pitch_speed(start_speed))
# Check the results by binned velocity
swing %>%
group_by(pitch_type, velo_bin) %>%
summarize(contact_rate = mean(contact)) %>%
spread(velo_bin, contact_rate)
# Check for differences for right vs. left batters
swing %>%
group_by(batter_stand, pitch_type, velo_bin) %>%
summarize(contact_rate = mean(contact)) %>%
spread(velo_bin, contact_rate)
# How many pitches of each type were thrown with a 2 strike count
table(swing[swing$strikes == 2, "pitch_type"])
# Create a table detailing contact rate of each pitch type in a two strike count
swing %>%
filter(strikes ==2) %>%
group_by(pitch_type) %>%
summarize(avg = mean(contact))
# --- Bin pitch locations into a 4 x 5 grid ---------------------------------
# Keep pitches inside a 4 ft (horizontal) x 5 ft (vertical) window and assign
# each one a 1-ft-wide x bin (1-4) and a 1-ft-tall y bin (1-5).
# Bin the pitch location data
pitch_bins <- greinke %>%
  filter(px > -2 & px < 2 & pz > 0 & pz < 5) %>%
  select(batter_stand, pitch_type, start_speed, px, pz) %>%
  mutate(x_bin = as.numeric(cut(px, seq(-2, 2, 1), include.lowest = TRUE)),
         y_bin = as.numeric(cut(pz, seq(0, 5, 1), include.lowest = TRUE)))
head(pitch_bins, 10)
# Create a table of counts of pitch locations
bin_tab <- table(pitch_bins$y_bin, pitch_bins$x_bin)
bin_tab
# Convert to a proportion table
pitch_prop <- round(prop.table(bin_tab), 3)
as.data.frame(pitch_prop)
# Convert to a data frame and plot
# (the annotated rectangle approximates the strike zone on the bin grid)
data.frame(pitch_prop) %>%
  ggplot(aes(x = Var2, y = Var1, label = Freq)) +
  geom_text(size = 10) +
  annotate("rect", xmin = 1.5, xmax = 3.5, ymin = 1.5, ymax = 4.5, col = "blue", fill = 0) +
  labs(x = "Pitch location from center of plate", y = "Pitch height from plate")
# Complete the whole process in one step
# Select left batters
pitch_bins %>%
  filter(batter_stand == "L") %>%
  select(y_bin, x_bin) %>%
  table() %>%
  prop.table() %>%
  round(3) %>%
  as.data.frame() %>%
  ggplot(aes(x = x_bin, y = y_bin, label = Freq)) +
  geom_text(size = 10) +
  annotate("rect", xmin = 1.5, xmax = 3.5, ymin = 1.5, ymax = 4.5, col = "blue", fill = 0) +
  labs(x = "Pitch location from center of plate", y = "Pitch height from plate") +
  ggtitle("Left Batter View") +
  theme_classic() +
  # NOTE(review): labels = c(-2, 1, 1, 2) looks like a typo for c(-2, -1, 1, 2)
  # (the four x bins span -2..2 ft in 1-ft steps) -- confirm intent.
  scale_x_discrete( labels = c(-2, 1, 1, 2))
# Let's make it easier to plot and analyze pitch locations by creating a pitch location grid
# Create vector px: bin-centre x coordinates (4 columns), repeated for each of the 5 rows
px <- rep(seq(-1.5, 1.5, 1), times = 5)
# Create vector pz: bin-centre heights (5 rows, top row first), one per column
pz <- rep(seq(4.5, 0.5, -1), each = 4)
# Create vector of zone numbers (1-20, left-to-right, top-to-bottom)
zone <- seq(1, 20, 1)
# Create locgrid for plotting: maps each zone number to its bin-centre coordinates
locgrid <- data.frame(zone = zone, px = px, pz = pz)
# Create a bin template to join onto our pitch bins: (x_bin, y_bin) -> zone
bin_template <- data.frame(zone = zone,
                           x_bin = rep(seq(1, 4, 1), times = 5),
                           y_bin = rep(seq(1, 5, 1), each = 4))
# Join to attach the pitch location zone to every pitch.
# FIX: dplyr joins take their keys via `by =`; the original passed `on = ...`,
# an argument dplyr ignores, so it silently fell back to a natural join.
pitch_bins <- pitch_bins %>% left_join(bin_template, by = c("x_bin", "y_bin"))
head(pitch_bins)
# Load the gridExtra package
library(gridExtra)
library(RColorBrewer)
# Generate a clean data frame with contact data for left and right handed batters
# then assign a bin and replace the px and pz data with the grid coordinates
swings <- swing %>%
  filter(px > -2 & px < 2 & pz > 0 & pz < 5) %>%
  select(batter_stand, pitch_type, atbat_result, px, pz, balls, strikes, contact, batted_ball_velocity) %>%
  mutate(x_bin = as.numeric(cut(px, seq(-2, 2, 1), include.lowest = TRUE)),
         y_bin = as.numeric(cut(pz, seq(0, 5, 1), include.lowest = TRUE))) %>%
  # FIX: explicit `by =` instead of the ignored `on =` (which also contained
  # a `y_bon` typo); the join keys are the shared bin columns.
  left_join(bin_template, by = c("x_bin", "y_bin")) %>%
  select(batter_stand, pitch_type, atbat_result, balls, strikes, contact, batted_ball_velocity, x_bin, y_bin, zone) %>%
  left_join(locgrid, by = "zone")
head(swings)
# --- Contact-rate and exit-velocity heat maps by location zone -------------
# Tile fill = group mean within each (batter side, zone) cell; the blue
# rectangle approximates the strike zone in real plate coordinates.
# Let's use our new swings data frame to plot some contact grids
swings %>%
  group_by(batter_stand, zone) %>%
  mutate(contact_rate = mean(contact)) %>%
  ungroup() %>%
  ggplot(aes(x = px, y = pz)) +
  geom_tile(aes(fill = contact_rate)) +
  scale_fill_gradientn(name = "Contact Rate",
                       limits = c(0.5, 1),
                       breaks = seq(from = 0.5, to = 1, by = 0.1),
                       colors = c(brewer.pal(n = 7, name = "Reds"))) +
  xlim(-2, 2) + ylim(0, 5) +
  ggtitle("Contact Rates") +
  labs(x = "Horizontal Location (ft.)", y = "Vertical Location (ft.)") +
  geom_text(aes(x = px, y = pz, label = round(contact_rate, 3))) +
  annotate("rect", xmin = -1, xmax = 1, ymin = 1, ymax = 4, col = "blue", fill = 0) +
  facet_grid(~batter_stand)
# Explore batted ball exit velocity
# Mean exit velocity by at-bat result (NAs dropped).
tapply(swings$batted_ball_velocity, INDEX = swings$atbat_result, FUN = mean, na.rm = TRUE)
# NOTE(review): this subset() result is printed and discarded, not assigned.
subset(swings, subset = contact == 1 & !is.na(batted_ball_velocity))
# Lets build a plot of exit velocities
swings %>%
  filter(contact == 1 & !is.na(batted_ball_velocity)) %>%
  group_by(batter_stand, zone) %>%
  mutate(exit_speed = mean(batted_ball_velocity)) %>%
  ungroup() %>%
  ggplot(aes(x = px, y = pz)) +
  geom_tile(aes(fill = exit_speed)) +
  scale_fill_gradientn(name = "Exit Speed",
                       limits = c(60, 100),
                       breaks = seq(from = 60, to = 100, by = 5),
                       colors = c(brewer.pal(n = 5, name = "Reds"))) +
  facet_grid(~batter_stand) +
  geom_text(aes(x = px, y = pz, label = round(exit_speed))) +
  annotate("rect", xmin = -1, xmax = 1, ymin = 1, ymax = 4, col = "blue", fill = 0) +
  ggtitle("Batted Ball Exit Velocity") +
  labs(x = "Horizontal Position From Center of Plate", y = "Vertical Distance From Plate")
|
# Unroot a phylogenetic tree: read the Newick tree, drop its root, and
# (on the following line) write the unrooted tree back to disk.
library(ape)
# Read the tree in Newick format from the working directory.
testtree <- read.tree("6469_0.txt")
# unroot() collapses the root node, returning an unrooted "phylo" object.
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6469_0_unrooted.txt") | /codeml_files/newick_trees_processed/6469_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("6469_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6469_0_unrooted.txt") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgcp-INLA.R
\name{matchcovariance}
\alias{matchcovariance}
\title{matchcovariance function}
\usage{
matchcovariance(
xg,
yg,
ns,
sigma,
phi,
model,
additionalparameters,
verbose = TRUE,
r = 1,
method = "Nelder-Mead"
)
}
\arguments{
\item{xg}{x grid must be equally spaced}
\item{yg}{y grid must be equally spaced}
\item{ns}{neighbourhood size}
\item{sigma}{spatial variability parameter}
\item{phi}{spatial dependence parameter}
\item{model}{covariance model, see ?CovarianceFct}
\item{additionalparameters}{additional parameters for chosen covariance model}
\item{verbose}{whether or not to print output generated by the optimiser}
\item{r}{parameter used in optimisation, see Rue and Held (2005) pp 188. default value 1.}
\item{method}{The choice of optimising routine must either be 'Nelder-Mead' or 'BFGS'. see ?optim}
}
\value{
...
}
\description{
A function to match the covariance matrix of a Gaussian Field with an approximate GMRF with neighbourhood size ns.
}
| /man/matchcovariance.Rd | no_license | cran/lgcp | R | false | true | 1,076 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lgcp-INLA.R
\name{matchcovariance}
\alias{matchcovariance}
\title{matchcovariance function}
\usage{
matchcovariance(
xg,
yg,
ns,
sigma,
phi,
model,
additionalparameters,
verbose = TRUE,
r = 1,
method = "Nelder-Mead"
)
}
\arguments{
\item{xg}{x grid must be equally spaced}
\item{yg}{y grid must be equally spaced}
\item{ns}{neighbourhood size}
\item{sigma}{spatial variability parameter}
\item{phi}{spatial dependence parameter}
\item{model}{covariance model, see ?CovarianceFct}
\item{additionalparameters}{additional parameters for chosen covariance model}
\item{verbose}{whether or not to print stuff generated by the optimiser}
\item{r}{parameter used in optimisation, see Rue and Held (2005) pp 188. default value 1.}
\item{method}{The choice of optimising routine must either be 'Nelder-Mead' or 'BFGS'. see ?optim}
}
\value{
...
}
\description{
A function to match the covariance matrix of a Gaussian Field with an approximate GMRF with neighbourhood size ns.
}
|
context("helpLearner")
# classif.logreg maps to a single documented function, so helpLearner()
# should resolve to exactly one help page without prompting the user.
test_that("helpLearner of learner with single help page", {
  expect_true(length(helpLearner("classif.logreg")) == 1)
})
# classif.qda has several candidate help pages, so helpLearner() prompts the
# user via readline(). The test rebinds helpLearner's environment to a child
# env whose readline() is a stub, simulating the user typing 0 (abort), 1, or 2.
test_that("helpLearner of learner with multiple help pages", {
  testfn = helpLearner
  environment(testfn) = new.env(parent = environment(testfn))
  # stub: "user" answers 0 -> no page chosen, function should return NULL
  environment(testfn)$readline = function(x) { cat(x, "\n") ; 0 }
  expect_output(testfn("classif.qda"), "Choose help page:(\\n[0-9]+ : [0-9a-zA-Z._]+)+\\n\\.\\.\\.: *$")
  expect_null(testfn("classif.qda"))
  # stub: "user" picks option 1; repeated calls must yield the same page
  environment(testfn)$readline = function(x) { cat(x, "\n") ; 1 }
  hlp1 = testfn("classif.qda")
  hlp2 = testfn("classif.qda")
  # for regr.randomForest, there is mlr-specific help which should be the first option.
  expect_equivalent(utils::help("regr.randomForest", package = "mlr"), testfn("regr.randomForest"))
  # stub: "user" picks option 2, which must differ from option 1
  environment(testfn)$readline = function(x) { cat(x, "\n") ; 2 }
  hlp3 = testfn("classif.qda")
  expect_identical(hlp1, hlp2)
  expect_false(identical(hlp1, hlp3))
  # regr.randomForest with option '2' should give the randomForest help page.
  expect_true(length(testfn("regr.randomForest")) == 1)
})
# Smoke test: a wrapped learner should fall back to its base learner's help.
test_that("helpLearner of wrapped learner", {
  # check that it doesn't give an error
  helpLearner(makeBaggingWrapper(makeLearner("classif.qda"), 2))
})
# helpLearnerParam() should print parameter names, the providing package,
# requirement expressions, and any values currently set on the learner.
test_that("helpLearnerParam", {
  # mention parameters
  expect_output(helpLearnerParam("classif.qda"), "method")
  expect_output(helpLearnerParam("classif.qda"), "nu")
  expect_output(helpLearnerParam("classif.qda", "nu"), "nu")
  # mention package
  expect_output(helpLearnerParam("classif.qda"), "MASS::qda")
  expect_output(helpLearnerParam("classif.qda", "nu"), "MASS::qda")
  # mention requirement
  nureq = capture.output(print(getParamSet("classif.qda")$pars$nu$requires))
  expect_output(helpLearnerParam("classif.qda", "nu"), paste("Requires:", nureq), fixed = TRUE)
  # error when giving unknown parameter
  expect_error(helpLearnerParam("classif.qda", "this_parameter_does_not_exist"))
  # message when querying parameter without documentation
  expect_output(helpLearnerParam("classif.__mlrmocklearners__2", "alpha"), "No documentation found")
  # check this doesn't give an error
  helpLearnerParam("classif.__mlrmocklearners__2")
  # check that values are printed
  expect_output(helpLearnerParam(
    makeLearner("classif.qda", nu = 3), "nu"),
    "Value: +3")
  # values for vectorial params work
  expect_output(helpLearnerParam(
    makeLearner("classif.randomForest", cutoff = c(.1, .2, .3)), "cutoff"),
    "Value:.+0\\.1.+0\\.2.+0\\.3")
})
# For wrapped learners, parameter help should surface the base learner's
# documentation and report values set on the wrapped learner.
test_that("helpLearnerParam of wrapped learner", {
  w1 = makeBaggingWrapper(makeLearner("classif.qda", nu = 4), 2)
  w2 = makeOversampleWrapper(w1)
  # correct info is given
  expect_output(helpLearnerParam(w1, "nu"), "Value: +4")
  expect_output(helpLearnerParam(w2, "nu"), "Value: +4")
  expect_message(helpLearnerParam(w1),
    "is a wrapped learner. Showing documentation of 'classif.qda' instead", fixed = TRUE, all = TRUE)
  expect_message(helpLearnerParam(w2),
    "is a wrapped learner. Showing documentation of 'classif.qda' instead", fixed = TRUE, all = TRUE)
})
| /tests/testthat/test_base_learnerHelp.R | no_license | aeron15/mlr | R | false | false | 3,141 | r |
context("helpLearner")
test_that("helpLearner of learner with single help page", {
expect_true(length(helpLearner("classif.logreg")) == 1)
})
test_that("helpLearner of learner with multiple help pages", {
testfn = helpLearner
environment(testfn) = new.env(parent = environment(testfn))
environment(testfn)$readline = function(x) { cat(x, "\n") ; 0 }
expect_output(testfn("classif.qda"), "Choose help page:(\\n[0-9]+ : [0-9a-zA-Z._]+)+\\n\\.\\.\\.: *$")
expect_null(testfn("classif.qda"))
environment(testfn)$readline = function(x) { cat(x, "\n") ; 1 }
hlp1 = testfn("classif.qda")
hlp2 = testfn("classif.qda")
# for regr.randomForest, there is mlr-specific help which should be the first option.
expect_equivalent(utils::help("regr.randomForest", package = "mlr"), testfn("regr.randomForest"))
environment(testfn)$readline = function(x) { cat(x, "\n") ; 2 }
hlp3 = testfn("classif.qda")
expect_identical(hlp1, hlp2)
expect_false(identical(hlp1, hlp3))
# regr.randomForest with option '2' should give the randomForest help page.
expect_true(length(testfn("regr.randomForest")) == 1)
})
test_that("helpLearner of wrapped learner", {
# check that it doesn't give an error
helpLearner(makeBaggingWrapper(makeLearner("classif.qda"), 2))
})
test_that("helpLearnerParam", {
# mention parameters
expect_output(helpLearnerParam("classif.qda"), "method")
expect_output(helpLearnerParam("classif.qda"), "nu")
expect_output(helpLearnerParam("classif.qda", "nu"), "nu")
# mention package
expect_output(helpLearnerParam("classif.qda"), "MASS::qda")
expect_output(helpLearnerParam("classif.qda", "nu"), "MASS::qda")
# mention requirement
nureq = capture.output(print(getParamSet("classif.qda")$pars$nu$requires))
expect_output(helpLearnerParam("classif.qda", "nu"), paste("Requires:", nureq), fixed = TRUE)
# error when giving unknown parameter
expect_error(helpLearnerParam("classif.qda", "this_parameter_does_not_exist"))
# message when querying parameter without documentation
expect_output(helpLearnerParam("classif.__mlrmocklearners__2", "alpha"), "No documentation found")
# check this doesn't give an error
helpLearnerParam("classif.__mlrmocklearners__2")
# check that values are printed
expect_output(helpLearnerParam(
makeLearner("classif.qda", nu = 3), "nu"),
"Value: +3")
# values for vectorial params work
expect_output(helpLearnerParam(
makeLearner("classif.randomForest", cutoff = c(.1, .2, .3)), "cutoff"),
"Value:.+0\\.1.+0\\.2.+0\\.3")
})
test_that("helpLearnerParam of wrapped learner", {
w1 = makeBaggingWrapper(makeLearner("classif.qda", nu = 4), 2)
w2 = makeOversampleWrapper(w1)
# correct info is given
expect_output(helpLearnerParam(w1, "nu"), "Value: +4")
expect_output(helpLearnerParam(w2, "nu"), "Value: +4")
expect_message(helpLearnerParam(w1),
"is a wrapped learner. Showing documentation of 'classif.qda' instead", fixed = TRUE, all = TRUE)
expect_message(helpLearnerParam(w2),
"is a wrapped learner. Showing documentation of 'classif.qda' instead", fixed = TRUE, all = TRUE)
})
|
## plot4.R by Paul Fornia 4/11/2015 creates plot4.png
## for the Coursera Exploratory Data Course: Project 1 part 4
## Produces a 2x2 panel of time-series plots for 2007-02-01 and 2007-02-02.

# Read in raw data. Missing values appear as "?" in this file (filtered out
# below), so numeric columns arrive as character and are converted with
# as.numeric() at plot time.
rawData <- read.table(file = "household_power_consumption.txt",
                    sep = ";",
                    header = TRUE,
                    stringsAsFactors = FALSE)
# Filter to only Feb 1st and 2nd 2007 (combine Date + Time into one timestamp)
dataDateTime <- transform(rawData, DateTime = strptime(paste(Date,Time), "%d/%m/%Y %H:%M:%S"))
data2daysRaw <- dataDateTime[
  dataDateTime$DateTime >= strptime("01/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S") &
    dataDateTime$DateTime <= strptime("02/02/2007 23:59:59", "%d/%m/%Y %H:%M:%S") &
    !is.na(dataDateTime$DateTime)
  ,]
# Remove "?" and NAs from every column except the last one (DateTime).
# seq_len() replaces 1:(n - 1), which misbehaves when the range is empty.
data2daysTemp <- data2daysRaw
for(i in seq_len(length(data2daysRaw) - 1)){
  data2daysTemp <- data2daysTemp[!is.na(data2daysTemp[[i]]) & data2daysTemp[[i]] != "?",]
}
data2days <- data2daysTemp
# Make plot4
# Initialize a PNG device
png(filename = "plot4.png")
# Initialize a 4-panel page (2 rows x 2 columns, filled row-wise)
par(mfrow = c(2, 2))
## Top Left: global active power over time
plot(x = data2days$DateTime, y = as.numeric(data2days$Global_active_power),
     xlab = "",
     ylab = "Global Active Power",
     type = "l")
## Top Right: voltage over time
plot(x = data2days$DateTime, y = as.numeric(data2days$Voltage),
     xlab = "datetime",
     ylab = "Voltage",
     type = "l")
## Bottom Left (same as plot3.png): the three sub-metering series overlaid
plot(x = data2days$DateTime, y = as.numeric(data2days$Sub_metering_1),
     xlab = "",
     ylab = "Energy sub metering",
     type = "l")
### Layer on Sub_metering_2 and 3
lines(x = data2days$DateTime, y = as.numeric(data2days$Sub_metering_2), col = "red")
lines(x = data2days$DateTime, y = as.numeric(data2days$Sub_metering_3), col = "blue")
### Add in legend
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       bty = "n")
## Bottom Right: global reactive power over time
plot(x = data2days$DateTime, y = as.numeric(data2days$Global_reactive_power),
     xlab = "",
     ylab = "Global_reactive_power",
     type = "l")
dev.off()
| /plot4.R | no_license | pfornia/ExData_Plotting1 | R | false | false | 2,079 | r | ## plot4.R by Paul Fornia 4/11/2015 creates plot4.png
## for the Coursera Exploratory Data Course: Project 1 part 4
# Read in raw data.
rawData <- read.table(file = "household_power_consumption.txt",
sep = ";",
header = TRUE,
stringsAsFactors = FALSE)
# Filter to only Feb 1st and 2nd 2007
dataDateTime <- transform(rawData, DateTime = strptime(paste(Date,Time), "%d/%m/%Y %H:%M:%S"))
data2daysRaw <- dataDateTime[
dataDateTime$DateTime >= strptime("01/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S") &
dataDateTime$DateTime <= strptime("02/02/2007 23:59:59", "%d/%m/%Y %H:%M:%S") &
!is.na(dataDateTime$DateTime)
,]
# Remove "?" and NAs
data2daysTemp <- data2daysRaw
for(i in 1:(length(data2daysRaw) - 1)){
data2daysTemp <- data2daysTemp[!is.na(data2daysTemp[[i]]) & data2daysTemp[[i]] != "?",]
}
data2days <- data2daysTemp
# Make plot4
# Initialize a PNG device
png(filename = "plot4.png")
# Initialize a 4-panel page
par(mfrow = c(2, 2))
## Top Left
plot(x = data2days$DateTime, y = as.numeric(data2days$Global_active_power),
xlab = "",
ylab = "Global Active Power",
type = "l")
## Top Right
plot(x = data2days$DateTime, y = as.numeric(data2days$Voltage),
xlab = "datetime",
ylab = "Voltage",
type = "l")
## Bottom Left (same as plot3.png)
plot(x = data2days$DateTime, y = as.numeric(data2days$Sub_metering_1),
xlab = "",
ylab = "Energy sub metering",
type = "l")
### Layer on Sub_metering_2 and 3
lines(x = data2days$DateTime, y = as.numeric(data2days$Sub_metering_2), col = "red")
lines(x = data2days$DateTime, y = as.numeric(data2days$Sub_metering_3), col = "blue")
### Add in legend
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
bty = "n")
## Bottom Right
plot(x = data2days$DateTime, y = as.numeric(data2days$Global_reactive_power),
xlab = "",
ylab = "Global_reactive_power",
type = "l")
dev.off()
|
# plot1.R: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# NOTE(review): hard-coding setwd() to a user-specific path makes the script
# non-portable; kept as-is to preserve behaviour, but consider relative paths.
root <- "C:/Users/albert.QBIDS/Coursera/Johns Hopkins/The Data Science Track/4 Exploratory Data Analysis/Project"
setwd(root)
#make sure we have only 1 graph on the screen
par(mfrow=c(1,1))
datafile <- paste(root,"/household_power_consumption.txt", sep="")
colclasses <- c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric")
headers <- c("Date", "Time", "Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
#read data from 2007-02-01 and 2007-02-02
#Note: after examining the data first I concluded that the data is ordered.
#Therefore I choose to use the technique to skip lines instead of subsetting them.
#first line starts at rownumber 66637 (incl header) so skip the first 66636 lines
#the first line of 2007-02-03 starts at rownumber 69517, so the last line of 2007-02-02 is at rownumber 69516
#therefore we can read 69516 - 66636 = 2880 lines to get the whole set we need
# NOTE(review): with skip= and header=TRUE together, read.table treats the
# first non-skipped line as the header line -- verify the row arithmetic
# above still yields exactly the two intended days.
# (header=T replaced with header=TRUE: T is reassignable, TRUE is not.)
data <- read.table(datafile, sep=";",colClasses = colclasses, col.names =headers, comment.char="", na.strings="?", header=TRUE, skip=66636, nrow=2880)
#convert the columns to the appropriate datatypes
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data$Time <- strptime(data$Time, format = "%H:%M:%S")
#create the histogram (displayed on screen first)
hist(data$Global_active_power, col="red",xlab="Global Active Power (kilowatts)", main="Global Active Power")
#create a .png file
# store as image of default size: 480 x 480
png(filename=".\\Github\\plot1.png")
hist(data$Global_active_power, col="red",xlab="Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
| /plot1.R | no_license | kjoebie/ExData_Plotting1 | R | false | false | 1,664 | r | root <- "C:/Users/albert.QBIDS/Coursera/Johns Hopkins/The Data Science Track/4 Exploratory Data Analysis/Project"
setwd(root)
#make sure we have only 1 graph on the screen
par(mfrow=c(1,1))
datafile <- paste(root,"/household_power_consumption.txt", sep="")
colclasses <- c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric")
headers <- c("Date", "Time", "Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
#read data from 2007-02-01 and 2007-02-02
#Note: after examining the data first I concluded that the data is ordered.
#Therefore I choose to use the technique to skip lines instead of subsetting them.
#first line starts at rownumber 66637 (incl header) so skip the first 66636 lines
#the first line of 2007-02-03 starts at rownumber 69517, so the last line of 2007-02-02 is at rownumber 69516
#therefore we can read 69516 - 66636 = 2880 lines to get the whole set we need
data <- read.table(datafile, sep=";",colClasses = colclasses, col.names =headers, comment.char="", na.strings="?", header=T, skip=66636, nrow=2880)
#convert the columns to the appropriate datatypes
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data$Time <- strptime(data$Time, format = "%H:%M:%S")
#create the historgram
hist(data$Global_active_power, col="red",xlab="Global Active Power (kilowatts)", main="Global Active Power")
#create a .png file
# store as image of certain size: 480 x 480
png(filename=".\\Github\\plot1.png")
hist(data$Global_active_power, col="red",xlab="Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nanoSingleMolecule.R
\name{tsvToMethMat}
\alias{tsvToMethMat}
\title{Convert nanopore tsv to methylation matrix}
\usage{
tsvToMethMat(tsv, genomeGRs, motif, binarise = TRUE)
}
\arguments{
\item{tsv}{A tab-separated values text file where individual motifs have been split.
Also accepts a Granges object made from tsv}
\item{genomeGRs}{Genomic Ranges object for the regions to be analysed}
\item{motif}{Motif ("CG" or "GC") for which the tsv was called}
\item{binarise}{Convert log likelihoods to binary values: methylated(\eqn{ln(L) \ge 2.5}): 1; unmethylated(\eqn{ln(L) \le -2.5}): 0; inconclusive(\eqn{-2.5 < ln(L) < 2.5}): NA. (default: binarise=TRUE)}
}
\value{
A methylation matrix (reads x motif positions) with binary or log likelihood values
}
\description{
Convert nanopore tsv to methylation matrix
}
\examples{
tsvToMethMat(splitMotifs(MSssI_CpG,"CG"),ttTi5605gr)
}
| /man/tsvToMethMat.Rd | no_license | CellFateNucOrg/nanodsmf | R | false | true | 963 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nanoSingleMolecule.R
\name{tsvToMethMat}
\alias{tsvToMethMat}
\title{Convert nanopore tsv to methylation matrix}
\usage{
tsvToMethMat(tsv, genomeGRs, motif, binarise = TRUE)
}
\arguments{
\item{tsv}{A tab serparated values text file where individual motifs have been split.
Also accepts a Granges object made from tsv}
\item{genomeGRs}{Genomic Ranges object for the regions to be analysed}
\item{motif}{Motif ("CG" or "GC" to for which the tsv was called)}
\item{binarise}{Convert log likelihoods to binary values: methylated(\eqn{ln(L) \ge 2.5}): 1; unmethylated(\eqn{ln(L) \le -2.5}): 0; inconclusive(\eqn{-2.5 < ln(L) < 2.5}): NA. (default: binarise=TRUE)}
}
\value{
A methylation matrix (reads x motif positions) with binary or log likelihood values
}
\description{
Convert nanopore tsv to methylation matrix
}
\examples{
tsvToMethMat(splitMotifs(MSssI_CpG,"CG"),ttTi5605gr)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot-ggpackets-.r
\name{add_eqv_aes}
\alias{add_eqv_aes}
\title{Add equivalent Americanized and base equivalent names to ggplot aesthetic
list}
\usage{
add_eqv_aes(aes_names)
}
\arguments{
\item{aes_names}{a character vector of aesthetic names}
}
\value{
a character vector of aesthetic names including any Americanized or
base R equivalent argument names accepted by ggplot2.
}
\description{
Add equivalent Americanized and base equivalent names to ggplot aesthetic
list
}
| /man/add_eqv_aes.Rd | no_license | lengning/gClinBiomarker | R | false | true | 554 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot-ggpackets-.r
\name{add_eqv_aes}
\alias{add_eqv_aes}
\title{Add equivalent Americanized and base equivalent names to ggplot aesthetic
list}
\usage{
add_eqv_aes(aes_names)
}
\arguments{
\item{aes_names}{a character vector of aesthetic names}
}
\value{
a character vector of aesthetic names including any Americanized or
base R equivalent argument names accepted by ggplot2.
}
\description{
Add equivalent Americanized and base equivalent names to ggplot aesthetic
list
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_gains.R
\name{calculate_gains}
\alias{calculate_gains}
\title{Get gains from cryptocompare}
\usage{
calculate_gains(data)
}
\arguments{
\item{data}{A xts with ohlc-values}
}
\value{
A time series with gains
}
\description{
Calculate the gains of a cryptocurrency with respect to the
closing price within an xts of ohlc-values. The referenced columns
must be named "close" and "open".
}
\examples{
calculate_gains(calculate_price("CCCAGG","BTC","USD",7))
}
\author{
Philipp Giese
}
| /man/calculate_gains.Rd | no_license | philgee1981/btcecho | R | false | true | 565 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_gains.R
\name{calculate_gains}
\alias{calculate_gains}
\title{Get gains from cryptocompare}
\usage{
calculate_gains(data)
}
\arguments{
\item{data}{A xts with ohlc-values}
}
\value{
A time series with gains
}
\description{
Calculate the gains of a cryptocurrency with respect to the
closing price within a xts of ohlc-values. Referred_to columns
must be named "close" and "open".
}
\examples{
calculate_gains(calculate_price("CCCAGG","BTC","USD",7))
}
\author{
Philipp Giese
}
|
#PAGE=2
# Display a large and a small number in scientific (exponential) notation.
big_num <- 864000000
formatC(big_num, format = "e")
small_num <- 0.00003416
formatC(small_num, format = "e")
| /Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH1/EX1.5/Ex1_5.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 86 | r | #PAGE=2
a=864000000
formatC(a,format="e")
b=0.00003416
formatC(b,format="e")
|
# Add reactive data frame
# This app lets the user pick columns of the movies dataset, preview them in a
# table, and download them as csv or tsv. A single reactive data frame feeds
# both outputs so the column-selection logic lives in one place.
library(shiny)
library(dplyr)
library(readr)

load(url("http://s3.amazonaws.com/assets.datacamp.com/production/course_4850/datasets/movies.Rdata"))

# UI
ui <- fluidPage(
  sidebarLayout(
    # Input(s)
    sidebarPanel(
      # Select filetype
      radioButtons(inputId = "filetype",
                   label = "Select filetype:",
                   choices = c("csv", "tsv"),
                   selected = "csv"),
      # Select variables to download
      checkboxGroupInput(inputId = "selected_var",
                         label = "Select variables:",
                         choices = names(movies),
                         selected = c("title"))
    ),
    # Output(s)
    mainPanel(
      HTML("Select filetype and variables, then download and/or view the data."),
      br(), br(),
      downloadButton(outputId = "download_data", label = "Download data"),
      br(), br(),
      DT::dataTableOutput(outputId = "moviestable")
    )
  )
)

# Server
server <- function(input, output) {
  # Reactive data frame holding only the user-selected columns.
  movies_selected <- reactive({
    req(input$selected_var) # ensure input$selected_var is available
    # all_of() selects columns named by the character vector; passing a bare
    # external vector to select() is ambiguous and deprecated in tidyselect.
    movies %>% select(all_of(input$selected_var))
  })
  # Render the interactive preview table (10 rows per page).
  output$moviestable <- DT::renderDataTable({
    DT::datatable(data = movies_selected(),
                  options = list(pageLength = 10),
                  rownames = FALSE)
  })
  # Stream the selected data to the client in the requested file format.
  output$download_data <- downloadHandler(
    filename = function() {
      paste0("movies.", input$filetype)
    },
    content = function(file) {
      if(input$filetype == "csv"){
        write_csv(movies_selected(), file)
      }
      if(input$filetype == "tsv"){
        write_tsv(movies_selected(), file)
      }
    }
  )
}

# Create a Shiny app object
shinyApp(ui = ui, server = server)
# We ended the previous chapter with an app that allows you to download a data file with selected variables
# from the movies dataset. We will now extend this app by adding a table output of the selected data as well.
# Given that the same dataset will be used in two outputs, it makes sense to make our code more efficient by using a reactive data frame.
library(shiny)
library(dplyr)
library(readr)
load(url("http://s3.amazonaws.com/assets.datacamp.com/production/course_4850/datasets/movies.Rdata"))
# UI
ui <- fluidPage(
sidebarLayout(
# Input(s)
sidebarPanel(
# Select filetype
radioButtons(inputId = "filetype",
label = "Select filetype:",
choices = c("csv", "tsv"),
selected = "csv"),
# Select variables to download
checkboxGroupInput(inputId = "selected_var",
label = "Select variables:",
choices = names(movies),
selected = c("title"))
),
# Output(s)
mainPanel(
HTML("Select filetype and variables, then download and/or view the data."),
br(), br(),
downloadButton(outputId = "download_data", label = "Download data"),
br(), br(),
DT::dataTableOutput(outputId = "moviestable")
)
)
)
# Server
server <- function(input, output) {
# Create reactive data frame
movies_selected <- reactive({
req(input$selected_var) # ensure input$selected_var is available
movies %>% select(input$selected_var) # select columns of movies
})
# Create data table
output$moviestable <- DT::renderDataTable({
DT::datatable(data = movies_selected(),
options = list(pageLength = 10),
rownames = FALSE)
})
# Download file
output$download_data <- downloadHandler(
filename = function() {
paste0("movies.", input$filetype)
},
content = function(file) {
if(input$filetype == "csv"){
write_csv(movies_selected(), file)
}
if(input$filetype == "tsv"){
write_tsv(movies_selected(), file)
}
}
)
}
# Create a Shiny app object
shinyApp(ui = ui, server = server) |
#import packages
library("ggplot2")
library("dplyr")
#Question 1
#read text file into dataframe
#file compares predicted severity of mutation with severity of clinical symptoms
scores <- read.table("scores.txt", header=TRUE, sep="\t", stringsAsFactors = FALSE)
#Make scatterplot of clinical vs. predicted severity with a linear trendline
#(stat_smooth(method="lm") adds the fit plus its confidence band)
ggplot(data = scores,
       aes(x=MMS, y=COS)) +
  geom_point(color="purple") +
  xlab("Mutation Severity Score") +
  ylab("Clinical Severity Score") +
  stat_smooth(method="lm") +
  theme_classic()
#Question 2
#read text file into dataframe
obs <- read.table("data.txt", header=TRUE, sep=",", stringsAsFactors = FALSE)
#Find mean observations for each region
gd <- obs %>%
  group_by(region) %>%
  summarise(
    observations = mean(observations)
  )
#Make barplot of mean observations
#geom_col() is the idiomatic equivalent of geom_bar(stat = "identity")
ggplot(gd, aes(x=region, y = observations )) +
  geom_col() +
  theme_classic()
#Make scatterplot of data
ggplot(data = obs,
       aes(x=region, y=observations)) +
  geom_jitter(color = "pink") +
  theme_classic()
#The barplot shows just the means, which make the observations from each region look similar
#However, the scatterplot shows the variation and gives a better look at the overall dataset
#For example, the south region had a mean close to 15 but the observations were actually higher or lower
#Alternatively, the north region had observations that were all very close to 15
library("ggplot2")
library("dplyr")
#Question 1
#read text file into dataframe
#file compares predicted severity of mutation with severity of clinical symptoms
scores <- read.table("scores.txt", header=TRUE, sep="\t", stringsAsFactors = FALSE)
#Make scatterplot with trendline
ggplot(data = scores,
aes(x=MMS, y=COS)) +
geom_point(color="purple") +
xlab("Mutation Severity Score") +
ylab("Clinical Severity Score") +
stat_smooth(method="lm") +
theme_classic()
#Question 2
#read text file into dataframe
obs <- read.table("data.txt", header=TRUE, sep=",", stringsAsFactors = FALSE)
#Find means for each region
gd <- obs %>%
group_by(region) %>%
summarise(
observations = mean(observations)
)
#Make barplot of mean observations
ggplot(gd, aes(x=region, y = observations )) +
geom_bar(stat = "identity") +
theme_classic()
#Make scatterplot of data
ggplot(data = obs,
aes(x=region, y=observations)) +
geom_jitter(color = "pink") +
theme_classic()
#The barplot shows just the means, which make the observations from each region look similar
#However, the scatterplot shows the variation and gives a better look at the overall dataset
#For example, the south region had a mean close to 15 but the observations were actually higher or lower
#Alternatively, the north region had observations that were all very close to 15 |
\name{br_fld2b}
\alias{br_fld2b}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Baseball-Reference MLB Team Fielding - Second Base Table
%% ~~function to do ... ~~
}
\description{ Scraps MLB Team Fielding - Second Base Table from https://www.baseball-reference.com/leagues/MLB/'year'-specialpos_2b-fielding.shtml according to 'year' chosen.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
br_fld2b(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{ 'year'
\item{x}{ year
%% ~~Describe \code{x} here~~
}
}
%%\details{
%% ~~ If necessary, more details than the description above ~~
%%}
\value{ A data frame containing a representation of the MLB Team Fielding - Second Base Table statistics pertaining to the year inputed. For details, please see statistics in https://www.baseball-reference.com/leagues/MLB/2017-specialpos_2b-fielding.shtml
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
%%\references{
%% ~put references to the literature/web site here ~
%%}
\author{ Henry Alvarez
%% ~~who you are~~
}
%%\note{
%% ~~further notes~~
%%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%%}
\examples{
## Choose year you wish to view
br_fld2b(2018)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ br_fld2b }% use one of RShowDoc("KEYWORDS")
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/br_fld2b.Rd | no_license | Jonghyun-Yun/brscrap | R | false | false | 1,680 | rd | \name{br_fld2b}
\alias{br_fld2b}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Baseball-Reference MLB Team Fielding - Second Base Table
%% ~~function to do ... ~~
}
\description{ Scraps MLB Team Fielding - Second Base Table from https://www.baseball-reference.com/leagues/MLB/'year'-specialpos_2b-fielding.shtml according to 'year' chosen.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
br_fld2b(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{ 'year'
\item{x}{ year
%% ~~Describe \code{x} here~~
}
}
%%\details{
%% ~~ If necessary, more details than the description above ~~
%%}
\value{ A data frame containing a representation of the MLB Team Fielding - Second Base Table statistics pertaining to the year inputed. For details, please see statistics in https://www.baseball-reference.com/leagues/MLB/2017-specialpos_2b-fielding.shtml
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
%%\references{
%% ~put references to the literature/web site here ~
%%}
\author{ Henry Alvarez
%% ~~who you are~~
}
%%\note{
%% ~~further notes~~
%%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%%}
\examples{
## Choose year you wish to view
br_fld2b(2018)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ br_fld2b }% use one of RShowDoc("KEYWORDS")
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(dplyr)
library(ggplot2)
library(GGally)
library(caTools)
library(ROCR)
set.seed(144)
patients = read.csv("framingham.csv")
#Make sure the train and test sets have respectively 70% and 30% of patients with or without the disease
split = sample.split(patients$TenYearCHD, SplitRatio = 0.7)
patients.train = filter(patients, split == TRUE)
patients.test = filter(patients, split == FALSE)
#Check if there are the same ratio for of TenYearCHD for the train and test set
#table(patients.test$TenYearCHD) 0 1 2171 390
#table(patients$TenYearCHD) 0 1 930 167
#Approximately 18 percent
mod1 = glm(TenYearCHD ~ male + age + education + currentSmoker + cigsPerDay + BPMeds + prevalentStroke + prevalentHyp + diabetes + totChol + sysBP + diaBP + BMI + heartRate + glucose, data = patients.train , family = binomial)
summary(mod1)
#Question iv)
predTest = predict(mod1, newdata=patients.test, type="response")
summary(predTest)
table(patients.test$TenYearCHD, predTest>0.16)
#Question v)
#Question a)
rocr.Tenyear <- prediction(predTest, patients.test$TenYearCHD)
logPerformance <- performance(rocr.Tenyear, "tpr", "fpr")
plot(logPerformance, colorize = TRUE)
abline(0, 1)
as.numeric(performance(rocr.Tenyear, "auc")@y.values)
| /HW2/Hw2.R | no_license | Weulass/INDENG-242-Data-Analysis-and-Applications | R | false | false | 1,298 | r | library(dplyr)
library(ggplot2)
library(GGally)
library(caTools)
library(ROCR)
set.seed(144)
patients = read.csv("framingham.csv")
#Make sure the train and test sets have respectively 70% and 30% of patients with or without the disease
split = sample.split(patients$TenYearCHD, SplitRatio = 0.7)
patients.train = filter(patients, split == TRUE)
patients.test = filter(patients, split == FALSE)
#Check if there are the same ratio for of TenYearCHD for the train and test set
#table(patients.test$TenYearCHD) 0 1 2171 390
#table(patients$TenYearCHD) 0 1 930 167
#Approximately 18 percent
mod1 = glm(TenYearCHD ~ male + age + education + currentSmoker + cigsPerDay + BPMeds + prevalentStroke + prevalentHyp + diabetes + totChol + sysBP + diaBP + BMI + heartRate + glucose, data = patients.train , family = binomial)
summary(mod1)
#Question iv)
predTest = predict(mod1, newdata=patients.test, type="response")
summary(predTest)
table(patients.test$TenYearCHD, predTest>0.16)
#Question v)
#Question a)
rocr.Tenyear <- prediction(predTest, patients.test$TenYearCHD)
logPerformance <- performance(rocr.Tenyear, "tpr", "fpr")
plot(logPerformance, colorize = TRUE)
abline(0, 1)
as.numeric(performance(rocr.Tenyear, "auc")@y.values)
|
#' Basic plot of WSB based on huc
#'
#' Basic plot
#' @param sites character vector of site ids
#' @param col for basin fill
#' @param mapRange vector of map limits (min long, max long, min lat, max lat)
#' @import sp
#' @import rgdal
#' @export
#' @examples
#' sites <- c("01137500","01491000", "01573000", "01576000","06485500")
#' path <- system.file("extdata", package="hydroMap")
#' siteInfo <- readRDS(file.path(path,"siteInfo.rds"))
#' png("test.png",width=11,height=8,units="in",res=600,pointsize=4)
#' plotWSB(sites)
#' points(siteInfo$dec_long_va, siteInfo$dec_lat_va, pch=20, col="red", cex=3)
#' dev.off()
#'
#' plotWSB(sites[4], mapRange=c(-80,-74, 38, 46))
#' points(siteInfo$dec_long_va[4], siteInfo$dec_lat_va[4], pch=20, col="red", cex=1)
plotWSB <- function(sites,col="#A8A8A850", mapRange = NA){
shape_hydropoly <- shape_hydropoly
shape_polibounds <- shape_polibounds
shape_hydroline <- shape_hydroline
basins <- getBasin(sites)
basins <- spTransform(basins,CRS(proj4string(shape_polibounds)))
if(all(is.na(mapRange))){
plot(basins, col=col)
mapRange <- par()$usr
shape_hydropoly <- clipShape(shape_hydropoly, mapRange)
shape_polibounds <- clipShape(shape_polibounds, mapRange)
shape_hydroline <- clipShape(shape_hydroline, mapRange)
plot(shape_hydropoly,col="lightskyblue2",add=TRUE)
lines(shape_hydroline,col="lightskyblue2")
plot(shape_polibounds,add=TRUE)
} else {
shape_hydropoly <- clipShape(shape_hydropoly, mapRange)
shape_polibounds <- clipShape(shape_polibounds, mapRange)
shape_hydroline <- clipShape(shape_hydroline, mapRange)
basins <- crop(basins, extent(mapRange)) #should figure this out...clipping a SpatialPolygonsDataFrame
plot(shape_hydropoly,col="lightskyblue2")
lines(shape_hydroline,col="lightskyblue2")
plot(shape_polibounds,add=TRUE)
plot(basins, col=col,add=TRUE)
}
}
#' Basic plot of WSB based on huc
#'
#' Basic plot
#' @param shapefile shapefile to clip
#' @param mapRange vector of map limits (min long, max long, min lat, max lat)
#' @import sp
#' @import rgdal
#' @import rgeos
#' @import raster
#' @export
#' @examples
#' mapRange=c(-80,-74, 38, 46)
#' shape_hydropoly <- shape_hydropoly
#' clippedShape <- clipShape(shape_hydropoly, mapRange)
clipShape <- function(shapefile, mapRange){
ext <- extent(mapRange)
clipe <- as(ext, "SpatialPolygons")
proj4string(clipe) <- CRS(proj4string(shapefile))
cropd <- SpatialPolygonsDataFrame(clipe, data.frame(x = 1), match.ID = FALSE)
shapeClipped <- gIntersection(shapefile, cropd,byid=TRUE)
return(shapeClipped)
}
#' Get shapefile basins
#'
#' Get shapefile basins
#' @param sites character id
#' @return shapefile
#' @importFrom httr GET
#' @importFrom httr write_disk
#' @importFrom utils URLencode
#' @import sp
#' @import rgdal
#' @export
#' @examples
#' sites <- c("01491000", "01573000", "01576000","01137500","06485500")
#' basinShapes <- getBasin(sites)
getBasin <- function(sites){
baseURL <- "http://cida-test.er.usgs.gov/nwc/geoserver/NWC/ows?service=WFS&version=1.1.0&srsName=EPSG:4326&request=GetFeature&typeName=NWC:epa_basins"
siteText <- ""
for(i in sites){
siteText <- paste0(siteText,'<ogc:PropertyIsEqualTo matchCase="true">',
'<ogc:PropertyName>site_no</ogc:PropertyName>',
'<ogc:Literal>',i,'</ogc:Literal>',
'</ogc:PropertyIsEqualTo>')
}
if(length(sites) > 1){
filterXML <- paste0('<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">',
'<ogc:Or>',siteText,'</ogc:Or>',
'</ogc:Filter>')
} else {
filterXML <- paste0('<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">',
'<ogc:PropertyIsEqualTo matchCase="true">',
'<ogc:PropertyName>site_no</ogc:PropertyName>',
'<ogc:Literal>',sites,'</ogc:Literal>',
'</ogc:PropertyIsEqualTo>',
'</ogc:Filter>')
}
filterXML <- URLencode(filterXML,reserved = TRUE)
requestURL <- paste0(baseURL,"&outputFormat=shape-zip", "&filter=", filterXML)
destination = tempfile(pattern = 'basins_shape', fileext='.zip')
file <- GET(requestURL, write_disk(destination, overwrite=T))
shp.path <- tempdir()
unzip(destination, exdir = shp.path)
basins = readOGR(shp.path, layer='epa_basins')
return(basins)
}
| /R/plotWSB.R | permissive | lawinslow/hydroMaps | R | false | false | 4,471 | r | #' Basic plot of WSB based on huc
#'
#' Basic plot
#' @param sites character vector of site ids
#' @param col for basin fill
#' @param mapRange vector of map limits (min long, max long, min lat, max lat)
#' @import sp
#' @import rgdal
#' @export
#' @examples
#' sites <- c("01137500","01491000", "01573000", "01576000","06485500")
#' path <- system.file("extdata", package="hydroMap")
#' siteInfo <- readRDS(file.path(path,"siteInfo.rds"))
#' png("test.png",width=11,height=8,units="in",res=600,pointsize=4)
#' plotWSB(sites)
#' points(siteInfo$dec_long_va, siteInfo$dec_lat_va, pch=20, col="red", cex=3)
#' dev.off()
#'
#' plotWSB(sites[4], mapRange=c(-80,-74, 38, 46))
#' points(siteInfo$dec_long_va[4], siteInfo$dec_lat_va[4], pch=20, col="red", cex=1)
plotWSB <- function(sites,col="#A8A8A850", mapRange = NA){
shape_hydropoly <- shape_hydropoly
shape_polibounds <- shape_polibounds
shape_hydroline <- shape_hydroline
basins <- getBasin(sites)
basins <- spTransform(basins,CRS(proj4string(shape_polibounds)))
if(all(is.na(mapRange))){
plot(basins, col=col)
mapRange <- par()$usr
shape_hydropoly <- clipShape(shape_hydropoly, mapRange)
shape_polibounds <- clipShape(shape_polibounds, mapRange)
shape_hydroline <- clipShape(shape_hydroline, mapRange)
plot(shape_hydropoly,col="lightskyblue2",add=TRUE)
lines(shape_hydroline,col="lightskyblue2")
plot(shape_polibounds,add=TRUE)
} else {
shape_hydropoly <- clipShape(shape_hydropoly, mapRange)
shape_polibounds <- clipShape(shape_polibounds, mapRange)
shape_hydroline <- clipShape(shape_hydroline, mapRange)
basins <- crop(basins, extent(mapRange)) #should figure this out...clipping a SpatialPolygonsDataFrame
plot(shape_hydropoly,col="lightskyblue2")
lines(shape_hydroline,col="lightskyblue2")
plot(shape_polibounds,add=TRUE)
plot(basins, col=col,add=TRUE)
}
}
#' Basic plot of WSB based on huc
#'
#' Basic plot
#' @param shapefile shapefile to clip
#' @param mapRange vector of map limits (min long, max long, min lat, max lat)
#' @import sp
#' @import rgdal
#' @import rgeos
#' @import raster
#' @export
#' @examples
#' mapRange=c(-80,-74, 38, 46)
#' shape_hydropoly <- shape_hydropoly
#' clippedShape <- clipShape(shape_hydropoly, mapRange)
clipShape <- function(shapefile, mapRange){
ext <- extent(mapRange)
clipe <- as(ext, "SpatialPolygons")
proj4string(clipe) <- CRS(proj4string(shapefile))
cropd <- SpatialPolygonsDataFrame(clipe, data.frame(x = 1), match.ID = FALSE)
shapeClipped <- gIntersection(shapefile, cropd,byid=TRUE)
return(shapeClipped)
}
#' Get shapefile basins
#'
#' Get shapefile basins
#' @param sites character id
#' @return shapefile
#' @importFrom httr GET
#' @importFrom httr write_disk
#' @importFrom utils URLencode
#' @import sp
#' @import rgdal
#' @export
#' @examples
#' sites <- c("01491000", "01573000", "01576000","01137500","06485500")
#' basinShapes <- getBasin(sites)
getBasin <- function(sites){
baseURL <- "http://cida-test.er.usgs.gov/nwc/geoserver/NWC/ows?service=WFS&version=1.1.0&srsName=EPSG:4326&request=GetFeature&typeName=NWC:epa_basins"
siteText <- ""
for(i in sites){
siteText <- paste0(siteText,'<ogc:PropertyIsEqualTo matchCase="true">',
'<ogc:PropertyName>site_no</ogc:PropertyName>',
'<ogc:Literal>',i,'</ogc:Literal>',
'</ogc:PropertyIsEqualTo>')
}
if(length(sites) > 1){
filterXML <- paste0('<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">',
'<ogc:Or>',siteText,'</ogc:Or>',
'</ogc:Filter>')
} else {
filterXML <- paste0('<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">',
'<ogc:PropertyIsEqualTo matchCase="true">',
'<ogc:PropertyName>site_no</ogc:PropertyName>',
'<ogc:Literal>',sites,'</ogc:Literal>',
'</ogc:PropertyIsEqualTo>',
'</ogc:Filter>')
}
filterXML <- URLencode(filterXML,reserved = TRUE)
requestURL <- paste0(baseURL,"&outputFormat=shape-zip", "&filter=", filterXML)
destination = tempfile(pattern = 'basins_shape', fileext='.zip')
file <- GET(requestURL, write_disk(destination, overwrite=T))
shp.path <- tempdir()
unzip(destination, exdir = shp.path)
basins = readOGR(shp.path, layer='epa_basins')
return(basins)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.R
\name{integer2date}
\alias{integer2date}
\title{Integer To Date}
\usage{
integer2date(x)
}
\arguments{
\item{x}{an integer}
}
\value{
A Date.
}
\description{
Converts integers to Dates where
the 1st of Jan 2000 is equal to 1.
}
\examples{
integer2date(-1:3)
}
| /man/integer2date.Rd | permissive | poissonconsulting/jaggernaut | R | false | true | 344 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.R
\name{integer2date}
\alias{integer2date}
\title{Integer To Date}
\usage{
integer2date(x)
}
\arguments{
\item{x}{an integer}
}
\value{
A Date.
}
\description{
Converts integers to Dates where
the 1st of Jan 2000 is equal to 1.
}
\examples{
integer2date(-1:3)
}
|
# this script links metiers to VMS.
#
# First it searches through observer data in order to find a list of observed
# trips for which multiple fish tickets exist.
# Once all metiers are assigned, any fish tickets assigned that are in
# this "double list" will be removed.
# Finally for all fish tickets assigned,
# a revenue and pounds column is calculated and added for each time stamp.
link_vms.tickets <- function(window_size){
# find double observed data ----
library(dplyr)
obs <- read.csv("/Users/efuller/Desktop/CNH/rawData/Observers/WCGOPobs/Samhouri_OBFTfinal_Allfisheries_ProcessedwFunction_2009-2012_110613.csv",stringsAsFactors = FALSE)
dubs_tix <- obs %>%
dplyr::select(DRVID, D_DATE, TRIPID, sector, FISHERY, FISH_TICKETS) %>%
distinct() %>%
mutate(dubs = ifelse(grepl(";", FISH_TICKETS), 1, 0)) %>%
filter(dubs == 1) %>%
mutate(D_DATE = as.POSIXct(D_DATE, format = "%m/%d/%Y %H:%M:%S %p"))
for(i in 1:nrow(dubs_tix)){
dubs_tix$tix1[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[1],
format(dubs_tix$D_DATE[i], "%Y")), NA)
dubs_tix$tix2[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[2],
format(dubs_tix$D_DATE[i], "%Y")), NA)
dubs_tix$tix3[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[3],
format(dubs_tix$D_DATE[i], "%Y")), NA)
dubs_tix$tix4[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[4],
format(dubs_tix$D_DATE[i],"%Y")), NA)
dubs_tix$tix5[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[5],
format(dubs_tix$D_DATE[i],"%Y")), NA)
}
# remove those that begin with NA
dubs_tix$tix3[grep("^NA",dubs_tix$tix3)] <- NA
dubs_tix$tix4[grep("^NA",dubs_tix$tix4)] <- NA
dubs_tix$tix5[grep("^NA",dubs_tix$tix5)] <- NA
# make vector of all fish tickets that are part of an observed trip
# which is linked to more than 1 fish ticket
duplicate_ftids <- unique(c(dubs_tix$tix1, dubs_tix$tix2, dubs_tix$tix3, dubs_tix$tix4, dubs_tix$tix5))
duplicate_ftids <- duplicate_ftids[-which(is.na(duplicate_ftids))] # drop NA
# what's the maximum number of catches landed by a single vessel in a day? ----
# load catch
catch <- readRDS("/Users/efuller/Desktop/CNH/processedData/catch/1_cleaningData/tickets.RDS")
# finding discrete trips ----
# ok to generalize, boats get distance to coast measured, any point that's > 1.5 km from coastline is a trip
load("/Users/efuller/Desktop/CNH/processedData/spatial/2_coastline.Rdata")
library(sp)
proj4string(WC) <- CRS("+init=epsg:4269 +proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs +towgs84=0,0,0")
wc_proj <- spTransform(WC,CRS("+proj=aea +lat_1=35.13863306500551 +lat_2=46.39606296952133 +lon_0=-127.6171875"))
# go through each track, save both new trajectory and fish tickest that are not found in VMS
data.dir <- "/Users/efuller/Desktop/CNH/processedData/spatial/vms/intermediate/03_overlapMetier/"
vms_files <- dir(data.dir)
for(b in 1:length(vms_files)){
ves <- readRDS(paste0(data.dir,vms_files[b]))
find_trips <- function(vessel_track, coastline = wc_proj,
projection = CRS("+proj=aea +lat_1=35.13863306500551
+lat_2=46.39606296952133 +lon_0=-127.6171875"))
{
# project for gDistance
# default is Equal Area Albers
# make sure no positive longitudes
if(any(vessel_track$longitude>0 | vessel_track$longitude < -150)){
vessel_track <- subset(vessel_track, longitude < 0 & longitude > -150)
}
# make vessel track sp object, assign default lat/lon of NAD83
library(sp)
coordinates(vessel_track) <- ~longitude + latitude
proj4string(vessel_track) <- CRS(
"+init=epsg:4269 +proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs +towgs84=0,0,0")
# convert into equal albers projection for gDistance
vessel_track <- spTransform(vessel_track, projection)
# calculate pairwise distance to coast, because projected measures in meters
library(rgeos)
vessel_track@data$dist_coast <- NA
for(i in 1:nrow(vessel_track)){
vessel_track@data$dist_coast[i] <- gDistance(vessel_track[i,], coastline)
}
# any points that are > 1.5 km from the coast are "out" on a trip
vessel_track@data$trip <- ifelse(vessel_track@data$dist_coast/1000>1.5, 1, 0)
vessel_track <- vessel_track[order(vessel_track@data$datetime),]
# if there's a gap > 6 hours in middle of trip
# if vessel starts in the middle of a trip, discard it because will reverse algorithm
if(vessel_track@data$trip[1]==1){
# find first zero
first.zero = which(vessel_track@data$trip==0)[1]
# make first trips into 0s, even though on water, won't consider.
vessel_track@data$trip[1:first.zero] <- 0
}
time_diffs <- diff(vessel_track@data$datetime)
# if diff > 3 hours, new trip
vessel_track@data$time_diff <- c(NA, time_diffs)
gap_marker <- which(vessel_track@data$time_diff>60*3 & vessel_track@data$trip==1)
if(length(gap_marker)>0){
# so point before needs to be inserted as 0. will eventually drop these duplicate points.
new_rows <- vessel_track[gap_marker,]
new_rows$trip <- 0
foo <- rbind(vessel_track, new_rows)
foo <- foo[order(foo$datetime, foo$trip),]
vessel_track <- foo
}
vessel_track@data$trip_num <- c(0,cumsum(abs(diff(vessel_track@data$trip))))
vessel_track@data$only_trips <- ifelse(vessel_track@data$trip_num %% 2 == 0, 0, vessel_track@data$trip_num)
# drop duplicates i inserted
vessel_track <- vessel_track[-which(duplicated(vessel_track@data[,c("dist_coast","time_diff")])),]
# remove the other two indicators
vessel_track@data$trip <- NULL
vessel_track@data$trip_num <- NULL
# returns distance from coast and unique ID for trip
return(vessel_track)
}
v2_track <- find_trips(vessel_track = ves)
if(nrow(v2_track)==0){next}
# next is assigning landings to trips, argument is time window to look back in
# look_back is in hours, represents num hours to look back for vms data
assign_landings <- function(time_window, v2 = ves){
# convert look_back window to seconds
look_back <- time_window *60 *60
# find landings: subset trip ID, landing dates, metier, and port
c2 <- unique(subset(catch, drvid == unique(v2$docnum), select = c("trip_id","pcid","metier.2010","tdate")))
c2$tdate.start <- as.POSIXct(c2$tdate, format = "%d-%b-%y",tz = "Etc/GMT-8") - look_back
c2$tdate.end <- as.POSIXct(c2$tdate, format = "%d-%b-%y",tz = "Etc/GMT-8")+23.9999*60*60
c2 <- c2[order(c2$tdate.start),]
# for each 24 period, is there a trip in it? if yes, then assign that trip the landing ID.
# change to only allow one row for each tdate, need to sort by tdate.start
library(dplyr)
c2_bydate <- c2 %>%
group_by(tdate) %>%
mutate(trip_id1 = unique(trip_id)[1],
trip_id2 = as.character(ifelse(length(unique(trip_id))>1, unique(trip_id)[2], NA)),
trip_id3 = as.character(ifelse(length(unique(trip_id))>2, unique(trip_id)[3], NA)),
trip_id4 = as.character(ifelse(length(unique(trip_id))>3, unique(trip_id)[4], NA)),
trip_id5 = as.character(ifelse(length(unique(trip_id))>4, unique(trip_id)[5], NA)),
trip_id6 = as.character(ifelse(length(unique(trip_id))>5, unique(trip_id)[6], NA))) %>%
dplyr::select(-trip_id) %>%
distinct(.keep_all = TRUE)
# reorder
c2_bydate <- c2_bydate[order(c2_bydate$tdate.start),]
# check to make sure metiers and ports are the same
any(duplicated(c2_bydate[,3:8])) # should be false, means that all ports and metiers are same
return(c2_bydate)
}
c2_bydate <- assign_landings(time_window = window_size, v2 = ves)
c2_bydate <- as.data.frame(c2_bydate)
# will be more trips than landings, so go through landings
v2_track$trip_id1 <- NA
v2_track$trip_id2 <- NA
v2_track$trip_id3 <- NA
v2_track$trip_id4 <- NA
v2_track$trip_id5 <- NA
v2_track$trip_id6 <- NA
# rule: if there's a trip during the time interval of landings (determined above by time window argument), assign landings. If more than two trips, check to make sure same fishery and same port, and assign both trip_ids. Then in future, sum landings for these two trips and attribute to entire trip trajectory. Thus in future, should be able to take metier from either of these trip IDs and assign to both trips and shouldn't matter (because have already checked that they're from the same fishery)
trips_wo_vms <- NA
for(j in 1:nrow(c2_bydate)){
# find all trips
trips <- as.character(dplyr::select(as.data.frame(c2_bydate[j,]), contains("trip_id")))
if(all(trips=="NA")){
cat("corrupted catch dates")
break
}
# was a trip will landed on the day?
prior_trips <- unique(v2_track$only_trips[which(v2_track$datetime > c2_bydate$tdate.start[j] &
v2_track$datetime < c2_bydate$tdate.end[j] &
v2_track$only_trips!=0)])
# if no trips within time window hours, record trip id and move on
if(length(prior_trips)==0) {
trips_wo_vms <- c(trips_wo_vms, trips)
next
}else{ # but if there are some prior trips
# make sure the trip(s) all occurred before tdate.end,
# if not replace with NA. then drop NAs again
for(q in 1:length(prior_trips)){
return_time <- max(v2_track$datetime[which(v2_track$only_trips==prior_trips[q])])
prior_trips[q] <- ifelse(return_time > c2_bydate$tdate.end[j], NA, prior_trips[q])
}
# check that those trips don't belong to another landing ticket
for(q in 1:length(prior_trips)){
assigned_previous <- any(!is.na(v2_track$trip_id1[which(v2_track$only_trips %in% prior_trips[q])]))
prior_trips[q] <- ifelse(assigned_previous, NA, prior_trips[q])
}
# possible that you could loose all trips, if all are NOT NA, then can use those trips, else look earlier
if(!(all(is.na(prior_trips)))) {
if(any(is.na(prior_trips))){
prior_trips <- prior_trips[-which(is.na(prior_trips))]
}
# then make sure that trip wasn't already assigned a landing ticket
if(!(any(is.na(v2_track$trip_id1[which(v2_track$only_trips %in% unique(prior_trips))])))){
# means that there was a landing prior to this one that already claimed that VMS trip, so
# this landing ticket has no VMS
trips_wo_vms <- c(trips_wo_vms, trips)
next
}
# assign the trip to VMS!
v2_track[which(v2_track$only_trips %in% prior_trips),
c("trip_id1","trip_id2", "trip_id3","trip_id4","trip_id5","trip_id6")] <-
c2_bydate[j,c("trip_id1","trip_id2", "trip_id3","trip_id4","trip_id5","trip_id6")]
}else{
if(all(is.na(prior_trips))) {
trips_wo_vms <- c(trips_wo_vms, trips)
next
}else{
cat("warning:shouldn't get here")
break
}
}
}
}
# merge metier with trip_id
met_track <- merge(as.data.frame(v2_track), dplyr::select(c2_bydate, starts_with("trip_id"), metier.2010),all.x = TRUE, all.y = FALSE)
met_track <- met_track[order(met_track$datetime),]
# rename aggregate trips - adds an agg_id
met_agg <- met_track %>%
dplyr::select(only_trips, starts_with("trip_id")) %>%
distinct() %>%
filter(!is.na(trip_id1)) %>%
group_by(trip_id1) %>%
mutate(agg_id = unique(only_trips)[1]) %>%
ungroup() %>%
dplyr::select(only_trips, agg_id, - trip_id1) %>%
arrange(agg_id) %>%
right_join(met_track)
# add indicator for whether it's a duplicate observed trip
met_agg$obs_dup <- ifelse(met_agg$trip_id1 %in% duplicate_ftids, 1, 0)
# reproject trajectory to lat/lon
met_agg <- as.data.frame(met_agg)
coordinates(met_agg) <- ~longitude+latitude
proj4string(met_agg) <- proj4string(wc_proj)
met_agg <- spTransform(met_agg, proj4string(WC))
met_agg <- as.data.frame(met_agg)
# calculate revenue and lbs for each trip ----
trips_landed <- unique(c(met_agg$trip_id1, met_agg$trip_id2, met_agg$trip_id3, met_agg$trip_id4, met_agg$trip_id5, met_agg$trip_id6))
trips_landed <- trips_landed[-which(is.na(trips_landed))]
if(length(trips_landed)==0){
met_all <- met_agg
met_all[,c("lbs","revenue","n.trips","time","distance","lbs_time","rev_time","lbs_dist","lbs_trips","rev_trips")] <- NA
}else{
trip_tots <- subset(catch, trip_id %in% trips_landed) %>%
group_by(trip_id) %>%
summarize(lbs = sum(pounds,na.rm=T), revenue = sum(adj_revenue, na.rm = T))
# use only_trips to make trip_id vector long format
library(tidyr)
trip_amts <- met_agg %>%
dplyr::select( agg_id, starts_with("trip_id")) %>%
distinct() %>%
filter(!is.na(trip_id1)) %>%
gather(key=ids, value = trip_id, -agg_id) %>%
filter(trip_id!="NA") %>%
arrange(agg_id) %>%
left_join(trip_tots) %>%
group_by(agg_id) %>%
summarize(lbs = sum(lbs), revenue = sum(revenue), n.trips = length(agg_id))
# for each of these agg_id trips need to get effort data (duration of time for each `only_trips` and distance)
# returns sequential steps in km
path_dist <- function(lon, lat, dist_coast.vec){
if(length(lon)==1){ # if only one point out, then it's distance from coast
path_dist = dist_coast.vec/1000
}else{
path_dist = rep(NA, length(lon))
dist_mat <- cbind(lon, lat)
for(i in 2:length(lon)){
path_dist[i] <- spDistsN1(t(as.matrix(dist_mat[i-1,])), t(as.matrix(dist_mat[i,])), longlat = TRUE)
}
path_dist[1] <- dist_coast.vec[1]/1000
path_dist <- c(path_dist, dist_coast.vec[length(dist_coast.vec)]/1000)
}
return(path_dist)
}
effort_dat <- met_agg %>%
filter(only_trips > 0 & !is.na(agg_id)) %>%
group_by(only_trips) %>%
summarize(agg_id = unique(agg_id),
time = ifelse(length(datetime)==1, 1, difftime(max(datetime),min(datetime),units="hours")),
distance = sum(path_dist(lon = longitude, lat = latitude, dist_coast.vec = dist_coast))) %>%
group_by(agg_id) %>%
summarize(time = sum(time), distance =sum(distance))
# returns time in hours, distance in km
cpue <- merge(trip_amts, effort_dat) %>%
mutate(lbs_time = lbs/time, rev_time = revenue/time, lbs_dist = lbs/distance,
rev_dist = revenue/distance, lbs_trips = lbs/n.trips,
rev_trips = revenue/n.trips)
met_all <- left_join(met_agg, cpue)
}
saveRDS(met_all, paste0("/Users/efuller/Desktop/CNH/processedData/spatial/vms/intermediate/04_link_mets_vms/tw_",window_size,"hr/",unique(v2_track$docnum),".RDS"))
}
}
link_vms.tickets(window_size = 0)
link_vms.tickets(window_size = 24)
link_vms.tickets(window_size = 36)
link_vms.tickets(window_size = 72)
link_vms.tickets(window_size = 168)
| /processedData/spatial/vms/intermediate/04_link_mets_vms.R | no_license | emfuller/cnh | R | false | false | 15,172 | r | # this script links metiers to VMS.
#
# First it searches through observer data in order to find a list of observed
# trips for which multiple fish tickets exist.
# Once all metiers are assigned, any fish tickets assigned that are in
# this "double list" will be removed.
# Finally for all fish tickets assigned,
# a revenue and pounds column is calculated and added for each time stamp.
link_vms.tickets <- function(window_size){
# find double observed data ----
library(dplyr)
obs <- read.csv("/Users/efuller/Desktop/CNH/rawData/Observers/WCGOPobs/Samhouri_OBFTfinal_Allfisheries_ProcessedwFunction_2009-2012_110613.csv",stringsAsFactors = FALSE)
dubs_tix <- obs %>%
dplyr::select(DRVID, D_DATE, TRIPID, sector, FISHERY, FISH_TICKETS) %>%
distinct() %>%
mutate(dubs = ifelse(grepl(";", FISH_TICKETS), 1, 0)) %>%
filter(dubs == 1) %>%
mutate(D_DATE = as.POSIXct(D_DATE, format = "%m/%d/%Y %H:%M:%S %p"))
for(i in 1:nrow(dubs_tix)){
dubs_tix$tix1[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[1],
format(dubs_tix$D_DATE[i], "%Y")), NA)
dubs_tix$tix2[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[2],
format(dubs_tix$D_DATE[i], "%Y")), NA)
dubs_tix$tix3[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[3],
format(dubs_tix$D_DATE[i], "%Y")), NA)
dubs_tix$tix4[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[4],
format(dubs_tix$D_DATE[i],"%Y")), NA)
dubs_tix$tix5[i] <- ifelse(dubs_tix$dubs[i] == 1,
paste0(unlist(strsplit(dubs_tix$FISH_TICKETS[i], split = ";"))[5],
format(dubs_tix$D_DATE[i],"%Y")), NA)
}
# remove those that begin with NA
dubs_tix$tix3[grep("^NA",dubs_tix$tix3)] <- NA
dubs_tix$tix4[grep("^NA",dubs_tix$tix4)] <- NA
dubs_tix$tix5[grep("^NA",dubs_tix$tix5)] <- NA
# make vector of all fish tickets that are part of an observed trip
# which is linked to more than 1 fish ticket
duplicate_ftids <- unique(c(dubs_tix$tix1, dubs_tix$tix2, dubs_tix$tix3, dubs_tix$tix4, dubs_tix$tix5))
duplicate_ftids <- duplicate_ftids[-which(is.na(duplicate_ftids))] # drop NA
# what's the maximum number of catches landed by a single vessel in a day? ----
# load catch
catch <- readRDS("/Users/efuller/Desktop/CNH/processedData/catch/1_cleaningData/tickets.RDS")
# finding discrete trips ----
# ok to generalize, boats get distance to coast measured, any point that's > 1.5 km from coastline is a trip
load("/Users/efuller/Desktop/CNH/processedData/spatial/2_coastline.Rdata")
library(sp)
proj4string(WC) <- CRS("+init=epsg:4269 +proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs +towgs84=0,0,0")
wc_proj <- spTransform(WC,CRS("+proj=aea +lat_1=35.13863306500551 +lat_2=46.39606296952133 +lon_0=-127.6171875"))
# go through each track, save both new trajectory and fish tickest that are not found in VMS
data.dir <- "/Users/efuller/Desktop/CNH/processedData/spatial/vms/intermediate/03_overlapMetier/"
vms_files <- dir(data.dir)
for(b in 1:length(vms_files)){
ves <- readRDS(paste0(data.dir,vms_files[b]))
# Split a VMS vessel track into discrete fishing trips.
#
# A fix more than 1.5 km offshore is "at sea"; consecutive at-sea fixes
# form a trip.  A gap of more than 3 hours inside a trip splits it in two.
# If the record starts mid-trip, that partial trip is discarded (its start
# was never observed).
#
# Args:
#   vessel_track: data.frame with longitude, latitude and datetime columns.
#   coastline:    projected coastline sp object (defaults to the global
#                 `wc_proj` built earlier in this script).
#   projection:   equal-area CRS so gDistance() returns metres.
#
# Returns: a SpatialPointsDataFrame ordered by time with columns
#   dist_coast (metres from coast) and only_trips (0 = in port/discarded,
#   odd integers = unique trip ids).
find_trips <- function(vessel_track, coastline = wc_proj,
                       projection = CRS("+proj=aea +lat_1=35.13863306500551 +lat_2=46.39606296952133 +lon_0=-127.6171875"))
{
  # drop fixes with impossible longitudes (positive, or far out in the Pacific)
  if(any(vessel_track$longitude > 0 | vessel_track$longitude < -150)){
    vessel_track <- subset(vessel_track, longitude < 0 & longitude > -150)
  }
  # promote to an sp object; raw VMS positions are NAD83 lat/lon
  library(sp)
  coordinates(vessel_track) <- ~longitude + latitude
  proj4string(vessel_track) <- CRS(
    "+init=epsg:4269 +proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs +towgs84=0,0,0")
  # project to equal-area so distances are in metres
  vessel_track <- spTransform(vessel_track, projection)
  library(rgeos)
  vessel_track@data$dist_coast <- NA
  for(i in seq_len(nrow(vessel_track))){
    vessel_track@data$dist_coast[i] <- gDistance(vessel_track[i, ], coastline)
  }
  # points > 1.5 km offshore are on a trip
  vessel_track@data$trip <- ifelse(vessel_track@data$dist_coast/1000 > 1.5, 1, 0)
  vessel_track <- vessel_track[order(vessel_track@data$datetime), ]
  # if the record starts mid-trip, zero that partial trip out
  if(vessel_track@data$trip[1] == 1){
    first.zero <- which(vessel_track@data$trip == 0)[1]
    if(is.na(first.zero)){
      # vessel never returns within the record: no complete trip to keep
      # (original indexed with NA here and errored)
      vessel_track@data$trip <- 0
    }else{
      vessel_track@data$trip[1:first.zero] <- 0
    }
  }
  # time between fixes, explicitly in minutes: bare diff() on POSIXct picks
  # its own units (secs/mins/hours) from the data, but the 60*3 threshold
  # below means "3 hours in minutes"
  time_diffs <- as.numeric(diff(vessel_track@data$datetime), units = "mins")
  vessel_track@data$time_diff <- c(NA, time_diffs)
  # a gap > 3 hours while at sea ends the trip: duplicate the point before
  # the gap, flagged as in-port, so the cumsum() below starts a new trip id
  gap_marker <- which(vessel_track@data$time_diff > 60*3 & vessel_track@data$trip == 1)
  if(length(gap_marker) > 0){
    new_rows <- vessel_track[gap_marker, ]
    new_rows$trip <- 0
    foo <- rbind(vessel_track, new_rows)
    foo <- foo[order(foo$datetime, foo$trip), ]
    vessel_track <- foo
  }
  # each 0->1 / 1->0 transition increments the counter; odd values are trips
  vessel_track@data$trip_num <- c(0, cumsum(abs(diff(vessel_track@data$trip))))
  vessel_track@data$only_trips <- ifelse(vessel_track@data$trip_num %% 2 == 0, 0,
                                         vessel_track@data$trip_num)
  # drop the duplicate rows inserted above; guard the negative index --
  # x[-integer(0), ] would silently drop EVERY row when nothing is duplicated
  dups <- which(duplicated(vessel_track@data[, c("dist_coast", "time_diff")]))
  if(length(dups) > 0){
    vessel_track <- vessel_track[-dups, ]
  }
  # remove the intermediate indicator columns
  vessel_track@data$trip <- NULL
  vessel_track@data$trip_num <- NULL
  # returns distance from coast and unique ID for trip
  return(vessel_track)
}
v2_track <- find_trips(vessel_track = ves)
if(nrow(v2_track)==0){next}
# next is assigning landings to trips, argument is time window to look back in
# look_back is in hours, represents num hours to look back for vms data
# Collapse a vessel's fish tickets to one row per landing date and attach
# the time window in which matching VMS positions will be searched.
#
# Args:
#   time_window: hours to look back before the landing date for VMS data.
#   v2:          the vessel's VMS track; v2$docnum is used to find its
#                tickets in the global `catch` table.
#
# Returns: one row per landing date with pcid, metier.2010, tdate,
#   tdate.start/tdate.end (the search window) and trip_id1..trip_id6
#   (up to six distinct ticket ids landed on that date).
assign_landings <- function(time_window, v2 = ves){
  # convert look-back window to seconds
  look_back <- time_window * 60 * 60
  # this vessel's landings: ticket id, port, fishery, landing date
  c2 <- unique(subset(catch, drvid == unique(v2$docnum),
                      select = c("trip_id", "pcid", "metier.2010", "tdate")))
  # NOTE(review): POSIX "Etc/GMT-8" is UTC+8, not UTC-8; if US Pacific time
  # was intended this should probably be "Etc/GMT+8" -- confirm before changing.
  c2$tdate.start <- as.POSIXct(c2$tdate, format = "%d-%b-%y", tz = "Etc/GMT-8") - look_back
  # window closes at the end of the landing day (23.9999 h after midnight)
  c2$tdate.end <- as.POSIXct(c2$tdate, format = "%d-%b-%y", tz = "Etc/GMT-8") + 23.9999*60*60
  c2 <- c2[order(c2$tdate.start), ]
  # spread up to six distinct ticket ids landed on the same date into
  # wide trip_id1..trip_id6 columns, one row per date
  library(dplyr)
  c2_bydate <- c2 %>%
    group_by(tdate) %>%
    mutate(trip_id1 = unique(trip_id)[1],
           trip_id2 = as.character(ifelse(length(unique(trip_id)) > 1, unique(trip_id)[2], NA)),
           trip_id3 = as.character(ifelse(length(unique(trip_id)) > 2, unique(trip_id)[3], NA)),
           trip_id4 = as.character(ifelse(length(unique(trip_id)) > 3, unique(trip_id)[4], NA)),
           trip_id5 = as.character(ifelse(length(unique(trip_id)) > 4, unique(trip_id)[5], NA)),
           trip_id6 = as.character(ifelse(length(unique(trip_id)) > 5, unique(trip_id)[6], NA))) %>%
    dplyr::select(-trip_id) %>%
    distinct(.keep_all = TRUE) %>%
    ungroup()
  # reorder by window start
  c2_bydate <- c2_bydate[order(c2_bydate$tdate.start), ]
  # same-day tickets are assumed to share port and fishery; the original
  # computed this check and discarded the result -- surface it instead.
  # (duplicated date+ticket columns here mean two rows for one date that
  # distinct() could not collapse, i.e. differing pcid/metier)
  if(any(duplicated(c2_bydate[, 3:8]))){
    warning("assign_landings: same-day tickets differ in port/metier for vessel ",
            unique(v2$docnum), call. = FALSE)
  }
  return(c2_bydate)
}
c2_bydate <- assign_landings(time_window = window_size, v2 = ves)
c2_bydate <- as.data.frame(c2_bydate)
# will be more trips than landings, so go through landings
v2_track$trip_id1 <- NA
v2_track$trip_id2 <- NA
v2_track$trip_id3 <- NA
v2_track$trip_id4 <- NA
v2_track$trip_id5 <- NA
v2_track$trip_id6 <- NA
# rule: if there's a trip during the time interval of landings (determined above by time window argument), assign landings. If more than two trips, check to make sure same fishery and same port, and assign both trip_ids. Then in future, sum landings for these two trips and attribute to entire trip trajectory. Thus in future, should be able to take metier from either of these trip IDs and assign to both trips and shouldn't matter (because have already checked that they're from the same fishery)
trips_wo_vms <- NA
for(j in 1:nrow(c2_bydate)){
# find all trips
trips <- as.character(dplyr::select(as.data.frame(c2_bydate[j,]), contains("trip_id")))
if(all(trips=="NA")){
cat("corrupted catch dates")
break
}
# was a trip will landed on the day?
prior_trips <- unique(v2_track$only_trips[which(v2_track$datetime > c2_bydate$tdate.start[j] &
v2_track$datetime < c2_bydate$tdate.end[j] &
v2_track$only_trips!=0)])
# if no trips within time window hours, record trip id and move on
if(length(prior_trips)==0) {
trips_wo_vms <- c(trips_wo_vms, trips)
next
}else{ # but if there are some prior trips
# make sure the trip(s) all occurred before tdate.end,
# if not replace with NA. then drop NAs again
for(q in 1:length(prior_trips)){
return_time <- max(v2_track$datetime[which(v2_track$only_trips==prior_trips[q])])
prior_trips[q] <- ifelse(return_time > c2_bydate$tdate.end[j], NA, prior_trips[q])
}
# check that those trips don't belong to another landing ticket
for(q in 1:length(prior_trips)){
assigned_previous <- any(!is.na(v2_track$trip_id1[which(v2_track$only_trips %in% prior_trips[q])]))
prior_trips[q] <- ifelse(assigned_previous, NA, prior_trips[q])
}
# possible that you could loose all trips, if all are NOT NA, then can use those trips, else look earlier
if(!(all(is.na(prior_trips)))) {
if(any(is.na(prior_trips))){
prior_trips <- prior_trips[-which(is.na(prior_trips))]
}
# then make sure that trip wasn't already assigned a landing ticket
if(!(any(is.na(v2_track$trip_id1[which(v2_track$only_trips %in% unique(prior_trips))])))){
# means that there was a landing prior to this one that already claimed that VMS trip, so
# this landing ticket has no VMS
trips_wo_vms <- c(trips_wo_vms, trips)
next
}
# assign the trip to VMS!
v2_track[which(v2_track$only_trips %in% prior_trips),
c("trip_id1","trip_id2", "trip_id3","trip_id4","trip_id5","trip_id6")] <-
c2_bydate[j,c("trip_id1","trip_id2", "trip_id3","trip_id4","trip_id5","trip_id6")]
}else{
if(all(is.na(prior_trips))) {
trips_wo_vms <- c(trips_wo_vms, trips)
next
}else{
cat("warning:shouldn't get here")
break
}
}
}
}
# merge metier with trip_id
met_track <- merge(as.data.frame(v2_track), dplyr::select(c2_bydate, starts_with("trip_id"), metier.2010),all.x = TRUE, all.y = FALSE)
met_track <- met_track[order(met_track$datetime),]
# rename aggregate trips - adds an agg_id
met_agg <- met_track %>%
dplyr::select(only_trips, starts_with("trip_id")) %>%
distinct() %>%
filter(!is.na(trip_id1)) %>%
group_by(trip_id1) %>%
mutate(agg_id = unique(only_trips)[1]) %>%
ungroup() %>%
dplyr::select(only_trips, agg_id, - trip_id1) %>%
arrange(agg_id) %>%
right_join(met_track)
# add indicator for whether it's a duplicate observed trip
met_agg$obs_dup <- ifelse(met_agg$trip_id1 %in% duplicate_ftids, 1, 0)
# reproject trajectory to lat/lon
met_agg <- as.data.frame(met_agg)
coordinates(met_agg) <- ~longitude+latitude
proj4string(met_agg) <- proj4string(wc_proj)
met_agg <- spTransform(met_agg, proj4string(WC))
met_agg <- as.data.frame(met_agg)
# calculate revenue and lbs for each trip ----
trips_landed <- unique(c(met_agg$trip_id1, met_agg$trip_id2, met_agg$trip_id3, met_agg$trip_id4, met_agg$trip_id5, met_agg$trip_id6))
trips_landed <- trips_landed[-which(is.na(trips_landed))]
if(length(trips_landed)==0){
met_all <- met_agg
met_all[,c("lbs","revenue","n.trips","time","distance","lbs_time","rev_time","lbs_dist","lbs_trips","rev_trips")] <- NA
}else{
trip_tots <- subset(catch, trip_id %in% trips_landed) %>%
group_by(trip_id) %>%
summarize(lbs = sum(pounds,na.rm=T), revenue = sum(adj_revenue, na.rm = T))
# use only_trips to make trip_id vector long format
library(tidyr)
trip_amts <- met_agg %>%
dplyr::select( agg_id, starts_with("trip_id")) %>%
distinct() %>%
filter(!is.na(trip_id1)) %>%
gather(key=ids, value = trip_id, -agg_id) %>%
filter(trip_id!="NA") %>%
arrange(agg_id) %>%
left_join(trip_tots) %>%
group_by(agg_id) %>%
summarize(lbs = sum(lbs), revenue = sum(revenue), n.trips = length(agg_id))
# for each of these agg_id trips need to get effort data (duration of time for each `only_trips` and distance)
# returns sequential steps in km
# Sequential step lengths (km) along a trip: shore -> first fix, then
# fix-to-fix great-circle legs, then last fix -> shore.  For a single-fix
# trip the distance is just that fix's distance from the coast.
path_dist <- function(lon, lat, dist_coast.vec){
  # one fix: only the coast distance (metres -> km)
  if(length(lon) == 1){
    return(dist_coast.vec/1000)
  }
  n <- length(lon)
  coords <- cbind(lon, lat)
  seg_km <- rep(NA, n)
  # great-circle distance between consecutive fixes
  for(k in seq(2, n)){
    seg_km[k] <- spDistsN1(t(as.matrix(coords[k - 1, ])),
                           t(as.matrix(coords[k, ])), longlat = TRUE)
  }
  # opening leg: shore out to the first fix
  seg_km[1] <- dist_coast.vec[1]/1000
  # closing leg: last fix back to shore
  c(seg_km, dist_coast.vec[length(dist_coast.vec)]/1000)
}
effort_dat <- met_agg %>%
filter(only_trips > 0 & !is.na(agg_id)) %>%
group_by(only_trips) %>%
summarize(agg_id = unique(agg_id),
time = ifelse(length(datetime)==1, 1, difftime(max(datetime),min(datetime),units="hours")),
distance = sum(path_dist(lon = longitude, lat = latitude, dist_coast.vec = dist_coast))) %>%
group_by(agg_id) %>%
summarize(time = sum(time), distance =sum(distance))
# returns time in hours, distance in km
cpue <- merge(trip_amts, effort_dat) %>%
mutate(lbs_time = lbs/time, rev_time = revenue/time, lbs_dist = lbs/distance,
rev_dist = revenue/distance, lbs_trips = lbs/n.trips,
rev_trips = revenue/n.trips)
met_all <- left_join(met_agg, cpue)
}
saveRDS(met_all, paste0("/Users/efuller/Desktop/CNH/processedData/spatial/vms/intermediate/04_link_mets_vms/tw_",window_size,"hr/",unique(v2_track$docnum),".RDS"))
}
}
# Run the VMS-to-fish-ticket linking under several look-back windows
# (hours of VMS data searched before each landing date); 0 = landing day only.
link_vms.tickets(window_size = 0)
link_vms.tickets(window_size = 24)
link_vms.tickets(window_size = 36)
link_vms.tickets(window_size = 72)
link_vms.tickets(window_size = 168)
|
# Two-color microarray (beta7) preprocessing, extracted from a Sweave
# document: each "chunk number" banner below marks one code chunk.
###################################################
### chunk number 1: setup
###################################################
library("RbcBook1")
###################################################
### chunk number 2: checkVersions
###################################################
# guard against running with package versions older than those the
# chapter code was written for
library("arrayQuality")
library("marray")
library("beta7")
stopifnot(package.version("arrayQuality") >= package_version("1.0.9"))
stopifnot(package.version("marray") >= package_version("1.5.29"))
stopifnot(package.version("beta7") >= package_version("0.5.4"))
###################################################
### chunk number 3: GEODo
###################################################
# fetch GEO sample GSM16689 (cache() so re-runs skip the download)
library("AnnBuilder")
samp.6Hs.166 <- cache("samp.6Hs.166",
                      queryGEO(GEO(), "GSM16689"))
###################################################
### chunk number 4: GEOshow eval=FALSE
###################################################
# display-only version of the chunk above (eval=FALSE in the vignette)
## library("AnnBuilder")
## samp.6Hs.166 <- queryGEO(GEO(), "GSM16689")
###################################################
### chunk number 5: readbeta7
###################################################
# target (sample description) file shipped with the beta7 package
datadir <- system.file("beta7", package="beta7")
TargetInfo <- read.marrayInfo(file.path(datadir, "TargetBeta7.txt"))
###################################################
### chunk number 6: info1
###################################################
TargetInfo@maNotes <- "Files were loaded from beta7 package."
###################################################
### chunk number 7: info2
###################################################
# auto-prints the target information at top level
TargetInfo
###################################################
### chunk number 8: Kote13
###################################################
# read array layout / probe annotation from the GenePix file
galinfo <- read.Galfile("6Hs.166.gpr", path=datadir)
###################################################
### chunk number 9: oldwd1
###################################################
# remember the working directory so it can be restored after reading files
oldwd <- getwd()
setwd(datadir)
###################################################
### chunk number 10: read.GenePix
###################################################
# read two raw GenePix scans into a marray object (no background columns)
setwd(datadir)
files <- c("6Hs.166.gpr", "6Hs.187.1.gpr")
mraw <- read.GenePix(files, name.Gb=NULL, name.Rb=NULL)
###################################################
### chunk number 11: oldwd2
###################################################
# restore the working directory saved in chunk 9
setwd(oldwd)
###################################################
### chunk number 12: gnurps3
###################################################
# verify the target info matches the ordering of the intensity data
library("beta7")
checkTargetInfo(beta7)
###################################################
### chunk number 13: maGeneTable
###################################################
# peek at the probe annotation table
maGeneTable(beta7)[1:4, 1:5]
###################################################
### chunk number 14: whatAnUglyHack
###################################################
# copy of the data with both background channels zeroed out
# (i.e. "no background subtraction")
beta7nbg <- beta7
beta7nbg@maGb <- beta7nbg@maRb <- 0 * beta7nbg@maRb
###################################################
### chunk number 15: subsett
###################################################
# subsetting example: first 100 spots on arrays 2-3
beta7sub <- beta7[1:100,2:3]
###################################################
### chunk number 16: subsetu
###################################################
# compute spot indices from grid coordinates via the array layout
coord <- maCompCoord(1:maNgr(beta7), 1:maNgc(beta7), maNsr(beta7), 1:3)
ind <- maCoord2Ind(coord, L=maLayout(beta7))
###################################################
### chunk number 17: eval=FALSE
###################################################
## maQualityPlots(beta7)
###################################################
### chunk number 18: eval=FALSE
###################################################
## agQuality()
###################################################
### chunk number 19: ZZ1
###################################################
# spatial image of the red background intensities for array 5
image(beta7[,5], xvar = "maRb", bar = TRUE)
###################################################
### chunk number 20: ZZ2
###################################################
# log-ratio (maM) image of array 3 with a blue-gray-yellow palette
RGcol <- maPalette(low = "blue", mid = "gray", high = "yellow", k = 50)
image(beta7[, 3], xvar = "maM", col=RGcol)
###################################################
### chunk number 21: ZZ3
###################################################
# overlay spots with GenePix weight < -50 on the average-intensity image
flags <- beta7@maW[,1] < -50
image(beta7[,1], xvar="maA", overlay=flags)
###################################################
### chunk number 22: maBoxplotplate
###################################################
# boxplot of average intensity by plate for array 3
par(mar=c(5, 3,3,3), cex.axis=0.7)
boxplot(beta7[, 3], xvar = "maPlate", yvar = "maA", outline=FALSE, las=2)
###################################################
### chunk number 23: eval=FALSE
###################################################
## boxplot(beta7, main = "beta7 arrays", las=2)
###################################################
### chunk number 24: maBoxplotarrays
###################################################
# per-array boxplots of log-ratios
par(mar=c(5, 3,3,3), cex.axis=0.7) #, cex.main=0.8)
boxplot(beta7, ylim=c(-4,4), main = "beta7 arrays", outline=FALSE, las=2)
###################################################
### chunk number 25: maplot2col
###################################################
# MA-plot of array 2 (no-background copy); mark |M| > 2 spots in red and
# empty-control spots in blue
plot(beta7nbg[,2], lines.func=NULL, legend.func=NULL)
points(beta7nbg[,2], subset=abs(maM(beta7nbg)[,2]) > 2,
       col="red", pch=18)
points(beta7nbg[,2], subset=maControls(beta7nbg) == "Empty", col="blue", pch=18)
###################################################
### chunk number 26: beta7normDo
###################################################
# within-array normalization, maNorm(norm = "p"); cached across runs
beta7norm <- cache("beta7norm", maNorm(beta7, norm="p"))
###################################################
### chunk number 27: beta7normShow eval=FALSE
###################################################
## beta7norm <- maNorm(beta7, norm="p")
###################################################
### chunk number 28: boxplotscale
###################################################
# additional between-array scale normalization
beta7norm.scale <- maNormScale(beta7norm)
###################################################
### chunk number 29: twoStepSeparateChanel
###################################################
# second step: quantile-normalize between arrays via limma
beta7norm@maW <- matrix(0,0,0) ## Remove weights
beta7.p <- as(beta7norm, "MAList") ## convert data to RGList
beta7.pq <- normalizeBetweenArrays(beta7.p, method="quantile")
###################################################
### chunk number 30: plotdensityP
###################################################
# intensity densities before between-array normalization
plotDensities(beta7.p)
###################################################
### chunk number 31: plotdensityPQ
###################################################
# intensity densities after quantile normalization
plotDensities(beta7.pq)
###################################################
### chunk number 32: vsn0
###################################################
library("vsn")
###################################################
### chunk number 33: vsn1 eval=FALSE
###################################################
## library("vsn")
## beta7.vsn <- normalizeBetweenArrays(as(beta7, "RGList"), method="vsn")
###################################################
### chunk number 34: vsnDo
###################################################
# variance-stabilizing normalization (cached)
beta7.vsn <- cache("beta7.vsn", vsn(beta7))
###################################################
### chunk number 35: vsnShow eval=FALSE
###################################################
## beta7.vsn <- vsn(beta7)
###################################################
### chunk number 36: getExprs
###################################################
# expression matrix from the vsn result
b7 <- exprs(beta7.vsn)
###################################################
### chunk number 37: vsnundercover
###################################################
# label columns "<file>\n<channel>" and subsample 4000 rows for plotting
fn <- as.character(maInfo(maTargets(beta7))$FileNames)
colnames(b7) <- paste(rep(fn, each=2), c("green", "red"), sep="\n")
b7 <- b7[sample(nrow(b7), 4000), ]
###################################################
### chunk number 38: plotvsn
###################################################
# Upper-panel function for pairs(): scatter the points in dark blue and
# draw the y = x reference line in red.
upPan <- function(...) {
  points(..., col = "darkblue")
  abline(a = 0, b = 1, col = "red")
}
# Lower-panel function for pairs(): print the pairwise correlation
# (2 significant digits) at the centre of the panel.
lowPan <- function(x, y, ...) {
  usr <- par("usr")
  x_mid <- mean(usr[1:2])
  y_mid <- mean(usr[3:4])
  text(x_mid, y_mid, signif(cor(x, y), 2), cex = 2)
}
# pairwise scatter of the first 6 channels, with correlations in the
# lower panels (lowPan/upPan defined above)
pairs(b7[, 1:6], pch=".", lower.panel = lowPan, upper.panel=upPan)
###################################################
### chunk number 39:
###################################################
library("beta7")
###################################################
### chunk number 40:
###################################################
library("arrayQuality")
###################################################
# chunks 41-49: condensed end-to-end workflow, display-only (eval=FALSE)
### chunk number 41: eval=FALSE
###################################################
## TargetInfo <- read.marrayInfo("TargetBeta7.txt")
###################################################
### chunk number 42: eval=FALSE
###################################################
## mraw <- read.GenePix(targets = TargetInfo)
###################################################
### chunk number 43: eval=FALSE
###################################################
## maQualityPlots(mraw)
###################################################
### chunk number 44: eval=FALSE
###################################################
## normdata <- maNorm(mraw)
###################################################
### chunk number 45: eval=FALSE
###################################################
## write.marray(normdata)
###################################################
### chunk number 46: eval=FALSE
###################################################
## library("convert")
## mdata <- as(normdata, "exprSet")
###################################################
### chunk number 47: eval=FALSE
###################################################
## LMres <- lmFit(normdata, design = c(1, -1, -1, 1, 1, -1), weights=NULL)
###################################################
### chunk number 48: eval=FALSE
###################################################
## LMres <- eBayes(LMres)
###################################################
### chunk number 49: eval=FALSE
###################################################
## restable <- topTable(LMres, number=10,resort.by="M")
## table2html(restable, disp="file")
| /assets/help/publications/books/bioinformatics-and-computational-biology-solutions/chapter-code/TwoColorPre.R | no_license | Bioconductor/bioconductor.org | R | false | false | 10,107 | r | ###################################################
### chunk number 1: setup
###################################################
library("RbcBook1")
###################################################
### chunk number 2: checkVersions
###################################################
library("arrayQuality")
library("marray")
library("beta7")
stopifnot(package.version("arrayQuality") >= package_version("1.0.9"))
stopifnot(package.version("marray") >= package_version("1.5.29"))
stopifnot(package.version("beta7") >= package_version("0.5.4"))
###################################################
### chunk number 3: GEODo
###################################################
library("AnnBuilder")
samp.6Hs.166 <- cache("samp.6Hs.166",
queryGEO(GEO(), "GSM16689"))
###################################################
### chunk number 4: GEOshow eval=FALSE
###################################################
## library("AnnBuilder")
## samp.6Hs.166 <- queryGEO(GEO(), "GSM16689")
###################################################
### chunk number 5: readbeta7
###################################################
datadir <- system.file("beta7", package="beta7")
TargetInfo <- read.marrayInfo(file.path(datadir, "TargetBeta7.txt"))
###################################################
### chunk number 6: info1
###################################################
TargetInfo@maNotes <- "Files were loaded from beta7 package."
###################################################
### chunk number 7: info2
###################################################
TargetInfo
###################################################
### chunk number 8: Kote13
###################################################
galinfo <- read.Galfile("6Hs.166.gpr", path=datadir)
###################################################
### chunk number 9: oldwd1
###################################################
oldwd <- getwd()
setwd(datadir)
###################################################
### chunk number 10: read.GenePix
###################################################
setwd(datadir)
files <- c("6Hs.166.gpr", "6Hs.187.1.gpr")
mraw <- read.GenePix(files, name.Gb=NULL, name.Rb=NULL)
###################################################
### chunk number 11: oldwd2
###################################################
setwd(oldwd)
###################################################
### chunk number 12: gnurps3
###################################################
library("beta7")
checkTargetInfo(beta7)
###################################################
### chunk number 13: maGeneTable
###################################################
maGeneTable(beta7)[1:4, 1:5]
###################################################
### chunk number 14: whatAnUglyHack
###################################################
beta7nbg <- beta7
beta7nbg@maGb <- beta7nbg@maRb <- 0 * beta7nbg@maRb
###################################################
### chunk number 15: subsett
###################################################
beta7sub <- beta7[1:100,2:3]
###################################################
### chunk number 16: subsetu
###################################################
coord <- maCompCoord(1:maNgr(beta7), 1:maNgc(beta7), maNsr(beta7), 1:3)
ind <- maCoord2Ind(coord, L=maLayout(beta7))
###################################################
### chunk number 17: eval=FALSE
###################################################
## maQualityPlots(beta7)
###################################################
### chunk number 18: eval=FALSE
###################################################
## agQuality()
###################################################
### chunk number 19: ZZ1
###################################################
image(beta7[,5], xvar = "maRb", bar = TRUE)
###################################################
### chunk number 20: ZZ2
###################################################
RGcol <- maPalette(low = "blue", mid = "gray", high = "yellow", k = 50)
image(beta7[, 3], xvar = "maM", col=RGcol)
###################################################
### chunk number 21: ZZ3
###################################################
flags <- beta7@maW[,1] < -50
image(beta7[,1], xvar="maA", overlay=flags)
###################################################
### chunk number 22: maBoxplotplate
###################################################
par(mar=c(5, 3,3,3), cex.axis=0.7)
boxplot(beta7[, 3], xvar = "maPlate", yvar = "maA", outline=FALSE, las=2)
###################################################
### chunk number 23: eval=FALSE
###################################################
## boxplot(beta7, main = "beta7 arrays", las=2)
###################################################
### chunk number 24: maBoxplotarrays
###################################################
par(mar=c(5, 3,3,3), cex.axis=0.7) #, cex.main=0.8)
boxplot(beta7, ylim=c(-4,4), main = "beta7 arrays", outline=FALSE, las=2)
###################################################
### chunk number 25: maplot2col
###################################################
plot(beta7nbg[,2], lines.func=NULL, legend.func=NULL)
points(beta7nbg[,2], subset=abs(maM(beta7nbg)[,2]) > 2,
col="red", pch=18)
points(beta7nbg[,2], subset=maControls(beta7nbg) == "Empty", col="blue", pch=18)
###################################################
### chunk number 26: beta7normDo
###################################################
beta7norm <- cache("beta7norm", maNorm(beta7, norm="p"))
###################################################
### chunk number 27: beta7normShow eval=FALSE
###################################################
## beta7norm <- maNorm(beta7, norm="p")
###################################################
### chunk number 28: boxplotscale
###################################################
beta7norm.scale <- maNormScale(beta7norm)
###################################################
### chunk number 29: twoStepSeparateChanel
###################################################
beta7norm@maW <- matrix(0,0,0) ## Remove weights
beta7.p <- as(beta7norm, "MAList") ## convert data to RGList
beta7.pq <- normalizeBetweenArrays(beta7.p, method="quantile")
###################################################
### chunk number 30: plotdensityP
###################################################
plotDensities(beta7.p)
###################################################
### chunk number 31: plotdensityPQ
###################################################
plotDensities(beta7.pq)
###################################################
### chunk number 32: vsn0
###################################################
library("vsn")
###################################################
### chunk number 33: vsn1 eval=FALSE
###################################################
## library("vsn")
## beta7.vsn <- normalizeBetweenArrays(as(beta7, "RGList"), method="vsn")
###################################################
### chunk number 34: vsnDo
###################################################
beta7.vsn <- cache("beta7.vsn", vsn(beta7))
###################################################
### chunk number 35: vsnShow eval=FALSE
###################################################
## beta7.vsn <- vsn(beta7)
###################################################
### chunk number 36: getExprs
###################################################
b7 <- exprs(beta7.vsn)
###################################################
### chunk number 37: vsnundercover
###################################################
fn <- as.character(maInfo(maTargets(beta7))$FileNames)
colnames(b7) <- paste(rep(fn, each=2), c("green", "red"), sep="\n")
b7 <- b7[sample(nrow(b7), 4000), ]
###################################################
### chunk number 38: plotvsn
###################################################
# Upper-panel function for pairs(): scatterplot of the two intensities with
# the identity line y = x overlaid in red for visual comparison.
upPan <- function(...){
points(..., col="darkblue")
abline(a=0,b=1,col="red")
}
# Lower-panel function for pairs(): prints the pairwise correlation
# (2 significant digits) at the centre of the panel instead of points.
lowPan <- function(x, y, ...){
text(mean(par("usr")[1:2]), mean(par("usr")[3:4]),signif(cor(x, y),2),cex=2)
}
# Scatterplot matrix of the first 6 columns of the vsn-transformed data b7,
# with correlations below the diagonal and scatterplots above it.
pairs(b7[, 1:6], pch=".", lower.panel = lowPan, upper.panel=upPan)
###################################################
### chunk number 39:
###################################################
library("beta7")
###################################################
### chunk number 40:
###################################################
library("arrayQuality")
###################################################
### chunk number 41: eval=FALSE
###################################################
## TargetInfo <- read.marrayInfo("TargetBeta7.txt")
###################################################
### chunk number 42: eval=FALSE
###################################################
## mraw <- read.GenePix(targets = TargetInfo)
###################################################
### chunk number 43: eval=FALSE
###################################################
## maQualityPlots(mraw)
###################################################
### chunk number 44: eval=FALSE
###################################################
## normdata <- maNorm(mraw)
###################################################
### chunk number 45: eval=FALSE
###################################################
## write.marray(normdata)
###################################################
### chunk number 46: eval=FALSE
###################################################
## library("convert")
## mdata <- as(normdata, "exprSet")
###################################################
### chunk number 47: eval=FALSE
###################################################
## LMres <- lmFit(normdata, design = c(1, -1, -1, 1, 1, -1), weights=NULL)
###################################################
### chunk number 48: eval=FALSE
###################################################
## LMres <- eBayes(LMres)
###################################################
### chunk number 49: eval=FALSE
###################################################
## restable <- topTable(LMres, number=10,resort.by="M")
## table2html(restable, disp="file")
|
#!/usr/bin/env Rscript
# Collect every "*fat.csv" file found under the current directory into a
# single table, then write it out both as CSV ("x.csv") and RDS ("x.rds").
library(tidyverse)
# Find all matching files recursively.
# FIX: use TRUE rather than the reassignable shorthand T, and escape the dot
# so the pattern matches a literal ".csv" extension instead of any character.
.fs = list.files(path = ".", pattern = "fat\\.csv$", full.names = TRUE, recursive = TRUE)
# Read each file with all-character columns (avoids per-file type-guessing
# mismatches when binding), printing each path as progress output.
.f = vector("list", length(.fs))
for (i in seq_along(.fs)) {
  cat(.fs[i], "\n")
  .f[[i]] = data.table::fread(.fs[i], colClasses = "character")
}
# Stack all per-file tables row-wise and persist the combined result.
x = bind_rows(.f)
write_csv(x, "x.csv")
saveRDS(x, "x.rds")
| /fat/collect_fat.R | no_license | gnayyc/cyy.utils | R | false | false | 333 | r | #!/usr/bin/env Rscript
library(tidyverse)
.fs = list.files(path=".", pattern=".*fat.csv$", full.names=T, recursive=T)
.f = vector("list", length(.fs))
for (i in seq_along(.fs)) {
cat(.fs[i],"\n")
.f[[i]] = data.table::fread(.fs[i], colClasses = 'character')
}
x = bind_rows(.f)
write_csv(x, "x.csv")
saveRDS(x, "x.rds")
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 9070
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 9070
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#126.A#48.c#.w#3.s#36.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3197
c no.of clauses 9070
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 9070
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#126.A#48.c#.w#3.s#36.asp.qdimacs 3197 9070 E1 [] 0 126 3071 9070 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#126.A#48.c#.w#3.s#36.asp/ctrl.e#1.a#3.E#126.A#48.c#.w#3.s#36.asp.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 726 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 9070
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 9070
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#126.A#48.c#.w#3.s#36.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3197
c no.of clauses 9070
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 9070
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#126.A#48.c#.w#3.s#36.asp.qdimacs 3197 9070 E1 [] 0 126 3071 9070 NONE
|
\name{testdata}
\alias{testdata}
\alias{testdata2}
\docType{data}
\title{
A real-world data set on household income and expenditures
}
\description{
A real-world household survey data set on income and expenditures, provided as example data for statistical disclosure control methods.
}
\usage{
data(testdata)
data(testdata2)
}
\format{
A data frame with 4580 observations on the following 14 variables.
\describe{
\item{\code{urbrur}}{a numeric vector}
\item{\code{roof}}{a numeric vector}
\item{\code{walls}}{a numeric vector}
\item{\code{water}}{a numeric vector}
\item{\code{electcon}}{a numeric vector}
\item{\code{relat}}{a numeric vector}
\item{\code{sex}}{a numeric vector}
\item{\code{age}}{a numeric vector}
\item{\code{hhcivil}}{a numeric vector}
\item{\code{expend}}{a numeric vector}
\item{\code{income}}{a numeric vector}
\item{\code{savings}}{a numeric vector}
\item{\code{ori_hid}}{a numeric vector}
\item{\code{sampling_weight}}{a numeric vector}
}
A data frame with 93 observations on the following 19 variables.
\describe{
\item{\code{urbrur}}{a numeric vector}
\item{\code{roof}}{a numeric vector}
\item{\code{walls}}{a numeric vector}
\item{\code{water}}{a numeric vector}
\item{\code{electcon}}{a numeric vector}
\item{\code{relat}}{a numeric vector}
\item{\code{sex}}{a numeric vector}
\item{\code{age}}{a numeric vector}
\item{\code{hhcivil}}{a numeric vector}
\item{\code{expend}}{a numeric vector}
\item{\code{income}}{a numeric vector}
\item{\code{savings}}{a numeric vector}
\item{\code{ori_hid}}{a numeric vector}
\item{\code{sampling_weight}}{a numeric vector}
\item{\code{represent}}{a numeric vector}
\item{\code{category_count}}{a numeric vector}
\item{\code{relat2}}{a numeric vector}
\item{\code{water2}}{a numeric vector}
\item{\code{water3}}{a numeric vector}
}
}
\references{
The International Household Survey Network, www.ihsn.org
}
\examples{
data(testdata)
## maybe str(testdata) ; plot(testdata) ...
}
\keyword{datasets} | /man/testdata.Rd | no_license | orlinresearch/sdcMicro | R | false | false | 2,083 | rd | \name{testdata}
\alias{testdata}
\alias{testdata2}
\docType{data}
\title{
A real-world data set on household income and expenditures
}
\description{
A concise (1-5 lines) description of the dataset.
}
\usage{
data(testdata)
data(testdata2)
}
\format{
A data frame with 4580 observations on the following 14 variables.
\describe{
\item{\code{urbrur}}{a numeric vector}
\item{\code{roof}}{a numeric vector}
\item{\code{walls}}{a numeric vector}
\item{\code{water}}{a numeric vector}
\item{\code{electcon}}{a numeric vector}
\item{\code{relat}}{a numeric vector}
\item{\code{sex}}{a numeric vector}
\item{\code{age}}{a numeric vector}
\item{\code{hhcivil}}{a numeric vector}
\item{\code{expend}}{a numeric vector}
\item{\code{income}}{a numeric vector}
\item{\code{savings}}{a numeric vector}
\item{\code{ori_hid}}{a numeric vector}
\item{\code{sampling_weight}}{a numeric vector}
}
A data frame with 93 observations on the following 19 variables.
\describe{
\item{\code{urbrur}}{a numeric vector}
\item{\code{roof}}{a numeric vector}
\item{\code{walls}}{a numeric vector}
\item{\code{water}}{a numeric vector}
\item{\code{electcon}}{a numeric vector}
\item{\code{relat}}{a numeric vector}
\item{\code{sex}}{a numeric vector}
\item{\code{age}}{a numeric vector}
\item{\code{hhcivil}}{a numeric vector}
\item{\code{expend}}{a numeric vector}
\item{\code{income}}{a numeric vector}
\item{\code{savings}}{a numeric vector}
\item{\code{ori_hid}}{a numeric vector}
\item{\code{sampling_weight}}{a numeric vector}
\item{\code{represent}}{a numeric vector}
\item{\code{category_count}}{a numeric vector}
\item{\code{relat2}}{a numeric vector}
\item{\code{water2}}{a numeric vector}
\item{\code{water3}}{a numeric vector}
}
}
\references{
The International Household Survey Network, www.ihsn.org
}
\examples{
data(testdata)
## maybe str(testdata) ; plot(testdata) ...
}
\keyword{datasets} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kcdfhpj.R
\name{hpjkcdfest2}
\alias{hpjkcdfest2}
\title{computing HPJ kernel CDF estimate for odd T}
\usage{
hpjkcdfest2(x, X, X1, X2, X3, X4, h)
}
\arguments{
\item{x}{point at which the CDF is estimated}
\item{X}{vector of original cross-sectional data}
\item{X1}{vector of half-panel cross-sectional data based on time series 1 ~ floor(T/2)}
\item{X2}{vector of half-panel cross-sectional data based on time series (floor(T/2) + 1) ~ T}
\item{X3}{vector of half-panel cross-sectional data based on time series 1 ~ ceiling(T/2)}
\item{X4}{vector of half-panel cross-sectional data based on time series (ceiling(T/2) + 1) ~ T}
\item{h}{bandwidth}
}
\description{
computing HPJ kernel CDF estimate for odd T
}
| /man/hpjkcdfest2.Rd | no_license | anhnguyendepocen/panelhetero | R | false | true | 794 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kcdfhpj.R
\name{hpjkcdfest2}
\alias{hpjkcdfest2}
\title{computing HPJ kernel CDF estimate for odd T}
\usage{
hpjkcdfest2(x, X, X1, X2, X3, X4, h)
}
\arguments{
\item{x}{point at which the CDF is estimated}
\item{X}{vector of original cross-sectional data}
\item{X1}{vector of half-panel cross-sectional data based on time series 1 ~ floor(T/2)}
\item{X2}{vector of half-panel cross-sectional data based on time series (floor(T/2) + 1) ~ T}
\item{X3}{vector of half-panel cross-sectional data based on time series 1 ~ ceiling(T/2)}
\item{X4}{vector of half-panel cross-sectional data based on time series (ceiling(T/2) + 1) ~ T}
\item{h}{bandwidth}
}
\description{
computing HPJ kernel CDF estimate for odd T
}
|
# S3 coef() method for cross-validated glmgraph fits: returns the coefficient
# vector at either the minimum-CV lambda1 ("lambda1.min") or the
# one-standard-error lambda1 ("lambda1.1se").
coef.cv.glmgraph <- function(object, s = c("lambda1.min", "lambda1.1se"), ...) {
  # match.arg() validates s against the allowed choices (erroring otherwise)
  # and resolves the default to the first choice.
  s <- match.arg(s)
  switch(s,
    lambda1.min = object$beta.min,
    lambda1.1se = object$beta.1se,
    stop("Invalid type")  # unreachable after match.arg(); kept as a guard
  )
}
| /fuzzedpackages/glmgraph/R/coef.cv.glmgraph.R | no_license | akhikolla/testpackages | R | false | false | 240 | r | coef.cv.glmgraph <- function(object,s=c("lambda1.min","lambda1.1se"),...){
s <- match.arg(s)
if(s=="lambda1.min") return(object$beta.min)
else if(s=="lambda1.1se") return(object$beta.1se)
else stop("Invalid type")
}
|
# Unit tests for the auxiliary binomial summary-measure helpers
# (aux_mean, aux_variance, aux_mode, aux_skewness, aux_kurtosis).
# Each takes the number of trials n and the success probability p.
context("measures")
# mean of Binomial(n, p) is n * p
test_that("aux_mean", {
  expect_equal(aux_mean(10, 0.3), 3)
  expect_equal(aux_mean(6, 0.5), 3)
  expect_equal(aux_mean(10, 0.69), 6.9)
})
# variance is n * p * (1 - p), e.g. 10 * 0.3 * 0.7 = 2.1
test_that("aux_variance", {
  expect_equal(aux_variance(10, 0.3), 2.1)
  expect_equal(aux_variance(6, 0.5), 1.5)
  expect_equal(aux_variance(10, 0.69), 2.139)
})
# mode: integer near n * p (10 * 0.69 = 6.9 -> 7); exact rounding rule is
# defined in aux_mode itself -- NOTE(review): confirm against its source.
test_that("aux_mode", {
  expect_equal(aux_mode(10, 0.3), 3)
  expect_equal(aux_mode(6, 0.5), 3)
  expect_equal(aux_mode(10, 0.69), 7)
})
# skewness is 0 whenever p = 0.5 (symmetric distribution), for any n
test_that("aux_skewness", {
  expect_equal(aux_skewness(10, 0.5), 0)
  expect_equal(aux_skewness(6, 0.5), 0)
  expect_equal(aux_skewness(5, 0.5), 0)
})
# excess kurtosis is (1 - 6p(1-p)) / (n p (1-p)); at p = 0.5 this is -2/n
test_that("aux_kurtosis", {
  expect_equal(aux_kurtosis(10, 0.5), -0.2)
  expect_equal(aux_kurtosis(20, 0.5), -0.1)
  expect_equal(aux_kurtosis(5, 0.5), -0.4)
})
| /binomial/tests/testthat/test_measures.R | no_license | stat133-sp19/hw-stat133-lucasoliu | R | false | false | 779 | r | context("measures")
test_that("aux_mean", {
expect_equal(aux_mean(10, 0.3), 3)
expect_equal(aux_mean(6, 0.5), 3)
expect_equal(aux_mean(10, 0.69), 6.9)
})
test_that("aux_variance", {
expect_equal(aux_variance(10, 0.3), 2.1)
expect_equal(aux_variance(6, 0.5), 1.5)
expect_equal(aux_variance(10, 0.69), 2.139)
})
test_that("aux_mode", {
expect_equal(aux_mode(10, 0.3), 3)
expect_equal(aux_mode(6, 0.5), 3)
expect_equal(aux_mode(10, 0.69), 7)
})
test_that("aux_skewness", {
expect_equal(aux_skewness(10, 0.5), 0)
expect_equal(aux_skewness(6, 0.5), 0)
expect_equal(aux_skewness(5, 0.5), 0)
})
test_that("aux_kurtosis", {
expect_equal(aux_kurtosis(10, 0.5), -0.2)
expect_equal(aux_kurtosis(20, 0.5), -0.1)
expect_equal(aux_kurtosis(5, 0.5), -0.4)
})
|
# Fuzzer-generated regression input (AFL): exercise the internal sampling
# routine with a NULL community matrix, zero repetitions, and a 5x1 numeric
# matrix containing extreme double values.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252729458e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
# Call the non-exported CNull function with the fuzzed argument list.
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615834634-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 270 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252729458e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
library(aurelius)
### Name: aurelius
### Title: 'aurelius' package
### Aliases: aurelius aurelius-package
### ** Examples
## Not run:
##D library("aurelius")
##D
##D # build a model
##D lm_model <- lm(mpg ~ hp, data = mtcars)
##D
##D # convert the lm object to a list of lists PFA representation
##D lm_model_as_pfa <- pfa(lm_model)
##D
##D # save as plain-text JSON
##D write_pfa(lm_model_as_pfa, file = "my-model.pfa")
##D
##D # read the model back in
##D read_pfa(file("my-model.pfa"))
## End(Not run)
| /data/genthat_extracted_code/aurelius/examples/aurelius.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 521 | r | library(aurelius)
### Name: aurelius
### Title: 'aurelius' package
### Aliases: aurelius aurelius-package
### ** Examples
## Not run:
##D library("aurelius")
##D
##D # build a model
##D lm_model <- lm(mpg ~ hp, data = mtcars)
##D
##D # convert the lm object to a list of lists PFA representation
##D lm_model_as_pfa <- pfa(lm_model)
##D
##D # save as plain-text JSON
##D write_pfa(lm_model_as_pfa, file = "my-model.pfa")
##D
##D # read the model back in
##D read_pfa(file("my-model.pfa"))
## End(Not run)
|
## Exploratory-data-analysis script: builds a 2x2 panel of plots (plot4.png)
## from two days of the UCI household power consumption data set.
## Load packages
library(dplyr)
library(data.table)
## Read all data; "?" marks missing values in this data set
allData <- read.table("household_power_consumption.txt", sep = ";",
na.strings = "?", header = TRUE)
## Convert to data table and keep only the two required dates (1-2 Feb 2007)
allDF <- tbl_df(allData)
reqDates <- filter(allDF, allDF$Date == "1/2/2007" | allDF$Date == "2/2/2007")
## Convert Date column from character to Date
reqDates$Date <- as.Date(reqDates$Date, format = "%d/%m/%Y")
## Create a combined DateTime column and parse it to POSIXlt
reqDates$DateTime <- paste(reqDates$Date, reqDates$Time)
reqDates$DateTime <- strptime(reqDates$DateTime, format = "%Y-%m-%d %H:%M:%S")
png("plot4.png")
## Lay out a 2x2 panel of plots
par(mfrow = c(2, 2))
## Panel 1: global active power over time
plot(reqDates$DateTime, reqDates$Global_active_power, type = "l",
     xlab = "Day", ylab = "Global Active Power")
## Panel 2: voltage over time
plot(reqDates$DateTime, reqDates$Voltage, type = "l",
     xlab = "Day", ylab = "Voltage")
## Panel 3: the three energy sub-metering series overlaid, with a legend
plot(reqDates$DateTime, reqDates$Sub_metering_1, type = "l",
     xlab = "Day", ylab = "Energy Sub Metering", col = "red")
lines(reqDates$DateTime, reqDates$Sub_metering_2, col = "green")
lines(reqDates$DateTime, reqDates$Sub_metering_3, col = "blue")
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1),
       col = c("red", "green", "blue"),
       cex = 0.7)
## Panel 4: global reactive power over time
plot(reqDates$DateTime, reqDates$Global_reactive_power, type = "l",
     xlab = "Day", ylab = "Global Reactive Power", lwd = 0.5)
dev.off()
| /plot4.R | no_license | zahirbaig21/ExData_Plotting1 | R | false | false | 1,511 | r | ##Load packages
library(dplyr)
library(data.table)
##Read all data
allData <- read.table("household_power_consumption.txt", sep = ";",
na.strings = "?", header = TRUE)
##Convert to data table and keep only required dates
allDF <- tbl_df(allData)
reqDates <- filter(allDF, allDF$Date == "1/2/2007" | allDF$Date == "2/2/2007")
##Convert date column to date format
reqDates$Date <- as.Date(reqDates$Date, format = "%d/%m/%Y")
##Create new DateTime column and convert to date format
reqDates$DateTime <- paste(reqDates$Date, reqDates$Time)
reqDates$DateTime <- strptime(reqDates$DateTime, format = "%Y-%m-%d %H:%M:%S")
png("plot4.png")
#Set 4-panel plot
par(mfrow = c(2, 2))
#Plot 1
plot(reqDates$DateTime, reqDates$Global_active_power, type = "l",
xlab = "Day", ylab = "Global Active Power")
#Plot 2
plot(reqDates$DateTime, reqDates$Voltage, type = "l",
xlab = "Day", ylab = "Voltage")
#Plot 3
plot(reqDates$DateTime, reqDates$Sub_metering_1, type = "l",
xlab = "Day", ylab = "Energy Sub Metering", col = "red")
lines(reqDates$DateTime, reqDates$Sub_metering_2, col = "green")
lines(reqDates$DateTime, reqDates$Sub_metering_3, col = "blue")
legend("topright",
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty = c(1, 1, 1),
col = c("red", "green", "blue"),
cex = 0.7)
#Plot 4
plot(reqDates$DateTime, reqDates$Global_reactive_power, type = "l",
xlab = "Day", ylab = "Global Reactive Power", lwd = 0.5)
dev.off()
|
## Arguments for high-performance-computing runs of the analysis.
FAST <- FALSE ## TRUE = compute on a small subset of the data (quick test run)
mc.cores <- 27 ## number of parallel workers (test parallelism, but not too many cores)
Nboot <- 100 ## number of bootstrap replicates
Nboot <- 100
| /analysis/args_hpc.R | no_license | longjp/causal-bias-code | R | false | false | 157 | r | ## arguments for high performance computing
FAST <- FALSE ## compute on small subset of data
mc.cores <- 27 ## test parallel, but not may cores
Nboot <- 100
|
#' @rdname query
#' @usage NULL
dbSendStatement_MariaDBConnection_character <- function(conn, statement, params = NULL, ...) {
  # Delegate to the internal dbSend() helper; is_statement = TRUE flags the
  # SQL as a statement (per DBI's dbSendStatement contract) rather than a
  # result-returning query.
  dbSend(conn, statement, params, is_statement = TRUE)
}

#' @rdname query
#' @export
# Register the implementation above as the S4 method for
# dbSendStatement(MariaDBConnection, character).
setMethod("dbSendStatement", signature("MariaDBConnection", "character"), dbSendStatement_MariaDBConnection_character)
| /R/dbSendStatement_MariaDBConnection_character.R | permissive | r-dbi/RMariaDB | R | false | false | 332 | r | #' @rdname query
#' @usage NULL
dbSendStatement_MariaDBConnection_character <- function(conn, statement, params = NULL, ...) {
dbSend(conn, statement, params, is_statement = TRUE)
}
#' @rdname query
#' @export
setMethod("dbSendStatement", signature("MariaDBConnection", "character"), dbSendStatement_MariaDBConnection_character)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/min_depth_distribution.R
\name{plot_min_depth_distribution}
\alias{plot_min_depth_distribution}
\title{Plot the distribution of minimal depth in a random forest}
\usage{
plot_min_depth_distribution(min_depth_frame, k = 10, min_no_of_trees = 0,
mean_sample = "top_trees", mean_scale = FALSE, mean_round = 2,
main = "Distribution of minimal depth and its mean")
}
\arguments{
\item{min_depth_frame}{A data frame output of min_depth_distribution function or a randomForest object}
\item{k}{The maximal number of variables with lowest mean minimal depth to be used for plotting}
\item{min_no_of_trees}{The minimal number of trees in which a variable has to be used for splitting to be used for plotting}
\item{mean_sample}{The sample of trees on which mean minimal depth is calculated, possible values are "all_trees", "top_trees", "relevant_trees"}
\item{mean_scale}{Logical: should the values of mean minimal depth be rescaled to the interval [0,1]?}
\item{mean_round}{The number of digits used for displaying mean minimal depth}
\item{main}{A string to be used as title of the plot}
}
\value{
A ggplot object
}
\description{
Plot the distribution of minimal depth in a random forest
}
\examples{
forest <- randomForest::randomForest(Species ~ ., data = iris, ntree = 300)
plot_min_depth_distribution(min_depth_distribution(forest))
}
| /man/plot_min_depth_distribution.Rd | no_license | KasiaKobylinska/randomForestExplainer | R | false | true | 1,422 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/min_depth_distribution.R
\name{plot_min_depth_distribution}
\alias{plot_min_depth_distribution}
\title{Plot the distribution of minimal depth in a random forest}
\usage{
plot_min_depth_distribution(min_depth_frame, k = 10, min_no_of_trees = 0,
mean_sample = "top_trees", mean_scale = FALSE, mean_round = 2,
main = "Distribution of minimal depth and its mean")
}
\arguments{
\item{min_depth_frame}{A data frame output of min_depth_distribution function or a randomForest object}
\item{k}{The maximal number of variables with lowest mean minimal depth to be used for plotting}
\item{min_no_of_trees}{The minimal number of trees in which a variable has to be used for splitting to be used for plotting}
\item{mean_sample}{The sample of trees on which mean minimal depth is calculated, possible values are "all_trees", "top_trees", "relevant_trees"}
\item{mean_scale}{Logical: should the values of mean minimal depth be rescaled to the interval [0,1]?}
\item{mean_round}{The number of digits used for displaying mean minimal depth}
\item{main}{A string to be used as title of the plot}
}
\value{
A ggplot object
}
\description{
Plot the distribution of minimal depth in a random forest
}
\examples{
forest <- randomForest::randomForest(Species ~ ., data = iris, ntree = 300)
plot_min_depth_distribution(min_depth_distribution(forest))
}
|
# Simulation exercises: repeated normal samples, Q-Q plots of the airquality
# ozone data, a compound dice experiment, and a binomial proportion estimate.
# NOTE: results depend on RNG call order, so statement order matters here.
set.seed(256)
# Nine histograms of N(0,1) samples (n = 25) with the true density overlaid.
par(mfrow=c(3,3))
for (i in 1:9) {
  hist(rnorm(n = 25), probability = TRUE)
  curve(dnorm, add=TRUE, col='red', lwd=3)
}
# Ozone values, dropping NAs and values <= 1 (so log() below is safe).
my.ozone <- airquality$Ozone[!is.na(airquality$Ozone) & airquality$Ozone>1]
# Q-Q plot of ozone against a normal sample with matching mean/sd;
# lines(1:150) draws an approximate y = x reference line.
mean.1 <- mean(my.ozone)
sd.1 <- sd(my.ozone)
pts <- rnorm(length(my.ozone), mean = mean.1, sd = sd.1)
qqplot(my.ozone, pts)
lines(1:150)
# Same comparison on the log scale (log-normal check).
mean.2 <- mean(log(my.ozone))
sd.2 <- sd(log(my.ozone))
ptsl <- rnorm(length(my.ozone), mean = mean.2, sd = sd.2)
qqplot(log(my.ozone), ptsl)
lines(1:5)
# Compound dice experiment: roll two dice, then roll that many dice and sum.
set.seed(457778)
y <- numeric(1000)
for (i in 1:1000) {
  x <- sum(sample(1:6, 2, replace = TRUE))
  y[i] <- sum(sample(1:6, x, replace = TRUE))
}
hist(y)
rnorm(3, mean=2, sd=1)
# Estimate p = 1/6 from 10000 replicates of 50 Bernoulli(1/6) trials.
n<-10000
# One replicate: sample proportion of successes in 50 trials.
doone <- function(){
  x<-rbinom(1,50,1/6)
  p<-x/50
  p
}
p.sim<-replicate(n,doone())
mean(p.sim)
| /Simulation.R | no_license | nakicam/DAT209x | R | false | false | 801 | r | set.seed(256)
par(mfrow=c(3,3))
for (i in 1:9) {
hist(rnorm(n = 25), probability = TRUE)
curve(dnorm, add=TRUE, col='red', lwd=3)
}
my.ozone <- airquality$Ozone[!is.na(airquality$Ozone) & airquality$Ozone>1]
mean.1 <- mean(my.ozone)
sd.1 <- sd(my.ozone)
pts <- rnorm(length(my.ozone), mean = mean.1, sd = sd.1)
qqplot(my.ozone, pts)
lines(1:150)
mean.2 <- mean(log(my.ozone))
sd.2 <- sd(log(my.ozone))
ptsl <- rnorm(length(my.ozone), mean = mean.2, sd = sd.2)
qqplot(log(my.ozone), ptsl)
lines(1:5)
set.seed(457778)
y <- numeric(1000)
for (i in 1:1000) {
x <- sum(sample(1:6, 2, replace = TRUE))
y[i] <- sum(sample(1:6, x, replace = TRUE))
}
hist(y)
rnorm(3, mean=2, sd=1)
n<-10000
doone <- function(){
x<-rbinom(1,50,1/6)
p<-x/50
p
}
p.sim<-replicate(n,doone())
mean(p.sim)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ML_BARTModel.R
\name{BARTModel}
\alias{BARTModel}
\title{Bayesian Additive Regression Trees Model}
\usage{
BARTModel(
K = NULL,
sparse = FALSE,
theta = 0,
omega = 1,
a = 0.5,
b = 1,
rho = NULL,
augment = FALSE,
xinfo = NULL,
usequants = FALSE,
sigest = NA,
sigdf = 3,
sigquant = 0.9,
lambda = NA,
k = 2,
power = 2,
base = 0.95,
tau.num = NULL,
offset = NULL,
ntree = NULL,
numcut = 100,
ndpost = 1000,
nskip = NULL,
keepevery = NULL,
printevery = 1000
)
}
\arguments{
\item{K}{if provided, then coarsen the times of survival responses per the
quantiles \eqn{1/K, 2/K, ..., K/K} to reduce computational burden.}
\item{sparse}{logical indicating whether to perform variable selection based
on a sparse Dirichlet prior rather than simply uniform; see Linero 2016.}
\item{theta, omega}{\eqn{theta} and \eqn{omega} parameters; zero means
random.}
\item{a, b}{sparse parameters for \eqn{Beta(a, b)} prior:
\eqn{0.5 <= a <= 1} where lower values induce more sparsity and typically
\eqn{b = 1}.}
\item{rho}{sparse parameter: typically \eqn{rho = p} where \eqn{p} is the
number of covariates under consideration.}
\item{augment}{whether data augmentation is to be performed in sparse
variable selection.}
\item{xinfo}{optional matrix whose rows are the covariates and columns their
cutpoints.}
\item{usequants}{whether covariate cutpoints are defined by uniform quantiles
or generated uniformly.}
\item{sigest}{normal error variance prior for numeric response variables.}
\item{sigdf}{degrees of freedom for error variance prior.}
\item{sigquant}{quantile at which a rough estimate of the error standard
deviation is placed.}
\item{lambda}{scale of the prior error variance.}
\item{k}{number of standard deviations \eqn{f(x)} is away from +/-3 for
categorical response variables.}
\item{power, base}{power and base parameters for tree prior.}
\item{tau.num}{numerator in the \eqn{tau} definition, i.e.,
\eqn{tau = tau.num / (k * sqrt(ntree))}.}
\item{offset}{override for the default \eqn{offset} of \eqn{F^-1(mean(y))}
in the multivariate response probability
\eqn{P(y[j] = 1 | x) = F(f(x)[j] + offset[j])}.}
\item{ntree}{number of trees in the sum.}
\item{numcut}{number of possible covariate cutoff values.}
\item{ndpost}{number of posterior draws returned.}
\item{nskip}{number of MCMC iterations to be treated as burn in.}
\item{keepevery}{interval at which to keep posterior draws.}
\item{printevery}{interval at which to print MCMC progress.}
}
\value{
\code{MLModel} class object.
}
\description{
Flexible nonparametric modeling of covariates for continuous, binary,
categorical and time-to-event outcomes.
}
\details{
\describe{
\item{Response Types:}{\code{factor}, \code{numeric}, \code{Surv}}
}
Default values for the \code{NULL} arguments and further model details can be
found in the source links below.
}
\examples{
\donttest{
fit(sale_amount ~ ., data = ICHomes, model = BARTModel)
}
}
\seealso{
\code{\link[BART]{gbart}}, \code{\link[BART]{mbart}},
\code{\link[BART]{surv.bart}}, \code{\link{fit}}, \code{\link{resample}}
}
| /man/BARTModel.Rd | no_license | chen061218/MachineShop | R | false | true | 3,187 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ML_BARTModel.R
\name{BARTModel}
\alias{BARTModel}
\title{Bayesian Additive Regression Trees Model}
\usage{
BARTModel(
K = NULL,
sparse = FALSE,
theta = 0,
omega = 1,
a = 0.5,
b = 1,
rho = NULL,
augment = FALSE,
xinfo = NULL,
usequants = FALSE,
sigest = NA,
sigdf = 3,
sigquant = 0.9,
lambda = NA,
k = 2,
power = 2,
base = 0.95,
tau.num = NULL,
offset = NULL,
ntree = NULL,
numcut = 100,
ndpost = 1000,
nskip = NULL,
keepevery = NULL,
printevery = 1000
)
}
\arguments{
\item{K}{if provided, then coarsen the times of survival responses per the
quantiles \eqn{1/K, 2/K, ..., K/K} to reduce computational burdern.}
\item{sparse}{logical indicating whether to perform variable selection based
on a sparse Dirichlet prior rather than simply uniform; see Linero 2016.}
\item{theta, omega}{\eqn{theta} and \eqn{omega} parameters; zero means
random.}
\item{a, b}{sparse parameters for \eqn{Beta(a, b)} prior:
\eqn{0.5 <= a <= 1} where lower values induce more sparsity and typically
\eqn{b = 1}.}
\item{rho}{sparse parameter: typically \eqn{rho = p} where \eqn{p} is the
number of covariates under consideration.}
\item{augment}{whether data augmentation is to be performed in sparse
variable selection.}
\item{xinfo}{optional matrix whose rows are the covariates and columns their
cutpoints.}
\item{usequants}{whether covariate cutpoints are defined by uniform quantiles
or generated uniformly.}
\item{sigest}{normal error variance prior for numeric response variables.}
\item{sigdf}{degrees of freedom for error variance prior.}
\item{sigquant}{quantile at which a rough estimate of the error standard
deviation is placed.}
\item{lambda}{scale of the prior error variance.}
\item{k}{number of standard deviations \eqn{f(x)} is away from +/-3 for
categorical response variables.}
\item{power, base}{power and base parameters for tree prior.}
\item{tau.num}{numerator in the \eqn{tau} definition, i.e.,
\eqn{tau = tau.num / (k * sqrt(ntree))}.}
\item{offset}{override for the default \eqn{offset} of \eqn{F^-1(mean(y))}
in the multivariate response probability
\eqn{P(y[j] = 1 | x) = F(f(x)[j] + offset[j])}.}
\item{ntree}{number of trees in the sum.}
\item{numcut}{number of possible covariate cutoff values.}
\item{ndpost}{number of posterior draws returned.}
\item{nskip}{number of MCMC iterations to be treated as burn in.}
\item{keepevery}{interval at which to keep posterior draws.}
\item{printevery}{interval at which to print MCMC progress.}
}
\value{
\code{MLModel} class object.
}
\description{
Flexible nonparametric modeling of covariates for continuous, binary,
categorical and time-to-event outcomes.
}
\details{
\describe{
\item{Response Types:}{\code{factor}, \code{numeric}, \code{Surv}}
}
Default values for the \code{NULL} arguments and further model details can be
found in the source links below.
}
\examples{
\donttest{
fit(sale_amount ~ ., data = ICHomes, model = BARTModel)
}
}
\seealso{
\code{\link[BART]{gbart}}, \code{\link[BART]{mbart}},
\code{\link[BART]{surv.bart}}, \code{\link{fit}}, \code{\link{resample}}
}
|
# Read a phylogenetic tree from a Newick file, remove its root, and write
# the unrooted tree back out in Newick format.
library(ape)
testtree <- read.tree("8749_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8749_0_unrooted.txt")
testtree <- read.tree("8749_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8749_0_unrooted.txt") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{survey_data}
\alias{survey_data}
\title{SLEAC survey data from Sierra Leone}
\format{
A tibble with 14 rows and 6 columns:
\describe{
\item{\code{country}}{Country}
\item{\code{province}}{Province}
\item{\code{district}}{District}
\item{\code{in_cases}}{Cases found who are in the programme}
\item{\code{out_cases}}{Cases found who are not in the programme}
\item{\code{n}}{Total number of under 5 children sampled}
}
}
\source{
Ministry of Health, Sierra Leone
}
\usage{
survey_data
}
\description{
SLEAC survey data from Sierra Leone
}
\examples{
survey_data
}
\keyword{datasets}
| /man/survey_data.Rd | no_license | nutriverse/sleacr | R | false | true | 689 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{survey_data}
\alias{survey_data}
\title{SLEAC survey data from Sierra Leone}
\format{
A tibble with 14 rows and 6 columns:
\describe{
\item{\code{country}}{Country}
\item{\code{province}}{Province}
\item{\code{district}}{District}
\item{\code{in_cases}}{Cases found who are in the programme}
\item{\code{out_cases}}{Cases found who are not in the programme}
\item{\code{n}}{Total number of under 5 children sampled}
}
}
\source{
Ministry of Health, Sierra Leone
}
\usage{
survey_data
}
\description{
SLEAC survey data from Sierra Leone
}
\examples{
survey_data
}
\keyword{datasets}
|
# Predicting passenger traffic using Prophet.
#
# The dataset is a univariate time series containing hourly passenger traffic
# for a new public transport service. We forecast traffic for the next
# 7 months (213 days) given historical traffic for the last 25 months.
library(prophet)
library(data.table)
library(dplyr)
library(ggplot2)
library(tidyr)
require(xlsx)

setwd("C:/Users/yuvbhard/Desktop/Data-Science-for-Marketing-Analytics-Practice/Facebook Prophet Time Series Model (R & Python)")

# Read data.
train = fread("Train_SU63ISt.csv")
test = fread("Test_0qrQsBZ.csv")

# Extract the date component from the Datetime variable.
train$Date = as.POSIXct(strptime(train$Datetime, "%d-%m-%Y"))
test$Date = as.POSIXct(strptime(test$Datetime, "%d-%m-%Y"))

# Convert 'Datetime' from character to date-time format.
train$Datetime = as.POSIXct(strptime(train$Datetime, "%d-%m-%Y %H:%M"))
test$Datetime = as.POSIXct(strptime(test$Datetime, "%d-%m-%Y %H:%M"))

# Aggregate train data day-wise and drop the row with a missing date.
aggr_train = train[, list(Count = sum(Count)), by = Date]
aggr_train = aggr_train[!is.na(aggr_train$Date), ]

# Visualize the daily series.
ggplot(aggr_train) + geom_line(aes(Date, Count))

# Prophet requires the columns to be named ds (date) and y (value).
names(aggr_train) = c("ds", "y")

# Fit the model and forecast 213 days (~7 months) ahead.
m = prophet(aggr_train, daily.seasonality = TRUE)
future = make_future_dataframe(m, periods = 213)
forecast = predict(m, future)

# Visualize the forecast.
plot(m, forecast)

# Proportion of the daily total falling in each hour, estimated from train.
mean_hourly_count = train %>%
  group_by(hour = hour(train$Datetime)) %>%
  summarise(mean_count = mean(Count))
mean_hourly_count$count_proportion =
  mean_hourly_count$mean_count / sum(mean_hourly_count$mean_count)

# Distribute each forecast day's total over the 24 hours to get hourly counts.
# BUGFIX: the original grew 'test_count' with append() inside the loop (O(n^2))
# and then assigned test$Count = test_count['Value'], which indexes a plain
# numeric vector by a nonexistent name and yields all-NA. Build the vector in
# one pass and assign it directly instead; the dead 'new_t' line is removed.
future_days = 763:nrow(forecast)
test_count = unlist(lapply(future_days, function(i) {
  mean_hourly_count$count_proportion * forecast$yhat[i]
}))
test$Count = test_count
| /Facebook Prophet Time Series Model (R & Python)/predicting_passenger_traffic_time_series_prophet.R | permissive | Yuvansh1/Data-Science-for-Marketing-Analytics-Practice | R | false | false | 2,027 | r | #Predicting passsenger traffic using Prophet
# The dataset is a univariate time series with hourly passenger traffic for a
# new public transport service; we forecast the next 7 months (213 days) from
# historical traffic data of the last 25 months.
# (The original held this description in a bare top-level string; a comment is
# the idiomatic place for it.)
library(prophet)
library(data.table)
library(dplyr)
library(ggplot2)
library(tidyr)
library(xlsx)   # was require(); library() errors loudly if the package is missing
setwd("C:/Users/yuvbhard/Desktop/Data-Science-for-Marketing-Analytics-Practice/Facebook Prophet Time Series Model (R & Python)")
# read data
train = fread("Train_SU63ISt.csv")
test = fread("Test_0qrQsBZ.csv")
# Extract the calendar date from the Datetime variable
train$Date = as.POSIXct(strptime(train$Datetime, "%d-%m-%Y"))
test$Date = as.POSIXct(strptime(test$Datetime, "%d-%m-%Y"))
# Convert 'Datetime' variable from character to date-time format
train$Datetime = as.POSIXct(strptime(train$Datetime, "%d-%m-%Y %H:%M"))
test$Datetime = as.POSIXct(strptime(test$Datetime, "%d-%m-%Y %H:%M"))
# Aggregate train data day-wise
aggr_train = train[, list(Count = sum(Count)), by = Date]
# Remove rows whose Date failed to parse
aggr_train = aggr_train[!is.na(aggr_train$Date), ]
# Visualize the daily series
ggplot(aggr_train) + geom_line(aes(Date, Count))
# Prophet expects the two columns to be named ds (date) and y (value)
names(aggr_train) = c("ds", "y")
# Model building: fit on daily totals, then forecast 213 days ahead
m = prophet(aggr_train, daily.seasonality = TRUE)
future = make_future_dataframe(m, periods = 213)
forecast = predict(m, future)
# Visualize forecast
plot(m, forecast)
# Proportion of mean hourly 'Count' based on train data; used to split each
# forecast daily total back into 24 hourly values.
mean_hourly_count = train %>%
  group_by(hour = hour(train$Datetime)) %>%
  summarise(mean_count = mean(Count))
s = sum(mean_hourly_count$mean_count)
mean_hourly_count$count_proportion = mean_hourly_count$mean_count / s
# Distribute each forecast-only day across the 24 hours according to the
# hourly proportions. The original hard-coded 763 as the first forecast row;
# derive it instead (assumes rows 1..nrow(aggr_train) are the training days —
# TODO confirm nrow(aggr_train) == 762 on this data).
test_count = NULL
for (i in (nrow(aggr_train) + 1):nrow(forecast)) {
  test_count = append(test_count, mean_hourly_count$count_proportion * forecast$yhat[i])
}
# BUG FIX: the original assigned test_count['Value'] — a lookup of a
# non-existent name on an unnamed numeric vector — which filled the whole
# column with NA. Assign the hourly forecasts directly. (The dead
# strsplit/trimws line producing the unused new_t was removed.)
test$Count = test_count
|
# lab — ISLR Chapter 8: classification trees on the Carseats data.
# install.packages("tree")   # run once by hand; scripts should not auto-install
library(tree)
library(ISLR)
attach(Carseats)
# Recode Sales into a binary response: "Yes" when Sales > 8, else "No".
# (The original labels " No"/" Yes " carried stray spaces; fixed.)
High = ifelse(Sales <= 8, "No", "Yes")
Carseats = data.frame(Carseats, High)
# Fit a classification tree on all predictors except Sales itself.
# BUG FIX: the formula operator had been garbled to "???" (mojibake of "~").
tree.carseats = tree(High ~ . - Sales, Carseats)
summary(tree.carseats)
tree.carseats
# Train/test split: 200 observations for training.
set.seed(2)
train = sample(1:nrow(Carseats), 200)
Carseats.test = Carseats[-train, ]
High.test = High[-train]
# Refit on the training half and evaluate on the held-out half.
tree.carseats = tree(High ~ . - Sales, Carseats, subset = train)
tree.pred = predict(tree.carseats, Carseats.test, type = "class")
table(tree.pred, High.test)
# 71.5% accuracy
# Cost-complexity pruning guided by CV on the misclassification rate.
set.seed(3)
cv.carseats = cv.tree(tree.carseats, FUN = prune.misclass)
names(cv.carseats)
cv.carseats
par(mfrow = c(1, 2))
plot(cv.carseats$size, cv.carseats$dev, type = "b")
plot(cv.carseats$k, cv.carseats$dev, type = "b")
# Prune to the 9-node tree selected by CV and re-evaluate.
prune.carseats = prune.misclass(tree.carseats, best = 9)
plot(prune.carseats)
text(prune.carseats, pretty = 0)
# BUG FIX: the original read "Carseats .test" (syntax error) and
# type = " class " (padded string, not a valid type); both corrected.
tree.pred = predict(prune.carseats, Carseats.test, type = "class")
table(tree.pred, High.test)
# 0.77 accuracy
# ISLR Chapter 8: regression trees, bagging, random forests and boosting on
# the Boston housing data (response: medv). All "???" occurrences below were
# mojibake for the formula operator "~" and have been repaired.
library(MASS)
# --- Regression tree -------------------------------------------------------
set.seed(1)
train = sample(1:nrow(Boston), nrow(Boston) / 2)
tree.boston = tree(medv ~ ., Boston, subset = train)
summary(tree.boston)
plot(tree.boston)
text(tree.boston, pretty = 0)
# Cross-validate tree size, then prune to 5 terminal nodes.
cv.boston = cv.tree(tree.boston)
plot(cv.boston$size, cv.boston$dev)
prune.boston = prune.tree(tree.boston, best = 5)
plot(prune.boston)
text(prune.boston, pretty = 0)
# Test-set MSE of the unpruned tree.
yhat = predict(tree.boston, newdata = Boston[-train, ])
boston.test = Boston[-train, "medv"]
plot(yhat, boston.test)
abline(0, 1)
mean((yhat - boston.test)^2)
# --- Bagging: random forest using all 13 predictors at every split ---------
# install.packages("randomForest")   # run once by hand
library(randomForest)
set.seed(1)
bag.boston = randomForest(medv ~ ., data = Boston, subset = train,
                          mtry = 13, importance = TRUE)
bag.boston
yhat.bag = predict(bag.boston, newdata = Boston[-train, ])
plot(yhat.bag, boston.test)
abline(0, 1)
mean((yhat.bag - boston.test)^2)
# Same, with fewer trees.
bag.boston = randomForest(medv ~ ., data = Boston, subset = train,
                          mtry = 13, ntree = 25)
yhat.bag = predict(bag.boston, newdata = Boston[-train, ])
mean((yhat.bag - boston.test)^2)
# --- Random forest (mtry = 6) ----------------------------------------------
set.seed(1)
rf.boston = randomForest(medv ~ ., data = Boston, subset = train,
                         mtry = 6, importance = TRUE)
yhat.rf = predict(rf.boston, newdata = Boston[-train, ])
mean((yhat.rf - boston.test)^2)
importance(rf.boston)
varImpPlot(rf.boston)
# --- Boosting ---------------------------------------------------------------
# install.packages("gbm")   # run once by hand
library(gbm)
set.seed(1)
boost.boston = gbm(medv ~ ., data = Boston[train, ], distribution = "gaussian",
                   n.trees = 5000, interaction.depth = 4)
summary(boost.boston)
par(mfrow = c(1, 2))
plot(boost.boston, i = "rm")
plot(boost.boston, i = "lstat")   # BUG FIX: was i = " lstat " (invalid name)
yhat.boost = predict(boost.boston, newdata = Boston[-train, ], n.trees = 5000)
mean((yhat.boost - boston.test)^2)
# Boosting again with explicit shrinkage.
boost.boston = gbm(medv ~ ., data = Boston[train, ], distribution = "gaussian",
                   n.trees = 5000, interaction.depth = 4, shrinkage = 0.2,
                   verbose = FALSE)   # FALSE, never the reassignable F
yhat.boost = predict(boost.boston, newdata = Boston[-train, ], n.trees = 5000)
mean((yhat.boost - boston.test)^2)
# Problems ----
# problem 1: (answered in text)
# problem 3: plot the Gini index, classification error and cross-entropy as a
# function of the class-1 proportion p in a two-class setting.
p <- seq(0, 1, by = 0.01)
gini.index <- 2 * p * (1 - p)
class.error <- pmin(p, 1 - p)   # identical to 1 - pmax(p, 1 - p)
cross.entropy <- -(p * log(p) + (1 - p) * log(1 - p))
matplot(p, cbind(gini.index, class.error, cross.entropy), col = c("red", "green", "blue"))
# problem 4
# Sketch the partition of the (X1, X2) predictor space induced by the tree in
# the exercise, labelling each rectangular region with its predicted value.
par(xpd = NA)   # allow region labels to be drawn outside the clipping region
plot(NA, NA, type = "n", xlim = c(-2, 2), ylim = c(-3, 3), xlab = "X1", ylab = "X2")
# X2 < 1
lines(x = c(-2, 2), y = c(1, 1))
# X1 < 1 with X2 < 1
lines(x = c(1, 1), y = c(-3, 1))
text(x = (-2 + 1)/2, y = -1, labels = c(-1.8))
text(x = 1.5, y = -1, labels = c(0.63))
# X2 < 2 with X2 >= 1
lines(x = c(-2, 2), y = c(2, 2))
text(x = 0, y = 2.5, labels = c(2.49))
# X1 < 0 with X2<2 and X2>=1
lines(x = c(0, 0), y = c(1, 2))
text(x = -1, y = 1.5, labels = c(-1.06))
text(x = 1, y = 1.5, labels = c(0.21))
# problem 5
# majority vote will tell us x is red since 6 of the 10 estimates exceed 0.5
# averaging probabilities will tell us X is green since the mean estimate is 0.45
# problem 6
# we find the split that minimizes RSS to the largest extent
# the same is applied multiple times until there are few observations in each bucket
# we have alpha which measures cost complexity
# optimal alpha is established in a way that minimizes the (y - yi)^2 + alpha*T
# higher alpha - lesser tree length
# we perform cv to find the optimal T
#problem 7
# Compare random-forest test MSE on Boston for three choices of mtry
# (m = p, m = p/2, m = sqrt(p)) as the number of trees grows to 500.
set.seed(1)
train <- sample(1:nrow(Boston), nrow(Boston) / 2)
Boston.train <- Boston[train, -14]   # column 14 is the response medv
Boston.test <- Boston[-train, -14]
Y.train <- Boston[train, 14]
Y.test <- Boston[-train, 14]
# Supplying xtest/ytest makes randomForest track the test-set MSE per tree.
rf.boston1 <- randomForest(Boston.train, y = Y.train, xtest = Boston.test, ytest = Y.test, mtry = ncol(Boston) - 1, ntree = 500)
rf.boston2 <- randomForest(Boston.train, y = Y.train, xtest = Boston.test, ytest = Y.test, mtry = (ncol(Boston) - 1) / 2, ntree = 500)
rf.boston3 <- randomForest(Boston.train, y = Y.train, xtest = Boston.test, ytest = Y.test, mtry = sqrt(ncol(Boston) - 1), ntree = 500)
plot(1:500, rf.boston1$test$mse, col = "green", type = "l", xlab = "Number of Trees", ylab = "Test MSE", ylim = c(10, 19))
lines(1:500, rf.boston2$test$mse, col = "red", type = "l")
lines(1:500, rf.boston3$test$mse, col = "blue", type = "l")
legend("topright", c("m = p", "m = p/2", "m = sqrt(p)"), col = c("green", "red", "blue"), cex = 1, lty = 1)
#problem 8
# Regression trees, bagging and random forests on Carseats (response: Sales).
# part a: 50/50 train/test split.
library(ISLR)
set.seed(1)
train <- sample(1:nrow(Carseats), nrow(Carseats) / 2)
Carseats.train <- Carseats[train, ]
Carseats.test <- Carseats[-train, ]
# part b: fit a regression tree and report test MSE.
tree.carseats <- tree(Sales ~ ., data = Carseats.train)
summary(tree.carseats)
pred <- predict(tree.carseats, newdata = Carseats.test)
mean((pred - Carseats.test$Sales)^2)
#part c: cross-validate tree size, prune, and re-evaluate.
cv.carseats <- cv.tree(tree.carseats)
plot(cv.carseats$size, cv.carseats$dev, type = "b")
tree.min <- which.min(cv.carseats$dev)
points(tree.min, cv.carseats$dev[tree.min], col = "red", cex = 2, pch = 20)
prune.carseats <- prune.tree(tree.carseats, best = 8)
plot(prune.carseats)
text(prune.carseats, pretty = 0)
yhat <- predict(prune.carseats, newdata = Carseats.test)
mean((yhat - Carseats.test$Sales)^2)
# part d: bagging — mtry = 10 uses all predictors at every split.
bag.carseats <- randomForest(Sales ~ ., data = Carseats.train, mtry = 10, ntree = 500, importance = TRUE)
yhat.bag <- predict(bag.carseats, newdata = Carseats.test)
mean((yhat.bag - Carseats.test$Sales)^2)
importance(bag.carseats)
# part e: random forest with mtry = 3 (~ p/3 for regression).
rf.carseats <- randomForest(Sales ~ ., data = Carseats.train, mtry = 3, ntree = 500, importance = TRUE)
yhat.rf <- predict(rf.carseats, newdata = Carseats.test)
mean((yhat.rf - Carseats.test$Sales)^2)
importance(rf.carseats)
# question 9
# Classification tree on the OJ data (response: Purchase).
# part a: 800 observations for training.
set.seed(1)
train <- sample(1:nrow(OJ), 800)
OJ.train <- OJ[train, ]
OJ.test <- OJ[-train, ]
# part b
tree.oj <- tree(Purchase ~ ., data = OJ.train)
summary(tree.oj)
# part c: print the tree's node-by-node description.
tree.oj
# part d
plot(tree.oj)
text(tree.oj, pretty = 0)
# part e: confusion matrix on the test set.
tree.pred <- predict(tree.oj, OJ.test, type = "class")
table(tree.pred, OJ.test$Purchase)
# part f: CV over tree size using the misclassification rate.
cv.oj <- cv.tree(tree.oj, FUN = prune.misclass)
cv.oj
# part g
plot(cv.oj$size, cv.oj$dev, type = "b", xlab = "Tree size", ylab = "Deviance")
# part h
# the two node tree
# part i
prune.oj <- prune.misclass(tree.oj, best = 2)
plot(prune.oj)
text(prune.oj, pretty = 0)
# part j: compare training error rates of the full and pruned trees.
summary(tree.oj)
summary(prune.oj)
# pruned tree doesn't do better
# part k: test-set confusion matrix for the pruned tree.
prune.pred <- predict(prune.oj, OJ.test, type = "class")
table(prune.pred, OJ.test$Purchase)
#problem 10
# Boosting on the Hitters data, predicting log(Salary), with comparisons to
# linear/ridge regression and bagging.
# part a: drop players with missing Salary, then log-transform the response.
Hitters <- na.omit(Hitters)
Hitters$Salary <- log(Hitters$Salary)
# part b: first 200 rows train, remainder test.
train <- 1:200
Hitters.train <- Hitters[train, ]
Hitters.test <- Hitters[-train, ]
# part c: training MSE across a grid of shrinkage (lambda) values.
library(gbm)
set.seed(1)
pows <- seq(-10, -0.2, by = 0.1)
lambdas <- 10^pows
train.err <- rep(NA, length(lambdas))
for (i in seq_along(lambdas)) {   # seq_along, not 1:length() (safe on empty)
  boost.hitters <- gbm(Salary ~ ., data = Hitters.train, distribution = "gaussian", n.trees = 1000, shrinkage = lambdas[i])
  pred.train <- predict(boost.hitters, Hitters.train, n.trees = 1000)
  train.err[i] <- mean((pred.train - Hitters.train$Salary)^2)
}
plot(lambdas, train.err, type = "b", xlab = "Shrinkage values", ylab = "Training MSE")
# part d: test MSE over the same grid; report the best shrinkage.
set.seed(1)
test.err <- rep(NA, length(lambdas))
for (i in seq_along(lambdas)) {
  boost.hitters <- gbm(Salary ~ ., data = Hitters.train, distribution = "gaussian", n.trees = 1000, shrinkage = lambdas[i])
  yhat <- predict(boost.hitters, Hitters.test, n.trees = 1000)
  test.err[i] <- mean((yhat - Hitters.test$Salary)^2)
}
plot(lambdas, test.err, type = "b", xlab = "Shrinkage values", ylab = "Test MSE")
min(test.err)
lambdas[which.min(test.err)]
# part e: compare with OLS and ridge regression.
# BUG FIX: the original fitted and evaluated the identical lm model twice in a
# row (three lines duplicated verbatim); the duplicate has been removed.
library(glmnet)
fit1 <- lm(Salary ~ ., data = Hitters.train)
pred1 <- predict(fit1, Hitters.test)
mean((pred1 - Hitters.test$Salary)^2)
x <- model.matrix(Salary ~ ., data = Hitters.train)
x.test <- model.matrix(Salary ~ ., data = Hitters.test)
y <- Hitters.train$Salary
fit2 <- glmnet(x, y, alpha = 0)   # alpha = 0 -> ridge penalty
pred2 <- predict(fit2, s = 0.01, newx = x.test)
mean((pred2 - Hitters.test$Salary)^2)
# part f: refit boosting at the best shrinkage and inspect variable influence.
boost.hitters <- gbm(Salary ~ ., data = Hitters.train, distribution = "gaussian", n.trees = 1000, shrinkage = lambdas[which.min(test.err)])
summary(boost.hitters)
# part g: bagging — random forest using all 19 predictors at every split.
set.seed(1)
bag.hitters <- randomForest(Salary ~ ., data = Hitters.train, mtry = 19, ntree = 500)
yhat.bag <- predict(bag.hitters, newdata = Hitters.test)
mean((yhat.bag - Hitters.test$Salary)^2)
# problem 12
#################### Trees ####################
# Tree-based models on an insurance-claims data set with a rare positive class.
# NOTE(review): `train` here must be the (Porto Seguro-style) data loaded
# earlier in the assignment — its loading is not shown in this section; confirm.
# what percentage of target column is 1?
nrow(subset(train, train$target == 1))*100/nrow(train)
# just 3.64%. We have a class imbalance problem in this data set. Need to think about that
# decision trees
library(tree)
train2 = train
train2[c("ps_car_11_cat")] <- list(NULL) # drop this column: a categorical variable with 104 unique values
train2$target = as.factor(train2$target)
# this is a classification problem
tree.porto =tree(target ~. ,train2)
summary(tree.porto)
tree.porto
# residual mean deviance = 0.0313
# importantly, misclassification = 0.03645
# Evaluate on a 10,000-row random sample (drawn from the training data, so
# this is not a true held-out test set — NOTE(review)).
test = train2[sample(nrow(train2), 10000), ]
tree.pred =predict(tree.porto,test,type ="class")
table(tree.pred,test$target)
#tree.pred 0 1
#0 9638 362
#1 0 0
library(rpart)
prob <- predict(tree.porto, test, type = "prob")
# lets do cross validation to find optimal value
cv.porto <- cv.tree(tree.porto, FUN=prune.misclass)
plot(cv.porto$size, cv.porto$dev, type = "b")
tree.min <- which.min(cv.porto$dev)
points(tree.min, cv.porto$dev[tree.min], col = "red", cex = 2, pch = 20)
# what does it mean it cannot prune further. Let's see the tree
plot(tree.porto)
text(tree.porto, pretty = 0)
# lets explore some more trees based approaches. we will see what turns up
# bagging approach: mtry = 55 uses every predictor at each split
library(randomForest)
train3 <- train2[sample(nrow(train2), 10000), ]
test = train2[sample(nrow(train2), 10000), ]
summary(train3$target)
# 3.66% are 1's. This is pretty close to real value
bag.porto <- randomForest(target ~ ., data = train3, mtry = 55, ntree = 5, importance = TRUE)
yhat.bag <- predict(bag.porto, newdata = test, type = "class")
summary(yhat.bag)
table(yhat.bag,test$target)
prob <- predict(bag.porto, test, type = "prob")
# there's a key learning here. Classification trees run much faster than regression trees. Tried that
#yhat.bag 0 1
# 0 9608 353
# 1 39 8
# random forest
train4 <- train2[sample(nrow(train2), 10000), ]
test = train2[sample(nrow(train2), 10000), ]
rf.porto <- randomForest(target ~ ., data = train4, mtry = 8, ntree = 5, importance = TRUE)
yhat.bag <- predict(rf.porto, newdata = test, type = "class")
# mtry is approximately the square root of the total number of predictors
summary(yhat.bag)
table(yhat.bag,test$target)
#yhat.bag 0 1
#0 9607 360
#1 29 4
prob <- predict(rf.porto, test, type = "prob")
importance(rf.porto)
# 0 1 MeanDecreaseAccuracy MeanDecreaseGini
# id -0.04025452 1.19480792 0.22436613 38.435765253
# ps_ind_01 7.26756030 0.66030796 7.37068398 16.014471436
# ps_ind_02_cat 6.31924829 -2.76233898 5.73173411 7.723166354
# ps_ind_03 9.63014257 -0.80416356 9.39749901 22.728275983
# ps_ind_04_cat 3.02688606 -1.15456769 2.76923899 2.717580042
# ps_ind_05_cat 3.40735190 5.59960499 4.80798570 12.870403906
# ps_ind_06_bin 6.32610310 -4.30309718 5.68893905 4.070708102
# ps_ind_07_bin 9.10699732 0.43839186 9.01106816 4.616474510
# ps_ind_08_bin -0.16343486 -0.47308637 -0.24338395 3.729193202
# ps_ind_09_bin 2.23581646 0.22723215 2.27478513 3.424232808
# ps_ind_10_bin 0.00000000 0.00000000 0.00000000 0.006555556
# ps_ind_11_bin -0.63149883 0.50181888 -0.53881494 0.900562965
# ps_ind_12_bin -1.29387359 -0.28742447 -1.35540318 0.675569327
# ps_ind_13_bin 0.00000000 0.00000000 0.00000000 0.011000000
# ps_ind_14 -0.46055574 -0.93651001 -0.64025888 1.151615790
# ps_ind_15 4.14353762 -2.98249073 3.56687577 20.458815383
# ps_ind_16_bin 3.32474858 1.04347526 3.37351544 4.683771938
# ps_ind_17_bin 3.32909503 -1.47988561 3.10815190 4.115044208
# ps_ind_18_bin 4.87978072 -1.94385470 4.55183159 3.765969895
# ps_reg_01 9.10131802 -2.13150926 8.68911012 14.916797865
# ps_reg_02 15.76111410 -4.58177899 15.28321769 21.291418343
# ps_reg_03 17.64536016 -4.24819971 17.21763925 35.080668913
# ps_car_01_cat 12.15826592 -1.09687902 11.82951055 21.893125697
# ps_car_02_cat 5.17487659 -0.84526378 5.05511422 1.823042205
# ps_car_04_cat 10.31848484 -4.44213631 10.37836365 7.594751139
# ps_car_06_cat 15.19963665 -3.16994795 14.75500687 34.450838803
# ps_car_07_cat 1.93152475 0.97869192 2.14254174 2.415850881
# ps_car_08_cat 5.36645517 -1.63867233 5.04314170 2.461433323
# ps_car_09_cat 9.55069867 -2.15774461 9.18570057 9.569265712
# ps_car_10_cat 0.86159299 -1.13184780 0.60131984 1.008233945
# ps_car_11 8.07042022 -1.54194628 7.98228545 7.825892097
# ps_car_12 15.69841377 -5.80065546 15.60270581 16.655136838
# ps_car_13 23.24586819 -6.19574103 23.08973305 36.670207999
# ps_car_14 17.23885918 -4.07272447 16.97557335 31.099228675
# ps_car_15 11.96761504 -2.70281046 11.70712394 17.871010417
# ps_calc_01 0.29694591 0.85880967 0.47588421 18.769930229
# ps_calc_02 -0.15760363 1.16557736 0.08235381 19.186725772
# ps_calc_03 -0.58138077 -0.74451936 -0.72507750 19.991917859
# ps_calc_04 1.48638950 0.65619288 1.58804445 15.620913856
# ps_calc_05 -0.68430568 0.31019346 -0.60348025 13.714182263
# ps_calc_06 -1.67403848 0.04657021 -1.61485861 16.520092623
# ps_calc_07 1.90385655 1.62223998 2.21127337 17.594644864
# ps_calc_08 -1.92376163 0.07354145 -1.87086006 19.185903123
# ps_calc_09 0.96613520 -0.32422003 0.88134313 16.891908493
# ps_calc_10 0.77287775 0.16943014 0.74451541 23.171962667
# ps_calc_11 -0.60163415 0.15572323 -0.54568160 22.328087578
# ps_calc_12 0.82113456 0.16485055 0.84045166 14.335544525
# ps_calc_13 -1.32710452 1.12377149 -1.06006525 20.847960793
# ps_calc_14 -0.97513747 0.10661703 -0.97331766 22.352432738
# ps_calc_15_bin -0.56437663 1.21231494 -0.35806428 4.597289599
# ps_calc_16_bin -1.30474076 -0.44964802 -1.33012676 5.060269362
# ps_calc_17_bin 0.18868541 1.93110915 0.65823878 5.496522677
# ps_calc_18_bin 2.29677307 -0.33518759 2.13992097 5.323885313
# ps_calc_19_bin -0.22784601 -0.05488170 -0.22386072 5.717105071
# ps_calc_20_bin 0.30370860 0.50430792 0.39440685 3.522452208
# Boosting
train5 <- train2[sample(nrow(train2), 10000), ]
library(gbm)
# NOTE(review): distribution = "bernoulli" expects a numeric 0/1 response, but
# train5$target was converted to a factor above — likely why the fit reports
# zero influential predictors. Confirm and convert back with
# as.numeric(as.character(target)) if so.
boost.porto =gbm(target~.,data=train5, distribution = "bernoulli", n.trees =500, interaction.depth = 10, shrinkage = 0.2, verbose = F)
# actually, in my boosting model, there are no predictors that had non zero influence!
#"A gradient boosted model with bernoulli loss function.
#500 iterations were performed.
#There were 55 predictors of which 0 had non-zero influence."
summary(boost.porto)
# NOTE(review): only 50 of the 500 fitted trees are used for prediction here —
# confirm that is intentional.
yhat.bag <- predict(boost.porto, newdata = test, n.trees = 50)
yhat.bag
prob <- predict(boost.porto, test, type = "prob")
summary(yhat.bag)
table(yhat.bag,test$target)
# Based on what our group has observed so far, logistic regression is better than tree based methods. With that being said, this data set may not be the ideal data set to test the effectiveness of tree based methods due to extremely high class imbalance.
| /Assignments/Assignment 6/Assign6.R | no_license | saiprasanthblk/Data-Mining-assignments | R | false | false | 17,936 | r | #lab
install.packages("tree")
library(tree)
library (ISLR)
attach (Carseats )
High=ifelse (Sales <=8," No"," Yes ")
Carseats =data.frame(Carseats ,High)
tree.carseats =tree(High???. -Sales ,Carseats)
summary (tree.carseats )
tree.carseats
set.seed(2)
train=sample (1: nrow(Carseats ), 200)
Carseats.test=Carseats [-train ,]
High.test=High[-train ]
tree.carseats =tree(High???.-Sales ,Carseats ,subset =train )
tree.pred=predict (tree.carseats ,Carseats.test ,type ="class")
table(tree.pred ,High.test)
# 71.5%
set.seed(3)
cv.carseats = cv.tree(tree.carseats ,FUN=prune.misclass )
names(cv.carseats)
cv.carseats
par(mfrow =c(1,2))
plot(cv.carseats$size ,cv.carseats$dev ,type="b")
plot(cv.carseats$k ,cv.carseats$dev ,type="b")
prune.carseats =prune.misclass (tree.carseats ,best =9)
plot(prune.carseats )
text(prune.carseats ,pretty =0)
tree.pred=predict (prune.carseats , Carseats .test ,type=" class ")
table(tree.pred ,High.test)
#0.77
library (MASS)
set.seed (1)
train = sample (1: nrow(Boston ), nrow(Boston )/2)
tree.boston =tree(medv???.,Boston ,subset =train)
summary (tree.boston)
plot(tree.boston )
text(tree.boston ,pretty =0)
cv.boston =cv.tree(tree.boston )
plot(cv.boston$size ,cv.boston$dev)
prune.boston =prune.tree(tree.boston ,best =5)
plot(prune.boston)
text(prune.boston,pretty =0)
yhat=predict (tree.boston ,newdata =Boston [-train ,])
boston.test=Boston[-train,"medv"]
plot(yhat ,boston.test)
abline (0,1)
mean((yhat -boston.test)^2)
install.packages("randomForest")
library (randomForest)
set.seed (1)
bag.boston =randomForest(medv???.,data=Boston ,subset =train ,
mtry=13, importance =TRUE)
bag.boston
yhat.bag = predict (bag.boston ,newdata =Boston [-train ,])
plot(yhat.bag , boston.test)
abline (0,1)
mean((yhat.bag -boston.test)^2)
bag.boston =randomForest(medv???.,data=Boston ,subset =train ,
mtry=13, ntree =25)
yhat.bag = predict (bag.boston ,newdata =Boston[-train ,])
mean(( yhat.bag -boston.test)^2)
set.seed (1)
rf.boston =randomForest(medv???.,data=Boston ,subset =train ,
mtry=6, importance =TRUE)
yhat.rf = predict (rf.boston ,newdata =Boston[-train ,])
mean((yhat.rf -boston.test)^2)
importance (rf.boston)
varImpPlot(rf.boston)
install.packages("gbm")
library(gbm)
set.seed(1)
boost.boston =gbm(medv???.,data=Boston[train,], distribution=
"gaussian",n.trees =5000 , interaction.depth =4)
summary(boost.boston)
par(mfrow =c(1,2))
plot(boost.boston ,i="rm")
plot(boost.boston ,i=" lstat ")
yhat.boost=predict(boost.boston ,newdata =Boston[-train ,],
n.trees =5000)
mean(( yhat.boost -boston.test)^2)
boost.boston =gbm(medv???.,data=Boston [train,], distribution=
"gaussian",n.trees =5000 , interaction.depth =4, shrinkage =0.2,
verbose =F)
yhat.boost=predict (boost.boston ,newdata =Boston[-train ,],
n.trees =5000)
mean((yhat.boost -boston.test)^2)
#Problems---------------------
#problem 1
# problem 3
p <- seq(0, 1, 0.01)
gini.index <- 2 * p * (1 - p)
class.error <- 1 - pmax(p, 1 - p)
cross.entropy <- - (p * log(p) + (1 - p) * log(1 - p))
matplot(p, cbind(gini.index, class.error, cross.entropy), col = c("red", "green", "blue"))
# problem 4
par(xpd = NA)
plot(NA, NA, type = "n", xlim = c(-2, 2), ylim = c(-3, 3), xlab = "X1", ylab = "X2")
# X2 < 1
lines(x = c(-2, 2), y = c(1, 1))
# X1 < 1 with X2 < 1
lines(x = c(1, 1), y = c(-3, 1))
text(x = (-2 + 1)/2, y = -1, labels = c(-1.8))
text(x = 1.5, y = -1, labels = c(0.63))
# X2 < 2 with X2 >= 1
lines(x = c(-2, 2), y = c(2, 2))
text(x = 0, y = 2.5, labels = c(2.49))
# X1 < 0 with X2<2 and X2>=1
lines(x = c(0, 0), y = c(1, 2))
text(x = -1, y = 1.5, labels = c(-1.06))
text(x = 1, y = 1.5, labels = c(0.21))
# problem 5
# majority vote will tell us x is red since 6 values are more than 0.5
# probability will tell us X is green since p avg is 0.45
# problem 6
# we find the split that minimizes RSS to the largest extent
# the same is applied multiple times until there are few observations in each bucket
# we have alpha which measures cost complexity
# optimal alpha is established in a way that minimizes the (y - yi)^2 + alpha*T
# higher alpha - lesser tree length
# we perform cv to find the optimal T
#problem 7
set.seed(1)
train <- sample(1:nrow(Boston), nrow(Boston) / 2)
Boston.train <- Boston[train, -14]
Boston.test <- Boston[-train, -14]
Y.train <- Boston[train, 14]
Y.test <- Boston[-train, 14]
rf.boston1 <- randomForest(Boston.train, y = Y.train, xtest = Boston.test, ytest = Y.test, mtry = ncol(Boston) - 1, ntree = 500)
rf.boston2 <- randomForest(Boston.train, y = Y.train, xtest = Boston.test, ytest = Y.test, mtry = (ncol(Boston) - 1) / 2, ntree = 500)
rf.boston3 <- randomForest(Boston.train, y = Y.train, xtest = Boston.test, ytest = Y.test, mtry = sqrt(ncol(Boston) - 1), ntree = 500)
plot(1:500, rf.boston1$test$mse, col = "green", type = "l", xlab = "Number of Trees", ylab = "Test MSE", ylim = c(10, 19))
lines(1:500, rf.boston2$test$mse, col = "red", type = "l")
lines(1:500, rf.boston3$test$mse, col = "blue", type = "l")
legend("topright", c("m = p", "m = p/2", "m = sqrt(p)"), col = c("green", "red", "blue"), cex = 1, lty = 1)
#problem 8
# part a
library(ISLR)
set.seed(1)
train <- sample(1:nrow(Carseats), nrow(Carseats) / 2)
Carseats.train <- Carseats[train, ]
Carseats.test <- Carseats[-train, ]
# part b
tree.carseats <- tree(Sales ~ ., data = Carseats.train)
summary(tree.carseats)
pred <- predict(tree.carseats, newdata = Carseats.test)
mean((pred - Carseats.test$Sales)^2)
#part c
cv.carseats <- cv.tree(tree.carseats)
plot(cv.carseats$size, cv.carseats$dev, type = "b")
tree.min <- which.min(cv.carseats$dev)
points(tree.min, cv.carseats$dev[tree.min], col = "red", cex = 2, pch = 20)
prune.carseats <- prune.tree(tree.carseats, best = 8)
plot(prune.carseats)
text(prune.carseats, pretty = 0)
yhat <- predict(prune.carseats, newdata = Carseats.test)
mean((yhat - Carseats.test$Sales)^2)
# part d
bag.carseats <- randomForest(Sales ~ ., data = Carseats.train, mtry = 10, ntree = 500, importance = TRUE)
yhat.bag <- predict(bag.carseats, newdata = Carseats.test)
mean((yhat.bag - Carseats.test$Sales)^2)
importance(bag.carseats)
# part e
rf.carseats <- randomForest(Sales ~ ., data = Carseats.train, mtry = 3, ntree = 500, importance = TRUE)
yhat.rf <- predict(rf.carseats, newdata = Carseats.test)
mean((yhat.rf - Carseats.test$Sales)^2)
importance(rf.carseats)
# question 9
# part a
set.seed(1)
train <- sample(1:nrow(OJ), 800)
OJ.train <- OJ[train, ]
OJ.test <- OJ[-train, ]
# part b
tree.oj <- tree(Purchase ~ ., data = OJ.train)
summary(tree.oj)
# part c
tree.oj
# part d
plot(tree.oj)
text(tree.oj, pretty = 0)
# part e
tree.pred <- predict(tree.oj, OJ.test, type = "class")
table(tree.pred, OJ.test$Purchase)
# part f
cv.oj <- cv.tree(tree.oj, FUN = prune.misclass)
cv.oj
# part g
plot(cv.oj$size, cv.oj$dev, type = "b", xlab = "Tree size", ylab = "Deviance")
# part h
# the two node tree
# part i
prune.oj <- prune.misclass(tree.oj, best = 2)
plot(prune.oj)
text(prune.oj, pretty = 0)
# part j
summary(tree.oj)
summary(prune.oj)
# pruned tree doesn't do better
# part k
prune.pred <- predict(prune.oj, OJ.test, type = "class")
table(prune.pred, OJ.test$Purchase)
#problem 10
# part a
Hitters <- na.omit(Hitters)
Hitters$Salary <- log(Hitters$Salary)
# part b
train <- 1:200
Hitters.train <- Hitters[train, ]
Hitters.test <- Hitters[-train, ]
# part c
library(gbm)
set.seed(1)
pows <- seq(-10, -0.2, by = 0.1)
lambdas <- 10^pows
train.err <- rep(NA, length(lambdas))
for (i in 1:length(lambdas)) {
boost.hitters <- gbm(Salary ~ ., data = Hitters.train, distribution = "gaussian", n.trees = 1000, shrinkage = lambdas[i])
pred.train <- predict(boost.hitters, Hitters.train, n.trees = 1000)
train.err[i] <- mean((pred.train - Hitters.train$Salary)^2)
}
plot(lambdas, train.err, type = "b", xlab = "Shrinkage values", ylab = "Training MSE")
# part d
set.seed(1)
test.err <- rep(NA, length(lambdas))
for (i in 1:length(lambdas)) {
boost.hitters <- gbm(Salary ~ ., data = Hitters.train, distribution = "gaussian", n.trees = 1000, shrinkage = lambdas[i])
yhat <- predict(boost.hitters, Hitters.test, n.trees = 1000)
test.err[i] <- mean((yhat - Hitters.test$Salary)^2)
}
plot(lambdas, test.err, type = "b", xlab = "Shrinkage values", ylab = "Test MSE")
min(test.err)
lambdas[which.min(test.err)]
# part e
library(glmnet)
fit1 <- lm(Salary ~ ., data = Hitters.train)
pred1 <- predict(fit1, Hitters.test)
mean((pred1 - Hitters.test$Salary)^2)
fit1 <- lm(Salary ~ ., data = Hitters.train)
pred1 <- predict(fit1, Hitters.test)
mean((pred1 - Hitters.test$Salary)^2)
x <- model.matrix(Salary ~ ., data = Hitters.train)
x.test <- model.matrix(Salary ~ ., data = Hitters.test)
y <- Hitters.train$Salary
fit2 <- glmnet(x, y, alpha = 0)
pred2 <- predict(fit2, s = 0.01, newx = x.test)
mean((pred2 - Hitters.test$Salary)^2)
# part f
library(gbm)
boost.hitters <- gbm(Salary ~ ., data = Hitters.train, distribution = "gaussian", n.trees = 1000, shrinkage = lambdas[which.min(test.err)])
summary(boost.hitters)
# part g
set.seed(1)
bag.hitters <- randomForest(Salary ~ ., data = Hitters.train, mtry = 19, ntree = 500)
yhat.bag <- predict(bag.hitters, newdata = Hitters.test)
mean((yhat.bag - Hitters.test$Salary)^2)
# problem 12
#################### Trees ####################
# what percentage of target column is 1?
nrow(subset(train, train$target == 1))*100/nrow(train)
# just 3.64%. We have a class imbalance problem in this data set. Need to think about that
# decision trees
library(tree)
train2 = train
train2[c("ps_car_11_cat")] <- list(NULL) # I am removing the columns that it is a categorical variable but has 104 unique values
train2$target = as.factor(train2$target)
# this is a classification problem
tree.porto =tree(target ~. ,train2)
summary(tree.porto)
tree.porto
# residual mean deviance = 0.0313
# importantly, misclassification = 0.03645
test = train2[sample(nrow(train2), 10000), ]
tree.pred =predict(tree.porto,test,type ="class")
table(tree.pred,test$target)
#tree.pred 0 1
#0 9638 362
#1 0 0
library(rpart)
prob <- predict(tree.porto, test, type = "prob")
# lets do cross validation to find optimal value
cv.porto <- cv.tree(tree.porto, FUN=prune.misclass)
plot(cv.porto$size, cv.porto$dev, type = "b")
tree.min <- which.min(cv.porto$dev)
points(tree.min, cv.porto$dev[tree.min], col = "red", cex = 2, pch = 20)
# what does it mean it cannot prune further. Let's see the tree
plot(tree.porto)
text(tree.porto, pretty = 0)
# lets explore some more trees based approaches. we will see what turns up
# bagging approach
# Bagging = random forest with mtry set to ALL predictors (55 here).
library(randomForest)
train3 <- train2[sample(nrow(train2), 10000), ]
test = train2[sample(nrow(train2), 10000), ]
summary(train3$target)
# 3.66% are 1's. This is pretty close to real value
bag.porto <- randomForest(target ~ ., data = train3, mtry = 55, ntree = 5, importance = TRUE)
yhat.bag <- predict(bag.porto, newdata = test, type = "class")
summary(yhat.bag)
table(yhat.bag,test$target)
prob <- predict(bag.porto, test, type = "prob")
# there's a key learning here. Classification trees run much faster than regression trees. Tried that
#yhat.bag 0 1
# 0 9608 353
# 1 39 8
# random forest
# mtry = 8 ~ sqrt(55) decorrelates the trees relative to bagging above
train4 <- train2[sample(nrow(train2), 10000), ]
test = train2[sample(nrow(train2), 10000), ]
rf.porto <- randomForest(target ~ ., data = train4, mtry = 8, ntree = 5, importance = TRUE)
yhat.bag <- predict(rf.porto, newdata = test, type = "class")
# mtry is square root of total predictors
summary(yhat.bag)
table(yhat.bag,test$target)
#yhat.bag 0 1
#0 9607 360
#1 29 4
prob <- predict(rf.porto, test, type = "prob")
importance(rf.porto)
# 0 1 MeanDecreaseAccuracy MeanDecreaseGini
# id -0.04025452 1.19480792 0.22436613 38.435765253
# ps_ind_01 7.26756030 0.66030796 7.37068398 16.014471436
# ps_ind_02_cat 6.31924829 -2.76233898 5.73173411 7.723166354
# ps_ind_03 9.63014257 -0.80416356 9.39749901 22.728275983
# ps_ind_04_cat 3.02688606 -1.15456769 2.76923899 2.717580042
# ps_ind_05_cat 3.40735190 5.59960499 4.80798570 12.870403906
# ps_ind_06_bin 6.32610310 -4.30309718 5.68893905 4.070708102
# ps_ind_07_bin 9.10699732 0.43839186 9.01106816 4.616474510
# ps_ind_08_bin -0.16343486 -0.47308637 -0.24338395 3.729193202
# ps_ind_09_bin 2.23581646 0.22723215 2.27478513 3.424232808
# ps_ind_10_bin 0.00000000 0.00000000 0.00000000 0.006555556
# ps_ind_11_bin -0.63149883 0.50181888 -0.53881494 0.900562965
# ps_ind_12_bin -1.29387359 -0.28742447 -1.35540318 0.675569327
# ps_ind_13_bin 0.00000000 0.00000000 0.00000000 0.011000000
# ps_ind_14 -0.46055574 -0.93651001 -0.64025888 1.151615790
# ps_ind_15 4.14353762 -2.98249073 3.56687577 20.458815383
# ps_ind_16_bin 3.32474858 1.04347526 3.37351544 4.683771938
# ps_ind_17_bin 3.32909503 -1.47988561 3.10815190 4.115044208
# ps_ind_18_bin 4.87978072 -1.94385470 4.55183159 3.765969895
# ps_reg_01 9.10131802 -2.13150926 8.68911012 14.916797865
# ps_reg_02 15.76111410 -4.58177899 15.28321769 21.291418343
# ps_reg_03 17.64536016 -4.24819971 17.21763925 35.080668913
# ps_car_01_cat 12.15826592 -1.09687902 11.82951055 21.893125697
# ps_car_02_cat 5.17487659 -0.84526378 5.05511422 1.823042205
# ps_car_04_cat 10.31848484 -4.44213631 10.37836365 7.594751139
# ps_car_06_cat 15.19963665 -3.16994795 14.75500687 34.450838803
# ps_car_07_cat 1.93152475 0.97869192 2.14254174 2.415850881
# ps_car_08_cat 5.36645517 -1.63867233 5.04314170 2.461433323
# ps_car_09_cat 9.55069867 -2.15774461 9.18570057 9.569265712
# ps_car_10_cat 0.86159299 -1.13184780 0.60131984 1.008233945
# ps_car_11 8.07042022 -1.54194628 7.98228545 7.825892097
# ps_car_12 15.69841377 -5.80065546 15.60270581 16.655136838
# ps_car_13 23.24586819 -6.19574103 23.08973305 36.670207999
# ps_car_14 17.23885918 -4.07272447 16.97557335 31.099228675
# ps_car_15 11.96761504 -2.70281046 11.70712394 17.871010417
# ps_calc_01 0.29694591 0.85880967 0.47588421 18.769930229
# ps_calc_02 -0.15760363 1.16557736 0.08235381 19.186725772
# ps_calc_03 -0.58138077 -0.74451936 -0.72507750 19.991917859
# ps_calc_04 1.48638950 0.65619288 1.58804445 15.620913856
# ps_calc_05 -0.68430568 0.31019346 -0.60348025 13.714182263
# ps_calc_06 -1.67403848 0.04657021 -1.61485861 16.520092623
# ps_calc_07 1.90385655 1.62223998 2.21127337 17.594644864
# ps_calc_08 -1.92376163 0.07354145 -1.87086006 19.185903123
# ps_calc_09 0.96613520 -0.32422003 0.88134313 16.891908493
# ps_calc_10 0.77287775 0.16943014 0.74451541 23.171962667
# ps_calc_11 -0.60163415 0.15572323 -0.54568160 22.328087578
# ps_calc_12 0.82113456 0.16485055 0.84045166 14.335544525
# ps_calc_13 -1.32710452 1.12377149 -1.06006525 20.847960793
# ps_calc_14 -0.97513747 0.10661703 -0.97331766 22.352432738
# ps_calc_15_bin -0.56437663 1.21231494 -0.35806428 4.597289599
# ps_calc_16_bin -1.30474076 -0.44964802 -1.33012676 5.060269362
# ps_calc_17_bin 0.18868541 1.93110915 0.65823878 5.496522677
# ps_calc_18_bin 2.29677307 -0.33518759 2.13992097 5.323885313
# ps_calc_19_bin -0.22784601 -0.05488170 -0.22386072 5.717105071
# ps_calc_20_bin 0.30370860 0.50430792 0.39440685 3.522452208
# Boosting
train5 <- train2[sample(nrow(train2), 10000), ]
library(gbm)
## FIX: gbm() with distribution = "bernoulli" requires a numeric 0/1 response.
## train2$target was converted to a factor earlier (for tree()); feeding the
## factor to gbm is exactly why the original run reported
## "There were 55 predictors of which 0 had non-zero influence."
train5$target <- as.numeric(as.character(train5$target))
boost.porto =gbm(target~.,data=train5, distribution = "bernoulli", n.trees =500, interaction.depth = 10, shrinkage = 0.2, verbose = F)
summary(boost.porto)
## FIX: predict.gbm() has no type = "prob"; use type = "response" to obtain
## predicted probabilities on the 0-1 scale. n.trees = 50 (a subset of the 500
## fitted trees) is kept from the original for comparability.
yhat.bag <- predict(boost.porto, newdata = test, n.trees = 50, type = "response")
summary(yhat.bag)
## FIX: yhat.bag is now continuous, so dichotomize at 0.5 before
## cross-tabulating against the observed class (the original table() on raw
## predictions produced one row per distinct predicted value).
table(yhat.bag > 0.5, test$target)
# Based on what our group has observed so far, logistic regression is better than tree based methods. With that being said, this data set may not be the ideal data set to test the effectiveness of tree based methods due to extremely high class imbalance.
|
## This script will generate a plot of total PM2.5 emissions from all sources in
## the EPA NEI dataset for each of the years 1999, 2002, 2005, and 2008 in base.

## download the data (only if not already present)
## FIX: the original called download.file() twice -- once without a method and
## once with method = "curl" -- so the second call re-downloaded/overwrote the
## first and fails on systems without curl. method = "auto" lets R pick the
## right download mechanism on every platform.
if(!file.exists("./data")) {dir.create("./data")}
zipURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
if(!file.exists("./data/NEIdata.zip")) {
    download.file(zipURL, destfile="./data/NEIdata.zip", method="auto")
}
dateDownloaded <- date()
## unzip
unzip("./data/NEIdata.zip", exdir="./data")
## load into R (scc is loaded for reference; only nei is used for this plot)
nei <- readRDS("./data/summarySCC_PM25.rds")
scc <- readRDS("./data/Source_Classification_Code.rds")
## get total emissions (tons) by year
totals <- aggregate(nei$Emissions, by=list(Year = nei$year), FUN=sum)
## open graphics device
png(file = "plot1.png", width = 480, height = 480, units = "px")
## plot yearly totals with a linear trend line; label each point with its year
## (FIX: labels derived from the data instead of a hard-coded vector, so the
## plot stays correct if the set of years in the data changes)
plot(totals$Year, totals$x, pch=19, main="Total PM2.5 Emissions per Year, United States",
     xlab="Year", ylab="Total Emissions (tons)", xlim=c(1998, 2010))
abline(lm(totals$x ~ totals$Year))
text(totals$Year + 0.5, totals$x, labels=totals$Year)
## close graphics device
dev.off()
| /plot1.R | no_license | cs79/ExData_Plotting2 | R | false | false | 1,147 | r | ## This script will generate a plot of total PM2.5 emissions from all sources in
## the EPA NEI dataset for each of the years 1999, 2002, 2005, and 2008 in base.
## download the data
if(!file.exists("./data")) {dir.create("./data")}
zipURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
#Windows:
download.file(zipURL, destfile="./data/NEIdata.zip")
#Mac:
# NOTE(review): both download.file() calls run unconditionally -- the curl call
# re-downloads and overwrites the first, and errors on systems without curl.
# Only the call appropriate for the current OS should be executed.
download.file(zipURL, destfile="./data/NEIdata.zip", method="curl")
dateDownloaded <- date()
## unzip
unzip("./data/NEIdata.zip", exdir="./data")
## load into R
# scc (source classification codes) is loaded but not used by this plot
nei <- readRDS("./data/summarySCC_PM25.rds")
scc <- readRDS("./data/Source_Classification_Code.rds")
## get totals by year
totals <- aggregate(nei$Emissions, by=list(Year = nei$year), FUN=sum)
## open graphics device
png(file = "plot1.png", width = 480, height = 480, units = "px")
## plot
plot(totals$Year, totals$x, pch=19, main="Total PM2.5 Emissions per Year, United States",
     xlab="Year", ylab="Total Emissions (tons)", xlim=c(1998, 2010))
abline(lm(totals$x ~ totals$Year))
# NOTE(review): labels are hard-coded and must stay in sync with the years
# actually present in the data; labels=totals$Year would be safer.
text(totals$Year + 0.5, totals$x, labels=c("1999", "2002", "2005", "2008"))
## close graphics device
dev.off()
#' ISOImageryPlan
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO imagery Plan
#' @return Object of \code{\link{R6Class}} for modelling an ISO imagery Plan
#' @format \code{\link{R6Class}} object.
#'
#' @examples
#' md <- ISOImageryPlan$new()
#' md$setType("point")
#' md$setStatus("completed")
#'
#' #add citation
#' rp1 <- ISOResponsibleParty$new()
#' rp1$setIndividualName("someone1")
#' rp1$setOrganisationName("somewhere1")
#' rp1$setPositionName("someposition1")
#' rp1$setRole("pointOfContact")
#' contact1 <- ISOContact$new()
#' phone1 <- ISOTelephone$new()
#' phone1$setVoice("myphonenumber1")
#' phone1$setFacsimile("myfacsimile1")
#' contact1$setPhone(phone1)
#' address1 <- ISOAddress$new()
#' address1$setDeliveryPoint("theaddress1")
#' address1$setCity("thecity1")
#' address1$setPostalCode("111")
#' address1$setCountry("France")
#' address1$setEmail("someone1@@theorg.org")
#' contact1$setAddress(address1)
#' res <- ISOOnlineResource$new()
#' res$setLinkage("http://www.somewhereovertheweb.org")
#' res$setName("somename")
#' contact1$setOnlineResource(res)
#' rp1$setContactInfo(contact1)
#'
#' #citation
#' ct <- ISOCitation$new()
#' ct$setTitle("sometitle")
#' d <- ISODate$new()
#' d$setDate(ISOdate(2015, 1, 1, 1))
#' d$setDateType("publication")
#' ct$addDate(d)
#' ct$setEdition("1.0")
#' ct$setEditionDate(ISOdate(2015,1,1))
#' ct$addIdentifier(ISOMetaIdentifier$new(code = "identifier"))
#' ct$addPresentationForm("mapDigital")
#' ct$addCitedResponsibleParty(rp1)
#' md$setCitation(ct)
#' xml <- md$encode()
#'
#' @references
#' ISO 19115-2:2009 - Geographic information -- Metadata Part 2: Extensions for imagery and gridded data
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
ISOImageryPlan <- R6Class("ISOImageryPlan",
   inherit = ISOAbstractObject,
   private = list(
     xmlElement = "MI_Plan",
     xmlNamespacePrefix = "GMI"
   ),
   public = list(
     #'@field type type [0..1]: ISOImageryGeometryType
     type = NULL,
     #'@field status status [1..1]: ISOProgress
     status = NULL,
     #'@field citation citation [1..1]: ISOCitation
     citation = NULL,
     #'@field operation operation [0..*]: ISOImageryOperation
     operation = list(),
     #'@field satisfiedRequirement satisfiedRequirement [0..*]: ISOImageryRequirement
     satisfiedRequirement = list(),
     #'@description Initializes object
     #'@param xml object of class \link{XMLInternalNode-class}
     initialize = function(xml = NULL){
       super$initialize(xml = xml)
     },
     #'@description Set type
     #'@param type object of class \link{ISOImageryGeometryType} or any \link{character}
     #' among values returned by \code{ISOImageryGeometryType$values()}
     setType = function(type){
       if(is(type, "character")){
         type <- ISOImageryGeometryType$new(value = type)
       }else{
         if(!is(type, "ISOImageryGeometryType")){
           #FIX: closing quote was missing in the original error message
           stop("The argument should be an object of class 'character' or 'ISOImageryGeometryType'")
         }
       }
       self$type <- type
     },
     #'@description Set status
     #'@param status object of class \link{ISOStatus} or any \link{character}
     #' among values returned by \code{ISOStatus$values()}
     setStatus = function(status){
       if(is(status, "character")){
         status <- ISOStatus$new(value = status)
       }else{
         if(!is(status, "ISOStatus")){
           stop("The argument should be an object of class 'ISOStatus' or 'character'")
         }
       }
       self$status <- status
     },
     #'@description Set citation
     #'@param citation object of class \link{ISOCitation}
     setCitation = function(citation){
       if(!is(citation, "ISOCitation")){
         #FIX: closing quote was missing in the original error message
         stop("The argument should be an object of class 'ISOCitation'")
       }
       self$citation <- citation
     },
     #'@description Adds operation
     #'@param operation object of class \link{ISOImageryOperation}
     #'@return \code{TRUE} if added, \code{FALSE} otherwise
     addOperation = function(operation){
       if(!is(operation, "ISOImageryOperation")){
         stop("The argument should be an object of class 'ISOImageryOperation'")
       }
       return(self$addListElement("operation", operation))
     },
     #'@description Deletes operation
     #'@param operation object of class \link{ISOImageryOperation}
     #'@return \code{TRUE} if deleted, \code{FALSE} otherwise
     delOperation = function(operation){
       if(!is(operation, "ISOImageryOperation")){
         stop("The argument should be an object of class 'ISOImageryOperation'")
       }
       return(self$delListElement("operation", operation))
     },
     #'@description Adds satisfied requirement
     #'@param requirement object of class \link{ISOImageryRequirement}
     #'@return \code{TRUE} if added, \code{FALSE} otherwise
     addSatisfiedRequirement = function(requirement){
       if(!is(requirement, "ISOImageryRequirement")){
         stop("The argument should be an object of class 'ISOImageryRequirement'")
       }
       return(self$addListElement("satisfiedRequirement", requirement))
     },
     #'@description Deletes satisfied requirement
     #'@param requirement object of class \link{ISOImageryRequirement}
     #'@return \code{TRUE} if deleted, \code{FALSE} otherwise
     delSatisfiedRequirement = function(requirement){
       if(!is(requirement, "ISOImageryRequirement")){
         stop("The argument should be an object of class 'ISOImageryRequirement'")
       }
       return(self$delListElement("satisfiedRequirement", requirement))
     }
   )
) | /R/ISOImageryPlan.R | no_license | cran/geometa | R | false | false | 5,977 | r | #' ISOImageryPlan
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO imagery Plan
#' @return Object of \code{\link{R6Class}} for modelling an ISO imagery Plan
#' @format \code{\link{R6Class}} object.
#'
#' @examples
#' md <- ISOImageryPlan$new()
#' md$setType("point")
#' md$setStatus("completed")
#'
#' #add citation
#' rp1 <- ISOResponsibleParty$new()
#' rp1$setIndividualName("someone1")
#' rp1$setOrganisationName("somewhere1")
#' rp1$setPositionName("someposition1")
#' rp1$setRole("pointOfContact")
#' contact1 <- ISOContact$new()
#' phone1 <- ISOTelephone$new()
#' phone1$setVoice("myphonenumber1")
#' phone1$setFacsimile("myfacsimile1")
#' contact1$setPhone(phone1)
#' address1 <- ISOAddress$new()
#' address1$setDeliveryPoint("theaddress1")
#' address1$setCity("thecity1")
#' address1$setPostalCode("111")
#' address1$setCountry("France")
#' address1$setEmail("someone1@@theorg.org")
#' contact1$setAddress(address1)
#' res <- ISOOnlineResource$new()
#' res$setLinkage("http://www.somewhereovertheweb.org")
#' res$setName("somename")
#' contact1$setOnlineResource(res)
#' rp1$setContactInfo(contact1)
#'
#' #citation
#' ct <- ISOCitation$new()
#' ct$setTitle("sometitle")
#' d <- ISODate$new()
#' d$setDate(ISOdate(2015, 1, 1, 1))
#' d$setDateType("publication")
#' ct$addDate(d)
#' ct$setEdition("1.0")
#' ct$setEditionDate(ISOdate(2015,1,1))
#' ct$addIdentifier(ISOMetaIdentifier$new(code = "identifier"))
#' ct$addPresentationForm("mapDigital")
#' ct$addCitedResponsibleParty(rp1)
#' md$setCitation(ct)
#' xml <- md$encode()
#'
#' @references
#' ISO 19115-2:2009 - Geographic information -- Metadata Part 2: Extensions for imagery and gridded data
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
ISOImageryPlan <- R6Class("ISOImageryPlan",
   inherit = ISOAbstractObject,
   private = list(
     xmlElement = "MI_Plan",
     xmlNamespacePrefix = "GMI"
   ),
   public = list(
     #'@field type type [0..1]: ISOImageryGeometryType
     type = NULL,
     #'@field status status [1..1]: ISOProgress
     status = NULL,
     #'@field citation citation [1..1]: ISOCitation
     citation = NULL,
     #'@field operation operation [0..*]: ISOImageryOperation
     operation = list(),
     #'@field satisfiedRequirement satisfiedRequirement [0..*]: ISOImageryRequirement
     satisfiedRequirement = list(),
     #'@description Initializes object
     #'@param xml object of class \link{XMLInternalNode-class}
     initialize = function(xml = NULL){
       super$initialize(xml = xml)
     },
     #'@description Set type
     #'@param type object of class \link{ISOImageryGeometryType} or any \link{character}
     #' among values returned by \code{ISOImageryGeometryType$values()}
     # A bare character is coerced to ISOImageryGeometryType; anything else must
     # already be an ISOImageryGeometryType.
     setType = function(type){
       if(is(type, "character")){
         type <- ISOImageryGeometryType$new(value = type)
       }else{
         if(!is(type, "ISOImageryGeometryType")){
           # NOTE(review): closing quote missing in this error message
           stop("The argument should be an object of class 'character' or 'ISOImageryGeometryType")
         }
       }
       self$type <- type
     },
     #'@description Set status
     #'@param status object of class \link{ISOStatus} or any \link{character}
     #' among values returned by \code{ISOStatus$values()}
     setStatus = function(status){
       if(is(status, "character")){
         status <- ISOStatus$new(value = status)
       }else{
         if(!is(status, "ISOStatus")){
           stop("The argument should be an object of class 'ISOStatus' or 'character'")
         }
       }
       self$status <- status
     },
     #'@description Set citation
     #'@param citation object of class \link{ISOCitation}
     setCitation = function(citation){
       if(!is(citation, "ISOCitation")){
         # NOTE(review): closing quote missing in this error message
         stop("The argument should be an object of class 'ISOCitation")
       }
       self$citation <- citation
     },
     #'@description Adds operation
     #'@param operation object of class \link{ISOImageryOperation}
     #'@return \code{TRUE} if added, \code{FALSE} otherwise
     addOperation = function(operation){
       if(!is(operation, "ISOImageryOperation")){
         stop("The argument should be an object of class 'ISOImageryOperation'")
       }
       return(self$addListElement("operation", operation))
     },
     #'@description Deletes operation
     #'@param operation object of class \link{ISOImageryOperation}
     #'@return \code{TRUE} if deleted, \code{FALSE} otherwise
     delOperation = function(operation){
       if(!is(operation, "ISOImageryOperation")){
         stop("The argument should be an object of class 'ISOImageryOperation'")
       }
       return(self$delListElement("operation", operation))
     },
     #'@description Adds satisfied requirement
     #'@param requirement object of class \link{ISOImageryRequirement}
     #'@return \code{TRUE} if added, \code{FALSE} otherwise
     addSatisfiedRequirement = function(requirement){
       if(!is(requirement, "ISOImageryRequirement")){
         stop("The argument should be an object of class 'ISOImageryRequirement'")
       }
       return(self$addListElement("satisfiedRequirement", requirement))
     },
     #'@description Deletes satisfied requirement
     #'@param requirement object of class \link{ISOImageryRequirement}
     #'@return \code{TRUE} if deleted, \code{FALSE} otherwise
     delSatisfiedRequirement = function(requirement){
       if(!is(requirement, "ISOImageryRequirement")){
         stop("The argument should be an object of class 'ISOImageryRequirement'")
       }
       return(self$delListElement("satisfiedRequirement", requirement))
     }
   )
)
##########extract list was generated after we filter out all the SNPs with 1M around the known SNPs region###
##########all the SNPs with pvalue <= 5E-06 was token out
## Extract dosage values for the Julie SNP list from per-chunk iCOGS genotype
## files, align them to the analysis subjects, and write one CSV of dosages.
## NOTE(review): depends on cluster file paths and convert() from the bc2 package.
new_filter <- read.csv("/data/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2_fixed/result/Filter_based_on_Montse.csv",header=T,stringsAsFactors = F)
new_filter[,2] <- as.numeric(gsub(",","",new_filter[,2]))
setwd("/data/zhangh24/breast_cancer_data_analysis/")
# number of subjects in the raw imputed genotype files
n.raw <- 109713
snpvalue <- rep(0,n.raw)
subject.file <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_order.txt.gz"
library(data.table)
Icog.order <- read.table(gzfile(subject.file))
setwd("/data/zhangh24/breast_cancer_data_analysis/")
data1 <- fread("./data/iCOGS_euro_v10_10232017.csv",header=T)
data1 <- as.data.frame(data1)
y.pheno.mis1 <- cbind(data1$Behaviour1,data1$ER_status1,data1$PR_status1,data1$HER2_status1,data1$Grade1)
colnames(y.pheno.mis1) = c("Behavior","ER","PR","HER2","Grade")
#x.test.all.mis1 <- data1[,c(27:206)]
SG_ID <- data1$SG_ID
x.covar.mis1 <- data1[,c(5:14,204)]
# keep only genotype rows for subjects present in the phenotype file, in
# phenotype-file order
idx.fil <- Icog.order[,1]%in%SG_ID
idx.match <- match(SG_ID,Icog.order[idx.fil,1])
#Icog.order.match <- Icog.order[idx.fil,1][idx.match]
library(bc2)
extract.num <- nrow(new_filter)
snpid.result <- rep("c",extract.num)
n.sub <- 72411
snpvalue.result <- matrix(0,n.sub,extract.num)
total <- 0
for(i in 1:564){
  print(i)
  geno.file <- paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/Julie_Icog",i,".txt"
  )
  num <- as.numeric(system(paste0('cat ',geno.file,' | wc -l '),intern=T))
  if(num!=0){
    con <- file(geno.file)
    temp <- 0
    open(con)
    ## FIX: the inner loop used `i`, shadowing the outer chunk index; renamed
    ## to `j` for clarity (behavior of the outer for() was unaffected since R
    ## reassigns the loop variable each iteration, but the shadowing was fragile).
    for(j in seq_len(num)){
      oneLine <- readLines(con,n=1)
      myVector <- strsplit(oneLine," ")
      snpid <- as.character(myVector[[1]][3])
      temp <- temp+1
      snpid.result[temp+total] <- snpid
      ## FIX: original had `snpvalue <- rep(0,n)` here, but `n` is undefined in
      ## this script (runtime error). The assignment was also dead code -- it is
      ## immediately overwritten by convert() below -- so it is removed.
      snppro <- as.numeric(unlist(myVector)[7:length(myVector[[1]])])
      snpvalue <- convert(snppro,n.raw)
      snpvalue <- snpvalue[idx.fil][idx.match]
      snpvalue.result[,temp+total] <- snpvalue
    }
    close(con)
    total <- total+num
  }
  # if(is.null(result[[1]])==0){
  #   temp <- length(result[[1]])
  #   snpid.result[total+(1:temp)] <- result[[1]]
  #   snpvalue.result[,total+(1:temp)] <- result[[2]]
  #   total <- temp+total
  # }
}
# drop the unused tail of the preallocated containers
snpid.result <- snpid.result[1:total]
snpvalue.result <- snpvalue.result[,1:total]
load("./whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/Julie_snp_name_match.Rdata")
# reorder extracted SNPs to match Julie's SNP list
idx.match <- match(Julie_snp$SNP.ICOGS,snpid.result)
snpid.result <- snpid.result[idx.match]
all.equal(snpid.result,Julie_snp$SNP.ICOGS)
snpvalue.result <- snpvalue.result[,idx.match]
extract.result <- list(snpid.result,snpvalue.result)
colnames(snpvalue.result) <- snpid.result
write.csv(snpvalue.result,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/Julie_snp_icog.csv",row.names = F,quote=F)
| /whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/code/merge_julie_icog.R | no_license | andrewhaoyu/breast_cancer_data_analysis | R | false | false | 3,100 | r |
##########extract list was generated after we filter out all the SNPs with 1M around the known SNPs region###
##########all the SNPs with pvalue <= 5E-06 was token out
# Extract dosages for the Julie SNP list from per-chunk iCOGS genotype files.
new_filter <- read.csv("/data/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2_fixed/result/Filter_based_on_Montse.csv",header=T,stringsAsFactors = F)
new_filter[,2] <- as.numeric(gsub(",","",new_filter[,2]))
setwd("/data/zhangh24/breast_cancer_data_analysis/")
n.raw <- 109713
snpvalue <- rep(0,n.raw)
subject.file <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_order.txt.gz"
library(data.table)
Icog.order <- read.table(gzfile(subject.file))
setwd("/data/zhangh24/breast_cancer_data_analysis/")
data1 <- fread("./data/iCOGS_euro_v10_10232017.csv",header=T)
data1 <- as.data.frame(data1)
y.pheno.mis1 <- cbind(data1$Behaviour1,data1$ER_status1,data1$PR_status1,data1$HER2_status1,data1$Grade1)
colnames(y.pheno.mis1) = c("Behavior","ER","PR","HER2","Grade")
#x.test.all.mis1 <- data1[,c(27:206)]
SG_ID <- data1$SG_ID
x.covar.mis1 <- data1[,c(5:14,204)]
# align raw genotype subject order with the phenotype file
idx.fil <- Icog.order[,1]%in%SG_ID
idx.match <- match(SG_ID,Icog.order[idx.fil,1])
#Icog.order.match <- Icog.order[idx.fil,1][idx.match]
library(bc2)
extract.num <- nrow(new_filter)
snpid.result <- rep("c",extract.num)
n.sub <- 72411
snpvalue.result <- matrix(0,n.sub,extract.num)
total <- 0
for(i in 1:564){
  print(i)
  geno.file <- paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/Julie_Icog",i,".txt"
  )
  num <- as.numeric(system(paste0('cat ',geno.file,' | wc -l '),intern=T))
  if(num!=0){
    con <- file(geno.file)
    temp <- 0
    open(con)
    # NOTE(review): inner loop reuses `i`, shadowing the outer chunk index
    for(i in 1:num){
      oneLine <- readLines(con,n=1)
      myVector <- strsplit(oneLine," ")
      snpid <- as.character(myVector[[1]][3])
      temp <- temp+1
      snpid.result[temp+total] <- snpid
      # BUG(review): `n` is undefined here (should be n.raw); the assignment is
      # also dead code, overwritten by convert() two lines below
      snpvalue <- rep(0,n)
      snppro <- as.numeric(unlist(myVector)[7:length(myVector[[1]])])
      snpvalue <- convert(snppro,n.raw)
      snpvalue <- snpvalue[idx.fil][idx.match]
      snpvalue.result[,temp+total] <- snpvalue
    }
    close(con)
    total <- total+num
  }
  # if(is.null(result[[1]])==0){
  #   temp <- length(result[[1]])
  #   snpid.result[total+(1:temp)] <- result[[1]]
  #   snpvalue.result[,total+(1:temp)] <- result[[2]]
  #   total <- temp+total
  # }
}
snpid.result <- snpid.result[1:total]
snpvalue.result <- snpvalue.result[,1:total]
load("./whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/Julie_snp_name_match.Rdata")
# reorder extracted SNPs to match Julie's list
idx.match <- match(Julie_snp$SNP.ICOGS,snpid.result)
snpid.result <- snpid.result[idx.match]
all.equal(snpid.result,Julie_snp$SNP.ICOGS)
snpvalue.result <- snpvalue.result[,idx.match]
extract.result <- list(snpid.result,snpvalue.result)
colnames(snpvalue.result) <- snpid.result
write.csv(snpvalue.result,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/Julie_snp_icog.csv",row.names = F,quote=F)
|
##Subsetting and Sorting
# NOTE(review): this whole file is a console-transcript of course notes; lines
# that "fail" (e.g. NA subsetting) are intentional demonstrations.
set.seed(13425)
x <- data.frame("var1" = sample(1:5), "var2"=sample(6:10), "var3"=sample(11:15))
x <- x[sample(1:5), ]; x$var2[c(1, 3)] <- NA
x
x[ , 1]
x[, "var1"]
x[c(1, 2), "var2"]
x[x$var1 >=3, ]
x[(x$var1>=3 & x$var3 <=12), ]
x[(x$var1>=3 | x$var3 <=12), ]
x[x$var2 <=8, ] #goes crazy when NAs present
x[which(x$var2 <= 8), ]
which(x$var2 <= 8) ## gives indices instead of actual values
?sort
sort(x$var1) #gives the actual values
sort(x$var1, decreasing = T)
sort(x$var2) ##does not give the NA values
sort(x$var2, na.last = T)
sort(x$var2, na.last = F)
?order
order(x$var2)
order(x$var3) ##GIVES THE INDICES instead of actual values, after arranging
x[order(x$var1), ] #sort whole rows according to the col of var1
# using sort() for row subsetting is a deliberate mistake -- it indexes by the
# sorted VALUES, not the ordering permutation
x[sort(x$var1), ]
identical(x[order(x$var1), ],
x[sort(x$var1), ]
) #False
x[order(x$var2, x$var3), ] #first var2, if 2 same values then according to var3
install.packages("plyr")
library(plyr)
?arrange
arrange(x, var1)
arrange(x, desc(var1))
identical(x[order(x$var1), ], arrange(x, var1)) #False ,
#both gives similar results except that the order subsetting preserves
#the row number indices, but 'arrange' func of 'plyr' package resets it
x$var4 <- round(rnorm(5, 10, 5), 0)
x
y <- cbind(x, rbinom(5, 10, 0.2))
y
##Summarizing Data
# Download Baltimore restaurants data and explore summary/tabulation functions.
urll <- "https://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
?download.file
download.file(url = urll, destfile = "restdata.csv", method = "curl")
read.csv("restdata.csv")
?read.csv
restdata <- read.csv(file = urll, stringsAsFactors = F)
head(restdata)
tail(restdata)
colnames(restdata)
nrow(restdata)
summary(restdata)
str(restdata)
quantile(restdata$councilDistrict, na.rm = T)
?quantile
quantile(restdata$councilDistrict, probs = c(.5, .6))
?table
table(restdata$zipCode)
table(restdata$zipCode, useNA = "ifany") #will make a heading of NA if there is any
table(restdata$zipCode, useNA = "always")#will make a heading of NA regardless of presence
table(restdata$zipCode, restdata$councilDistrict) #will make contingency table
is.na(restdata) #will make the whole table of logical operation
colSums(is.na(restdata))
any(is.na(restdata))
all(restdata$zipCode>1)
table(restdata$zipCode==21212) #fine for 1
# == against a vector recycles element-wise, which is why %in% is preferred
table(restdata$zipCode==c(21212,21213)) #gives error for more than one
table(restdata$zipCode %in% 21212)
table(restdata$zipCode %in% c(21212, 21213, 21214)) #this gives correct answer
nrow(restdata[restdata$zipCode %in% c(21212, 21213), ])
restdata[restdata$zipCode %in% c(21212, 21213), ] # can be used for subsetting
# instead of using |
restdata[(restdata$zipCode == 21212 | restdata$zipCode ==21213), ]
nrow(restdata[(restdata$zipCode == 21212 | restdata$zipCode ==21213), ])
data("UCBAdmissions")
df <- as.data.frame(UCBAdmissions)
head(df)
df
UCBAdmissions
summary(df)
dim(UCBAdmissions)
?xtabs
colnames(df)
xtabs(Freq ~ Gender + Admit, data = df)
xtabs(Freq ~ Dept + Admit, data = df)
sum(df[df$Dept == "B" & df$Admit == "Rejected", "Freq"])
# deliberately wrong: Admit is a factor and cannot be summed by xtabs
xtabs(Admit ~ Gender + Dept, data = df)
##Error in Summary.factor(1:2, na.rm = TRUE) :
##'sum' not meaningful for factors
str(warpbreaks)
head(warpbreaks)
nrow(warpbreaks)
xtabs(breaks ~ tension + wool, data = warpbreaks)
xt <- xtabs(breaks ~ ., data = warpbreaks) ## using "." instead of variable names will make
# cross tabs of all the variables. 2 X 2 is easy to understand, as more are added
# multidimensional arrays are made which are difficult to understand
warpbreaks$replicate <- rep(1:9, length = nrow(warpbreaks))
head(warpbreaks)
xt <- xtabs(breaks ~ ., data = warpbreaks)
?ftable
ftable(xt) #converts multidimentional table into 2 x 2 table
object.size(UCBAdmissions)
## CREATING NEW VARIABLES
# sequences, indicator variables, binning with cut()/cut2(), and factors
?seq
seq(1, 10, by = 3)
seq(1, 10, by = 2)
seq(1, 10, length.out = 4)
x <- round(rnorm(10, 20, 5), 0)
x
seq_along(x)
seq(along = x) #both will give the index values of all the variables
colnames(restdata)
head(restdata$neighborhood)
restdata2 <- restdata # just in case
restdata$nearme <- restdata$neighborhood %in% c("Frankford", "Clifton Park")
head (restdata$nearme)
restdata[restdata$nearme == T, "name"]
table(restdata$nearme)
?ifelse
restdata$zipwrong <- ifelse(restdata$zipCode < 0, TRUE, FALSE)
table(restdata$zipwrong)
?cut
# cut() at the quartiles; labels name the four quantile groups
restdata$zipgroups <- cut(restdata$zipCode, breaks = quantile(restdata$zipCode),
labels = c("first", "second", "third", "fourth"))
table(restdata$zipgroups)
install.packages("Hmisc")
library(Hmisc)
?cut2
# cut2() does quantile binning in one step (g = number of groups)
restdata$zipgroups2 <- cut2(restdata$zipCode, g = 4)
table(restdata$zipgroups2)
restdata$zcf <- factor(restdata$zipCode)
head(restdata$zcf)
table(restdata$zcf)
?relevel
library(plyr)
?mutate
restdata3 <- mutate(restdata, zipgroups3 = cut2(restdata$zipCode, g=4))
head(restdata3)
##RESHAPING DATA
# melt/dcast from reshape2, plus split/apply/combine with tapply, split, ddply, ave
install.packages("reshape2")
library(reshape2)
library(stringi)
library(stringr)
install.packages("stringr")
colnames(mtcars)
rownames(mtcars)
head(mtcars, 3)
mtcars$carname <- rownames(mtcars)
head(mtcars, 3)
?melt
carmelt <- melt(mtcars, id = c('carname', 'gear', 'cyl'), measure.vars = c('mpg', 'hp'))
head(carmelt)
tail(carmelt)
?dcast
cyldata <- dcast(carmelt, cyl ~ variable)
cyldata
geardata <- dcast(carmelt, gear ~ variable)
geardata
# NOTE(review): variable.names() here looks like a misuse -- it is the base
# generic for extracting names from model fits, not a dcast selector
cyldatahp <- dcast(carmelt, cyl ~ variable.names('hp'))
cyldatahp
cyldata2 <- dcast(carmelt, cyl ~ variable.names('hp', 'mpg'))
cyldata2
cylmean <- dcast(carmelt, cyl ~ variable, mean)
round(cylmean, 0)
gearsd <- round(dcast(carmelt, gear ~ variable, sd),1)
gearsd
gearmean <- round(dcast(carmelt, gear ~ variable, mean), 1)
gearmean
class(gearmean)
head(InsectSprays)
table(InsectSprays$spray)
?tapply
tapply(InsectSprays$count, InsectSprays$spray, sum)
tapply(InsectSprays$count, InsectSprays$spray, sum, simplify = F) #provides a list
?split
spins <- split(InsectSprays$count, InsectSprays$spray)
spins
spinsd <- split(InsectSprays$count, InsectSprays$spray, drop = T)
spinsd
lapply(spins, sum)
unlist(lapply(spins, sum))
sapply(spins, sum)
library(plyr)
?ddply
ddply(InsectSprays, .(spray))
ddply(mtcars, .(gear))
ddply(InsectSprays, .(spray), summarise, sum = sum(count))
#func(dataset, summarize, spray, by summing the count variable)
ddply(InsectSprays, .(spray), summarize, mean = round(mean(count), 1))
ddply(InsectSprays, 'spray', summarize, sum =sum(count))
?ave
ave(mtcars$mpg, mtcars$gear, mean) #will calculate the mean mpg of the cars of that particular
# gear, and gives this value for each observation/row.
#tapply will give mean for each factor
#ave will give the same value, but will print for each observation according to factor
round(ave(mtcars$mpg, mtcars$gear), 0)
cbind(mtcars$gear, round(ave(mtcars$mpg, mtcars$gear), 0))
cbind(gear = mtcars$gear, mpg = round(ave(mtcars$mpg, mtcars$gear), 0))
tapply(mtcars$mpg, mtcars$gear, mean)
round(tapply(mtcars$mpg, mtcars$gear, mean), 0)
ave(InsectSprays$count, InsectSprays$spray, FUN = sum) #sum by each factor (spray)
ave(InsectSprays$count, FUN = sum) #sums all
spraysums <- ddply(InsectSprays, .(spray), summarise, sum = ave(count, FUN = sum))
#for ave func, factor argument was not needed as ddply was already summarizing spray (factor variable)
head(spraysums)
tail(spraysums)
colnames(mtcars)
head(mtcars, 2)
mtcars$carname <- rownames(mtcars)
meltcars <- melt(mtcars, id = c('carname', 'cyl', 'gear'), measure.vars = c('mpg', 'qsec', 'hp'))
dcast(meltcars, cyl ~ variable, mean)
round(dcast(meltcars, cyl ~ variable, mean), 1)
##MANAGING DATA WITH dplyr- INTRODUCTION
# the five dplyr verbs (select/filter/arrange/rename/mutate) plus grouping and %>%
install.packages('dplyr')
library(dplyr)
chicago <- readRDS("chicago.rds")
colnames(chicago)
str(chicago)
table(chicago$city)
?select
head(select(chicago, tmpd))
head(select(chicago, c(city, tmpd)))
head(select(chicago, city, dptp, tmpd, date))
head(select(chicago, city:date))
head(select(chicago, -(city:date)))
?filter
head(chicago)
head(filter(chicago, tmpd >= 40))
head(filter(chicago, tmpd >40 & dptp > 40))
?arrange
head(arrange(chicago, tmpd))
head(arrange(chicago, -tmpd))# == head(arrange(chicago, desc(tmpd)))
?rename
chicago <- rename(chicago, temp = tmpd, dew = dptp, pm25 = pm25tmean2, pm10 = pm10tmean2)
names(chicago)
chicago <- rename(chicago, o3 = o3tmean2, no2 = no2tmean2)
names(chicago)
?mutate
chicago <- mutate(chicago, pm25md = pm25 - mean(pm25, na.rm = T))
tail(chicago)
# 1*(temp >= 80) converts the logical to 0/1 before factoring into cold/hot
chicago <- mutate(chicago, tempcat = factor(1*(temp >= 80), labels = c('cold', 'hot')))
head(filter(chicago, temp > 78))
summarize(chicago, maxtemp = max(temp, na.rm = T), mean25 = mean(pm25, na.rm = T))
hotcold <- group_by(chicago, tempcat)
head(hotcold)
summarize(hotcold, maxtemp = max(temp, na.rm = T), mean25 = mean(pm25, na.rm = T))
l <- as.POSIXlt(Sys.time()) #stored time as detailed list of information
c <- as.POSIXct(Sys.time()) #stored time as a single very long number (seconds from 1-1-1970)
unclass(c)
unclass(l)
names(chicago)
chicago <- mutate(chicago, year = as.POSIXlt(date)$year + 1900)
year <- group_by(chicago, year)
summarize(year, meantemp = mean(temp, na.rm = T), mean25 = mean(pm25, na.rm = T))
#pipeline operator %>%
chicago %>% mutate(month = as.POSIXlt(date)$mon + 1) %>%
  group_by(month) %>% summarize(meantemp = mean(temp, na.rm = T), mean25 = mean(pm25, na.rm = T))
## MERGING DATA
# compare base merge() (full key control) against plyr's join()/join_all()
names(mtcars)
mtcars <- mutate(mtcars, carname = row.names(mtcars))
mtcars2 <- mtcars %>% select(carname, mpg, disp, drat, cyl) %>% arrange(mpg)
mtcars3 <- mtcars %>% select(carname, cyl, hp, wt, mpg) %>% arrange(cyl)
head(mtcars2)
head(mtcars3)
head(merge(mtcars2, mtcars3)) #merge by all the common names
head(merge(mtcars2, mtcars3, by = "carname")) #keep common names other than mentioned separate
head(merge(mtcars2, mtcars3, by = c("carname", "cyl")))
#now lets distort the data then merge
mtcars2n <- mtcars2
mtcars2n$mpg <- 1:32
mtcars3n <- mtcars3
mtcars3n$cyl <- 101:132
head(mtcars2)
head(mtcars2n)
head(mtcars3)
head(mtcars3n)
head(merge(mtcars2n, mtcars3n, all = T))
head(merge(mtcars2n, mtcars3n, by = 'carname'))
head(merge(mtcars2n, mtcars3n, by = c('carname', 'cyl'), all = T))
head(merge(mtcars2n, mtcars3n, by= 'mpg', all = T))
?join
join(mtcars2, mtcars3)
join(mtcars2, mtcars3, by = 'carname') #similar to merge but not specifying x and y
join(mtcars2n, mtcars3n) #variables of 2 are complete, unmatched in 3 are empty
join(mtcars3n, mtcars2n) # vice versa
#merge has better control
df1 <- data.frame(id = sample(1:10), x = rnorm(10))
df2 <- data.frame(id = sample(1:10), y = rnorm(10))
df3 <- data.frame(id = sample(1:10), z = rnorm(10))
arrange(join(df1, df2), id)
dflist <- list(df1, df2, df3)
?join_all
join_all(dflist) #and that is the reason to use join
arrange(join_all(dflist), id)
##QUIZ
#1
house <- read.csv(file = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv")
agricultureLogical <- ifelse(house$AGS == 6 & house$ACR == 3, TRUE, FALSE)
head(which(agricultureLogical), 3)
#2
install.packages('jpeg')
library(jpeg)
?readJPEG
download.file(url = "https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg", destfile = "test.jpg", method = "curl")
jpeg <- readJPEG(source = "test.jpg", native = T)
?quantile
quantile(jpeg, probs = c(.3, .8))
#3
library(dplyr)
edu <- read.csv("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv")
# gdp file was not clean, modification was done
gdp <- read.csv("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv",
skip = 5,
header = F,
nrows = 190,
stringsAsFactors = F) %>%
select(V1,V2, V4:V5) %>%
rename(countrycode = V1, ranking = V2, country = V4, milusd = V5)
arrange(gdp, desc(ranking))[13, "country"]
#4
?merge
gdpedu <- merge(gdp, edu, by.x = "countrycode", by.y = "CountryCode")
income <- group_by(gdpedu, Income.Group)
summarise(income, averank = mean(ranking))
tapply(gdpedu$rank, gdpedu$Income.Group, mean, simplify = T)
#5
library(Hmisc)
gdpedu$quantiles <- cut(gdpedu$ranking, breaks=5)
rankgroups <- group_by(gdpedu, quantiles)
filter(rankgroups, Income.Group == "Lower middle income") %>% summarise(count = length(country))
#simpler and more elegant method
table(gdpedu$Income.Group, gdpedu$quantiles)
| /scribbling w3.R | no_license | ali-rabbani/Getting-and-Cleaning-Data-Coursera- | R | false | false | 12,300 | r | ##Subsetting and Sorting
set.seed(13425)
x <- data.frame("var1" = sample(1:5), "var2"=sample(6:10), "var3"=sample(11:15))
x <- x[sample(1:5), ]; x$var2[c(1, 3)] <- NA
x
x[ , 1]
x[, "var1"]
x[c(1, 2), "var2"]
x[x$var1 >=3, ]
x[(x$var1>=3 & x$var3 <=12), ]
x[(x$var1>=3 | x$var3 <=12), ]
x[x$var2 <=8, ] #goes crazy when NAs present
x[which(x$var2 <= 8), ]
which(x$var2 <= 8) ## gives indices instead of actual values
?sort
sort(x$var1) #gives the actual values
sort(x$var1, decreasing = T)
sort(x$var2) ##does not give the NA values
sort(x$var2, na.last = T)
sort(x$var2, na.last = F)
?order
order(x$var2)
order(x$var3) ##GIVES THE INDICES instead of actual values, after arranging
x[order(x$var1), ] #sort whole rows according to the col of var1
x[sort(x$var1), ]
identical(x[order(x$var1), ],
x[sort(x$var1), ]
) #False
x[order(x$var2, x$var3), ] #first var2, if 2 same values then according to var3
install.packages("plyr")
library(plyr)
?arrange
arrange(x, var1)
arrange(x, desc(var1))
identical(x[order(x$var1), ], arrange(x, var1)) #False ,
#both gives similar results except that the order subsetting preserves
#the row number indices, but 'arrange' func of 'plyr' package resets it
x$var4 <- round(rnorm(5, 10, 5), 0)
x
y <- cbind(x, rbinom(5, 10, 0.2))
y
##Summarizing Data
urll <- "https://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
?download.file
download.file(url = urll, destfile = "restdata.csv", method = "curl")
read.csv("restdata.csv")
?read.csv
restdata <- read.csv(file = urll, stringsAsFactors = F)
head(restdata)
tail(restdata)
colnames(restdata)
nrow(restdata)
summary(restdata)
str(restdata)
quantile(restdata$councilDistrict, na.rm = T)
?quantile
quantile(restdata$councilDistrict, probs = c(.5, .6))
?table
table(restdata$zipCode)
table(restdata$zipCode, useNA = "ifany") #will make a heading of NA if there is any
table(restdata$zipCode, useNA = "always")#will make a heading of NA regardless of presence
table(restdata$zipCode, restdata$councilDistrict) #will make contingency table
is.na(restdata) #will make the whole table of logical operation
colSums(is.na(restdata))
any(is.na(restdata))
all(restdata$zipCode>1)
table(restdata$zipCode==21212) #fine for 1
table(restdata$zipCode==c(21212,21213)) #gives error for more than one
table(restdata$zipCode %in% 21212)
table(restdata$zipCode %in% c(21212, 21213, 21214)) #this gives correct answer
nrow(restdata[restdata$zipCode %in% c(21212, 21213), ])
restdata[restdata$zipCode %in% c(21212, 21213), ] # can be used for subsetting
# instead of using |
restdata[(restdata$zipCode == 21212 | restdata$zipCode ==21213), ]
nrow(restdata[(restdata$zipCode == 21212 | restdata$zipCode ==21213), ])
data("UCBAdmissions")
df <- as.data.frame(UCBAdmissions)
head(df)
df
UCBAdmissions
summary(df)
dim(UCBAdmissions)
?xtabs
colnames(df)
xtabs(Freq ~ Gender + Admit, data = df)
xtabs(Freq ~ Dept + Admit, data = df)
sum(df[df$Dept == "B" & df$Admit == "Rejected", "Freq"])
xtabs(Admit ~ Gender + Dept, data = df)
##Error in Summary.factor(1:2, na.rm = TRUE) :
##'sum' not meaningful for factors
str(warpbreaks)
head(warpbreaks)
nrow(warpbreaks)
xtabs(breaks ~ tension + wool, data = warpbreaks)
xt <- xtabs(breaks ~ ., data = warpbreaks) ## using "." instead of variable names will make
# cross tabs of all the variables. 2 X 2 is easy to understand, as more are added
# multidimensional arrays are made which are difficult to understand
warpbreaks$replicate <- rep(1:9, length = nrow(warpbreaks))
head(warpbreaks)
xt <- xtabs(breaks ~ ., data = warpbreaks)
?ftable
ftable(xt) #converts multidimentional table into 2 x 2 table
object.size(UCBAdmissions)
## CREATING NEW VARIABLES
?seq
seq(1, 10, by = 3)
seq(1, 10, by = 2)
seq(1, 10, length.out = 4)
x <- round(rnorm(10, 20, 5), 0)
x
seq_along(x)
seq(along = x) #both will give the index values of all the variables
colnames(restdata)
head(restdata$neighborhood)
restdata2 <- restdata # just in case
restdata$nearme <- restdata$neighborhood %in% c("Frankford", "Clifton Park")
head (restdata$nearme)
restdata[restdata$nearme == T, "name"]
table(restdata$nearme)
?ifelse
restdata$zipwrong <- ifelse(restdata$zipCode < 0, TRUE, FALSE)
table(restdata$zipwrong)
?cut
restdata$zipgroups <- cut(restdata$zipCode, breaks = quantile(restdata$zipCode),
labels = c("first", "second", "third", "fourth"))
table(restdata$zipgroups)
install.packages("Hmisc")
library(Hmisc)
?cut2
restdata$zipgroups2 <- cut2(restdata$zipCode, g = 4)
table(restdata$zipgroups2)
restdata$zcf <- factor(restdata$zipCode)
head(restdata$zcf)
table(restdata$zcf)
?relevel
library(plyr)
?mutate
restdata3 <- mutate(restdata, zipgroups3 = cut2(restdata$zipCode, g=4))
head(restdata3)
##RESHAPING DATA
install.packages("reshape2")
library(reshape2)
library(stringi)
library(stringr)
install.packages("stringr")
colnames(mtcars)
rownames(mtcars)
head(mtcars, 3)
mtcars$carname <- rownames(mtcars)
head(mtcars, 3)
?melt
carmelt <- melt(mtcars, id = c('carname', 'gear', 'cyl'), measure.vars = c('mpg', 'hp'))
head(carmelt)
tail(carmelt)
?dcast
cyldata <- dcast(carmelt, cyl ~ variable)
cyldata
geardata <- dcast(carmelt, gear ~ variable)
geardata
cyldatahp <- dcast(carmelt, cyl ~ variable.names('hp'))
cyldatahp
cyldata2 <- dcast(carmelt, cyl ~ variable.names('hp', 'mpg'))
cyldata2
cylmean <- dcast(carmelt, cyl ~ variable, mean)
round(cylmean, 0)
gearsd <- round(dcast(carmelt, gear ~ variable, sd),1)
gearsd
gearmean <- round(dcast(carmelt, gear ~ variable, mean), 1)
gearmean
class(gearmean)
head(InsectSprays)
table(InsectSprays$spray)
?tapply
tapply(InsectSprays$count, InsectSprays$spray, sum)
tapply(InsectSprays$count, InsectSprays$spray, sum, simplify = F) #provides a list
?split
spins <- split(InsectSprays$count, InsectSprays$spray)
spins
spinsd <- split(InsectSprays$count, InsectSprays$spray, drop = T)
spinsd
lapply(spins, sum)
unlist(lapply(spins, sum))
sapply(spins, sum)
library(plyr)
?ddply
ddply(InsectSprays, .(spray))
ddply(mtcars, .(gear))
ddply(InsectSprays, .(spray), summarise, sum = sum(count))
#func(dataset, summarize, spray, by summing the count variable)
ddply(InsectSprays, .(spray), summarize, mean = round(mean(count), 1))
ddply(InsectSprays, 'spray', summarize, sum =sum(count))
?ave
ave(mtcars$mpg, mtcars$gear, mean) #will calculate the mean mpg of the cars of that particular
# gear, and gives this value for each observation/row.
#tapply will give mean for each factor
#ave will give the same value, but will print for each observation according to factor
round(ave(mtcars$mpg, mtcars$gear), 0)
cbind(mtcars$gear, round(ave(mtcars$mpg, mtcars$gear), 0))
cbind(gear = mtcars$gear, mpg = round(ave(mtcars$mpg, mtcars$gear), 0))
tapply(mtcars$mpg, mtcars$gear, mean)
round(tapply(mtcars$mpg, mtcars$gear, mean), 0)
ave(InsectSprays$count, InsectSprays$spray, FUN = sum) #sum by each factor (spray)
ave(InsectSprays$count, FUN = sum) #sums all
spraysums <- ddply(InsectSprays, .(spray), summarise, sum = ave(count, FUN = sum))
#for ave func, factor argument was not needed as ddply was already summarizing spray (factor variable)
head(spraysums)
tail(spraysums)
colnames(mtcars)
head(mtcars, 2)
mtcars$carname <- rownames(mtcars)
meltcars <- melt(mtcars, id = c('carname', 'cyl', 'gear'), measure.vars = c('mpg', 'qsec', 'hp'))
dcast(meltcars, cyl ~ variable, mean)
round(dcast(meltcars, cyl ~ variable, mean), 1)
##MANAGING DATA WITH dplyr- INTRODUCTION
install.packages('dplyr')
library(dplyr)
chicago <- readRDS("chicago.rds")
colnames(chicago)
str(chicago)
table(chicago$city)
?select
head(select(chicago, tmpd))
head(select(chicago, c(city, tmpd)))
head(select(chicago, city, dptp, tmpd, date))
head(select(chicago, city:date))
head(select(chicago, -(city:date)))
?filter
head(chicago)
head(filter(chicago, tmpd >= 40))
head(filter(chicago, tmpd >40 & dptp > 40))
?arrange
head(arrange(chicago, tmpd))
head(arrange(chicago, -tmpd))# == head(arrange(chicago, desc(tmpd)))
?rename
chicago <- rename(chicago, temp = tmpd, dew = dptp, pm25 = pm25tmean2, pm10 = pm10tmean2)
names(chicago)
chicago <- rename(chicago, o3 = o3tmean2, no2 = no2tmean2)
names(chicago)
?mutate
chicago <- mutate(chicago, pm25md = pm25 - mean(pm25, na.rm = T))
tail(chicago)
chicago <- mutate(chicago, tempcat = factor(1*(temp >= 80), labels = c('cold', 'hot')))
head(filter(chicago, temp > 78))
summarize(chicago, maxtemp = max(temp, na.rm = T), mean25 = mean(pm25, na.rm = T))
hotcold <- group_by(chicago, tempcat)
head(hotcold)
summarize(hotcold, maxtemp = max(temp, na.rm = T), mean25 = mean(pm25, na.rm = T))
l <- as.POSIXlt(Sys.time()) #stored time as detailed list of information
c <- as.POSIXct(Sys.time()) #stored time as a single very long number (seconds from 1-1-1970)
unclass(c)
unclass(l)
names(chicago)
chicago <- mutate(chicago, year = as.POSIXlt(date)$year + 1900)
year <- group_by(chicago, year)
summarize(year, meantemp = mean(temp, na.rm = T), mean25 = mean(pm25, na.rm = T))
#pipeline operator %>%
chicago %>% mutate(month = as.POSIXlt(date)$mon + 1) %>%
group_by(month) %>% summarize(meantemp = mean(temp, na.rm = T), mean25 = mean(pm25, na.rm = T))
## MERGING DATA
names(mtcars)
mtcars <- mutate(mtcars, carname = row.names(mtcars))
mtcars2 <- mtcars %>% select(carname, mpg, disp, drat, cyl) %>% arrange(mpg)
mtcars3 <- mtcars %>% select(carname, cyl, hp, wt, mpg) %>% arrange(cyl)
head(mtcars2)
head(mtcars3)
head(merge(mtcars2, mtcars3)) #merge by all the common names
head(merge(mtcars2, mtcars3, by = "carname")) #keep common names other than mentioned separate
head(merge(mtcars2, mtcars3, by = c("carname", "cyl")))
#now lets distort the data then merge
mtcars2n <- mtcars2
mtcars2n$mpg <- 1:32
mtcars3n <- mtcars3
mtcars3n$cyl <- 101:132
head(mtcars2)
head(mtcars2n)
head(mtcars3)
head(mtcars3n)
head(merge(mtcars2n, mtcars3n, all = T))
head(merge(mtcars2n, mtcars3n, by = 'carname'))
head(merge(mtcars2n, mtcars3n, by = c('carname', 'cyl'), all = T))
head(merge(mtcars2n, mtcars3n, by= 'mpg', all = T))
?join
join(mtcars2, mtcars3)
join(mtcars2, mtcars3, by = 'carname') #similar to merge but not specifying x and y
join(mtcars2n, mtcars3n) #variables of 2 are complete, unmatched in 3 are empty
join(mtcars3n, mtcars2n) # vice versa
#merge has better control
df1 <- data.frame(id = sample(1:10), x = rnorm(10))
df2 <- data.frame(id = sample(1:10), y = rnorm(10))
df3 <- data.frame(id = sample(1:10), z = rnorm(10))
arrange(join(df1, df2), id)
dflist <- list(df1, df2, df3)
?join_all
join_all(dflist) #and that is the reason to use join
arrange(join_all(dflist), id)
##QUIZ
#1
house <- read.csv(file = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv")
agricultureLogical <- ifelse(house$AGS == 6 & house$ACR == 3, TRUE, FALSE)
head(which(agricultureLogical), 3)
#2
install.packages('jpeg')
library(jpeg)
?readJPEG
download.file(url = "https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg", destfile = "test.jpg", method = "curl")
jpeg <- readJPEG(source = "test.jpg", native = T)
?quantile
quantile(jpeg, probs = c(.3, .8))
#3
library(dplyr)
edu <- read.csv("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv")
# gdp file was not clean, modification was done
gdp <- read.csv("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv",
skip = 5,
header = F,
nrows = 190,
stringsAsFactors = F) %>%
select(V1,V2, V4:V5) %>%
rename(countrycode = V1, ranking = V2, country = V4, milusd = V5)
arrange(gdp, desc(ranking))[13, "country"]
#4
?merge
gdpedu <- merge(gdp, edu, by.x = "countrycode", by.y = "CountryCode")
income <- group_by(gdpedu, Income.Group)
summarise(income, averank = mean(ranking))
tapply(gdpedu$rank, gdpedu$Income.Group, mean, simplify = T)
#5
library(Hmisc)
gdpedu$quantiles <- cut(gdpedu$ranking, breaks=5)
rankgroups <- group_by(gdpedu, quantiles)
filter(rankgroups, Income.Group == "Lower middle income") %>% summarise(count = length(country))
#simpler and more elegant method
table(gdpedu$Income.Group, gdpedu$quantiles)
|
"Fst" <-
function(rval,N){
k<-N/sum(N)
Fst.val<-k%*%diag(rval)
}
| /R/Fst.R | no_license | cran/Malmig | R | false | false | 69 | r | "Fst" <-
function(rval,N){
k<-N/sum(N)
Fst.val<-k%*%diag(rval)
}
|
# Estimate prior probabilities of "true" effects for the simulation study:
# resample empirical effect sizes (Carneiro data) and, for each smallest
# effect size of interest (SESOI), tabulate prevalence, positives and
# negatives in the resampled population.

# source additional functions
source("./scripts/simulation/load_packages.R")
source("./scripts/simulation/load_data_Carneiro.R")
# number of simulated experiments to draw
n_exp <- 10000
# empirical effect-size distribution (Cohen's d) from the Carneiro data
# (ES_data_Carneiro is created by the sourced load_data_Carneiro.R)
ES_true <- ES_data_Carneiro$ES_d
# set seed to reproduce results
set.seed(4321)
# sample from ES distribution and show histograms of empirical and sampled ES
current_ES <- sample(ES_true, n_exp, replace = TRUE)
hist(ES_true, breaks = 200)
hist(current_ES, breaks = 200)
# quick range checks: the bootstrap sample should stay within the
# empirical range (printed interactively, not stored)
min(ES_true)
max(ES_true)
min(current_ES)
max(current_ES)
# how many hypotheses lie over each SESOI threshold?
# make a matrix of prevalence, positives, and negatives for each SESOI;
# important for calculation of outcomes (PPV, FPR, FNR) later
SESOI <- c(.5, 1)
# 3 rows (prevalence / positives / negatives), one column per SESOI
mat <- matrix(NA, nrow = 3, ncol = length(SESOI),
dimnames = list(c("prev_pop", "all_positives", "all_negatives"),
c(.5, 1)))
prev_pop <- vector()
all_positives <- vector()
all_negatives <- vector()
counter = 0
for (ES in SESOI) {
counter = counter + 1
# prevalence: fraction of the *empirical* distribution above the threshold
prev <- round(sum(ES_true > ES)/length(ES_true), 3)
# positives/negatives counted in the *resampled* population of n_exp
all_pos <- sum(current_ES > ES)
all_neg <- n_exp - all_pos
# progress marker: show which SESOI is being processed
print(ES)
prev_pop[counter] <- prev
all_positives[counter] <- all_pos
all_negatives[counter] <- all_neg
}
# assemble the summary matrix and print it
mat[1, ] <- prev_pop
mat[2, ] <- all_positives
mat[3, ] <- all_negatives
mat
| /sim_pessimistic/scripts/analysis/prior_probs_for_analysis.R | no_license | Meggiedanziger/ResearchTrajectory | R | false | false | 1,296 | r |
# source additional functions
source("./scripts/simulation/load_packages.R")
source("./scripts/simulation/load_data_Carneiro.R")
n_exp <- 10000
ES_true <- ES_data_Carneiro$ES_d
# set seed to reproduce results
set.seed(4321)
# sample from ES distribution and show histograms of empirical and sampled ES
current_ES <- sample(ES_true, n_exp, replace = TRUE)
hist(ES_true, breaks = 200)
hist(current_ES, breaks = 200)
min(ES_true)
max(ES_true)
min(current_ES)
max(current_ES)
# how many hypothesis over SESOI threshold
# make a matrix of prevalence, positives, and negatives for each SESOI
# important for calculation of outcomes (PPV, FPR, FNR) later
SESOI <- c(.5, 1)
mat <- matrix(NA, nrow = 3, ncol = length(SESOI),
dimnames = list(c("prev_pop", "all_positives", "all_negatives"),
c(.5, 1)))
prev_pop <- vector()
all_positives <- vector()
all_negatives <- vector()
counter = 0
for (ES in SESOI) {
counter = counter + 1
prev <- round(sum(ES_true > ES)/length(ES_true), 3)
all_pos <- sum(current_ES > ES)
all_neg <- n_exp - all_pos
print(ES)
prev_pop[counter] <- prev
all_positives[counter] <- all_pos
all_negatives[counter] <- all_neg
}
mat[1, ] <- prev_pop
mat[2, ] <- all_positives
mat[3, ] <- all_negatives
mat
|
## Getting & Cleaning Data course project: merge the UCI HAR train/test
## sets, attach descriptive activity names, keep only mean/std features,
## and write a tidy table of per-subject, per-activity averages.
library(dplyr)

## Read in the raw files.
activity_labels <- read.table("activity_labels.txt")
features <- read.table("features.txt")
subject_train <- read.table("./train/subject_train.txt")
X_train <- read.table("./train/X_train.txt")
y_train <- read.table("./train/y_train.txt")
subject_test <- read.table("./test/subject_test.txt")
X_test <- read.table("./test/X_test.txt")
y_test <- read.table("./test/y_test.txt")

## Name the measurement columns after the features table.
colnames(features) <- c("V1", "feature")
colnames(X_test) <- features$feature
colnames(X_train) <- features$feature

## Prepend the subject identifier to each measurement table.
test <- cbind(subject_test, X_test)
train <- cbind(subject_train, X_train)
colnames(train)[1] <- "subject"
colnames(test)[1] <- "subject"

## Map activity codes to descriptive names via the labels table.
train_activity <- merge(y_train, activity_labels)
test_activity <- merge(y_test, activity_labels)
test_activity <- rename(test_activity, activity = V2)
train_activity <- rename(train_activity, activity = V2)

## Combine activity columns with the data, then stack test and train.
test <- cbind(test_activity, test)
train <- cbind(train_activity, train)
bigdat <- rbind(test, train)

## Keep only columns whose names mention "mean" or "std".
std_cols <- grep("std", colnames(bigdat))
mean_cols <- grep("mean", colnames(bigdat))
ext_col <- c(mean_cols, std_cols)
new_cols <- bigdat[, c("subject", "activity")]
ext_cols <- bigdat[, ext_col]
df_final <- cbind(new_cols, ext_cols)

## Average every variable for each activity/subject pair and save.
avgs <- df_final %>% group_by(activity, subject) %>% summarise_all(mean)
avgs <- arrange(avgs, subject)
write.table(avgs, file = "averages.txt", row.names = FALSE)
| /run_analysis.R | no_license | chrisfanshier/gettingandcleaningdata_courseproject | R | false | false | 1,699 | r | library(dplyr)
##read in the files
read.table("activity_labels.txt") -> activity_labels
read.table("features.txt") -> features
read.table("./train/subject_train.txt") -> subject_train
read.table("./train/X_train.txt") -> X_train
read.table("./train/y_train.txt") -> y_train
read.table("./test/subject_test.txt") -> subject_test
read.table("./test/X_test.txt") -> X_test
read.table("./test/y_test.txt") -> y_test
## create variable names from table of feature names
colnames(features) <- c("V1", "feature")
colnames(X_test) <- features$feature
colnames(X_train) <- features$feature
##create "subject" variable and combine with data
cbind(subject_test, X_test) -> test
cbind(subject_train, X_train) -> train
colnames(train)[1] <- "subject"
colnames(test)[1] <- "subject"
##combine activity data with main data
merge(y_train, activity_labels) -> train_activity
merge(y_test, activity_labels) -> test_activity
rename(test_activity, activity = V2) -> test_activity
rename(train_activity, activity = V2) -> train_activity
##combine to one dataset
cbind(test_activity, test) -> test
cbind(train_activity, train) -> train
rbind(test, train) -> bigdat
##select only columns with std or mean
grep("std", colnames(bigdat)) -> std_cols
grep("mean", colnames(bigdat)) -> mean_cols
c(mean_cols, std_cols) -> ext_col
bigdat[,c("subject", "activity")] -> new_cols
bigdat[,ext_col] -> ext_cols
cbind(new_cols, ext_cols) -> df_final
##create table of averages grouped by activity and subject
df_final %>% group_by(activity, subject) %>% summarise_all(mean) -> avgs
arrange(avgs, subject) -> avgs
write.table(avgs, file = "averages.txt", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ppc_committees.R
\name{ppc_committees}
\alias{ppc_committees}
\title{Committees}
\usage{
ppc_committees(
congress = "116",
chamber = c("joint", "house", "senate"),
api_key = NULL,
raw = FALSE
)
}
\arguments{
\item{congress}{The number of Congress of interest}
\item{chamber}{Specify the chamber of Congress typically "house" or "senate";
sometimes "both" or "joint"}
\item{api_key}{The actual API key string provided by ProPublica.}
\item{raw}{Logical indicating whether to return the raw response object. The
default (FALSE) parses the content and returns a tibble data frame.}
}
\value{
A data frame of congressional committees information
}
\description{
Lists of Committees
}
\examples{
\dontrun{
## get committees info for house members in 115th congress
h115com <- ppc_committees("115", "house")
}
}
| /man/ppc_committees.Rd | permissive | r-congress/ppcong | R | false | true | 894 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ppc_committees.R
\name{ppc_committees}
\alias{ppc_committees}
\title{Committees}
\usage{
ppc_committees(
congress = "116",
chamber = c("joint", "house", "senate"),
api_key = NULL,
raw = FALSE
)
}
\arguments{
\item{congress}{The number of Congress of interest}
\item{chamber}{Specify the chamber of Congress typically "house" or "senate";
sometimes "both" or "joint"}
\item{api_key}{The actual API key string provided by ProPublica.}
\item{raw}{Logical indicating whether to return the raw response object. The
default (FALSE) parses the content and returns a tibble data frame.}
}
\value{
A data frame of congressional committees information
}
\description{
Lists of Committees
}
\examples{
\dontrun{
## get committees info for house members in 115th congress
h115com <- ppc_committees("115", "house")
}
}
|
# Code modified from
# https://github.com/MarioniLab/MNN2017/blob/master/Simulations/simulateBatches.R
source("func_data.R")
# This script generates some (highly synthetic!) expression data with a batch
# effect and uneven population composition between batches.
# this.dir <- dirname(parent.frame(2)$ofile)
# setwd(this.dir)

ncells <- 2000 # Number of cells per batch
ngenes <- 100  # Number of genes

# Our simulation involves num_clust cell types/components (four here; the
# upstream script said "three", which no longer matched the parameters).
# Cells are distributed according to a bivariate normal in a 2-D biological
# subspace. Each cell type has a different x/y center and a different SD.
num_clust <- 4
xmus <- c(0, 5, 5, 0)
xsds <- c(0.8, 0.1, 0.4, 0.2)
ymus <- c(5, 5, 0, 0)
ysds <- c(0.8, 0.1, 0.4, 0.2)

# Draw (different) random cell-type proportions for the two batches.
set.seed(0)
prop1 <- runif(num_clust, 0, 1)
prop1 <- prop1 / sum(prop1)
set.seed(999)
prop2 <- runif(num_clust, 0, 1)
prop2 <- prop2 / sum(prop2)

# Note that the different centers should not lie on the same y=mx line; this
# represents populations that differ only in library size. Such differences
# should not be present in normalized data, and will be eliminated by the
# cosine normalization step. The centers above are chosen so as to guarantee
# good separation between the different components.

#####################################
# Generating data for batch 1, with a given proportion of cells in each component.
comp1 <- sample(seq_len(num_clust), prob = prop1, size = ncells, replace = TRUE)

# Sampling locations for cells in each component.
set.seed(0)
samples1 <- cbind(rnorm(n = ncells, mean = xmus[comp1], sd = xsds[comp1]),
                  rnorm(n = ncells, mean = ymus[comp1], sd = ysds[comp1]))

# Random projection to an ngenes-dimensional space, to mimic high-dimensional
# expression data.
# BUGFIX: the projection matrix is ngenes x 2, so only ngenes * 2 values are
# needed. The original drew rnorm(ngenes * ncells) and matrix() silently kept
# only the first ngenes * 2 of them (200,000 draws for 200 slots), which also
# advanced the RNG stream far more than intended.
set.seed(0)
proj <- matrix(rnorm(ngenes * 2), nrow = ngenes, ncol = 2)
A1 <- samples1 %*% t(proj)

# Add normally distributed noise.
A1 <- A1 + rnorm(ngenes * ncells)
rownames(A1) <- paste0("Cell", seq_len(ncells), "-1")
colnames(A1) <- paste0("Gene", seq_len(ngenes))

#####################################
# Setting proportions of each of the cell types in batch 2.
comp2 <- sample(seq_len(num_clust), prob = prop2, size = ncells, replace = TRUE)

# Sampling locations for cells in each component.
set.seed(0)
samples2 <- cbind(rnorm(n = ncells, mean = xmus[comp2], sd = xsds[comp2]),
                  rnorm(n = ncells, mean = ymus[comp2], sd = ysds[comp2]))

# Random projection (same projection as batch 1), followed by adding batch
# effects and random noise.
A2 <- samples2 %*% t(proj)
A2 <- A2 + matrix(rep(rnorm(ngenes), each = ncells), ncol = ngenes) # gene-specific batch effect (genes are columns)
A2 <- A2 + rnorm(ngenes * ncells) # noise
rownames(A2) <- paste0("Cell", seq_len(ncells), "-2")
colnames(A2) <- paste0("Gene", seq_len(ngenes))

#####################################
# Save simulated data: genes-by-cells matrices with a constant batch label
# and the per-cell component assignment (write_dataset comes from func_data.R).
write_dataset("gaussian_batch_1.csv", t(A1), rep(1, ncol(t(A1))), comp1)
write_dataset("gaussian_batch_2.csv", t(A2), rep(1, ncol(t(A2))), comp2)
| /old/R/gaussian.R | permissive | garedaba/BERMUDA | R | false | false | 2,889 | r | # Code modified from
# https://github.com/MarioniLab/MNN2017/blob/master/Simulations/simulateBatches.R
source("func_data.R")
# This script generates some (highly synthetic!) expression data with a batch effect
# and uneven population composition between batches.
# this.dir <- dirname(parent.frame(2)$ofile)
# setwd(this.dir)
ncells <- 2000 # Number of cells
ngenes <- 100 # Number of genes
# Our simulation involves three cell types/components.
# Cells are distributed according to a bivariate normal in a 2-D biological subspace.
# Each cell type has a different x/y center and a different SD.
num_clust = 4
xmus <- c(0,5,5,0)
xsds <- c(0.8,0.1,0.4,0.2)
ymus <- c(5,5,0,0)
ysds <- c(0.8,0.1,0.4,0.2)
set.seed(0)
prop1 <- runif(num_clust,0,1)
prop1 = prop1/sum(prop1)
set.seed(999)
prop2 <- runif(num_clust,0,1)
prop2 = prop2/sum(prop2)
# Note that the different centers should not lie on the same y=mx line; this represents populations that differ only in library size.
# Such differences should not be present in normalized data, and will be eliminated by the cosine normalization step.
# The centers above are chosen so as to guarantee good separation between the different components.
#####################################
# Generating data for batch 1, with a given proportion of cells in each component.
comp1 <- sample(1:num_clust, prob=prop1, size=ncells, replace=TRUE)
# Sampling locations for cells in each component.
set.seed(0)
samples1 <- cbind(rnorm(n=ncells, mean=xmus[comp1],sd=xsds[comp1]),
rnorm(n=ncells, mean=ymus[comp1],sd=ysds[comp1]))
# Random projection to D dimensional space, to mimic high-dimensional expression data.
set.seed(0)
proj <- matrix(rnorm(ngenes*ncells), nrow=ngenes, ncol=2)
A1 <- samples1 %*% t(proj)
# Add normally distributed noise.
A1 <- A1 + rnorm(ngenes*ncells)
rownames(A1) <- paste0("Cell", seq_len(ncells), "-1")
colnames(A1) <- paste0("Gene", seq_len(ngenes))
#####################################
# Setting proportions of each of the three cell types in batch 2.
comp2 <- sample(1:num_clust, prob=prop2, size=ncells, replace=TRUE)
# Sampling locations for cells in each component.
set.seed(0)
samples2 <- cbind(rnorm(n=ncells, mean=xmus[comp2], sd=xsds[comp2]),
rnorm(n=ncells, mean=ymus[comp2], sd=ysds[comp2]))
# Random projection, followed by adding batch effects and random noise.
A2 <- samples2 %*% t(proj)
A2 <- A2 + matrix(rep(rnorm(ngenes), each=ncells), ncol=ngenes) # gene-specific batch effect (genes are columns)
A2 <- A2 + rnorm(ngenes*ncells) # noise
rownames(A2) <- paste0("Cell", seq_len(ncells), "-2")
colnames(A2) <- paste0("Gene", seq_len(ngenes))
#####################################
# save simulated data
write_dataset("gaussian_batch_1.csv", t(A1), rep(1, ncol(t(A1))), comp1)
write_dataset("gaussian_batch_2.csv", t(A2), rep(1, ncol(t(A2))), comp2)
|
## Create a special "matrix" object that can cache its inverse.
##
## The returned value is a list of four accessor functions:
##   set(m)      -- store a new matrix and drop any cached inverse
##   get()       -- return the stored matrix
##   setsolve(v) -- store a computed inverse in the cache
##   getsolve()  -- return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  inv_cache <- NULL

  set <- function(new_mat) {
    # Replace the stored matrix and invalidate the stale cache.
    x <<- new_mat
    inv_cache <<- NULL
  }

  get <- function() {
    x
  }

  setsolve <- function(solved) {
    inv_cache <<- solved
  }

  getsolve <- function() {
    inv_cache
  }

  # Expose the accessors as a named list.
  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## Compute (or retrieve from cache) the inverse of the special "matrix"
## object produced by makeCacheMatrix. On a cache hit, a message is
## emitted and the stored inverse is returned; on a miss, the inverse is
## computed with solve(), stored via setsolve(), and returned.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  # Cache hit: reuse the previously computed inverse.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse, store it for next time, return it.
  inv <- solve(x$get(), ...)
  x$setsolve(inv)
  inv
}
| /cachematrix.R | no_license | cookie-z/ProgrammingAssignment2 | R | false | false | 1,816 | r |
##设置一个特殊矩阵对象,并求其逆矩阵(假设矩阵都可逆),
##如果缓存中存在逆矩阵,则从缓存读取,否则重新计算逆矩阵并存入缓存
## makeCacheMatrix函数用于创建可缓存逆矩阵的特殊“矩阵”对象。
makeCacheMatrix <- function(x = matrix()) {
## s 是逆矩阵运算结果,首先在创建矩阵对象时候置为空值
s <- NULL
## set属性,设置原始矩阵数据,并清除缓存数据
set <- function(m1){
x <<- m1
s <<- NULL
}
## get属性,获得原始矩阵
get <- function() x
## 缓存逆矩阵
setsolve <- function(solve) s <<- solve
## 获得缓存逆矩阵
getsolve <- function() s
## 返回特殊对象,是一个list对象
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## cacheSolve 函数用于计算上述makeCacheMatrix返回的特殊“矩阵”的逆矩阵。
## 如果已经计算逆矩阵(且尚未更改矩阵),那么cachesolve将检索缓存中的逆矩阵。
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
## 参数x为特殊矩阵对象
## 首先读取缓存,如果存在逆矩阵,直接返回缓存中的数据
## 并显示 "getting cached data" 示意结果是从缓存中读取
s <- x$getsolve()
if(!is.null(s)) {
message("getting cached data")
return(s)
}
## 如果缓存中不存在逆矩阵的数据,则计算solve()
data <- x$get()
s <- solve(data, ...)
## 将计算结果存入缓存,返回逆矩阵结果
x$setsolve(s)
s
}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.32784410019636e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615832281-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.32784410019636e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
# Build the `atc4` lookup: for every drug in the Connectivity Map (CMap)
# instance table, collect its WHO ATC level-4 codes and save the result as
# package data.
library(reshape2)
setwd("~/Documents/Batcave/GEO/ccdata/data-raw/atc4")
#load in cmap drugs
cmap_instances <- read.table("~/Documents/Batcave/GEO/ccdata/data-raw/raw/cmap_instances_02.csv",
header=T, sep="\t", quote='',
fill=T, stringsAsFactors=F)
drugs <- unique(cmap_instances$cmap_name)
#load in atc codes (WHOCC dump; provides the AllData list)
load("AllData-WHOCC-dump-2016-02-12.RData")
atc <- as.data.frame(AllData[["atc"]], stringsAsFactors=F)
#get atc codes for drugs (7 chars = full level-5 codes) in cmap
atc <- atc[nchar(atc$key) == 7, ]
atc <- atc[atc$name %in% drugs, ]
#obtain list of 4th level atc codes, one list entry per drug
atc4 <- list()
for (drug in unique(atc$name)) {
#full atc codes for this drug
keys <- atc[atc$name == drug, ]$key
#4th level atc codes: strip the trailing two digits of the 7-char code
keys <-gsub("(.+)\\d\\d", "\\1", keys)
atc4[[drug]] <- unique(keys)
}
# NOTE(review): use_data saves each object passed in ...; the second
# positional argument `ccdata` looks like it was meant to name the package,
# but use_data has no such parameter -- it would try to save an object
# called `ccdata`. Probably should be devtools::use_data(atc4) run inside
# the ccdata package root. TODO confirm against devtools/usethis docs.
devtools::use_data(atc4, ccdata)
| /data-raw/atc4/atc4.R | no_license | alexvpickering/ccdata | R | false | false | 868 | r | library(reshape2)
setwd("~/Documents/Batcave/GEO/ccdata/data-raw/atc4")

# CMap instance table: one row per perturbation experiment; drug names
# are in the cmap_name column.
cmap_instances <- read.table("~/Documents/Batcave/GEO/ccdata/data-raw/raw/cmap_instances_02.csv",
                             header=T, sep="\t", quote='',
                             fill=T, stringsAsFactors=F)
drugs <- unique(cmap_instances$cmap_name)

# WHOCC ATC dump; keep only full 7-character (5th-level) codes that
# belong to drugs present in CMap.
load("AllData-WHOCC-dump-2016-02-12.RData")
atc <- as.data.frame(AllData[["atc"]], stringsAsFactors=F)
atc <- atc[nchar(atc$key) == 7, ]
atc <- atc[atc$name %in% drugs, ]

# For each drug, truncate its ATC codes to the 4th level by stripping
# the trailing two digits, keeping unique codes per drug.
drug_names <- unique(atc$name)
atc4 <- lapply(drug_names, function(drug) {
  unique(gsub("(.+)\\d\\d", "\\1", atc$key[atc$name == drug]))
})
names(atc4) <- drug_names

# NOTE(review): use_data() saves each object passed; `ccdata` looks like the
# package name rather than an object and may error here -- confirm intent.
devtools::use_data(atc4, ccdata)
|
#################################################
## Paper Figure 1 ###############################
#################################################
## Here analysis and Plotting
## Einführungsplot (GENERAL --> von Kira kopieren)
library(dplyr)
library(latex2exp)
source("Functions/STACYmap_5.R")
library(PaleoSpec)
ANALYSIS$CORR <- list()

#################################################
## CALCULATION ##################################
#################################################

# 1) FIELD: gridwise correlation of simulated temperature/precipitation
#    with simulated d18O (ISOT) on the 96x73 grid; *_P hold p-values.
ANALYSIS$CORR$FIELD <- list(
  CORR_TEMP_ISOT   = array(dim = c(96, 73)),
  CORR_TEMP_ISOT_P = array(dim = c(96, 73)),
  CORR_PREC_ISOT   = array(dim = c(96, 73)),
  CORR_PREC_ISOT_P = array(dim = c(96, 73))
)

for (lon in 1:96) {
  for (lat in 1:73) {
    # Cells with any missing ISOT value stay NA (arrays are NA-initialised).
    # cor.test() has no na.rm argument (the original one was silently
    # swallowed by ...); it drops incomplete pairs itself via complete.cases.
    if (!any(is.na(DATA_past1000$SIM_yearly$ISOT[lon, lat, ]))) {
      COR_TI <- cor.test(DATA_past1000$SIM_yearly$TEMP[lon, lat, ],
                         DATA_past1000$SIM_yearly$ISOT[lon, lat, ])
      ANALYSIS$CORR$FIELD$CORR_TEMP_ISOT[lon, lat]   <- COR_TI$estimate[[1]]
      ANALYSIS$CORR$FIELD$CORR_TEMP_ISOT_P[lon, lat] <- COR_TI$p.value
      COR_PI <- cor.test(DATA_past1000$SIM_yearly$PREC[lon, lat, ],
                         DATA_past1000$SIM_yearly$ISOT[lon, lat, ])
      ANALYSIS$CORR$FIELD$CORR_PREC_ISOT[lon, lat]   <- COR_PI$estimate[[1]]
      ANALYSIS$CORR$FIELD$CORR_PREC_ISOT_P[lon, lat] <- COR_PI$p.value
    }
  }
}
# Fixed: the original also removed COR_TP, a variable that never exists
# (always emitted a "object not found" warning).
remove(lon, lat, COR_TI, COR_PI)
# 2) POINT: per-entity correlation of each speleothem record
#    (drip-water-equivalent d18O) with the down-sampled simulation.
length_cave <- length(DATA_past1000$CAVES$entity_info$entity_id)

# One row per entity; zero-initialised, overwritten with correlation
# estimates / p-values (or NA) inside the loop below.
ANALYSIS$CORR$POINTS <- as.data.frame(matrix(
  0, nrow = length_cave, ncol = 9,
  dimnames = list(NULL, c("entity_id",
                          "CORR",      "PVALUE",
                          "CORR_TEMP", "PVALUE_TEMP",
                          "CORR_PREC", "PVALUE_PREC",
                          "CORR_pw",   "PVALUE_pw"))
))
# Helper: one simulated series for an entity, resampled onto an equidistant
# grid with step dt so that cor.test() sees paired, evenly spaced samples.
# var names the column in the down-sampled simulation data
# ("ISOT", "TEMP", "PREC" or "ITPC").
sim_on_record_grid <- function(entity, var, dt) {
  sim <- DATA_past1000$CAVES$sim_data_downsampled[[paste0("ENTITY", entity)]]
  PaleoSpec::MakeEquidistant(
    sim$interp_age, sim[[var]],
    time.target = seq(from = FirstElement(sim$interp_age),
                      to   = LastElement(sim$interp_age),
                      by   = dt)
  )
}

for (ii in seq_len(length_cave)) {
  print(ii)
  entity <- DATA_past1000$CAVES$entity_info$entity_id[ii]
  ANALYSIS$CORR$POINTS$entity_id[ii] <- entity  # was assigned twice in the original
  # CAREFUL --> correlation only works for equidistant data points, so both
  # record and simulation are resampled to the record's mean time step.
  rec <- DATA_past1000$CAVES$record_data[[paste0("ENTITY", entity)]]
  diff_dt <- mean(diff(rec$interp_age), na.rm = TRUE)
  # Entities at positions 95, 53 and 109 are excluded explicitly.
  # NOTE(review): the reason is undocumented -- presumably problematic records.
  if (length(rec$interp_age) > 4 && ii != 95 && ii != 53 && ii != 109) {
    #### SIM WITH RECORD
    record <- PaleoSpec::MakeEquidistant(
      rec$interp_age, rec$d18O_dw_eq,
      time.target = seq(from = head(rec$interp_age, n = 1),
                        to   = tail(rec$interp_age, n = 1),
                        by   = diff_dt)
    )
    COR    <- cor.test(record, sim_on_record_grid(entity, "ISOT", diff_dt))
    COR_T  <- cor.test(record, sim_on_record_grid(entity, "TEMP", diff_dt))
    COR_P  <- cor.test(record, sim_on_record_grid(entity, "PREC", diff_dt))
    COR_pw <- cor.test(record, sim_on_record_grid(entity, "ITPC", diff_dt))
    ANALYSIS$CORR$POINTS$CORR[ii]        <- COR$estimate[[1]]
    ANALYSIS$CORR$POINTS$PVALUE[ii]      <- COR$p.value
    ANALYSIS$CORR$POINTS$CORR_TEMP[ii]   <- COR_T$estimate[[1]]
    ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] <- COR_T$p.value
    ANALYSIS$CORR$POINTS$CORR_PREC[ii]   <- COR_P$estimate[[1]]
    ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] <- COR_P$p.value
    ANALYSIS$CORR$POINTS$CORR_pw[ii]     <- COR_pw$estimate[[1]]
    ANALYSIS$CORR$POINTS$PVALUE_pw[ii]   <- COR_pw$p.value
  } else {
    # Record too short (or excluded entity): mark all results as missing.
    ANALYSIS$CORR$POINTS[ii, c("CORR", "PVALUE",
                               "CORR_TEMP", "PVALUE_TEMP",
                               "CORR_PREC", "PVALUE_PREC",
                               "CORR_pw", "PVALUE_pw")] <- NA
  }
}
#################################################
## PLOTS ########################################
#################################################
# Mask grid cells that are non-significant (p > 0.1) or weakly correlated
# (|r| < 0.2), then swap the two longitude halves (columns 49-96 first)
# into the orientation expected by STACYmap.
mask_weak <- function(corr, pval) {
  corr[pval > 0.1] <- NA
  corr[abs(corr) < 0.2] <- NA
  rbind(corr[49:96, 1:73], corr[1:48, 1:73])
}

Plot_lyr_temp_p <- ANALYSIS$CORR$FIELD$CORR_TEMP_ISOT_P
Plot_lyr_prec_p <- ANALYSIS$CORR$FIELD$CORR_PREC_ISOT_P
Plot_lyr_temp <- mask_weak(ANALYSIS$CORR$FIELD$CORR_TEMP_ISOT, Plot_lyr_temp_p)
Plot_lyr_prec <- mask_weak(ANALYSIS$CORR$FIELD$CORR_PREC_ISOT, Plot_lyr_prec_p)
##### Point Layer: per-site correlation values plotted on top of the fields

Point_Lyr_temp <- list(lon = list(), lat = list(), value = list())
Point_Lyr_prec <- list(lon = list(), lat = list(), value = list())

length_cave <- length(DATA_past1000$CAVES$entity_info$site_id)

for (ii in seq_len(length_cave)) {
  site <- DATA_past1000$CAVES$entity_info$site_id[ii]
  print(ii)
  # Entities without a computed temperature correlation are skipped entirely.
  if (is.na(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii])) { next }
  # NOTE(review): this keeps points with p > 0.1 although the plot legend says
  # "p<0.1" and the original (German) comment claimed non-significant points
  # are sorted out. Threshold direction kept as-is -- confirm intent.
  if (ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] > 0.1) {
    Point_Lyr_temp$lon   <- c(Point_Lyr_temp$lon,   DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
    Point_Lyr_temp$lat   <- c(Point_Lyr_temp$lat,   DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
    Point_Lyr_temp$value <- c(Point_Lyr_temp$value, ANALYSIS$CORR$POINTS$CORR_TEMP[ii])
  }
  if (is.na(ANALYSIS$CORR$POINTS$PVALUE_PREC[ii])) { next }
  # Fixed copy-paste bug: the precipitation layer was gated on PVALUE_TEMP
  # instead of PVALUE_PREC in the original.
  if (ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] > 0.1) {
    Point_Lyr_prec$lon   <- c(Point_Lyr_prec$lon,   DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
    Point_Lyr_prec$lat   <- c(Point_Lyr_prec$lat,   DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
    Point_Lyr_prec$value <- c(Point_Lyr_prec$value, ANALYSIS$CORR$POINTS$CORR_PREC[ii])
  }
}

# Flatten the accumulated lists to plain numeric vectors for plotting.
Point_Lyr_temp <- lapply(Point_Lyr_temp, as.numeric)
Point_Lyr_prec <- lapply(Point_Lyr_prec, as.numeric)
GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE <- 3

# Theme tweaks shared by both map panels.
map_theme <- theme(panel.border = element_blank(),
                   legend.background = element_blank(),
                   axis.text = element_blank(),
                   text = element_text(size = 12),
                   legend.title = element_text(size = 12))

plot_temp <- STACYmap(gridlyr = Plot_lyr_temp, centercolor = 0, graticules = TRUE,
                      ptlyr = as.data.frame(Point_Lyr_temp),
                      legend_names = list(grid = 'Temp.-Correlation (p<0.1)')) +
  map_theme

plot_prec <- STACYmap(gridlyr = Plot_lyr_prec, centercolor = 0, graticules = TRUE,
                      ptlyr = as.data.frame(Point_Lyr_prec),
                      legend_names = list(grid = 'Prec.-Correlation (p<0.1)')) +
  map_theme

library(ggpubr)
# Two-panel figure: temperature (A) and precipitation (B) correlation maps.
plot <- ggarrange(plot_temp, plot_prec,
                  labels = c("A", "B"),
                  ncol = 2, nrow = 1)

plot %>% ggsave(filename = paste('Paper_Plot_5_Correlation', 'pdf', sep = '.'),
                plot = ., path = 'Plots',
                width = 2*12, height = 12/8.3*PLOTTING_VARIABLES$HEIGHT,
                units = 'cm', dpi = 'print', device = "pdf")
#################################################
## Here the all in all Plot #####################
#################################################
# source("Functions/projection_ptlyr.R")
# # Grid Layer for plotting:
# # all areas where d18O correlates better with temperature are marked in red
# # all areas where d18O correlates better with precipitation are marked in blue
# Plot_lyr_temp <- CORR_ANALYSIS$GLOBAL_CORRELATION$CORR_TEMP_ISOT
# Plot_lyr_temp_p <- CORR_ANALYSIS$GLOBAL_CORRELATION$CORR_TEMP_ISOT_P
# Plot_lyr_prec <- CORR_ANALYSIS$GLOBAL_CORRELATION$CORR_PREC_ISOT
# Plot_lyr_prec_p <- CORR_ANALYSIS$GLOBAL_CORRELATION$CORR_PREC_ISOT_P
# Plot_lyr_temp[Plot_lyr_temp_p > 0.1] <- 0
# Plot_lyr_temp[abs(Plot_lyr_temp) < 0.2] <- 0
# Plot_lyr_prec[Plot_lyr_prec_p > 0.1] <- 0
# Plot_lyr_prec[abs(Plot_lyr_prec) < 0.2] <- 0
#
# Plot_lyr_2 <- Plot_lyr_temp
# Plot_lyr_3 <- Plot_lyr_prec
#
# Plot_lyr_2[abs(Plot_lyr_prec)>abs(Plot_lyr_temp)] <- 0
# Plot_lyr_3[abs(Plot_lyr_temp)>abs(Plot_lyr_prec)] <- 0
#
# Plot_lyr <- abs(Plot_lyr_2)- abs(Plot_lyr_3)
# Plot_lyr[Plot_lyr == 0] <- NA
#
# Plot_lyr <- rbind(Plot_lyr[49:96,1:73],
# Plot_lyr[1:48,1:73])
#
# remove(Plot_lyr_2, Plot_lyr_3, Plot_lyr_prec, Plot_lyr_prec_p, Plot_lyr_temp, Plot_lyr_temp_p)
#
# ##### Point Layer
#
# # How should points be colored? Is it so relevant if sign is equal?
#
# # 0) Check for significance --> if not then, then put in Point_lyr_2
# # 1) Check for what the absolute corellation is stronger
# # 2) make different shapes depending on sign fitting or not
#
#
# ### HERE HERE HERE ############################
# ## es muss noch angepasst werden, dass alle Punktlisten mit unterschiedlichem Symbol über eine andere Liste gemacht wird.
# Point_Lyr_sign <- list(lon = list(), lat = list(), value = list())
# Point_Lyr_notsign <- list(lon = list(), lat = list(), value = list())
# Point2_Lyr <- list(lon = list(), lat = list(), value = list())
#
# length_cave = length(DATA_past1000$CAVES$entity_info$site_id)
#
# for(ii in 1:length_cave){
# site <- DATA_past1000$CAVES$entity_info$site_id[ii]
# print(ii)
# if(is.na(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii])){next}
# # 1) sortiert aus, was nicht signifikant ist
# if(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] > 0.1 & ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] > 0.1){
# Point2_Lyr$lon = c(Point2_Lyr$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point2_Lyr$lat = c(Point2_Lyr$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point2_Lyr$value = c(Point2_Lyr$value, ANALYSIS$CORR$POINTS$CORR_TEMP[ii])
# # 2) betrachte signifikante Korrelationen:
# }else{
# # 2.1) Nur signifikante Korrelation bei Temp
# if(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] < 0.1 & ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] > 0.1){
# #Check sign to determine shape
# if(sign(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]) == sign(CORR_ANALYSIS$SITE_CORRELATION$CORR_TI[ii])){
# Point_Lyr_sign$lon = c(Point_Lyr_sign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$lat = c(Point_Lyr_sign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$value = c(Point_Lyr_sign$value, abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]))
# }else{
# Point_Lyr_notsign$lon = c(Point_Lyr_notsign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$lat = c(Point_Lyr_notsign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$value = c(Point_Lyr_notsign$value, abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]))
# }
# }
#
# # 2.2) Nur signifikante Korrelation bei Prec
# else if(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] > 0.1 & ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] < 0.1){
# if(sign(ANALYSIS$CORR$POINTS$CORR_PREC[ii]) == sign(CORR_ANALYSIS$SITE_CORRELATION$CORR_PI[ii])){
# Point_Lyr_sign$lon = c(Point_Lyr_sign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$lat = c(Point_Lyr_sign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$value = c(Point_Lyr_sign$value, - abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii]))
# }else{
# Point_Lyr_notsign$lon = c(Point_Lyr_notsign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$lat = c(Point_Lyr_notsign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$value = c(Point_Lyr_notsign$value, - abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii]))
# }
# }
#
# # 2.3) Sowohl signifikant für Prec wie für Temp
# else{
# # 2.3.1) absolute CORR größer für Temp als für Prec
# if(abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]) > abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii])){
# if(sign(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]) == sign(CORR_ANALYSIS$SITE_CORRELATION$CORR_TI[ii])){
# Point_Lyr_sign$lon = c(Point_Lyr_sign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$lat = c(Point_Lyr_sign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$value = c(Point_Lyr_sign$value, abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]))
# }else{
# Point_Lyr_notsign$lon = c(Point_Lyr_notsign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$lat = c(Point_Lyr_notsign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$value = c(Point_Lyr_notsign$value, abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]))}
# }
# # 2.3.2) absolute CORR größer für Prec als für Temp
# else{
# if(sign(ANALYSIS$CORR$POINTS$CORR_PREC[ii]) == sign(CORR_ANALYSIS$SITE_CORRELATION$CORR_PI[ii])){
# Point_Lyr_sign$lon = c(Point_Lyr_sign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$lat = c(Point_Lyr_sign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$value = c(Point_Lyr_sign$value, - abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii]))
# }else{
# Point_Lyr_notsign$lon = c(Point_Lyr_notsign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$lat = c(Point_Lyr_notsign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$value = c(Point_Lyr_notsign$value, - abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii]))}
# }
# }
# }
# }
#
#
#
# Point_Lyr_sign$lon = as.numeric(Point_Lyr_sign$lon)
# Point_Lyr_sign$lat = as.numeric(Point_Lyr_sign$lat)
# Point_Lyr_sign$value = as.numeric(Point_Lyr_sign$value)
#
# Point_Lyr_notsign$lon = as.numeric(Point_Lyr_notsign$lon)
# Point_Lyr_notsign$lat = as.numeric(Point_Lyr_notsign$lat)
# Point_Lyr_notsign$value = as.numeric(Point_Lyr_notsign$value)
#
# Point2_Lyr$lon = as.numeric(Point2_Lyr$lon)
# Point2_Lyr$lat = as.numeric(Point2_Lyr$lat)
# Point2_Lyr$value = as.numeric(Point2_Lyr$value)
#
#
#
# GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE <- 3
#
# Point_Lyr_sign_p <- projection_ptlyr(as.data.frame(Point_Lyr_sign), as.character('+proj=robin +datum=WGS84'))
# Point_Lyr_notsign_p <- projection_ptlyr(as.data.frame(Point_Lyr_notsign), as.character('+proj=robin +datum=WGS84'))
# Point2_Lyr_p <- projection_ptlyr(as.data.frame(Point2_Lyr), as.character('+proj=robin +datum=WGS84'))
#
# remove(Point_Lyr_sign, Point_Lyr_notsign, Point2_Lyr)
#
# # Jetzt existiert ein Plot Layer und 2 Point Layer die man nur noch plotten muss und eine richtige Legende dafür braucht...
#
# source("Functions/STACYmap_6.R")
# source("Functions/STACYmap_5_2_logscale_corr.R")
#
# plot <- STACYmap_isot_corr(gridlyr = Plot_lyr, centercolor = 0, graticules = T,
# legend_names = list(grid = "abs(Corr.)"),
# breaks_isot = c(-1, -0.5, 0, 0.51, 1),
# labels_isot = c(1, "corr prec", "0", "corr temp", 1)) +
# geom_point(data = Point2_Lyr_p, aes(x = long, y = lat, shape = "1"), fill = 'gray', size = GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE-1, show.legend = c(shape = T)) +
# geom_point(data = Point_Lyr_sign_p, aes(x = long, y = lat, fill = layer, shape = "2"), size = GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE, show.legend = c(color = T, shape = T)) +
# geom_point(data = Point_Lyr_notsign_p, aes(x = long, y = lat, fill = layer, shape = "3"), size = GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE, show.legend = c(color = T, shape = T)) +
# scale_shape_manual(name = NULL, labels = c("no corr.-sites", "same sign", "different sign"),
# values = c(20,21,23))+
# #guides(fill = guide_colorbar(label = F, direction = "horizontal", title = "|Corr.| blue prec, red temp")) +
# theme(panel.border = element_blank(),
# legend.background = element_blank(),
# axis.text = element_blank(),
# text = element_text(size = 12),
# legend.title = element_text(size = 12))
#
# plot
#
# plot %>% ggsave(filename = paste('Paper_Plot_5_Correlation', 'pdf', sep = '.'), plot = ., path = 'Plots/Paper',
# width = 2*PLOTTING_VARIABLES$WIDTH, height = 2*PLOTTING_VARIABLES$HEIGHT, units = 'cm', dpi = 'print', device = "pdf")
#
| /Archive/3_Plots_Fig5_Correlation.R | no_license | ginnyweasleyIUP/202002_PaperDraft | R | false | false | 20,415 | r | #################################################
## Paper Figure 1 ###############################
#################################################
## Here analysis and Plotting
## Einführungsplot (GENERAL --> von Kira kopieren)
library(dplyr)
library(latex2exp)
source("Functions/STACYmap_5.R")
library(PaleoSpec)
ANALYSIS$CORR <- list()

#################################################
## CALCULATION ##################################
#################################################

# 1) FIELD: gridwise correlation of simulated temperature/precipitation
#    with simulated d18O (ISOT) on the 96x73 grid; *_P hold p-values.
ANALYSIS$CORR$FIELD <- list(
  CORR_TEMP_ISOT   = array(dim = c(96, 73)),
  CORR_TEMP_ISOT_P = array(dim = c(96, 73)),
  CORR_PREC_ISOT   = array(dim = c(96, 73)),
  CORR_PREC_ISOT_P = array(dim = c(96, 73))
)

for (lon in 1:96) {
  for (lat in 1:73) {
    # Cells with any missing ISOT value stay NA (arrays are NA-initialised).
    # cor.test() has no na.rm argument (the original one was silently
    # swallowed by ...); it drops incomplete pairs itself via complete.cases.
    if (!any(is.na(DATA_past1000$SIM_yearly$ISOT[lon, lat, ]))) {
      COR_TI <- cor.test(DATA_past1000$SIM_yearly$TEMP[lon, lat, ],
                         DATA_past1000$SIM_yearly$ISOT[lon, lat, ])
      ANALYSIS$CORR$FIELD$CORR_TEMP_ISOT[lon, lat]   <- COR_TI$estimate[[1]]
      ANALYSIS$CORR$FIELD$CORR_TEMP_ISOT_P[lon, lat] <- COR_TI$p.value
      COR_PI <- cor.test(DATA_past1000$SIM_yearly$PREC[lon, lat, ],
                         DATA_past1000$SIM_yearly$ISOT[lon, lat, ])
      ANALYSIS$CORR$FIELD$CORR_PREC_ISOT[lon, lat]   <- COR_PI$estimate[[1]]
      ANALYSIS$CORR$FIELD$CORR_PREC_ISOT_P[lon, lat] <- COR_PI$p.value
    }
  }
}
# Fixed: the original also removed COR_TP, a variable that never exists
# (always emitted a "object not found" warning).
remove(lon, lat, COR_TI, COR_PI)
# 2) POINT: per-entity correlation of each speleothem record
#    (drip-water-equivalent d18O) with the down-sampled simulation.
length_cave <- length(DATA_past1000$CAVES$entity_info$entity_id)

# One row per entity; zero-initialised, overwritten with correlation
# estimates / p-values (or NA) inside the loop below.
ANALYSIS$CORR$POINTS <- as.data.frame(matrix(
  0, nrow = length_cave, ncol = 9,
  dimnames = list(NULL, c("entity_id",
                          "CORR",      "PVALUE",
                          "CORR_TEMP", "PVALUE_TEMP",
                          "CORR_PREC", "PVALUE_PREC",
                          "CORR_pw",   "PVALUE_pw"))
))
# Helper: one simulated series for an entity, resampled onto an equidistant
# grid with step dt so that cor.test() sees paired, evenly spaced samples.
# var names the column in the down-sampled simulation data
# ("ISOT", "TEMP", "PREC" or "ITPC").
sim_on_record_grid <- function(entity, var, dt) {
  sim <- DATA_past1000$CAVES$sim_data_downsampled[[paste0("ENTITY", entity)]]
  PaleoSpec::MakeEquidistant(
    sim$interp_age, sim[[var]],
    time.target = seq(from = FirstElement(sim$interp_age),
                      to   = LastElement(sim$interp_age),
                      by   = dt)
  )
}

for (ii in seq_len(length_cave)) {
  print(ii)
  entity <- DATA_past1000$CAVES$entity_info$entity_id[ii]
  ANALYSIS$CORR$POINTS$entity_id[ii] <- entity  # was assigned twice in the original
  # CAREFUL --> correlation only works for equidistant data points, so both
  # record and simulation are resampled to the record's mean time step.
  rec <- DATA_past1000$CAVES$record_data[[paste0("ENTITY", entity)]]
  diff_dt <- mean(diff(rec$interp_age), na.rm = TRUE)
  # Entities at positions 95, 53 and 109 are excluded explicitly.
  # NOTE(review): the reason is undocumented -- presumably problematic records.
  if (length(rec$interp_age) > 4 && ii != 95 && ii != 53 && ii != 109) {
    #### SIM WITH RECORD
    record <- PaleoSpec::MakeEquidistant(
      rec$interp_age, rec$d18O_dw_eq,
      time.target = seq(from = head(rec$interp_age, n = 1),
                        to   = tail(rec$interp_age, n = 1),
                        by   = diff_dt)
    )
    COR    <- cor.test(record, sim_on_record_grid(entity, "ISOT", diff_dt))
    COR_T  <- cor.test(record, sim_on_record_grid(entity, "TEMP", diff_dt))
    COR_P  <- cor.test(record, sim_on_record_grid(entity, "PREC", diff_dt))
    COR_pw <- cor.test(record, sim_on_record_grid(entity, "ITPC", diff_dt))
    ANALYSIS$CORR$POINTS$CORR[ii]        <- COR$estimate[[1]]
    ANALYSIS$CORR$POINTS$PVALUE[ii]      <- COR$p.value
    ANALYSIS$CORR$POINTS$CORR_TEMP[ii]   <- COR_T$estimate[[1]]
    ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] <- COR_T$p.value
    ANALYSIS$CORR$POINTS$CORR_PREC[ii]   <- COR_P$estimate[[1]]
    ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] <- COR_P$p.value
    ANALYSIS$CORR$POINTS$CORR_pw[ii]     <- COR_pw$estimate[[1]]
    ANALYSIS$CORR$POINTS$PVALUE_pw[ii]   <- COR_pw$p.value
  } else {
    # Record too short (or excluded entity): mark all results as missing.
    ANALYSIS$CORR$POINTS[ii, c("CORR", "PVALUE",
                               "CORR_TEMP", "PVALUE_TEMP",
                               "CORR_PREC", "PVALUE_PREC",
                               "CORR_pw", "PVALUE_pw")] <- NA
  }
}
#################################################
## PLOTS ########################################
#################################################
# Mask grid cells that are non-significant (p > 0.1) or weakly correlated
# (|r| < 0.2), then swap the two longitude halves (columns 49-96 first)
# into the orientation expected by STACYmap.
mask_weak <- function(corr, pval) {
  corr[pval > 0.1] <- NA
  corr[abs(corr) < 0.2] <- NA
  rbind(corr[49:96, 1:73], corr[1:48, 1:73])
}

Plot_lyr_temp_p <- ANALYSIS$CORR$FIELD$CORR_TEMP_ISOT_P
Plot_lyr_prec_p <- ANALYSIS$CORR$FIELD$CORR_PREC_ISOT_P
Plot_lyr_temp <- mask_weak(ANALYSIS$CORR$FIELD$CORR_TEMP_ISOT, Plot_lyr_temp_p)
Plot_lyr_prec <- mask_weak(ANALYSIS$CORR$FIELD$CORR_PREC_ISOT, Plot_lyr_prec_p)
##### Point Layer: per-site correlation values plotted on top of the fields

Point_Lyr_temp <- list(lon = list(), lat = list(), value = list())
Point_Lyr_prec <- list(lon = list(), lat = list(), value = list())

length_cave <- length(DATA_past1000$CAVES$entity_info$site_id)

for (ii in seq_len(length_cave)) {
  site <- DATA_past1000$CAVES$entity_info$site_id[ii]
  print(ii)
  # Entities without a computed temperature correlation are skipped entirely.
  if (is.na(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii])) { next }
  # NOTE(review): this keeps points with p > 0.1 although the plot legend says
  # "p<0.1" and the original (German) comment claimed non-significant points
  # are sorted out. Threshold direction kept as-is -- confirm intent.
  if (ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] > 0.1) {
    Point_Lyr_temp$lon   <- c(Point_Lyr_temp$lon,   DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
    Point_Lyr_temp$lat   <- c(Point_Lyr_temp$lat,   DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
    Point_Lyr_temp$value <- c(Point_Lyr_temp$value, ANALYSIS$CORR$POINTS$CORR_TEMP[ii])
  }
  if (is.na(ANALYSIS$CORR$POINTS$PVALUE_PREC[ii])) { next }
  # Fixed copy-paste bug: the precipitation layer was gated on PVALUE_TEMP
  # instead of PVALUE_PREC in the original.
  if (ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] > 0.1) {
    Point_Lyr_prec$lon   <- c(Point_Lyr_prec$lon,   DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
    Point_Lyr_prec$lat   <- c(Point_Lyr_prec$lat,   DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
    Point_Lyr_prec$value <- c(Point_Lyr_prec$value, ANALYSIS$CORR$POINTS$CORR_PREC[ii])
  }
}

# Flatten the accumulated lists to plain numeric vectors for plotting.
Point_Lyr_temp <- lapply(Point_Lyr_temp, as.numeric)
Point_Lyr_prec <- lapply(Point_Lyr_prec, as.numeric)
GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE <- 3

# Theme tweaks shared by both map panels.
map_theme <- theme(panel.border = element_blank(),
                   legend.background = element_blank(),
                   axis.text = element_blank(),
                   text = element_text(size = 12),
                   legend.title = element_text(size = 12))

plot_temp <- STACYmap(gridlyr = Plot_lyr_temp, centercolor = 0, graticules = TRUE,
                      ptlyr = as.data.frame(Point_Lyr_temp),
                      legend_names = list(grid = 'Temp.-Correlation (p<0.1)')) +
  map_theme

plot_prec <- STACYmap(gridlyr = Plot_lyr_prec, centercolor = 0, graticules = TRUE,
                      ptlyr = as.data.frame(Point_Lyr_prec),
                      legend_names = list(grid = 'Prec.-Correlation (p<0.1)')) +
  map_theme

library(ggpubr)
# Two-panel figure: temperature (A) and precipitation (B) correlation maps.
plot <- ggarrange(plot_temp, plot_prec,
                  labels = c("A", "B"),
                  ncol = 2, nrow = 1)

plot %>% ggsave(filename = paste('Paper_Plot_5_Correlation', 'pdf', sep = '.'),
                plot = ., path = 'Plots',
                width = 2*12, height = 12/8.3*PLOTTING_VARIABLES$HEIGHT,
                units = 'cm', dpi = 'print', device = "pdf")
#################################################
## Here the all in all Plot #####################
#################################################
# source("Functions/projection_ptlyr.R")
# # Grid Layer for plotting:
# # all areas where d18O correlates better with temperature are marked in red
# # all areas where d18O correlates better with precipitation are marked in blue
# Plot_lyr_temp <- CORR_ANALYSIS$GLOBAL_CORRELATION$CORR_TEMP_ISOT
# Plot_lyr_temp_p <- CORR_ANALYSIS$GLOBAL_CORRELATION$CORR_TEMP_ISOT_P
# Plot_lyr_prec <- CORR_ANALYSIS$GLOBAL_CORRELATION$CORR_PREC_ISOT
# Plot_lyr_prec_p <- CORR_ANALYSIS$GLOBAL_CORRELATION$CORR_PREC_ISOT_P
# Plot_lyr_temp[Plot_lyr_temp_p > 0.1] <- 0
# Plot_lyr_temp[abs(Plot_lyr_temp) < 0.2] <- 0
# Plot_lyr_prec[Plot_lyr_prec_p > 0.1] <- 0
# Plot_lyr_prec[abs(Plot_lyr_prec) < 0.2] <- 0
#
# Plot_lyr_2 <- Plot_lyr_temp
# Plot_lyr_3 <- Plot_lyr_prec
#
# Plot_lyr_2[abs(Plot_lyr_prec)>abs(Plot_lyr_temp)] <- 0
# Plot_lyr_3[abs(Plot_lyr_temp)>abs(Plot_lyr_prec)] <- 0
#
# Plot_lyr <- abs(Plot_lyr_2)- abs(Plot_lyr_3)
# Plot_lyr[Plot_lyr == 0] <- NA
#
# Plot_lyr <- rbind(Plot_lyr[49:96,1:73],
# Plot_lyr[1:48,1:73])
#
# remove(Plot_lyr_2, Plot_lyr_3, Plot_lyr_prec, Plot_lyr_prec_p, Plot_lyr_temp, Plot_lyr_temp_p)
#
# ##### Point Layer
#
# # How should points be colored? Is it so relevant if sign is equal?
#
# # 0) Check for significance --> if not then, then put in Point_lyr_2
# # 1) Check for what the absolute corellation is stronger
# # 2) make different shapes depending on sign fitting or not
#
#
# ### HERE HERE HERE ############################
# ## es muss noch angepasst werden, dass alle Punktlisten mit unterschiedlichem Symbol über eine andere Liste gemacht wird.
# Point_Lyr_sign <- list(lon = list(), lat = list(), value = list())
# Point_Lyr_notsign <- list(lon = list(), lat = list(), value = list())
# Point2_Lyr <- list(lon = list(), lat = list(), value = list())
#
# length_cave = length(DATA_past1000$CAVES$entity_info$site_id)
#
# for(ii in 1:length_cave){
# site <- DATA_past1000$CAVES$entity_info$site_id[ii]
# print(ii)
# if(is.na(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii])){next}
# # 1) sortiert aus, was nicht signifikant ist
# if(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] > 0.1 & ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] > 0.1){
# Point2_Lyr$lon = c(Point2_Lyr$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point2_Lyr$lat = c(Point2_Lyr$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point2_Lyr$value = c(Point2_Lyr$value, ANALYSIS$CORR$POINTS$CORR_TEMP[ii])
# # 2) betrachte signifikante Korrelationen:
# }else{
# # 2.1) Nur signifikante Korrelation bei Temp
# if(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] < 0.1 & ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] > 0.1){
# #Check sign to determine shape
# if(sign(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]) == sign(CORR_ANALYSIS$SITE_CORRELATION$CORR_TI[ii])){
# Point_Lyr_sign$lon = c(Point_Lyr_sign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$lat = c(Point_Lyr_sign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$value = c(Point_Lyr_sign$value, abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]))
# }else{
# Point_Lyr_notsign$lon = c(Point_Lyr_notsign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$lat = c(Point_Lyr_notsign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$value = c(Point_Lyr_notsign$value, abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]))
# }
# }
#
# # 2.2) Nur signifikante Korrelation bei Prec
# else if(ANALYSIS$CORR$POINTS$PVALUE_TEMP[ii] > 0.1 & ANALYSIS$CORR$POINTS$PVALUE_PREC[ii] < 0.1){
# if(sign(ANALYSIS$CORR$POINTS$CORR_PREC[ii]) == sign(CORR_ANALYSIS$SITE_CORRELATION$CORR_PI[ii])){
# Point_Lyr_sign$lon = c(Point_Lyr_sign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$lat = c(Point_Lyr_sign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$value = c(Point_Lyr_sign$value, - abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii]))
# }else{
# Point_Lyr_notsign$lon = c(Point_Lyr_notsign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$lat = c(Point_Lyr_notsign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$value = c(Point_Lyr_notsign$value, - abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii]))
# }
# }
#
# # 2.3) Sowohl signifikant für Prec wie für Temp
# else{
# # 2.3.1) absolute CORR größer für Temp als für Prec
# if(abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]) > abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii])){
# if(sign(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]) == sign(CORR_ANALYSIS$SITE_CORRELATION$CORR_TI[ii])){
# Point_Lyr_sign$lon = c(Point_Lyr_sign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$lat = c(Point_Lyr_sign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$value = c(Point_Lyr_sign$value, abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]))
# }else{
# Point_Lyr_notsign$lon = c(Point_Lyr_notsign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$lat = c(Point_Lyr_notsign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$value = c(Point_Lyr_notsign$value, abs(ANALYSIS$CORR$POINTS$CORR_TEMP[ii]))}
# }
# # 2.3.2) absolute CORR größer für Prec als für Temp
# else{
# if(sign(ANALYSIS$CORR$POINTS$CORR_PREC[ii]) == sign(CORR_ANALYSIS$SITE_CORRELATION$CORR_PI[ii])){
# Point_Lyr_sign$lon = c(Point_Lyr_sign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$lat = c(Point_Lyr_sign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_sign$value = c(Point_Lyr_sign$value, - abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii]))
# }else{
# Point_Lyr_notsign$lon = c(Point_Lyr_notsign$lon, DATA_past1000$CAVES$site_info$longitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$lat = c(Point_Lyr_notsign$lat, DATA_past1000$CAVES$site_info$latitude[DATA_past1000$CAVES$site_info$site_id == site])
# Point_Lyr_notsign$value = c(Point_Lyr_notsign$value, - abs(ANALYSIS$CORR$POINTS$CORR_PREC[ii]))}
# }
# }
# }
# }
#
#
#
# Point_Lyr_sign$lon = as.numeric(Point_Lyr_sign$lon)
# Point_Lyr_sign$lat = as.numeric(Point_Lyr_sign$lat)
# Point_Lyr_sign$value = as.numeric(Point_Lyr_sign$value)
#
# Point_Lyr_notsign$lon = as.numeric(Point_Lyr_notsign$lon)
# Point_Lyr_notsign$lat = as.numeric(Point_Lyr_notsign$lat)
# Point_Lyr_notsign$value = as.numeric(Point_Lyr_notsign$value)
#
# Point2_Lyr$lon = as.numeric(Point2_Lyr$lon)
# Point2_Lyr$lat = as.numeric(Point2_Lyr$lat)
# Point2_Lyr$value = as.numeric(Point2_Lyr$value)
#
#
#
# GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE <- 3
#
# Point_Lyr_sign_p <- projection_ptlyr(as.data.frame(Point_Lyr_sign), as.character('+proj=robin +datum=WGS84'))
# Point_Lyr_notsign_p <- projection_ptlyr(as.data.frame(Point_Lyr_notsign), as.character('+proj=robin +datum=WGS84'))
# Point2_Lyr_p <- projection_ptlyr(as.data.frame(Point2_Lyr), as.character('+proj=robin +datum=WGS84'))
#
# remove(Point_Lyr_sign, Point_Lyr_notsign, Point2_Lyr)
#
# # Jetzt existiert ein Plot Layer und 2 Point Layer die man nur noch plotten muss und eine richtige Legende dafür braucht...
#
# source("Functions/STACYmap_6.R")
# source("Functions/STACYmap_5_2_logscale_corr.R")
#
# plot <- STACYmap_isot_corr(gridlyr = Plot_lyr, centercolor = 0, graticules = T,
# legend_names = list(grid = "abs(Corr.)"),
# breaks_isot = c(-1, -0.5, 0, 0.51, 1),
# labels_isot = c(1, "corr prec", "0", "corr temp", 1)) +
# geom_point(data = Point2_Lyr_p, aes(x = long, y = lat, shape = "1"), fill = 'gray', size = GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE-1, show.legend = c(shape = T)) +
# geom_point(data = Point_Lyr_sign_p, aes(x = long, y = lat, fill = layer, shape = "2"), size = GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE, show.legend = c(color = T, shape = T)) +
# geom_point(data = Point_Lyr_notsign_p, aes(x = long, y = lat, fill = layer, shape = "3"), size = GLOBAL_STACY_OPTIONS$GLOBAL_POINT_SIZE, show.legend = c(color = T, shape = T)) +
# scale_shape_manual(name = NULL, labels = c("no corr.-sites", "same sign", "different sign"),
# values = c(20,21,23))+
# #guides(fill = guide_colorbar(label = F, direction = "horizontal", title = "|Corr.| blue prec, red temp")) +
# theme(panel.border = element_blank(),
# legend.background = element_blank(),
# axis.text = element_blank(),
# text = element_text(size = 12),
# legend.title = element_text(size = 12))
#
# plot
#
# plot %>% ggsave(filename = paste('Paper_Plot_5_Correlation', 'pdf', sep = '.'), plot = ., path = 'Plots/Paper',
# width = 2*PLOTTING_VARIABLES$WIDTH, height = 2*PLOTTING_VARIABLES$HEIGHT, units = 'cm', dpi = 'print', device = "pdf")
#
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wpd_sql.R
\name{wpd_sql}
\alias{wpd_sql}
\title{wpd_sql}
\usage{
wpd_sql(query_string, ...)
}
\arguments{
\item{query_string}{a query string}
\item{...}{values to put into string, see \link{sprintf}}
}
\description{
wpd_sql
}
| /man/wpd_sql.Rd | no_license | petermeissner/wikipediadumbs | R | false | true | 305 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wpd_sql.R
\name{wpd_sql}
\alias{wpd_sql}
\title{wpd_sql}
\usage{
wpd_sql(query_string, ...)
}
\arguments{
\item{query_string}{a query string}
\item{...}{values to put into string, see \link{sprintf}}
}
\description{
wpd_sql
}
|
# Simulate one attempt at completing a sticker album.
#
# Draws N stickers uniformly at random (with replacement) from 20 distinct
# types and reports whether every type was obtained at least once.
#
# Args:
#   N: number of stickers drawn (default 40).
# Returns:
#   TRUE if all 20 sticker types appear among the N draws, FALSE otherwise.
#
# Fixes vs. original: uses TRUE/FALSE instead of the reassignable T/F
# shorthands, and replaces the manual tally-array + scan loops with a
# vectorized distinct-count check.
stickers <- function(N = 40) {
  draws <- sample(1:20, N, replace = TRUE)
  # The album is complete iff the draws cover all 20 distinct types.
  length(unique(draws)) == 20L
}
# Repeat the sticker-album simulation and count the successes.
#
# Args:
#   n: number of independent simulations to run.
# Returns:
#   The number of runs (out of n) in which a complete set of 20 stickers
#   was collected.
#
# Bug fix: the original `for (i in 1 : n)` iterated over c(1, 0) when
# n == 0, running the body twice instead of zero times; seq_len(n) is the
# safe form.
rep.stickers <- function(n) {
  successes <- 0
  for (i in seq_len(n)) {
    successes <- successes + stickers()
  }
  successes
}
| /R/Homework1/unusedFuncts.r | no_license | 5ko99/FMI-Semester-5 | R | false | false | 296 | r | stickers = function( N = 40 ) {
x = sample( 1 : 20, N, replace = T )
arr = rep.int(F,20)
for(i in 1:N) {
arr[x[i]] = T
}
for(i in 1:20) {
if(arr[i]==F) return(F);
}
return(T);
}
# Repeat the sticker-collection simulation n times and count the successes.
# Returns the number of runs in which stickers() reported a complete set.
# NOTE(review): `1 : n` iterates over c(1, 0) when n == 0, running the body
# twice instead of zero times — confirm callers never pass n = 0
# (seq_len(n) would be the safe form).
rep.stickers = function( n ) {
c = 0
for( i in 1 : n)
c = c + stickers()
return(c)
}
|
## Caching the Inverse of a Matrix
## Matrix inversion is usually a costly computation and there may be some benefit to caching
## the inverse of a matrix rather than compute it repeatedly. The below functions will create and
## cache an inverse of a matrix. The caching will allow retrieval, rather than re-calculation of
## existing inverted matrices.
## The function below creates a matrix wrapper that caches its inverse
## Wrap a matrix in a list of accessor closures that can cache its inverse.
## set()/get() replace and read the underlying matrix; setInverse()/
## getInverse() store and retrieve the cached inverse. The cache is reset to
## NULL whenever the matrix is replaced, so a stale inverse is never served.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## The function below computes the inverse of the wrapped matrix if it has
## not already been calculated. It will return the cached inverse if it exists.
## Return the inverse of a cache-matrix object created by makeCacheMatrix().
## Reuses the cached inverse when one is available (announcing the cache hit
## via message()); otherwise computes the inverse with solve(), stores it in
## the cache, and returns it. Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getInverse()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setInverse(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
| /cachematrix.R | no_license | Daveycarts/ProgrammingAssignment2 | R | false | false | 1,105 | r | ## Caching the Inverse of a Matrix
## Matrix inversion is usually a costly computation and there may be some benefit to caching
## the inverse of a matrix rather than compute it repeatedly. The below functions will create and
## cache an inverse of a matrix. The caching will allow retrieval, rather than re-calculation of
## existing inverted matrices.
## The function below creates a matrix wrapper that caches its inverse
# Create a "matrix" wrapper that can cache its inverse.
# Returns a list of four closures sharing the enclosing environment:
#   set(y)        -- replace the stored matrix and drop any cached inverse
#   get()         -- return the stored matrix
#   setInverse(v) -- store a computed inverse in the cache
#   getInverse()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
i <- NULL  # cached inverse; reset whenever the matrix changes
set <- function(y) {
x <<- y
i <<- NULL  # invalidate the cache: the old inverse no longer applies
}
get <- function() x
setInverse <- function(inverse) i <<- inverse
getInverse <- function() i
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## The function below computes the inverse of the wrapped matrix if it has
## not already been calculated. It will return the cached inverse if it exists.
# Compute (or retrieve) the inverse of a cache-matrix object created by
# makeCacheMatrix(). If an inverse is already cached it is returned directly
# (with a message); otherwise it is computed with solve(), stored via
# setInverse(), and returned. Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getInverse()
if (!is.null(i)) {
message("getting cached data")
return(i)  # cache hit: skip recomputation
}
data <- x$get()
i <- solve(data, ...)
x$setInverse(i)  # remember the result for subsequent calls
i
}
|
\name{glmdm}
\title{Generalized Linear Mixed Dirichlet Model}
\alias{glmdm}
\description{R code for simulation of GLMDM}
\usage{glmdm(formula, family=gaussian, data, num.reps=1000, a1=3, b1=2,
d=0.25, MM=15, VV=30, ...)}
\arguments{
\item{formula}{a symbolic description of the model to be fit.}
\item{family}{a description of the error distribution and link function to be used in the model.}
\item{data}{an optional data frame, list or environment containing the variables in the model.}
\item{num.reps}{num.reps}
\item{a1}{a1}
\item{b1}{b1}
\item{d}{d}
\item{MM}{MM}
\item{VV}{VV}
\item{...}{..}
}
\alias{A.K.can}
\alias{A.K.labels.can}
\alias{A.n.can}
\alias{bb}
\alias{cand}
\alias{eta}
\alias{f.ratio}
\alias{f.y.can}
\alias{f.y.old}
\alias{glmdm}
\alias{j}
\alias{K.can}
\alias{L.m.hat}
\alias{L.m.s.hat}
\alias{like.K.can}
\alias{Lms.hat}
\alias{M}
\alias{m.hat}
\alias{m.hat.s}
\alias{m.hess.s}
\alias{m.hessian}
\alias{Mb}
\alias{mean}
\alias{mean.m}
\alias{meta}
\alias{mle.m}
\alias{mle.m.s}
\alias{mn}
\alias{mult.can}
\alias{mult.old}
\alias{mult.ratio}
\alias{new.q}
\alias{nu}
\alias{p.A.can}
\alias{p.A.old}
\alias{p.ratio}
\alias{pq}
\alias{psi.can}
\alias{rho}
\alias{Sca}
\alias{Sha}
\alias{test}
\alias{var.m}
\alias{X.betaM}
\alias{log}
\alias{...}
\examples{
\dontrun{
data(scotvote)
glmdm.linear.out <- glmdm(PerYesParl ~ PrivateHousingStarts + CouncilTax
+ Percentage5to15 + PrimaryPTRatio + PerBirthsOut + PerClaimantFemale,
data=scotvote, num.reps=5000)
data(ssas)
glmdm.probit.ssas <- glmdm(scotpar2 ~ househld + rsex + rage + relgsums + ptyallgs
+ idlosem + marrmus + ukintnat + natinnat + voiceuk3 + nhssat, data=ssas,
family=binomial(link="probit"), num.reps=10000, log=TRUE)
data(asia)
glmdm.probit.asia <- glmdm(ATT ~ DEM + FED + SYS + AUT, data=asia,
family=binomial(link="probit"), num.reps=10000, log=TRUE)
}
}
| /man/glmdm.Rd | no_license | pnandak/glmdm | R | false | false | 1,907 | rd | \name{glmdm}
\title{Generalized Linear Mixed Dirichlet Model}
\alias{glmdm}
\description{R code for simulation of GLMDM}
\usage{glmdm(formula, family=gaussian, data, num.reps=1000, a1=3, b1=2,
d=0.25, MM=15, VV=30, ...)}
\arguments{
\item{formula}{a symbolic description of the model to be fit.}
\item{family}{a description of the error distribution and link function to be used in the model.}
\item{data}{an optional data frame, list or environment containing the variables in the model.}
\item{num.reps}{num.reps}
\item{a1}{a1}
\item{b1}{b1}
\item{d}{d}
\item{MM}{MM}
\item{VV}{VV}
\item{...}{..}
}
\alias{A.K.can}
\alias{A.K.labels.can}
\alias{A.n.can}
\alias{bb}
\alias{cand}
\alias{eta}
\alias{f.ratio}
\alias{f.y.can}
\alias{f.y.old}
\alias{glmdm}
\alias{j}
\alias{K.can}
\alias{L.m.hat}
\alias{L.m.s.hat}
\alias{like.K.can}
\alias{Lms.hat}
\alias{M}
\alias{m.hat}
\alias{m.hat.s}
\alias{m.hess.s}
\alias{m.hessian}
\alias{Mb}
\alias{mean}
\alias{mean.m}
\alias{meta}
\alias{mle.m}
\alias{mle.m.s}
\alias{mn}
\alias{mult.can}
\alias{mult.old}
\alias{mult.ratio}
\alias{new.q}
\alias{nu}
\alias{p.A.can}
\alias{p.A.old}
\alias{p.ratio}
\alias{pq}
\alias{psi.can}
\alias{rho}
\alias{Sca}
\alias{Sha}
\alias{test}
\alias{var.m}
\alias{X.betaM}
\alias{log}
\alias{...}
\examples{
\dontrun{
data(scotvote)
glmdm.linear.out <- glmdm(PerYesParl ~ PrivateHousingStarts + CouncilTax
+ Percentage5to15 + PrimaryPTRatio + PerBirthsOut + PerClaimantFemale,
data=scotvote, num.reps=5000)
data(ssas)
glmdm.probit.ssas <- glmdm(scotpar2 ~ househld + rsex + rage + relgsums + ptyallgs
+ idlosem + marrmus + ukintnat + natinnat + voiceuk3 + nhssat, data=ssas,
family=binomial(link="probit"), num.reps=10000, log=TRUE)
data(asia)
glmdm.probit.asia <- glmdm(ATT ~ DEM + FED + SYS + AUT, data=asia,
family=binomial(link="probit"), num.reps=10000, log=TRUE)
}
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{mongo.add.user}
\alias{mongo.add.user}
\title{Add a user and password}
\usage{
mongo.add.user(mongo, username, password, db = "admin")
}
\arguments{
\item{mongo}{(\link{mongo}) a mongo connection object.}
\item{username}{(string) username to add.}
\item{password}{(string) password corresponding to username.}
\item{db}{(string) The database on the server to which to add the username
and password.}
}
\description{
Add a user and password to the given database on a MongoDB server for
authentication purposes.
}
\details{
See \url{http://www.mongodb.org/display/DOCS/Security+and+Authentication}.
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo))
mongo.add.user(mongo, "Jeff", "H87b5dog")
}
\seealso{
\code{\link{mongo.authenticate}},\cr \link{mongo},\cr
\code{\link{mongo.create}}.
}
| /man/mongo.add.user.Rd | no_license | agnaldodasilva/rmongodb | R | false | false | 871 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{mongo.add.user}
\alias{mongo.add.user}
\title{Add a user and password}
\usage{
mongo.add.user(mongo, username, password, db = "admin")
}
\arguments{
\item{mongo}{(\link{mongo}) a mongo connection object.}
\item{username}{(string) username to add.}
\item{password}{(string) password corresponding to username.}
\item{db}{(string) The database on the server to which to add the username
and password.}
}
\description{
Add a user and password to the given database on a MongoDB server for
authentication purposes.
}
\details{
See \url{http://www.mongodb.org/display/DOCS/Security+and+Authentication}.
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo))
mongo.add.user(mongo, "Jeff", "H87b5dog")
}
\seealso{
\code{\link{mongo.authenticate}},\cr \link{mongo},\cr
\code{\link{mongo.create}}.
}
|
# Extracted example script for the SeerMapperRegs trXX_dXX datasets: loads
# 2000-census tract boundary data for SEER-registry states and plots each
# loaded state to a PDF as a smoke test that the data files are readable.
library(SeerMapperRegs)
### Name: trXX_dXX
### Title: Provides the U.S. 2000 Census Tract Boundary datasets for 15
### States that have Seer Registries.
### Aliases: trXX_dXX trXX_d00 tr02_d00 tr04_d00 tr06_d00 tr09_d00 tr13_d00
### tr15_d00 tr19_d00 tr21_d00 tr22_d00 tr26_d00 tr34_d00 tr35_d00
### tr40_d00 tr49_d00 tr53_d00
### Keywords: Census2000 Census2010 datasets
### ** Examples
#
# These examples are a test to ensure each census tract file
# can be read and a plot of the state generated.
#
# Two-digit state FIPS codes of the 15 SEER-registry states, with their
# names in matching order.
# NOTE(review): "New Jersery" is misspelled in the name vector — it only
# appears in plot titles, but confirm whether it should read "New Jersey".
stList <- c("02","04","06","09","13",
"15","19","21","22","26",
"34","35","40","49","53")
stName <- c("Alaska","Arizona","California","Connecticut","Georgia",
"Hawaii","Iowa","Kentucky","Louisiana","Michigan",
"New Jersery","New Mexico","Oklahoma","Utah","Washington")
cY <- "00"  # census vintage suffix used in dataset names ("00" = year 2000)
require("sp")
pdf("SeerMapperRegs00-Test-Res.pdf",width=7,height=10)
# NOTE(review): by=6 visits only indices 1, 7 and 13 (3 of the 15 states),
# which contradicts the "each census tract file" comment above — confirm
# whether by=1 was intended or this is a deliberate sampling to save time.
for (stN in seq(from=1, to=length(stList), by=6)) {
stID <- stList[stN]
stNa <- stName[stN]
trFN <- paste0("tr",stID,"_d",cY)  # dataset name, e.g. "tr02_d00"
TT_tr <- paste0("U. S. Census Tracts - ",stNa," Fips=",stID," file=",trFN)
data(list=trFN)   # lazy-load the boundary dataset into the workspace
wrSP <- get(trFN)
str(wrSP)
plot(wrSP,main=TT_tr)
rm(list=trFN)     # free the loaded dataset before the next iteration
}
dev.off()
| /data/genthat_extracted_code/SeerMapperRegs/examples/trXX_dXX.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,312 | r | library(SeerMapperRegs)
### Name: trXX_dXX
### Title: Provides the U.S. 2000 Census Tract Boundary datasets for 15
### States that have Seer Registries.
### Aliases: trXX_dXX trXX_d00 tr02_d00 tr04_d00 tr06_d00 tr09_d00 tr13_d00
### tr15_d00 tr19_d00 tr21_d00 tr22_d00 tr26_d00 tr34_d00 tr35_d00
### tr40_d00 tr49_d00 tr53_d00
### Keywords: Census2000 Census2010 datasets
### ** Examples
#
# These examples are a test to ensure each census tract file
# can be read and a plot of the state generated.
#
stList <- c("02","04","06","09","13",
"15","19","21","22","26",
"34","35","40","49","53")
stName <- c("Alaska","Arizona","California","Connecticut","Georgia",
"Hawaii","Iowa","Kentucky","Louisiana","Michigan",
"New Jersery","New Mexico","Oklahoma","Utah","Washington")
cY <- "00"
require("sp")
pdf("SeerMapperRegs00-Test-Res.pdf",width=7,height=10)
for (stN in seq(from=1, to=length(stList), by=6)) {
stID <- stList[stN]
stNa <- stName[stN]
trFN <- paste0("tr",stID,"_d",cY)
TT_tr <- paste0("U. S. Census Tracts - ",stNa," Fips=",stID," file=",trFN)
data(list=trFN)
wrSP <- get(trFN)
str(wrSP)
plot(wrSP,main=TT_tr)
rm(list=trFN)
}
dev.off()
|
# Hexamer composition of polyadenylation sites.
#
# Builds per-sample hexamer counts at detected pA sites for the control and
# experimental groups, adds PolyA_DB2 annotated sites and a drawn genomic
# background as references, and renders a stacked-bar figure of hexamer
# fractions to <out_dir>/hexamer.pdf.
#
# Fixes vs. original: `args$tmp` relied on partial `$` matching of "tmp" to
# "tmp_dir" (and yields NULL for some args containers) — now uses the
# declared name `args$tmp_dir`; T -> TRUE; 1:length() -> seq_along().
library(argparse)
library(data.table)
source(file.path(Sys.getenv('R_UTIL_APA'), 'paddle.r'))

# Command-line options; the else-branch holds hard-coded defaults kept for
# interactive debugging.
if (TRUE) {
  parser <- ArgumentParser(description = 'prepropa')
  parser$add_argument("-i", "--prepropa", type = "character", required = TRUE,
                      dest = "prepropa",
                      help = "prepropa rd file path")
  parser$add_argument("-d", "--polyadb_fpath", type = "character", required = TRUE,
                      dest = "polyadb_fpath",
                      help = "polya database file path")
  parser$add_argument("-c", "--ctrl", type = "character", required = FALSE,
                      dest = "ctrl", default = "HCT116",
                      help = "control group tag [HCT116]")
  parser$add_argument("-e", "--expr", type = "character", required = FALSE,
                      dest = "expr", default = "DKO",
                      help = "experimental group tag [DKO]")
  parser$add_argument("-t", "--tmp_dir", type = "character", required = FALSE,
                      dest = "tmp_dir", default = "/tmp",
                      help = "temporary directory [/tmp]")
  parser$add_argument("-o", "--out_dir", type = "character", required = TRUE,
                      dest = "out_dir",
                      help = "output directory")
  args <- parser$parse_args()
} else {
  args <- data.table(prepropa = '../../01_wkd/out/03_CallApa/output/prepropa.rd',
                     ctrl = 'HCT116',
                     expr = 'DKO',
                     tmp_dir = "/tmp",
                     polyadb_fpath = "../../data/polya_db2/PolyADB_2.bed",
                     out_dir = "../../01_wkd/out/03_CallApa/output")
}

load(args$prepropa)  # provides `pr` (pA-site object with $pr, $counts, $means)
library(goldmine)

# Want to show HCT/DKO univ vs DB vs Null
hexlist <- list()

# HCT/DKO pA sites: one hexamer count set per sample column that has reads.
pr_count_cln <- colnames(pr$counts$raw)
for (tag in c(args$ctrl, args$expr)) {
  pr_clns <- pr_count_cln[startsWith(pr_count_cln, tag)]
  for (pr_cln in pr_clns) {
    pr_dt <- pr$counts$raw[, eval(pr_cln)]
    pr_dt <- pr_dt[pr_dt > 0]  # keep only sites with nonzero counts in this sample
    pr_of_interest <- names(pr_dt)
    hexlist[[pr_cln]] <- countHexes(pr$pr[(pr$pr$pr %in% pr_of_interest), ])
  }
}
#allpr <- pr$pr
#allpr <- countHexes(allpr)
#hexlist$allpr <- allpr

# PolyA DB 2 annotated sites (BED columns V1-V3 = range, V6 = strand).
pad <- fread(args$polyadb_fpath)
pa <- with(pad, GRanges(V1, IRanges(V2, V3), strand = V6))
pa <- pa[seqnames(pa) %in% handy::chrs()]
pa <- unique(pa)
padp <- countHexes(gr = pa)
hexlist$pAdb2 <- padp

# Null background: random genomic regions matched to expressed sites.
# BUG FIX: was file.path(args$tmp, ...), which depended on `$` partial
# matching of "tmp" to "tmp_dir".
tmp_dir <- file.path(args$tmp_dir, "10_pas_stats_figure")
if (!dir.exists(tmp_dir)) {
  dir.create(tmp_dir)
}
cnt <- pr$means$raw
hctdko <- pr$pr[(cnt[, args$ctrl] >= 10) | (cnt[, args$expr] >= 10)]
hctdko <- countHexes(hctdko)
dgp <- drawGenomePool(query = hctdko,
                      n = 10,
                      genome = "hg19",
                      cachedir = tmp_dir,
                      sync = FALSE)
null <- countHexes(gr = dgp)
hexlist$background <- null

# Make tables: per-run hexamer frequency tables bound into one long table.
hextabs <- lapply(hexlist, function(x) as.data.frame(table(x$hex)))
for (h in seq_along(hextabs)) {
  hextabs[[h]]$run <- names(hexlist)[h]
}
tab <- rbindlist(hextabs)

# Do plots
cols <- c("#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c",
          "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#ffff99", "#b15928",
          "#969696", "#666666")
hexlist_names <- names(hexlist)
L <- length(hexlist_names)
sample_names <- hexlist_names[1:(L - 2)]  # all entries except pAdb2 and background
tab.per <- tab[, list(Var1 = Var1, Freq = Freq / sum(Freq)), by = "run"]
tab.per[run == "background", run := "null"]
tab.per$run <- factor(tab.per$run, levels = c(sample_names, "pAdb2", "null"))
tab.per$motif <- tab.per$Var1

library(ggplot2)
if (!dir.exists(args$out_dir)) {
  dir.create(args$out_dir)
}
out_fpath <- file.path(args$out_dir, 'hexamer.pdf')
pdf(file = out_fpath, width = 4, height = 4)
#ggplot(tab,aes(x=run,y=Freq,fill=Var1)) + geom_bar(stat="identity") + handy::ggnice() + scale_fill_manual(values=cols)
p <- ggplot(tab.per, aes(x = run, y = Freq, fill = motif)) +
  geom_bar(stat = "identity") +
  handy::ggnice() +
  scale_fill_manual(values = cols) +
  labs(y = "Fraction of Regions", x = "") +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
print(p)
dev.off()
if (dir.exists(tmp_dir)) {
  unlink(tmp_dir, recursive = TRUE)
}
| /01_polyAseq/scripts/s03/08_pas_stats_figure.r | no_license | hwanglab/apa_atingLab2019 | R | false | false | 4,048 | r | library(argparse)
library(data.table)
source(file.path(Sys.getenv('R_UTIL_APA'),'paddle.r'))
if (T) {
parser <- ArgumentParser(description='prepropa')
parser$add_argument("-i", "--prepropa", type="character", required=TRUE,
dest="prepropa",
help="prepropa rd file path")
parser$add_argument("-d", "--polyadb_fpath", type="character", required=TRUE,
dest="polyadb_fpath",
help="polya database file path")
parser$add_argument("-c", "--ctrl", type="character", required=FALSE,
dest="ctrl", default="HCT116",
help="control group tag [HCT116]")
parser$add_argument("-e", "--expr", type="character", required=FALSE,
dest="expr", default="DKO",
help="experimental group tag [DKO]")
parser$add_argument("-t", "--tmp_dir", type="character", required=FALSE,
dest="tmp_dir", default="/tmp",
help="temporary directory [/tmp]")
parser$add_argument("-o", "--out_dir", type="character", required=TRUE,
dest="out_dir",
help="output directory")
args <- parser$parse_args()
} else {
args <- data.table(prepropa='../../01_wkd/out/03_CallApa/output/prepropa.rd',
ctrl='HCT116',
expr='DKO',
tmp_dir="/tmp",
polyadb_fpath="../../data/polya_db2/PolyADB_2.bed",
out_dir="../../01_wkd/out/03_CallApa/output")
}
load(args$prepropa)
library(goldmine)
# Want to show HCT/DKO univ vs DB vs Null
hexlist <- list()
# HCT/DKO pA sites
pr_count_cln <- colnames(pr$counts$raw)
for (tag in c(args$ctrl,args$expr)) {
pr_clns <- pr_count_cln[startsWith(pr_count_cln,tag)]
for (pr_cln in pr_clns) {
pr_dt <- pr$counts$raw[,eval(pr_cln)]
pr_dt <- pr_dt[pr_dt>0]
pr_of_interest <- names(pr_dt)
hexlist[[pr_cln]] <- countHexes(pr$pr[(pr$pr$pr %in% pr_of_interest),])
}
}
#allpr <- pr$pr
#allpr <- countHexes(allpr)
#hexlist$allpr <- allpr
# PolyA DB 2
pad <- fread(args$polyadb_fpath)
pa <- with(pad,GRanges(V1,IRanges(V2,V3),strand=V6))
pa <- pa[seqnames(pa) %in% handy::chrs()]
pa <- unique(pa)
padp <- countHexes(gr=pa)
hexlist$pAdb2 <- padp
# Null
tmp_dir <- file.path(args$tmp,"10_pas_stats_figure")
if (!dir.exists(tmp_dir)) {dir.create(tmp_dir)}
cnt <- pr$means$raw
hctdko <- pr$pr[(cnt[,args$ctrl]>=10)|(cnt[,args$expr]>=10)]
hctdko <- countHexes(hctdko)
dgp <- drawGenomePool(query=hctdko,
n=10,
genome="hg19",
cachedir=tmp_dir,
sync=FALSE)
null <- countHexes(gr=dgp)
hexlist$background <- null
# Make tables
hextabs <- lapply(hexlist,function(x) as.data.frame(table(x$hex)))
for(h in 1:length(hextabs))
{
hextabs[[h]]$run <- names(hexlist)[h]
}
tab <- rbindlist(hextabs)
# Do plots
cols <- c("#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f","#ff7f00","#cab2d6","#6a3d9a","#ffff99","#b15928","#969696","#666666")
hexlist_names <- names(hexlist)
L <- length(hexlist_names)
sample_names <- hexlist_names[1:(L-2)]
tab.per <- tab[,list(Var1=Var1,Freq=Freq/sum(Freq)),by="run"]
tab.per[run=="background",run:="null"]
tab.per$run <- factor(tab.per$run,levels=c(sample_names,"pAdb2","null"))
tab.per$motif <- tab.per$Var1
library(ggplot2)
if (!dir.exists(args$out_dir)) {dir.create(args$out_dir)}
out_fpath <- file.path(args$out_dir,'hexamer.pdf')
pdf(file=out_fpath,width=4,height=4)
#ggplot(tab,aes(x=run,y=Freq,fill=Var1)) + geom_bar(stat="identity") + handy::ggnice() + scale_fill_manual(values=cols)
p <- ggplot(tab.per,aes(x=run,y=Freq,fill=motif)) +
geom_bar(stat="identity") +
handy::ggnice() +
scale_fill_manual(values=cols) +
labs(y="Fraction of Regions",x="") +
theme(axis.text.x = element_text(angle = 60, hjust = 1))
print(p)
dev.off()
if (dir.exists(tmp_dir)) {unlink(tmp_dir,recursive = T)}
|
#' @title Double machine learning for partially linear regression models
#'
#' @description
#' Double machine learning for partially linear regression models.
#'
#' @format [R6::R6Class] object inheriting from [DoubleML].
#'
#' @family DoubleML
#' @details
#' Partially linear regression (PLR) models take the form
#'
#' \eqn{Y = D\theta_0 + g_0(X) + \zeta,}
#'
#' \eqn{D = m_0(X) + V,}
#'
#' with \eqn{E[\zeta|D,X]=0} and \eqn{E[V|X] = 0}. \eqn{Y} is the outcome
#' variable and \eqn{D} is the policy variable of interest.
#' The high-dimensional vector \eqn{X = (X_1, \ldots, X_p)} consists of other
#' confounding covariates, and \eqn{\zeta} and \eqn{V} are stochastic errors.
#'
#' @usage NULL
#'
#' @examples
#' \donttest{
#' library(DoubleML)
#' library(mlr3)
#' library(mlr3learners)
#' library(data.table)
#' set.seed(2)
#' ml_g = lrn("regr.ranger", num.trees = 10, max.depth = 2)
#' ml_m = ml_g$clone()
#' obj_dml_data = make_plr_CCDDHNR2018(alpha = 0.5)
#' dml_plr_obj = DoubleMLPLR$new(obj_dml_data, ml_g, ml_m)
#' dml_plr_obj$fit()
#' dml_plr_obj$summary()
#' }
#'
#' \dontrun{
#' library(DoubleML)
#' library(mlr3)
#' library(mlr3learners)
#' library(mlr3tuning)
#' library(data.table)
#' set.seed(2)
#' ml_g = lrn("regr.rpart")
#' ml_m = ml_g$clone()
#' obj_dml_data = make_plr_CCDDHNR2018(alpha = 0.5)
#' dml_plr_obj = DoubleMLPLR$new(obj_dml_data, ml_g, ml_m)
#'
#' param_grid = list(
#' "ml_g" = paradox::ParamSet$new(list(
#' paradox::ParamDbl$new("cp", lower = 0.01, upper = 0.02),
#' paradox::ParamInt$new("minsplit", lower = 1, upper = 2))),
#' "ml_m" = paradox::ParamSet$new(list(
#' paradox::ParamDbl$new("cp", lower = 0.01, upper = 0.02),
#' paradox::ParamInt$new("minsplit", lower = 1, upper = 2))))
#'
#' # minimum requirements for tune_settings
#' tune_settings = list(
#' terminator = mlr3tuning::trm("evals", n_evals = 5),
#' algorithm = mlr3tuning::tnr("grid_search", resolution = 5))
#' dml_plr_obj$tune(param_set = param_grid, tune_settings = tune_settings)
#' dml_plr_obj$fit()
#' dml_plr_obj$summary()
#' }
#' @export
DoubleMLPLR = R6Class("DoubleMLPLR",
inherit = DoubleML, public = list(
#' @description
#' Creates a new instance of this R6 class.
#'
#' @param data (`DoubleMLData`) \cr
#' The `DoubleMLData` object providing the data and specifying the
#' variables of the causal model.
#'
#' @param ml_g ([`LearnerRegr`][mlr3::LearnerRegr], `character(1)`,) \cr
#' An object of the class [mlr3 regression learner][mlr3::LearnerRegr]
#' to pass a learner, possibly with specified parameters, for example
#' `lrn("regr.cv_glmnet", s = "lambda.min")`.
#' Alternatively, a `character(1)` specifying the name of a
#' [mlr3 regression learner][mlr3::LearnerRegr] that is available in
#' [mlr3](https://mlr3.mlr-org.com/index.html) or its extension packages
#' [mlr3learners](https://mlr3learners.mlr-org.com/) or
#' [mlr3extralearners](https://mlr3extralearners.mlr-org.com/),
#' for example `"regr.cv_glmnet"`. \cr
#' `ml_g` refers to the nuisance function \eqn{g_0(X) = E[Y|X]}.
#'
#' @param ml_m ([`LearnerRegr`][mlr3::LearnerRegr],
#' [`LearnerClassif`][mlr3::LearnerClassif], `character(1)`,) \cr
#' An object of the class [mlr3 regression learner][mlr3::LearnerRegr] to
#' pass a learner, possibly with specified parameters, for example
#' `lrn("regr.cv_glmnet", s = "lambda.min")`. For binary treatment
#' variables, an object of the class
#' [`LearnerClassif`][mlr3::LearnerClassif] can be passed, for example
#' `lrn("classif.cv_glmnet", s = "lambda.min")`. Alternatively, a
#' `character(1)` specifying the name of a
#' [mlr3 regression learner][mlr3::LearnerRegr] that is available in
#' [mlr3](https://mlr3.mlr-org.com/index.html) or its extension packages
#' [mlr3learners](https://mlr3learners.mlr-org.com/) or
#' [mlr3extralearners](https://mlr3extralearners.mlr-org.com/), for example
#' `"regr.cv_glmnet"`. \cr
#' `ml_m` refers to the nuisance function \eqn{m_0(X) = E[D|X]}.
#'
#' @param n_folds (`integer(1)`)\cr
#' Number of folds. Default is `5`.
#'
#' @param n_rep (`integer(1)`) \cr
#' Number of repetitions for the sample splitting. Default is `1`.
#'
#' @param score (`character(1)`, `function()`) \cr
#' A `character(1)` (`"partialling out"` or `IV-type`) or a `function()`
#' specifying the score function.
#' If a `function()` is provided, it must be of the form
#' `function(y, d, g_hat, m_hat, smpls)` and
#' the returned output must be a named `list()` with elements `psi_a` and
#' `psi_b`. Default is `"partialling out"`.
#'
#' @param dml_procedure (`character(1)`) \cr
#' A `character(1)` (`"dml1"` or `"dml2"`) specifying the double machine
#' learning algorithm. Default is `"dml2"`.
#'
#' @param draw_sample_splitting (`logical(1)`) \cr
#' Indicates whether the sample splitting should be drawn during
#' initialization of the object. Default is `TRUE`.
#'
#' @param apply_cross_fitting (`logical(1)`) \cr
#' Indicates whether cross-fitting should be applied. Default is `TRUE`.
initialize = function(data,
ml_g,
ml_m,
n_folds = 5,
n_rep = 1,
score = "partialling out",
dml_procedure = "dml2",
draw_sample_splitting = TRUE,
apply_cross_fitting = TRUE) {
super$initialize_double_ml(
data,
n_folds,
n_rep,
score,
dml_procedure,
draw_sample_splitting,
apply_cross_fitting)
private$learner_class = list(
"ml_g" = NULL,
"ml_m" = NULL)
ml_g = private$assert_learner(ml_g, "ml_g", Regr = TRUE, Classif = FALSE)
ml_m = private$assert_learner(ml_m, "ml_m", Regr = TRUE, Classif = TRUE)
self$learner = list(
"ml_g" = ml_g,
"ml_m" = ml_m)
private$initialize_ml_nuisance_params()
}
),
private = list(
n_nuisance = 2,
    # Sets up an empty parameter slot per treatment variable (named after
    # data$d_cols) for each of the two nuisance learners.
    initialize_ml_nuisance_params = function() {
      nuisance = vector("list", self$data$n_treat)
      names(nuisance) = self$data$d_cols
      self$params = list(
        "ml_g" = nuisance,
        "ml_m" = nuisance)
      invisible(self)
    },
    # Estimates both nuisance functions with cross-fitting on the sample
    # splits in `smpls` and computes the score elements from them.
    ml_nuisance_and_score_elements = function(smpls, ...) {
      # Cross-fitted prediction of the outcome regression g_0(X) = E[Y|X].
      g_hat = dml_cv_predict(self$learner$ml_g,
        c(self$data$x_cols, self$data$other_treat_cols),
        self$data$y_col,
        self$data$data_model,
        nuisance_id = "nuis_g",
        smpls = smpls,
        est_params = self$get_params("ml_g"),
        return_train_preds = FALSE,
        learner_class = private$learner_class$ml_g,
        fold_specific_params = private$fold_specific_params)
      # Cross-fitted prediction of the treatment regression m_0(X) = E[D|X].
      m_hat = dml_cv_predict(self$learner$ml_m,
        c(self$data$x_cols, self$data$other_treat_cols),
        self$data$treat_col,
        self$data$data_model,
        nuisance_id = "nuis_m",
        smpls = smpls,
        est_params = self$get_params("ml_m"),
        return_train_preds = FALSE,
        learner_class = private$learner_class$ml_m,
        fold_specific_params = private$fold_specific_params)
      # Observed treatment and outcome vectors.
      d = self$data$data_model[[self$data$treat_col]]
      y = self$data$data_model[[self$data$y_col]]
      res = private$score_elements(y, d, g_hat, m_hat, smpls)
      # Return the raw nuisance predictions alongside psi_a/psi_b.
      res$preds = list(
        "ml_g" = g_hat,
        "ml_m" = m_hat)
      return(res)
    },
    # Computes the elements of the linear score psi = psi_a * theta + psi_b
    # from the nuisance residuals:
    #   "partialling out": psi_a = -(d - m_hat)^2
    #   "IV-type":         psi_a = -(d - m_hat) * d
    #   both:              psi_b =  (d - m_hat) * (y - g_hat)
    # A user-supplied score function must return list(psi_a, psi_b).
    score_elements = function(y, d, g_hat, m_hat, smpls) {
      v_hat = d - m_hat  # residual of the treatment equation
      u_hat = y - g_hat  # residual of the outcome equation
      v_hatd = v_hat * d
      if (is.character(self$score)) {
        if (self$score == "IV-type") {
          psi_a = -v_hatd
        } else if (self$score == "partialling out") {
          psi_a = -v_hat * v_hat
        }
        psi_b = v_hat * u_hat
        psis = list(
          psi_a = psi_a,
          psi_b = psi_b)
      } else if (is.function(self$score)) {
        # Custom score callback; smpls is passed through so it can use
        # the fold structure if needed.
        psis = self$score(y, d, g_hat, m_hat, smpls)
      }
      return(psis)
    },
    # Tunes the hyperparameters of both nuisance learners, either once on
    # the full modeling data (tune_on_folds = FALSE) or separately on each
    # training fold in `smpls`.
    ml_nuisance_tuning = function(smpls, param_set, tune_settings,
      tune_on_folds, ...) {
      if (!tune_on_folds) {
        data_tune_list = list(self$data$data_model)
      } else {
        data_tune_list = lapply(smpls$train_ids, function(x) {
          extract_training_data(self$data$data_model, x)
        })
      }
      # Tune the outcome regression E[Y|X].
      tuning_result_g = dml_tune(self$learner$ml_g,
        c(self$data$x_cols, self$data$other_treat_cols),
        self$data$y_col, data_tune_list,
        nuisance_id = "nuis_g",
        param_set$ml_g, tune_settings,
        tune_settings$measure$ml_g,
        private$learner_class$ml_g)
      # Tune the treatment regression E[D|X].
      tuning_result_m = dml_tune(self$learner$ml_m,
        c(self$data$x_cols, self$data$other_treat_cols),
        self$data$treat_col, data_tune_list,
        nuisance_id = "nuis_m",
        param_set$ml_m, tune_settings,
        tune_settings$measure$ml_m,
        private$learner_class$ml_m)
      tuning_result = list(
        "ml_g" = list(tuning_result_g, params = tuning_result_g$params),
        "ml_m" = list(tuning_result_m, params = tuning_result_m$params))
      return(tuning_result)
    },
check_score = function(score) {
assert(
check_character(score),
check_class(score, "function"))
if (is.character(score)) {
valid_score = c("IV-type", "partialling out")
assertChoice(score, valid_score)
}
return(score)
},
check_data = function(obj_dml_data) {
if (!is.null(obj_dml_data$z_cols)) {
stop(paste(
"Incompatible data.\n", paste(obj_dml_data$z_cols, collapse = ", "),
"has been set as instrumental variable(s).\n",
"To fit a partially linear IV regression model use
DoubleMLPLIV instead of DoubleMLPLR."))
}
return()
}
)
)
| /R/double_ml_plr.R | no_license | anhnguyendepocen/doubleml-for-r | R | false | false | 9,807 | r | #' @title Double machine learning for partially linear regression models
#'
#' @description
#' Double machine learning for partially linear regression models.
#'
#' @format [R6::R6Class] object inheriting from [DoubleML].
#'
#' @family DoubleML
#' @details
#' Partially linear regression (PLR) models take the form
#'
#' \eqn{Y = D\theta_0 + g_0(X) + \zeta,}
#'
#' \eqn{D = m_0(X) + V,}
#'
#' with \eqn{E[\zeta|D,X]=0} and \eqn{E[V|X] = 0}. \eqn{Y} is the outcome
#' variable variable and \eqn{D} is the policy variable of interest.
#' The high-dimensional vector \eqn{X = (X_1, \ldots, X_p)} consists of other
#' confounding covariates, and \eqn{\zeta} and \eqn{V} are stochastic errors.
#'
#' @usage NULL
#'
#' @examples
#' \donttest{
#' library(DoubleML)
#' library(mlr3)
#' library(mlr3learners)
#' library(data.table)
#' set.seed(2)
#' ml_g = lrn("regr.ranger", num.trees = 10, max.depth = 2)
#' ml_m = ml_g$clone()
#' obj_dml_data = make_plr_CCDDHNR2018(alpha = 0.5)
#' dml_plr_obj = DoubleMLPLR$new(obj_dml_data, ml_g, ml_m)
#' dml_plr_obj$fit()
#' dml_plr_obj$summary()
#' }
#'
#' \dontrun{
#' library(DoubleML)
#' library(mlr3)
#' library(mlr3learners)
#' library(mlr3tuning)
#' library(data.table)
#' set.seed(2)
#' ml_g = lrn("regr.rpart")
#' ml_m = ml_g$clone()
#' obj_dml_data = make_plr_CCDDHNR2018(alpha = 0.5)
#' dml_plr_obj = DoubleMLPLR$new(obj_dml_data, ml_g, ml_m)
#'
#' param_grid = list(
#' "ml_g" = paradox::ParamSet$new(list(
#' paradox::ParamDbl$new("cp", lower = 0.01, upper = 0.02),
#' paradox::ParamInt$new("minsplit", lower = 1, upper = 2))),
#' "ml_m" = paradox::ParamSet$new(list(
#' paradox::ParamDbl$new("cp", lower = 0.01, upper = 0.02),
#' paradox::ParamInt$new("minsplit", lower = 1, upper = 2))))
#'
#' # minimum requirements for tune_settings
#' tune_settings = list(
#' terminator = mlr3tuning::trm("evals", n_evals = 5),
#' algorithm = mlr3tuning::tnr("grid_search", resolution = 5))
#' dml_plr_obj$tune(param_set = param_grid, tune_settings = tune_settings)
#' dml_plr_obj$fit()
#' dml_plr_obj$summary()
#' }
#' @export
DoubleMLPLR = R6Class("DoubleMLPLR",
inherit = DoubleML, public = list(
#' @description
#' Creates a new instance of this R6 class.
#'
#' @param data (`DoubleMLData`) \cr
#' The `DoubleMLData` object providing the data and specifying the
#' variables of the causal model.
#'
#' @param ml_g ([`LearnerRegr`][mlr3::LearnerRegr], `character(1)`,) \cr
#' An object of the class [mlr3 regression learner][mlr3::LearnerRegr]
#' to pass a learner, possibly with specified parameters, for example
#' `lrn("regr.cv_glmnet", s = "lambda.min")`.
#' Alternatively, a `character(1)` specifying the name of a
#' [mlr3 regression learner][mlr3::LearnerRegr] that is available in
#' [mlr3](https://mlr3.mlr-org.com/index.html) or its extension packages
#' [mlr3learners](https://mlr3learners.mlr-org.com/) or
#' [mlr3extralearners](https://mlr3extralearners.mlr-org.com/),
#' for example `"regr.cv_glmnet"`. \cr
#' `ml_g` refers to the nuisance function \eqn{g_0(X) = E[Y|X]}.
#'
#' @param ml_m ([`LearnerRegr`][mlr3::LearnerRegr],
#' [`LearnerClassif`][mlr3::LearnerClassif], `character(1)`,) \cr
#' An object of the class [mlr3 regression learner][mlr3::LearnerRegr] to
#' pass a learner, possibly with specified parameters, for example
#' `lrn("regr.cv_glmnet", s = "lambda.min")`. For binary treatment
#' variables, an object of the class
#' [`LearnerClassif`][mlr3::LearnerClassif] can be passed, for example
#' `lrn("classif.cv_glmnet", s = "lambda.min")`. Alternatively, a
#' `character(1)` specifying the name of a
#' [mlr3 regression learner][mlr3::LearnerRegr] that is available in
#' [mlr3](https://mlr3.mlr-org.com/index.html) or its extension packages
#' [mlr3learners](https://mlr3learners.mlr-org.com/) or
#' [mlr3extralearners](https://mlr3extralearners.mlr-org.com/), for example
#' `"regr.cv_glmnet"`. \cr
#' `ml_m` refers to the nuisance function \eqn{m_0(X) = E[D|X]}.
#'
#' @param n_folds (`integer(1)`)\cr
#' Number of folds. Default is `5`.
#'
#' @param n_rep (`integer(1)`) \cr
#' Number of repetitions for the sample splitting. Default is `1`.
#'
#' @param score (`character(1)`, `function()`) \cr
#' A `character(1)` (`"partialling out"` or `IV-type`) or a `function()`
#' specifying the score function.
#' If a `function()` is provided, it must be of the form
#' `function(y, d, g_hat, m_hat, smpls)` and
#' the returned output must be a named `list()` with elements `psi_a` and
#' `psi_b`. Default is `"partialling out"`.
#'
#' @param dml_procedure (`character(1)`) \cr
#' A `character(1)` (`"dml1"` or `"dml2"`) specifying the double machine
#' learning algorithm. Default is `"dml2"`.
#'
#' @param draw_sample_splitting (`logical(1)`) \cr
#' Indicates whether the sample splitting should be drawn during
#' initialization of the object. Default is `TRUE`.
#'
#' @param apply_cross_fitting (`logical(1)`) \cr
#' Indicates whether cross-fitting should be applied. Default is `TRUE`.
    # Constructor: delegates the shared set-up (data, folds, repetitions,
    # score, DML algorithm, sample splitting) to the DoubleML base class,
    # validates the two nuisance learners, and creates empty per-treatment
    # parameter slots.
    initialize = function(data,
      ml_g,
      ml_m,
      n_folds = 5,
      n_rep = 1,
      score = "partialling out",
      dml_procedure = "dml2",
      draw_sample_splitting = TRUE,
      apply_cross_fitting = TRUE) {
      super$initialize_double_ml(
        data,
        n_folds,
        n_rep,
        score,
        dml_procedure,
        draw_sample_splitting,
        apply_cross_fitting)
      # Empty slots for the resolved learner classes -- presumably filled
      # by private$assert_learner() below (confirm in the base class).
      private$learner_class = list(
        "ml_g" = NULL,
        "ml_m" = NULL)
      # ml_g (E[Y|X]) must be a regression learner; ml_m (E[D|X]) may also
      # be a classifier (e.g. for a binary treatment variable).
      ml_g = private$assert_learner(ml_g, "ml_g", Regr = TRUE, Classif = FALSE)
      ml_m = private$assert_learner(ml_m, "ml_m", Regr = TRUE, Classif = TRUE)
      self$learner = list(
        "ml_g" = ml_g,
        "ml_m" = ml_m)
      private$initialize_ml_nuisance_params()
    }
),
private = list(
n_nuisance = 2,
    # Sets up an empty parameter slot per treatment variable (named after
    # data$d_cols) for each of the two nuisance learners.
    initialize_ml_nuisance_params = function() {
      nuisance = vector("list", self$data$n_treat)
      names(nuisance) = self$data$d_cols
      self$params = list(
        "ml_g" = nuisance,
        "ml_m" = nuisance)
      invisible(self)
    },
    # Estimates both nuisance functions with cross-fitting on the sample
    # splits in `smpls` and computes the score elements from them.
    ml_nuisance_and_score_elements = function(smpls, ...) {
      # Cross-fitted prediction of the outcome regression g_0(X) = E[Y|X].
      g_hat = dml_cv_predict(self$learner$ml_g,
        c(self$data$x_cols, self$data$other_treat_cols),
        self$data$y_col,
        self$data$data_model,
        nuisance_id = "nuis_g",
        smpls = smpls,
        est_params = self$get_params("ml_g"),
        return_train_preds = FALSE,
        learner_class = private$learner_class$ml_g,
        fold_specific_params = private$fold_specific_params)
      # Cross-fitted prediction of the treatment regression m_0(X) = E[D|X].
      m_hat = dml_cv_predict(self$learner$ml_m,
        c(self$data$x_cols, self$data$other_treat_cols),
        self$data$treat_col,
        self$data$data_model,
        nuisance_id = "nuis_m",
        smpls = smpls,
        est_params = self$get_params("ml_m"),
        return_train_preds = FALSE,
        learner_class = private$learner_class$ml_m,
        fold_specific_params = private$fold_specific_params)
      # Observed treatment and outcome vectors.
      d = self$data$data_model[[self$data$treat_col]]
      y = self$data$data_model[[self$data$y_col]]
      res = private$score_elements(y, d, g_hat, m_hat, smpls)
      # Return the raw nuisance predictions alongside psi_a/psi_b.
      res$preds = list(
        "ml_g" = g_hat,
        "ml_m" = m_hat)
      return(res)
    },
    # Computes the elements of the linear score psi = psi_a * theta + psi_b
    # from the nuisance residuals:
    #   "partialling out": psi_a = -(d - m_hat)^2
    #   "IV-type":         psi_a = -(d - m_hat) * d
    #   both:              psi_b =  (d - m_hat) * (y - g_hat)
    # A user-supplied score function must return list(psi_a, psi_b).
    score_elements = function(y, d, g_hat, m_hat, smpls) {
      v_hat = d - m_hat  # residual of the treatment equation
      u_hat = y - g_hat  # residual of the outcome equation
      v_hatd = v_hat * d
      if (is.character(self$score)) {
        if (self$score == "IV-type") {
          psi_a = -v_hatd
        } else if (self$score == "partialling out") {
          psi_a = -v_hat * v_hat
        }
        psi_b = v_hat * u_hat
        psis = list(
          psi_a = psi_a,
          psi_b = psi_b)
      } else if (is.function(self$score)) {
        # Custom score callback; smpls is passed through so it can use
        # the fold structure if needed.
        psis = self$score(y, d, g_hat, m_hat, smpls)
      }
      return(psis)
    },
    # Tunes the hyperparameters of both nuisance learners, either once on
    # the full modeling data (tune_on_folds = FALSE) or separately on each
    # training fold in `smpls`.
    ml_nuisance_tuning = function(smpls, param_set, tune_settings,
      tune_on_folds, ...) {
      if (!tune_on_folds) {
        data_tune_list = list(self$data$data_model)
      } else {
        data_tune_list = lapply(smpls$train_ids, function(x) {
          extract_training_data(self$data$data_model, x)
        })
      }
      # Tune the outcome regression E[Y|X].
      tuning_result_g = dml_tune(self$learner$ml_g,
        c(self$data$x_cols, self$data$other_treat_cols),
        self$data$y_col, data_tune_list,
        nuisance_id = "nuis_g",
        param_set$ml_g, tune_settings,
        tune_settings$measure$ml_g,
        private$learner_class$ml_g)
      # Tune the treatment regression E[D|X].
      tuning_result_m = dml_tune(self$learner$ml_m,
        c(self$data$x_cols, self$data$other_treat_cols),
        self$data$treat_col, data_tune_list,
        nuisance_id = "nuis_m",
        param_set$ml_m, tune_settings,
        tune_settings$measure$ml_m,
        private$learner_class$ml_m)
      tuning_result = list(
        "ml_g" = list(tuning_result_g, params = tuning_result_g$params),
        "ml_m" = list(tuning_result_m, params = tuning_result_m$params))
      return(tuning_result)
    },
check_score = function(score) {
assert(
check_character(score),
check_class(score, "function"))
if (is.character(score)) {
valid_score = c("IV-type", "partialling out")
assertChoice(score, valid_score)
}
return(score)
},
check_data = function(obj_dml_data) {
if (!is.null(obj_dml_data$z_cols)) {
stop(paste(
"Incompatible data.\n", paste(obj_dml_data$z_cols, collapse = ", "),
"has been set as instrumental variable(s).\n",
"To fit a partially linear IV regression model use
DoubleMLPLIV instead of DoubleMLPLR."))
}
return()
}
)
)
|
#!/usr/bin/r
# plot3.R
#
# Assignment for https://class.coursera.org/exdata-005
# Energy sub-metering plot for the household power consumption data.
# NOTE(review): hard-coded absolute path; adjust for your machine.
setwd("/Users/peter.nelson/Documents/Coursera/jhds-04-exdata/week1/ExData_Plotting1")
# Pre-filter the raw file to 2007-02-01/2007-02-02 with shell tools:
# keep the header line, then append the matching data rows.
system('head -1 ../data/household_power_consumption.txt > ../data/household_power_consumption_subset.csv')
system('egrep "^[1-2]/2/2007;" ../data/household_power_consumption.txt >> ../data/household_power_consumption_subset.csv')
# load data
houseElecData <- read.table("../data/household_power_consumption_subset.csv", sep=";", header=TRUE)
# convert date column from string to Date
houseElecData$Date <- as.Date(houseElecData$Date, format="%d/%m/%Y")
# convert time column from string to a full date-time
houseElecData$Time <- strptime(paste(houseElecData$Date, houseElecData$Time), format="%Y-%m-%d %H:%M:%S")
# view data
# View(houseElecData)
# confirm no ? in data
# sum(houseElecData$Global_active_power=="?")
# sum(houseElecData$Global_reactive_power=="?")
# sum(houseElecData$Global_intensity=="?")
# sum(houseElecData$Sub_metering_1=="?")
# sum(houseElecData$Sub_metering_2=="?")
# sum(houseElecData$Sub_metering_3=="?")
# Draw the three sub-metering series as lines on one set of axes.
makePlot <- function() {
  plot(houseElecData$Time, houseElecData$Sub_metering_1, main="", xlab="", ylab="Energy Sub Metering", type="n")
  points(houseElecData$Time, houseElecData$Sub_metering_1, col="black", type="l")
  points(houseElecData$Time, houseElecData$Sub_metering_2, col="red"  , type="l")
  points(houseElecData$Time, houseElecData$Sub_metering_3, col="blue" , type="l")
  # BUG FIX: the legend previously labeled all three lines
  # "Sub_metering_1"; label each series with its own name.
  legend(x="topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=1, cex=0.80)
}
# make plot to screen
system("rm plot3.png") # remove existing plot
quartz(width=6, height=6)  # quartz() is macOS-only
makePlot()
# unfortunately, dev.copy doesn't honor the background color, so I have to use png()
# dev.copy(png, file = "plot3.png", width = 480, height = 480, bg="white")
dev.off()
png(file = "plot3.png", width = 480, height = 480, bg="white")
makePlot()
dev.off()
# open the chart, on a mac
system('open plot3.png')
| /plot3.R | no_license | pbnelson/ExData_Plotting1 | R | false | false | 2,152 | r | #!/usr/bin/r
# plot3.R
#
# Assignment for https://class.coursera.org/exdata-005
# Energy sub-metering plot for the household power consumption data.
# NOTE(review): hard-coded absolute path; adjust for your machine.
setwd("/Users/peter.nelson/Documents/Coursera/jhds-04-exdata/week1/ExData_Plotting1")
# Pre-filter the raw file to 2007-02-01/2007-02-02 with shell tools:
# keep the header line, then append the matching data rows.
system('head -1 ../data/household_power_consumption.txt > ../data/household_power_consumption_subset.csv')
system('egrep "^[1-2]/2/2007;" ../data/household_power_consumption.txt >> ../data/household_power_consumption_subset.csv')
# load data
houseElecData <- read.table("../data/household_power_consumption_subset.csv", sep=";", header=TRUE)
# convert date column from string to Date
houseElecData$Date <- as.Date(houseElecData$Date, format="%d/%m/%Y")
# convert time column from string to a full date-time
houseElecData$Time <- strptime(paste(houseElecData$Date, houseElecData$Time), format="%Y-%m-%d %H:%M:%S")
# view data
# View(houseElecData)
# confirm no ? in data
# sum(houseElecData$Global_active_power=="?")
# sum(houseElecData$Global_reactive_power=="?")
# sum(houseElecData$Global_intensity=="?")
# sum(houseElecData$Sub_metering_1=="?")
# sum(houseElecData$Sub_metering_2=="?")
# sum(houseElecData$Sub_metering_3=="?")
# Draw the three sub-metering series as lines on one set of axes.
makePlot <- function() {
  plot(houseElecData$Time, houseElecData$Sub_metering_1, main="", xlab="", ylab="Energy Sub Metering", type="n")
  points(houseElecData$Time, houseElecData$Sub_metering_1, col="black", type="l")
  points(houseElecData$Time, houseElecData$Sub_metering_2, col="red"  , type="l")
  points(houseElecData$Time, houseElecData$Sub_metering_3, col="blue" , type="l")
  # BUG FIX: the legend previously labeled all three lines
  # "Sub_metering_1"; label each series with its own name.
  legend(x="topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=1, cex=0.80)
}
# make plot to screen
system("rm plot3.png") # remove existing plot
quartz(width=6, height=6)  # quartz() is macOS-only
makePlot()
# unfortunately, dev.copy doesn't honor the background color, so I have to use png()
# dev.copy(png, file = "plot3.png", width = 480, height = 480, bg="white")
dev.off()
png(file = "plot3.png", width = 480, height = 480, bg="white")
makePlot()
dev.off()
# open the chart, on a mac
system('open plot3.png')
|
# plot4.R: yearly total PM2.5 emissions from coal-combustion-related
# sources across the US, 1999-2008.
# FIX: ggplot()/geom_bar()/labs() below were used without loading ggplot2.
library(ggplot2)
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
# Subset coal combustion related NEI data: sources whose SCC level-one
# description mentions combustion and whose level-four mentions coal.
combustR <- grepl("comb", SCC$SCC.Level.One, ignore.case=TRUE)
coalR <- grepl("coal", SCC$SCC.Level.Four, ignore.case=TRUE)
combustSCC <- SCC[(combustR & coalR),]$SCC
combustNEI <- NEI[NEI$SCC %in% combustSCC,]
# Plot 4: bar chart of yearly totals, written to plot4.png
png("plot4.png")
p <- ggplot(combustNEI,aes(x = factor(year),y = Emissions/10^5)) +
 geom_bar(stat="identity", fill ="#FF9999", width=0.75) +
 labs(x="year",
 y=expression("Total PM"[2.5]*" Emission (10^5 Tons)"),
 title=expression("PM"[2.5]*" Coal Combustion Source
 Emissions Across US from 1999-2008"))
# FIX: print the ggplot object explicitly so the chart is rendered even
# when this script is run via source() (no top-level auto-printing).
print(p)
dev.off() | /Project2/plot4.R | no_license | nselvak/Exploratory_Data_Analysis | R | false | false | 716 | r |
# plot4.R: yearly total PM2.5 emissions from coal-combustion-related
# sources across the US, 1999-2008.
# FIX: ggplot()/geom_bar()/labs() below were used without loading ggplot2.
library(ggplot2)
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
# Subset coal combustion related NEI data: sources whose SCC level-one
# description mentions combustion and whose level-four mentions coal.
combustR <- grepl("comb", SCC$SCC.Level.One, ignore.case=TRUE)
coalR <- grepl("coal", SCC$SCC.Level.Four, ignore.case=TRUE)
combustSCC <- SCC[(combustR & coalR),]$SCC
combustNEI <- NEI[NEI$SCC %in% combustSCC,]
# Plot 4: bar chart of yearly totals, written to plot4.png
png("plot4.png")
p <- ggplot(combustNEI,aes(x = factor(year),y = Emissions/10^5)) +
 geom_bar(stat="identity", fill ="#FF9999", width=0.75) +
 labs(x="year",
 y=expression("Total PM"[2.5]*" Emission (10^5 Tons)"),
 title=expression("PM"[2.5]*" Coal Combustion Source
 Emissions Across US from 1999-2008"))
# FIX: print the ggplot object explicitly so the chart is rendered even
# when this script is run via source() (no top-level auto-printing).
print(p)
dev.off()
context("func_scans")
library(testthat)
library(bidser)
# All tests use the ds001 example dataset bundled with the bidser
# package as a fixture.
test_that("can extract functional files from bids project", {
  proj <- bids_project(system.file("extdata/ds001", package="bidser"), fmriprep=FALSE)
  fscans <- func_scans(proj)
  expect_equal(length(fscans), 48)
})
test_that("can extract functional files for one subject from bids project", {
  proj <- bids_project(system.file("extdata/ds001", package="bidser"), fmriprep=FALSE)
  fscans <- func_scans(proj, subid="01")
  expect_equal(length(fscans), 3)
})
test_that("attempt to find func_scan with non-existent id should return NULL", {
  proj <- bids_project(system.file("extdata/ds001", package="bidser"), fmriprep=FALSE)
  fscans <- func_scans(proj, subid="junk")
  expect_null(fscans)
})
| /tests/testthat/test_func_scans.R | no_license | bbuchsbaum/bidser | R | false | false | 754 | r | context("func_scans")
library(testthat)
library(bidser)
# All tests use the ds001 example dataset bundled with the bidser
# package as a fixture.
test_that("can extract functional files from bids project", {
  proj <- bids_project(system.file("extdata/ds001", package="bidser"), fmriprep=FALSE)
  fscans <- func_scans(proj)
  expect_equal(length(fscans), 48)
})
test_that("can extract functional files for one subject from bids project", {
  proj <- bids_project(system.file("extdata/ds001", package="bidser"), fmriprep=FALSE)
  fscans <- func_scans(proj, subid="01")
  expect_equal(length(fscans), 3)
})
test_that("attempt to find func_scan with non-existent id should return NULL", {
  proj <- bids_project(system.file("extdata/ds001", package="bidser"), fmriprep=FALSE)
  fscans <- func_scans(proj, subid="junk")
  expect_null(fscans)
})
|
#### MISSING DATA ####
# First build a 50 x 3 matrix and punch random holes (NA) into it.
a <- 1:50 + runif(50,-5,5)
b <- a*0.3 + 4 + runif(50, -5,5)
c <- 25:-24 + rnorm(50,0,6)
a[sample(1:50,5)] <- NA
b[sample(1:50,5)] <- NA
c[sample(1:50,10)] <- NA
abc <- cbind(a,b,c)
# A "clean" dataset via listwise deletion of incomplete rows; the
# "na.action" attribute records which rows were dropped.
na.omit(abc)
attr(na.omit(abc), "na.action")
# Information about the missing values
is.na(abc)
which(is.na(abc))
apply(abc,2,function(x) which(is.na(x)))
apply(abc,2,function(x) table(is.na(x)))
cor(abc)
# FIX: "complete.cases" is not a valid value for cor()'s `use` argument
# and makes cor() error; the intended option is "complete.obs"
# (listwise deletion of rows with any NA).
cor(abc, use="complete.obs")
cor(abc, use="pairwise.complete.obs") | /datos_faltantes.R | no_license | pmtempone/DM_Cs | R | false | false | 638 | r | ####DATOS FALTANTES####
# First build a 50 x 3 matrix and punch random holes (NA) into it.
a <- 1:50 + runif(50,-5,5)
b <- a*0.3 + 4 + runif(50, -5,5)
c <- 25:-24 + rnorm(50,0,6)
a[sample(1:50,5)] <- NA
b[sample(1:50,5)] <- NA
c[sample(1:50,10)] <- NA
abc <- cbind(a,b,c)
# A "clean" dataset via listwise deletion of incomplete rows; the
# "na.action" attribute records which rows were dropped.
na.omit(abc)
attr(na.omit(abc), "na.action")
# Information about the missing values
is.na(abc)
which(is.na(abc))
apply(abc,2,function(x) which(is.na(x)))
apply(abc,2,function(x) table(is.na(x)))
cor(abc)
# FIX: "complete.cases" is not a valid value for cor()'s `use` argument
# and makes cor() error; the intended option is "complete.obs"
# (listwise deletion of rows with any NA).
cor(abc, use="complete.obs")
cor(abc, use="pairwise.complete.obs")
# K-means clustering demo on a small two-variable dataset.
# NOTE: kmeans() starts from random centers, so cluster labels (and
# possibly the partition itself) change between runs; call set.seed()
# first for reproducible results.
A=c(1,1.5,3.5,3.5,4.5,3.5)
B=c(1,2,4,7,5.5,4.5)
marks=data.frame(A,B)
marks
?kmeans  # opens the kmeans help page (interactive sessions only)
(c1=kmeans(marks,3))
cbind(marks,c1$cluster) # each observation with its assigned cluster id
plot(marks,pch=14,col=c1$cluster)
c1$centers
points(c1$centers,col=1:3,pch=20,cex=3)  # overlay the cluster centers
c1$cluster
| /cluster.R | no_license | kashyapmakadia/project2 | R | false | false | 243 | r | #clustering
# K-means clustering demo on a small two-variable dataset.
# NOTE: kmeans() starts from random centers, so cluster labels (and
# possibly the partition itself) change between runs; call set.seed()
# first for reproducible results.
A=c(1,1.5,3.5,3.5,4.5,3.5)
B=c(1,2,4,7,5.5,4.5)
marks=data.frame(A,B)
marks
?kmeans  # opens the kmeans help page (interactive sessions only)
(c1=kmeans(marks,3))
cbind(marks,c1$cluster) # each observation with its assigned cluster id
plot(marks,pch=14,col=c1$cluster)
c1$centers
points(c1$centers,col=1:3,pch=20,cex=3)  # overlay the cluster centers
c1$cluster
|
# scp aob2x@rivanna.hpc.virginia.edu:/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_10cm/parental.Rdata ~/.
### libraries
library(data.table)
library(foreach)
library(ggplot2)
library(patchwork)
### load data
# load() injects the saved objects into the global environment; later
# code relies on them being named `peaks` and `parental`.
load("~/peaks.Rdata")
load("~/parental.Rdata")
setkey(parental, chr, pos)  # keyed so J() joins on (chr, pos) are fast
### function
# Plot parental haplotype alleles in a window around a peak position.
#
# chr.i:  chromosome of the focal peak
# maxPos: focal position (e.g. posMaxGprime of a peak)
# window: half-width in bp plotted on either side of maxPos
#
# Returns a ggplot tile plot: x = individuals (ranked id), y = allele,
# fill = value (as factor).
plotParentalHaplos <- function(chr.i, maxPos, window=10000) {
  # win_start/win_end avoid shadowing base::stop (the original used a
  # local named `stop`).
  win_start <- maxPos - window
  win_end <- maxPos + window
  # Keyed join against the global `parental` table; nomatch=0 drops
  # positions that are absent from it.
  tmp <- parental[J(data.table(chr=chr.i, pos=win_start:win_end)), nomatch=0]
  # FIX: spell out `ties.method` instead of relying on partial argument
  # matching of `ties=`.
  tmp[,id.x:=rank(id, ties.method="min")]
  ggplot(data=tmp, aes(x=id.x, y=allele, fill=as.factor(value))) + geom_tile()
}
# Example: plot the 12th peak using the default 10 kb window.
i <- 12
plotParentalHaplos(
  chr.i = peaks$CHROM[i],
  maxPos = peaks$posMaxGprime[i],
  window = 10000)
| /AlanAnalysis/rQTL/parentalHaplotype.plot.R | no_license | kbkubow/DaphniaPulex20162017Sequencing | R | false | false | 913 | r | # scp aob2x@rivanna.hpc.virginia.edu:/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_10cm/parental.Rdata ~/.
### libraries
library(data.table)
library(foreach)
library(ggplot2)
library(patchwork)
### load data
# load() injects the saved objects into the global environment; later
# code relies on them being named `peaks` and `parental`.
load("~/peaks.Rdata")
load("~/parental.Rdata")
setkey(parental, chr, pos)  # keyed so J() joins on (chr, pos) are fast
### function
# Plot parental haplotype alleles in a window around a peak position.
#
# chr.i:  chromosome of the focal peak
# maxPos: focal position (e.g. posMaxGprime of a peak)
# window: half-width in bp plotted on either side of maxPos
#
# Returns a ggplot tile plot: x = individuals (ranked id), y = allele,
# fill = value (as factor).
plotParentalHaplos <- function(chr.i, maxPos, window=10000) {
  # win_start/win_end avoid shadowing base::stop (the original used a
  # local named `stop`).
  win_start <- maxPos - window
  win_end <- maxPos + window
  # Keyed join against the global `parental` table; nomatch=0 drops
  # positions that are absent from it.
  tmp <- parental[J(data.table(chr=chr.i, pos=win_start:win_end)), nomatch=0]
  # FIX: spell out `ties.method` instead of relying on partial argument
  # matching of `ties=`.
  tmp[,id.x:=rank(id, ties.method="min")]
  ggplot(data=tmp, aes(x=id.x, y=allele, fill=as.factor(value))) + geom_tile()
}
# Example: plot the 12th peak using the default 10 kb window.
i <- 12
plotParentalHaplos(
  chr.i = peaks$CHROM[i],
  maxPos = peaks$posMaxGprime[i],
  window = 10000)
|
context("localize")
# Compare formatCode() output (with its class attribute stripped)
# against an expected character vector of code lines.
expect_code_string <- function(code, expected, ...) {
  expect_equal(unclass(formatCode(code, ...)), expected)
}
# utility function for creating test expectations: prints the result of
# formatCode() as a ready-to-paste c('...', ...) literal with one quoted
# element per formatted line (development helper; not called by tests).
generate_code_string <- function(code, ...) {
  cat("c(", "\n", "'")
  str <- strsplit(formatCode(code, ...), "\n")[[1]]
  cat(str, sep = "',\n'")
  cat("'", ")")
}
# formatCode()/expandChain() must wrap metaReactive bodies that contain
# an early return() in local(), so the generated standalone code stays
# syntactically valid; these tests pin the generated code.
describe(
  "auto-localized expressions", isolate({
    # A reactive whose body needs local(): it uses an early return().
    mr <- metaReactive({
      a <- 1 + 1
      if (T) return("b")
      a + 1
    })
    it("without assignment", {
      expected <- c(
        'local({',
        '  a <- 1 + 1',
        '  if (T) {',
        '    return("b")',
        '  }',
        '  a + 1',
        '})'
      )
      expect_code_string(withMetaMode(mr()), expected)
    })
    it("with assignment", {
      expected <- c(
        'mr <- local({',
        '  a <- 1 + 1',
        '  if (T) {',
        '    return("b")',
        '  }',
        '  a + 1',
        '})',
        'mr'
      )
      expect_code_string(expandChain(mr()), expected)
    })
    it("with chaining", {
      mr2 <- metaReactive({
        ..(mr()) + 1
      })
      expect_code_string(
        expandChain(mr2()),
        c(
          'mr <- local({',
          '  a <- 1 + 1',
          '  if (T) {',
          '    return("b")',
          '  }',
          '  a + 1',
          '})',
          'mr2 <- mr + 1',
          'mr2'
        )
      )
    })
    it("with anonymous functions", {
      # A return() inside an anonymous function must NOT trigger the
      # local() wrapper around the whole expression.
      mrx <- metaReactive({
        unlist(lapply(1:5, function(x) { if (x == 2) return(x) }))
      })
      expect_code_string(
        withMetaMode(mrx()),
        c(
          'unlist(lapply(1:5, function(x) {',
          '  if (x == 2) {',
          '    return(x)',
          '  }',
          '}))'
        )
      )
    })
    it("with already localized expression", {
      # An explicit local() in the source is kept as-is, not doubled.
      mrl <- metaReactive({
        local({
          a <- 1
          a + 2
        })
      })
      expect_code_string(
        withMetaMode(mrl()),
        c(
          'local({',
          '  a <- 1',
          '  a + 2',
          '})'
        )
      )
    })
  })
)
# bindToReturn = TRUE splices a multi-statement metaReactive into the
# expanded code and binds only the final expression to the reactive's
# name, instead of wrapping the whole block in local().
describe(
  "bindToReturn", isolate({
    mr <- metaReactive(bindToReturn = TRUE, {
      a <- 1 + 1
      b <- a + 1
      b + 1
    })
    it("single assign works", {
      expect_code_string(
        expandChain(invisible(mr())),
        c(
          'a <- 1 + 1',
          'b <- a + 1',
          'mr <- b + 1'
        )
      )
    })
    it("double assign works", {
      mr2 <- metaReactive({
        a <- 1 + 1
        b <- a + 1
        b + 1
      }, bindToReturn = TRUE)
      mrx <- metaReactive({
        ..(mr()) + ..(mr2())
      })
      expect_code_string(
        expandChain(mrx()),
        c(
          'a <- 1 + 1',
          'b <- a + 1',
          'mr <- b + 1',
          'a <- 1 + 1',
          'b <- a + 1',
          'mr2 <- b + 1',
          'mrx <- mr + mr2',
          'mrx'
        )
      )
    })
    it("doesn't bind on local", {
      # TODO: maybe this should throw a warning?
      # local = TRUE takes precedence: the block is wrapped in local()
      # and bindToReturn is effectively ignored.
      mr <- metaReactive({
        a <- 1 + 1
        b <- a + 1
        b + 1
      }, local = TRUE, bindToReturn = TRUE)
      expect_code_string(
        expandChain(mr()),
        c(
          'mr <- local({',
          '  a <- 1 + 1',
          '  b <- a + 1',
          '  b + 1',
          '})',
          'mr'
        )
      )
    })
  })
)
| /tests/testthat/test-format.R | no_license | MassaCarolyn/shinymeta | R | false | false | 3,403 | r | context("localize")
# Compare formatCode() output (with its class attribute stripped)
# against an expected character vector of code lines.
expect_code_string <- function(code, expected, ...) {
  expect_equal(unclass(formatCode(code, ...)), expected)
}
# utility function for creating test expectations: prints the result of
# formatCode() as a ready-to-paste c('...', ...) literal with one quoted
# element per formatted line (development helper; not called by tests).
generate_code_string <- function(code, ...) {
  cat("c(", "\n", "'")
  str <- strsplit(formatCode(code, ...), "\n")[[1]]
  cat(str, sep = "',\n'")
  cat("'", ")")
}
# formatCode()/expandChain() must wrap metaReactive bodies that contain
# an early return() in local(), so the generated standalone code stays
# syntactically valid; these tests pin the generated code.
describe(
  "auto-localized expressions", isolate({
    # A reactive whose body needs local(): it uses an early return().
    mr <- metaReactive({
      a <- 1 + 1
      if (T) return("b")
      a + 1
    })
    it("without assignment", {
      expected <- c(
        'local({',
        '  a <- 1 + 1',
        '  if (T) {',
        '    return("b")',
        '  }',
        '  a + 1',
        '})'
      )
      expect_code_string(withMetaMode(mr()), expected)
    })
    it("with assignment", {
      expected <- c(
        'mr <- local({',
        '  a <- 1 + 1',
        '  if (T) {',
        '    return("b")',
        '  }',
        '  a + 1',
        '})',
        'mr'
      )
      expect_code_string(expandChain(mr()), expected)
    })
    it("with chaining", {
      mr2 <- metaReactive({
        ..(mr()) + 1
      })
      expect_code_string(
        expandChain(mr2()),
        c(
          'mr <- local({',
          '  a <- 1 + 1',
          '  if (T) {',
          '    return("b")',
          '  }',
          '  a + 1',
          '})',
          'mr2 <- mr + 1',
          'mr2'
        )
      )
    })
    it("with anonymous functions", {
      # A return() inside an anonymous function must NOT trigger the
      # local() wrapper around the whole expression.
      mrx <- metaReactive({
        unlist(lapply(1:5, function(x) { if (x == 2) return(x) }))
      })
      expect_code_string(
        withMetaMode(mrx()),
        c(
          'unlist(lapply(1:5, function(x) {',
          '  if (x == 2) {',
          '    return(x)',
          '  }',
          '}))'
        )
      )
    })
    it("with already localized expression", {
      # An explicit local() in the source is kept as-is, not doubled.
      mrl <- metaReactive({
        local({
          a <- 1
          a + 2
        })
      })
      expect_code_string(
        withMetaMode(mrl()),
        c(
          'local({',
          '  a <- 1',
          '  a + 2',
          '})'
        )
      )
    })
  })
)
# bindToReturn = TRUE splices a multi-statement metaReactive into the
# expanded code and binds only the final expression to the reactive's
# name, instead of wrapping the whole block in local().
describe(
  "bindToReturn", isolate({
    mr <- metaReactive(bindToReturn = TRUE, {
      a <- 1 + 1
      b <- a + 1
      b + 1
    })
    it("single assign works", {
      expect_code_string(
        expandChain(invisible(mr())),
        c(
          'a <- 1 + 1',
          'b <- a + 1',
          'mr <- b + 1'
        )
      )
    })
    it("double assign works", {
      mr2 <- metaReactive({
        a <- 1 + 1
        b <- a + 1
        b + 1
      }, bindToReturn = TRUE)
      mrx <- metaReactive({
        ..(mr()) + ..(mr2())
      })
      expect_code_string(
        expandChain(mrx()),
        c(
          'a <- 1 + 1',
          'b <- a + 1',
          'mr <- b + 1',
          'a <- 1 + 1',
          'b <- a + 1',
          'mr2 <- b + 1',
          'mrx <- mr + mr2',
          'mrx'
        )
      )
    })
    it("doesn't bind on local", {
      # TODO: maybe this should throw a warning?
      # local = TRUE takes precedence: the block is wrapped in local()
      # and bindToReturn is effectively ignored.
      mr <- metaReactive({
        a <- 1 + 1
        b <- a + 1
        b + 1
      }, local = TRUE, bindToReturn = TRUE)
      expect_code_string(
        expandChain(mr()),
        c(
          'mr <- local({',
          '  a <- 1 + 1',
          '  b <- a + 1',
          '  b + 1',
          '})',
          'mr'
        )
      )
    })
  })
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_probability}
\alias{bin_probability}
\title{Binomial probability function}
\usage{
bin_probability(success, trials, prob)
}
\arguments{
\item{success}{the number of successes}
\item{trials}{the number of trials}
\item{prob}{the probability of success on each trial}
}
\description{
The probability of obtaining a given number of successes in a fixed number of independent binomial trials.
}
\examples{
bin_probability(success = 2, trials = 5, prob = 0.5)
bin_probability(success = 0:2, trials = 5, prob = 0.5)
bin_probability(success = 55, trials = 100, prob = 0.45)
}
| /binomial/man/bin_probability.Rd | no_license | stat133-sp19/hw-stat133-jonisaac | R | false | true | 640 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_probability}
\alias{bin_probability}
\title{Binomial probability function}
\usage{
bin_probability(success, trials, prob)
}
\arguments{
\item{success}{the number of successes}
\item{trials}{the number of trials}
\item{prob}{the probability of success on each trial}
}
\description{
The probability of obtaining a given number of successes in a fixed number of independent binomial trials.
}
\examples{
bin_probability(success = 2, trials = 5, prob = 0.5)
bin_probability(success = 0:2, trials = 5, prob = 0.5)
bin_probability(success = 55, trials = 100, prob = 0.45)
}
|
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of Covid19IncidencePPIandH2RA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Export all results to tables
#'
#' @description
#' Outputs all results to a folder called 'export', and zips them.
#'
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#' @param databaseId A short string for identifying the database (e.g. 'Synpuf').
#' @param databaseName The full name of the database.
#' @param databaseDescription A short description (several sentences) of the database.
#' @param minCellCount The minimum cell count for fields contains person counts or fractions.
#' @param maxCores How many parallel cores should be used? If more cores are made
#' available this can speed up the analyses.
#'
#' @export
# Orchestrates the export: writes each group of result tables as CSV
# files into <outputFolder>/export and bundles them into
# Results_<databaseId>.zip.
exportResults <- function(outputFolder,
                          databaseId,
                          databaseName,
                          databaseDescription,
                          minCellCount = 5,
                          maxCores) {
  exportFolder <- file.path(outputFolder, "export")
  if (!file.exists(exportFolder)) {
    dir.create(exportFolder, recursive = TRUE)
  }
  # Each helper writes its own set of CSV tables into exportFolder.
  exportAnalyses(outputFolder = outputFolder,
                 exportFolder = exportFolder)
  exportExposures(outputFolder = outputFolder,
                  exportFolder = exportFolder)
  exportOutcomes(outputFolder = outputFolder,
                 exportFolder = exportFolder)
  exportMetadata(outputFolder = outputFolder,
                 exportFolder = exportFolder,
                 databaseId = databaseId,
                 databaseName = databaseName,
                 databaseDescription = databaseDescription,
                 minCellCount = minCellCount)
  exportMainResults(outputFolder = outputFolder,
                    exportFolder = exportFolder,
                    databaseId = databaseId,
                    minCellCount = minCellCount,
                    maxCores = maxCores)
  exportDiagnostics(outputFolder = outputFolder,
                    exportFolder = exportFolder,
                    databaseId = databaseId,
                    minCellCount = minCellCount,
                    maxCores = maxCores)
  exportProfiles(outputFolder = outputFolder,
                 exportFolder = exportFolder,
                 databaseId = databaseId,
                 minCellCount = minCellCount,
                 maxCores = maxCores)
  # Add all to zip file -------------------------------------------------------------------------------
  ParallelLogger::logInfo("Adding results to zip file")
  zipName <- file.path(exportFolder, sprintf("Results_%s.zip", databaseId))
  files <- list.files(exportFolder, pattern = ".*\\.csv$")
  # Temporarily change into exportFolder so the zip stores relative
  # paths; on.exit restores the previous working directory.
  oldWd <- setwd(exportFolder)
  on.exit(setwd(oldWd))
  DatabaseConnector::createZipFile(zipFile = zipName, files = files)
  ParallelLogger::logInfo("Results are ready for sharing at:", zipName)
}
# Writes the cohort_method_analysis and covariate_analysis tables as CSV
# files into exportFolder.
#
# outputFolder: folder with the study's intermediate results (expects
#               cmOutput/outcomeModelReference.rds).
# exportFolder: destination folder for the exported CSV files.
exportAnalyses <- function(outputFolder, exportFolder) {
  ParallelLogger::logInfo("Exporting analyses")
  ParallelLogger::logInfo("- cohort_method_analysis table")
  tempFileName <- tempfile()
  # FIX: guarantee the temp file is removed even if an error occurs
  # before the end of the function (the old code only unlinked on the
  # success path).
  on.exit(unlink(tempFileName), add = TRUE)
  cmAnalysisListFile <- system.file("settings",
                                    "cmAnalysisList.json",
                                    package = "Covid19IncidencePPIandH2RA")
  cmAnalysisList <- CohortMethod::loadCmAnalysisList(cmAnalysisListFile)
  # Serialize each analysis object to JSON and capture it as a string in
  # the `definition` column.
  cmAnalysisToRow <- function(cmAnalysis) {
    ParallelLogger::saveSettingsToJson(cmAnalysis, tempFileName)
    row <- tibble::tibble(analysisId = cmAnalysis$analysisId,
                          description = cmAnalysis$description,
                          definition = readChar(tempFileName, file.info(tempFileName)$size))
    return(row)
  }
  cohortMethodAnalysis <- lapply(cmAnalysisList, cmAnalysisToRow)
  cohortMethodAnalysis <- do.call("rbind", cohortMethodAnalysis)
  cohortMethodAnalysis <- unique(cohortMethodAnalysis)
  colnames(cohortMethodAnalysis) <- SqlRender::camelCaseToSnakeCase(colnames(cohortMethodAnalysis))
  fileName <- file.path(exportFolder, "cohort_method_analysis.csv")
  readr::write_csv(cohortMethodAnalysis, fileName)
  ParallelLogger::logInfo("- covariate_analysis table")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  # Pull the covariate-analysis reference from the first CohortMethod
  # data object of each analysis.
  getCovariateAnalyses <- function(cmAnalysis) {
    cmDataFolder <- reference$cohortMethodDataFile[reference$analysisId == cmAnalysis$analysisId][1]
    cmData <- CohortMethod::loadCohortMethodData(file.path(outputFolder, "cmOutput", cmDataFolder))
    if (!is.null(cmData$analysisRef)) {
      covariateAnalysis <- collect(cmData$analysisRef)
      covariateAnalysis <- covariateAnalysis[, c("analysisId", "analysisName")]
      colnames(covariateAnalysis) <- c("covariate_analysis_id", "covariate_analysis_name")
      covariateAnalysis$analysis_id <- cmAnalysis$analysisId
      return(covariateAnalysis)
    } else {
      # No analysis reference: return a zero-row frame with the right
      # columns so do.call("rbind", ...) still works.
      return(data.frame(covariate_analysis_id = 1, covariate_analysis_name = "")[-1,])
    }
  }
  covariateAnalysis <- lapply(cmAnalysisList, getCovariateAnalyses)
  covariateAnalysis <- do.call("rbind", covariateAnalysis)
  fileName <- file.path(exportFolder, "covariate_analysis.csv")
  readr::write_csv(covariateAnalysis, fileName)
}
exportExposures <- function(outputFolder, exportFolder) {
  # Write the exposure_of_interest table: one row per unique target or
  # comparator cohort, including the full ATLAS cohort definition JSON.
  ParallelLogger::logInfo("Exporting exposures")
  ParallelLogger::logInfo("- exposure_of_interest table")
  tcos <- read.csv(system.file("settings", "TcosOfInterest.csv",
                               package = "Covid19IncidencePPIandH2RA"),
                   stringsAsFactors = FALSE)
  cohorts <- read.csv(system.file("settings", "CohortsToCreate.csv",
                                  package = "Covid19IncidencePPIandH2RA"))
  # Build one table row per exposure cohort, embedding the cohort's JSON
  # definition file shipped with the package.
  buildExposureRow <- function(exposureId) {
    matched <- cohorts[cohorts$cohortId == exposureId, ]
    jsonPath <- system.file("cohorts",
                            paste0(as.character(matched$name), ".json"),
                            package = "Covid19IncidencePPIandH2RA")
    tibble::tibble(exposureId = exposureId,
                   exposureName = as.character(matched$atlasName),
                   definition = readChar(jsonPath, file.info(jsonPath)$size))
  }
  exposureIds <- unique(c(tcos$targetId, tcos$comparatorId))
  exposureTable <- do.call("rbind", lapply(exposureIds, buildExposureRow))
  colnames(exposureTable) <- SqlRender::camelCaseToSnakeCase(colnames(exposureTable))
  readr::write_csv(exposureTable, file.path(exportFolder, "exposure_of_interest.csv"))
}
exportOutcomes <- function(outputFolder, exportFolder) {
  # Write the outcome_of_interest, negative_control_outcome, and (when
  # positive controls were synthesized) positive_control_outcome tables.
  ParallelLogger::logInfo("Exporting outcomes")
  ParallelLogger::logInfo("- outcome_of_interest table")
  cohorts <- read.csv(system.file("settings", "CohortsToCreate.csv",
                                  package = "Covid19IncidencePPIandH2RA"))
  # One row per outcome of interest, embedding the cohort's JSON definition.
  buildOutcomeRow <- function(outcomeId) {
    matched <- cohorts[cohorts$cohortId == outcomeId, ]
    jsonPath <- system.file("cohorts",
                            paste0(as.character(matched$name), ".json"),
                            package = "Covid19IncidencePPIandH2RA")
    tibble::tibble(outcomeId = outcomeId,
                   outcomeName = as.character(matched$atlasName),
                   definition = readChar(jsonPath, file.info(jsonPath)$size))
  }
  outcomeTable <- do.call("rbind", lapply(getOutcomesOfInterest(), buildOutcomeRow))
  colnames(outcomeTable) <- SqlRender::camelCaseToSnakeCase(colnames(outcomeTable))
  readr::write_csv(outcomeTable, file.path(exportFolder, "outcome_of_interest.csv"))
  ParallelLogger::logInfo("- negative_control_outcome table")
  negativeControls <- read.csv(system.file("settings", "NegativeControls.csv",
                                           package = "Covid19IncidencePPIandH2RA"))
  # Keep only outcome-type controls and the two columns the results model needs.
  ncOutcomes <- negativeControls[tolower(negativeControls$type) == "outcome",
                                 c("outcomeId", "outcomeName")]
  colnames(ncOutcomes) <- SqlRender::camelCaseToSnakeCase(colnames(ncOutcomes))
  readr::write_csv(ncOutcomes, file.path(exportFolder, "negative_control_outcome.csv"))
  synthesisSummaryFile <- file.path(outputFolder, "SynthesisSummary.csv")
  if (file.exists(synthesisSummaryFile)) {
    positiveControls <- read.csv(synthesisSummaryFile, stringsAsFactors = FALSE)
    # Positive controls are derived from negative controls by injecting a
    # known effect size, so re-read the full control list for the names.
    negativeControls <- read.csv(system.file("settings", "NegativeControls.csv",
                                             package = "Covid19IncidencePPIandH2RA"))
    positiveControls <- merge(positiveControls,
                              negativeControls[, c("outcomeId", "outcomeName")])
    positiveControls$outcomeName <- paste0(positiveControls$outcomeName,
                                           ", RR = ",
                                           positiveControls$targetEffectSize)
    positiveControls <- positiveControls[, c("newOutcomeId",
                                             "outcomeName",
                                             "exposureId",
                                             "outcomeId",
                                             "targetEffectSize")]
    colnames(positiveControls) <- c("outcomeId",
                                    "outcomeName",
                                    "exposureId",
                                    "negativeControlId",
                                    "effectSize")
    colnames(positiveControls) <- SqlRender::camelCaseToSnakeCase(colnames(positiveControls))
    readr::write_csv(positiveControls,
                     file.path(exportFolder, "positive_control_outcome.csv"))
  }
}
exportMetadata <- function(outputFolder,
                           exportFolder,
                           databaseId,
                           databaseName,
                           databaseDescription,
                           minCellCount) {
  # Writes the database, exposure_summary, comparison_summary, attrition,
  # covariate, and cm_follow_up_dist tables to CSV files in exportFolder.
  # Person counts are censored at minCellCount before writing.
  ParallelLogger::logInfo("Exporting metadata")
  # Derive the observed cohort-start date range for target and comparator
  # from one CohortMethodData object per target-comparator pair.
  getInfo <- function(row) {
    cmData <- CohortMethod::loadCohortMethodData(file.path(outputFolder, "cmOutput", row$cohortMethodDataFile))
    info <- cmData$cohorts %>%
      group_by(.data$treatment) %>%
      summarise(minDate = min(.data$cohortStartDate, na.rm = TRUE),
                maxDate = max(.data$cohortStartDate, na.rm = TRUE)) %>%
      ungroup() %>%
      collect()
    info <- tibble::tibble(targetId = row$targetId,
                           comparatorId = row$comparatorId,
                           targetMinDate = info$minDate[info$treatment == 1],
                           targetMaxDate = info$maxDate[info$treatment == 1],
                           comparatorMinDate = info$minDate[info$treatment == 0],
                           comparatorMaxDate = info$maxDate[info$treatment == 0])
    info$comparisonMinDate <- min(info$targetMinDate, info$comparatorMinDate)
    # NOTE(review): min() (not max()) is used for the end of the comparison
    # window, presumably so it closes when either cohort stops accruing — confirm.
    info$comparisonMaxDate <- min(info$targetMaxDate, info$comparatorMaxDate)
    return(info)
  }
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  reference <- unique(reference[, c("targetId", "comparatorId", "cohortMethodDataFile")])
  reference <- split(reference, reference$cohortMethodDataFile)
  info <- lapply(reference, getInfo)
  info <- bind_rows(info)
  ParallelLogger::logInfo("- database table")
  database <- tibble::tibble(database_id = databaseId,
                             database_name = databaseName,
                             description = databaseDescription,
                             is_meta_analysis = 0)
  fileName <- file.path(exportFolder, "database.csv")
  readr::write_csv(database, fileName)
  ParallelLogger::logInfo("- exposure_summary table")
  # Date range per exposure, regardless of whether the exposure served as
  # target or comparator in any given comparison.
  minDates <- rbind(tibble::tibble(exposureId = info$targetId,
                                   minDate = info$targetMinDate),
                    tibble::tibble(exposureId = info$comparatorId,
                                   minDate = info$comparatorMinDate))
  minDates <- aggregate(minDate ~ exposureId, minDates, min)
  maxDates <- rbind(tibble::tibble(exposureId = info$targetId,
                                   maxDate = info$targetMaxDate),
                    tibble::tibble(exposureId = info$comparatorId,
                                   maxDate = info$comparatorMaxDate))
  maxDates <- aggregate(maxDate ~ exposureId, maxDates, max)
  exposureSummary <- merge(minDates, maxDates)
  exposureSummary$databaseId <- databaseId
  colnames(exposureSummary) <- SqlRender::camelCaseToSnakeCase(colnames(exposureSummary))
  fileName <- file.path(exportFolder, "exposure_summary.csv")
  readr::write_csv(exposureSummary, fileName)
  ParallelLogger::logInfo("- comparison_summary table")
  minDates <- aggregate(comparisonMinDate ~ targetId + comparatorId, info, min)
  maxDates <- aggregate(comparisonMaxDate ~ targetId + comparatorId, info, max)
  comparisonSummary <- merge(minDates, maxDates)
  comparisonSummary$databaseId <- databaseId
  colnames(comparisonSummary)[colnames(comparisonSummary) == "comparisonMinDate"] <- "minDate"
  colnames(comparisonSummary)[colnames(comparisonSummary) == "comparisonMaxDate"] <- "maxDate"
  colnames(comparisonSummary) <- SqlRender::camelCaseToSnakeCase(colnames(comparisonSummary))
  fileName <- file.path(exportFolder, "comparison_summary.csv")
  readr::write_csv(comparisonSummary, fileName)
  ParallelLogger::logInfo("- attrition table")
  fileName <- file.path(exportFolder, "attrition.csv")
  if (file.exists(fileName)) {
    unlink(fileName)
  }
  outcomesOfInterest <- getOutcomesOfInterest()
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  reference <- reference[reference$outcomeId %in% outcomesOfInterest, ]
  first <- !file.exists(fileName)
  pb <- txtProgressBar(style = 3)
  # Stream attrition rows to CSV one outcome model at a time to limit memory
  # use; each model contributes one set of rows per exposure.
  for (i in seq_len(nrow(reference))) {
    outcomeModel <- readRDS(file.path(outputFolder,
                                      "cmOutput",
                                      reference$outcomeModelFile[i]))
    attrition <- outcomeModel$attrition[, c("description", "targetPersons", "comparatorPersons")]
    attrition$sequenceNumber <- seq_len(nrow(attrition))
    attrition1 <- attrition[, c("sequenceNumber", "description", "targetPersons")]
    colnames(attrition1)[3] <- "subjects"
    attrition1$exposureId <- reference$targetId[i]
    attrition2 <- attrition[, c("sequenceNumber", "description", "comparatorPersons")]
    colnames(attrition2)[3] <- "subjects"
    attrition2$exposureId <- reference$comparatorId[i]
    attrition <- rbind(attrition1, attrition2)
    attrition$targetId <- reference$targetId[i]
    attrition$comparatorId <- reference$comparatorId[i]
    attrition$analysisId <- reference$analysisId[i]
    attrition$outcomeId <- reference$outcomeId[i]
    attrition$databaseId <- databaseId
    attrition <- attrition[, c("databaseId",
                               "exposureId",
                               "targetId",
                               "comparatorId",
                               "outcomeId",
                               "analysisId",
                               "sequenceNumber",
                               "description",
                               "subjects")]
    # Censor small person counts before they leave the secure environment.
    attrition <- enforceMinCellValue(attrition, "subjects", minCellCount, silent = TRUE)
    colnames(attrition) <- SqlRender::camelCaseToSnakeCase(colnames(attrition))
    write.table(x = attrition,
                file = fileName,
                row.names = FALSE,
                col.names = first,
                sep = ",",
                dec = ".",
                qmethod = "double",
                append = !first)
    first <- FALSE
    # Only update the progress bar every 100 iterations to reduce overhead.
    if (i %% 100 == 10) {
      setTxtProgressBar(pb, i/nrow(reference))
    }
  }
  setTxtProgressBar(pb, 1)
  close(pb)
  ParallelLogger::logInfo("- covariate table")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  getCovariates <- function(analysisId) {
    # Use the first CohortMethodData file belonging to this analysis.
    # Fixed: the original indexed by position (reference$cohortMethodDataFile[analysisId]),
    # which is only correct when analysis IDs happen to equal row numbers.
    cmDataFolder <- reference$cohortMethodDataFile[reference$analysisId == analysisId][1]
    cmData <- CohortMethod::loadCohortMethodData(file.path(outputFolder, "cmOutput", cmDataFolder))
    covariateRef <- collect(cmData$covariateRef)
    if (nrow(covariateRef) > 0) {
      covariateRef <- covariateRef[, c("covariateId", "covariateName", "analysisId")]
      colnames(covariateRef) <- c("covariateId", "covariateName", "covariateAnalysisId")
      covariateRef$analysisId <- analysisId
      return(covariateRef)
    } else {
      # Zero-row placeholder with the expected columns.
      # Fixed: [-1] dropped the first *column*; [-1, ] drops the first row.
      return(data.frame(analysisId = analysisId, covariateId = 1, covariateName = "", covariateAnalysisId = 1)[-1, ])
    }
  }
  covariates <- lapply(unique(reference$analysisId), getCovariates)
  covariates <- do.call("rbind", covariates)
  covariates$databaseId <- databaseId
  colnames(covariates) <- SqlRender::camelCaseToSnakeCase(colnames(covariates))
  fileName <- file.path(exportFolder, "covariate.csv")
  readr::write_csv(covariates, fileName)
  rm(covariates) # Free up memory
  ParallelLogger::logInfo("- cm_follow_up_dist table")
  # Follow-up time distribution per target-comparator-outcome-analysis, taken
  # from the stratified population when available, else the plain study population.
  getResult <- function(i) {
    if (reference$strataFile[i] == "") {
      strataPop <- readRDS(file.path(outputFolder,
                                     "cmOutput",
                                     reference$studyPopFile[i]))
    } else {
      strataPop <- readRDS(file.path(outputFolder,
                                     "cmOutput",
                                     reference$strataFile[i]))
    }
    # Fixed: the 5th probability was 0.85 but fills the *_p75_days columns;
    # use 0.75 so the quantile matches the column name.
    targetDist <- quantile(strataPop$survivalTime[strataPop$treatment == 1],
                           c(0, 0.1, 0.25, 0.5, 0.75, 0.9, 1))
    comparatorDist <- quantile(strataPop$survivalTime[strataPop$treatment == 0],
                               c(0, 0.1, 0.25, 0.5, 0.75, 0.9, 1))
    row <- tibble::tibble(target_id = reference$targetId[i],
                          comparator_id = reference$comparatorId[i],
                          outcome_id = reference$outcomeId[i],
                          analysis_id = reference$analysisId[i],
                          target_min_days = targetDist[1],
                          target_p10_days = targetDist[2],
                          target_p25_days = targetDist[3],
                          target_median_days = targetDist[4],
                          target_p75_days = targetDist[5],
                          target_p90_days = targetDist[6],
                          target_max_days = targetDist[7],
                          comparator_min_days = comparatorDist[1],
                          comparator_p10_days = comparatorDist[2],
                          comparator_p25_days = comparatorDist[3],
                          comparator_median_days = comparatorDist[4],
                          comparator_p75_days = comparatorDist[5],
                          comparator_p90_days = comparatorDist[6],
                          comparator_max_days = comparatorDist[7])
    return(row)
  }
  outcomesOfInterest <- getOutcomesOfInterest()
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  reference <- reference[reference$outcomeId %in% outcomesOfInterest, ]
  results <- plyr::llply(seq_len(nrow(reference)), getResult, .progress = "text")
  results <- do.call("rbind", results)
  results$database_id <- databaseId
  fileName <- file.path(exportFolder, "cm_follow_up_dist.csv")
  readr::write_csv(results, fileName)
  rm(results) # Free up memory
}
enforceMinCellValue <- function(data, fieldName, minValues, silent = FALSE) {
  # Censor small counts for privacy: any non-missing, non-zero value in
  # fieldName that falls below its minimum is replaced by the negated minimum,
  # signalling "below threshold" without revealing the true value.
  #
  # minValues may be a scalar (one threshold for all rows) or a vector with
  # one threshold per row.
  values <- data[[fieldName]]
  toCensor <- !is.na(values) & values < minValues & values != 0
  if (!silent) {
    percent <- round(100 * sum(toCensor) / nrow(data), 1)
    ParallelLogger::logInfo(" censoring ",
                            sum(toCensor),
                            " values (",
                            percent,
                            "%) from ",
                            fieldName,
                            " because value below minimum")
  }
  if (length(minValues) == 1) {
    data[toCensor, fieldName] <- -minValues
  } else {
    data[toCensor, fieldName] <- -minValues[toCensor]
  }
  return(data)
}
exportMainResults <- function(outputFolder,
                              exportFolder,
                              databaseId,
                              minCellCount,
                              maxCores) {
  # Writes the cohort_method_result table (empirically calibrated main
  # effects), the optional cm_main_effects_result table (per-covariate
  # estimates), and the cm_interaction_result table to CSV in exportFolder.
  ParallelLogger::logInfo("Exporting main results")
  ParallelLogger::logInfo("- cohort_method_result table")
  analysesSum <- readr::read_csv(file.path(outputFolder, "analysisSummary.csv"), col_types = readr::cols())
  allControls <- getAllControls(outputFolder)
  ParallelLogger::logInfo(" Performing empirical calibration on main effects")
  # Calibrate each target-comparator-analysis subset in parallel (at most 4 workers).
  cluster <- ParallelLogger::makeCluster(min(4, maxCores))
  subsets <- split(analysesSum,
                   paste(analysesSum$targetId, analysesSum$comparatorId, analysesSum$analysisId))
  rm(analysesSum) # Free up memory
  results <- ParallelLogger::clusterApply(cluster,
                                          subsets,
                                          calibrate,
                                          allControls = allControls)
  ParallelLogger::stopCluster(cluster)
  # Keep a reduced copy of the uncalibrated estimates for the per-covariate
  # main-effects table below. Columns are dropped by position here -
  # NOTE(review): verify these indices against analysisSummary.csv's layout.
  mainEffects <- do.call("rbind", subsets)[, -c(2,4,6,8,9:20)]
  rm(subsets) # Free up memory
  results <- do.call("rbind", results)
  results$databaseId <- databaseId
  # Censor person/event counts below the minimum cell count before sharing.
  results <- enforceMinCellValue(results, "targetSubjects", minCellCount)
  results <- enforceMinCellValue(results, "comparatorSubjects", minCellCount)
  results <- enforceMinCellValue(results, "targetOutcomes", minCellCount)
  results <- enforceMinCellValue(results, "comparatorOutcomes", minCellCount)
  colnames(results) <- SqlRender::camelCaseToSnakeCase(colnames(results))
  fileName <- file.path(exportFolder, "cohort_method_result.csv")
  readr::write_csv(results, fileName)
  rm(results) # Free up memory
  # Handle main / interaction effects
  # More than 4 columns means per-covariate estimate columns are present.
  if (ncol(mainEffects) > 4) {
    ParallelLogger::logInfo("- cm_main_effect_result table")
    keyCol <- "estimate"
    valueCol <- "value"
    gatherCols <- names(mainEffects)[5:length(names(mainEffects))]
    # NOTE(review): tidyr::gather_() is deprecated; the estimate columns
    # appear to be named "<estimate>I<covariateId>" (split on the "I" below).
    longTable <- tidyr::gather_(mainEffects, keyCol, valueCol, gatherCols)
    longTable$label <- as.numeric(sub(".*I", "", longTable$estimate))
    longTable$estimate <- sub("I.*", "", longTable$estimate)
    uniqueCovariates <- unique(longTable$label)
    mainEffects <- tidyr::spread(longTable, estimate, value)
    mainEffects <- mainEffects[!is.na(mainEffects$logRr),]
    # NOTE(review): ci95lb/ci95ub are log-transformed here to match the
    # log-scale coefficient column - confirm against the results model spec.
    mainEffects <- data.frame(
      databaseId = databaseId,
      analysisId = mainEffects$analysisId,
      targetId = mainEffects$targetId,
      comparatorId = mainEffects$comparatorId,
      outcomeId = mainEffects$outcomeId,
      covariateId = mainEffects$label,
      coefficient = mainEffects$logRr,
      ci95lb = log(mainEffects$ci95lb),
      ci95ub = log(mainEffects$ci95ub),
      se = mainEffects$seLogRr
    )
    colnames(mainEffects) <- SqlRender::camelCaseToSnakeCase(colnames(mainEffects))
    fileName <- file.path(exportFolder, "cm_main_effects_result.csv")
    write.csv(mainEffects, fileName, row.names = FALSE)
    rm(mainEffects) # Free up memory
  }
  ParallelLogger::logInfo("- cm_interaction_result table")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  # Collect per-subgroup interaction estimates from each fitted outcome model;
  # models without subgroup counts contribute nothing.
  loadInteractionsFromOutcomeModel <- function(i) {
    outcomeModel <- readRDS(file.path(outputFolder,
                                      "cmOutput",
                                      reference$outcomeModelFile[i]))
    if ("subgroupCounts" %in% names(outcomeModel)) {
      # Start with NA estimates; fill them in below when the model converged.
      rows <- tibble::tibble(targetId = reference$targetId[i],
                             comparatorId = reference$comparatorId[i],
                             outcomeId = reference$outcomeId[i],
                             analysisId = reference$analysisId[i],
                             interactionCovariateId = outcomeModel$subgroupCounts$subgroupCovariateId,
                             rrr = NA,
                             ci95Lb = NA,
                             ci95Ub = NA,
                             p = NA,
                             i2 = NA,
                             logRrr = NA,
                             seLogRrr = NA,
                             targetSubjects = outcomeModel$subgroupCounts$targetPersons,
                             comparatorSubjects = outcomeModel$subgroupCounts$comparatorPersons,
                             targetDays = outcomeModel$subgroupCounts$targetDays,
                             comparatorDays = outcomeModel$subgroupCounts$comparatorDays,
                             targetOutcomes = outcomeModel$subgroupCounts$targetOutcomes,
                             comparatorOutcomes = outcomeModel$subgroupCounts$comparatorOutcomes)
      if ("outcomeModelInteractionEstimates" %in% names(outcomeModel)) {
        idx <- match(outcomeModel$outcomeModelInteractionEstimates$covariateId,
                     rows$interactionCovariateId)
        rows$rrr[idx] <- exp(outcomeModel$outcomeModelInteractionEstimates$logRr)
        rows$ci95Lb[idx] <- exp(outcomeModel$outcomeModelInteractionEstimates$logLb95)
        rows$ci95Ub[idx] <- exp(outcomeModel$outcomeModelInteractionEstimates$logUb95)
        rows$logRrr[idx] <- outcomeModel$outcomeModelInteractionEstimates$logRr
        rows$seLogRrr[idx] <- outcomeModel$outcomeModelInteractionEstimates$seLogRr
        # Two-sided p-value from the Wald z-statistic.
        z <- rows$logRrr[idx]/rows$seLogRrr[idx]
        rows$p[idx] <- 2 * pmin(pnorm(z), 1 - pnorm(z))
      }
      return(rows)
    } else {
      return(NULL)
    }
  }
  interactions <- plyr::llply(1:nrow(reference),
                              loadInteractionsFromOutcomeModel,
                              .progress = "text")
  interactions <- bind_rows(interactions)
  if (nrow(interactions) > 0) {
    ParallelLogger::logInfo(" Performing empirical calibration on interaction effects")
    # Interaction p-values are calibrated with negative controls only.
    allControls <- getAllControls(outputFolder)
    negativeControls <- allControls[allControls$targetEffectSize == 1, ]
    cluster <- ParallelLogger::makeCluster(min(4, maxCores))
    subsets <- split(interactions,
                     paste(interactions$targetId, interactions$comparatorId, interactions$analysisId))
    interactions <- ParallelLogger::clusterApply(cluster,
                                                 subsets,
                                                 calibrateInteractions,
                                                 negativeControls = negativeControls)
    ParallelLogger::stopCluster(cluster)
    rm(subsets) # Free up memory
    interactions <- bind_rows(interactions)
    interactions$databaseId <- databaseId
    # Censor person/event counts below the minimum cell count before sharing.
    interactions <- enforceMinCellValue(interactions, "targetSubjects", minCellCount)
    interactions <- enforceMinCellValue(interactions, "comparatorSubjects", minCellCount)
    interactions <- enforceMinCellValue(interactions, "targetOutcomes", minCellCount)
    interactions <- enforceMinCellValue(interactions, "comparatorOutcomes", minCellCount)
    colnames(interactions) <- SqlRender::camelCaseToSnakeCase(colnames(interactions))
    fileName <- file.path(exportFolder, "cm_interaction_result.csv")
    readr::write_csv(interactions, fileName)
    rm(interactions) # Free up memory
  }
}
calibrate <- function(subset, allControls) {
  # Empirically calibrate one target-comparator-analysis subset of estimates:
  # p-values are calibrated against negative controls (true effect size 1);
  # confidence intervals against all controls. When too few controls have
  # valid standard errors (<= 5), the calibrated columns are filled with NA.
  # Returns the subset reduced and renamed to the cohort_method_result layout.
  negativeOutcomeIds <- allControls$outcomeId[allControls$targetEffectSize == 1]
  ncs <- subset[subset$outcomeId %in% negativeOutcomeIds & !is.na(subset$seLogRr), ]
  if (nrow(ncs) > 5) {
    null <- EmpiricalCalibration::fitMcmcNull(ncs$logRr, ncs$seLogRr)
    subset$calibratedP <- EmpiricalCalibration::calibrateP(null = null,
                                                           logRr = subset$logRr,
                                                           seLogRr = subset$seLogRr)$p
  } else {
    subset$calibratedP <- rep(NA, nrow(subset))
  }
  positiveOutcomeIds <- allControls$outcomeId[allControls$targetEffectSize != 1]
  pcs <- subset[subset$outcomeId %in% positiveOutcomeIds & !is.na(subset$seLogRr), ]
  if (nrow(pcs) > 5) {
    # Fit the systematic error model on all controls (negative and positive).
    controls <- merge(subset, allControls[, c("targetId", "comparatorId", "outcomeId", "targetEffectSize")])
    model <- EmpiricalCalibration::fitSystematicErrorModel(logRr = controls$logRr,
                                                           seLogRr = controls$seLogRr,
                                                           trueLogRr = log(controls$targetEffectSize),
                                                           estimateCovarianceMatrix = FALSE)
    calibratedCi <- EmpiricalCalibration::calibrateConfidenceInterval(logRr = subset$logRr,
                                                                      seLogRr = subset$seLogRr,
                                                                      model = model)
    subset$calibratedRr <- exp(calibratedCi$logRr)
    subset$calibratedCi95Lb <- exp(calibratedCi$logLb95Rr)
    subset$calibratedCi95Ub <- exp(calibratedCi$logUb95Rr)
    subset$calibratedLogRr <- calibratedCi$logRr
    subset$calibratedSeLogRr <- calibratedCi$seLogRr
  } else {
    for (columnName in c("calibratedRr", "calibratedCi95Lb", "calibratedCi95Ub",
                         "calibratedLogRr", "calibratedSeLogRr")) {
      subset[[columnName]] <- rep(NA, nrow(subset))
    }
  }
  # No meta-analysis at the database level, so heterogeneity (i2) is always NA.
  subset$i2 <- rep(NA, nrow(subset))
  # Reduce to the cohort_method_result column set and rename to its
  # camelCase names (snake_cased later by the caller).
  sourceColumns <- c("targetId", "comparatorId", "outcomeId", "analysisId",
                     "rr", "ci95lb", "ci95ub", "p", "i2", "logRr", "seLogRr",
                     "target", "comparator", "targetDays", "comparatorDays",
                     "eventsTarget", "eventsComparator",
                     "calibratedP", "calibratedRr", "calibratedCi95Lb",
                     "calibratedCi95Ub", "calibratedLogRr", "calibratedSeLogRr")
  resultColumns <- c("targetId", "comparatorId", "outcomeId", "analysisId",
                     "rr", "ci95Lb", "ci95Ub", "p", "i2", "logRr", "seLogRr",
                     "targetSubjects", "comparatorSubjects", "targetDays", "comparatorDays",
                     "targetOutcomes", "comparatorOutcomes",
                     "calibratedP", "calibratedRr", "calibratedCi95Lb",
                     "calibratedCi95Ub", "calibratedLogRr", "calibratedSeLogRr")
  subset <- subset[, sourceColumns]
  colnames(subset) <- resultColumns
  return(subset)
}
calibrateInteractions <- function(subset, negativeControls) {
  # Calibrate interaction-term p-values for one target-comparator-analysis
  # subset, using negative control outcomes only. With 5 or fewer usable
  # negative controls, calibratedP is filled with NA.
  ncs <- subset[subset$outcomeId %in% negativeControls$outcomeId, ]
  ncs <- ncs[!is.na(ncs[["seLogRrr"]]), ]
  if (nrow(ncs) > 5) {
    # Enough negative controls to fit an empirical null distribution.
    null <- EmpiricalCalibration::fitMcmcNull(ncs$logRrr, ncs$seLogRrr)
    calibrated <- EmpiricalCalibration::calibrateP(null = null,
                                                   logRr = subset$logRrr,
                                                   seLogRr = subset$seLogRrr)
    subset$calibratedP <- calibrated$p
  } else {
    subset$calibratedP <- rep(NA, nrow(subset))
  }
  return(subset)
}
exportProfiles <- function(outputFolder,
                           exportFolder,
                           databaseId,
                           minCellCount,
                           maxCores) {
  # Combines the per-comparison likelihood profile files (profile/prof_*.rds)
  # into a single outcome_profile.csv, streaming one file at a time. The IDs
  # are parsed from the file names, which follow the pattern
  # prof_t<target>_c<comparator>[_s<subgroup>|_o<outcome>]_a<analysis>.rds.
  ParallelLogger::logInfo("Exporting profiles")
  fileName <- file.path(exportFolder, "outcome_profile.csv")
  # Remove any output from a previous run so append mode starts clean.
  if (file.exists(fileName)) {
    unlink(fileName)
  }
  first <- TRUE
  profileFolder <- file.path(outputFolder, "profile")
  files <- list.files(profileFolder, pattern = "prof_.*.rds", full.names = TRUE)
  pb <- txtProgressBar(style = 3)
  if (length(files) > 0) {
    for (i in 1:length(files)) {
      # Strip the path prefix, then peel off each ID in turn.
      ids <- gsub("^.*prof_t", "", files[i])
      targetId <- as.numeric(gsub("_c.*", "", ids))
      ids <- gsub("^.*_c", "", ids)
      comparatorId <- as.numeric(gsub("_[aso].*$", "", ids))
      if (grepl("_s", ids)) {
        # NOTE(review): subgroupId is parsed but never written to the output
        # table in this function - confirm whether that is intentional.
        subgroupId <- as.numeric(gsub("^.*_s", "", gsub("_a[0-9]*.rds", "", ids)))
      } else {
        subgroupId <- NA
      }
      if (grepl("_o", ids)) {
        outcomeId <- as.numeric(gsub("^.*_o", "", gsub("_a[0-9]*.rds", "", ids)))
      } else {
        outcomeId <- NA
      }
      ids <- gsub("^.*_a", "", ids)
      analysisId <- as.numeric(gsub(".rds", "", ids))
      profile <- readRDS(files[i])
      # Tag the profile rows with the IDs parsed from the file name.
      profile$targetId <- targetId
      profile$comparatorId <- comparatorId
      profile$outcomeId <- outcomeId
      profile$analysisId <- analysisId
      profile$databaseId <- databaseId
      colnames(profile) <- SqlRender::camelCaseToSnakeCase(colnames(profile))
      # Write the header only for the first file; append afterwards.
      write.table(x = profile,
                  file = fileName,
                  row.names = FALSE,
                  col.names = first,
                  sep = ",",
                  dec = ".",
                  qmethod = "double",
                  append = !first)
      first <- FALSE
      setTxtProgressBar(pb, i/length(files))
    }
  }
  close(pb)
}
exportDiagnostics <- function(outputFolder,
                              exportFolder,
                              databaseId,
                              minCellCount,
                              maxCores) {
  # Writes the study diagnostics tables: covariate_balance,
  # preference_score_dist, propensity_model, and kaplan_meier_dist.
  # Person counts and mean fractions are censored at minCellCount.
  ParallelLogger::logInfo("Exporting diagnostics")
  ParallelLogger::logInfo("- covariate_balance table")
  fileName <- file.path(exportFolder, "covariate_balance.csv")
  # Remove any output from a previous run so append mode starts clean.
  if (file.exists(fileName)) {
    unlink(fileName)
  }
  first <- TRUE
  balanceFolder <- file.path(outputFolder, "balance")
  files <- list.files(balanceFolder, pattern = "bal_.*.rds", full.names = TRUE)
  pb <- txtProgressBar(style = 3)
  if (length(files) > 0) {
    for (i in 1:length(files)) {
      # Parse target, comparator, subgroup/outcome, and analysis IDs from the
      # file name (bal_t<target>_c<comparator>[_s<subgroup>|_o<outcome>]_a<analysis>.rds).
      ids <- gsub("^.*bal_t", "", files[i])
      targetId <- as.numeric(gsub("_c.*", "", ids))
      ids <- gsub("^.*_c", "", ids)
      comparatorId <- as.numeric(gsub("_[aso].*$", "", ids))
      if (grepl("_s", ids)) {
        subgroupId <- as.numeric(gsub("^.*_s", "", gsub("_a[0-9]*.rds", "", ids)))
      } else {
        subgroupId <- NA
      }
      if (grepl("_o", ids)) {
        outcomeId <- as.numeric(gsub("^.*_o", "", gsub("_a[0-9]*.rds", "", ids)))
      } else {
        outcomeId <- NA
      }
      ids <- gsub("^.*_a", "", ids)
      analysisId <- as.numeric(gsub(".rds", "", ids))
      balance <- readRDS(files[i])
      # Cohort sizes are not stored in the balance files; infer them from the
      # sum/mean ratio of the covariate columns (sum / mean = person count).
      inferredTargetBeforeSize <- mean(balance$beforeMatchingSumTarget/balance$beforeMatchingMeanTarget,
                                       na.rm = TRUE)
      inferredComparatorBeforeSize <- mean(balance$beforeMatchingSumComparator/balance$beforeMatchingMeanComparator,
                                           na.rm = TRUE)
      inferredTargetAfterSize <- mean(balance$afterMatchingSumTarget/balance$afterMatchingMeanTarget,
                                      na.rm = TRUE)
      inferredComparatorAfterSize <- mean(balance$afterMatchingSumComparator/balance$afterMatchingMeanComparator,
                                          na.rm = TRUE)
      balance$databaseId <- databaseId
      balance$targetId <- targetId
      balance$comparatorId <- comparatorId
      balance$outcomeId <- outcomeId
      balance$analysisId <- analysisId
      balance$interactionCovariateId <- subgroupId
      # Reduce to the columns of the covariate_balance table and rename.
      balance <- balance[, c("databaseId",
                             "targetId",
                             "comparatorId",
                             "outcomeId",
                             "analysisId",
                             "interactionCovariateId",
                             "covariateId",
                             "beforeMatchingMeanTarget",
                             "beforeMatchingMeanComparator",
                             "beforeMatchingStdDiff",
                             "afterMatchingMeanTarget",
                             "afterMatchingMeanComparator",
                             "afterMatchingStdDiff")]
      colnames(balance) <- c("databaseId",
                             "targetId",
                             "comparatorId",
                             "outcomeId",
                             "analysisId",
                             "interactionCovariateId",
                             "covariateId",
                             "targetMeanBefore",
                             "comparatorMeanBefore",
                             "stdDiffBefore",
                             "targetMeanAfter",
                             "comparatorMeanAfter",
                             "stdDiffAfter")
      balance$targetMeanBefore[is.na(balance$targetMeanBefore)] <- 0
      balance$comparatorMeanBefore[is.na(balance$comparatorMeanBefore)] <- 0
      balance$stdDiffBefore <- round(balance$stdDiffBefore, 3)
      balance$targetMeanAfter[is.na(balance$targetMeanAfter)] <- 0
      balance$comparatorMeanAfter[is.na(balance$comparatorMeanAfter)] <- 0
      # The inferred sizes are scalars, so the logical index below either
      # zeroes the whole column (size is NA) or leaves it untouched.
      balance$targetSizeBefore <- inferredTargetBeforeSize
      balance$targetSizeBefore[is.na(inferredTargetBeforeSize)] <- 0
      balance$comparatorSizeBefore <- inferredComparatorBeforeSize
      balance$comparatorSizeBefore[is.na(inferredComparatorBeforeSize)] <- 0
      balance$targetSizeAfter <- inferredTargetAfterSize
      balance$targetSizeAfter[is.na(inferredTargetAfterSize)] <- 0
      balance$comparatorSizeAfter <- inferredComparatorAfterSize
      balance$comparatorSizeAfter[is.na(inferredComparatorAfterSize)] <- 0
      balance$stdDiffAfter <- round(balance$stdDiffAfter, 3)
      # Censor mean fractions that correspond to fewer than minCellCount
      # persons (threshold = minCellCount / cohort size), then the sizes.
      balance <- enforceMinCellValue(balance,
                                     "targetMeanBefore",
                                     minCellCount/inferredTargetBeforeSize,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "comparatorMeanBefore",
                                     minCellCount/inferredComparatorBeforeSize,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "targetMeanAfter",
                                     minCellCount/inferredTargetAfterSize,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "comparatorMeanAfter",
                                     minCellCount/inferredComparatorAfterSize,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "targetSizeBefore",
                                     minCellCount,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "targetSizeAfter",
                                     minCellCount,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "comparatorSizeBefore",
                                     minCellCount,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "comparatorSizeAfter",
                                     minCellCount,
                                     TRUE)
      # Round after censoring so the censored sentinel values stay intact.
      balance$targetMeanBefore <- round(balance$targetMeanBefore, 3)
      balance$comparatorMeanBefore <- round(balance$comparatorMeanBefore, 3)
      balance$targetMeanAfter <- round(balance$targetMeanAfter, 3)
      balance$comparatorMeanAfter <- round(balance$comparatorMeanAfter, 3)
      balance$targetSizeBefore <- round(balance$targetSizeBefore, 0)
      balance$comparatorSizeBefore <- round(balance$comparatorSizeBefore, 0)
      balance$targetSizeAfter <- round(balance$targetSizeAfter, 0)
      balance$comparatorSizeAfter <- round(balance$comparatorSizeAfter, 0)
      # balance <- balance[balance$targetMeanBefore != 0 & balance$comparatorMeanBefore != 0 & balance$targetMeanAfter !=
      # 0 & balance$comparatorMeanAfter != 0 & balance$stdDiffBefore != 0 & balance$stdDiffAfter !=
      # 0, ]
      balance <- balance[!is.na(balance$targetId), ]
      colnames(balance) <- SqlRender::camelCaseToSnakeCase(colnames(balance))
      # Write the header only for the first file; append afterwards.
      write.table(x = balance,
                  file = fileName,
                  row.names = FALSE,
                  col.names = first,
                  sep = ",",
                  dec = ".",
                  qmethod = "double",
                  append = !first)
      first <- FALSE
      setTxtProgressBar(pb, i/length(files))
    }
  }
  close(pb)
  ParallelLogger::logInfo("- preference_score_dist table")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  # Compute preference score density curves (100 points on [0, 1]) for each
  # target-comparator-analysis that has a shared propensity score file.
  preparePlot <- function(row, reference) {
    idx <- reference$analysisId == row$analysisId &
      reference$targetId == row$targetId &
      reference$comparatorId == row$comparatorId
    psFileName <- file.path(outputFolder,
                            "cmOutput",
                            reference$sharedPsFile[idx][1])
    if (file.exists(psFileName)) {
      ps <- readRDS(psFileName)
      # Require both treatment groups and a non-degenerate score distribution.
      if (length(unique(ps$treatment)) == 2 &&
          min(ps$propensityScore) < max(ps$propensityScore)) {
        # NOTE(review): relies on a CohortMethod internal (:::) - may break
        # across CohortMethod versions.
        ps <- CohortMethod:::computePreferenceScore(ps)
        pop1 <- ps$preferenceScore[ps$treatment == 1]
        pop0 <- ps$preferenceScore[ps$treatment == 0]
        # density() needs >1 point for automatic bandwidth; fall back to 0.1.
        bw1 <- ifelse(length(pop1) > 1, "nrd0", 0.1)
        bw0 <- ifelse(length(pop0) > 1, "nrd0", 0.1)
        d1 <- density(pop1, bw = bw1, from = 0, to = 1, n = 100)
        d0 <- density(pop0, bw = bw0, from = 0, to = 1, n = 100)
        result <- tibble::tibble(databaseId = databaseId,
                                 targetId = row$targetId,
                                 comparatorId = row$comparatorId,
                                 analysisId = row$analysisId,
                                 preferenceScore = d1$x,
                                 targetDensity = d1$y,
                                 comparatorDensity = d0$y)
        return(result)
      }
    }
    return(NULL)
  }
  subset <- unique(reference[reference$sharedPsFile != "",
                             c("targetId", "comparatorId", "analysisId")])
  data <- plyr::llply(split(subset, 1:nrow(subset)),
                      preparePlot,
                      reference = reference,
                      .progress = "text")
  data <- do.call("rbind", data)
  fileName <- file.path(exportFolder, "preference_score_dist.csv")
  if (!is.null(data)) {
    colnames(data) <- SqlRender::camelCaseToSnakeCase(colnames(data))
  }
  readr::write_csv(data, fileName)
  ParallelLogger::logInfo("- propensity_model table")
  # Extract the fitted propensity model coefficients for each comparison.
  getPsModel <- function(row, reference) {
    idx <- reference$analysisId == row$analysisId &
      reference$targetId == row$targetId &
      reference$comparatorId == row$comparatorId
    psFileName <- file.path(outputFolder,
                            "cmOutput",
                            reference$sharedPsFile[idx][1])
    if (file.exists(psFileName)) {
      ps <- readRDS(psFileName)
      metaData <- attr(ps, "metaData")
      # Skip comparisons where PS fitting failed.
      if (is.null(metaData$psError)) {
        cmDataFile <- file.path(outputFolder,
                                "cmOutput",
                                reference$cohortMethodDataFile[idx][1])
        cmData <- CohortMethod::loadCohortMethodData(cmDataFile)
        model <- CohortMethod::getPsModel(ps, cmData)
        # Use covariate ID 0 for the intercept term (NA in the model object).
        model$covariateId[is.na(model$covariateId)] <- 0
        Andromeda::close(cmData)
        model$databaseId <- databaseId
        model$targetId <- row$targetId
        model$comparatorId <- row$comparatorId
        model$analysisId <- row$analysisId
        model <- model[, c("databaseId", "targetId", "comparatorId", "analysisId", "covariateId", "coefficient")]
        return(model)
      }
    }
    return(NULL)
  }
  subset <- unique(reference[reference$sharedPsFile != "",
                             c("targetId", "comparatorId", "analysisId")])
  data <- plyr::llply(split(subset, 1:nrow(subset)),
                      getPsModel,
                      reference = reference,
                      .progress = "text")
  data <- do.call("rbind", data)
  fileName <- file.path(exportFolder, "propensity_model.csv")
  if (!is.null(data)) {
    colnames(data) <- SqlRender::camelCaseToSnakeCase(colnames(data))
  }
  readr::write_csv(data, fileName)
  ParallelLogger::logInfo("- kaplan_meier_dist table")
  ParallelLogger::logInfo("  Computing KM curves")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  outcomesOfInterest <- getOutcomesOfInterest()
  reference <- reference[reference$outcomeId %in% outcomesOfInterest, ]
  reference <- reference[, c("strataFile",
                             "studyPopFile",
                             "targetId",
                             "comparatorId",
                             "outcomeId",
                             "analysisId")]
  # KM curves are computed in parallel into per-comparison temp RDS files,
  # then concatenated into one CSV below.
  tempFolder <- file.path(exportFolder, "temp")
  if (!file.exists(tempFolder)) {
    dir.create(tempFolder)
  }
  cluster <- ParallelLogger::makeCluster(min(4, maxCores))
  tasks <- split(reference, seq(nrow(reference)))
  ParallelLogger::clusterApply(cluster,
                               tasks,
                               prepareKm,
                               outputFolder = outputFolder,
                               tempFolder = tempFolder,
                               databaseId = databaseId,
                               minCellCount = minCellCount)
  ParallelLogger::stopCluster(cluster)
  ParallelLogger::logInfo("  Writing to single csv file")
  # Append one temp RDS file at a time; only the first write emits a header.
  saveKmToCsv <- function(file, first, outputFile) {
    data <- readRDS(file)
    if (!is.null(data)) {
      colnames(data) <- SqlRender::camelCaseToSnakeCase(colnames(data))
    }
    write.table(x = data,
                file = outputFile,
                row.names = FALSE,
                col.names = first,
                sep = ",",
                dec = ".",
                qmethod = "double",
                append = !first)
  }
  outputFile <- file.path(exportFolder, "kaplan_meier_dist.csv")
  files <- list.files(tempFolder, "km_.*.rds", full.names = TRUE)
  if (length(files) > 0) {
    saveKmToCsv(files[1], first = TRUE, outputFile = outputFile)
    if (length(files) > 1) {
      plyr::l_ply(files[2:length(files)], saveKmToCsv, first = FALSE, outputFile = outputFile, .progress = "text")
    }
  }
  unlink(tempFolder, recursive = TRUE)
}
prepareKm <- function(task,
                      outputFolder,
                      tempFolder,
                      databaseId,
                      minCellCount) {
  # Computes the Kaplan-Meier table for a single target-comparator-outcome-
  # analysis combination and caches it as an RDS file in tempFolder.
  # Skips the computation when the cached file already exists, so an
  # interrupted run can be resumed. Returns NULL when no table can be made.
  ParallelLogger::logTrace("Preparing KM plot for target ",
                           task$targetId,
                           ", comparator ",
                           task$comparatorId,
                           ", outcome ",
                           task$outcomeId,
                           ", analysis ",
                           task$analysisId)
  kmFile <- file.path(tempFolder, sprintf("km_t%s_c%s_o%s_a%s.rds",
                                          task$targetId,
                                          task$comparatorId,
                                          task$outcomeId,
                                          task$analysisId))
  if (file.exists(kmFile)) {
    # Already computed in a previous (possibly interrupted) run.
    return(NULL)
  }
  # Prefer the stratified population when one was created; otherwise fall
  # back to the plain study population.
  populationFile <- if (task$strataFile == "") task$studyPopFile else task$strataFile
  population <- readRDS(file.path(outputFolder, "cmOutput", populationFile))
  if (nrow(population) == 0 || length(unique(population$treatment)) != 2) {
    # Can happen when matching and treatment is predictable
    return(NULL)
  }
  km <- prepareKaplanMeier(population)
  if (is.null(km)) {
    # No shared strata
    return(NULL)
  }
  km$targetId <- task$targetId
  km$comparatorId <- task$comparatorId
  km$outcomeId <- task$outcomeId
  km$analysisId <- task$analysisId
  km$databaseId <- databaseId
  # Censor small at-risk counts before persisting the table.
  km <- enforceMinCellValue(km, "targetAtRisk", minCellCount)
  km <- enforceMinCellValue(km, "comparatorAtRisk", minCellCount)
  saveRDS(km, kmFile)
}
# Builds a Kaplan-Meier table for one study population: survival probability
# with 95% CI per arm over time, plus the number of subjects still at risk at
# fixed time breaks. Returns NULL when an adjusted curve cannot be computed
# (no strata shared between arms, or degenerate IPTW weights).
prepareKaplanMeier <- function(population) {
  # Curves are truncated at this quantile of the follow-up time distribution.
  dataCutoff <- 0.9
  # Binary event indicator: 1 if the subject had at least one outcome.
  population$y <- 0
  population$y[population$outcomeCount != 0] <- 1
  # No stratification, or 1:1 matching (each stratum holds exactly two
  # subjects): unadjusted KM per treatment arm suffices.
  if (is.null(population$stratumId) || length(unique(population$stratumId)) == nrow(population)/2) {
    sv <- survival::survfit(survival::Surv(survivalTime, y) ~ treatment, population, conf.int = TRUE)
    # Split the combined fit back into the two arms by stratum label.
    idx <- summary(sv, censored = T)$strata == "treatment=1"
    survTarget <- tibble::tibble(time = sv$time[idx],
                                 targetSurvival = sv$surv[idx],
                                 targetSurvivalLb = sv$lower[idx],
                                 targetSurvivalUb = sv$upper[idx])
    idx <- summary(sv, censored = T)$strata == "treatment=0"
    survComparator <- tibble::tibble(time = sv$time[idx],
                                     comparatorSurvival = sv$surv[idx],
                                     comparatorSurvivalLb = sv$lower[idx],
                                     comparatorSurvivalUb = sv$upper[idx])
    # Full outer merge on time: each arm keeps its own event times.
    data <- merge(survTarget, survComparator, all = TRUE)
  } else {
    # Adjusted KM: derive per-stratum weights, then use CohortMethod's
    # weighted estimator.
    population$stratumSizeT <- 1
    strataSizesT <- aggregate(stratumSizeT ~ stratumId, population[population$treatment == 1, ], sum)
    if (max(strataSizesT$stratumSizeT) == 1) {
      # variable ratio matching: use propensity score to compute IPTW
      if (is.null(population$propensityScore)) {
        stop("Variable ratio matching detected, but no propensity score found")
      }
      weights <- aggregate(propensityScore ~ stratumId, population, mean)
      if (max(weights$propensityScore) > 0.99999) {
        # A propensity score of ~1 would produce a near-infinite weight.
        return(NULL)
      }
      # Odds of treatment as the weight (target subjects get weight 1 below).
      weights$weight <- weights$propensityScore / (1 - weights$propensityScore)
    } else {
      # stratification: infer probability of treatment from subject counts
      strataSizesC <- aggregate(stratumSizeT ~ stratumId, population[population$treatment == 0, ], sum)
      colnames(strataSizesC)[2] <- "stratumSizeC"
      weights <- merge(strataSizesT, strataSizesC)
      if (nrow(weights) == 0) {
        warning("No shared strata between target and comparator")
        return(NULL)
      }
      # Reweight comparator subjects to the target's per-stratum size.
      weights$weight <- weights$stratumSizeT/weights$stratumSizeC
    }
    population <- merge(population, weights[, c("stratumId", "weight")])
    population$weight[population$treatment == 1] <- 1
    idx <- population$treatment == 1
    survTarget <- CohortMethod:::adjustedKm(weight = population$weight[idx],
                                            time = population$survivalTime[idx],
                                            y = population$y[idx])
    # Log-log transformed confidence limits.
    # NOTE(review): when s == 1, log(s) == 0 and these expressions divide by
    # zero; only the *lower* bound is guarded below (s > 0.9999) -- confirm
    # the upper bound is handled correctly downstream.
    survTarget$targetSurvivalUb <- survTarget$s^exp(qnorm(0.975)/log(survTarget$s) * sqrt(survTarget$var)/survTarget$s)
    survTarget$targetSurvivalLb <- survTarget$s^exp(qnorm(0.025)/log(survTarget$s) * sqrt(survTarget$var)/survTarget$s)
    survTarget$targetSurvivalLb[survTarget$s > 0.9999] <- survTarget$s[survTarget$s > 0.9999]
    survTarget$targetSurvival <- survTarget$s
    survTarget$s <- NULL
    survTarget$var <- NULL
    idx <- population$treatment == 0
    survComparator <- CohortMethod:::adjustedKm(weight = population$weight[idx],
                                                time = population$survivalTime[idx],
                                                y = population$y[idx])
    survComparator$comparatorSurvivalUb <- survComparator$s^exp(qnorm(0.975)/log(survComparator$s) *
                                                                  sqrt(survComparator$var)/survComparator$s)
    survComparator$comparatorSurvivalLb <- survComparator$s^exp(qnorm(0.025)/log(survComparator$s) *
                                                                  sqrt(survComparator$var)/survComparator$s)
    survComparator$comparatorSurvivalLb[survComparator$s > 0.9999] <- survComparator$s[survComparator$s >
                                                                                         0.9999]
    survComparator$comparatorSurvival <- survComparator$s
    survComparator$s <- NULL
    survComparator$var <- NULL
    data <- merge(survTarget, survComparator, all = TRUE)
  }
  data <- data[, c("time", "targetSurvival", "targetSurvivalLb", "targetSurvivalUb", "comparatorSurvival", "comparatorSurvivalLb", "comparatorSurvivalUb")]
  # Truncate at the 90th percentile of follow-up time.
  cutoff <- quantile(population$survivalTime, dataCutoff)
  data <- data[data$time <= cutoff, ]
  # Choose x-axis breaks for the at-risk table depending on follow-up length.
  if (cutoff <= 300) {
    xBreaks <- seq(0, cutoff, by = 50)
  } else if (cutoff <= 600) {
    xBreaks <- seq(0, cutoff, by = 100)
  } else {
    xBreaks <- seq(0, cutoff, by = 250)
  }
  # Unweighted counts of subjects still at risk at each break time.
  targetAtRisk <- c()
  comparatorAtRisk <- c()
  for (xBreak in xBreaks) {
    targetAtRisk <- c(targetAtRisk,
                      sum(population$treatment == 1 & population$survivalTime >= xBreak))
    comparatorAtRisk <- c(comparatorAtRisk,
                          sum(population$treatment == 0 & population$survivalTime >=
                                xBreak))
  }
  data <- merge(data, tibble::tibble(time = xBreaks,
                                     targetAtRisk = targetAtRisk,
                                     comparatorAtRisk = comparatorAtRisk), all = TRUE)
  # Anchor the curves at survival = 1 when there is no estimate at the first
  # time point.
  if (is.na(data$targetSurvival[1])) {
    data$targetSurvival[1] <- 1
    data$targetSurvivalUb[1] <- 1
    data$targetSurvivalLb[1] <- 1
  }
  if (is.na(data$comparatorSurvival[1])) {
    data$comparatorSurvival[1] <- 1
    data$comparatorSurvivalUb[1] <- 1
    data$comparatorSurvivalLb[1] <- 1
  }
  # Last-observation-carried-forward fill for rows introduced by the merge
  # (the at-risk break times) that carry no survival estimate.
  idx <- which(is.na(data$targetSurvival))
  while (length(idx) > 0) {
    data$targetSurvival[idx] <- data$targetSurvival[idx - 1]
    data$targetSurvivalLb[idx] <- data$targetSurvivalLb[idx - 1]
    data$targetSurvivalUb[idx] <- data$targetSurvivalUb[idx - 1]
    idx <- which(is.na(data$targetSurvival))
  }
  idx <- which(is.na(data$comparatorSurvival))
  while (length(idx) > 0) {
    data$comparatorSurvival[idx] <- data$comparatorSurvival[idx - 1]
    data$comparatorSurvivalLb[idx] <- data$comparatorSurvivalLb[idx - 1]
    data$comparatorSurvivalUb[idx] <- data$comparatorSurvivalUb[idx - 1]
    idx <- which(is.na(data$comparatorSurvival))
  }
  # Limit precision of the shared results.
  data$targetSurvival <- round(data$targetSurvival, 4)
  data$targetSurvivalLb <- round(data$targetSurvivalLb, 4)
  data$targetSurvivalUb <- round(data$targetSurvivalUb, 4)
  data$comparatorSurvival <- round(data$comparatorSurvival, 4)
  data$comparatorSurvivalLb <- round(data$comparatorSurvivalLb, 4)
  data$comparatorSurvivalUb <- round(data$comparatorSurvivalUb, 4)
  # Remove duplicate (except time) entries:
  data <- data[order(data$time), ]
  data <- data[!duplicated(data[, -1]), ]
  return(data)
}
| /Covid19IncidencePPIandH2RA/R/Export.R | permissive | ohdsi-studies/Covid19PPIandH2RA | R | false | false | 57,473 | r | # Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of Covid19IncidencePPIandH2RA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Export all results to tables
#'
#' @description
#' Outputs all results to a folder called 'export', and zips them.
#'
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#' @param databaseId A short string for identifying the database (e.g. 'Synpuf').
#' @param databaseName The full name of the database.
#' @param databaseDescription A short description (several sentences) of the database.
#' @param minCellCount The minimum cell count for fields contains person counts or fractions.
#' @param maxCores How many parallel cores should be used? If more cores are made
#' available this can speed up the analyses.
#'
#' @export
exportResults <- function(outputFolder,
                          databaseId,
                          databaseName,
                          databaseDescription,
                          minCellCount = 5,
                          maxCores) {
  # Runs every export step in sequence, writing all result tables as CSV
  # files to <outputFolder>/export, then bundles the CSV files into a single
  # zip archive ready for sharing.
  exportFolder <- file.path(outputFolder, "export")
  if (!file.exists(exportFolder)) {
    dir.create(exportFolder, recursive = TRUE)
  }
  exportAnalyses(outputFolder = outputFolder, exportFolder = exportFolder)
  exportExposures(outputFolder = outputFolder, exportFolder = exportFolder)
  exportOutcomes(outputFolder = outputFolder, exportFolder = exportFolder)
  exportMetadata(outputFolder = outputFolder,
                 exportFolder = exportFolder,
                 databaseId = databaseId,
                 databaseName = databaseName,
                 databaseDescription = databaseDescription,
                 minCellCount = minCellCount)
  exportMainResults(outputFolder = outputFolder,
                    exportFolder = exportFolder,
                    databaseId = databaseId,
                    minCellCount = minCellCount,
                    maxCores = maxCores)
  exportDiagnostics(outputFolder = outputFolder,
                    exportFolder = exportFolder,
                    databaseId = databaseId,
                    minCellCount = minCellCount,
                    maxCores = maxCores)
  exportProfiles(outputFolder = outputFolder,
                 exportFolder = exportFolder,
                 databaseId = databaseId,
                 minCellCount = minCellCount,
                 maxCores = maxCores)
  # Bundle all CSV tables into one zip file ----
  ParallelLogger::logInfo("Adding results to zip file")
  zipName <- file.path(exportFolder, sprintf("Results_%s.zip", databaseId))
  csvFiles <- list.files(exportFolder, pattern = ".*\\.csv$")
  # createZipFile works relative to the working directory; restore it on exit.
  oldWd <- setwd(exportFolder)
  on.exit(setwd(oldWd))
  DatabaseConnector::createZipFile(zipFile = zipName, files = csvFiles)
  ParallelLogger::logInfo("Results are ready for sharing at:", zipName)
}
exportAnalyses <- function(outputFolder, exportFolder) {
  # Writes the analysis definition tables: cohort_method_analysis (one row
  # per CohortMethod analysis, with its full JSON settings) and
  # covariate_analysis (one row per FeatureExtraction analysis per CM
  # analysis).
  ParallelLogger::logInfo("Exporting analyses")
  ParallelLogger::logInfo("- cohort_method_analysis table")
  tempFileName <- tempfile()
  cmAnalysisListFile <- system.file("settings",
                                    "cmAnalysisList.json",
                                    package = "Covid19IncidencePPIandH2RA")
  cmAnalysisList <- CohortMethod::loadCmAnalysisList(cmAnalysisListFile)
  # Serialize each analysis back to JSON and store the JSON text in the
  # 'definition' column.
  cmAnalysisToRow <- function(cmAnalysis) {
    ParallelLogger::saveSettingsToJson(cmAnalysis, tempFileName)
    row <- tibble::tibble(analysisId = cmAnalysis$analysisId,
                          description = cmAnalysis$description,
                          definition = readChar(tempFileName, file.info(tempFileName)$size))
    return(row)
  }
  cohortMethodAnalysis <- lapply(cmAnalysisList, cmAnalysisToRow)
  cohortMethodAnalysis <- do.call("rbind", cohortMethodAnalysis)
  cohortMethodAnalysis <- unique(cohortMethodAnalysis)
  unlink(tempFileName)
  colnames(cohortMethodAnalysis) <- SqlRender::camelCaseToSnakeCase(colnames(cohortMethodAnalysis))
  fileName <- file.path(exportFolder, "cohort_method_analysis.csv")
  readr::write_csv(cohortMethodAnalysis, fileName)
  ParallelLogger::logInfo("- covariate_analysis table")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  getCovariateAnalyses <- function(cmAnalysis) {
    # Any CohortMethodData file of this analysis will do: the covariate
    # analysis reference is the same for all of them.
    cmDataFolder <- reference$cohortMethodDataFile[reference$analysisId == cmAnalysis$analysisId][1]
    cmData <- CohortMethod::loadCohortMethodData(file.path(outputFolder, "cmOutput", cmDataFolder))
    if (!is.null(cmData$analysisRef)) {
      covariateAnalysis <- collect(cmData$analysisRef)
      covariateAnalysis <- covariateAnalysis[, c("analysisId", "analysisName")]
      colnames(covariateAnalysis) <- c("covariate_analysis_id", "covariate_analysis_name")
      covariateAnalysis$analysis_id <- cmAnalysis$analysisId
      return(covariateAnalysis)
    } else {
      # Empty fallback with the SAME columns as the non-empty branch
      # (including analysis_id), so that rbind() below does not fail when
      # only some analyses have an analysisRef.
      return(data.frame(covariate_analysis_id = 1,
                        covariate_analysis_name = "",
                        analysis_id = cmAnalysis$analysisId)[-1, ])
    }
  }
  covariateAnalysis <- lapply(cmAnalysisList, getCovariateAnalyses)
  covariateAnalysis <- do.call("rbind", covariateAnalysis)
  fileName <- file.path(exportFolder, "covariate_analysis.csv")
  readr::write_csv(covariateAnalysis, fileName)
}
exportExposures <- function(outputFolder, exportFolder) {
  # Writes the exposure_of_interest table: one row per unique target or
  # comparator cohort, including the full ATLAS cohort definition JSON.
  ParallelLogger::logInfo("Exporting exposures")
  ParallelLogger::logInfo("- exposure_of_interest table")
  tcosOfInterest <- read.csv(system.file("settings", "TcosOfInterest.csv", package = "Covid19IncidencePPIandH2RA"),
                             stringsAsFactors = FALSE)
  cohortsToCreate <- read.csv(system.file("settings", "CohortsToCreate.csv", package = "Covid19IncidencePPIandH2RA"))
  # Builds one table row for a single exposure cohort ID.
  makeExposureRow <- function(exposureId) {
    idx <- cohortsToCreate$cohortId == exposureId
    cohortName <- as.character(cohortsToCreate$name[idx])
    jsonFile <- system.file("cohorts", paste0(cohortName, ".json"), package = "Covid19IncidencePPIandH2RA")
    tibble::tibble(exposureId = exposureId,
                   exposureName = as.character(cohortsToCreate$atlasName[idx]),
                   definition = readChar(jsonFile, file.info(jsonFile)$size))
  }
  # Targets and comparators combined, deduplicated.
  exposureIds <- unique(c(tcosOfInterest$targetId, tcosOfInterest$comparatorId))
  exposureOfInterest <- do.call("rbind", lapply(exposureIds, makeExposureRow))
  colnames(exposureOfInterest) <- SqlRender::camelCaseToSnakeCase(colnames(exposureOfInterest))
  readr::write_csv(exposureOfInterest, file.path(exportFolder, "exposure_of_interest.csv"))
}
# Writes the outcome definition tables: outcome_of_interest (ATLAS cohort
# definitions), negative_control_outcome, and -- when synthetic positive
# controls were generated -- positive_control_outcome.
exportOutcomes <- function(outputFolder, exportFolder) {
  ParallelLogger::logInfo("Exporting outcomes")
  ParallelLogger::logInfo("- outcome_of_interest table")
  pathToCsv <- system.file("settings", "CohortsToCreate.csv", package = "Covid19IncidencePPIandH2RA")
  cohortsToCreate <- read.csv(pathToCsv)
  # One row per outcome cohort, including its full JSON definition.
  createOutcomeRow <- function(outcomeId) {
    atlasName <- as.character(cohortsToCreate$atlasName[cohortsToCreate$cohortId == outcomeId])
    name <- as.character(cohortsToCreate$name[cohortsToCreate$cohortId == outcomeId])
    cohortFileName <- system.file("cohorts", paste0(name, ".json"), package = "Covid19IncidencePPIandH2RA")
    definition <- readChar(cohortFileName, file.info(cohortFileName)$size)
    return(tibble::tibble(outcomeId = outcomeId,
                          outcomeName = atlasName,
                          definition = definition))
  }
  outcomesOfInterest <- getOutcomesOfInterest()
  outcomeOfInterest <- lapply(outcomesOfInterest, createOutcomeRow)
  outcomeOfInterest <- do.call("rbind", outcomeOfInterest)
  colnames(outcomeOfInterest) <- SqlRender::camelCaseToSnakeCase(colnames(outcomeOfInterest))
  fileName <- file.path(exportFolder, "outcome_of_interest.csv")
  readr::write_csv(outcomeOfInterest, fileName)
  ParallelLogger::logInfo("- negative_control_outcome table")
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "Covid19IncidencePPIandH2RA")
  negativeControls <- read.csv(pathToCsv)
  # Keep only outcome controls (the file may also list exposure controls).
  negativeControls <- negativeControls[tolower(negativeControls$type) == "outcome", ]
  negativeControls <- negativeControls[, c("outcomeId", "outcomeName")]
  colnames(negativeControls) <- SqlRender::camelCaseToSnakeCase(colnames(negativeControls))
  fileName <- file.path(exportFolder, "negative_control_outcome.csv")
  readr::write_csv(negativeControls, fileName)
  # Positive controls only exist when outcome synthesis was run.
  synthesisSummaryFile <- file.path(outputFolder, "SynthesisSummary.csv")
  if (file.exists(synthesisSummaryFile)) {
    positiveControls <- read.csv(synthesisSummaryFile, stringsAsFactors = FALSE)
    pathToCsv <- system.file("settings", "NegativeControls.csv", package = "Covid19IncidencePPIandH2RA")
    negativeControls <- read.csv(pathToCsv)
    # Positive controls are derived from negative controls; label them with
    # the source control's name plus the injected effect size.
    positiveControls <- merge(positiveControls,
                              negativeControls[, c("outcomeId", "outcomeName")])
    positiveControls$outcomeName <- paste0(positiveControls$outcomeName,
                                           ", RR = ",
                                           positiveControls$targetEffectSize)
    positiveControls <- positiveControls[, c("newOutcomeId",
                                             "outcomeName",
                                             "exposureId",
                                             "outcomeId",
                                             "targetEffectSize")]
    # Rename: the synthetic cohort ID becomes the outcome ID; the source
    # negative control ID is kept for reference.
    colnames(positiveControls) <- c("outcomeId",
                                    "outcomeName",
                                    "exposureId",
                                    "negativeControlId",
                                    "effectSize")
    colnames(positiveControls) <- SqlRender::camelCaseToSnakeCase(colnames(positiveControls))
    fileName <- file.path(exportFolder, "positive_control_outcome.csv")
    readr::write_csv(positiveControls, fileName)
  }
}
exportMetadata <- function(outputFolder,
                           exportFolder,
                           databaseId,
                           databaseName,
                           databaseDescription,
                           minCellCount) {
  # Writes the metadata tables to the export folder as CSV files: database,
  # exposure_summary, comparison_summary, attrition, covariate, and
  # cm_follow_up_dist. Person counts below minCellCount are censored by
  # enforceMinCellValue() before writing.
  ParallelLogger::logInfo("Exporting metadata")
  # For one target-comparator pair, read its CohortMethodData object and
  # extract the first and last cohort start date per arm.
  getInfo <- function(row) {
    cmData <- CohortMethod::loadCohortMethodData(file.path(outputFolder, "cmOutput", row$cohortMethodDataFile))
    info <- cmData$cohorts %>%
      group_by(.data$treatment) %>%
      summarise(minDate = min(.data$cohortStartDate, na.rm = TRUE),
                maxDate = max(.data$cohortStartDate, na.rm = TRUE)) %>%
      ungroup() %>%
      collect()
    info <- tibble::tibble(targetId = row$targetId,
                           comparatorId = row$comparatorId,
                           targetMinDate = info$minDate[info$treatment == 1],
                           targetMaxDate = info$maxDate[info$treatment == 1],
                           comparatorMinDate = info$minDate[info$treatment == 0],
                           comparatorMaxDate = info$maxDate[info$treatment == 0])
    info$comparisonMinDate <- min(info$targetMinDate, info$comparatorMinDate)
    # NOTE(review): the comparison max date is the *minimum* of the two arms'
    # max dates (end of the overlapping period), while the min date is the
    # minimum of the two start dates -- confirm this asymmetry is intended.
    info$comparisonMaxDate <- min(info$targetMaxDate, info$comparatorMaxDate)
    return(info)
  }
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  reference <- unique(reference[, c("targetId", "comparatorId", "cohortMethodDataFile")])
  reference <- split(reference, reference$cohortMethodDataFile)
  info <- lapply(reference, getInfo)
  info <- bind_rows(info)
  ParallelLogger::logInfo("- database table")
  database <- tibble::tibble(database_id = databaseId,
                             database_name = databaseName,
                             description = databaseDescription,
                             is_meta_analysis = 0)
  fileName <- file.path(exportFolder, "database.csv")
  readr::write_csv(database, fileName)
  ParallelLogger::logInfo("- exposure_summary table")
  # Per-exposure observation window: earliest min date and latest max date
  # across all comparisons the exposure takes part in.
  minDates <- rbind(tibble::tibble(exposureId = info$targetId,
                                   minDate = info$targetMinDate),
                    tibble::tibble(exposureId = info$comparatorId,
                                   minDate = info$comparatorMinDate))
  minDates <- aggregate(minDate ~ exposureId, minDates, min)
  maxDates <- rbind(tibble::tibble(exposureId = info$targetId,
                                   maxDate = info$targetMaxDate),
                    tibble::tibble(exposureId = info$comparatorId,
                                   maxDate = info$comparatorMaxDate))
  maxDates <- aggregate(maxDate ~ exposureId, maxDates, max)
  exposureSummary <- merge(minDates, maxDates)
  exposureSummary$databaseId <- databaseId
  colnames(exposureSummary) <- SqlRender::camelCaseToSnakeCase(colnames(exposureSummary))
  fileName <- file.path(exportFolder, "exposure_summary.csv")
  readr::write_csv(exposureSummary, fileName)
  ParallelLogger::logInfo("- comparison_summary table")
  minDates <- aggregate(comparisonMinDate ~ targetId + comparatorId, info, min)
  maxDates <- aggregate(comparisonMaxDate ~ targetId + comparatorId, info, max)
  comparisonSummary <- merge(minDates, maxDates)
  comparisonSummary$databaseId <- databaseId
  colnames(comparisonSummary)[colnames(comparisonSummary) == "comparisonMinDate"] <- "minDate"
  colnames(comparisonSummary)[colnames(comparisonSummary) == "comparisonMaxDate"] <- "maxDate"
  colnames(comparisonSummary) <- SqlRender::camelCaseToSnakeCase(colnames(comparisonSummary))
  fileName <- file.path(exportFolder, "comparison_summary.csv")
  readr::write_csv(comparisonSummary, fileName)
  ParallelLogger::logInfo("- attrition table")
  fileName <- file.path(exportFolder, "attrition.csv")
  if (file.exists(fileName)) {
    unlink(fileName)
  }
  outcomesOfInterest <- getOutcomesOfInterest()
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  reference <- reference[reference$outcomeId %in% outcomesOfInterest, ]
  first <- !file.exists(fileName)
  pb <- txtProgressBar(style = 3)
  for (i in 1:nrow(reference)) {
    outcomeModel <- readRDS(file.path(outputFolder,
                                      "cmOutput",
                                      reference$outcomeModelFile[i]))
    # Reshape the two person-count columns to long format: one row per
    # exposure per attrition step.
    attrition <- outcomeModel$attrition[, c("description", "targetPersons", "comparatorPersons")]
    attrition$sequenceNumber <- 1:nrow(attrition)
    attrition1 <- attrition[, c("sequenceNumber", "description", "targetPersons")]
    colnames(attrition1)[3] <- "subjects"
    attrition1$exposureId <- reference$targetId[i]
    attrition2 <- attrition[, c("sequenceNumber", "description", "comparatorPersons")]
    colnames(attrition2)[3] <- "subjects"
    attrition2$exposureId <- reference$comparatorId[i]
    attrition <- rbind(attrition1, attrition2)
    attrition$targetId <- reference$targetId[i]
    attrition$comparatorId <- reference$comparatorId[i]
    attrition$analysisId <- reference$analysisId[i]
    attrition$outcomeId <- reference$outcomeId[i]
    attrition$databaseId <- databaseId
    attrition <- attrition[, c("databaseId",
                               "exposureId",
                               "targetId",
                               "comparatorId",
                               "outcomeId",
                               "analysisId",
                               "sequenceNumber",
                               "description",
                               "subjects")]
    attrition <- enforceMinCellValue(attrition, "subjects", minCellCount, silent = TRUE)
    colnames(attrition) <- SqlRender::camelCaseToSnakeCase(colnames(attrition))
    # Append to a single CSV file; only the first chunk writes the header.
    write.table(x = attrition,
                file = fileName,
                row.names = FALSE,
                col.names = first,
                sep = ",",
                dec = ".",
                qmethod = "double",
                append = !first)
    first <- FALSE
    if (i %% 100 == 10) {
      setTxtProgressBar(pb, i/nrow(reference))
    }
  }
  setTxtProgressBar(pb, 1)
  close(pb)
  ParallelLogger::logInfo("- covariate table")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  getCovariates <- function(analysisId) {
    # Use the first CohortMethodData file belonging to this analysis.
    # (Fixed: previously the file column was indexed by the analysis ID as a
    # row *position*, which selects the wrong file whenever analysis IDs do
    # not coincide with row numbers.)
    cmDataFolder <- reference$cohortMethodDataFile[reference$analysisId == analysisId][1]
    cmData <- CohortMethod::loadCohortMethodData(file.path(outputFolder, "cmOutput", cmDataFolder))
    covariateRef <- collect(cmData$covariateRef)
    if (nrow(covariateRef) > 0) {
      covariateRef <- covariateRef[, c("covariateId", "covariateName", "analysisId")]
      colnames(covariateRef) <- c("covariateId", "covariateName", "covariateAnalysisId")
      covariateRef$analysisId <- analysisId
      return(covariateRef)
    } else {
      # Empty fallback with the same column names as the non-empty branch so
      # that rbind() succeeds. (Fixed: `[-1]` dropped the first *column*
      # rather than the first row, producing a spurious one-row frame
      # without the analysisId column.)
      return(data.frame(covariateId = 1,
                        covariateName = "",
                        covariateAnalysisId = 1,
                        analysisId = analysisId)[-1, ])
    }
  }
  covariates <- lapply(unique(reference$analysisId), getCovariates)
  covariates <- do.call("rbind", covariates)
  covariates$databaseId <- databaseId
  colnames(covariates) <- SqlRender::camelCaseToSnakeCase(colnames(covariates))
  fileName <- file.path(exportFolder, "covariate.csv")
  readr::write_csv(covariates, fileName)
  rm(covariates) # Free up memory
  ParallelLogger::logInfo("- cm_follow_up_dist table")
  # Follow-up (survival) time distribution per arm for each comparison.
  getResult <- function(i) {
    if (reference$strataFile[i] == "") {
      strataPop <- readRDS(file.path(outputFolder,
                                     "cmOutput",
                                     reference$studyPopFile[i]))
    } else {
      strataPop <- readRDS(file.path(outputFolder,
                                     "cmOutput",
                                     reference$strataFile[i]))
    }
    # Fixed: the fifth probability was 0.85, but the corresponding output
    # columns are named *_p75_days; 0.75 matches the schema.
    targetDist <- quantile(strataPop$survivalTime[strataPop$treatment == 1],
                           c(0, 0.1, 0.25, 0.5, 0.75, 0.9, 1))
    comparatorDist <- quantile(strataPop$survivalTime[strataPop$treatment == 0],
                               c(0, 0.1, 0.25, 0.5, 0.75, 0.9, 1))
    row <- tibble::tibble(target_id = reference$targetId[i],
                          comparator_id = reference$comparatorId[i],
                          outcome_id = reference$outcomeId[i],
                          analysis_id = reference$analysisId[i],
                          target_min_days = targetDist[1],
                          target_p10_days = targetDist[2],
                          target_p25_days = targetDist[3],
                          target_median_days = targetDist[4],
                          target_p75_days = targetDist[5],
                          target_p90_days = targetDist[6],
                          target_max_days = targetDist[7],
                          comparator_min_days = comparatorDist[1],
                          comparator_p10_days = comparatorDist[2],
                          comparator_p25_days = comparatorDist[3],
                          comparator_median_days = comparatorDist[4],
                          comparator_p75_days = comparatorDist[5],
                          comparator_p90_days = comparatorDist[6],
                          comparator_max_days = comparatorDist[7])
    return(row)
  }
  outcomesOfInterest <- getOutcomesOfInterest()
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  reference <- reference[reference$outcomeId %in% outcomesOfInterest, ]
  results <- plyr::llply(1:nrow(reference), getResult, .progress = "text")
  results <- do.call("rbind", results)
  results$database_id <- databaseId
  fileName <- file.path(exportFolder, "cm_follow_up_dist.csv")
  readr::write_csv(results, fileName)
  rm(results) # Free up memory
}
enforceMinCellValue <- function(data, fieldName, minValues, silent = FALSE) {
  # Censors small counts: every non-zero, non-NA value of `fieldName` that
  # falls below the minimum is replaced by the *negative* of that minimum,
  # marking it as "below threshold" without revealing the true count.
  # `minValues` may be a single number or a vector with one minimum per row.
  values <- data[[fieldName]]
  toCensor <- !is.na(values) & values < minValues & values != 0
  if (!silent) {
    percent <- round(100 * sum(toCensor)/nrow(data), 1)
    ParallelLogger::logInfo("  censoring ",
                            sum(toCensor),
                            " values (",
                            percent,
                            "%) from ",
                            fieldName,
                            " because value below minimum")
  }
  # Scalar minimum applies to every censored row; a vector minimum is
  # subset to the censored rows.
  replacement <- if (length(minValues) == 1) -minValues else -minValues[toCensor]
  data[toCensor, fieldName] <- replacement
  return(data)
}
# Writes the estimate tables: cohort_method_result (empirically calibrated
# main effects), cm_main_effects_result (per-covariate main-effect
# coefficients, when present in the analysis summary), and
# cm_interaction_result (subgroup interaction estimates).
exportMainResults <- function(outputFolder,
                              exportFolder,
                              databaseId,
                              minCellCount,
                              maxCores) {
  ParallelLogger::logInfo("Exporting main results")
  ParallelLogger::logInfo("- cohort_method_result table")
  analysesSum <- readr::read_csv(file.path(outputFolder, "analysisSummary.csv"), col_types = readr::cols())
  allControls <- getAllControls(outputFolder)
  ParallelLogger::logInfo(" Performing empirical calibration on main effects")
  cluster <- ParallelLogger::makeCluster(min(4, maxCores))
  # Calibrate each target-comparator-analysis subset in parallel.
  subsets <- split(analysesSum,
                   paste(analysesSum$targetId, analysesSum$comparatorId, analysesSum$analysisId))
  rm(analysesSum)  # Free up memory
  results <- ParallelLogger::clusterApply(cluster,
                                          subsets,
                                          calibrate,
                                          allControls = allControls)
  ParallelLogger::stopCluster(cluster)
  # NOTE(review): drops columns by magic positions from the analysis summary;
  # this silently depends on the exact column layout of analysisSummary.csv
  # -- verify against the CohortMethod::summarizeAnalyses output.
  mainEffects <- do.call("rbind", subsets)[, -c(2,4,6,8,9:20)]
  rm(subsets) # Free up memory
  results <- do.call("rbind", results)
  results$databaseId <- databaseId
  results <- enforceMinCellValue(results, "targetSubjects", minCellCount)
  results <- enforceMinCellValue(results, "comparatorSubjects", minCellCount)
  results <- enforceMinCellValue(results, "targetOutcomes", minCellCount)
  results <- enforceMinCellValue(results, "comparatorOutcomes", minCellCount)
  colnames(results) <- SqlRender::camelCaseToSnakeCase(colnames(results))
  fileName <- file.path(exportFolder, "cohort_method_result.csv")
  readr::write_csv(results, fileName)
  rm(results) # Free up memory
  # Handle main / interaction effects
  # Remaining columns beyond the 4 key columns are interpreted as
  # per-covariate estimate columns named like "<estimate>I<covariateId>".
  if (ncol(mainEffects) > 4) {
    # NOTE(review): the logged table name differs from the file name written
    # below ("cm_main_effects_result.csv") -- confirm which one is correct.
    ParallelLogger::logInfo("- cm_main_effect_result table")
    keyCol <- "estimate"
    valueCol <- "value"
    gatherCols <- names(mainEffects)[5:length(names(mainEffects))]
    # NOTE(review): tidyr::gather_() is deprecated; pivot_longer() is the
    # modern equivalent.
    longTable <- tidyr::gather_(mainEffects, keyCol, valueCol, gatherCols)
    # Split "<estimate>I<covariateId>" into the estimate name and the
    # numeric covariate ID.
    longTable$label <- as.numeric(sub(".*I", "", longTable$estimate))
    longTable$estimate <- sub("I.*", "", longTable$estimate)
    # NOTE(review): uniqueCovariates is never used below.
    uniqueCovariates <- unique(longTable$label)
    mainEffects <- tidyr::spread(longTable, estimate, value)
    mainEffects <- mainEffects[!is.na(mainEffects$logRr),]
    # NOTE(review): ci95lb/ci95ub are stored on the log scale here (to match
    # the log-scale coefficient) despite the column names -- confirm
    # downstream consumers expect log-scale bounds.
    mainEffects <- data.frame(
      databaseId = databaseId,
      analysisId = mainEffects$analysisId,
      targetId = mainEffects$targetId,
      comparatorId = mainEffects$comparatorId,
      outcomeId = mainEffects$outcomeId,
      covariateId = mainEffects$label,
      coefficient = mainEffects$logRr,
      ci95lb = log(mainEffects$ci95lb),
      ci95ub = log(mainEffects$ci95ub),
      se = mainEffects$seLogRr
    )
    colnames(mainEffects) <- SqlRender::camelCaseToSnakeCase(colnames(mainEffects))
    fileName <- file.path(exportFolder, "cm_main_effects_result.csv")
    write.csv(mainEffects, fileName, row.names = FALSE)
    rm(mainEffects) # Free up memory
  }
  ParallelLogger::logInfo("- cm_interaction_result table")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  # Extracts subgroup interaction estimates (if any) from one outcome model.
  loadInteractionsFromOutcomeModel <- function(i) {
    outcomeModel <- readRDS(file.path(outputFolder,
                                      "cmOutput",
                                      reference$outcomeModelFile[i]))
    if ("subgroupCounts" %in% names(outcomeModel)) {
      # One row per subgroup covariate; estimates filled in below when the
      # model actually produced them.
      rows <- tibble::tibble(targetId = reference$targetId[i],
                             comparatorId = reference$comparatorId[i],
                             outcomeId = reference$outcomeId[i],
                             analysisId = reference$analysisId[i],
                             interactionCovariateId = outcomeModel$subgroupCounts$subgroupCovariateId,
                             rrr = NA,
                             ci95Lb = NA,
                             ci95Ub = NA,
                             p = NA,
                             i2 = NA,
                             logRrr = NA,
                             seLogRrr = NA,
                             targetSubjects = outcomeModel$subgroupCounts$targetPersons,
                             comparatorSubjects = outcomeModel$subgroupCounts$comparatorPersons,
                             targetDays = outcomeModel$subgroupCounts$targetDays,
                             comparatorDays = outcomeModel$subgroupCounts$comparatorDays,
                             targetOutcomes = outcomeModel$subgroupCounts$targetOutcomes,
                             comparatorOutcomes = outcomeModel$subgroupCounts$comparatorOutcomes)
      if ("outcomeModelInteractionEstimates" %in% names(outcomeModel)) {
        idx <- match(outcomeModel$outcomeModelInteractionEstimates$covariateId,
                     rows$interactionCovariateId)
        rows$rrr[idx] <- exp(outcomeModel$outcomeModelInteractionEstimates$logRr)
        rows$ci95Lb[idx] <- exp(outcomeModel$outcomeModelInteractionEstimates$logLb95)
        rows$ci95Ub[idx] <- exp(outcomeModel$outcomeModelInteractionEstimates$logUb95)
        rows$logRrr[idx] <- outcomeModel$outcomeModelInteractionEstimates$logRr
        rows$seLogRrr[idx] <- outcomeModel$outcomeModelInteractionEstimates$seLogRr
        # Two-sided p-value from the normal approximation.
        z <- rows$logRrr[idx]/rows$seLogRrr[idx]
        rows$p[idx] <- 2 * pmin(pnorm(z), 1 - pnorm(z))
      }
      return(rows)
    } else {
      return(NULL)
    }
  }
  interactions <- plyr::llply(1:nrow(reference),
                              loadInteractionsFromOutcomeModel,
                              .progress = "text")
  interactions <- bind_rows(interactions)
  if (nrow(interactions) > 0) {
    ParallelLogger::logInfo(" Performing empirical calibration on interaction effects")
    allControls <- getAllControls(outputFolder)
    negativeControls <- allControls[allControls$targetEffectSize == 1, ]
    cluster <- ParallelLogger::makeCluster(min(4, maxCores))
    subsets <- split(interactions,
                     paste(interactions$targetId, interactions$comparatorId, interactions$analysisId))
    interactions <- ParallelLogger::clusterApply(cluster,
                                                 subsets,
                                                 calibrateInteractions,
                                                 negativeControls = negativeControls)
    ParallelLogger::stopCluster(cluster)
    rm(subsets) # Free up memory
    interactions <- bind_rows(interactions)
    interactions$databaseId <- databaseId
    interactions <- enforceMinCellValue(interactions, "targetSubjects", minCellCount)
    interactions <- enforceMinCellValue(interactions, "comparatorSubjects", minCellCount)
    interactions <- enforceMinCellValue(interactions, "targetOutcomes", minCellCount)
    interactions <- enforceMinCellValue(interactions, "comparatorOutcomes", minCellCount)
    colnames(interactions) <- SqlRender::camelCaseToSnakeCase(colnames(interactions))
    fileName <- file.path(exportFolder, "cm_interaction_result.csv")
    readr::write_csv(interactions, fileName)
    rm(interactions) # Free up memory
  }
}
# Calibrates one subset (a single target-comparator-analysis combination) of
# the analysis summary. P-values are calibrated against the empirical null
# fitted on negative-control estimates; confidence intervals against a
# systematic error model fitted on all controls. Calibration is skipped
# (NA filled in) when fewer than 6 usable control estimates are available.
# Returns the subset reduced and renamed to the cohort_method_result schema.
calibrate <- function(subset, allControls) {
  # Negative controls: controls with a true effect size of exactly 1.
  ncs <- subset[subset$outcomeId %in% allControls$outcomeId[allControls$targetEffectSize == 1], ]
  ncs <- ncs[!is.na(ncs$seLogRr), ]
  if (nrow(ncs) > 5) {
    null <- EmpiricalCalibration::fitMcmcNull(ncs$logRr, ncs$seLogRr)
    calibratedP <- EmpiricalCalibration::calibrateP(null = null,
                                                    logRr = subset$logRr,
                                                    seLogRr = subset$seLogRr)
    subset$calibratedP <- calibratedP$p
  } else {
    subset$calibratedP <- rep(NA, nrow(subset))
  }
  # Positive controls (targetEffectSize != 1) gate whether the systematic
  # error model can be fitted for CI calibration.
  pcs <- subset[subset$outcomeId %in% allControls$outcomeId[allControls$targetEffectSize != 1], ]
  pcs <- pcs[!is.na(pcs$seLogRr), ]
  if (nrow(pcs) > 5) {
    # The merge keeps only control outcomes (rows that have a
    # targetEffectSize), so the model is fitted on all controls.
    controls <- merge(subset, allControls[, c("targetId", "comparatorId", "outcomeId", "targetEffectSize")])
    model <- EmpiricalCalibration::fitSystematicErrorModel(logRr = controls$logRr,
                                                           seLogRr = controls$seLogRr,
                                                           trueLogRr = log(controls$targetEffectSize),
                                                           estimateCovarianceMatrix = FALSE)
    calibratedCi <- EmpiricalCalibration::calibrateConfidenceInterval(logRr = subset$logRr,
                                                                      seLogRr = subset$seLogRr,
                                                                      model = model)
    subset$calibratedRr <- exp(calibratedCi$logRr)
    subset$calibratedCi95Lb <- exp(calibratedCi$logLb95Rr)
    subset$calibratedCi95Ub <- exp(calibratedCi$logUb95Rr)
    subset$calibratedLogRr <- calibratedCi$logRr
    subset$calibratedSeLogRr <- calibratedCi$seLogRr
  } else {
    subset$calibratedRr <- rep(NA, nrow(subset))
    subset$calibratedCi95Lb <- rep(NA, nrow(subset))
    subset$calibratedCi95Ub <- rep(NA, nrow(subset))
    subset$calibratedLogRr <- rep(NA, nrow(subset))
    subset$calibratedSeLogRr <- rep(NA, nrow(subset))
  }
  # i2 (heterogeneity) is only meaningful for meta-analysis estimates;
  # NA for a single database.
  subset$i2 <- rep(NA, nrow(subset))
  # Select and rename to the cohort_method_result schema ("target" ->
  # "targetSubjects", "eventsTarget" -> "targetOutcomes", etc.).
  subset <- subset[, c("targetId",
                       "comparatorId",
                       "outcomeId",
                       "analysisId",
                       "rr",
                       "ci95lb",
                       "ci95ub",
                       "p",
                       "i2",
                       "logRr",
                       "seLogRr",
                       "target",
                       "comparator",
                       "targetDays",
                       "comparatorDays",
                       "eventsTarget",
                       "eventsComparator",
                       "calibratedP",
                       "calibratedRr",
                       "calibratedCi95Lb",
                       "calibratedCi95Ub",
                       "calibratedLogRr",
                       "calibratedSeLogRr")]
  colnames(subset) <- c("targetId",
                        "comparatorId",
                        "outcomeId",
                        "analysisId",
                        "rr",
                        "ci95Lb",
                        "ci95Ub",
                        "p",
                        "i2",
                        "logRr",
                        "seLogRr",
                        "targetSubjects",
                        "comparatorSubjects",
                        "targetDays",
                        "comparatorDays",
                        "targetOutcomes",
                        "comparatorOutcomes",
                        "calibratedP",
                        "calibratedRr",
                        "calibratedCi95Lb",
                        "calibratedCi95Ub",
                        "calibratedLogRr",
                        "calibratedSeLogRr")
  return(subset)
}
# Empirically calibrate p-values for interaction (subgroup) effect estimates
# using negative-control outcomes, for which the true interaction effect is
# assumed to be null.
#
# subset:           interaction estimates for one target/comparator/analysis;
#                   must contain outcomeId, logRrr and seLogRrr columns.
# negativeControls: data frame of negative-control outcomes (outcomeId column).
#
# Returns `subset` with a calibratedP column appended; the column is NA when
# fewer than six usable negative-control estimates are available.
calibrateInteractions <- function(subset, negativeControls) {
  ncs <- subset[subset$outcomeId %in% negativeControls$outcomeId, ]
  # Plain base-R column access instead of dplyr::pull(ncs, .data$seLogRrr):
  # identical result, no tidy-eval / dplyr dependency for this line.
  ncs <- ncs[!is.na(ncs$seLogRrr), ]
  if (nrow(ncs) > 5) {
    null <- EmpiricalCalibration::fitMcmcNull(ncs$logRrr, ncs$seLogRrr)
    calibratedP <- EmpiricalCalibration::calibrateP(null = null,
                                                    logRr = subset$logRrr,
                                                    seLogRr = subset$seLogRrr)
    subset$calibratedP <- calibratedP$p
  } else {
    subset$calibratedP <- rep(NA, nrow(subset))
  }
  return(subset)
}
# Concatenate all per-comparison likelihood-profile files (profile/prof_*.rds)
# into a single outcome_profile.csv in the export folder. Target, comparator,
# outcome and analysis IDs are parsed back out of each file name.
#
# minCellCount and maxCores are accepted for signature consistency with the
# other export functions but are not used in this one.
exportProfiles <- function(outputFolder,
                           exportFolder,
                           databaseId,
                           minCellCount,
                           maxCores) {
  ParallelLogger::logInfo("Exporting profiles")
  fileName <- file.path(exportFolder, "outcome_profile.csv")
  # Start from a clean output file; rows are appended file-by-file below.
  if (file.exists(fileName)) {
    unlink(fileName)
  }
  first <- TRUE
  profileFolder <- file.path(outputFolder, "profile")
  files <- list.files(profileFolder, pattern = "prof_.*.rds", full.names = TRUE)
  pb <- txtProgressBar(style = 3)
  if (length(files) > 0) {
    for (i in 1:length(files)) {
      # Parse IDs from names of the form prof_t<T>_c<C>[_s<S>|_o<O>]_a<A>.rds
      ids <- gsub("^.*prof_t", "", files[i])
      targetId <- as.numeric(gsub("_c.*", "", ids))
      ids <- gsub("^.*_c", "", ids)
      comparatorId <- as.numeric(gsub("_[aso].*$", "", ids))
      if (grepl("_s", ids)) {
        subgroupId <- as.numeric(gsub("^.*_s", "", gsub("_a[0-9]*.rds", "", ids)))
      } else {
        subgroupId <- NA
      }
      # NOTE(review): subgroupId is parsed but never written to the output.
      if (grepl("_o", ids)) {
        outcomeId <- as.numeric(gsub("^.*_o", "", gsub("_a[0-9]*.rds", "", ids)))
      } else {
        outcomeId <- NA
      }
      ids <- gsub("^.*_a", "", ids)
      analysisId <- as.numeric(gsub(".rds", "", ids))
      profile <- readRDS(files[i])
      profile$targetId <- targetId
      profile$comparatorId <- comparatorId
      profile$outcomeId <- outcomeId
      profile$analysisId <- analysisId
      profile$databaseId <- databaseId
      colnames(profile) <- SqlRender::camelCaseToSnakeCase(colnames(profile))
      # Write the header only for the first file, then append data rows.
      write.table(x = profile,
                  file = fileName,
                  row.names = FALSE,
                  col.names = first,
                  sep = ",",
                  dec = ".",
                  qmethod = "double",
                  append = !first)
      first <- FALSE
      setTxtProgressBar(pb, i/length(files))
    }
  }
  close(pb)
}
# Export study diagnostics to CSV files in the export folder:
#   - covariate_balance.csv      per-covariate balance before/after matching
#   - preference_score_dist.csv  preference-score densities per comparison
#   - propensity_model.csv       propensity model coefficients
#   - kaplan_meier_dist.csv      KM survival curves (computed in parallel)
# Values below minCellCount are masked through enforceMinCellValue().
exportDiagnostics <- function(outputFolder,
                              exportFolder,
                              databaseId,
                              minCellCount,
                              maxCores) {
  ParallelLogger::logInfo("Exporting diagnostics")
  ParallelLogger::logInfo("- covariate_balance table")
  fileName <- file.path(exportFolder, "covariate_balance.csv")
  # Start from a clean file; balance rows are appended one RDS file at a time.
  if (file.exists(fileName)) {
    unlink(fileName)
  }
  first <- TRUE
  balanceFolder <- file.path(outputFolder, "balance")
  files <- list.files(balanceFolder, pattern = "bal_.*.rds", full.names = TRUE)
  pb <- txtProgressBar(style = 3)
  if (length(files) > 0) {
    for (i in 1:length(files)) {
      # Parse IDs from names of the form bal_t<T>_c<C>[_s<S>|_o<O>]_a<A>.rds
      ids <- gsub("^.*bal_t", "", files[i])
      targetId <- as.numeric(gsub("_c.*", "", ids))
      ids <- gsub("^.*_c", "", ids)
      comparatorId <- as.numeric(gsub("_[aso].*$", "", ids))
      if (grepl("_s", ids)) {
        subgroupId <- as.numeric(gsub("^.*_s", "", gsub("_a[0-9]*.rds", "", ids)))
      } else {
        subgroupId <- NA
      }
      if (grepl("_o", ids)) {
        outcomeId <- as.numeric(gsub("^.*_o", "", gsub("_a[0-9]*.rds", "", ids)))
      } else {
        outcomeId <- NA
      }
      ids <- gsub("^.*_a", "", ids)
      analysisId <- as.numeric(gsub(".rds", "", ids))
      balance <- readRDS(files[i])
      # Infer cohort sizes from the per-covariate sum/mean ratio (mean = sum/n),
      # averaged over covariates to smooth rounding noise.
      inferredTargetBeforeSize <- mean(balance$beforeMatchingSumTarget/balance$beforeMatchingMeanTarget,
                                       na.rm = TRUE)
      inferredComparatorBeforeSize <- mean(balance$beforeMatchingSumComparator/balance$beforeMatchingMeanComparator,
                                           na.rm = TRUE)
      inferredTargetAfterSize <- mean(balance$afterMatchingSumTarget/balance$afterMatchingMeanTarget,
                                      na.rm = TRUE)
      inferredComparatorAfterSize <- mean(balance$afterMatchingSumComparator/balance$afterMatchingMeanComparator,
                                          na.rm = TRUE)
      balance$databaseId <- databaseId
      balance$targetId <- targetId
      balance$comparatorId <- comparatorId
      balance$outcomeId <- outcomeId
      balance$analysisId <- analysisId
      balance$interactionCovariateId <- subgroupId
      # Select and positionally rename to the covariate_balance schema.
      balance <- balance[, c("databaseId",
                             "targetId",
                             "comparatorId",
                             "outcomeId",
                             "analysisId",
                             "interactionCovariateId",
                             "covariateId",
                             "beforeMatchingMeanTarget",
                             "beforeMatchingMeanComparator",
                             "beforeMatchingStdDiff",
                             "afterMatchingMeanTarget",
                             "afterMatchingMeanComparator",
                             "afterMatchingStdDiff")]
      colnames(balance) <- c("databaseId",
                             "targetId",
                             "comparatorId",
                             "outcomeId",
                             "analysisId",
                             "interactionCovariateId",
                             "covariateId",
                             "targetMeanBefore",
                             "comparatorMeanBefore",
                             "stdDiffBefore",
                             "targetMeanAfter",
                             "comparatorMeanAfter",
                             "stdDiffAfter")
      balance$targetMeanBefore[is.na(balance$targetMeanBefore)] <- 0
      balance$comparatorMeanBefore[is.na(balance$comparatorMeanBefore)] <- 0
      balance$stdDiffBefore <- round(balance$stdDiffBefore, 3)
      balance$targetMeanAfter[is.na(balance$targetMeanAfter)] <- 0
      balance$comparatorMeanAfter[is.na(balance$comparatorMeanAfter)] <- 0
      # The inferred sizes are scalars, so the is.na() subscript below is a
      # length-1 logical: when the size is NA the whole column becomes 0.
      balance$targetSizeBefore <- inferredTargetBeforeSize
      balance$targetSizeBefore[is.na(inferredTargetBeforeSize)] <- 0
      balance$comparatorSizeBefore <- inferredComparatorBeforeSize
      balance$comparatorSizeBefore[is.na(inferredComparatorBeforeSize)] <- 0
      balance$targetSizeAfter <- inferredTargetAfterSize
      balance$targetSizeAfter[is.na(inferredTargetAfterSize)] <- 0
      balance$comparatorSizeAfter <- inferredComparatorAfterSize
      balance$comparatorSizeAfter[is.na(inferredComparatorAfterSize)] <- 0
      balance$stdDiffAfter <- round(balance$stdDiffAfter, 3)
      # Mask means representing fewer than minCellCount subjects
      # (mean * size < minCellCount  <=>  mean < minCellCount / size).
      balance <- enforceMinCellValue(balance,
                                     "targetMeanBefore",
                                     minCellCount/inferredTargetBeforeSize,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "comparatorMeanBefore",
                                     minCellCount/inferredComparatorBeforeSize,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "targetMeanAfter",
                                     minCellCount/inferredTargetAfterSize,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "comparatorMeanAfter",
                                     minCellCount/inferredComparatorAfterSize,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "targetSizeBefore",
                                     minCellCount,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "targetSizeAfter",
                                     minCellCount,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "comparatorSizeBefore",
                                     minCellCount,
                                     TRUE)
      balance <- enforceMinCellValue(balance,
                                     "comparatorSizeAfter",
                                     minCellCount,
                                     TRUE)
      balance$targetMeanBefore <- round(balance$targetMeanBefore, 3)
      balance$comparatorMeanBefore <- round(balance$comparatorMeanBefore, 3)
      balance$targetMeanAfter <- round(balance$targetMeanAfter, 3)
      balance$comparatorMeanAfter <- round(balance$comparatorMeanAfter, 3)
      balance$targetSizeBefore <- round(balance$targetSizeBefore, 0)
      balance$comparatorSizeBefore <- round(balance$comparatorSizeBefore, 0)
      balance$targetSizeAfter <- round(balance$targetSizeAfter, 0)
      balance$comparatorSizeAfter <- round(balance$comparatorSizeAfter, 0)
      # balance <- balance[balance$targetMeanBefore != 0 & balance$comparatorMeanBefore != 0 & balance$targetMeanAfter !=
      # 0 & balance$comparatorMeanAfter != 0 & balance$stdDiffBefore != 0 & balance$stdDiffAfter !=
      # 0, ]
      balance <- balance[!is.na(balance$targetId), ]
      colnames(balance) <- SqlRender::camelCaseToSnakeCase(colnames(balance))
      # Header on the first file only, then append.
      write.table(x = balance,
                  file = fileName,
                  row.names = FALSE,
                  col.names = first,
                  sep = ",",
                  dec = ".",
                  qmethod = "double",
                  append = !first)
      first <- FALSE
      setTxtProgressBar(pb, i/length(files))
    }
  }
  close(pb)
  ParallelLogger::logInfo("- preference_score_dist table")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  # For one target/comparator/analysis row: load the shared PS file and return
  # the preference-score densities on a fixed 100-point grid, or NULL when the
  # PS file is missing or degenerate.
  preparePlot <- function(row, reference) {
    idx <- reference$analysisId == row$analysisId &
      reference$targetId == row$targetId &
      reference$comparatorId == row$comparatorId
    psFileName <- file.path(outputFolder,
                            "cmOutput",
                            reference$sharedPsFile[idx][1])
    if (file.exists(psFileName)) {
      ps <- readRDS(psFileName)
      if (length(unique(ps$treatment)) == 2 &&
          min(ps$propensityScore) < max(ps$propensityScore)) {
        ps <- CohortMethod:::computePreferenceScore(ps)
        pop1 <- ps$preferenceScore[ps$treatment == 1]
        pop0 <- ps$preferenceScore[ps$treatment == 0]
        # density() needs > 1 point for automatic bandwidth selection;
        # fall back to a fixed bandwidth for single-subject groups.
        bw1 <- ifelse(length(pop1) > 1, "nrd0", 0.1)
        bw0 <- ifelse(length(pop0) > 1, "nrd0", 0.1)
        d1 <- density(pop1, bw = bw1, from = 0, to = 1, n = 100)
        d0 <- density(pop0, bw = bw0, from = 0, to = 1, n = 100)
        result <- tibble::tibble(databaseId = databaseId,
                                 targetId = row$targetId,
                                 comparatorId = row$comparatorId,
                                 analysisId = row$analysisId,
                                 preferenceScore = d1$x,
                                 targetDensity = d1$y,
                                 comparatorDensity = d0$y)
        return(result)
      }
    }
    return(NULL)
  }
  subset <- unique(reference[reference$sharedPsFile != "",
                             c("targetId", "comparatorId", "analysisId")])
  data <- plyr::llply(split(subset, 1:nrow(subset)),
                      preparePlot,
                      reference = reference,
                      .progress = "text")
  data <- do.call("rbind", data)
  fileName <- file.path(exportFolder, "preference_score_dist.csv")
  if (!is.null(data)) {
    colnames(data) <- SqlRender::camelCaseToSnakeCase(colnames(data))
  }
  readr::write_csv(data, fileName)
  ParallelLogger::logInfo("- propensity_model table")
  # For one comparison row: return the fitted PS model coefficients, or NULL
  # when the PS file is missing or the PS fit previously errored.
  getPsModel <- function(row, reference) {
    idx <- reference$analysisId == row$analysisId &
      reference$targetId == row$targetId &
      reference$comparatorId == row$comparatorId
    psFileName <- file.path(outputFolder,
                            "cmOutput",
                            reference$sharedPsFile[idx][1])
    if (file.exists(psFileName)) {
      ps <- readRDS(psFileName)
      metaData <- attr(ps, "metaData")
      if (is.null(metaData$psError)) {
        cmDataFile <- file.path(outputFolder,
                                "cmOutput",
                                reference$cohortMethodDataFile[idx][1])
        cmData <- CohortMethod::loadCohortMethodData(cmDataFile)
        model <- CohortMethod::getPsModel(ps, cmData)
        # Covariate ID 0 denotes the intercept in the export schema.
        model$covariateId[is.na(model$covariateId)] <- 0
        Andromeda::close(cmData)
        model$databaseId <- databaseId
        model$targetId <- row$targetId
        model$comparatorId <- row$comparatorId
        model$analysisId <- row$analysisId
        model <- model[, c("databaseId", "targetId", "comparatorId", "analysisId", "covariateId", "coefficient")]
        return(model)
      }
    }
    return(NULL)
  }
  subset <- unique(reference[reference$sharedPsFile != "",
                             c("targetId", "comparatorId", "analysisId")])
  data <- plyr::llply(split(subset, 1:nrow(subset)),
                      getPsModel,
                      reference = reference,
                      .progress = "text")
  data <- do.call("rbind", data)
  fileName <- file.path(exportFolder, "propensity_model.csv")
  if (!is.null(data)) {
    colnames(data) <- SqlRender::camelCaseToSnakeCase(colnames(data))
  }
  readr::write_csv(data, fileName)
  ParallelLogger::logInfo("- kaplan_meier_dist table")
  ParallelLogger::logInfo("  Computing KM curves")
  reference <- readRDS(file.path(outputFolder, "cmOutput", "outcomeModelReference.rds"))
  outcomesOfInterest <- getOutcomesOfInterest()
  reference <- reference[reference$outcomeId %in% outcomesOfInterest, ]
  reference <- reference[, c("strataFile",
                             "studyPopFile",
                             "targetId",
                             "comparatorId",
                             "outcomeId",
                             "analysisId")]
  # KM data is written to per-comparison RDS files in a temp folder first,
  # then assembled into one CSV below.
  tempFolder <- file.path(exportFolder, "temp")
  if (!file.exists(tempFolder)) {
    dir.create(tempFolder)
  }
  cluster <- ParallelLogger::makeCluster(min(4, maxCores))
  tasks <- split(reference, seq(nrow(reference)))
  ParallelLogger::clusterApply(cluster,
                               tasks,
                               prepareKm,
                               outputFolder = outputFolder,
                               tempFolder = tempFolder,
                               databaseId = databaseId,
                               minCellCount = minCellCount)
  ParallelLogger::stopCluster(cluster)
  ParallelLogger::logInfo("  Writing to single csv file")
  # Append one temp RDS file's rows to the output CSV (header only when first).
  saveKmToCsv <- function(file, first, outputFile) {
    data <- readRDS(file)
    if (!is.null(data)) {
      colnames(data) <- SqlRender::camelCaseToSnakeCase(colnames(data))
    }
    write.table(x = data,
                file = outputFile,
                row.names = FALSE,
                col.names = first,
                sep = ",",
                dec = ".",
                qmethod = "double",
                append = !first)
  }
  outputFile <- file.path(exportFolder, "kaplan_meier_dist.csv")
  files <- list.files(tempFolder, "km_.*.rds", full.names = TRUE)
  if (length(files) > 0) {
    saveKmToCsv(files[1], first = TRUE, outputFile = outputFile)
    if (length(files) > 1) {
      plyr::l_ply(files[2:length(files)], saveKmToCsv, first = FALSE, outputFile = outputFile, .progress = "text")
    }
  }
  unlink(tempFolder, recursive = TRUE)
}
# Compute Kaplan-Meier data for one comparison (one row of the outcome model
# reference) and save it as a temp RDS file for later CSV assembly.
# Returns NULL without writing when the output already exists, the population
# is degenerate, or there are no shared strata.
prepareKm <- function(task,
                      outputFolder,
                      tempFolder,
                      databaseId,
                      minCellCount) {
  ParallelLogger::logTrace("Preparing KM plot for target ",
                           task$targetId,
                           ", comparator ",
                           task$comparatorId,
                           ", outcome ",
                           task$outcomeId,
                           ", analysis ",
                           task$analysisId)
  outputFileName <- file.path(tempFolder, sprintf("km_t%s_c%s_o%s_a%s.rds",
                                                  task$targetId,
                                                  task$comparatorId,
                                                  task$outcomeId,
                                                  task$analysisId))
  # Skip work already done (makes reruns cheap after interruption).
  if (file.exists(outputFileName)) {
    return(NULL)
  }
  # Prefer the stratified population; fall back to the plain study population.
  popFile <- task$strataFile
  if (popFile == "") {
    popFile <- task$studyPopFile
  }
  population <- readRDS(file.path(outputFolder,
                                  "cmOutput",
                                  popFile))
  if (nrow(population) == 0 || length(unique(population$treatment)) != 2) {
    # Can happen when matching and treatment is predictable
    return(NULL)
  }
  data <- prepareKaplanMeier(population)
  if (is.null(data)) {
    # No shared strata
    return(NULL)
  }
  data$targetId <- task$targetId
  data$comparatorId <- task$comparatorId
  data$outcomeId <- task$outcomeId
  data$analysisId <- task$analysisId
  data$databaseId <- databaseId
  # Mask small at-risk counts before export.
  data <- enforceMinCellValue(data, "targetAtRisk", minCellCount)
  data <- enforceMinCellValue(data, "comparatorAtRisk", minCellCount)
  saveRDS(data, outputFileName)
}
# Build the Kaplan-Meier survival table (target and comparator curves plus
# at-risk counts) for a study population.
#
# Three estimation paths:
#   - unstratified or 1:1-matched data: plain survival::survfit per arm;
#   - variable-ratio matching (every target stratum has one subject): IPTW
#     weights derived from the stratum-mean propensity score;
#   - stratification: weights inferred from target/comparator stratum counts.
# Curves are truncated at the 90th percentile of survival time. Returns NULL
# when weighting is impossible (PS ~ 1, or no shared strata).
prepareKaplanMeier <- function(population) {
  dataCutoff <- 0.9
  # Binary event indicator from the outcome count.
  population$y <- 0
  population$y[population$outcomeCount != 0] <- 1
  # No strata, or exactly 2 subjects per stratum (1:1 matching): unweighted KM.
  if (is.null(population$stratumId) || length(unique(population$stratumId)) == nrow(population)/2) {
    sv <- survival::survfit(survival::Surv(survivalTime, y) ~ treatment, population, conf.int = TRUE)
    idx <- summary(sv, censored = T)$strata == "treatment=1"
    survTarget <- tibble::tibble(time = sv$time[idx],
                                 targetSurvival = sv$surv[idx],
                                 targetSurvivalLb = sv$lower[idx],
                                 targetSurvivalUb = sv$upper[idx])
    idx <- summary(sv, censored = T)$strata == "treatment=0"
    survComparator <- tibble::tibble(time = sv$time[idx],
                                     comparatorSurvival = sv$surv[idx],
                                     comparatorSurvivalLb = sv$lower[idx],
                                     comparatorSurvivalUb = sv$upper[idx])
    data <- merge(survTarget, survComparator, all = TRUE)
  } else {
    population$stratumSizeT <- 1
    strataSizesT <- aggregate(stratumSizeT ~ stratumId, population[population$treatment == 1, ], sum)
    if (max(strataSizesT$stratumSizeT) == 1) {
      # variable ratio matching: use propensity score to compute IPTW
      if (is.null(population$propensityScore)) {
        stop("Variable ratio matching detected, but no propensity score found")
      }
      weights <- aggregate(propensityScore ~ stratumId, population, mean)
      # PS of ~1 would make the odds weight blow up; bail out instead.
      if (max(weights$propensityScore) > 0.99999) {
        return(NULL)
      }
      weights$weight <- weights$propensityScore / (1 - weights$propensityScore)
    } else {
      # stratification: infer probability of treatment from subject counts
      strataSizesC <- aggregate(stratumSizeT ~ stratumId, population[population$treatment == 0, ], sum)
      colnames(strataSizesC)[2] <- "stratumSizeC"
      weights <- merge(strataSizesT, strataSizesC)
      if (nrow(weights) == 0) {
        warning("No shared strata between target and comparator")
        return(NULL)
      }
      weights$weight <- weights$stratumSizeT/weights$stratumSizeC
    }
    # Weight comparator subjects only; target subjects keep weight 1.
    population <- merge(population, weights[, c("stratumId", "weight")])
    population$weight[population$treatment == 1] <- 1
    idx <- population$treatment == 1
    survTarget <- CohortMethod:::adjustedKm(weight = population$weight[idx],
                                            time = population$survivalTime[idx],
                                            y = population$y[idx])
    # Log-log (complementary log) confidence limits around the weighted KM
    # estimate s with variance var.
    survTarget$targetSurvivalUb <- survTarget$s^exp(qnorm(0.975)/log(survTarget$s) * sqrt(survTarget$var)/survTarget$s)
    survTarget$targetSurvivalLb <- survTarget$s^exp(qnorm(0.025)/log(survTarget$s) * sqrt(survTarget$var)/survTarget$s)
    # Near s = 1, log(s) -> 0 makes the bound unstable; pin it to s.
    survTarget$targetSurvivalLb[survTarget$s > 0.9999] <- survTarget$s[survTarget$s > 0.9999]
    survTarget$targetSurvival <- survTarget$s
    survTarget$s <- NULL
    survTarget$var <- NULL
    idx <- population$treatment == 0
    survComparator <- CohortMethod:::adjustedKm(weight = population$weight[idx],
                                                time = population$survivalTime[idx],
                                                y = population$y[idx])
    survComparator$comparatorSurvivalUb <- survComparator$s^exp(qnorm(0.975)/log(survComparator$s) *
                                                                  sqrt(survComparator$var)/survComparator$s)
    survComparator$comparatorSurvivalLb <- survComparator$s^exp(qnorm(0.025)/log(survComparator$s) *
                                                                  sqrt(survComparator$var)/survComparator$s)
    survComparator$comparatorSurvivalLb[survComparator$s > 0.9999] <- survComparator$s[survComparator$s >
                                                                                         0.9999]
    survComparator$comparatorSurvival <- survComparator$s
    survComparator$s <- NULL
    survComparator$var <- NULL
    data <- merge(survTarget, survComparator, all = TRUE)
  }
  data <- data[, c("time", "targetSurvival", "targetSurvivalLb", "targetSurvivalUb", "comparatorSurvival", "comparatorSurvivalLb", "comparatorSurvivalUb")]
  # Truncate the curve at the 90th percentile of survival time.
  cutoff <- quantile(population$survivalTime, dataCutoff)
  data <- data[data$time <= cutoff, ]
  # Choose a tick spacing appropriate to the follow-up length.
  if (cutoff <= 300) {
    xBreaks <- seq(0, cutoff, by = 50)
  } else if (cutoff <= 600) {
    xBreaks <- seq(0, cutoff, by = 100)
  } else {
    xBreaks <- seq(0, cutoff, by = 250)
  }
  # Number at risk at each tick.
  targetAtRisk <- c()
  comparatorAtRisk <- c()
  for (xBreak in xBreaks) {
    targetAtRisk <- c(targetAtRisk,
                      sum(population$treatment == 1 & population$survivalTime >= xBreak))
    comparatorAtRisk <- c(comparatorAtRisk,
                          sum(population$treatment == 0 & population$survivalTime >=
                                xBreak))
  }
  data <- merge(data, tibble::tibble(time = xBreaks,
                                     targetAtRisk = targetAtRisk,
                                     comparatorAtRisk = comparatorAtRisk), all = TRUE)
  # Survival starts at 1 at time zero when no event/censoring occurred there.
  if (is.na(data$targetSurvival[1])) {
    data$targetSurvival[1] <- 1
    data$targetSurvivalUb[1] <- 1
    data$targetSurvivalLb[1] <- 1
  }
  if (is.na(data$comparatorSurvival[1])) {
    data$comparatorSurvival[1] <- 1
    data$comparatorSurvivalUb[1] <- 1
    data$comparatorSurvivalLb[1] <- 1
  }
  # Carry the last observed value forward into rows added for the tick grid.
  idx <- which(is.na(data$targetSurvival))
  while (length(idx) > 0) {
    data$targetSurvival[idx] <- data$targetSurvival[idx - 1]
    data$targetSurvivalLb[idx] <- data$targetSurvivalLb[idx - 1]
    data$targetSurvivalUb[idx] <- data$targetSurvivalUb[idx - 1]
    idx <- which(is.na(data$targetSurvival))
  }
  idx <- which(is.na(data$comparatorSurvival))
  while (length(idx) > 0) {
    data$comparatorSurvival[idx] <- data$comparatorSurvival[idx - 1]
    data$comparatorSurvivalLb[idx] <- data$comparatorSurvivalLb[idx - 1]
    data$comparatorSurvivalUb[idx] <- data$comparatorSurvivalUb[idx - 1]
    idx <- which(is.na(data$comparatorSurvival))
  }
  data$targetSurvival <- round(data$targetSurvival, 4)
  data$targetSurvivalLb <- round(data$targetSurvivalLb, 4)
  data$targetSurvivalUb <- round(data$targetSurvivalUb, 4)
  data$comparatorSurvival <- round(data$comparatorSurvival, 4)
  data$comparatorSurvivalLb <- round(data$comparatorSurvivalLb, 4)
  data$comparatorSurvivalUb <- round(data$comparatorSurvivalUb, 4)
  # Remove duplicate (except time) entries:
  data <- data[order(data$time), ]
  data <- data[!duplicated(data[, -1]), ]
  return(data)
}
|
#' @importFrom data.table data.table := setDF setorder .N
#' @importFrom stats na.omit
# Summarize a binned predictor/response table into per-bin binning statistics:
# counts, cumulative counts, good/bad rates and distributions, weight of
# evidence (WoE), per-bin information value (IV) contribution, and entropy.
#
# bm: data frame with columns `bin`, `predictor` and a 0/1 `response`
#     (response == 1 counted as "good", response == 0 as "bad").
#
# Returns a plain data.frame (one row per bin) with the summary columns.
bin_create <- function(bm) {
  bm <- data.table(bm)
  setorder(bm, predictor) # sort
  # group and summarize
  bm_group <- bm[, .(bin_count = .N,
                     good = sum(response == 1),
                     bad = sum(response == 0)),
                 by = bin]
  # create new columns
  bm_group[, ':='(bin_cum_count = cumsum(bin_count),
                  good_cum_count = cumsum(good),
                  bad_cum_count = cumsum(bad),
                  bin_prop = bin_count / sum(bin_count),
                  good_rate = good / bin_count,
                  bad_rate = bad / bin_count,
                  good_dist = good / sum(good),
                  bad_dist = bad / sum(bad))]
  # NOTE(review): a bin with zero goods or zero bads yields infinite WoE and
  # NaN entropy (log of zero) -- presumably upstream binning avoids empty
  # classes per bin; confirm.
  bm_group[, woe := log(bad_dist / good_dist)]
  bm_group[, dist_diff := bad_dist - good_dist,]
  bm_group[, iv := dist_diff * woe,]
  bm_group[, entropy := (-1) * (((good / bin_count) * log2(good / bin_count)) +
                                  ((bad / bin_count) * log2(bad / bin_count)))]
  bm_group[, prop_entropy := (bin_count / sum(bin_count)) * entropy]
  # Convert back to a base data.frame for callers.
  setDF(bm_group)
  return(bm_group)
}
# Build the final cut-point vector for frequency binning: drop the last
# element of `u_freq`, then repeat the new last element so the result keeps
# the original length (empty input of length 1 yields an empty vector).
f_bin <- function(u_freq) {
  trimmed <- u_freq[-length(u_freq)]
  c(trimmed, trimmed[length(trimmed)])
}
# Pair each comparison symbol with its cut point, label the pair as
# "<sign> <cut point>" (e.g. "< 3.5"), and return just the label column as a
# one-column data frame named cut_point.
create_intervals <- function(sym_sign, fbin2) {
  interval_df <- data.frame(sym_sign, fbin2)
  interval_df$cut_point <- paste(interval_df$sym_sign, interval_df$fbin2)
  interval_df["cut_point"]
}
# Attach a precomputed frequency-based bin assignment to the data, then
# delegate to bin_create() for the per-bin summary statistics.
freq_bin_create <- function(bm, bin_rep) {
  bm[["bin"]] <- bin_rep
  bin_create(bm)
}
# Plot the weight-of-evidence (WoE) trend across bins from x$bins: a blue
# line through the per-bin WoE values with red points, and one x-axis break
# per bin. Returns the ggplot object.
plot_bins <- function(x) {
  bins_df <- x$bins
  n_bins <- nrow(bins_df)
  ggplot2::ggplot(data = bins_df) +
    ggplot2::geom_line(ggplot2::aes(x = bin, y = woe), color = "blue") +
    ggplot2::geom_point(ggplot2::aes(x = bin, y = woe), color = "red") +
    ggplot2::xlab("Bins") +
    ggplot2::ylab("WoE") +
    ggplot2::ggtitle("WoE Trend") +
    ggplot2::scale_x_continuous(breaks = seq(n_bins))
}
#' @importFrom utils packageVersion menu install.packages
# Ensure an optional (Suggests) package is installed before using features
# that depend on it. Interactively, offers to install the missing package;
# otherwise (or on refusal) stops with an informative error.
#
# Bug fix: the original did `msg <- message(...)`; message() returns
# invisible NULL, so the subsequent message()/stop() calls carried an empty
# message. The text is now built with paste0() and passed explicitly.
check_suggests <- function(pkg) {
  # NA when packageVersion() errors, i.e. the package is not installed.
  pkg_flag <- tryCatch(utils::packageVersion(pkg), error = function(e) NA)
  if (is.na(pkg_flag)) {
    msg <- paste0('\n', pkg, ' must be installed for this functionality.')
    if (interactive()) {
      message(msg, "\nWould you like to install it?")
      if (utils::menu(c("Yes", "No")) == 1) {
        utils::install.packages(pkg)
      } else {
        stop(msg, call. = FALSE)
      }
    } else {
      stop(msg, call. = FALSE)
    }
  }
}
#' @importFrom stats quantile
#' @importFrom utils head tail
# Winsorize a numeric vector.
#
# With the default `min_val = NULL`, values are clamped to the quantiles
# given by `probs` (computed via stats::quantile with `type` and `na.rm`).
# When `min_val` is supplied, `max_val` must be too, and both are treated as
# *counts*: the largest of the `min_val` smallest sorted values and the
# smallest of the `max_val` largest sorted values become the clamping bounds.
#
# Returns `x` with out-of-bound values replaced by the bounds; NAs are left
# untouched by the clamping.
winsor <- function(x, min_val = NULL, max_val = NULL, probs = c(0.05, 0.95),
                   na.rm = TRUE, type = 7) {
  if (is.null(min_val)) {
    bounds <- quantile(x, probs = probs, type = type, na.rm = na.rm)
    x[x > bounds[[2]]] <- bounds[[2]]
    x[x < bounds[[1]]] <- bounds[[1]]
  } else {
    if (is.null(max_val)) {
      stop("Argument max_val is missing.")
    }
    sorted_x <- sort(x)
    lower_bound <- max(head(sorted_x, min_val))
    upper_bound <- min(tail(sorted_x, max_val))
    x[x < lower_bound] <- lower_bound
    x[x > upper_bound] <- upper_bound
  }
  x
}
| /R/utils.R | permissive | statunizaga/rbin | R | false | false | 3,207 | r | #' @importFrom data.table data.table := setDF setorder .N
#' @importFrom stats na.omit
# Summarize a binned predictor/response table into per-bin binning statistics:
# counts, cumulative counts, good/bad rates and distributions, weight of
# evidence (WoE), per-bin information value (IV) contribution, and entropy.
#
# bm: data frame with columns `bin`, `predictor` and a 0/1 `response`
#     (response == 1 counted as "good", response == 0 as "bad").
#
# Returns a plain data.frame (one row per bin) with the summary columns.
bin_create <- function(bm) {
  bm <- data.table(bm)
  setorder(bm, predictor) # sort
  # group and summarize
  bm_group <- bm[, .(bin_count = .N,
                     good = sum(response == 1),
                     bad = sum(response == 0)),
                 by = bin]
  # create new columns
  bm_group[, ':='(bin_cum_count = cumsum(bin_count),
                  good_cum_count = cumsum(good),
                  bad_cum_count = cumsum(bad),
                  bin_prop = bin_count / sum(bin_count),
                  good_rate = good / bin_count,
                  bad_rate = bad / bin_count,
                  good_dist = good / sum(good),
                  bad_dist = bad / sum(bad))]
  # NOTE(review): a bin with zero goods or zero bads yields infinite WoE and
  # NaN entropy (log of zero) -- presumably upstream binning avoids empty
  # classes per bin; confirm.
  bm_group[, woe := log(bad_dist / good_dist)]
  bm_group[, dist_diff := bad_dist - good_dist,]
  bm_group[, iv := dist_diff * woe,]
  bm_group[, entropy := (-1) * (((good / bin_count) * log2(good / bin_count)) +
                                  ((bad / bin_count) * log2(bad / bin_count)))]
  bm_group[, prop_entropy := (bin_count / sum(bin_count)) * entropy]
  # Convert back to a base data.frame for callers.
  setDF(bm_group)
  return(bm_group)
}
# Build the final cut-point vector for frequency binning: drop the last
# element of `u_freq`, then repeat the new last element so the result keeps
# the original length (input of length 1 yields an empty vector).
f_bin <- function(u_freq) {
  trimmed <- u_freq[-length(u_freq)]
  c(trimmed, trimmed[length(trimmed)])
}
# Pair each comparison symbol with its cut point, label the pair as
# "<sign> <cut point>" (e.g. "< 3.5"), and return just the label column as a
# one-column data frame named cut_point.
create_intervals <- function(sym_sign, fbin2) {
  interval_df <- data.frame(sym_sign, fbin2)
  interval_df$cut_point <- paste(interval_df$sym_sign, interval_df$fbin2)
  interval_df["cut_point"]
}
# Attach a precomputed frequency-based bin assignment to the data, then
# delegate to bin_create() for the per-bin summary statistics.
freq_bin_create <- function(bm, bin_rep) {
  bm[["bin"]] <- bin_rep
  bin_create(bm)
}
# Plot the weight-of-evidence (WoE) trend across bins from x$bins: a blue
# line through the per-bin WoE values with red points, and one x-axis break
# per bin. Returns the ggplot object.
plot_bins <- function(x) {
  bins_df <- x$bins
  n_bins <- nrow(bins_df)
  ggplot2::ggplot(data = bins_df) +
    ggplot2::geom_line(ggplot2::aes(x = bin, y = woe), color = "blue") +
    ggplot2::geom_point(ggplot2::aes(x = bin, y = woe), color = "red") +
    ggplot2::xlab("Bins") +
    ggplot2::ylab("WoE") +
    ggplot2::ggtitle("WoE Trend") +
    ggplot2::scale_x_continuous(breaks = seq(n_bins))
}
#' @importFrom utils packageVersion menu install.packages
# Ensure an optional (Suggests) package is installed before using features
# that depend on it. Interactively, offers to install the missing package;
# otherwise (or on refusal) stops with an informative error.
#
# Bug fix: the original did `msg <- message(...)`; message() returns
# invisible NULL, so the subsequent message()/stop() calls carried an empty
# message. The text is now built with paste0() and passed explicitly.
check_suggests <- function(pkg) {
  # NA when packageVersion() errors, i.e. the package is not installed.
  pkg_flag <- tryCatch(utils::packageVersion(pkg), error = function(e) NA)
  if (is.na(pkg_flag)) {
    msg <- paste0('\n', pkg, ' must be installed for this functionality.')
    if (interactive()) {
      message(msg, "\nWould you like to install it?")
      if (utils::menu(c("Yes", "No")) == 1) {
        utils::install.packages(pkg)
      } else {
        stop(msg, call. = FALSE)
      }
    } else {
      stop(msg, call. = FALSE)
    }
  }
}
#' @importFrom stats quantile
#' @importFrom utils head tail
# Winsorize a numeric vector.
#
# With the default `min_val = NULL`, values are clamped to the quantiles
# given by `probs` (computed via stats::quantile with `type` and `na.rm`).
# When `min_val` is supplied, `max_val` must be too, and both are treated as
# *counts*: the largest of the `min_val` smallest sorted values and the
# smallest of the `max_val` largest sorted values become the clamping bounds.
#
# Returns `x` with out-of-bound values replaced by the bounds; NAs are left
# untouched by the clamping.
winsor <- function(x, min_val = NULL, max_val = NULL, probs = c(0.05, 0.95),
                   na.rm = TRUE, type = 7) {
  if (is.null(min_val)) {
    bounds <- quantile(x, probs = probs, type = type, na.rm = na.rm)
    x[x > bounds[[2]]] <- bounds[[2]]
    x[x < bounds[[1]]] <- bounds[[1]]
  } else {
    if (is.null(max_val)) {
      stop("Argument max_val is missing.")
    }
    sorted_x <- sort(x)
    lower_bound <- max(head(sorted_x, min_val))
    upper_bound <- min(tail(sorted_x, max_val))
    x[x < lower_bound] <- lower_bound
    x[x > upper_bound] <- upper_bound
  }
  x
}
|
################################
################################
##Three functions in here:
#### 1) test.gamma for finding the ML estimate for the grand lambda
#### 2) make.Z for converting a matrix to the Z-scale
#### 3) update.UDV for updating the ideal point estimates
#### *) Small additional ones at the end
################################
################################
################################
################################
##1) Finding gamma
#test.gamma.pois_EM<-function(gamma.try,Theta.last.0=Theta.last[row.type=="count",],votes.mat.0=votes.mat[row.type=="count",],emp.cdf.0,cutoff.seq=NULL){
# Penalized deviance of candidate cutpoint parameters for mapping observed
# counts onto a latent-normal (ordinal probit) scale via power-law cutpoints:
#   tau(c) = qnorm(P(count == 0)) + gamma1 * c^gamma2   for counts c >= 0.
#
# gamma.try:    parameter pair on the log scale (exponentiated below).
# Theta.last.0: current latent means for the count rows.
# votes.mat.0:  observed count matrix for those rows.
# emp.cdf.0:    accepted but unused in this implementation.
# cutoff.seq:   optional explicit count grid; defaults to -1..max(count)+2.
#
# Returns list(deviance = -2*loglik + ridge penalty on log(gamma),
#              tau = sorted cutpoints).
test.gamma.pois_EM<-function(gamma.try,Theta.last.0,votes.mat.0,emp.cdf.0,cutoff.seq=NULL){
  gamma.try<-exp(gamma.try)
  votes.mat<-votes.mat.0
  taus.try<-NULL
  count.seq<-cutoff.seq
  if(length(cutoff.seq)==0) count.seq<-seq(-1,max(votes.mat)+2,1)
  taus.try<-count.seq*0
  analytic.cdf<-count.seq
  # Intercept anchored so P(latent < first cutpoint) matches the observed
  # proportion of zero counts.
  a.int<-qnorm(mean(votes.mat==0))
  taus.try[count.seq>=0]<-(a.int+gamma.try[1]*count.seq[count.seq>=0]^(gamma.try[2]))
  taus.try[count.seq<0]<- -Inf
  taus.try<-sort(taus.try)
  taus.try[1]<--Inf
  # Approximation to log(pnorm(a) - pnorm(b)) used when the direct difference
  # underflows to zero. NOTE(review): the coefficients look like an offline
  # regression fit -- confirm their source and accuracy range.
  find.pnorm<-function(x){
    a<-x[1]
    b<-x[2]
    coefs<-c( -1.82517672, 0.51283415, -0.81377290, -0.02699400, -0.49642787, -0.33379312, -0.24176661, 0.03776971)
    x<-c(1, a, b, a^2,b^2, log(abs(a-b)),log(abs(a-b))^2,a*b)
    (sum(x*coefs))
  }
  # Interval likelihood P(tau_c < Z < tau_{c+1}) for each observed count c
  # (the +2/+1 offsets skip the leading -Inf entry of taus.try).
  lik<-((pnorm(taus.try[votes.mat+2 ]-Theta.last.0)-pnorm(taus.try[votes.mat+1 ]-Theta.last.0)) )
  log.lik<-log(lik)
  # Cells whose probability underflowed: use the regression approximation.
  which.zero<-which(lik==0)
  a0<-taus.try[votes.mat[which.zero]+2]-Theta.last.0[which.zero]
  b0<-taus.try[votes.mat[which.zero]+1]-Theta.last.0[which.zero]
  log.lik[which.zero]<-apply(cbind(a0,b0),1,find.pnorm)
  # Clamp infinities and extreme values so the deviance stays finite.
  log.lik[is.infinite(lik)&lik>0]<-max(log.lik[is.finite(lik)])
  log.lik[is.infinite(lik)&lik<0]<-min(log.lik[is.finite(lik)])
  thresh<-min(-1e30, min(log.lik[is.finite(log.lik)],na.rm=TRUE))
  log.lik[log.lik<thresh]<-thresh
  # Ridge-style penalty on log(gamma) discourages extreme cutpoint parameters.
  dev.out<--2*sum(log.lik,na.rm=TRUE)+sum(log(gamma.try)^2)
  return(list("deviance"=dev.out,"tau"=taus.try))
}
################################
################################
##2) Converting a matrix to the Z scale
# E-step of the EM algorithm: convert the observed data matrix to the latent
# normal (Z) scale given the current ideal-point estimates Theta.last.0.
# Rows are handled by type:
#   "bin"   binary votes    -> truncated-normal conditional means
#   "ord"   3-level ordinal -> truncated-normal means; samples a new ordinal
#                              cutpoint tau.ord each call
#   "count" counts          -> cutpoints from test.gamma.pois_EM, with the
#                              gamma parameters re-optimized (M step)
# NOTE(review): several defaults reference globals (Theta.last, votes.mat,
# n, k, ...) -- callers are expected to pass every argument explicitly.
# Returns list(Z.next, params, accept, prob, proposal.sd, step.size, tau.ord).
make.Z_EM<-function(
  Theta.last.0=Theta.last,
  votes.mat.0=votes.mat,row.type.0=row.type,
  n0=n, k0=k, params=NULL, iter.curr=0,empir=NULL,cutoff.seq.0=NULL,missing.mat.0=NULL,lambda.lasso,proposal.sd,scale.sd, max.optim,step.size,maxdim.0,tau.ord.0
){
  tau.ord<-tau.ord.0
  Theta.last<-Theta.last.0;votes.mat<-votes.mat.0;
  row.type<-row.type.0; n<-n0; k<-k0
  cutoff.seq<-cutoff.seq.0
  sigma<-1
  row.type<-row.type.0; votes.mat<-votes.mat.0
  Z.next<-matrix(NA,nrow=n,ncol=k)
  missing.mat<-missing.mat.0
  i.gibbs<-iter.curr
  maxdim<-maxdim.0
  #print(i.gibbs)
  #print(iter.curr)
  # Conditional mean of a normal with mean `means`, truncated to (a, b)
  # (standard truncated-normal mean formula).
  estep.bin<-function(means,a,b){
    means+(dnorm(a-means)-dnorm(b-means))/(pnorm(b-means)-pnorm(a- means))
  }
  if(sum(row.type=="bin")>0){
    # Truncation limits: vote 1 -> (0, Inf); vote 0 -> (-Inf, 0);
    # 0.5 (treated as missing/abstain) -> (-Inf, Inf), i.e. untruncated.
    toplimit.mat<-bottomlimit.mat<-votes.mat[row.type=="bin",]*0
    toplimit.mat[votes.mat[row.type=="bin",]==1]<-Inf
    toplimit.mat[votes.mat[row.type=="bin",]==0]<-0
    toplimit.mat[votes.mat[row.type=="bin",]==0.5]<-Inf
    bottomlimit.mat[votes.mat[row.type=="bin",]==1]<-0
    bottomlimit.mat[votes.mat[row.type=="bin",]==0]<--Inf
    bottomlimit.mat[votes.mat[row.type=="bin",]==0.5]<- -Inf
    Z.next[row.type=="bin",]<-
      estep.bin(means=Theta.last[row.type=="bin",],a=bottomlimit.mat,b=toplimit.mat)
    # Guard against 0/0 and overflow in the truncated-mean formula.
    Z.next[row.type=="bin",][is.na(Z.next[row.type=="bin",])]<-0
    Z.next[row.type=="bin",][is.infinite(Z.next[row.type=="bin",])]<-0
    pars.max<-1
    accept.out<-prob.accept<-1
  }
  if(sum(row.type=="ord")>0){
    #Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==-2]<-rtnorm(sum(votes.mat[row.type=="ord",]==-2), mean=sigma^.5*Theta.last[row.type=="ord",][votes.mat[row.type=="ord",]==-2], lower=tau.ord, sd=sigma^.5)
    #Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==-1]<-rtnorm(sum(votes.mat[row.type=="ord",]==-1), mean=sigma^.5*Theta.last[row.type=="ord",][votes.mat[row.type=="ord",]==-1], lower=0,upper=tau.ord, sd=sigma^.5)
    #Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==0]<-rtnorm(sum(votes.mat[row.type=="ord",]==0), mean=sigma^.5*Theta.last[row.type=="ord",][votes.mat[row.type=="ord",]==0], upper=0 , sd=sigma^.5)
    #Z.next[row.type=="ord",][missing.mat[row.type=="ord",]==1]<-rnorm(sum(missing.mat[row.type=="ord",]==1), mean=sigma^.5*Theta.last[row.type=="ord",][missing.mat[row.type=="ord",]==1], sd=sigma^.5)
    # Truncation intervals for the three ordered categories coded -2/-1/0.
    lower.mat<-upper.mat<-matrix(0,nrow=nrow(votes.mat[row.type=="ord",]), ncol=ncol(votes.mat))
    ##Top category
    lower.mat[votes.mat[row.type=="ord",]==-2]<- tau.ord
    upper.mat[votes.mat[row.type=="ord",]==-2]<- Inf
    ##Middle category
    lower.mat[votes.mat[row.type=="ord",]==-1]<- 0
    upper.mat[votes.mat[row.type=="ord",]==-1]<- tau.ord
    ##Lower category
    lower.mat[votes.mat[row.type=="ord",]==0]<- -Inf
    upper.mat[votes.mat[row.type=="ord",]==0]<- 0
    #Missing data
    lower.mat[missing.mat[row.type=="ord",]==1]<- -Inf
    upper.mat[missing.mat[row.type=="ord",]==1]<- Inf
    Z.next.temp<-estep.bin(means=Theta.last[row.type=="ord",],a=lower.mat,b=upper.mat)
    # Fall back to the prior mean where the truncated mean is not finite.
    which.change<-!is.finite(Z.next.temp)
    Z.next.temp[which.change]<-Theta.last[row.type=="ord",][which.change]
    Z.next[row.type=="ord",]<-Z.next.temp
    # Resample the middle/top cutpoint uniformly between the largest latent
    # value in the middle category and the smallest in the top category.
    tau.min<-max(Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==-1&missing.mat[row.type=="ord"]==0])
    tau.max<-min(Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==-2&missing.mat[row.type=="ord"]==0])
    tau.min<-max(tau.min,0.001)
    tau.samp<-sort(c(tau.ord,runif(100,min(tau.min,tau.max),max(tau.min,tau.max))))
    #which.tau<-which(tau.samp==tau.ord)
    #tau.ord<-tau.samp[102-which.tau]
    #print(c(tau.min,tau.max))
    tau.ord<-runif(1,min(tau.min,tau.max),max(tau.min,tau.max))
    pars.max<-1
    accept.out<-prob.accept<-1
  }
  if(sum(row.type=="count")>0){
    #begin type = "pois"
    num.sample.count<-sum(row.type=="count")*k
    accept.out<-prob.accept<-NA
    # M step for the cutpoint parameters: 1-D coordinate optimizations of the
    # penalized deviance from test.gamma.pois_EM.
    dev.est<-function(x) test.gamma.pois_EM(x,votes.mat.0=votes.mat[row.type=="count",], Theta.last.0=Theta.last[row.type=="count",],cutoff.seq=cutoff.seq)$de
    dev.est1<-function(x) test.gamma.pois_EM(c(x,params[2]),votes.mat.0=votes.mat[row.type=="count",], Theta.last.0=Theta.last[row.type=="count",],cutoff.seq=cutoff.seq)$de
    dev.est2<-function(x) test.gamma.pois_EM(c(params[1],x),votes.mat.0=votes.mat[row.type=="count",], Theta.last.0=Theta.last[row.type=="count",],cutoff.seq=cutoff.seq)$de
    #range.opt<-c(params-1,params+1)
    # Narrow the search window once the algorithm has had time to settle.
    range.opt<-rbind(params-1,params+1)
    if(iter.curr>10) range.opt<-rbind(params-.25,params+.25)
    if(iter.curr%%1==0|iter.curr<3){
      gamma.opt.1<-optimize(dev.est1,
                            lower=range.opt[1,1],upper=range.opt[2,1],tol=0.01)
      params[1]<-gamma.opt.1$minimum
      gamma.opt.2<-optimize(dev.est2,
                            lower=range.opt[1],upper=range.opt[2],tol=0.01)
      params[2]<-gamma.opt.2$minimum
      gamma.opt<-list(gamma.opt.1,gamma.opt.2)
      "M Step"
      #print(gamma.opt.2)
    }
    gamma.next<-pars.max<-params
    taus<-test.gamma.pois_EM(gamma.next,votes.mat.0=votes.mat[row.type=="count",], Theta.last.0=Theta.last[row.type=="count",],cutoff.seq=cutoff.seq)$tau
    taus<-sort(taus)
    taus[1]<--Inf
    #print("Range of gamma")
    #print(range.opt)
    #print(gamma.next)
    #print(range(taus))
    #function(means,a,b){
    #  means+(dnorm(a-means)-dnorm(b-means))/(pnorm(b-means)-pnorm(a-means))
    #}
    # Per-cell truncation limits from the observed count's cutpoint interval.
    lower.mat<-matrix(taus[votes.mat[row.type=="count",]+1 ],nrow=nrow(votes.mat[row.type=="count",]))
    upper.mat<-matrix(taus[votes.mat[row.type=="count",]+2 ],nrow=nrow(votes.mat[row.type=="count",]))
    # NOTE(review): `Theta.last[row.type=="count"]` is vector (not row)
    # indexing -- missing ", ]"? It extracts the same elements column-major,
    # so the arithmetic in estep.bin still lines up, but confirm intent.
    Z.next.temp<-estep.bin(means=Theta.last[row.type=="count"],a=lower.mat,b=upper.mat)
    # Where the truncated mean is undefined, clamp toward the violated bound.
    which.change<-is.na(Z.next.temp)|is.infinite(Z.next.temp)
    Z.next.temp[which.change]<-
      (Theta.last[row.type=="count",][which.change] > upper.mat[which.change])*
      upper.mat[which.change]+
      (Theta.last[row.type=="count",][which.change] < lower.mat[which.change])*lower.mat[which.change]
    which.change<-is.na(Z.next.temp)|is.infinite(Z.next.temp)
    # NOTE(review): `Theta.last[which.change]` indexes the FULL matrix with
    # indices computed on the count-row submatrix; these only coincide when
    # every row is of type "count" -- verify.
    Z.next.temp[which.change]<-Theta.last[which.change]
    Z.next[row.type=="count",]<-Z.next.temp
  }
  return(list("Z.next"=Z.next,"params"=(pars.max),"accept"=accept.out,"prob"=prob.accept,"proposal.sd"=proposal.sd,"step.size"=step.size,"tau.ord"=tau.ord))
}##Closes out make.Z function
################################
################################
##2) Converting a matrix to the Z scale
# update_UDV_EM: M-step update of the low-rank decomposition Theta = U D V'
# plus row/column/grand intercepts, with a lasso-style penalty on the
# singular values D (EM analogue of a Bayesian-lasso update).
# Arguments (each ".0" argument is copied to a local working name below):
#   Z.next.0       n x k matrix of E-step latent responses
#   k0, n0         number of columns / rows of the data matrix
#   lambda.lasso.0, lambda.shrink.0  current penalty parameters
#   Dtau.0         latent scale parameters for the lasso prior on D
#   votes.mat.0    observed data matrix (only its row types/missingness used)
#   iter.curr      current EM iteration (not used inside this body)
#   row.type.0     per-row data type: "bin", "count", or "ord"
#   missing.mat.0  0/1 missingness indicator matrix
#   maxdim.0       maximum rank of the factorization
#   V.last         previous right singular vectors (only referenced by the
#                  commented-out irlba() call; the svd() path ignores it)
# Returns a list with updated Theta estimates (posterior-mean Theta.last and
# soft-thresholded Theta.mode), U, V, penalties, D.trunc/D.post, and Dtau.
update_UDV_EM<-function(
Z.next.0=Z.next,
k0=k, n0=n, lambda.lasso.0=lambda.lasso,lambda.shrink.0=lambda.shrink,
Dtau.0=Dtau,
votes.mat.0=votes.mat, iter.curr=0,row.type.0,missing.mat.0=missing.mat,maxdim.0,
V.last
){
# Copy arguments into the working names used throughout the function body.
missing.mat<-missing.mat.0
Dtau<-Dtau.0;
Z.next<-Z.next.0;
votes.mat<-votes.mat.0;
n<-n0; k<-k0;
lambda.lasso<-lambda.lasso.0
row.type <- row.type.0
maxdim<-maxdim.0
#Declare some vectors
sigma<-1
ones.r<-rep(1,k0)
ones.c<-rep(1,n0)
#Update intercepts
# Row/column/grand means, each shrunk slightly toward zero by a factor
# m/(m+1); the trailing comments are remnants of an earlier sampling step.
mu.r<-rowMeans(Z.next)#-ones.c%*%t(mu.c)-Theta.last.0+mu.grand)
mu.r<-mu.r*n/(n+1)#+rnorm(length(mu.r),sd=1/k)
mu.c<-colMeans(Z.next)#-mu.r%*%t(ones.r)-Theta.last.0+mu.grand)
#mu.c<-mu.c*k/(k+1)#+rnorm(length(mu.c),sd=1/n)
mu.grand<-mean(Z.next)
mu.grand<-mu.grand*(n*k)/(n*k+1)#+rnorm(1,sd=1/(n*k))
mean.mat<-ones.c%*%t(mu.c)+mu.r%*%t(ones.r)-mu.grand
# With mixed row types, compute type-specific column and grand means so each
# data type (bin/count/ord) gets its own centering.
if(length(unique(row.type))>1){
is.bin<-sum(row.type=="bin")>0
is.count<-sum(row.type=="count")>0
is.ord<-sum(row.type=="ord")>0
#print("Two Means Being Used")
mean.c.mat<-matrix(NA,nrow=n,ncol=k)
if(is.bin) mu.c.bin<-colMeans(Z.next[row.type=="bin",])
if(is.count) mu.c.count<-colMeans(Z.next[row.type=="count",])
if(is.ord) mu.c.ord<-colMeans(Z.next[row.type=="ord",])
if(is.bin) mu.c.bin<-(mu.c.bin*k)/(k+1)#+rnorm(length(mu.c.bin),sd=1/sum(row.type=="bin"))
if(is.count) mu.c.count<-mu.c.count*k/(k+1)#+rnorm(length(mu.c.count),sd=1/sum(row.type=="count"))
if(is.ord) mu.c.ord<-mu.c.ord*k/(k+1)#+rnorm(length(mu.c.count),sd=1/sum(row.type=="count"))
if(is.bin) mean.c.mat[row.type=="bin",]<-ones.c[row.type=="bin"]%*%t(mu.c.bin)
if(is.count) mean.c.mat[row.type=="count",]<-ones.c[row.type=="count"]%*%t(mu.c.count)
if(is.ord) mean.c.mat[row.type=="ord",]<-ones.c[row.type=="ord"]%*%t(mu.c.ord)
mean.grand.mat<-matrix(NA,nrow=n,ncol=k)
if(is.bin) mean.grand.mat[row.type=="bin",]<-mean(Z.next[row.type=="bin",])* (sum(row.type=="bin")*k)/(sum(row.type=="bin")*k+1)#+rnorm(1,sd=1/(sum(row.type=="bin")*k))
if(is.count) mean.grand.mat[row.type=="count",]<-mean(Z.next[row.type=="count",])* (sum(row.type=="count")*k)/(sum(row.type=="count")*k+1)#+rnorm(1,sd=1/(sum(row.type=="bin")*k))#+rnorm(1,sd=1/(sum(row.type=="count")*k))
# NOTE(review): the "ord" shrinkage factor below uses sum(row.type=="count")
# in the numerator but sum(row.type=="ord") in the denominator — looks like a
# copy-paste inconsistency; confirm whether the numerator should be "ord".
if(is.ord) mean.grand.mat[row.type=="ord",]<-mean(Z.next[row.type=="ord",])* (sum(row.type=="count")*k)/(sum(row.type=="ord")*k+1)#+rnorm(1,sd=1/(sum(row.type=="bin")*k))#+rnorm(1,sd=1/(sum(row.type=="count")*k))
mean.mat<-mean.c.mat+mu.r%*%t(ones.r)-mean.grand.mat
}
# Center the latent matrix, then take a weighted SVD; weights down-weight
# row types with more observed entries so each type contributes comparably.
Z.starstar<-svd.mat<- Z.next- mean.mat
#Take svd, give each column an sd of 1 (rather than norm of 1)
num.zeroes<-1#colMeans(votes.mat!=0)
svd.mat.0<-svd.mat
#save(svd.mat,file="svd.mat")
svd.mat[is.na(svd.mat)]<-0
wts.dum<-rep(1,nrow(svd.mat))
wts.dum[row.type=="bin"]<-1/sum(1-missing.mat[row.type=="bin",])^.5
wts.dum[row.type=="count"]<-1/sum(1-missing.mat[row.type=="count",])^.5
wts.dum[row.type=="ord"]<-1/sum(1-missing.mat[row.type=="ord",])^.5
wts.dum<-wts.dum/mean(wts.dum)
#svd.dum<-irlba(svd.mat*wts.dum,nu=maxdim,nv=maxdim,V=V.last)
svd.dum<-svd(svd.mat*wts.dum,nu=maxdim,nv=maxdim)
# Scrub non-finite SVD output, then recompute the middle factor against the
# unweighted matrix; near-zero rows/columns get tiny random jitter so the
# second SVD below does not degenerate.
svd.dum$u[is.na(svd.dum$u)|is.infinite(svd.dum$u)]<-0
svd.dum$d[is.na(svd.dum$d)|is.infinite(svd.dum$d)]<-0
svd.dum$v[is.na(svd.dum$v)|is.infinite(svd.dum$v)]<-0
svd.dum$d<-(t(svd.dum$u)%*%svd.mat%*%svd.dum$v)
which.rows<-which(rowMeans(svd.dum$d^2)^.5<1e-4)
which.cols<-which(colMeans(svd.dum$d^2)^.5<1e-4)
svd.dum$d[which.rows,]<-rnorm(length(svd.dum$d[which.rows,]),sd=.001)
svd.dum$d[which.cols,]<-rnorm(length(svd.dum$d[which.cols,]),sd=.001)
# Re-diagonalize the (maxdim x maxdim) middle factor and rotate U and V
# accordingly, giving an orthogonal basis aligned with the unweighted data.
svd2<-svd(svd.dum$d,nu=maxdim,nv=maxdim)
#print(svd.dum$d)
#print(maxdim)
#print(maxdim-2)
#svd2<-irlba(svd.dum$d,nu=maxdim-5,nv=maxdim-5)
svd2$u[is.na(svd2$u)|is.infinite(svd2$u)]<-0
svd2$d[is.na(svd2$d)|is.infinite(svd2$d)]<-0
svd2$v[is.na(svd2$v)|is.infinite(svd2$v)]<-0
svd.dum$u<-svd.dum$u%*%(svd2$u)
svd.dum$v<-svd.dum$v%*%(svd2$v)
svd.dum$d<-svd2$d
svd0<-svd.dum
svd0$v<-t(t(svd0$u)%*%svd.mat.0)
svd0$v<-apply(svd0$v,2,FUN=function(x) x/sum(x^2)^.5)
svd0$d<-diag(t(svd0$u)%*%svd.mat.0%*%svd0$v)
# Sort dimensions by decreasing singular value, then rescale so U and V
# columns have unit variance rather than unit norm.
sort.ord<-sort(svd0$d,ind=T,decreasing=T)$ix
svd0$u<-svd0$u[,sort.ord]
svd0$v<-svd0$v[,sort.ord]
svd0$d<-svd0$d[sort.ord]
svd0$u<-svd0$u*(n-1)^.5
svd0$v<-svd0$v*(k-1)^.5
svd0$d<-svd0$d*((n-1)*(k-1))^-.5
Theta.last.0<-svd.mat
Theta.last<-Theta.last.0+(ones.c%*%t(mu.c)+mu.r%*%t(ones.r)-mu.grand)
#Update d; follows from Blasso and DvD
# Ridge-style posterior update of the singular values: gA is the (pseudo-)
# inverse of the precision on the first maxdim entries; XprimeY projects the
# data onto each rank-one component.
Y.tilde<-as.vector(svd.mat)
if(n>length(Dtau)) Dtau[(length(Dtau)+1):n]<-1
A<- (n*k)*diag(n)+diag(as.vector(Dtau^(-1)))
gA<-A*0+NA
gA[1:maxdim,1:maxdim]<-ginv(A[1:maxdim,1:maxdim])
gA[is.na(gA)]<-0
XprimeY<-sapply(1:maxdim, FUN=function(i, svd2=svd0, Z.use=svd.mat) sum(Z.use*(svd2$u[,i]%*%t(svd2$v[,i]))))
if(length(XprimeY)<dim(gA)[1]) XprimeY[(length(XprimeY)+1) :dim(gA)[1]]<-0
D.post.mean<- as.vector(gA%*%XprimeY)
D.post.var.2<-gA
##Sample D and reconstruct theta, putting intercepts back in
# EM replaces the draw with mean + posterior sd (the commented mvrnorm call
# is the sampler this deterministic update replaced).
D.post<-rep(NA,length(D.post.mean))
D.post[1:maxdim]<-D.post.mean[1:maxdim] + diag(D.post.var.2[1:maxdim,1:maxdim]) ^.5 #as.vector(mvrnorm(1, mu=D.post.mean[1:maxdim], D.post.var.2[1:maxdim,1:maxdim] ) )/sigma^.5
D.post[is.na(D.post)]<-0
abs.D.post<-abs(D.post)
#abs.D.post.loop<-abs(mvrnorm(50, mu=D.post.mean[1:maxdim], D.post.var.2[1:maxdim,1:maxdim] ) )
#print(abs.D.post.loop)
##Calculate MAP and mean estimate
# D.trunc is the soft-thresholded (MAP/lasso) estimate of the singular values.
D.trunc<-pmax(svd0$d-lambda.lasso.0,0)
U.last<-svd0$u
V.last<-svd0$v
U.mean.next<-0*U.last
V.mean.next<-0*V.last
##Construct U and V
#prior var of 2
#D.adj<-abs(D.post[1:maxdim])/svd0$d
#U.last<-t(t(U.last)*(D.post[1:maxdim]^2/(D.post[1:maxdim]^2+1/(4*k))))
#V.last<-t(t(V.last)*D.post[1:maxdim]^2/(D.post[1:maxdim]^2+1/(4*n)))
U.last[!is.finite(U.last)]<-0
V.last[!is.finite(V.last)]<-0
U.next<-U.last
V.next<-V.last
# Reassemble Theta: posterior-mean version (Theta.last) and thresholded
# "mode" version (Theta.mode), with the intercept matrix added back.
Theta.last.0<-U.next%*%diag(D.post[1:maxdim])%*%t(V.next)
Theta.last<-Theta.last.0+mean.mat
Theta.mode<- U.next%*%diag(D.trunc[1:maxdim])%*%t(V.next)+mean.mat
##Update muprime, invTau2, lambda.lasso
# Update the latent lasso scales via inverse-Gaussian draws (averaged over
# 500 samples), then set lambda from the resulting Dtau.
muprime<-(abs(lambda.lasso*sqrt(sigma)))/abs.D.post#*colMeans(1/abs.D.post.loop)
invTau2<-muprime#sapply(1:maxdim, FUN=function(i) rinv.gaussian(1, muprime[i], (lambda.lasso^2 ) ) )
#invTau2<-matrix(NA,nrow=500,ncol=maxdim)
invTau2<-sapply(1:maxdim, FUN=function(i) rinv.gaussian(500, muprime[i], (lambda.lasso^2 ) ) )
Dtau<-colMeans(1/abs(invTau2))
#lambda.lasso<-((maxdim)/(sum(Dtau[1:maxdim])/2))^.5
#lambda.lasso<-(2/mean(Dtau))^.5
#ran.gamma<-rgamma(1000, shape=maxdim+1 , rate=sum(Dtau[1:maxdim])/2+1.78 )
#d1<-density(ran.gamma^.5,cut=0)
#lambda.shrink<-lambda.lasso<-d1$x[d1$y==max(d1$y)]
lambda.shrink<-lambda.lasso<-((maxdim)/(sum(Dtau[1:maxdim])/2+1.78))^.5
#lambda.shrink<-lambda.lasso<-rgamma(1, shape=maxdim+1 , rate=sum(Dtau[1:maxdim])/2+1.78 )^.5
return(list(
"Theta.last"=Theta.last,
"U.next"=U.next,
"V.next"=V.next,
"lambda.lasso"=lambda.lasso,
"lambda.shrink"=lambda.shrink,
"D.trunc"=D.trunc,
"D.post"=D.post,
"Theta.mode"=Theta.mode,
"svd0"=svd0,
"Dtau"=Dtau,
"D.ols"=svd0$d
))
}
# expit: inverse-logit (logistic) function, vectorized over x.
# Written as 1/(1+exp(-x)) rather than exp(x)/(1+exp(x)): the latter
# overflows for large positive x (exp(x) -> Inf, giving Inf/Inf = NaN),
# while this form returns the correct limit of 1. For negative and
# moderate x the two forms agree.
expit<-function(x) 1/(1+exp(-x))
| /fuzzedpackages/SparseFactorAnalysis/R/FunctionsInternal_Count_EM.R | no_license | akhikolla/testpackages | R | false | false | 15,504 | r | ################################
################################
##Three functions in here:
#### 1) test.gamma for finding the ML estimate for the grand lambda
#### 2) make.Z for converting a matrix to the Z-scale
#### 3) update.UDV for updating the ideal point estimates
#### *) Small additonal ones at the end
################################
################################
################################
################################
##1) Finding gamma
#test.gamma.pois_EM<-function(gamma.try,Theta.last.0=Theta.last[row.type=="count",],votes.mat.0=votes.mat[row.type=="count",],emp.cdf.0,cutoff.seq=NULL){
# test.gamma.pois_EM: evaluate the (penalized) deviance of a power-law
# cutpoint model for count rows, and return the implied cutpoints.
# Arguments:
#   gamma.try    length-2 parameter vector on the log scale (exponentiated
#                on entry): gamma[1] scales counts, gamma[2] is the exponent
#   Theta.last.0 latent means for the count rows
#   votes.mat.0  observed counts for the count rows
#   emp.cdf.0    unused in this body (kept for call compatibility)
#   cutoff.seq   optional explicit sequence of count cutoffs; defaults to
#                seq(-1, max(count)+2)
# Returns list(deviance, tau): -2*log-likelihood plus a quadratic penalty on
# log(gamma), and the sorted cutpoint vector (tau[1] forced to -Inf).
test.gamma.pois_EM<-function(gamma.try,Theta.last.0,votes.mat.0,emp.cdf.0,cutoff.seq=NULL){
gamma.try<-exp(gamma.try)
votes.mat<-votes.mat.0
taus.try<-NULL
count.seq<-cutoff.seq
if(length(cutoff.seq)==0) count.seq<-seq(-1,max(votes.mat)+2,1)
taus.try<-count.seq*0
analytic.cdf<-count.seq
# Anchor: the cutpoint for count 0 is set by the observed proportion of zeros.
a.int<-qnorm(mean(votes.mat==0))
# Cutpoints grow as a power law in the count: a.int + g1 * count^g2.
taus.try[count.seq>=0]<-(a.int+gamma.try[1]*count.seq[count.seq>=0]^(gamma.try[2]))
taus.try[count.seq<0]<- -Inf
taus.try<-sort(taus.try)
taus.try[1]<--Inf
# Polynomial approximation to log(pnorm(a)-pnorm(b)) used when the exact
# probability underflows to 0; coefficients are precomputed constants.
# NOTE(review): the inner function reuses `x` for both its argument and the
# feature vector — works, but the shadowing is easy to misread.
find.pnorm<-function(x){
a<-x[1]
b<-x[2]
coefs<-c( -1.82517672, 0.51283415, -0.81377290, -0.02699400, -0.49642787, -0.33379312, -0.24176661, 0.03776971)
x<-c(1, a, b, a^2,b^2, log(abs(a-b)),log(abs(a-b))^2,a*b)
(sum(x*coefs))
}
# Interval likelihood of each observed count under the cutpoints; votes+1 /
# votes+2 index the lower / upper cutpoint for that count.
lik<-((pnorm(taus.try[votes.mat+2 ]-Theta.last.0)-pnorm(taus.try[votes.mat+1 ]-Theta.last.0)) )
log.lik<-log(lik)
# Where the probability underflowed to exactly 0, substitute the polynomial
# approximation of the log-probability.
which.zero<-which(lik==0)
a0<-taus.try[votes.mat[which.zero]+2]-Theta.last.0[which.zero]
b0<-taus.try[votes.mat[which.zero]+1]-Theta.last.0[which.zero]
log.lik[which.zero]<-apply(cbind(a0,b0),1,find.pnorm)
# NOTE(review): lik is a pnorm difference in [0,1], so is.infinite(lik) can
# never be TRUE — the next two guards look like they were meant to test
# log.lik instead; confirm before changing, as they are currently no-ops.
log.lik[is.infinite(lik)&lik>0]<-max(log.lik[is.finite(lik)])
log.lik[is.infinite(lik)&lik<0]<-min(log.lik[is.finite(lik)])
# Floor the log-likelihood to keep the deviance finite.
thresh<-min(-1e30, min(log.lik[is.finite(log.lik)],na.rm=TRUE))
log.lik[log.lik<thresh]<-thresh
# Deviance plus a quadratic penalty on log(gamma) (weak prior toward 1).
dev.out<--2*sum(log.lik,na.rm=TRUE)+sum(log(gamma.try)^2)
return(list("deviance"=dev.out,"tau"=taus.try))
}
################################
################################
##2) Converting a matrix to the Z scale
# make.Z_EM: E-step — convert the observed data matrix to the latent normal
# (Z) scale, handling each row type separately:
#   "bin"   truncated-normal conditional means with limits set by the 0/1/0.5
#           observation (0.5 = unobserved, limits (-Inf, Inf))
#   "ord"   three ordinal categories (0 / -1 / -2) with cutpoints 0 and
#           tau.ord; tau.ord is refreshed from the implied interval
#   "count" cutpoints from the power-law model fitted by test.gamma.pois_EM,
#           with the two gamma parameters updated by 1-D optimize() calls
# Returns list(Z.next, params, accept, prob, proposal.sd, step.size, tau.ord).
# accept/prob are legacy MH bookkeeping (1 or NA here); proposal.sd and
# step.size are passed through unchanged.
make.Z_EM<-function(
Theta.last.0=Theta.last,
votes.mat.0=votes.mat,row.type.0=row.type,
n0=n, k0=k, params=NULL, iter.curr=0,empir=NULL,cutoff.seq.0=NULL,missing.mat.0=NULL,lambda.lasso,proposal.sd,scale.sd, max.optim,step.size,maxdim.0,tau.ord.0
){
tau.ord<-tau.ord.0
Theta.last<-Theta.last.0;votes.mat<-votes.mat.0;
row.type<-row.type.0; n<-n0; k<-k0
cutoff.seq<-cutoff.seq.0
sigma<-1
row.type<-row.type.0; votes.mat<-votes.mat.0
Z.next<-matrix(NA,nrow=n,ncol=k)
missing.mat<-missing.mat.0
i.gibbs<-iter.curr
maxdim<-maxdim.0
#print(i.gibbs)
#print(iter.curr)
# Conditional mean of a normal with mean `means` truncated to (a, b) — the
# standard truncated-normal E-step formula.
estep.bin<-function(means,a,b){
means+(dnorm(a-means)-dnorm(b-means))/(pnorm(b-means)-pnorm(a- means))
}
# --- Binary rows: truncation limits from the 0/1/0.5 coding ---
if(sum(row.type=="bin")>0){
toplimit.mat<-bottomlimit.mat<-votes.mat[row.type=="bin",]*0
toplimit.mat[votes.mat[row.type=="bin",]==1]<-Inf
toplimit.mat[votes.mat[row.type=="bin",]==0]<-0
toplimit.mat[votes.mat[row.type=="bin",]==0.5]<-Inf
bottomlimit.mat[votes.mat[row.type=="bin",]==1]<-0
bottomlimit.mat[votes.mat[row.type=="bin",]==0]<--Inf
bottomlimit.mat[votes.mat[row.type=="bin",]==0.5]<- -Inf
Z.next[row.type=="bin",]<-
estep.bin(means=Theta.last[row.type=="bin",],a=bottomlimit.mat,b=toplimit.mat)
# Non-finite conditional means (numerical underflow) are zeroed out.
Z.next[row.type=="bin",][is.na(Z.next[row.type=="bin",])]<-0
Z.next[row.type=="bin",][is.infinite(Z.next[row.type=="bin",])]<-0
pars.max<-1
accept.out<-prob.accept<-1
}
# --- Ordinal rows: three categories with cutpoints 0 and tau.ord ---
if(sum(row.type=="ord")>0){
#Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==-2]<-rtnorm(sum(votes.mat[row.type=="ord",]==-2), mean=sigma^.5*Theta.last[row.type=="ord",][votes.mat[row.type=="ord",]==-2], lower=tau.ord, sd=sigma^.5)
#Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==-1]<-rtnorm(sum(votes.mat[row.type=="ord",]==-1), mean=sigma^.5*Theta.last[row.type=="ord",][votes.mat[row.type=="ord",]==-1], lower=0,upper=tau.ord, sd=sigma^.5)
#Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==0]<-rtnorm(sum(votes.mat[row.type=="ord",]==0), mean=sigma^.5*Theta.last[row.type=="ord",][votes.mat[row.type=="ord",]==0], upper=0 , sd=sigma^.5)
#Z.next[row.type=="ord",][missing.mat[row.type=="ord",]==1]<-rnorm(sum(missing.mat[row.type=="ord",]==1), mean=sigma^.5*Theta.last[row.type=="ord",][missing.mat[row.type=="ord",]==1], sd=sigma^.5)
lower.mat<-upper.mat<-matrix(0,nrow=nrow(votes.mat[row.type=="ord",]), ncol=ncol(votes.mat))
##Top category
lower.mat[votes.mat[row.type=="ord",]==-2]<- tau.ord
upper.mat[votes.mat[row.type=="ord",]==-2]<- Inf
##Middle category
lower.mat[votes.mat[row.type=="ord",]==-1]<- 0
upper.mat[votes.mat[row.type=="ord",]==-1]<- tau.ord
##Lower category
lower.mat[votes.mat[row.type=="ord",]==0]<- -Inf
upper.mat[votes.mat[row.type=="ord",]==0]<- 0
#Missing data
lower.mat[missing.mat[row.type=="ord",]==1]<- -Inf
upper.mat[missing.mat[row.type=="ord",]==1]<- Inf
Z.next.temp<-estep.bin(means=Theta.last[row.type=="ord",],a=lower.mat,b=upper.mat)
# Fall back to the prior mean where the truncated-mean formula broke down.
which.change<-!is.finite(Z.next.temp)
Z.next.temp[which.change]<-Theta.last[row.type=="ord",][which.change]
Z.next[row.type=="ord",]<-Z.next.temp
# Refresh tau.ord uniformly within the interval consistent with the imputed
# Z values (middle-category max, top-category min).
# NOTE(review): missing.mat[row.type=="ord"] below lacks a comma, so the
# matrix is indexed as a vector — confirm whether [row.type=="ord",] was
# intended.
tau.min<-max(Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==-1&missing.mat[row.type=="ord"]==0])
tau.max<-min(Z.next[row.type=="ord",][votes.mat[row.type=="ord",]==-2&missing.mat[row.type=="ord"]==0])
tau.min<-max(tau.min,0.001)
tau.samp<-sort(c(tau.ord,runif(100,min(tau.min,tau.max),max(tau.min,tau.max))))
#which.tau<-which(tau.samp==tau.ord)
#tau.ord<-tau.samp[102-which.tau]
#print(c(tau.min,tau.max))
tau.ord<-runif(1,min(tau.min,tau.max),max(tau.min,tau.max))
pars.max<-1
accept.out<-prob.accept<-1
}
# --- Count rows: update gamma by coordinate-wise optimize(), then impute ---
if(sum(row.type=="count")>0){
#begin type = "pois"
num.sample.count<-sum(row.type=="count")*k
accept.out<-prob.accept<-NA
# Deviance as a function of both / the first / the second gamma parameter.
dev.est<-function(x) test.gamma.pois_EM(x,votes.mat.0=votes.mat[row.type=="count",], Theta.last.0=Theta.last[row.type=="count",],cutoff.seq=cutoff.seq)$de
dev.est1<-function(x) test.gamma.pois_EM(c(x,params[2]),votes.mat.0=votes.mat[row.type=="count",], Theta.last.0=Theta.last[row.type=="count",],cutoff.seq=cutoff.seq)$de
dev.est2<-function(x) test.gamma.pois_EM(c(params[1],x),votes.mat.0=votes.mat[row.type=="count",], Theta.last.0=Theta.last[row.type=="count",],cutoff.seq=cutoff.seq)$de
#range.opt<-c(params-1,params+1)
# Search window around the current parameters; tightened after iteration 10.
range.opt<-rbind(params-1,params+1)
if(iter.curr>10) range.opt<-rbind(params-.25,params+.25)
if(iter.curr%%1==0|iter.curr<3){
gamma.opt.1<-optimize(dev.est1,
lower=range.opt[1,1],upper=range.opt[2,1],tol=0.01)
params[1]<-gamma.opt.1$minimum
# NOTE(review): this second optimize() uses range.opt[1]/range.opt[2]
# (column-major elements 1 and 2 of the matrix) rather than
# range.opt[1,2]/range.opt[2,2] — confirm whether the second column's
# bounds were intended, as in the first call.
gamma.opt.2<-optimize(dev.est2,
lower=range.opt[1],upper=range.opt[2],tol=0.01)
params[2]<-gamma.opt.2$minimum
gamma.opt<-list(gamma.opt.1,gamma.opt.2)
"M Step"
#print(gamma.opt.2)
}
gamma.next<-pars.max<-params
# Cutpoints implied by the updated gamma; first forced to -Inf.
taus<-test.gamma.pois_EM(gamma.next,votes.mat.0=votes.mat[row.type=="count",], Theta.last.0=Theta.last[row.type=="count",],cutoff.seq=cutoff.seq)$tau
taus<-sort(taus)
taus[1]<--Inf
#print("Range of gamma")
#print(range.opt)
#print(gamma.next)
#print(range(taus))
#function(means,a,b){
# means+(dnorm(a-means)-dnorm(b-means))/(pnorm(b-means)-pnorm(a-means))
#}
lower.mat<-matrix(taus[votes.mat[row.type=="count",]+1 ],nrow=nrow(votes.mat[row.type=="count",]))
upper.mat<-matrix(taus[votes.mat[row.type=="count",]+2 ],nrow=nrow(votes.mat[row.type=="count",]))
# NOTE(review): Theta.last[row.type=="count"] below lacks the trailing comma
# used everywhere else, so the matrix is indexed as a vector; likewise
# Theta.last[which.change] a few lines down. Both look like they were meant
# to subset count rows — confirm against the package source.
Z.next.temp<-estep.bin(means=Theta.last[row.type=="count"],a=lower.mat,b=upper.mat)
# Two-stage fallback for non-finite imputations: clamp to the violated
# bound, then fall back to the prior mean if still non-finite.
which.change<-is.na(Z.next.temp)|is.infinite(Z.next.temp)
Z.next.temp[which.change]<-
(Theta.last[row.type=="count",][which.change] > upper.mat[which.change])*
upper.mat[which.change]+
(Theta.last[row.type=="count",][which.change] < lower.mat[which.change])*lower.mat[which.change]
which.change<-is.na(Z.next.temp)|is.infinite(Z.next.temp)
Z.next.temp[which.change]<-Theta.last[which.change]
Z.next[row.type=="count",]<-Z.next.temp
}
return(list("Z.next"=Z.next,"params"=(pars.max),"accept"=accept.out,"prob"=prob.accept,"proposal.sd"=proposal.sd,"step.size"=step.size,"tau.ord"=tau.ord))
}##Closes out make.Z function
################################
################################
##2) Converting a matrix to the Z scale
# update_UDV_EM: M-step update of the low-rank decomposition Theta = U D V'
# plus row/column/grand intercepts, with a lasso-style penalty on the
# singular values D (EM analogue of a Bayesian-lasso update).
# Arguments (each ".0" argument is copied to a local working name below):
#   Z.next.0       n x k matrix of E-step latent responses
#   k0, n0         number of columns / rows of the data matrix
#   lambda.lasso.0, lambda.shrink.0  current penalty parameters
#   Dtau.0         latent scale parameters for the lasso prior on D
#   votes.mat.0    observed data matrix (only its row types/missingness used)
#   iter.curr      current EM iteration (not used inside this body)
#   row.type.0     per-row data type: "bin", "count", or "ord"
#   missing.mat.0  0/1 missingness indicator matrix
#   maxdim.0       maximum rank of the factorization
#   V.last         previous right singular vectors (only referenced by the
#                  commented-out irlba() call; the svd() path ignores it)
# Returns a list with updated Theta estimates (posterior-mean Theta.last and
# soft-thresholded Theta.mode), U, V, penalties, D.trunc/D.post, and Dtau.
update_UDV_EM<-function(
Z.next.0=Z.next,
k0=k, n0=n, lambda.lasso.0=lambda.lasso,lambda.shrink.0=lambda.shrink,
Dtau.0=Dtau,
votes.mat.0=votes.mat, iter.curr=0,row.type.0,missing.mat.0=missing.mat,maxdim.0,
V.last
){
# Copy arguments into the working names used throughout the function body.
missing.mat<-missing.mat.0
Dtau<-Dtau.0;
Z.next<-Z.next.0;
votes.mat<-votes.mat.0;
n<-n0; k<-k0;
lambda.lasso<-lambda.lasso.0
row.type <- row.type.0
maxdim<-maxdim.0
#Declare some vectors
sigma<-1
ones.r<-rep(1,k0)
ones.c<-rep(1,n0)
#Update intercepts
# Row/column/grand means, each shrunk slightly toward zero by a factor
# m/(m+1); the trailing comments are remnants of an earlier sampling step.
mu.r<-rowMeans(Z.next)#-ones.c%*%t(mu.c)-Theta.last.0+mu.grand)
mu.r<-mu.r*n/(n+1)#+rnorm(length(mu.r),sd=1/k)
mu.c<-colMeans(Z.next)#-mu.r%*%t(ones.r)-Theta.last.0+mu.grand)
#mu.c<-mu.c*k/(k+1)#+rnorm(length(mu.c),sd=1/n)
mu.grand<-mean(Z.next)
mu.grand<-mu.grand*(n*k)/(n*k+1)#+rnorm(1,sd=1/(n*k))
mean.mat<-ones.c%*%t(mu.c)+mu.r%*%t(ones.r)-mu.grand
# With mixed row types, compute type-specific column and grand means so each
# data type (bin/count/ord) gets its own centering.
if(length(unique(row.type))>1){
is.bin<-sum(row.type=="bin")>0
is.count<-sum(row.type=="count")>0
is.ord<-sum(row.type=="ord")>0
#print("Two Means Being Used")
mean.c.mat<-matrix(NA,nrow=n,ncol=k)
if(is.bin) mu.c.bin<-colMeans(Z.next[row.type=="bin",])
if(is.count) mu.c.count<-colMeans(Z.next[row.type=="count",])
if(is.ord) mu.c.ord<-colMeans(Z.next[row.type=="ord",])
if(is.bin) mu.c.bin<-(mu.c.bin*k)/(k+1)#+rnorm(length(mu.c.bin),sd=1/sum(row.type=="bin"))
if(is.count) mu.c.count<-mu.c.count*k/(k+1)#+rnorm(length(mu.c.count),sd=1/sum(row.type=="count"))
if(is.ord) mu.c.ord<-mu.c.ord*k/(k+1)#+rnorm(length(mu.c.count),sd=1/sum(row.type=="count"))
if(is.bin) mean.c.mat[row.type=="bin",]<-ones.c[row.type=="bin"]%*%t(mu.c.bin)
if(is.count) mean.c.mat[row.type=="count",]<-ones.c[row.type=="count"]%*%t(mu.c.count)
if(is.ord) mean.c.mat[row.type=="ord",]<-ones.c[row.type=="ord"]%*%t(mu.c.ord)
mean.grand.mat<-matrix(NA,nrow=n,ncol=k)
if(is.bin) mean.grand.mat[row.type=="bin",]<-mean(Z.next[row.type=="bin",])* (sum(row.type=="bin")*k)/(sum(row.type=="bin")*k+1)#+rnorm(1,sd=1/(sum(row.type=="bin")*k))
if(is.count) mean.grand.mat[row.type=="count",]<-mean(Z.next[row.type=="count",])* (sum(row.type=="count")*k)/(sum(row.type=="count")*k+1)#+rnorm(1,sd=1/(sum(row.type=="bin")*k))#+rnorm(1,sd=1/(sum(row.type=="count")*k))
# NOTE(review): the "ord" shrinkage factor below uses sum(row.type=="count")
# in the numerator but sum(row.type=="ord") in the denominator — looks like a
# copy-paste inconsistency; confirm whether the numerator should be "ord".
if(is.ord) mean.grand.mat[row.type=="ord",]<-mean(Z.next[row.type=="ord",])* (sum(row.type=="count")*k)/(sum(row.type=="ord")*k+1)#+rnorm(1,sd=1/(sum(row.type=="bin")*k))#+rnorm(1,sd=1/(sum(row.type=="count")*k))
mean.mat<-mean.c.mat+mu.r%*%t(ones.r)-mean.grand.mat
}
# Center the latent matrix, then take a weighted SVD; weights down-weight
# row types with more observed entries so each type contributes comparably.
Z.starstar<-svd.mat<- Z.next- mean.mat
#Take svd, give each column an sd of 1 (rather than norm of 1)
num.zeroes<-1#colMeans(votes.mat!=0)
svd.mat.0<-svd.mat
#save(svd.mat,file="svd.mat")
svd.mat[is.na(svd.mat)]<-0
wts.dum<-rep(1,nrow(svd.mat))
wts.dum[row.type=="bin"]<-1/sum(1-missing.mat[row.type=="bin",])^.5
wts.dum[row.type=="count"]<-1/sum(1-missing.mat[row.type=="count",])^.5
wts.dum[row.type=="ord"]<-1/sum(1-missing.mat[row.type=="ord",])^.5
wts.dum<-wts.dum/mean(wts.dum)
#svd.dum<-irlba(svd.mat*wts.dum,nu=maxdim,nv=maxdim,V=V.last)
svd.dum<-svd(svd.mat*wts.dum,nu=maxdim,nv=maxdim)
# Scrub non-finite SVD output, then recompute the middle factor against the
# unweighted matrix; near-zero rows/columns get tiny random jitter so the
# second SVD below does not degenerate.
svd.dum$u[is.na(svd.dum$u)|is.infinite(svd.dum$u)]<-0
svd.dum$d[is.na(svd.dum$d)|is.infinite(svd.dum$d)]<-0
svd.dum$v[is.na(svd.dum$v)|is.infinite(svd.dum$v)]<-0
svd.dum$d<-(t(svd.dum$u)%*%svd.mat%*%svd.dum$v)
which.rows<-which(rowMeans(svd.dum$d^2)^.5<1e-4)
which.cols<-which(colMeans(svd.dum$d^2)^.5<1e-4)
svd.dum$d[which.rows,]<-rnorm(length(svd.dum$d[which.rows,]),sd=.001)
svd.dum$d[which.cols,]<-rnorm(length(svd.dum$d[which.cols,]),sd=.001)
# Re-diagonalize the (maxdim x maxdim) middle factor and rotate U and V
# accordingly, giving an orthogonal basis aligned with the unweighted data.
svd2<-svd(svd.dum$d,nu=maxdim,nv=maxdim)
#print(svd.dum$d)
#print(maxdim)
#print(maxdim-2)
#svd2<-irlba(svd.dum$d,nu=maxdim-5,nv=maxdim-5)
svd2$u[is.na(svd2$u)|is.infinite(svd2$u)]<-0
svd2$d[is.na(svd2$d)|is.infinite(svd2$d)]<-0
svd2$v[is.na(svd2$v)|is.infinite(svd2$v)]<-0
svd.dum$u<-svd.dum$u%*%(svd2$u)
svd.dum$v<-svd.dum$v%*%(svd2$v)
svd.dum$d<-svd2$d
svd0<-svd.dum
svd0$v<-t(t(svd0$u)%*%svd.mat.0)
svd0$v<-apply(svd0$v,2,FUN=function(x) x/sum(x^2)^.5)
svd0$d<-diag(t(svd0$u)%*%svd.mat.0%*%svd0$v)
# Sort dimensions by decreasing singular value, then rescale so U and V
# columns have unit variance rather than unit norm.
sort.ord<-sort(svd0$d,ind=T,decreasing=T)$ix
svd0$u<-svd0$u[,sort.ord]
svd0$v<-svd0$v[,sort.ord]
svd0$d<-svd0$d[sort.ord]
svd0$u<-svd0$u*(n-1)^.5
svd0$v<-svd0$v*(k-1)^.5
svd0$d<-svd0$d*((n-1)*(k-1))^-.5
Theta.last.0<-svd.mat
Theta.last<-Theta.last.0+(ones.c%*%t(mu.c)+mu.r%*%t(ones.r)-mu.grand)
#Update d; follows from Blasso and DvD
# Ridge-style posterior update of the singular values: gA is the (pseudo-)
# inverse of the precision on the first maxdim entries; XprimeY projects the
# data onto each rank-one component.
Y.tilde<-as.vector(svd.mat)
if(n>length(Dtau)) Dtau[(length(Dtau)+1):n]<-1
A<- (n*k)*diag(n)+diag(as.vector(Dtau^(-1)))
gA<-A*0+NA
gA[1:maxdim,1:maxdim]<-ginv(A[1:maxdim,1:maxdim])
gA[is.na(gA)]<-0
XprimeY<-sapply(1:maxdim, FUN=function(i, svd2=svd0, Z.use=svd.mat) sum(Z.use*(svd2$u[,i]%*%t(svd2$v[,i]))))
if(length(XprimeY)<dim(gA)[1]) XprimeY[(length(XprimeY)+1) :dim(gA)[1]]<-0
D.post.mean<- as.vector(gA%*%XprimeY)
D.post.var.2<-gA
##Sample D and reconstruct theta, putting intercepts back in
# EM replaces the draw with mean + posterior sd (the commented mvrnorm call
# is the sampler this deterministic update replaced).
D.post<-rep(NA,length(D.post.mean))
D.post[1:maxdim]<-D.post.mean[1:maxdim] + diag(D.post.var.2[1:maxdim,1:maxdim]) ^.5 #as.vector(mvrnorm(1, mu=D.post.mean[1:maxdim], D.post.var.2[1:maxdim,1:maxdim] ) )/sigma^.5
D.post[is.na(D.post)]<-0
abs.D.post<-abs(D.post)
#abs.D.post.loop<-abs(mvrnorm(50, mu=D.post.mean[1:maxdim], D.post.var.2[1:maxdim,1:maxdim] ) )
#print(abs.D.post.loop)
##Calculate MAP and mean estimate
# D.trunc is the soft-thresholded (MAP/lasso) estimate of the singular values.
D.trunc<-pmax(svd0$d-lambda.lasso.0,0)
U.last<-svd0$u
V.last<-svd0$v
U.mean.next<-0*U.last
V.mean.next<-0*V.last
##Construct U and V
#prior var of 2
#D.adj<-abs(D.post[1:maxdim])/svd0$d
#U.last<-t(t(U.last)*(D.post[1:maxdim]^2/(D.post[1:maxdim]^2+1/(4*k))))
#V.last<-t(t(V.last)*D.post[1:maxdim]^2/(D.post[1:maxdim]^2+1/(4*n)))
U.last[!is.finite(U.last)]<-0
V.last[!is.finite(V.last)]<-0
U.next<-U.last
V.next<-V.last
# Reassemble Theta: posterior-mean version (Theta.last) and thresholded
# "mode" version (Theta.mode), with the intercept matrix added back.
Theta.last.0<-U.next%*%diag(D.post[1:maxdim])%*%t(V.next)
Theta.last<-Theta.last.0+mean.mat
Theta.mode<- U.next%*%diag(D.trunc[1:maxdim])%*%t(V.next)+mean.mat
##Update muprime, invTau2, lambda.lasso
# Update the latent lasso scales via inverse-Gaussian draws (averaged over
# 500 samples), then set lambda from the resulting Dtau.
muprime<-(abs(lambda.lasso*sqrt(sigma)))/abs.D.post#*colMeans(1/abs.D.post.loop)
invTau2<-muprime#sapply(1:maxdim, FUN=function(i) rinv.gaussian(1, muprime[i], (lambda.lasso^2 ) ) )
#invTau2<-matrix(NA,nrow=500,ncol=maxdim)
invTau2<-sapply(1:maxdim, FUN=function(i) rinv.gaussian(500, muprime[i], (lambda.lasso^2 ) ) )
Dtau<-colMeans(1/abs(invTau2))
#lambda.lasso<-((maxdim)/(sum(Dtau[1:maxdim])/2))^.5
#lambda.lasso<-(2/mean(Dtau))^.5
#ran.gamma<-rgamma(1000, shape=maxdim+1 , rate=sum(Dtau[1:maxdim])/2+1.78 )
#d1<-density(ran.gamma^.5,cut=0)
#lambda.shrink<-lambda.lasso<-d1$x[d1$y==max(d1$y)]
lambda.shrink<-lambda.lasso<-((maxdim)/(sum(Dtau[1:maxdim])/2+1.78))^.5
#lambda.shrink<-lambda.lasso<-rgamma(1, shape=maxdim+1 , rate=sum(Dtau[1:maxdim])/2+1.78 )^.5
return(list(
"Theta.last"=Theta.last,
"U.next"=U.next,
"V.next"=V.next,
"lambda.lasso"=lambda.lasso,
"lambda.shrink"=lambda.shrink,
"D.trunc"=D.trunc,
"D.post"=D.post,
"Theta.mode"=Theta.mode,
"svd0"=svd0,
"Dtau"=Dtau,
"D.ols"=svd0$d
))
}
# expit: inverse-logit (logistic) function, vectorized over x.
# Written as 1/(1+exp(-x)) rather than exp(x)/(1+exp(x)): the latter
# overflows for large positive x (exp(x) -> Inf, giving Inf/Inf = NaN),
# while this form returns the correct limit of 1. For negative and
# moderate x the two forms agree.
expit<-function(x) 1/(1+exp(-x))
|
# Script: fit an exponential decay model to new-gene counts per added genome
# (pan-genome analysis) and plot it on log-log axes to a PostScript file.
# Placeholders ###input_file###, ###output_path###, ###TITLE### are filled in
# by the calling pipeline before execution.
# Input columns used: V1 = number of genomes, V2 = new genes,
# V8 = source-genome index, V9 = genome names.
invisible(options(echo = TRUE))
## read in data
pangenome <- read.table("###input_file###", header=FALSE)
genome_count <- max(pangenome$V8)
genomes <- (pangenome$V9[1:genome_count])
print(genomes)
# Drop the single-genome rows; the model is fit on comparisons only.
pangenome <- pangenome[ pangenome$V1 > 1, ]
attach(pangenome)
## Calculate the means
v2means <- as.vector(tapply(V2,V1,FUN=mean))
v1means <- as.vector(tapply(V1,V1,FUN=mean))
## Calculate the medians
v2allmedians <- as.vector(tapply(V2,V1,FUN=median))
v1allmedians <- as.vector(tapply(V1,V1,FUN=median))
# plot points from each new comparison genome in its own color
row_count <- length(V1)
source_colors <- rainbow(genome_count)
p_color <- c()
for ( ii in c(1:row_count) ) {
p_color[ii] <- source_colors[V8[ii]]
# points(temp_v1, temp_v4, pch=17, col=p_color)
}
## end of color block
## exponential model based on medianss
# Decay model: new genes ~ th1 + th2 * exp(-n / th3); th1 is the asymptotic
# core estimate. Start values are hard-coded guesses for nls().
nlmodel_exp <- nls(v2allmedians ~ th1 + th2* exp(-v1allmedians / th3), data=pangenome,
start=list(th1=33, th2=476, th3=1.5))
#summary(nlmodel_exp)
# Open up the output file for the log graph
postscript(file="###output_path###core_genes_exponential_medians_log.ps", width=11, height=8.5, paper='special')
layout(matrix(c(1,2),byrow=TRUE), heights=c(7.5,1))
# Draw the axis
plot(V1,V2, xlab="number of genomes", ylab="new genes", main="###TITLE### core genes exponential log axis", cex=0.5, log="xy", col=p_color)
# plot the medians
points(tapply(pangenome$V2,pangenome$V1,FUN=median)~tapply(pangenome$V1,pangenome$V1,FUN=median),pch=5,col='black')
# plot the means
points(tapply(V2,V1,FUN=mean)~tapply(V1,V1,FUN=mean),pch=6,col='black')
# plot the regression
x <- seq(par()$xaxp[1]-1,as.integer(1.0 + 10^par()$usr[[2]]))
lines(x, predict(nlmodel_exp, data.frame(v1allmedians=x)), lwd=2, col="black")
# Dashed horizontal line at the fitted asymptote th1 (core-genome estimate).
abline(h=nlmodel_exp$m$getPars()[1], lty=2, lwd=2,col="black")
# Build a plotmath expression of the fitted equation with standard errors
# (summary(...)[10][[1]][3:5] pulls the SE column of the coefficient table).
expr_exp <- substitute(
expression(y == th1 %+-% th1err + th2 %+-% th2err * italic(e)^(-x / (th3 %+-% th3err))),
list(
th1 = round(nlmodel_exp$m$getPars()[1], digit=2),
th1err = round(summary(nlmodel_exp)[10][[1]][3], digit=2),
th2 = round(nlmodel_exp$m$getPars()[2], digit=2),
th2err = round(summary(nlmodel_exp)[10][[1]][4], digit=2),
th3 = round(nlmodel_exp$m$getPars()[3], digit=2),
th3err = round(summary(nlmodel_exp)[10][[1]][5], digit=2)
)
)
# Second layout panel: legend strip beneath the main plot.
par(mai=c(.2,0,0,0))
height<- (10^(par()$usr[4]) - 10^(par()$usr[3]))
width<- (10^(par()$usr[2]) - 10^(par()$usr[1]))
plot.new()
legend("top", c(eval(expr_exp)), lwd=c(2,2), yjust=0.5,xjust=0)
#legend(10^(par()$usr[2])+(0.01*width),10^(par()$usr[3]) + height/2, c(eval(expr_exp)), lwd=c(2,2), yjust=0.5,xjust=0) | /clovr_pipelines/workflow/project_saved_templates/clovr_pangenome/core_genes/core_genes_exponential_medians_log.R | no_license | carze/clovr-base | R | false | false | 2,753 | r | invisible(options(echo = TRUE))
## read in data
pangenome <- read.table("###input_file###", header=FALSE)
genome_count <- max(pangenome$V8)
genomes <- (pangenome$V9[1:genome_count])
print(genomes)
pangenome <- pangenome[ pangenome$V1 > 1, ]
attach(pangenome)
## Calculate the means
v2means <- as.vector(tapply(V2,V1,FUN=mean))
v1means <- as.vector(tapply(V1,V1,FUN=mean))
## Calculate the medians
v2allmedians <- as.vector(tapply(V2,V1,FUN=median))
v1allmedians <- as.vector(tapply(V1,V1,FUN=median))
# plot points from each new comparison genome in its own color
row_count <- length(V1)
source_colors <- rainbow(genome_count)
p_color <- c()
for ( ii in c(1:row_count) ) {
p_color[ii] <- source_colors[V8[ii]]
# points(temp_v1, temp_v4, pch=17, col=p_color)
}
## end of color block
## exponential model based on medianss
nlmodel_exp <- nls(v2allmedians ~ th1 + th2* exp(-v1allmedians / th3), data=pangenome,
start=list(th1=33, th2=476, th3=1.5))
#summary(nlmodel_exp)
# Open up the output file for the log graph
postscript(file="###output_path###core_genes_exponential_medians_log.ps", width=11, height=8.5, paper='special')
layout(matrix(c(1,2),byrow=TRUE), heights=c(7.5,1))
# Draw the axis
plot(V1,V2, xlab="number of genomes", ylab="new genes", main="###TITLE### core genes exponential log axis", cex=0.5, log="xy", col=p_color)
# plot the medians
points(tapply(pangenome$V2,pangenome$V1,FUN=median)~tapply(pangenome$V1,pangenome$V1,FUN=median),pch=5,col='black')
# plot the means
points(tapply(V2,V1,FUN=mean)~tapply(V1,V1,FUN=mean),pch=6,col='black')
# plot the regression
x <- seq(par()$xaxp[1]-1,as.integer(1.0 + 10^par()$usr[[2]]))
lines(x, predict(nlmodel_exp, data.frame(v1allmedians=x)), lwd=2, col="black")
abline(h=nlmodel_exp$m$getPars()[1], lty=2, lwd=2,col="black")
expr_exp <- substitute(
expression(y == th1 %+-% th1err + th2 %+-% th2err * italic(e)^(-x / (th3 %+-% th3err))),
list(
th1 = round(nlmodel_exp$m$getPars()[1], digit=2),
th1err = round(summary(nlmodel_exp)[10][[1]][3], digit=2),
th2 = round(nlmodel_exp$m$getPars()[2], digit=2),
th2err = round(summary(nlmodel_exp)[10][[1]][4], digit=2),
th3 = round(nlmodel_exp$m$getPars()[3], digit=2),
th3err = round(summary(nlmodel_exp)[10][[1]][5], digit=2)
)
)
par(mai=c(.2,0,0,0))
height<- (10^(par()$usr[4]) - 10^(par()$usr[3]))
width<- (10^(par()$usr[2]) - 10^(par()$usr[1]))
plot.new()
legend("top", c(eval(expr_exp)), lwd=c(2,2), yjust=0.5,xjust=0)
#legend(10^(par()$usr[2])+(0.01*width),10^(par()$usr[3]) + height/2, c(eval(expr_exp)), lwd=c(2,2), yjust=0.5,xjust=0) |
\name{Dataset-class}
\alias{Dataset-class}
\docType{class}
\title{
Dataset
}
\format{An R6 class object.}
\description{
A Dataset is an Entity that defines a
flat list of entities as a tableview (a.k.a. a "dataset").
}
\section{Methods}{
\itemize{
\item \code{Dataset(name=NULL, columns=NULL, parent=NULL, properties=NULL, addDefaultViewColumns=TRUE, addAnnotationColumns=TRUE, ignoredAnnotationColumnNames=list(), annotations=NULL, local_state=NULL, dataset_items=NULL, folders=NULL, force=FALSE, description=NULL, folder=NULL)}: Constructor for \code{\link{Dataset}}
\item \code{addColumn(column)}:
\item \code{addColumns(columns)}:
\item \code{add_folder(folder, force=TRUE)}:
\item \code{add_folders(folders, force=TRUE)}:
\item \code{add_item(dataset_item, force=TRUE)}:
\item \code{add_items(dataset_items, force=TRUE)}:
\item \code{add_scope(entities)}:
\item \code{empty()}:
\item \code{has_columns()}: Does this schema have columns specified?
\item \code{has_item(item_id)}:
\item \code{removeColumn(column)}:
\item \code{remove_item(item_id)}:
}
}
| /man/Dataset-class.Rd | permissive | Sage-Bionetworks/synapser | R | false | false | 1,059 | rd | \name{Dataset-class}
\alias{Dataset-class}
\docType{class}
\title{
Dataset
}
\format{An R6 class object.}
\description{
A Dataset is an Entity that defines a
flat list of entities as a tableview (a.k.a. a "dataset").
}
\section{Methods}{
\itemize{
\item \code{Dataset(name=NULL, columns=NULL, parent=NULL, properties=NULL, addDefaultViewColumns=TRUE, addAnnotationColumns=TRUE, ignoredAnnotationColumnNames=list(), annotations=NULL, local_state=NULL, dataset_items=NULL, folders=NULL, force=FALSE, description=NULL, folder=NULL)}: Constructor for \code{\link{Dataset}}
\item \code{addColumn(column)}:
\item \code{addColumns(columns)}:
\item \code{add_folder(folder, force=TRUE)}:
\item \code{add_folders(folders, force=TRUE)}:
\item \code{add_item(dataset_item, force=TRUE)}:
\item \code{add_items(dataset_items, force=TRUE)}:
\item \code{add_scope(entities)}:
\item \code{empty()}:
\item \code{has_columns()}: Does this schema have columns specified?
\item \code{has_item(item_id)}:
\item \code{removeColumn(column)}:
\item \code{remove_item(item_id)}:
}
}
|
# Practice simulation: individuals placed on a grid lay eggs into "beans"
# cells.  NOTE(review): this script does not run as written; problems are
# flagged inline.
library(tidyverse)
# number of individuals
indiv <- 100
# per-individual movement draws, rounded and made non-negative
mov <- round(rnorm(indiv, 10, 2)) %>% abs()
# resource count shared by all individuals
rsc <- 10
# random integer grid coordinates
locx <- rnorm(indiv, 0, 10) %>% round()
locy <- rnorm(indiv, 0, 10) %>% round()
indivs <- tibble(id=1:indiv, mov, rsc, locx, locy)
# full 41 x 41 grid of cells with an egg counter initialised to zero
beans <- tibble(x=-20:20, y=-20:20) %>% complete(x,y) %>%
  mutate(eggsnum=0)
# NOTE(review): foreach()/%do% require library(foreach); tidyverse does not
# attach it, so this errors with "could not find function" as written.
foreach(i=1:indiv) %do% {
  # NOTE(review): this overwrites the scalar `indiv` with a one-row tibble,
  # shadowing the count used for the loop bound above.
  indiv <- indivs %>% slice(i)
  locs <- indiv %>% select(locx, locy)
  # NOTE(review): `1:indiv %>% pull(rsc)` parses as `(1:indiv) %>% pull(rsc)`
  # because `:` binds tighter than `%>%`, and pull() on a plain vector errors;
  # `1:(indiv %>% pull(rsc))` was probably intended.  Also if_else() is called
  # with only one (non-logical) argument, and `eggsum` looks like a typo for
  # the `eggsnum` column -- confirm intent before fixing.
  beans <- foreach(j=1:indiv %>% pull(rsc)) %do% {
    beans %>% mutate(eggsum=if_else(indiv %>% pull(locx)))
  }
}
| /sample_tqexam.r | no_license | 6W3N/R4DS_EX | R | false | false | 511 | r | library(tidyverse)
indiv <- 100
mov <- round(rnorm(indiv, 10, 2)) %>% abs()
rsc <- 10
locx <- rnorm(indiv, 0, 10) %>% round()
locy <- rnorm(indiv, 0, 10) %>% round()
indivs <- tibble(id=1:indiv, mov, rsc, locx, locy)
beans <- tibble(x=-20:20, y=-20:20) %>% complete(x,y) %>%
mutate(eggsnum=0)
foreach(i=1:indiv) %do% {
indiv <- indivs %>% slice(i)
locs <- indiv %>% select(locx, locy)
beans <- foreach(j=1:indiv %>% pull(rsc)) %do% {
beans %>% mutate(eggsum=if_else(indiv %>% pull(locx)))
}
}
|
library(dplyr)
read_power<-read.table("household_power_consumption.txt",sep=";",header=TRUE,stringsAsFactors=FALSE)
power<-mutate(read_power,NDate=paste(Date,Time))
power$NDate<-as.POSIXct(power$NDate,format="%d/%m/%Y %H:%M:%S")
powerf<-filter(power,NDate<as.POSIXct("2007-02-03",format="%Y-%m-%d") & NDate>=as.POSIXct("2007-02-01",format="%Y-%m-%d"))
powerf$Sub_metering_1<-as.numeric(powerf$Sub_metering_1)
powerf$Sub_metering_2<-as.numeric(powerf$Sub_metering_2)
powerf$Voltage<-as.numeric(powerf$Voltage)
powerf$Global_active_power<-as.numeric(powerf$Global_active_power)
powerf$Global_reactive_power<-as.numeric(powerf$Global_reactive_power)
png(file="plot4.png")
par(mfrow=c(2,2),mar=c(4,4,2,1),cex=0.7)
with(powerf,plot(NDate,Global_active_power,type="l",xlab="",ylab="Global Active Power"))
with(powerf,plot(NDate,Voltage,type="l",xlab="datetime",ylab="Voltage"))
with(powerf,plot(NDate,Sub_metering_1,col="green",xlab="",ylab="Energy sub metering",type="l"))
with(powerf,lines(NDate,Sub_metering_2,col="red"))
with(powerf,lines(NDate,Sub_metering_3,col="blue"))
legend("topright",col=c("green","red","blue"),lty=c("solid","solid","solid"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
with(powerf,plot(NDate,Global_reactive_power,type="l",xlab="datetime",ylab="Global Reactive Power"))
dev.off()
| /plot4.R | no_license | danielcalcinaro/ExData_Plotting1 | R | false | false | 1,381 | r | library(dplyr)
read_power<-read.table("household_power_consumption.txt",sep=";",header=TRUE,stringsAsFactors=FALSE)
power<-mutate(read_power,NDate=paste(Date,Time))
power$NDate<-as.POSIXct(power$NDate,format="%d/%m/%Y %H:%M:%S")
powerf<-filter(power,NDate<as.POSIXct("2007-02-03",format="%Y-%m-%d") & NDate>=as.POSIXct("2007-02-01",format="%Y-%m-%d"))
powerf$Sub_metering_1<-as.numeric(powerf$Sub_metering_1)
powerf$Sub_metering_2<-as.numeric(powerf$Sub_metering_2)
powerf$Voltage<-as.numeric(powerf$Voltage)
powerf$Global_active_power<-as.numeric(powerf$Global_active_power)
powerf$Global_reactive_power<-as.numeric(powerf$Global_reactive_power)
png(file="plot4.png")
par(mfrow=c(2,2),mar=c(4,4,2,1),cex=0.7)
with(powerf,plot(NDate,Global_active_power,type="l",xlab="",ylab="Global Active Power"))
with(powerf,plot(NDate,Voltage,type="l",xlab="datetime",ylab="Voltage"))
with(powerf,plot(NDate,Sub_metering_1,col="green",xlab="",ylab="Energy sub metering",type="l"))
with(powerf,lines(NDate,Sub_metering_2,col="red"))
with(powerf,lines(NDate,Sub_metering_3,col="blue"))
legend("topright",col=c("green","red","blue"),lty=c("solid","solid","solid"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
with(powerf,plot(NDate,Global_reactive_power,type="l",xlab="datetime",ylab="Global Reactive Power"))
dev.off()
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "climate-model-simulation-crashes")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "outcome")
lrn = makeLearner("classif.ada", par.vals = list(), predict.type = "prob")
#:# hash
#:# dd2315ac7daeb27463dd57a0cb6c70f5
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_climate-model-simulation-crashes/classification_outcome/dd2315ac7daeb27463dd57a0cb6c70f5/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 705 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "climate-model-simulation-crashes")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "outcome")
lrn = makeLearner("classif.ada", par.vals = list(), predict.type = "prob")
#:# hash
#:# dd2315ac7daeb27463dd57a0cb6c70f5
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
# clear all variables
rm(list = ls(all = TRUE))
graphics.off()
# install and load packages
libraries = c("KernSmooth")
lapply(libraries, function(x) if (!(x %in% installed.packages())) {
install.packages(x)
})
lapply(libraries, library, quietly = TRUE, character.only = TRUE)
p = 0.5
n = 35
bsample = rbinom(n * 1000, 1, 0.5) # Random generation of the binomial distribution with parameters 1000*n and 0.5
bsamplem = matrix(bsample, n, 1000) # Create a matrix of binomial random variables
bden = bkde((colMeans(bsamplem) - p)/sqrt(p * (1 - p)/n)) # Compute kernel density estimate
# Plot
plot(bden, col = "blue3", type = "l", lty = 1, lwd = 4, xlab = "1000 Random Samples",
ylab = "Estimated and Normal Density", cex.lab = 1, cex.axis = 1, ylim = c(0,
0.45))
lines(bden$x, dnorm(bden$x), col = "red3", lty = 1, lwd = 4)
title(paste("Asymptotic Distribution, n =", n))
| /QID-1199-MVAcltbern/MVAcltbern.r | no_license | QuantLet/MVA | R | false | false | 911 | r |
# clear all variables
rm(list = ls(all = TRUE))
graphics.off()
# install and load packages
libraries = c("KernSmooth")
lapply(libraries, function(x) if (!(x %in% installed.packages())) {
install.packages(x)
})
lapply(libraries, library, quietly = TRUE, character.only = TRUE)
p = 0.5
n = 35
bsample = rbinom(n * 1000, 1, 0.5) # Random generation of the binomial distribution with parameters 1000*n and 0.5
bsamplem = matrix(bsample, n, 1000) # Create a matrix of binomial random variables
bden = bkde((colMeans(bsamplem) - p)/sqrt(p * (1 - p)/n)) # Compute kernel density estimate
# Plot
plot(bden, col = "blue3", type = "l", lty = 1, lwd = 4, xlab = "1000 Random Samples",
ylab = "Estimated and Normal Density", cex.lab = 1, cex.axis = 1, ylim = c(0,
0.45))
lines(bden$x, dnorm(bden$x), col = "red3", lty = 1, lwd = 4)
title(paste("Asymptotic Distribution, n =", n))
|
library(tidyverse); library(AER); library(stargazer); library(dynlm);
library(quantmod); library(forecast); library(strucchange); library(readr);
library(vars); library(xts); library(mfx)
### Upload Data
MA <- read_csv("ALL.csv")
MA$Time <- as.Date(MA$Time, "%m/%d/%Y")
#We're going to use xts which makes time series data a little
#easier to work with. (we used this above)
MA.xts <- xts(MA[, 4:6], order.by = MA$Time)
#ADF test
GDP.df.test <- ur.df(MA.xts$GDP, type = "trend", lags = 6, selectlags = "AIC")
summary(GDP.df.test)
Transactions.df.test <- ur.df(MA.xts$GDP, type = "trend", lags = 6, selectlags = "AIC")
summary(GDP.df.test)
Valuation.df.test <- ur.df(MA.xts$GDP, type = "trend", lags = 6, selectlags = "AIC")
summary(GDP.df.test)
### Choosing lags for Transactions and GDP
acf(MA.xts$Valuation)
pacf(MA.xts$Transactions)
auto.arima(MA.xts$Transactions, max.p = 6, max.q = 0,
stationary = TRUE, seasonal = FALSE,
ic = "bic")
ar.1 <- arima(MA.xts$Transactions, order = c(14,0,0))
acf(ar.1$residuals)
#Create lags
MA.xts$Transactions.1 <- lag(MA.xts$Transactions)
MA.xts$Transactions.2 <- lag(MA.xts$Transactions, 2)
MA.xts$Transactions.3 <- lag(MA.xts$Transactions, 3)
MA.xts$Transactions.4 <- lag(MA.xts$Transactions, 4)
MA.xts$GDP.1 <- lag(MA.xts$GDP)
MA.xts$GDP.2 <- lag(MA.xts$GDP, 2)
MA.xts$GDP.3 <- lag(MA.xts$GDP, 3)
MA.xts$GDP.4 <- lag(MA.xts$GDP, 4)
MA.xts$GDP.5 <- lag(MA.xts$GDP, 5)
ic.mat <- matrix(NA, nrow = 20, ncol = 2)
colnames(ic.mat) <- c("AIC", "BIC")
for (i in 1:20) {
mod.temp <- dynlm(Transactions ~ L(Transactions, 1:4) + L(GDP, 1:i), data = as.zoo(MA.xts))
ic.mat[i, 1] <- AIC(mod.temp)
ic.mat[i, 2] <- BIC(mod.temp)
}
print(ic.mat)
adl.4.3 <- dynlm(Transactions ~ L(Transactions, 1) + L(Transactions, 2) + L(Transactions, 3)
+ L(Transactions, 4) + L(GDP, 1) + L(GDP, 2) + L(GDP, 3), as.zoo(MA.xts))
stargazer( adl.4.3, type = "text",
keep.stat = c("n", "rsq"))
### Granger causality test
linearHypothesis(adl.4.3, c("L(GDP, 1) = 0", "L(GDP, 2) = 0", "L(GDP, 3) = 0"),
vcov = sandwich)
##### Valuation
# Choosing lags for Valuation and GDP
acf(MA.xts$Valuation)
pacf(MA.xts$Valuation)
auto.arima(MA.xts$Valuation, max.p = 6, max.q = 0,
stationary = TRUE, seasonal = FALSE,
ic = "bic")
ic.mat <- matrix(NA, nrow = 20, ncol = 2)
colnames(ic.mat) <- c("AIC", "BIC")
for (i in 1:20) {
mod.temp <- dynlm(Valuation ~ L(GDP, 1:i), data = as.zoo(MA.xts))
ic.mat[i, 1] <- AIC(mod.temp)
ic.mat[i, 2] <- BIC(mod.temp)
}
print(ic.mat)
dl.2 <- dynlm(Valuation ~ L(GDP, 1) + L(GDP, 2), as.zoo(MA.xts))
stargazer(dl.2, type = "text",
keep.stat = c("n", "rsq"))
linearHypothesis(dl.2, c("L(GDP, 1) = 0", "L(GDP, 2) = 0"),
vcov = sandwich)
| /All.R | no_license | emilbille/Big-Data-M-A | R | false | false | 2,824 | r | library(tidyverse); library(AER); library(stargazer); library(dynlm);
library(quantmod); library(forecast); library(strucchange); library(readr);
library(vars); library(xts); library(mfx)
### Upload Data
MA <- read_csv("ALL.csv")
MA$Time <- as.Date(MA$Time, "%m/%d/%Y")
#We're going to use xts which makes time series data a little
#easier to work with. (we used this above)
MA.xts <- xts(MA[, 4:6], order.by = MA$Time)
#ADF test
GDP.df.test <- ur.df(MA.xts$GDP, type = "trend", lags = 6, selectlags = "AIC")
summary(GDP.df.test)
Transactions.df.test <- ur.df(MA.xts$GDP, type = "trend", lags = 6, selectlags = "AIC")
summary(GDP.df.test)
Valuation.df.test <- ur.df(MA.xts$GDP, type = "trend", lags = 6, selectlags = "AIC")
summary(GDP.df.test)
### Choosing lags for Transactions and GDP
acf(MA.xts$Valuation)
pacf(MA.xts$Transactions)
auto.arima(MA.xts$Transactions, max.p = 6, max.q = 0,
stationary = TRUE, seasonal = FALSE,
ic = "bic")
ar.1 <- arima(MA.xts$Transactions, order = c(14,0,0))
acf(ar.1$residuals)
#Create lags
MA.xts$Transactions.1 <- lag(MA.xts$Transactions)
MA.xts$Transactions.2 <- lag(MA.xts$Transactions, 2)
MA.xts$Transactions.3 <- lag(MA.xts$Transactions, 3)
MA.xts$Transactions.4 <- lag(MA.xts$Transactions, 4)
MA.xts$GDP.1 <- lag(MA.xts$GDP)
MA.xts$GDP.2 <- lag(MA.xts$GDP, 2)
MA.xts$GDP.3 <- lag(MA.xts$GDP, 3)
MA.xts$GDP.4 <- lag(MA.xts$GDP, 4)
MA.xts$GDP.5 <- lag(MA.xts$GDP, 5)
ic.mat <- matrix(NA, nrow = 20, ncol = 2)
colnames(ic.mat) <- c("AIC", "BIC")
for (i in 1:20) {
mod.temp <- dynlm(Transactions ~ L(Transactions, 1:4) + L(GDP, 1:i), data = as.zoo(MA.xts))
ic.mat[i, 1] <- AIC(mod.temp)
ic.mat[i, 2] <- BIC(mod.temp)
}
print(ic.mat)
adl.4.3 <- dynlm(Transactions ~ L(Transactions, 1) + L(Transactions, 2) + L(Transactions, 3)
+ L(Transactions, 4) + L(GDP, 1) + L(GDP, 2) + L(GDP, 3), as.zoo(MA.xts))
stargazer( adl.4.3, type = "text",
keep.stat = c("n", "rsq"))
### Granger causality test
linearHypothesis(adl.4.3, c("L(GDP, 1) = 0", "L(GDP, 2) = 0", "L(GDP, 3) = 0"),
vcov = sandwich)
##### Valuation
# Choosing lags for Valuation and GDP
acf(MA.xts$Valuation)
pacf(MA.xts$Valuation)
auto.arima(MA.xts$Valuation, max.p = 6, max.q = 0,
stationary = TRUE, seasonal = FALSE,
ic = "bic")
ic.mat <- matrix(NA, nrow = 20, ncol = 2)
colnames(ic.mat) <- c("AIC", "BIC")
for (i in 1:20) {
mod.temp <- dynlm(Valuation ~ L(GDP, 1:i), data = as.zoo(MA.xts))
ic.mat[i, 1] <- AIC(mod.temp)
ic.mat[i, 2] <- BIC(mod.temp)
}
print(ic.mat)
dl.2 <- dynlm(Valuation ~ L(GDP, 1) + L(GDP, 2), as.zoo(MA.xts))
stargazer(dl.2, type = "text",
keep.stat = c("n", "rsq"))
linearHypothesis(dl.2, c("L(GDP, 1) = 0", "L(GDP, 2) = 0"),
vcov = sandwich)
|
#Set our working directory.
#This helps avoid confusion if our working directory is
#not our site because of other projects we were
#working on at the time.
#setwd("/Users/abrahamalex/Psychology-Statistics")
#render your sweet site.
rmarkdown::render_site() | /build_site.R | no_license | a-abrahamalex/Psychology-Statistics | R | false | false | 263 | r | #Set our working directory.
#This helps avoid confusion if our working directory is
#not our site because of other projects we were
#working on at the time.
#setwd("/Users/abrahamalex/Psychology-Statistics")
#render your sweet site.
rmarkdown::render_site() |
# plot2.R -- plot Global Active Power over time (2007-02-01 and 2007-02-02)
# from the UCI household power consumption data set, saved as plot2.png.
downloadFile = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
extractFile = "household_power_consumption.txt"
# Download and unzip the raw data only when the extracted file is absent
if (!file.exists(extractFile)) {
  download.file(downloadFile,"exdata data household_power_consumption.zip")
  unzip("exdata data household_power_consumption.zip")
}
# Read the data file; missing values are coded as "?" in the raw file
allData = read.csv(extractFile, header = TRUE, sep=";", stringsAsFactors = FALSE, na.strings = "?")
# Keep only the two target days (dates are stored as d/m/Y strings)
plotData = allData[allData$Date %in% c("1/2/2007","2/2/2007"),]
# Combine date and time into a POSIXlt timestamp for the x axis
plotData$plotDate <- strptime(paste(plotData$Date, plotData$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
rm(allData)
# Draw the line chart and save it as a 480x480 PNG
plot2Data = plotData[!is.na(plotData$Global_active_power) & !is.na(plotData$plotDate), ]
png(filename = "plot2.png", width = 480, height = 480, units = "px")
plot(plot2Data$plotDate, plot2Data$Global_active_power, type="n", xlab="", ylab="Global Active Power (kilowatts)")
lines(plot2Data$plotDate, plot2Data$Global_active_power, type="l")
dev.off()
| /plot2.R | no_license | cchudsc/ExData_Plotting1 | R | false | false | 1,006 | r | downloadFile = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
extractFile = "household_power_consumption.txt"
#Download and unzip files
if (!file.exists(extractFile)) {
download.file(downloadFile,"exdata data household_power_consumption.zip")
unzip("exdata data household_power_consumption.zip")
}
#Read datafile
allData = read.csv(extractFile, header = TRUE, sep=";", stringsAsFactors = FALSE, na.strings = "?")
plotData = allData[allData$Date %in% c("1/2/2007","2/2/2007"),]
plotData$plotDate <- strptime(paste(plotData$Date, plotData$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
rm(allData)
#plot and save png
plot2Data = plotData[!is.na(plotData$Global_active_power) & !is.na(plotData$plotDate), ]
png(filename = "plot2.png", width = 480, height = 480, units = "px")
plot(plot2Data$plotDate, plot2Data$Global_active_power, type="n", xlab="", ylab="Global Active Power (kilowatts)")
lines(plot2Data$plotDate, plot2Data$Global_active_power, type="l")
dev.off()
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/D1Client.R
\name{upload}
\alias{upload}
\title{Upload an object to the DataONE System.}
\usage{
upload(x, ...)
}
\arguments{
\item{x}{: D1Client to do the uploading}
\item{...}{(not yet used)}
\item{object}{: the object to create in DataONE}
}
\value{
identifier of the uploaded object if success, otherwise FALSE
}
\description{
Uploads a DataObject on the MemberNode determined by the object's systemMetadata.
Values in the object's SystemMetadata are used to determine where the object
is uploaded, its identifier, format, owner, access policies, and other relevant
metadata about the object.
}
| /dataone/man/upload.Rd | permissive | KillEdision/rdataone | R | false | false | 690 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/D1Client.R
\name{upload}
\alias{upload}
\title{Upload an object to the DataONE System.}
\usage{
upload(x, ...)
}
\arguments{
\item{x}{: D1Client to do the uploading}
\item{...}{(not yet used)}
\item{object}{: the object to create in DataONE}
}
\value{
identifier of the uploaded object if success, otherwise FALSE
}
\description{
Uploads a DataObject on the MemberNode determined by the object's systemMetadata.
Values in the object's SystemMetadata are used to determine where the object
is uploaded, its identifier, format, owner, access policies, and other relevant
metadata about the object.
}
|
## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup---------------------------------------------------------------
library(irrCAC)
## ------------------------------------------------------------------------
data(package="irrCAC")
## ------------------------------------------------------------------------
cont3x3abstractors
kappa2.table(cont3x3abstractors)
scott2.table(cont3x3abstractors)
gwet.ac1.table(cont3x3abstractors)
bp2.table(cont3x3abstractors)
krippen2.table(cont3x3abstractors)
pa2.table(cont3x3abstractors)
## ------------------------------------------------------------------------
ac1 <- gwet.ac1.table(cont3x3abstractors)$coeff.val
## ------------------------------------------------------------------------
distrib.6raters
gwet.ac1.dist(distrib.6raters)
fleiss.kappa.dist(distrib.6raters)
krippen.alpha.dist(distrib.6raters)
bp.coeff.dist(distrib.6raters)
## ------------------------------------------------------------------------
alpha <- krippen.alpha.dist(distrib.6raters)$coeff
## ------------------------------------------------------------------------
ac1 <- gwet.ac1.dist(cac.dist4cat[,2:4])$coeff
## ------------------------------------------------------------------------
cac.raw4raters
## ------------------------------------------------------------------------
pa.coeff.raw(cac.raw4raters)
gwet.ac1.raw(cac.raw4raters)
fleiss.kappa.raw(cac.raw4raters)
krippen.alpha.raw(cac.raw4raters)
conger.kappa.raw(cac.raw4raters)
bp.coeff.raw(cac.raw4raters)
## ------------------------------------------------------------------------
ac1 <- gwet.ac1.raw(cac.raw4raters)$est
ac1
## ------------------------------------------------------------------------
ac1 <- gwet.ac1.raw(cac.raw4raters)$est
ac1$coeff.val
| /inst/doc/overview.R | no_license | cran/irrCAC | R | false | false | 1,918 | r | ## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup---------------------------------------------------------------
library(irrCAC)
## ------------------------------------------------------------------------
data(package="irrCAC")
## ------------------------------------------------------------------------
cont3x3abstractors
kappa2.table(cont3x3abstractors)
scott2.table(cont3x3abstractors)
gwet.ac1.table(cont3x3abstractors)
bp2.table(cont3x3abstractors)
krippen2.table(cont3x3abstractors)
pa2.table(cont3x3abstractors)
## ------------------------------------------------------------------------
ac1 <- gwet.ac1.table(cont3x3abstractors)$coeff.val
## ------------------------------------------------------------------------
distrib.6raters
gwet.ac1.dist(distrib.6raters)
fleiss.kappa.dist(distrib.6raters)
krippen.alpha.dist(distrib.6raters)
bp.coeff.dist(distrib.6raters)
## ------------------------------------------------------------------------
alpha <- krippen.alpha.dist(distrib.6raters)$coeff
## ------------------------------------------------------------------------
ac1 <- gwet.ac1.dist(cac.dist4cat[,2:4])$coeff
## ------------------------------------------------------------------------
cac.raw4raters
## ------------------------------------------------------------------------
pa.coeff.raw(cac.raw4raters)
gwet.ac1.raw(cac.raw4raters)
fleiss.kappa.raw(cac.raw4raters)
krippen.alpha.raw(cac.raw4raters)
conger.kappa.raw(cac.raw4raters)
bp.coeff.raw(cac.raw4raters)
## ------------------------------------------------------------------------
ac1 <- gwet.ac1.raw(cac.raw4raters)$est
ac1
## ------------------------------------------------------------------------
ac1 <- gwet.ac1.raw(cac.raw4raters)$est
ac1$coeff.val
|
# Print an ASCII-art Death Star to the console (purely decorative output).
deathstar = function() {
  art <- c(
    "       .              .                     .",
    "        .        .            .            .",
    "   +.        _____    .      +     .    .",
    " .      .  ,-~'     '~-.           +",
    "         ,^ ___         ^. +",
    "        / .^   ^.        \\      .    _ .",
    "       Y  l  o  !         Y     .   __CL\\H--.",
    "  .    l_ `.___.'        _,[    L__/_\\H'  \\--_-   +",
    "       |^~'-----------''~  ^|   +  __L_(=):  ]-_ _-- -",
    "  + .  !                    !  .    T__\\ /H. //---- -  .",
    "       .                      \\       ~^-H--'",
    "        ^.                  .^          .  ' +.",
    "          '-.._____.,-' .        .",
    "    +           .      . +   .",
    "          +          .         +",
    "   .            .      ."
  )
  # cat(line, "\n") reproduces the original output exactly (a space is
  # inserted between the two arguments by cat's default separator).
  for (ln in art) {
    cat(ln, "\n")
  }
}
| /R/other.R | no_license | jacobfredsoee/LCEF | R | false | false | 1,216 | r | deathstar = function() {
cat(" . .", "\n")
cat(" . . . . .", "\n")
cat(" +. _____ . . + . .", "\n")
cat(" . . ,-~' '~-. +", "\n")
cat(" ,^ ___ ^. + . . .", "\n")
cat(" / .^ ^. \\ . _ .", "\n")
cat(" Y l o ! Y . __CL\\H--.", "\n")
cat(" . l_ `.___.' _,[ L__/_\\H' \\--_- +", "\n")
cat(" |^~'-----------''~ ^| + __L_(=): ]-_ _-- -", "\n")
cat(" + . ! ! . T__\\ /H. //---- - .", "\n")
cat(" . \\ / ~^-H--'", "\n")
cat(" ^. .^ . ' +.", "\n")
cat(" '-.._____.,-' . .", "\n")
cat(" + . . + .", "\n")
cat(" + . + .", "\n")
cat(" . . .", "\n")
}
|
# Build an interactive streamgraph of monthly unemployment rates by state
# and save it as a standalone HTML widget.
library(data.table)
library(streamgraph)
library(htmlwidgets)
# Long-format data: the streamgraph call below reads one state/rate pair
# per month row.
jobloss <- fread("stateunemployment_longrate.csv")
head(jobloss$month)
# Parse the m/d/yy month strings into Date objects; the CSV had to be
# reformatted before as.Date() accepted this format.
jobloss$month <- as.Date(jobloss$month,format='%m/%d/%y')
pp <- streamgraph(jobloss, "state", "rate", "month", offset ="zero", interpolate ="cardinal", height="300px", width="1000px") %>%
  sg_legend(show=TRUE, label="states: ")
pp
saveWidget(pp, file=paste0(getwd(), "/streamgraphBasic.html"))
| /vizualization/unemploymentchart.r | no_license | osiau/GIS713DataChallenge | R | false | false | 494 | r | library(data.table)
library(streamgraph)
library(htmlwidgets)
jobloss <- fread("stateunemployment_longrate.csv")
head(jobloss$month)
jobloss$month <- as.Date(jobloss$month,format='%m/%d/%y') #had to format csv to work with asDate for some oddreason.
pp <- streamgraph(jobloss, "state", "rate", "month", offset ="zero", interpolate ="cardinal", height="300px", width="1000px") %>%
sg_legend(show=TRUE, label="states: ")
pp
saveWidget(pp, file=paste0(getwd(), "/streamgraphBasic.html"))
|
#' Negative Log-Likelihood to be Minimized
#'
#' Computes the negative normal log-likelihood of the observed array
#' \code{A} around the fitted values \code{g_obj(a[1:npar])}.  In addition
#' to the \code{npar = length(a) - 2} loss-model parameters, \code{a}
#' carries two variance parameters: \code{a[npar + 1]}, a log-scale
#' proportionality constant, and \code{a[npar + 2]}, the power applied to
#' the squared fitted values.  The variance of each cell is inversely
#' proportional to the exposure of its row
#' (\code{exp(a[npar + 1]) / dnom[i] * (e^2)^a[npar + 2]}).
#'
#' @param a numeric parameter vector; the first \code{length(a) - 2}
#'   entries are passed to \code{g_obj}, the last two drive the variance.
#' @param A numeric matrix of observations; \code{NA} cells (presumably the
#'   unobserved part of the development array) are excluded from the sum.
#' @param dnom numeric vector representing the exposures (claims) used in
#'   the denominator, one entry per row of the square array.
#' @param g_obj function mapping the model parameters to the matrix of
#'   fitted values (same dimensions as \code{A}).
#' @return the negative log-likelihood as a single numeric value.
#' @export
make_negative_log_likelihood <- function(a, A, dnom, g_obj) {
  npar <- length(a) - 2
  size <- length(dnom)
  # Fitted values from the loss model.
  e <- g_obj(a[1:npar])
  # Cell variances.  The original built a size x size matrix of log
  # exposures and used only its first column; log(dnom) is that column.
  # outer() replicates the row-wise exposure term across all columns.
  v <- exp(-outer(log(dnom), rep(a[npar + 1], size), "-")) * (e^2)^a[npar + 2]
  t1 <- log(2 * pi * v) / 2
  t2 <- (A - e)^2 / (2 * v)
  sum(t1 + t2, na.rm = TRUE)
}
| /R/make_negative_log_likelihood.R | permissive | OwenAnalytics/stochasticreserver | R | false | false | 984 | r | #' Make Negative Loglikelihood Function to be Minimized
#'
#' Note that the general
#' form of the model has parameters in addition to those in the loss model,
#' namely the power for the variance and the constant of proprtionality that
#' varies by column. So if the original model has k parameters with size
#' columns of data, the total objective function has k + size + 1 parameters
#' @param a do not know
#' @param A do not know
#' @param dnom numeric vector representing the exposures (claims) used in the
#' denominator
#' @param g_obj objective function
#' @export
make_negative_log_likelihood <- function(a, A, dnom, g_obj) {
npar = length(a) - 2
size <- length(dnom)
# Generate a matrix to reflect exposure count in the variance structure
logd = log(matrix(dnom, size, size))
e = g_obj(a[1:npar])
v = exp(-outer(logd[, 1], rep(a[npar + 1], size), "-")) * (e^2)^a[npar + 2]
t1 = log(2 * pi * v) / 2
t2 = (A - e) ^ 2 / (2 * v)
sum(t1 + t2, na.rm = TRUE)
}
|
## Sigmoid (logistic) activation function: maps the real line to (0, 1).
## Vectorized -- operates elementwise on numeric vectors and matrices.
## (Idiom: explicit return() dropped; last expression is the return value.)
sigmoid <- function(x) {
  1 / (1 + exp(-x))
}
## Derivative of the sigmoid: sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).
## (The original comment mislabeled this as "ReLU".)  Vectorized, and
## sigmoid(x) is evaluated once instead of twice.
sig_der <- function(x) {
  s <- sigmoid(x)
  s * (1 - s)
}
### Sample one alpha vector per candidate graph: an n.graph x n.hypo matrix
### of uniform draws, zeroed wherever alpha.const.in is 0, with every row
### rescaled to sum to one.
draw.alpha.fun = function(n.hypo, n.graph, alpha.const.in){
  # Single runif call in the same order as before, so the RNG stream (and
  # therefore the result under a fixed seed) is unchanged.
  draws <- matrix(runif(n.hypo*n.graph, 0, 1), nrow = n.graph, ncol = n.hypo)
  draws[, alpha.const.in == 0] <- 0
  # Divide each row by its own sum (margin 1).
  sweep(draws, 1, rowSums(draws), "/")
}
### Sample a random transition-weight matrix for each candidate graph.
### Returns an n.hypo x n.hypo x n.graph array: slice k is the weight
### matrix of graph k, with zeros wherever w.const.in is zero and every
### row rescaled to sum to one.
### NOTE(review): a row whose entries are all constrained to zero is
### divided by a zero row sum and becomes NaN.
draw.w.fun = function(n.hypo, n.graph, w.const.in){
  temp = array(runif(n.hypo*n.hypo*n.graph, 0, 1), dim = c(n.hypo, n.hypo, n.graph))
  # Zero out the constrained (disallowed) transitions in every slice.
  for (i in 1:n.graph){
    temp.in = temp[,,i]
    temp.in[w.const.in==0] = 0
    temp[,,i] = temp.in
  }
  # apply(..., 3, rowSums) yields an n.hypo x n.graph matrix of per-slice
  # row sums; it is expanded back to the full array shape and transposed
  # with aperm so that norm[i, j, k] holds the sum of row i in slice k.
  norm = apply(temp, 3, rowSums)
  norm = array(rep(norm, each = n.hypo), dim = c(n.hypo, n.hypo, n.graph))
  norm = aperm(norm, c(2,1,3))
  temp = temp/ norm
  return(temp)
}
### Compute per-hypothesis graphical power via the gMCP package: the
### proportion of simulated replications in which each hypothesis is
### rejected by the sequential graph procedure.
### alpha.in: initial alpha-weight vector; w.in: transition weight matrix;
### type.1.error.in: overall significance level; pval.sim.mat.in: matrix of
### simulated p-values -- presumably one column per replication, since it
### is transposed before being handed to graphTest (confirm with callers).
graph.power = function(alpha.in, w.in, type.1.error.in, pval.sim.mat.in){
  # Build the gMCP graph object from the weights and the alpha split.
  graph.in = matrix2graph(w.in, alpha.in)
  out_seq = graphTest(pvalues = t(pval.sim.mat.in), graph = graph.in, alpha = type.1.error.in)
  # Empirical power per hypothesis = column-wise rejection rate.
  out.power = apply(out_seq, 2, mean)
  return(out.power)
}
## Build the names of the free (unconstrained) parameters of a gMCP graph:
## "a<i>" for free alpha entries and "w<i>_<j>" for free transition weights.
## The last free name of the alpha vector and of each transition-matrix row
## is dropped (presumably because it is determined by the sum-to-one
## constraint -- TODO confirm with callers).
## Returns a list with the name vector plus "+"- and ","-joined versions.
obtain.name.func = function(alpha.const.in, w.const.in){
  # BUG FIX: the original referenced the global `alpha.const` here instead
  # of the `alpha.const.in` argument (and computed an unused `n.hypo`).
  name.free.space = head(paste0("a", which(alpha.const.in != 0)), -1)
  for (i in seq_len(nrow(w.const.in))){
    w.const.temp = w.const.in[i, ]
    name.free.space = c(name.free.space, head(paste0("w", i, "_", which(w.const.temp != 0)), -1))
  }
  name.free.plus = paste(name.free.space, collapse = "+")
  name.free.comma = paste(name.free.space, collapse = ",")
  newlist = list("name.free.space" = name.free.space,
                 "name.free.comma" = name.free.comma,
                 "name.free.plus" = name.free.plus)
  return(newlist)
}
## DNN fitting with optimization.
##
## Trains a Keras feed-forward network (n.layer.in hidden layers of
## n.node.in sigmoid units each, with dropout) to predict normalized graph
## power from the free graph parameters, reconstructs the fitted network
## analytically (forward pass + exact gradient built as text and
## eval(parse())-ed), maximizes the fitted surface with NLopt AUGLAG, and
## fine-tunes the best point with derivative-free COBYLA via
## naive.opt.func().
##
## Args:
##   n.node.in/n.layer.in/drop.rate.in/max.epoch.in: DNN architecture and
##     training settings.
##   k.indicator/k.itt.in: if k.indicator is FALSE, block k.itt.in of a
##     5-fold split is the validation set; otherwise one random row.
##   data.net.in:     simulated training data with target.power(.norm).
##   fit.tol.in, parallel, name.free.plus/comma: appear unused in this body
##     -- TODO confirm before removing.
##   pval.sim.mat.in: simulated p-values for the real-power evaluation.
##   obtain.name.fit: name list from obtain.name.func().
##   df.fit.tol.in/df.max.n.in/df.max.t.in: COBYLA fine-tune settings.
## NOTE(review): relies on the globals alpha.const, w.const and on
## gfo.func() / naive.opt.func() -- confirm they are in scope.
## Returns: list(n.nodes.output summary row, data.train, data.val).
neu.function = function(n.node.in, n.layer.in, k.indicator, k.itt.in, data.net.in,
                        fit.tol.in, pval.sim.mat.in, parallel, obtain.name.fit,
                        drop.rate.in, max.epoch.in, df.fit.tol.in, df.max.n.in,
                        df.max.t.in){
  neu.time = Sys.time()
  name.free.space = obtain.name.fit$name.free.space
  name.free.plus = obtain.name.fit$name.free.plus
  name.free.comma = obtain.name.fit$name.free.comma
  ## One summary row: fit diagnostics, the optimal solution, and settings.
  n.nodes.output = matrix(NA, nrow = 1, ncol = 9 + length(name.free.space))
  #rownames(n.nodes.output) = n.nodes.vec
  colnames(n.nodes.output) =
    c("TD_MSE", "VD_MSE", "opt_fit_power", "opt_real_power", "opt_rank",
      name.free.space,
      "max_power", "hidden", "layer", "drop_rate")
  n.nodes.output = data.frame(n.nodes.output)
  hidden.in = rep(n.node.in, n.layer.in)
  n.graph = dim(data.net.in)[1]
  ## Validation indices: contiguous block k.itt.in of a 5-fold split.
  val.ind = (n.graph/5)*(k.itt.in-1) + 1:(n.graph/5)
  if (k.indicator){
    val.ind = sample(1:n.graph, 1)
  }
  train.ind = (1:n.graph)[-val.ind]
  data.train = data.net.in[train.ind,]
  data.val = data.net.in[val.ind,]
  ## Keras: standardize features using training-set means/sds only.
  data.keras.train = subset(data.train, select=name.free.space)
  data.keras.val = subset(data.val, select=name.free.space)
  data.keras.train = as_tibble(data.keras.train)
  data.keras.train.scale <- scale(data.keras.train)
  col_means_train <- attr(data.keras.train.scale, "scaled:center")
  col_stddevs_train <- attr(data.keras.train.scale, "scaled:scale")
  data.keras.val.scale <- scale(data.keras.val,
                                center = col_means_train, scale = col_stddevs_train)
  k_clear_session()
  #rm(model)
  ## The model is assembled as a text string and eval(parse())-ed so the
  ## number of hidden layers can vary at run time; all hidden units are
  ## sigmoid with dropout, and the single output unit is sigmoid.
  build_model <- function() {
    model <- NULL
    ### with dropout
    model.text.1 = paste0("model <- keras_model_sequential() %>% layer_dense(units = n.node.in, activation =",
                          shQuote("sigmoid"),
                          ",input_shape = dim(data.keras.train.scale)[2]) %>% layer_dropout(rate=", drop.rate.in, ")%>%")
    model.text.2 = paste0(rep(paste0(" layer_dense(units = n.node.in, activation = ",
                                     shQuote("sigmoid"),
                                     ") %>% layer_dropout(rate=", drop.rate.in, ")%>%"),
                              (n.layer.in-1)), collapse ="")
    ### model.text.3
    model.text.3 = paste0("layer_dense(units = 1, activation = ",
                          shQuote("sigmoid"), ")")
    eval(parse(text=paste0(model.text.1, model.text.2, model.text.3)))
    model %>% compile(
      loss = "mse",
      optimizer = optimizer_rmsprop(),
      metrics = list("mse")
    )
    model
  }
  model <- build_model()
  model %>% summary()
  ## Progress indicator: one dot per epoch, line break every 80 epochs.
  print_dot_callback <- callback_lambda(
    on_epoch_end = function(epoch, logs) {
      if (epoch %% 80 == 0) cat("\n")
      cat(".")
    }
  )
  label.keras.train %<-% data.train$target.power.norm
  # label.keras.train %<-% data.train$target.power
  history <- model %>% fit(
    data.keras.train.scale,
    label.keras.train,
    epochs = max.epoch.in,
    validation_split = 0,
    verbose = 0,
    callbacks = list(print_dot_callback),
    batch_size = 100
  )
  print(history)
  net.train.result <- model %>% predict(data.keras.train.scale)
  net.val.result <- model %>% predict(data.keras.val.scale)
  ## Extract the layer weights and fold the feature standardization into the
  ## first-layer weights/bias, so the analytic network acts on raw inputs.
  w1.scale = get_weights(model)[[1]]
  b1.scale = as.matrix(get_weights(model)[[2]])
  w1 = t(w1.scale/matrix(rep(col_stddevs_train, dim(w1.scale)[2]),
                         nrow = dim(w1.scale)[1], ncol = dim(w1.scale)[2]))
  b1 = b1.scale - t(w1.scale)%*%as.matrix(col_means_train/col_stddevs_train)
  ## Bind w2..w(L+1) and b2..b(L+1) in this frame via eval(parse()).
  for (wb.itt in 2:(n.layer.in+1)){
    w.text = paste0("w", wb.itt, "=t(get_weights(model)[[", wb.itt*2-1, "]])")
    b.text = paste0("b", wb.itt, "= as.matrix(get_weights(model)[[", wb.itt*2, "]])")
    eval(parse(text=w.text))
    eval(parse(text=b.text))
  }
  ######################################################################
  ## for sigmoid function
  ## Build eval_f(x) as text: the forward pass of the fitted network plus
  ## der_f(i), the exact partial derivative w.r.t. input i. It returns the
  ## NEGATIVE fitted power and gradient because nloptr minimizes.
  eval_f_whole_text1 = paste0(
    "eval_f <- function( x ) {; x.mat = as.matrix(c(x)); w1x = (w1)%*%x.mat + b1;sw1x = as.matrix(c(sigmoid(w1x)))")
  eval_f_whole_text2 = NULL
  for (wb.itt in 2:(n.layer.in)){
    wx.text = paste0("w", wb.itt, "x = (w", wb.itt, ")%*%sw", wb.itt-1,
                     "x + b", wb.itt)
    swx.text = paste0("sw", wb.itt, "x = as.matrix(c(sigmoid(w", wb.itt, "x)))")
    eval_f_whole_text2 = paste(eval_f_whole_text2, wx.text, swx.text, sep = ";")
  }
  wb.itt.final = n.layer.in + 1
  wx.text = paste0("w", wb.itt.final, "x = (w", wb.itt.final, ")%*%sw", wb.itt.final-1,
                   "x + b", wb.itt.final)
  swx.text = paste0("sw",n.layer.in+1,"x =sigmoid(w", wb.itt.final, "x)")
  eval_f_whole_text2 = paste(eval_f_whole_text2, wx.text, swx.text, sep = ";")
  eval_f_whole_text3 = paste("der_f = function(i){;sw1x_der = as.matrix(as.vector(c((1-sigmoid(w1x))*sigmoid(w1x)))*as.vector(c(w1[, i])));w2x_der = (w2)%*%sw1x_der")
  if (n.layer.in>1){
    for (wb.itt in 2:(n.layer.in)){
      swx.text = paste0("sw", wb.itt, "x_der = as.matrix(as.vector(c(sig_der(w",
                        wb.itt, "x)))*as.vector(c(w", wb.itt, "x_der)))")
      wx.text = paste0("w", wb.itt+1, "x_der = (w", wb.itt+1, ")%*%sw",
                       wb.itt, "x_der")
      eval_f_whole_text3 = paste(eval_f_whole_text3, swx.text, wx.text, sep = ";")
    }
  }
  out.text = paste0("out = as.numeric(sig_der(w", n.layer.in+1,
                    "x)*w", n.layer.in+1, "x_der)")
  eval_f_whole_text3 = paste(eval_f_whole_text3, out.text,
                             "return(out); }", sep = ";")
  grad.text = paste("-der_f(", 1:(length(name.free.space)), ")", collapse = ",")
  return.text = paste0(" return( list( ", shQuote("objective"), " = -sw",
                       n.layer.in+1,"x,",
                       shQuote("gradient") , " = c(", grad.text, ") ) )")
  eval_f_whole_text = paste(eval_f_whole_text1, eval_f_whole_text2,";",
                            eval_f_whole_text3, ";", return.text,";", "}")
  eval(parse(text=eval_f_whole_text))
  #####################################################################################
  ## Map the sigmoid output back to the original power scale (inverse of the
  ## 0.3 + 0.4*z normalization done in sim.data.function) and attach
  ## normalized-rank columns for diagnostics.
  data.train$fit.power = as.vector(net.train.result)
  data.train$fit.target.power = (data.train$fit.power-0.3)/0.4*
    (max(data.net.in$target.power)-min(data.net.in$target.power))+
    min(data.net.in$target.power)
  data.train$rank.target.power = (rank(data.train$target.power)-1)/(dim(data.train)[1]-1)
  data.train$rank.fit.power = (rank(data.train$fit.power)-1)/(dim(data.train)[1]-1)
  data.val$fit.power = as.vector(net.val.result)
  data.val$fit.target.power = (data.val$fit.power-0.3)/0.4*
    (max(data.net.in$target.power)-min(data.net.in$target.power))+
    min(data.net.in$target.power)
  data.val$rank.target.power = (rank(data.val$target.power)-1)/(dim(data.val)[1]-1)
  data.val$rank.fit.power = (rank(data.val$fit.power)-1)/(dim(data.val)[1]-1)
  ###############################################################################
  ## optimization
  ###########################################################################################
  ## set several initial values for constrained optimization
  solve.opt.out = solve.opt.in = solve.opt.fit.out = NULL
  n.solve.opt = 1
  for (solve.opt.ind in 1:n.solve.opt){
    x0.in = NULL
    grad.mat = NULL
    const.text = ""
    ## Sum-to-one inequality constraints built as text: the free alpha
    ## entries (all but the last) must sum to <= 1, and likewise each row of
    ## the transition matrix. Uses the globals alpha.const / w.const --
    ## TODO confirm.
    alpha.free.ind = head(which(!alpha.const==0), -1)
    if (sum(alpha.const)>1){
      const.text = paste(const.text, paste("x[", 1:length(alpha.free.ind), "]", collapse = "+"), "-1,")
      grad.mat.temp = rep(0, length(name.free.space))
      grad.mat.temp[1:length(alpha.free.ind)] = 1
      grad.mat = rbind(grad.mat, grad.mat.temp)
      x0.temp.in = abs(rnorm(length(alpha.free.ind)+1, 0, 1))
      x0.temp.in = x0.temp.in/sum(x0.temp.in)
      x0.temp.in = x0.temp.in[1:length(alpha.free.ind)]
      x0.in = c(x0.in, x0.temp.in)
    }
    const.end = length(alpha.free.ind)
    for (i in 1:dim(w.const)[1]){
      w.const.temp = w.const[i, ]
      if (sum(w.const.temp)<=1) next
      w.free.ind = head(which(!w.const.temp==0), -1)
      const.text = paste(const.text, paste("x[", const.end + 1:length(w.free.ind), "]",
                                           collapse = "+"), "-1,")
      grad.mat.temp = rep(0, length(name.free.space))
      grad.mat.temp[const.end + 1:length(w.free.ind)] = 1
      grad.mat = rbind(grad.mat, grad.mat.temp)
      x0.temp.in = abs(rnorm(length(w.free.ind)+1, 0, 1))
      x0.temp.in = x0.temp.in/sum(x0.temp.in)
      x0.temp.in = x0.temp.in[1:length(w.free.ind)]
      x0.in = c(x0.in, x0.temp.in)
      const.end = const.end + length(w.free.ind)
    }
    ## Replace the trailing comma of the constraint text with ")".
    substr(const.text, str_length(const.text), str_length(const.text)) <- ")"
    const.text = paste("constr <- c(", const.text)
    # constraint functions
    # inequalities
    eval_g_ineq <- function( x ) {
      eval(parse(text=const.text))
      grad = grad.mat
      return( list( "constraints"=constr, "jacobian"=grad ) )
    }
    # lower and upper bounds of control
    lb <- rep(0, length(name.free.space))
    ub <- rep(1, length(name.free.space))
    # NLOPT_LD_AUGLAG NLOPT_LN_COBYLA
    local_opts <- list( "algorithm" = "NLOPT_LD_AUGLAG",
                        "xtol_rel" = 1.0e-5 )
    opts <- list( "algorithm" = "NLOPT_LD_AUGLAG",
                  "xtol_rel" = 1.0e-5,
                  "maxeval" = 10000,
                  "local_opts" = local_opts )
    res <- nloptr( x0=x0.in,
                   eval_f=eval_f,
                   lb=lb,
                   ub=ub,
                   eval_g_ineq=eval_g_ineq,
                   opts=opts)
    print(res)
    opt.input.temp = res$solution
    opt.data = as.tibble(t(as.matrix(opt.input.temp)))
    opt.data.scale <- scale(opt.data,
                            center = col_means_train, scale = col_stddevs_train)
    ## Fitted power at the optimum vs. simulation-based "real" power.
    opt.fit.power.temp = model %>% predict(opt.data.scale)
    opt.fit.power.real = -gfo.func(opt.input.temp)
    solve.opt.fit.out = c(solve.opt.fit.out, opt.fit.power.temp)
    solve.opt.out = c(solve.opt.out, opt.fit.power.real)
    solve.opt.in = rbind(solve.opt.in, opt.input.temp)
  }
  ## Keep the restart with the best real power, then polish with COBYLA.
  solve.opt.select.ind = which.max(solve.opt.out)
  opt.x0 = solve.opt.in[solve.opt.select.ind, ]
  opt.real.power = solve.opt.out[solve.opt.select.ind]
  print(opt.real.power)
  ## fine tune with COBYLA
  naive.opt.fit = naive.opt.func(nloptr.func.name = "NLOPT_LN_COBYLA",
                                 naive.opt.n = 1,
                                 naive.tol = df.fit.tol.in,
                                 naive.max.n = df.max.n.in,
                                 naive.max.t = df.max.t.in,
                                 pval.sim.mat.in = pval.sim.mat.in,
                                 x0.given = opt.x0,
                                 # x0.given = NULL
                                 set.seed.in = 1
  )
  ## Fill in the summary row.
  n.nodes.output[1, name.free.space] = naive.opt.fit$solution
  n.nodes.output$TD_MSE[1] = mean((data.train$fit.target.power-data.train$target.power)^2)
  n.nodes.output$VD_MSE[1] = mean((data.val$fit.target.power-data.val$target.power)^2)
  n.nodes.output$opt_real_power[1] = as.numeric(naive.opt.fit$naive.fit)
  n.nodes.output$opt_fit_power[1] = as.numeric(solve.opt.fit.out[solve.opt.select.ind])
  n.nodes.output$opt_rank[1] = sum(data.net.in$target.power>naive.opt.fit$naive.fit)+1
  print(n.nodes.output$opt_real_power)
  print(n.nodes.output$opt_rank)
  n.nodes.output$max_power[1] = as.numeric(max(data.net.in$target.power))
  n.nodes.output$hidden[1] = n.node.in
  n.nodes.output$layer[1] = n.layer.in
  n.nodes.output$drop_rate[1] = drop.rate.in
  n.nodes.output$time[1] = difftime(Sys.time(), neu.time, units="secs")
  n.nodes.output$iters[1] = naive.opt.fit$iters
  n.nodes.output$status[1] = naive.opt.fit$status
  newlist = list("n.nodes.output" = n.nodes.output,
                 "data.train" = data.train,
                 "data.val" = data.val)
  return(newlist)
}
## Simulate training data for the DNN: draws correlated test statistics,
## converts them to one-sided p-values, evaluates the graphical-testing
## power of every candidate graph in parallel, and normalizes the target
## power into the (0.3, 0.7) band used as the DNN label.
##
## Args:
##   n.hypo.in:       number of hypotheses in the graph.
##   n.sim.in:        number of simulated trials.
##   trt.vec.in:      mean vector of the test statistics.
##   alpha.fit.in:    matrix of candidate alpha vectors (one graph per row).
##   w.fit.in:        array of candidate transition matrices (graph index on
##                    dimension 3).
##   sigma.in:        covariance matrix of the test statistics.
##   corr.in:         unused in this body; kept for backward compatibility.
##   type.1.error.in: one-sided type-I error level.
##   obj.weight.in:   hypothesis weights of the power objective.
## NOTE(review): relies on the global n.cluster for the parallel backend --
## confirm it is defined before calling.
## Returns: list(pval.matrix, data.matrix, data.matrix.all,
##   sim.data.time.diff).
sim.data.function = function(n.hypo.in, n.sim.in, trt.vec.in, alpha.fit.in, w.fit.in,
                             sigma.in, corr.in, type.1.error.in, obj.weight.in){
  sim.data.time = Sys.time()
  trt.sim.mat = t(mvrnorm(n = n.sim.in, trt.vec.in, Sigma = sigma.in))
  pval.sim.mat = pnorm(trt.sim.mat, lower.tail = FALSE)
  ## Fix: count graphs from the argument alpha.fit.in; the original
  ## referenced the global alpha.fit by accident.
  n.graph.in = dim(alpha.fit.in)[1]
  ## One row per graph: alpha entries followed by the flattened (row-major)
  ## transition matrix.
  data.net = cbind(alpha.fit.in, matrix(aperm(w.fit.in, c(3,2,1)), nrow = n.graph.in,
                                        ncol = n.hypo.in*n.hypo.in))
  data.net = data.frame(data.net)
  colnames(data.net) = c(paste0("a", 1:n.hypo.in),
                         paste0("w", as.vector(sapply(1:n.hypo.in,
                                                      function(x){paste0(x,"_", 1:n.hypo.in)}))))
  cl = makeCluster(n.cluster)
  registerDoParallel(cl)
  ## Each worker loads its own libraries and sources the helper file so that
  ## graph.power() is available inside the foreach body.
  target.power.temp = foreach(i.graph.in = 1:n.graph.in) %dopar% {
    library(gMCP)
    library(MASS)
    library(nloptr)
    library(stringr)
    library(ANN2)
    library(CVTuningCov)
    library(tibble)
    library(pracma)
    source("graph_nn_general_functions_keras.R")
    graph.power.fit = graph.power(as.vector(alpha.fit.in[i.graph.in, ]),
                                  as.matrix(w.fit.in[,,i.graph.in]),
                                  type.1.error.in, pval.sim.mat)
    return(sum(graph.power.fit*obj.weight.in)/sum(obj.weight.in))
  }
  stopCluster(cl)
  target.power.in = unlist(target.power.temp)
  data.net$target.power = target.power.in
  ## Diagnostic summary: analytic power vs. empirical rejection rate.
  assump.out = matrix(NA, nrow=2, ncol=length(trt.vec.in))
  assump.out[1, ] = pnorm(qnorm(1-type.1.error.in), mean=trt.vec.in, lower.tail = FALSE)
  ## NOTE(review): the 0.025 threshold is hard-coded and will disagree with
  ## type.1.error.in when a different level is used -- confirm intent.
  assump.out[2, ] = apply(pval.sim.mat, 1, function(x){mean(x<=0.025)})
  rownames(assump.out) = c("true power", "sim power")
  data.net.all = data.net
  ## Finish data simulation
  ####################################################################################
  ## Normalize the target power into (0.3, 0.7) so it sits well inside the
  ## range of the sigmoid output unit of the DNN.
  data.net$target.power.norm =
    (data.net$target.power-min(data.net$target.power))/(max(data.net$target.power)-min(data.net$target.power))
  data.net$target.power.norm = data.net$target.power.norm*0.4+0.3
  newlist = list("pval.matrix" = pval.sim.mat, "data.matrix" = data.net,
                 "data.matrix.all" = data.net.all,
                 "sim.data.time.diff" = difftime(Sys.time(), sim.data.time, units="secs"))
  return(newlist)
}
## Objective function for the derivative-free optimizers (COBYLA / ISRES):
## maps the free-parameter vector x.gfo to a full (alpha, W) graph, projects
## it back onto the simplex constraints, and returns the NEGATIVE weighted
## power (nloptr minimizes).
## NOTE(review): relies on the globals alpha.const, w.const, type.1.error,
## sim.data.fit and obj.weight -- confirm they are in scope.
gfo.func = function(x.gfo){
  ## Free alpha entries: all nonzero mask positions except the last, whose
  ## value is implied by the sum-to-one constraint.
  alpha.free.ind = head(which(!alpha.const==0), -1)
  alpha.in = as.vector(rep(0, length(alpha.const)))
  if (sum(alpha.const)==1){
    alpha.in = alpha.const
  } else {
    alpha.in[alpha.const==1] = c(x.gfo[1:length(alpha.free.ind)],
                                 1 - sum(x.gfo[1:length(alpha.free.ind)]))
  }
  const.end = length(alpha.free.ind)
  ## Rebuild the transition matrix row by row; const.end tracks the offset
  ## of each row's free parameters inside x.gfo.
  w.in = matrix(0, nrow=dim(w.const)[1], ncol=dim(w.const)[1])
  for (i in 1:dim(w.in)[1]){
    w.const.temp = w.const[i,]
    if (sum(w.const.temp)==1){
      w.in[i, ] = w.const.temp
    } else {
      w.free.ind = head(which(!w.const.temp==0), -1)
      w.in[i, w.const[i,]==1] = c(x.gfo[1:length(w.free.ind) + const.end],
                                  1 - sum(x.gfo[1:length(w.free.ind) + const.end]))
      const.end = const.end + length(w.free.ind)
    }
  }
  ## Clip to [0, 1] and renormalize so the graph is always valid; the small
  ## epsilon in the row normalization avoids division by zero.
  alpha.in = pmin(alpha.in, 1)
  alpha.in = pmax(alpha.in, 0)
  alpha.in = alpha.in / sum(alpha.in)
  w.in[w.in<0] = 0
  w.in[w.in>1] = 1
  w.in = t(apply(w.in, 1, function(x){x/(sum(x)+10^(-6))}))
  # print(w.in)
  graph.power.gfo = graph.power(as.vector(alpha.in),
                                as.matrix(w.in),
                                type.1.error, sim.data.fit$pval.matrix)
  return(-sum(graph.power.gfo*obj.weight)/sum(obj.weight))
}
## Fit a derivative-free / global NLopt optimizer (COBYLA or ISRES) to the
## simulation-based power objective gfo.func().
##
## Args:
##   nloptr.func.name: NLopt algorithm name, e.g. "NLOPT_LN_COBYLA".
##   naive.opt.n:      number of random restarts.
##   naive.tol / naive.max.n / naive.max.t: xtol_rel, maxeval, maxtime.
##   pval.sim.mat.in:  simulated p-values used to evaluate the final graph.
##   x0.given:         optional starting point; random start when NULL.
##   set.seed.in:      RNG seed for the random starts.
## NOTE(review): relies on the globals alpha.const, w.const,
## name.free.space, type.1.error and obj.weight -- confirm they are in
## scope.
## Returns: objective values of all restarts plus the LAST restart's
## solution, reconstructed (alpha, W), status and iteration count.
naive.opt.func = function(nloptr.func.name, naive.opt.n, naive.tol, naive.max.n,
                          naive.max.t,
                          pval.sim.mat.in, x0.given, set.seed.in){
  set.seed(set.seed.in)
  naive.opt.time = Sys.time()
  test.temp = rep(0, naive.opt.n)
  for (naive.opt.ind in 1:naive.opt.n){
    print(paste(nloptr.func.name, ":", naive.opt.ind, "/", naive.opt.n))
    ## Build the sum-to-one inequality constraints as text (same scheme as
    ## in neu.function) and a random feasible starting point.
    const.text = ""
    x0.start = grad.mat = NULL
    alpha.free.ind = head(which(!alpha.const==0), -1)
    if (sum(alpha.const)>1){
      const.text = paste(const.text, paste("x[", 1:length(alpha.free.ind), "]", collapse = "+"), "-1,")
      grad.mat.temp = rep(0, length(name.free.space))
      grad.mat.temp[1:length(alpha.free.ind)] = 1
      grad.mat = rbind(grad.mat, grad.mat.temp)
      x0.start.in.1 = runif(n=length(alpha.free.ind)+1, 0, 1)
      x0.start.in.2 = x0.start.in.1/sum(x0.start.in.1)
      x0.start.in.3 = x0.start.in.2[2:(length(alpha.free.ind)+1)]
      x0.start = c(x0.start, x0.start.in.3)
    }
    const.end = length(alpha.free.ind)
    for (i in 1:dim(w.const)[1]){
      w.const.temp = w.const[i, ]
      if (sum(w.const.temp)<=1) next
      w.free.ind = head(which(!w.const.temp==0), -1)
      const.text = paste(const.text, paste("x[", const.end + 1:length(w.free.ind), "]",
                                           collapse = "+"), "-1,")
      grad.mat.temp = rep(0, length(name.free.space))
      grad.mat.temp[const.end + 1:length(w.free.ind)] = 1
      grad.mat = rbind(grad.mat, grad.mat.temp)
      x0.start.in.1 = runif(n=length(w.free.ind)+1, 0, 1)
      x0.start.in.2 = x0.start.in.1/sum(x0.start.in.1)
      x0.start.in.3 = x0.start.in.2[2:(length(w.free.ind)+1)]
      x0.start = c(x0.start, x0.start.in.3)
      const.end = const.end + length(w.free.ind)
    }
    ## Replace the trailing comma with ")" to close the constraint vector.
    substr(const.text, str_length(const.text), str_length(const.text)) <- ")"
    const.text = paste("constr <- c(", const.text)
    # constraint functions
    # inequalities
    eval_ineq <- function( x ) {
      eval(parse(text=const.text))
      grad = grad.mat
      return( list( "constraints"=constr, "jacobian"=grad ) )
    }
    local_opts <- list( "algorithm" = nloptr.func.name,
                        "xtol_rel" = naive.tol,
                        "ftol_rel" = 0,
                        "maxeval" = 100)
    opts <- list( "algorithm" = nloptr.func.name,
                  "xtol_rel" = naive.tol,
                  "ftol_rel" = 0,
                  "maxeval" = naive.max.n,
                  "maxtime" = naive.max.t,
                  "local_opts" = local_opts
    )
    if (is.null(x0.given)){
      x0.start.in = x0.start
    } else {
      x0.start.in = x0.given
    }
    res <- nloptr( x0=x0.start.in,
                   eval_f=gfo.func,
                   lb=rep(0, length(x0.start)),
                   ub=rep(1, length(x0.start)),
                   eval_g_ineq=eval_ineq,
                   opts=opts)
    print(res)
    test.temp[naive.opt.ind] = -res$objective
    naive.input.temp = res$solution
    # naive.data = as.tibble(t(as.matrix(naive.input.temp)))
    ## Rebuild the full alpha vector and transition matrix from the free
    ## parameters (last entry of each simplex implied by sum-to-one).
    alpha.in = as.vector(rep(0, length(alpha.const)))
    if (sum(alpha.const)==1){
      alpha.in = alpha.const
    } else {
      alpha.in[alpha.const==1] = c(naive.input.temp[1:length(alpha.free.ind)],
                                   1 - sum(naive.input.temp[1:length(alpha.free.ind)]))
    }
    const.end = length(alpha.free.ind)
    w.in = matrix(0, nrow=dim(w.const)[1], ncol=dim(w.const)[1])
    for (i in 1:dim(w.in)[1]){
      w.const.temp = w.const[i,]
      if (sum(w.const.temp)==1){
        w.in[i, ] = w.const.temp
      } else {
        w.free.ind = head(which(!w.const.temp==0), -1)
        w.in[i, w.const[i,]==1] = c(naive.input.temp[1:length(w.free.ind) + const.end],
                                    1 - sum(naive.input.temp[1:length(w.free.ind) + const.end]))
        const.end = const.end + length(w.free.ind)
      }
    }
    ## Clip to [0, 1] and renormalize (small epsilon avoids division by 0).
    alpha.in = pmin(alpha.in, 1)
    alpha.in = pmax(alpha.in, 0)
    alpha.in = alpha.in / (10^(-6)+sum(alpha.in))
    w.in[w.in<0] = 0
    w.in[w.in>1] = 1
    w.in = t(apply(w.in, 1, function(x){x/(10^(-6)+sum(x))}))
    ## Validity checks. NOTE(review): these return() exits leave the WHOLE
    ## function (not just this restart) with an NA vector instead of the
    ## usual result list -- confirm intent.
    if (sum(alpha.in)>1) return(rep(NA, 9 + length(name.free.space)))
    if (sum(alpha.in<0)>0) return(rep(NA, 9 + length(name.free.space)))
    if (sum(alpha.in>1)>0) return(rep(NA, 9 + length(name.free.space)))
    if (sum(apply(w.in, 1, sum)>1)>0) return(rep(NA, 9 + length(name.free.space)))
    if (sum(w.in<0)>0) return(rep(NA, 9 + length(name.free.space)))
    if (sum(w.in>1)>0) return(rep(NA, 9 + length(name.free.space)))
    ## Evaluate the reconstructed graph on the simulated p-values.
    graph.in = matrix2graph(w.in, alpha.in)
    out_seq = graphTest(pvalues = t(pval.sim.mat.in), graph = graph.in, alpha = type.1.error)
    out.power = as.vector(apply(out_seq, 2, mean))
    # naive.fit.power.real = sum(out.power*obj.weight)/sum(obj.weight)
  }
  ## Results below come from the LAST restart (res / alpha.in / w.in).
  newlist = list("naive.fit" = test.temp,
                 "solution" = res$solution,
                 "naive.alpha" = alpha.in,
                 "naive.w" = w.in,
                 "status" = res$status,
                 "iters" = res$iterations,
                 "time" = difftime(Sys.time(), naive.opt.time, units="secs"))
  return(newlist)
}
| /graph_nn_general_functions_keras.R | no_license | tian-yu-zhan/DNN_optimization | R | false | false | 23,365 | r |
## Logistic (sigmoid) activation: maps any real input into (0, 1).
## Vectorized over x.
sigmoid <- function(x) {
  denom <- 1 + exp(-x)
  1 / denom
}
## Derivative of the sigmoid activation: sigmoid(x) * (1 - sigmoid(x)).
## (The original header comment mislabeled this as a "ReLU activation
## function".) Vectorized over x.
sig_der = function(x){
  ## Evaluate sigmoid once instead of twice.
  s = sigmoid(x)
  return(s*(1-s))
}
### Draw n.graph random alpha vectors (one per row): uniform draws with the
### entries fixed to zero by the constraint mask removed, each row rescaled
### to sum to one.
draw.alpha.fun = function(n.hypo, n.graph, alpha.const.in) {
  draws <- matrix(runif(n.hypo * n.graph, 0, 1), nrow = n.graph, ncol = n.hypo)
  draws[, alpha.const.in == 0] <- 0
  ## Division by the row-sum vector recycles down the rows.
  draws / rowSums(draws)
}
### Draw n.graph random transition matrices (slices of an n.hypo x n.hypo x
### n.graph array): uniform draws with the entries fixed to zero by the
### constraint mask removed, every row of every slice rescaled to sum to
### one.
draw.w.fun = function(n.hypo, n.graph, w.const.in) {
  w.draws <- array(runif(n.hypo * n.hypo * n.graph, 0, 1),
                   dim = c(n.hypo, n.hypo, n.graph))
  for (g in seq_len(n.graph)) {
    slice <- w.draws[, , g]
    slice[w.const.in == 0] <- 0
    ## Row-wise normalization of this slice (division recycles over rows).
    w.draws[, , g] <- slice / rowSums(slice)
  }
  w.draws
}
### Power of a graphical multiple-testing procedure: builds the gMCP graph
### from the alpha vector and transition matrix, applies graphTest() to the
### simulated p-values, and returns the per-hypothesis rejection rates.
graph.power = function(alpha.in, w.in, type.1.error.in, pval.sim.mat.in){
  test.graph <- matrix2graph(w.in, alpha.in)
  reject.mat <- graphTest(pvalues = t(pval.sim.mat.in), graph = test.graph,
                          alpha = type.1.error.in)
  apply(reject.mat, 2, mean)
}
## Build the column names of the free (trainable) graph parameters: the
## alpha-vector entries and the transition-matrix weights whose constraint
## mask is nonzero. The LAST free entry of the alpha vector and of each
## weight row is dropped because it is implied by the sum-to-one constraint.
##
## Args:
##   alpha.const.in: 0/1 vector marking which alpha entries are free.
##   w.const.in:     0/1 matrix marking which transition weights are free.
## Returns: list with the name vector ("a1", "w1_2", ...) plus its
##   "+"-collapsed and ","-collapsed string forms.
obtain.name.func = function(alpha.const.in, w.const.in){
  ## Fix: use the argument alpha.const.in here; the original referenced the
  ## global alpha.const by accident.
  name.free.space = head(paste0("a", which(!alpha.const.in==0)), -1)
  for (i in 1:dim(w.const.in)[1]){
    w.const.temp = w.const.in[i, ]
    name.free.space = c(name.free.space,
                        head(paste0("w", i, "_", which(!w.const.temp==0)), -1))
  }
  name.free.plus = paste(name.free.space, collapse = "+")
  name.free.comma = paste(name.free.space, collapse = ",")
  newlist = list("name.free.space" = name.free.space,
                 "name.free.comma" = name.free.comma,
                 "name.free.plus" = name.free.plus)
  return(newlist)
}
## DNN fitting with optimization.
##
## Trains a Keras feed-forward network (n.layer.in hidden layers of
## n.node.in sigmoid units each, with dropout) to predict normalized graph
## power from the free graph parameters, reconstructs the fitted network
## analytically (forward pass + exact gradient built as text and
## eval(parse())-ed), maximizes the fitted surface with NLopt AUGLAG, and
## fine-tunes the best point with derivative-free COBYLA via
## naive.opt.func().
##
## Args:
##   n.node.in/n.layer.in/drop.rate.in/max.epoch.in: DNN architecture and
##     training settings.
##   k.indicator/k.itt.in: if k.indicator is FALSE, block k.itt.in of a
##     5-fold split is the validation set; otherwise one random row.
##   data.net.in:     simulated training data with target.power(.norm).
##   fit.tol.in, parallel, name.free.plus/comma: appear unused in this body
##     -- TODO confirm before removing.
##   pval.sim.mat.in: simulated p-values for the real-power evaluation.
##   obtain.name.fit: name list from obtain.name.func().
##   df.fit.tol.in/df.max.n.in/df.max.t.in: COBYLA fine-tune settings.
## NOTE(review): relies on the globals alpha.const, w.const and on
## gfo.func() / naive.opt.func() -- confirm they are in scope.
## Returns: list(n.nodes.output summary row, data.train, data.val).
neu.function = function(n.node.in, n.layer.in, k.indicator, k.itt.in, data.net.in,
                        fit.tol.in, pval.sim.mat.in, parallel, obtain.name.fit,
                        drop.rate.in, max.epoch.in, df.fit.tol.in, df.max.n.in,
                        df.max.t.in){
  neu.time = Sys.time()
  name.free.space = obtain.name.fit$name.free.space
  name.free.plus = obtain.name.fit$name.free.plus
  name.free.comma = obtain.name.fit$name.free.comma
  ## One summary row: fit diagnostics, the optimal solution, and settings.
  n.nodes.output = matrix(NA, nrow = 1, ncol = 9 + length(name.free.space))
  #rownames(n.nodes.output) = n.nodes.vec
  colnames(n.nodes.output) =
    c("TD_MSE", "VD_MSE", "opt_fit_power", "opt_real_power", "opt_rank",
      name.free.space,
      "max_power", "hidden", "layer", "drop_rate")
  n.nodes.output = data.frame(n.nodes.output)
  hidden.in = rep(n.node.in, n.layer.in)
  n.graph = dim(data.net.in)[1]
  ## Validation indices: contiguous block k.itt.in of a 5-fold split.
  val.ind = (n.graph/5)*(k.itt.in-1) + 1:(n.graph/5)
  if (k.indicator){
    val.ind = sample(1:n.graph, 1)
  }
  train.ind = (1:n.graph)[-val.ind]
  data.train = data.net.in[train.ind,]
  data.val = data.net.in[val.ind,]
  ## Keras: standardize features using training-set means/sds only.
  data.keras.train = subset(data.train, select=name.free.space)
  data.keras.val = subset(data.val, select=name.free.space)
  data.keras.train = as_tibble(data.keras.train)
  data.keras.train.scale <- scale(data.keras.train)
  col_means_train <- attr(data.keras.train.scale, "scaled:center")
  col_stddevs_train <- attr(data.keras.train.scale, "scaled:scale")
  data.keras.val.scale <- scale(data.keras.val,
                                center = col_means_train, scale = col_stddevs_train)
  k_clear_session()
  #rm(model)
  ## The model is assembled as a text string and eval(parse())-ed so the
  ## number of hidden layers can vary at run time; all hidden units are
  ## sigmoid with dropout, and the single output unit is sigmoid.
  build_model <- function() {
    model <- NULL
    ### with dropout
    model.text.1 = paste0("model <- keras_model_sequential() %>% layer_dense(units = n.node.in, activation =",
                          shQuote("sigmoid"),
                          ",input_shape = dim(data.keras.train.scale)[2]) %>% layer_dropout(rate=", drop.rate.in, ")%>%")
    model.text.2 = paste0(rep(paste0(" layer_dense(units = n.node.in, activation = ",
                                     shQuote("sigmoid"),
                                     ") %>% layer_dropout(rate=", drop.rate.in, ")%>%"),
                              (n.layer.in-1)), collapse ="")
    ### model.text.3
    model.text.3 = paste0("layer_dense(units = 1, activation = ",
                          shQuote("sigmoid"), ")")
    eval(parse(text=paste0(model.text.1, model.text.2, model.text.3)))
    model %>% compile(
      loss = "mse",
      optimizer = optimizer_rmsprop(),
      metrics = list("mse")
    )
    model
  }
  model <- build_model()
  model %>% summary()
  ## Progress indicator: one dot per epoch, line break every 80 epochs.
  print_dot_callback <- callback_lambda(
    on_epoch_end = function(epoch, logs) {
      if (epoch %% 80 == 0) cat("\n")
      cat(".")
    }
  )
  label.keras.train %<-% data.train$target.power.norm
  # label.keras.train %<-% data.train$target.power
  history <- model %>% fit(
    data.keras.train.scale,
    label.keras.train,
    epochs = max.epoch.in,
    validation_split = 0,
    verbose = 0,
    callbacks = list(print_dot_callback),
    batch_size = 100
  )
  print(history)
  net.train.result <- model %>% predict(data.keras.train.scale)
  net.val.result <- model %>% predict(data.keras.val.scale)
  ## Extract the layer weights and fold the feature standardization into the
  ## first-layer weights/bias, so the analytic network acts on raw inputs.
  w1.scale = get_weights(model)[[1]]
  b1.scale = as.matrix(get_weights(model)[[2]])
  w1 = t(w1.scale/matrix(rep(col_stddevs_train, dim(w1.scale)[2]),
                         nrow = dim(w1.scale)[1], ncol = dim(w1.scale)[2]))
  b1 = b1.scale - t(w1.scale)%*%as.matrix(col_means_train/col_stddevs_train)
  ## Bind w2..w(L+1) and b2..b(L+1) in this frame via eval(parse()).
  for (wb.itt in 2:(n.layer.in+1)){
    w.text = paste0("w", wb.itt, "=t(get_weights(model)[[", wb.itt*2-1, "]])")
    b.text = paste0("b", wb.itt, "= as.matrix(get_weights(model)[[", wb.itt*2, "]])")
    eval(parse(text=w.text))
    eval(parse(text=b.text))
  }
  ######################################################################
  ## for sigmoid function
  ## Build eval_f(x) as text: the forward pass of the fitted network plus
  ## der_f(i), the exact partial derivative w.r.t. input i. It returns the
  ## NEGATIVE fitted power and gradient because nloptr minimizes.
  eval_f_whole_text1 = paste0(
    "eval_f <- function( x ) {; x.mat = as.matrix(c(x)); w1x = (w1)%*%x.mat + b1;sw1x = as.matrix(c(sigmoid(w1x)))")
  eval_f_whole_text2 = NULL
  for (wb.itt in 2:(n.layer.in)){
    wx.text = paste0("w", wb.itt, "x = (w", wb.itt, ")%*%sw", wb.itt-1,
                     "x + b", wb.itt)
    swx.text = paste0("sw", wb.itt, "x = as.matrix(c(sigmoid(w", wb.itt, "x)))")
    eval_f_whole_text2 = paste(eval_f_whole_text2, wx.text, swx.text, sep = ";")
  }
  wb.itt.final = n.layer.in + 1
  wx.text = paste0("w", wb.itt.final, "x = (w", wb.itt.final, ")%*%sw", wb.itt.final-1,
                   "x + b", wb.itt.final)
  swx.text = paste0("sw",n.layer.in+1,"x =sigmoid(w", wb.itt.final, "x)")
  eval_f_whole_text2 = paste(eval_f_whole_text2, wx.text, swx.text, sep = ";")
  eval_f_whole_text3 = paste("der_f = function(i){;sw1x_der = as.matrix(as.vector(c((1-sigmoid(w1x))*sigmoid(w1x)))*as.vector(c(w1[, i])));w2x_der = (w2)%*%sw1x_der")
  if (n.layer.in>1){
    for (wb.itt in 2:(n.layer.in)){
      swx.text = paste0("sw", wb.itt, "x_der = as.matrix(as.vector(c(sig_der(w",
                        wb.itt, "x)))*as.vector(c(w", wb.itt, "x_der)))")
      wx.text = paste0("w", wb.itt+1, "x_der = (w", wb.itt+1, ")%*%sw",
                       wb.itt, "x_der")
      eval_f_whole_text3 = paste(eval_f_whole_text3, swx.text, wx.text, sep = ";")
    }
  }
  out.text = paste0("out = as.numeric(sig_der(w", n.layer.in+1,
                    "x)*w", n.layer.in+1, "x_der)")
  eval_f_whole_text3 = paste(eval_f_whole_text3, out.text,
                             "return(out); }", sep = ";")
  grad.text = paste("-der_f(", 1:(length(name.free.space)), ")", collapse = ",")
  return.text = paste0(" return( list( ", shQuote("objective"), " = -sw",
                       n.layer.in+1,"x,",
                       shQuote("gradient") , " = c(", grad.text, ") ) )")
  eval_f_whole_text = paste(eval_f_whole_text1, eval_f_whole_text2,";",
                            eval_f_whole_text3, ";", return.text,";", "}")
  eval(parse(text=eval_f_whole_text))
  #####################################################################################
  ## Map the sigmoid output back to the original power scale (inverse of the
  ## 0.3 + 0.4*z normalization done in sim.data.function) and attach
  ## normalized-rank columns for diagnostics.
  data.train$fit.power = as.vector(net.train.result)
  data.train$fit.target.power = (data.train$fit.power-0.3)/0.4*
    (max(data.net.in$target.power)-min(data.net.in$target.power))+
    min(data.net.in$target.power)
  data.train$rank.target.power = (rank(data.train$target.power)-1)/(dim(data.train)[1]-1)
  data.train$rank.fit.power = (rank(data.train$fit.power)-1)/(dim(data.train)[1]-1)
  data.val$fit.power = as.vector(net.val.result)
  data.val$fit.target.power = (data.val$fit.power-0.3)/0.4*
    (max(data.net.in$target.power)-min(data.net.in$target.power))+
    min(data.net.in$target.power)
  data.val$rank.target.power = (rank(data.val$target.power)-1)/(dim(data.val)[1]-1)
  data.val$rank.fit.power = (rank(data.val$fit.power)-1)/(dim(data.val)[1]-1)
  ###############################################################################
  ## optimization
  ###########################################################################################
  ## set several initial values for constrained optimization
  solve.opt.out = solve.opt.in = solve.opt.fit.out = NULL
  n.solve.opt = 1
  for (solve.opt.ind in 1:n.solve.opt){
    x0.in = NULL
    grad.mat = NULL
    const.text = ""
    ## Sum-to-one inequality constraints built as text: the free alpha
    ## entries (all but the last) must sum to <= 1, and likewise each row of
    ## the transition matrix. Uses the globals alpha.const / w.const --
    ## TODO confirm.
    alpha.free.ind = head(which(!alpha.const==0), -1)
    if (sum(alpha.const)>1){
      const.text = paste(const.text, paste("x[", 1:length(alpha.free.ind), "]", collapse = "+"), "-1,")
      grad.mat.temp = rep(0, length(name.free.space))
      grad.mat.temp[1:length(alpha.free.ind)] = 1
      grad.mat = rbind(grad.mat, grad.mat.temp)
      x0.temp.in = abs(rnorm(length(alpha.free.ind)+1, 0, 1))
      x0.temp.in = x0.temp.in/sum(x0.temp.in)
      x0.temp.in = x0.temp.in[1:length(alpha.free.ind)]
      x0.in = c(x0.in, x0.temp.in)
    }
    const.end = length(alpha.free.ind)
    for (i in 1:dim(w.const)[1]){
      w.const.temp = w.const[i, ]
      if (sum(w.const.temp)<=1) next
      w.free.ind = head(which(!w.const.temp==0), -1)
      const.text = paste(const.text, paste("x[", const.end + 1:length(w.free.ind), "]",
                                           collapse = "+"), "-1,")
      grad.mat.temp = rep(0, length(name.free.space))
      grad.mat.temp[const.end + 1:length(w.free.ind)] = 1
      grad.mat = rbind(grad.mat, grad.mat.temp)
      x0.temp.in = abs(rnorm(length(w.free.ind)+1, 0, 1))
      x0.temp.in = x0.temp.in/sum(x0.temp.in)
      x0.temp.in = x0.temp.in[1:length(w.free.ind)]
      x0.in = c(x0.in, x0.temp.in)
      const.end = const.end + length(w.free.ind)
    }
    ## Replace the trailing comma of the constraint text with ")".
    substr(const.text, str_length(const.text), str_length(const.text)) <- ")"
    const.text = paste("constr <- c(", const.text)
    # constraint functions
    # inequalities
    eval_g_ineq <- function( x ) {
      eval(parse(text=const.text))
      grad = grad.mat
      return( list( "constraints"=constr, "jacobian"=grad ) )
    }
    # lower and upper bounds of control
    lb <- rep(0, length(name.free.space))
    ub <- rep(1, length(name.free.space))
    # NLOPT_LD_AUGLAG NLOPT_LN_COBYLA
    local_opts <- list( "algorithm" = "NLOPT_LD_AUGLAG",
                        "xtol_rel" = 1.0e-5 )
    opts <- list( "algorithm" = "NLOPT_LD_AUGLAG",
                  "xtol_rel" = 1.0e-5,
                  "maxeval" = 10000,
                  "local_opts" = local_opts )
    res <- nloptr( x0=x0.in,
                   eval_f=eval_f,
                   lb=lb,
                   ub=ub,
                   eval_g_ineq=eval_g_ineq,
                   opts=opts)
    print(res)
    opt.input.temp = res$solution
    opt.data = as.tibble(t(as.matrix(opt.input.temp)))
    opt.data.scale <- scale(opt.data,
                            center = col_means_train, scale = col_stddevs_train)
    ## Fitted power at the optimum vs. simulation-based "real" power.
    opt.fit.power.temp = model %>% predict(opt.data.scale)
    opt.fit.power.real = -gfo.func(opt.input.temp)
    solve.opt.fit.out = c(solve.opt.fit.out, opt.fit.power.temp)
    solve.opt.out = c(solve.opt.out, opt.fit.power.real)
    solve.opt.in = rbind(solve.opt.in, opt.input.temp)
  }
  ## Keep the restart with the best real power, then polish with COBYLA.
  solve.opt.select.ind = which.max(solve.opt.out)
  opt.x0 = solve.opt.in[solve.opt.select.ind, ]
  opt.real.power = solve.opt.out[solve.opt.select.ind]
  print(opt.real.power)
  ## fine tune with COBYLA
  naive.opt.fit = naive.opt.func(nloptr.func.name = "NLOPT_LN_COBYLA",
                                 naive.opt.n = 1,
                                 naive.tol = df.fit.tol.in,
                                 naive.max.n = df.max.n.in,
                                 naive.max.t = df.max.t.in,
                                 pval.sim.mat.in = pval.sim.mat.in,
                                 x0.given = opt.x0,
                                 # x0.given = NULL
                                 set.seed.in = 1
  )
  ## Fill in the summary row.
  n.nodes.output[1, name.free.space] = naive.opt.fit$solution
  n.nodes.output$TD_MSE[1] = mean((data.train$fit.target.power-data.train$target.power)^2)
  n.nodes.output$VD_MSE[1] = mean((data.val$fit.target.power-data.val$target.power)^2)
  n.nodes.output$opt_real_power[1] = as.numeric(naive.opt.fit$naive.fit)
  n.nodes.output$opt_fit_power[1] = as.numeric(solve.opt.fit.out[solve.opt.select.ind])
  n.nodes.output$opt_rank[1] = sum(data.net.in$target.power>naive.opt.fit$naive.fit)+1
  print(n.nodes.output$opt_real_power)
  print(n.nodes.output$opt_rank)
  n.nodes.output$max_power[1] = as.numeric(max(data.net.in$target.power))
  n.nodes.output$hidden[1] = n.node.in
  n.nodes.output$layer[1] = n.layer.in
  n.nodes.output$drop_rate[1] = drop.rate.in
  n.nodes.output$time[1] = difftime(Sys.time(), neu.time, units="secs")
  n.nodes.output$iters[1] = naive.opt.fit$iters
  n.nodes.output$status[1] = naive.opt.fit$status
  newlist = list("n.nodes.output" = n.nodes.output,
                 "data.train" = data.train,
                 "data.val" = data.val)
  return(newlist)
}
## Simulate training data for the DNN: draws correlated test statistics,
## converts them to one-sided p-values, evaluates the graphical-testing
## power of every candidate graph in parallel, and normalizes the target
## power into the (0.3, 0.7) band used as the DNN label.
##
## Args:
##   n.hypo.in:       number of hypotheses in the graph.
##   n.sim.in:        number of simulated trials.
##   trt.vec.in:      mean vector of the test statistics.
##   alpha.fit.in:    matrix of candidate alpha vectors (one graph per row).
##   w.fit.in:        array of candidate transition matrices (graph index on
##                    dimension 3).
##   sigma.in:        covariance matrix of the test statistics.
##   corr.in:         unused in this body; kept for backward compatibility.
##   type.1.error.in: one-sided type-I error level.
##   obj.weight.in:   hypothesis weights of the power objective.
## NOTE(review): relies on the global n.cluster for the parallel backend --
## confirm it is defined before calling.
## Returns: list(pval.matrix, data.matrix, data.matrix.all,
##   sim.data.time.diff).
sim.data.function = function(n.hypo.in, n.sim.in, trt.vec.in, alpha.fit.in, w.fit.in,
                             sigma.in, corr.in, type.1.error.in, obj.weight.in){
  sim.data.time = Sys.time()
  trt.sim.mat = t(mvrnorm(n = n.sim.in, trt.vec.in, Sigma = sigma.in))
  pval.sim.mat = pnorm(trt.sim.mat, lower.tail = FALSE)
  ## Fix: count graphs from the argument alpha.fit.in; the original
  ## referenced the global alpha.fit by accident.
  n.graph.in = dim(alpha.fit.in)[1]
  ## One row per graph: alpha entries followed by the flattened (row-major)
  ## transition matrix.
  data.net = cbind(alpha.fit.in, matrix(aperm(w.fit.in, c(3,2,1)), nrow = n.graph.in,
                                        ncol = n.hypo.in*n.hypo.in))
  data.net = data.frame(data.net)
  colnames(data.net) = c(paste0("a", 1:n.hypo.in),
                         paste0("w", as.vector(sapply(1:n.hypo.in,
                                                      function(x){paste0(x,"_", 1:n.hypo.in)}))))
  cl = makeCluster(n.cluster)
  registerDoParallel(cl)
  ## Each worker loads its own libraries and sources the helper file so that
  ## graph.power() is available inside the foreach body.
  target.power.temp = foreach(i.graph.in = 1:n.graph.in) %dopar% {
    library(gMCP)
    library(MASS)
    library(nloptr)
    library(stringr)
    library(ANN2)
    library(CVTuningCov)
    library(tibble)
    library(pracma)
    source("graph_nn_general_functions_keras.R")
    graph.power.fit = graph.power(as.vector(alpha.fit.in[i.graph.in, ]),
                                  as.matrix(w.fit.in[,,i.graph.in]),
                                  type.1.error.in, pval.sim.mat)
    return(sum(graph.power.fit*obj.weight.in)/sum(obj.weight.in))
  }
  stopCluster(cl)
  target.power.in = unlist(target.power.temp)
  data.net$target.power = target.power.in
  ## Diagnostic summary: analytic power vs. empirical rejection rate.
  assump.out = matrix(NA, nrow=2, ncol=length(trt.vec.in))
  assump.out[1, ] = pnorm(qnorm(1-type.1.error.in), mean=trt.vec.in, lower.tail = FALSE)
  ## NOTE(review): the 0.025 threshold is hard-coded and will disagree with
  ## type.1.error.in when a different level is used -- confirm intent.
  assump.out[2, ] = apply(pval.sim.mat, 1, function(x){mean(x<=0.025)})
  rownames(assump.out) = c("true power", "sim power")
  data.net.all = data.net
  ## Finish data simulation
  ####################################################################################
  ## Normalize the target power into (0.3, 0.7) so it sits well inside the
  ## range of the sigmoid output unit of the DNN.
  data.net$target.power.norm =
    (data.net$target.power-min(data.net$target.power))/(max(data.net$target.power)-min(data.net$target.power))
  data.net$target.power.norm = data.net$target.power.norm*0.4+0.3
  newlist = list("pval.matrix" = pval.sim.mat, "data.matrix" = data.net,
                 "data.matrix.all" = data.net.all,
                 "sim.data.time.diff" = difftime(Sys.time(), sim.data.time, units="secs"))
  return(newlist)
}
## Objective function for the derivative-free optimizers (COBYLA / ISRES):
## maps the free-parameter vector x.gfo to a full (alpha, W) graph, projects
## it back onto the simplex constraints, and returns the NEGATIVE weighted
## power (nloptr minimizes).
## NOTE(review): relies on the globals alpha.const, w.const, type.1.error,
## sim.data.fit and obj.weight -- confirm they are in scope.
gfo.func = function(x.gfo){
  ## Free alpha entries: all nonzero mask positions except the last, whose
  ## value is implied by the sum-to-one constraint.
  alpha.free.ind = head(which(!alpha.const==0), -1)
  alpha.in = as.vector(rep(0, length(alpha.const)))
  if (sum(alpha.const)==1){
    alpha.in = alpha.const
  } else {
    alpha.in[alpha.const==1] = c(x.gfo[1:length(alpha.free.ind)],
                                 1 - sum(x.gfo[1:length(alpha.free.ind)]))
  }
  const.end = length(alpha.free.ind)
  ## Rebuild the transition matrix row by row; const.end tracks the offset
  ## of each row's free parameters inside x.gfo.
  w.in = matrix(0, nrow=dim(w.const)[1], ncol=dim(w.const)[1])
  for (i in 1:dim(w.in)[1]){
    w.const.temp = w.const[i,]
    if (sum(w.const.temp)==1){
      w.in[i, ] = w.const.temp
    } else {
      w.free.ind = head(which(!w.const.temp==0), -1)
      w.in[i, w.const[i,]==1] = c(x.gfo[1:length(w.free.ind) + const.end],
                                  1 - sum(x.gfo[1:length(w.free.ind) + const.end]))
      const.end = const.end + length(w.free.ind)
    }
  }
  ## Clip to [0, 1] and renormalize so the graph is always valid; the small
  ## epsilon in the row normalization avoids division by zero.
  alpha.in = pmin(alpha.in, 1)
  alpha.in = pmax(alpha.in, 0)
  alpha.in = alpha.in / sum(alpha.in)
  w.in[w.in<0] = 0
  w.in[w.in>1] = 1
  w.in = t(apply(w.in, 1, function(x){x/(sum(x)+10^(-6))}))
  # print(w.in)
  graph.power.gfo = graph.power(as.vector(alpha.in),
                                as.matrix(w.in),
                                type.1.error, sim.data.fit$pval.matrix)
  return(-sum(graph.power.gfo*obj.weight)/sum(obj.weight))
}
## function to fit COBYLA and ISRES
## Run the chosen nloptr algorithm (COBYLA or ISRES) for naive.opt.n
## restarts and record the achieved objective (weighted mean graph power).
##
## Arguments:
##   nloptr.func.name - nloptr algorithm name, e.g. "NLOPT_LN_COBYLA".
##   naive.opt.n      - number of optimization restarts.
##   naive.tol        - relative x-tolerance ("xtol_rel") for nloptr.
##   naive.max.n      - maximum number of objective evaluations per run.
##   naive.max.t      - maximum run time per restart (seconds).
##   pval.sim.mat.in  - simulated p-value matrix for the final power check.
##   x0.given         - fixed starting point, or NULL to draw random starts.
##   set.seed.in      - RNG seed (makes the random starts reproducible).
##
## Returns a list with the per-restart objective values ("naive.fit") plus
## the solution/alpha/weights/status/iterations of the LAST restart and
## the elapsed time.
##
## NOTE(review): relies on globals alpha.const, w.const, name.free.space,
## type.1.error and on gfo.func/matrix2graph/graphTest; str_length() needs
## stringr to be attached -- confirm it is loaded elsewhere in the file.
naive.opt.func = function(nloptr.func.name, naive.opt.n, naive.tol, naive.max.n,
naive.max.t,
pval.sim.mat.in, x0.given, set.seed.in){
set.seed(set.seed.in)
naive.opt.time = Sys.time()
## One objective value per restart.
test.temp = rep(0, naive.opt.n)
for (naive.opt.ind in 1:naive.opt.n){
print(paste(nloptr.func.name, ":", naive.opt.ind, "/", naive.opt.n))
## Build (as text) the sum-to-one inequality constraints and their
## Jacobian, and draw a random feasible starting point.
const.text = ""
x0.start = grad.mat = NULL
## Free alpha entries: all non-zero positions except the last one, which
## is implied by the sum-to-one constraint.
alpha.free.ind = head(which(!alpha.const==0), -1)
if (sum(alpha.const)>1){
const.text = paste(const.text, paste("x[", 1:length(alpha.free.ind), "]", collapse = "+"), "-1,")
grad.mat.temp = rep(0, length(name.free.space))
grad.mat.temp[1:length(alpha.free.ind)] = 1
grad.mat = rbind(grad.mat, grad.mat.temp)
## Random start: normalized uniform draws; dropping the first entry
## leaves a feasible point on the open sub-simplex.
x0.start.in.1 = runif(n=length(alpha.free.ind)+1, 0, 1)
x0.start.in.2 = x0.start.in.1/sum(x0.start.in.1)
x0.start.in.3 = x0.start.in.2[2:(length(alpha.free.ind)+1)]
x0.start = c(x0.start, x0.start.in.3)
}
## Offset of the weight parameters within the free-parameter vector.
const.end = length(alpha.free.ind)
for (i in 1:dim(w.const)[1]){
w.const.temp = w.const[i, ]
## Rows with at most one admissible edge carry no free parameters.
if (sum(w.const.temp)<=1) next
w.free.ind = head(which(!w.const.temp==0), -1)
const.text = paste(const.text, paste("x[", const.end + 1:length(w.free.ind), "]",
collapse = "+"), "-1,")
grad.mat.temp = rep(0, length(name.free.space))
grad.mat.temp[const.end + 1:length(w.free.ind)] = 1
grad.mat = rbind(grad.mat, grad.mat.temp)
x0.start.in.1 = runif(n=length(w.free.ind)+1, 0, 1)
x0.start.in.2 = x0.start.in.1/sum(x0.start.in.1)
x0.start.in.3 = x0.start.in.2[2:(length(w.free.ind)+1)]
x0.start = c(x0.start, x0.start.in.3)
const.end = const.end + length(w.free.ind)
}
## Replace the trailing "," with ")" and wrap into an assignment that the
## constraint closure below eval(parse())s at each call.
substr(const.text, str_length(const.text), str_length(const.text)) <- ")"
const.text = paste("constr <- c(", const.text)
# constraint functions
# inequalities
eval_ineq <- function( x ) {
## Evaluating the generated text defines `constr` in this frame.
eval(parse(text=const.text))
grad = grad.mat
return( list( "constraints"=constr, "jacobian"=grad ) )
}
local_opts <- list( "algorithm" = nloptr.func.name,
"xtol_rel" = naive.tol,
"ftol_rel" = 0,
"maxeval" = 100)
opts <- list( "algorithm" = nloptr.func.name,
"xtol_rel" = naive.tol,
"ftol_rel" = 0,
"maxeval" = naive.max.n,
"maxtime" = naive.max.t,
"local_opts" = local_opts
)
## Use the supplied starting point when given, else the random one.
if (is.null(x0.given)){
x0.start.in = x0.start
} else {
x0.start.in = x0.given
}
## Minimize the negated weighted graph power (gfo.func).
res <- nloptr( x0=x0.start.in,
eval_f=gfo.func,
lb=rep(0, length(x0.start)),
ub=rep(1, length(x0.start)),
eval_g_ineq=eval_ineq,
opts=opts)
print(res)
test.temp[naive.opt.ind] = -res$objective
naive.input.temp = res$solution
# naive.data = as.tibble(t(as.matrix(naive.input.temp)))
## Reconstruct the full alpha vector and weight matrix from the solution
## (same mapping as in gfo.func).
alpha.in = as.vector(rep(0, length(alpha.const)))
if (sum(alpha.const)==1){
alpha.in = alpha.const
} else {
alpha.in[alpha.const==1] = c(naive.input.temp[1:length(alpha.free.ind)],
1 - sum(naive.input.temp[1:length(alpha.free.ind)]))
}
const.end = length(alpha.free.ind)
w.in = matrix(0, nrow=dim(w.const)[1], ncol=dim(w.const)[1])
for (i in 1:dim(w.in)[1]){
w.const.temp = w.const[i,]
if (sum(w.const.temp)==1){
w.in[i, ] = w.const.temp
} else {
w.free.ind = head(which(!w.const.temp==0), -1)
w.in[i, w.const[i,]==1] = c(naive.input.temp[1:length(w.free.ind) + const.end],
1 - sum(naive.input.temp[1:length(w.free.ind) + const.end]))
const.end = const.end + length(w.free.ind)
}
}
## Clip to [0, 1] and renormalize (the 1e-6 in the denominator guards
## against a zero sum).
alpha.in = pmin(alpha.in, 1)
alpha.in = pmax(alpha.in, 0)
alpha.in = alpha.in / (10^(-6)+sum(alpha.in))
w.in[w.in<0] = 0
w.in[w.in>1] = 1
w.in = t(apply(w.in, 1, function(x){x/(10^(-6)+sum(x))}))
## Sanity checks on the reconstructed parameters.
## NOTE(review): these return() calls abort the WHOLE function (all
## remaining restarts), not just this iteration -- confirm intended.
if (sum(alpha.in)>1) return(rep(NA, 9 + length(name.free.space)))
if (sum(alpha.in<0)>0) return(rep(NA, 9 + length(name.free.space)))
if (sum(alpha.in>1)>0) return(rep(NA, 9 + length(name.free.space)))
if (sum(apply(w.in, 1, sum)>1)>0) return(rep(NA, 9 + length(name.free.space)))
if (sum(w.in<0)>0) return(rep(NA, 9 + length(name.free.space)))
if (sum(w.in>1)>0) return(rep(NA, 9 + length(name.free.space)))
## Recompute the realized power of the fitted graph on the simulated
## p-values.
graph.in = matrix2graph(w.in, alpha.in)
out_seq = graphTest(pvalues = t(pval.sim.mat.in), graph = graph.in, alpha = type.1.error)
out.power = as.vector(apply(out_seq, 2, mean))
# naive.fit.power.real = sum(out.power*obj.weight)/sum(obj.weight)
}
## Values from the LAST restart (res, alpha.in, w.in) are returned.
newlist = list("naive.fit" = test.temp,
"solution" = res$solution,
"naive.alpha" = alpha.in,
"naive.w" = w.in,
"status" = res$status,
"iters" = res$iterations,
"time" = difftime(Sys.time(), naive.opt.time, units="secs"))
return(newlist)
}
|
library(arules)
library(arulesViz)
# Full dataset and Observations --------------------------------------------------------------------
# NOTE(review): setwd() makes the script machine-specific; run from the data
# directory (or use a relative project path) if this moves.
setwd("~/Dropbox/Ubiqum Code Academy/Module 2/Task 4")
Transactions <- read.transactions("ElectronidexTransactions2017.csv", format = "basket", sep = ",",
                                  rm.duplicates = FALSE)
# Create a new transaction set without the transactions with 0 items -------------------------------
FullTrans <- Transactions[-c(which(size(Transactions) == 0)), ]
CYB <- subset(FullTrans, items %in% "Acer Desktop")
length(CYB)
#inspect(FullTrans) # You can view the transactions.
length(FullTrans)            # Number of transactions.
size(FullTrans)              # Number of items per transaction
max(size(FullTrans))
min(size(FullTrans))
round(mean(size(FullTrans)))
which(size(FullTrans) == 30) # 30 items in this transaction
#LIST(FullTrans) # Lists the transactions by conversion (LIST must be capitalized)
itemLabels(FullTrans)        # To see the item labels
# Mine association rules on the full data set, then drop the redundant ones.
BasketRules <- apriori(FullTrans, parameter = list(supp = 0.002, conf = 0.8, target = "rules"))
summary(BasketRules)
inspect(BasketRules)
inspect(sort(BasketRules, by = "lift")[1:10])
PrunedBasRules <- BasketRules[!is.redundant(BasketRules)]
# Products that Blackwell also sells.
BWproducts <- c("HP Wireless Printer", "Canon Office Printer", "Brother Printer", "Brother Printer Toner",
                "ASUS Chromebook", "Acer Aspire", "Dell Monitor", "LG Monitor", "HP Desktop", "Dell 2 Desktop",
                "Dell Desktop")
PrunedBasRulesBW <- subset(PrunedBasRules, items %in% BWproducts)
inspect(sort(PrunedBasRulesBW, by = "lift"))
summary(PrunedBasRules)
plot(PrunedBasRules[1:10], method = "graph", control = list(type = "items"))
itemFrequencyPlot(FullTrans, type = "absolute", topN = 10)
image(sample(FullTrans, 50))
# Potential Private Customers ----------------------------------------------------------------------
# Consumer-oriented products used to split out likely private customers.
PrCustPr <- c("Eluktronics Pro Gaming Laptop", "CYBERPOWER Gamer Desktop", "Redragon Gaming Mouse",
              "Backlit LED Gaming Keyboard", "Apple Earpods", "Monster Beats By Dr Dre",
              "Otium Wireless Sports Bluetooth Headphone", "Panasonic In-Ear Headphone",
              "APIE Bluetooth Headphone", "Gaming Mouse Professional",
              "Rii LED Gaming Keyboard & Mouse Combo", "Zombie Gaming Headset",
              "Philips Flexible Earhook Headphone", "PC Gaming Headset", "Koss Home Headphones",
              "XIBERIA Gaming Headset", "iPhone Charger Cable", "Rokono Mini Speaker",
              "Samsung Charging Cable", "Cambridge Bluetooth Speaker",
              "JBL Splashproof Portable Bluetooth Speaker", "DOSS Touch Wireless Bluetooth",
              "Apple TV", "Google Home", "Smart Light Bulb", "Fire TV Stick", "Roku Express")
PotPrCust1 <- FullTrans[which(size(FullTrans) == 1)]
itemFrequencyPlot(PotPrCust1, type = "absolute", topN = 10)
# FIX: the subset variable is "items", not "item" (the original errored).
PotPrCust1BW <- subset(PotPrCust1, items %in% BWproducts) # There are no items that BlackWell sells
SmallTrans <- FullTrans[which(size(FullTrans) <= 6 & size(FullTrans) > 1)]
PotPrCust2 <- subset(SmallTrans, items %in% PrCustPr)
itemFrequencyPlot(PotPrCust2, type = "absolute", topN = 10)
PrCust2Rules <- apriori(PotPrCust2, parameter = list(supp = 0.002, conf = 0.8, target = "rules"))
is.redundant(PrCust2Rules)
PrCust2Rules <- PrCust2Rules[!is.redundant(PrCust2Rules)]
inspect(sort(PrCust2Rules, by = "lift"))
PrCust2RulesBW <- subset(PrCust2Rules, items %in% BWproducts) # There are no products that BlackWell sells
inspect(sort(PrCust2RulesBW, by = "lift"))
# FIX: PrunedPrCust2Rules was never created -- the pruned rules were assigned
# back to PrCust2Rules above, so summarize/plot that object instead.
summary(PrCust2Rules)
plot(PrCust2Rules, method = "graph", control = list(type = "items"))
# Potential Companies Customers --------------------------------------------------------------------
# NOTE(review): despite the name, this filter selects 2-6 item baskets,
# exactly like SmallTrans above -- confirm whether size(FullTrans) > 6
# was intended here.
LargeTrans <- FullTrans[which(size(FullTrans) <= 6 & size(FullTrans) > 1)]
PotCompanies1 <- subset(LargeTrans, !(items %in% PrCustPr))
itemFrequencyPlot(PotCompanies1, type = "absolute", topN = 10)
PotComp1Rules <- apriori(PotCompanies1, parameter = list(supp = 0.0015, conf = 0.6, target = "rules"))
inspect(sort(PotComp1Rules, by = "lift"))
PotComp1RulesBW <- subset(PotComp1Rules, items %in% BWproducts)
inspect(sort(PotComp1RulesBW, by = "lift"))
PotCompanies2 <- FullTrans[which(size(FullTrans) > 6)]
itemFrequencyPlot(PotCompanies2, type = "absolute", topN = 11)
PotComp2Rules <- apriori(PotCompanies2, parameter = list(supp = 0.006, conf = 0.8, target = "rules"))
inspect(sort(PotComp2Rules, by = "lift")[1:5])
PotComp2RulesBW <- subset(PotComp2Rules, items %in% BWproducts)
Sorted <- sort(PotComp2RulesBW, by = "lift")[1:15]
# FIX: removed the stray trailing comma in the plot() call.
plot(Sorted[8:10], method = "graph", control = list(type = "items"))
DellTrans <- subset(PotCompanies2, items %in% "Dell Desktop")
length(DellTrans)
AcerTrans <- subset(PotCompanies2, items %in% "Acer Aspire")
length(AcerTrans) | /Market Basket Analysis Script.R | no_license | MAlexakis/Market-Barket-Anakysis | R | false | false | 4,695 | r | library(arules)
library(arulesViz)
# Full dataset and Observations --------------------------------------------------------------------
# NOTE(review): setwd() makes the script machine-specific; run from the data
# directory (or use a relative project path) if this moves.
setwd("~/Dropbox/Ubiqum Code Academy/Module 2/Task 4")
Transactions <- read.transactions("ElectronidexTransactions2017.csv", format = "basket", sep = ",",
                                  rm.duplicates = FALSE)
# Create a new transaction set without the transactions with 0 items -------------------------------
FullTrans <- Transactions[-c(which(size(Transactions) == 0)), ]
CYB <- subset(FullTrans, items %in% "Acer Desktop")
length(CYB)
#inspect(FullTrans) # You can view the transactions.
length(FullTrans)            # Number of transactions.
size(FullTrans)              # Number of items per transaction
max(size(FullTrans))
min(size(FullTrans))
round(mean(size(FullTrans)))
which(size(FullTrans) == 30) # 30 items in this transaction
#LIST(FullTrans) # Lists the transactions by conversion (LIST must be capitalized)
itemLabels(FullTrans)        # To see the item labels
# Mine association rules on the full data set, then drop the redundant ones.
BasketRules <- apriori(FullTrans, parameter = list(supp = 0.002, conf = 0.8, target = "rules"))
summary(BasketRules)
inspect(BasketRules)
inspect(sort(BasketRules, by = "lift")[1:10])
PrunedBasRules <- BasketRules[!is.redundant(BasketRules)]
# Products that Blackwell also sells.
BWproducts <- c("HP Wireless Printer", "Canon Office Printer", "Brother Printer", "Brother Printer Toner",
                "ASUS Chromebook", "Acer Aspire", "Dell Monitor", "LG Monitor", "HP Desktop", "Dell 2 Desktop",
                "Dell Desktop")
PrunedBasRulesBW <- subset(PrunedBasRules, items %in% BWproducts)
inspect(sort(PrunedBasRulesBW, by = "lift"))
summary(PrunedBasRules)
plot(PrunedBasRules[1:10], method = "graph", control = list(type = "items"))
itemFrequencyPlot(FullTrans, type = "absolute", topN = 10)
image(sample(FullTrans, 50))
# Potential Private Customers ----------------------------------------------------------------------
# Consumer-oriented products used to split out likely private customers.
PrCustPr <- c("Eluktronics Pro Gaming Laptop", "CYBERPOWER Gamer Desktop", "Redragon Gaming Mouse",
              "Backlit LED Gaming Keyboard", "Apple Earpods", "Monster Beats By Dr Dre",
              "Otium Wireless Sports Bluetooth Headphone", "Panasonic In-Ear Headphone",
              "APIE Bluetooth Headphone", "Gaming Mouse Professional",
              "Rii LED Gaming Keyboard & Mouse Combo", "Zombie Gaming Headset",
              "Philips Flexible Earhook Headphone", "PC Gaming Headset", "Koss Home Headphones",
              "XIBERIA Gaming Headset", "iPhone Charger Cable", "Rokono Mini Speaker",
              "Samsung Charging Cable", "Cambridge Bluetooth Speaker",
              "JBL Splashproof Portable Bluetooth Speaker", "DOSS Touch Wireless Bluetooth",
              "Apple TV", "Google Home", "Smart Light Bulb", "Fire TV Stick", "Roku Express")
PotPrCust1 <- FullTrans[which(size(FullTrans) == 1)]
itemFrequencyPlot(PotPrCust1, type = "absolute", topN = 10)
# FIX: the subset variable is "items", not "item" (the original errored).
PotPrCust1BW <- subset(PotPrCust1, items %in% BWproducts) # There are no items that BlackWell sells
SmallTrans <- FullTrans[which(size(FullTrans) <= 6 & size(FullTrans) > 1)]
PotPrCust2 <- subset(SmallTrans, items %in% PrCustPr)
itemFrequencyPlot(PotPrCust2, type = "absolute", topN = 10)
PrCust2Rules <- apriori(PotPrCust2, parameter = list(supp = 0.002, conf = 0.8, target = "rules"))
is.redundant(PrCust2Rules)
PrCust2Rules <- PrCust2Rules[!is.redundant(PrCust2Rules)]
inspect(sort(PrCust2Rules, by = "lift"))
PrCust2RulesBW <- subset(PrCust2Rules, items %in% BWproducts) # There are no products that BlackWell sells
inspect(sort(PrCust2RulesBW, by = "lift"))
# FIX: PrunedPrCust2Rules was never created -- the pruned rules were assigned
# back to PrCust2Rules above, so summarize/plot that object instead.
summary(PrCust2Rules)
plot(PrCust2Rules, method = "graph", control = list(type = "items"))
# Potential Companies Customers --------------------------------------------------------------------
# NOTE(review): despite the name, this filter selects 2-6 item baskets,
# exactly like SmallTrans above -- confirm whether size(FullTrans) > 6
# was intended here.
LargeTrans <- FullTrans[which(size(FullTrans) <= 6 & size(FullTrans) > 1)]
PotCompanies1 <- subset(LargeTrans, !(items %in% PrCustPr))
itemFrequencyPlot(PotCompanies1, type = "absolute", topN = 10)
PotComp1Rules <- apriori(PotCompanies1, parameter = list(supp = 0.0015, conf = 0.6, target = "rules"))
inspect(sort(PotComp1Rules, by = "lift"))
PotComp1RulesBW <- subset(PotComp1Rules, items %in% BWproducts)
inspect(sort(PotComp1RulesBW, by = "lift"))
PotCompanies2 <- FullTrans[which(size(FullTrans) > 6)]
itemFrequencyPlot(PotCompanies2, type = "absolute", topN = 11)
PotComp2Rules <- apriori(PotCompanies2, parameter = list(supp = 0.006, conf = 0.8, target = "rules"))
inspect(sort(PotComp2Rules, by = "lift")[1:5])
PotComp2RulesBW <- subset(PotComp2Rules, items %in% BWproducts)
Sorted <- sort(PotComp2RulesBW, by = "lift")[1:15]
# FIX: removed the stray trailing comma in the plot() call.
plot(Sorted[8:10], method = "graph", control = list(type = "items"))
DellTrans <- subset(PotCompanies2, items %in% "Dell Desktop")
length(DellTrans)
AcerTrans <- subset(PotCompanies2, items %in% "Acer Aspire")
length(AcerTrans) |
library(bigsnpr)
## Attach the example data shipped with bigsnpr and simulate a phenotype
## (arguments 0.2, 500, alpha = 0.5 -- presumably heritability and number
## of causal variants; confirm against snp_simuPheno()'s signature).
bigsnp <- snp_attachExtdata()
G <- bigsnp$genotypes
simu <- snp_simuPheno(G, 0.2, 500, alpha = 0.5)
## Per-causal-variant log-variances and squared effect sizes: the only
## inputs the objective FUN/DER below need.
log_var <- log(big_colstats(G, ind.col = simu$set)$var)
beta2 <- simu$effects^2
## Objective minimized by optim() below: for x = (S - 1, sigma2), combines
## the per-variant log-variances and squared effects into a single score
## (compare test_MLE / MLE_alpha, the C++ versions of the same fit).
FUN <- function(x, log_var, beta2) {
  pow <- 1 + x[[1]]
  s2 <- x[[2]]
  fitted_var <- exp(pow * log_var)
  pow * sum(log_var) +
    length(log_var) * log(s2) +
    sum(beta2 / fitted_var) / s2
}
## Analytic gradient of FUN with respect to x = (S - 1, sigma2),
## supplied to optim(..., gr = DER) to speed up L-BFGS-B.
DER <- function(x, log_var, beta2) {
  pow <- 1 + x[[1]]
  s2 <- x[[2]]
  w <- beta2 / exp(pow * log_var)
  d_pow <- sum(log_var) - sum(log_var * w) / s2
  d_s2 <- length(log_var) / s2 - sum(w) / s2^2
  c(d_pow, d_s2)
}
## Fit (S - 1, sigma2) by box-constrained optimization of FUN.
optim(par = c(-0.5, 0.2 / 500), fn = FUN, method = "L-BFGS-B",
lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50),
log_var = log_var, beta2 = beta2)
## Same fit with the analytic gradient.
optim(par = c(-0.5, 0.2 / 500), fn = FUN, method = "L-BFGS-B", gr = DER,
lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50),
log_var = log_var, beta2 = beta2) # this one is best
## NOTE(review): without method = "L-BFGS-B", optim() defaults to
## Nelder-Mead, which ignores gr -- confirm this unbounded run is intended.
optim(par = c(-0.5, 0.2 / 500), fn = FUN, gr = DER,
# lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50),
log_var = log_var, beta2 = beta2)
## Compare with the C++ prototype of the same fit.
Rcpp::sourceCpp('tmp-tests/proto-MLE-optim-C++.cpp')
test_MLE(log_var, beta2, c(-1, 0.2 / 500),
lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50))
## Benchmark: R optim vs the C++ prototype.
microbenchmark::microbenchmark(
R = optim(par = c(-1, 0.002), fn = FUN, method = "L-BFGS-B", gr = DER,
lower = c(-2, 0.001), upper = c(1, 0.004),
log_var = log_var, beta2 = beta2)$par,
C = test_MLE(log_var, beta2, c(-1, 0.2 / 500),
lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50))
)
### without caching:
# Unit: microseconds
# expr min lq mean median uq max neval
# R 1483.5 1580.90 1753.977 1643.35 1772.05 6920.2 100
# C 902.8 928.05 980.327 943.30 987.50 1378.7 100
### caching does not really help because the other sums take much more time
### to compute (because of the exp)
## Benchmark against the implementation shipped in src/ldpred2-auto.cpp.
Rcpp::sourceCpp('src/ldpred2-auto.cpp')
MLE_alpha(c(0, 0.002), 0:499, log_var, simu$effects,
alpha_bounds = c(-1, 2), boot = FALSE, verbose = TRUE)
microbenchmark::microbenchmark(
R = optim(par = c(-1, 0.002), fn = FUN, method = "L-BFGS-B", gr = DER,
lower = c(-2, 0.001), upper = c(1, 0.004),
log_var = log_var, beta2 = beta2)$par,
C = MLE_alpha(c(0, 0.002), 0:499, log_var, simu$effects, c(-1, 2), FALSE)
)
| /tmp-tests/proto-MLE-optim-better.R | no_license | privefl/bigsnpr | R | false | false | 2,377 | r | library(bigsnpr)
## Attach the example data shipped with bigsnpr and simulate a phenotype
## (arguments 0.2, 500, alpha = 0.5 -- presumably heritability and number
## of causal variants; confirm against snp_simuPheno()'s signature).
bigsnp <- snp_attachExtdata()
G <- bigsnp$genotypes
simu <- snp_simuPheno(G, 0.2, 500, alpha = 0.5)
## Per-causal-variant log-variances and squared effect sizes: the only
## inputs the objective FUN/DER below need.
log_var <- log(big_colstats(G, ind.col = simu$set)$var)
beta2 <- simu$effects^2
## Objective minimized by optim() below: for x = (S - 1, sigma2), combines
## the per-variant log-variances and squared effects into a single score
## (compare test_MLE / MLE_alpha, the C++ versions of the same fit).
FUN <- function(x, log_var, beta2) {
  pow <- 1 + x[[1]]
  s2 <- x[[2]]
  fitted_var <- exp(pow * log_var)
  pow * sum(log_var) +
    length(log_var) * log(s2) +
    sum(beta2 / fitted_var) / s2
}
## Analytic gradient of FUN with respect to x = (S - 1, sigma2),
## supplied to optim(..., gr = DER) to speed up L-BFGS-B.
DER <- function(x, log_var, beta2) {
  pow <- 1 + x[[1]]
  s2 <- x[[2]]
  w <- beta2 / exp(pow * log_var)
  d_pow <- sum(log_var) - sum(log_var * w) / s2
  d_s2 <- length(log_var) / s2 - sum(w) / s2^2
  c(d_pow, d_s2)
}
## Fit (S - 1, sigma2) by box-constrained optimization of FUN.
optim(par = c(-0.5, 0.2 / 500), fn = FUN, method = "L-BFGS-B",
lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50),
log_var = log_var, beta2 = beta2)
## Same fit with the analytic gradient.
optim(par = c(-0.5, 0.2 / 500), fn = FUN, method = "L-BFGS-B", gr = DER,
lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50),
log_var = log_var, beta2 = beta2) # this one is best
## NOTE(review): without method = "L-BFGS-B", optim() defaults to
## Nelder-Mead, which ignores gr -- confirm this unbounded run is intended.
optim(par = c(-0.5, 0.2 / 500), fn = FUN, gr = DER,
# lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50),
log_var = log_var, beta2 = beta2)
## Compare with the C++ prototype of the same fit.
Rcpp::sourceCpp('tmp-tests/proto-MLE-optim-C++.cpp')
test_MLE(log_var, beta2, c(-1, 0.2 / 500),
lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50))
## Benchmark: R optim vs the C++ prototype.
microbenchmark::microbenchmark(
R = optim(par = c(-1, 0.002), fn = FUN, method = "L-BFGS-B", gr = DER,
lower = c(-2, 0.001), upper = c(1, 0.004),
log_var = log_var, beta2 = beta2)$par,
C = test_MLE(log_var, beta2, c(-1, 0.2 / 500),
lower = c(-2, 0.2 / 5000), upper = c(1, 0.2 / 50))
)
### without caching:
# Unit: microseconds
# expr min lq mean median uq max neval
# R 1483.5 1580.90 1753.977 1643.35 1772.05 6920.2 100
# C 902.8 928.05 980.327 943.30 987.50 1378.7 100
### caching does not really help because the other sums take much more time
### to compute (because of the exp)
## Benchmark against the implementation shipped in src/ldpred2-auto.cpp.
Rcpp::sourceCpp('src/ldpred2-auto.cpp')
MLE_alpha(c(0, 0.002), 0:499, log_var, simu$effects,
alpha_bounds = c(-1, 2), boot = FALSE, verbose = TRUE)
microbenchmark::microbenchmark(
R = optim(par = c(-1, 0.002), fn = FUN, method = "L-BFGS-B", gr = DER,
lower = c(-2, 0.001), upper = c(1, 0.004),
log_var = log_var, beta2 = beta2)$par,
C = MLE_alpha(c(0, 0.002), 0:499, log_var, simu$effects, c(-1, 2), FALSE)
)
|
## Fuzzer-generated regression input: empty x paired with a length-18 y
## containing extreme / denormal doubles.
testlist <- list(x = numeric(0), y = c(8.86604729189247e-301, -5.48612406879369e+303, 1.39067116156574e-309, 1.4479500431007e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
## Call the internal C++ routine with the generated arguments.
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) | /blorr/inst/testfiles/blr_pairs_cpp/libFuzzer_blr_pairs_cpp/blr_pairs_cpp_valgrind_files/1609955323-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 237 | r | testlist <- list(x = numeric(0), y = c(8.86604729189247e-301, -5.48612406879369e+303, 1.39067116156574e-309, 1.4479500431007e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
## Invoke the internal C++ routine on the fuzzer input and print the
## structure of whatever comes back (crash/exception check, not a value check).
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result)
# Load the German credit card data (expects the CSV in the working directory).
mydata2 <- read.csv(file = "german credit card.csv", header = TRUE, sep = ",")
mydata2
head(mydata2)
# Row indices of the smallest / largest credit amounts.
which.min(mydata2$amount)
which.max(mydata2$amount)
# Keep only amount/purpose/history for credits larger than 250.
y <- subset(mydata2, amount > 250, select = c(amount, purpose, history))
y
# The easiest way to get ggplot2 is to install the whole tidyverse:
install.packages("tidyverse")
# Alternatively, install just ggplot2:
install.packages("ggplot2")
# Or the development version from GitHub:
# install.packages("devtools")
devtools::install_github("tidyverse/ggplot2")
# FIX: the original line used a Unicode minus ("<−") and put two statements
# on one line, which is a parse error in R.
x <- rnorm(100, mean = 5, sd = 0.1)
mean(x)
sd(x)
summary(x)
demo(graphics)
| /Stephanie Ziritt Volcan/003/Practice_003.R | no_license | nanw01/r_study_nan | R | false | false | 597 | r | mydata2 <- read.csv(file="german credit card.csv",header = T,sep = ",")
mydata2
head(mydata2)
# Row indices of the smallest / largest credit amounts.
which.min(mydata2$amount)
which.max(mydata2$amount)
# Keep only amount/purpose/history for credits larger than 250.
y <- subset(mydata2, amount > 250, select = c(amount, purpose, history))
y
# The easiest way to get ggplot2 is to install the whole tidyverse:
install.packages("tidyverse")
# Alternatively, install just ggplot2:
install.packages("ggplot2")
# Or the development version from GitHub:
# install.packages("devtools")
devtools::install_github("tidyverse/ggplot2")
# FIX: the original line used a Unicode minus ("<−") and put two statements
# on one line, which is a parse error in R.
x <- rnorm(100, mean = 5, sd = 0.1)
mean(x)
sd(x)
summary(x)
demo(graphics)
|
#Joint Modeling Analysis
# NOTE(review): installing packages on every run is wasteful; consider
# guarding with requireNamespace() or running the installs once by hand.
install.packages("rjags")
install.packages("JM")
install.packages("JMbayes")
library(nlme)
library(survival)
library(JM)
library(rjags)
library(JMbayes)
library(splines)
# FIX: dplyr is required for the %>% pipe and mutate()/lag() used below
# when building the numeric patient id; it was never loaded.
library(dplyr)
#Survival sub-model
#I'm not using time-independent variables, but i will include age and sex just to obtain a cox model.
coxdata <- subset(clinical.data[,1:3])
coxdata <- cbind(coxdata,ydata_t0)
#change ID names to ID increment values: the integer id increments each time
#PATIENT_ID changes (requires dplyr for %>%/mutate/lag)
coxdata <- coxdata %>% mutate(id = cumsum(PATIENT_ID != lag(PATIENT_ID, default="")))
#coxdata$PATIENT_ID <- NULL
#due to an error in data, i will change the time of discharge to the last day in which gene expression were measured
coxdata[44, 4] = 8
#save(coxdata, file = "coxdata.RData")
# Cox models: SEX + AGE first, then SEX only as the final survival sub-model
coxtest <- coxph(Surv(coxdata$time, coxdata$status) ~ SEX + AGE, data=coxdata, x=TRUE)
summary(coxtest)
coxFinal <- coxph(Surv(coxdata$time, coxdata$status) ~ SEX, data=coxdata, x=TRUE)
summary(coxFinal)
# JM package: one joint model per longitudinal fit, all sharing coxFinal
#Rational function fitted
summary(rational.random.no.missing)
jointFitJM_rational <- jointModel(rational.random.no.missing, coxFinal, timeVar = "time", method = "piecewise-PH-GH", lng.in.kn = 5, iter.EM = 200)
summary(jointFitJM_rational)
#Exponential function fitted
summary(exponential.random.no.missing)
jointFitJM_exponential <- jointModel(exponential.random.no.missing, coxFinal, timeVar = "time", method = "piecewise-PH-GH", lng.in.kn = 5, iter.EM = 200)
summary(jointFitJM_exponential)
#Spline function fitted
summary(spline.random.no.missing)
jointFitJM_spline <- jointModel(spline.random.no.missing, coxFinal, timeVar = "time", method = "spline-PH-aGH", lng.in.kn = 5, iter.EM = 200)
summary(jointFitJM_spline)
#Cubic function fitted
summary(cubic.random.no.missing)
jointFitJM_cubic <- jointModel(cubic.random.no.missing, coxFinal, timeVar = "time", method = "piecewise-PH-GH", lng.in.kn = 5, iter.EM = 200)
summary(jointFitJM_cubic)
#JMbayes package
#NOTE(review): splineLME is not defined anywhere in this script (the spline
#fit above is called spline.random.no.missing) -- this call will fail;
#confirm which object was intended.
jointFitJMbayes <- jointModelBayes(splineLME, coxFinal, timeVar = "time")
#Rational function fitted
summary(rational.random.no.missing)
jointFitJMbayes_rational <- jointModelBayes(rational.random.no.missing, coxFinal, timeVar = "time")
summary(jointFitJMbayes_rational)
#Exponential function fitted
summary(exponential.random.no.missing)
jointFitJMbayes_exponential <- jointModelBayes(exponential.random.no.missing, coxFinal, timeVar = "time")
summary(jointFitJMbayes_exponential)
#Spline function fitted
summary(spline.random.no.missing)
jointFitJMbayes_spline <- jointModelBayes(spline.random.no.missing, coxFinal, timeVar = "time")
summary(jointFitJMbayes_spline)
#Cubic function fitted
summary(cubic.random.no.missing)
jointFitJMbayes_cubic <- jointModelBayes(cubic.random.no.missing, coxFinal, timeVar = "time")
summary(jointFitJMbayes_cubic)
#Mixed model
#simple function w/ all 12 genes: one Gaussian random-slope model per gene
MixedModelFit <- mvglmer(list(log(NM_013450) ~ time + (time | id),
log(AW474434) ~ time + (time | id),
log(NM_021730) ~ time + (time | id),
log(BG120535) ~ time + (time | id),
log(NM_005354) ~ time + (time | id),
log(AF279899) ~ time + (time | id),
log(BF940270) ~ time + (time | id),
log(NM_002600) ~ time + (time | id),
log(AW574504) ~ time + (time | id),
log(NM_018368) ~ time + (time | id),
log(NM_025151) ~ time + (time | id),
log(BC000896) ~ time + (time | id)),
data = genes.subset2,
families = list(gaussian, gaussian,gaussian, gaussian,
gaussian, gaussian, gaussian, gaussian,
gaussian, gaussian, gaussian, gaussian))
summary(MixedModelFit)
## Re-fit the Cox sub-model (model = TRUE as required by mvJointModelBayes).
CoxFit <- coxph(Surv(coxdata$time, coxdata$status) ~ SEX, data = coxdata, model = TRUE)
summary(CoxFit)
## Multivariate joint model combining all 12 longitudinal outcomes.
JMFit2 <- mvJointModelBayes(MixedModelFit, CoxFit, timeVar = "time")
summary(JMFit2)
| /JointModel_analysis.R | no_license | ClaudiaSConstantino/GlueGrant | R | false | false | 4,154 | r |
#Joint Modeling Analysis
# NOTE(review): installing packages on every run is wasteful; consider
# guarding with requireNamespace() or running the installs once by hand.
install.packages("rjags")
install.packages("JM")
install.packages("JMbayes")
library(nlme)
library(survival)
library(JM)
library(rjags)
library(JMbayes)
library(splines)
# FIX: dplyr is required for the %>% pipe and mutate()/lag() used below
# when building the numeric patient id; it was never loaded.
library(dplyr)
#Survival sub-model
#I'm not using time-independent variables, but i will include age and sex just to obtain a cox model.
coxdata <- subset(clinical.data[,1:3])
coxdata <- cbind(coxdata,ydata_t0)
#change ID names to ID increment values: the integer id increments each time
#PATIENT_ID changes (requires dplyr for %>%/mutate/lag)
coxdata <- coxdata %>% mutate(id = cumsum(PATIENT_ID != lag(PATIENT_ID, default="")))
#coxdata$PATIENT_ID <- NULL
#due to an error in data, i will change the time of discharge to the last day in which gene expression were measured
coxdata[44, 4] = 8
#save(coxdata, file = "coxdata.RData")
# Cox models: SEX + AGE first, then SEX only as the final survival sub-model
coxtest <- coxph(Surv(coxdata$time, coxdata$status) ~ SEX + AGE, data=coxdata, x=TRUE)
summary(coxtest)
coxFinal <- coxph(Surv(coxdata$time, coxdata$status) ~ SEX, data=coxdata, x=TRUE)
summary(coxFinal)
# JM package: one joint model per longitudinal fit, all sharing coxFinal
#Rational function fitted
summary(rational.random.no.missing)
jointFitJM_rational <- jointModel(rational.random.no.missing, coxFinal, timeVar = "time", method = "piecewise-PH-GH", lng.in.kn = 5, iter.EM = 200)
summary(jointFitJM_rational)
#Exponential function fitted
summary(exponential.random.no.missing)
jointFitJM_exponential <- jointModel(exponential.random.no.missing, coxFinal, timeVar = "time", method = "piecewise-PH-GH", lng.in.kn = 5, iter.EM = 200)
summary(jointFitJM_exponential)
#Spline function fitted
summary(spline.random.no.missing)
jointFitJM_spline <- jointModel(spline.random.no.missing, coxFinal, timeVar = "time", method = "spline-PH-aGH", lng.in.kn = 5, iter.EM = 200)
summary(jointFitJM_spline)
#Cubic function fitted
summary(cubic.random.no.missing)
jointFitJM_cubic <- jointModel(cubic.random.no.missing, coxFinal, timeVar = "time", method = "piecewise-PH-GH", lng.in.kn = 5, iter.EM = 200)
summary(jointFitJM_cubic)
#JMbayes package
#NOTE(review): splineLME is not defined anywhere in this script (the spline
#fit above is called spline.random.no.missing) -- this call will fail;
#confirm which object was intended.
jointFitJMbayes <- jointModelBayes(splineLME, coxFinal, timeVar = "time")
#Rational function fitted
summary(rational.random.no.missing)
jointFitJMbayes_rational <- jointModelBayes(rational.random.no.missing, coxFinal, timeVar = "time")
summary(jointFitJMbayes_rational)
#Exponential function fitted
summary(exponential.random.no.missing)
jointFitJMbayes_exponential <- jointModelBayes(exponential.random.no.missing, coxFinal, timeVar = "time")
summary(jointFitJMbayes_exponential)
#Spline function fitted
summary(spline.random.no.missing)
jointFitJMbayes_spline <- jointModelBayes(spline.random.no.missing, coxFinal, timeVar = "time")
summary(jointFitJMbayes_spline)
#Cubic function fitted
summary(cubic.random.no.missing)
jointFitJMbayes_cubic <- jointModelBayes(cubic.random.no.missing, coxFinal, timeVar = "time")
summary(jointFitJMbayes_cubic)
#Mixed model
#simple function w/ all 12 genes: one Gaussian random-slope model per gene
MixedModelFit <- mvglmer(list(log(NM_013450) ~ time + (time | id),
log(AW474434) ~ time + (time | id),
log(NM_021730) ~ time + (time | id),
log(BG120535) ~ time + (time | id),
log(NM_005354) ~ time + (time | id),
log(AF279899) ~ time + (time | id),
log(BF940270) ~ time + (time | id),
log(NM_002600) ~ time + (time | id),
log(AW574504) ~ time + (time | id),
log(NM_018368) ~ time + (time | id),
log(NM_025151) ~ time + (time | id),
log(BC000896) ~ time + (time | id)),
data = genes.subset2,
families = list(gaussian, gaussian,gaussian, gaussian,
gaussian, gaussian, gaussian, gaussian,
gaussian, gaussian, gaussian, gaussian))
summary(MixedModelFit)
## Re-fit the Cox sub-model (model = TRUE as required by mvJointModelBayes).
CoxFit <- coxph(Surv(coxdata$time, coxdata$status) ~ SEX, data = coxdata, model = TRUE)
summary(CoxFit)
## Multivariate joint model combining all 12 longitudinal outcomes.
JMFit2 <- mvJointModelBayes(MixedModelFit, CoxFit, timeVar = "time")
summary(JMFit2)
|
library(ComplexHeatmap)
if(requireNamespace("gridtext")) {
##### test anno_richtext ####
mat = matrix(rnorm(100), 10)
rownames(mat) = letters[1:10]
ht = Heatmap(mat,
column_title = gt_render("Some <span style='color:blue'>blue text **in bold.**</span><br>And *italics text.*<br>And some <span style='font-size:18pt; color:black'>large</span> text.", r = unit(2, "pt"), padding = unit(c(2, 2, 2, 2), "pt")),
column_title_gp = gpar(box_fill = "orange"),
row_labels = gt_render(letters[1:10], padding = unit(c(2, 10, 2, 10), "pt")),
row_names_gp = gpar(box_col = rep(2:3, times = 5), box_fill = ifelse(1:10%%2, "yellow", "white")),
row_km = 2,
row_title = gt_render(c("title1", "title2")),
row_title_gp = gpar(box_fill = "yellow"),
heatmap_legend_param = list(
title = gt_render("<span style='color:orange'>**Legend title**</span>"),
title_gp = gpar(box_fill = "grey"),
at = c(-3, 0, 3),
labels = gt_render(c("*negative* three", "zero", "*positive* three"))
))
ht = rowAnnotation(
foo = anno_text(gt_render(sapply(LETTERS[1:10], strrep, 10), align_widths = TRUE),
gp = gpar(box_col = "blue", box_lwd = 2),
just = "right",
location = unit(1, "npc")
)) + ht
draw(ht)
}
| /tests/test-gridtext.R | permissive | jokergoo/ComplexHeatmap | R | false | false | 1,244 | r | library(ComplexHeatmap)
if(requireNamespace("gridtext")) {
##### test anno_richtext ####
mat = matrix(rnorm(100), 10)
rownames(mat) = letters[1:10]
ht = Heatmap(mat,
column_title = gt_render("Some <span style='color:blue'>blue text **in bold.**</span><br>And *italics text.*<br>And some <span style='font-size:18pt; color:black'>large</span> text.", r = unit(2, "pt"), padding = unit(c(2, 2, 2, 2), "pt")),
column_title_gp = gpar(box_fill = "orange"),
row_labels = gt_render(letters[1:10], padding = unit(c(2, 10, 2, 10), "pt")),
row_names_gp = gpar(box_col = rep(2:3, times = 5), box_fill = ifelse(1:10%%2, "yellow", "white")),
row_km = 2,
row_title = gt_render(c("title1", "title2")),
row_title_gp = gpar(box_fill = "yellow"),
heatmap_legend_param = list(
title = gt_render("<span style='color:orange'>**Legend title**</span>"),
title_gp = gpar(box_fill = "grey"),
at = c(-3, 0, 3),
labels = gt_render(c("*negative* three", "zero", "*positive* three"))
))
ht = rowAnnotation(
foo = anno_text(gt_render(sapply(LETTERS[1:10], strrep, 10), align_widths = TRUE),
gp = gpar(box_col = "blue", box_lwd = 2),
just = "right",
location = unit(1, "npc")
)) + ht
draw(ht)
}
|
library(assertthat)
library(fs)
# checks that scenario names match in the different variables they are
# specified in. Returns `scens` invisibly, if all checks pass. Otherwise
# provides error messages.
crss_res_check_scen_names <- function(scens, icList, icMonth, ui)
{
ss5 <- ui$simple_5yr$ss5
check_plot_group_scens(ui, names(scens))
# check that the names of scens, icList, and icMonth are all the same; they
# don't necessarily need to be in the same order, just all exist in one another
assert_that(
all(names(scens) %in% names(icList), names(icList) %in% names(scens),
names(scens) %in% names(icMonth), names(icMonth) %in% names(scens),
names(icList) %in% names(icMonth), names(icMonth) %in% names(icList)),
msg = paste(
"scenario group names do not match.",
"\nthe names() of scens, icList, and icMonth should all be the same"
)
)
# if we made it here, we know names() of scens, icList, and icMonth all match,
# so just check to make sure that ss5 and heatmap_names is withing scens
assert_that(
all(names(ss5) %in% names(scens)),
msg = "scenario goup names of ss5 must match the names found in scens"
)
invisible(scens)
}
# creats the necesary folders for saving the output data, and ensures the
# folders exist
# returns the folder paths that the results and figures will be saved to as a
# list
crss_res_directory_setup <- function(i_folder, get_pe_data, get_sys_cond_data,
CRSSDIR, crss_month)
{
# onlyl check if reading in data if you have to getData
if (get_pe_data | get_sys_cond_data) {
message('Scenario data will be read in from: ', i_folder)
assert_that(
dir.exists(i_folder),
msg = paste(
i_folder,
'does not exist. Please ensure iFolder is set correctly.'
)
)
}
# folder location to save figures and fully procssed tables
assert_that(
dir.exists(CRSSDIR),
msg = paste(
CRSSDIR,
"does not exist.\n",
"** Please ensure CRSS_DIR environment variable is set correctly."
)
)
tmp_res_rolder <- file.path(CRSSDIR, 'results')
if (!file.exists(tmp_res_rolder)) {
message(tmp_res_rolder,'does not exist. Creating this folder...')
dir.create(tmp_res_rolder)
}
oFigs <- file.path(CRSSDIR,'results', crss_month)
if (!file.exists(oFigs)) {
message(paste('Creating folder:', oFigs))
dir.create(oFigs)
}
message('Figures and tables will be saved to: ', oFigs)
png_out <- file.path(oFigs, "png")
if (!file.exists(png_out)) {
message("Creating folder: ", png_out)
dir.create(png_out)
}
message("pngs will be saved to: ", png_out)
# folder to save procssed text files to (intermediate processed data)
resFolder <- file.path(CRSSDIR,'results', crss_month, 'tempData')
if (!file.exists(resFolder)) {
message(paste('Creating folder:', resFolder))
dir.create(resFolder)
}
message('Intermediate data will be saved to: ', resFolder)
# figure data --------------------
fig_data <- file.path(oFigs, "figure_data")
if (!file.exists(fig_data)) {
message("Creating folder: ", fig_data)
dir.create(fig_data)
}
message("Figure data will be saved to: ", fig_data)
# tables --------------------------
tables <- file.path(oFigs, "tables")
if (!file.exists(tables)) {
message("Creating folder: ", tables)
dir.create(tables)
}
message("Tables will be saved to: ", tables)
# return
list(figs_folder = oFigs, res_folder = resFolder, png_out = png_out,
figure_data = fig_data, tables = tables)
}
# returns a list of all the necessary output file names
crss_res_get_file_names <- function(main_pdf)
{
# return
list(
sys_cond_file = 'SysCond.feather' ,
tmp_pe_file = 'tempPE.feather',
# file name of Powell and Mead PE data
cur_month_pe_file = 'MeadPowellPE.feather',
short_cond_fig = 'shortConditionsFig.pdf',
main_pdf = main_pdf,
csd_file = "csd_ann.feather"
)
}
# goes through all the file names, and appends on the correct file paths, so
# all are fully specified paths
crss_res_append_file_path <- function(file_names, figs_folder, res_folder)
{
res <- c("sys_cond_file", "tmp_pe_file", "cur_month_pe_file", "csd_file")
for (i in names(file_names)) {
if (i %in% res) {
file_names[[i]] <- file.path(res_folder, file_names[[i]])
} else {
file_names[[i]] <- file.path(figs_folder, file_names[[i]])
}
}
file_names
}
construct_table_file_name <- function(table_name, scenario, yrs, extra_label)
{
year_lab <- paste0(yrs[1], '_', tail(yrs, 1))
if (extra_label != '') {
extra_label <- paste0(extra_label, "_")
}
str_replace_all(scenario, " ", "") %>%
paste0("_", extra_label, table_name, "_", year_lab, ".csv") %>%
path_sanitize()
}
# checks that all scenarios specified in plot_groups are found in the available
# scenarios (by name)
check_plot_group_scens <- function(ui, scen_names)
{
err <- NULL
for (i in seq_len(length(ui[["plot_group"]]))) {
spec_scens <- ui[["plot_group"]][[i]][["plot_scenarios"]]
spec_scens <- spec_scens[!(spec_scens %in% scen_names)]
if (length(spec_scens) > 0) {
err <- c(
err,
paste(
"In the", names(ui[["plot_group"]])[i],
"plot_group, the following scenarios do not match the specified scenarios:\n -",
paste(spec_scens, collapse = "\n -")
)
)
}
}
assert_that(length(err) == 0, msg = paste(err, collapse = "\n"))
invisible(ui)
}
# constructs a full file name based on provided info
# used for files that would otherwise have the same name, but inserts in the
# plot_group name to the file
construct_file_name <- function(ui, folder_paths, group_num, folder_name,
file_name)
{
file.path(
folder_paths[[folder_name]],
paste0(names(ui[["plot_group"]])[group_num], "_", file_name)
)
}
| /code/crss_res_directory_setup.R | no_license | BoulderCodeHub/Process-CRSS-Res | R | false | false | 6,010 | r | library(assertthat)
library(fs)
# checks that scenario names match in the different variables they are
# specified in. Returns `scens` invisibly, if all checks pass. Otherwise
# provides error messages.
crss_res_check_scen_names <- function(scens, icList, icMonth, ui)
{
ss5 <- ui$simple_5yr$ss5
check_plot_group_scens(ui, names(scens))
# check that the names of scens, icList, and icMonth are all the same; they
# don't necessarily need to be in the same order, just all exist in one another
assert_that(
all(names(scens) %in% names(icList), names(icList) %in% names(scens),
names(scens) %in% names(icMonth), names(icMonth) %in% names(scens),
names(icList) %in% names(icMonth), names(icMonth) %in% names(icList)),
msg = paste(
"scenario group names do not match.",
"\nthe names() of scens, icList, and icMonth should all be the same"
)
)
# if we made it here, we know names() of scens, icList, and icMonth all match,
# so just check to make sure that ss5 and heatmap_names is withing scens
assert_that(
all(names(ss5) %in% names(scens)),
msg = "scenario goup names of ss5 must match the names found in scens"
)
invisible(scens)
}
# creats the necesary folders for saving the output data, and ensures the
# folders exist
# returns the folder paths that the results and figures will be saved to as a
# list
crss_res_directory_setup <- function(i_folder, get_pe_data, get_sys_cond_data,
CRSSDIR, crss_month)
{
# onlyl check if reading in data if you have to getData
if (get_pe_data | get_sys_cond_data) {
message('Scenario data will be read in from: ', i_folder)
assert_that(
dir.exists(i_folder),
msg = paste(
i_folder,
'does not exist. Please ensure iFolder is set correctly.'
)
)
}
# folder location to save figures and fully procssed tables
assert_that(
dir.exists(CRSSDIR),
msg = paste(
CRSSDIR,
"does not exist.\n",
"** Please ensure CRSS_DIR environment variable is set correctly."
)
)
tmp_res_rolder <- file.path(CRSSDIR, 'results')
if (!file.exists(tmp_res_rolder)) {
message(tmp_res_rolder,'does not exist. Creating this folder...')
dir.create(tmp_res_rolder)
}
oFigs <- file.path(CRSSDIR,'results', crss_month)
if (!file.exists(oFigs)) {
message(paste('Creating folder:', oFigs))
dir.create(oFigs)
}
message('Figures and tables will be saved to: ', oFigs)
png_out <- file.path(oFigs, "png")
if (!file.exists(png_out)) {
message("Creating folder: ", png_out)
dir.create(png_out)
}
message("pngs will be saved to: ", png_out)
# folder to save procssed text files to (intermediate processed data)
resFolder <- file.path(CRSSDIR,'results', crss_month, 'tempData')
if (!file.exists(resFolder)) {
message(paste('Creating folder:', resFolder))
dir.create(resFolder)
}
message('Intermediate data will be saved to: ', resFolder)
# figure data --------------------
fig_data <- file.path(oFigs, "figure_data")
if (!file.exists(fig_data)) {
message("Creating folder: ", fig_data)
dir.create(fig_data)
}
message("Figure data will be saved to: ", fig_data)
# tables --------------------------
tables <- file.path(oFigs, "tables")
if (!file.exists(tables)) {
message("Creating folder: ", tables)
dir.create(tables)
}
message("Tables will be saved to: ", tables)
# return
list(figs_folder = oFigs, res_folder = resFolder, png_out = png_out,
figure_data = fig_data, tables = tables)
}
# returns a list of all the necessary output file names
crss_res_get_file_names <- function(main_pdf)
{
# return
list(
sys_cond_file = 'SysCond.feather' ,
tmp_pe_file = 'tempPE.feather',
# file name of Powell and Mead PE data
cur_month_pe_file = 'MeadPowellPE.feather',
short_cond_fig = 'shortConditionsFig.pdf',
main_pdf = main_pdf,
csd_file = "csd_ann.feather"
)
}
# goes through all the file names, and appends on the correct file paths, so
# all are fully specified paths
crss_res_append_file_path <- function(file_names, figs_folder, res_folder)
{
res <- c("sys_cond_file", "tmp_pe_file", "cur_month_pe_file", "csd_file")
for (i in names(file_names)) {
if (i %in% res) {
file_names[[i]] <- file.path(res_folder, file_names[[i]])
} else {
file_names[[i]] <- file.path(figs_folder, file_names[[i]])
}
}
file_names
}
construct_table_file_name <- function(table_name, scenario, yrs, extra_label)
{
year_lab <- paste0(yrs[1], '_', tail(yrs, 1))
if (extra_label != '') {
extra_label <- paste0(extra_label, "_")
}
str_replace_all(scenario, " ", "") %>%
paste0("_", extra_label, table_name, "_", year_lab, ".csv") %>%
path_sanitize()
}
# checks that all scenarios specified in plot_groups are found in the available
# scenarios (by name)
check_plot_group_scens <- function(ui, scen_names)
{
err <- NULL
for (i in seq_len(length(ui[["plot_group"]]))) {
spec_scens <- ui[["plot_group"]][[i]][["plot_scenarios"]]
spec_scens <- spec_scens[!(spec_scens %in% scen_names)]
if (length(spec_scens) > 0) {
err <- c(
err,
paste(
"In the", names(ui[["plot_group"]])[i],
"plot_group, the following scenarios do not match the specified scenarios:\n -",
paste(spec_scens, collapse = "\n -")
)
)
}
}
assert_that(length(err) == 0, msg = paste(err, collapse = "\n"))
invisible(ui)
}
# constructs a full file name based on provided info
# used for files that would otherwise have the same name, but inserts in the
# plot_group name to the file
construct_file_name <- function(ui, folder_paths, group_num, folder_name,
file_name)
{
file.path(
folder_paths[[folder_name]],
paste0(names(ui[["plot_group"]])[group_num], "_", file_name)
)
}
|
# _
# platform x86_64-w64-mingw32
# arch x86_64
# os mingw32
# crt ucrt
# system x86_64, mingw32
# status
# major 4
# minor 2.1
# year 2022
# month 06
# day 23
# svn rev 82513
# language R
# version.string R version 4.2.1 (2022-06-23 ucrt)
# nickname Funny-Looking Kid
# Mike Rieger, Update 04/06/2023
# Figure 2B: Bootstrap Worm Watcher Data
rm(list=ls()) # for console work
graphics.off() # for console work
wd="~/../Dropbox/chalasanilabsync/mrieger/Manuscripts/PribadiEtAl-2022/FINALv2/Figures/FIG 2/"
dataSheet = "Figure2B"
rawdata = "../../SourceData/RawData.xlsx"
alpha=0.05
setwd(wd)
source("../alwaysLoad.R") # base set of custom functions for reports
########## Parameters for Bootstrap ###############
seedval=2023
Nsim=10^5
frameRate=15 # 15 frames/hr
loProb=alpha/2
hiProb=1-alpha/2
lastFrame=301
firstFrame=1
########### Packages #############################
library(readxl) # reads excel files
library(stringr)
#source("computeKSBootstrap.R") # Function for bootstrap analysis
# Compute KS Bootstrap Comparing two samples
computeKSBootstrap = function(formula,data,contrast,contrastvar,samplevar,alpha=0.05,Nsim=1000,seed=1000){
vars=all.vars(formula)
response=vars[1]
x=vars[2]
#(x must be uniform)
sample1=data[data[,contrastvar]==contrast[1],]
sample2=data[data[,contrastvar]==contrast[2],]
set.seed(seed)
#initialize output
output=list(
poverlap=NA,
n1=length(unique(sample1[,samplevar])),
n2=length(unique(sample2[,samplevar])),
sav1=matrix(NA,nrow=length(unique(data[,x])),ncol=Nsim),
sav2=matrix(NA,nrow=length(unique(data[,x])),ncol=Nsim),
dalt=rep(NA,length=Nsim),
dnull=rep(NA,length=Nsim)
)
# Simulation loop
print("Running Sims")
for(i in 1:Nsim){
# Null samples
s1names=sample(unique(data[,samplevar]),size=output$n1,replace=TRUE)
s1=matrix(NA,nrow=length(unique(data[,x])),ncol=length(s1names))
for(s in 1:length(s1names)){
tmp=data[data[,samplevar]==s1names[s],]
tmp=tmp[order(tmp[,x],decreasing = FALSE),]
s1[,s]=tmp[,response]
}
s2names=sample(unique(data[,samplevar]),size=output$n2,replace=TRUE)
s2=matrix(NA,nrow=length(unique(data[,x])),ncol=length(s2names))
for(s in 1:length(s2names)){
tmp=data[data[,samplevar]==s2names[s],]
tmp=tmp[order(tmp[,x],decreasing = FALSE),]
s2[,s]=tmp[,response]
}
s1=rowMeans(s1)
s2=rowMeans(s2)
output$dnull[i] = max(abs(s2-s1))
#Alt Samples
s1names=sample(unique(sample1[,samplevar]),size=output$n1,replace=TRUE)
s1=matrix(NA,nrow=length(unique(sample1[,x])),ncol=length(s1names))
for(s in 1:length(s1names)){
tmp=data[data[,samplevar]==s1names[s],]
tmp=tmp[order(tmp[,x],decreasing = FALSE),]
s1[,s]=tmp[,response]
}
s2names=sample(unique(sample2[,samplevar]),size=output$n2,replace=TRUE)
s2=matrix(NA,nrow=length(unique(sample2[,x])),ncol=length(s2names))
for(s in 1:length(s2names)){
tmp=data[data[,samplevar]==s2names[s],]
tmp=tmp[order(tmp[,x],decreasing = FALSE),]
s2[,s]=tmp[,response]
}
s1=rowMeans(s1)
s2=rowMeans(s2)
output$sav1[,i]=s1
output$sav2[,i]=s2
output$dalt[i] = max(abs(s2-s1))
}
print("Finished Running Sims")
## Compute pOverlap: #dnull samples that exist within the 1-alpha confidence bounds of dalt
lwr=quantile(output$dalt,probs=alpha/2)
upr=quantile(output$dalt,probs=(1-alpha/2))
overlap = sum(output$dnull >= lwr & output$dnull <= upr)
output$poverlap = overlap/Nsim
## Compute two-tailed p-peak: number of samples that are as extreme or greater than the average of dalt.
pk=mean(output$dalt)
output$ppeak=((sum(output$dnull>=abs(pk))+sum(output$dnull<=-abs(pk)))/Nsim)
return(output)
}
######### Load Data ################################
d = as.data.frame(read_excel(rawdata,sheet=dataSheet))
d=d[d$Frame <=301,] # There are sometimes spurious additional +1 frame at the end of some recordings, trim to exactly 301 frames
######### Run Bootstrap ###########################
DistPCbootstrap = computeKSBootstrap(DistCenterMm ~ Frame, data=d,contrast = c("control","predator"),
contrastvar = "Condition",samplevar = "PlateID",Nsim = Nsim,seed = seedval, alpha=alpha)
###### Report #####################################
DistPCbootstrap.report = c(list(Nsim=Nsim),DistPCbootstrap[c("n1","n2","dnull","dalt","poverlap","ppeak")])
DistPCbootstrap.report$dalt = quantile(DistPCbootstrap.report$dalt,probs = c(loProb,hiProb))
DistPCbootstrap.report$dnull = quantile(DistPCbootstrap.report$dnull,probs = c(loProb,hiProb))
###### Write Report to Text File ##############################
capture.output(DistPCbootstrap.report,file=paste0(dataSheet,"_BootstrapReport.txt"))
### Write Output R Structure ###
filename=paste0(dataSheet,".RData")
save(d,DistPCbootstrap,DistPCbootstrap.report,file = filename) | /FIG 2/Figure2B_analysis.R | no_license | shreklab/PribadiEtAl2023 | R | false | false | 5,439 | r | # _
# platform x86_64-w64-mingw32
# arch x86_64
# os mingw32
# crt ucrt
# system x86_64, mingw32
# status
# major 4
# minor 2.1
# year 2022
# month 06
# day 23
# svn rev 82513
# language R
# version.string R version 4.2.1 (2022-06-23 ucrt)
# nickname Funny-Looking Kid
# Mike Rieger, Update 04/06/2023
# Figure 2B: Bootstrap Worm Watcher Data
rm(list=ls()) # for console work
graphics.off() # for console work
wd="~/../Dropbox/chalasanilabsync/mrieger/Manuscripts/PribadiEtAl-2022/FINALv2/Figures/FIG 2/"
dataSheet = "Figure2B"
rawdata = "../../SourceData/RawData.xlsx"
alpha=0.05
setwd(wd)
source("../alwaysLoad.R") # base set of custom functions for reports
########## Parameters for Bootstrap ###############
seedval=2023
Nsim=10^5
frameRate=15 # 15 frames/hr
loProb=alpha/2
hiProb=1-alpha/2
lastFrame=301
firstFrame=1
########### Packages #############################
library(readxl) # reads excel files
library(stringr)
#source("computeKSBootstrap.R") # Function for bootstrap analysis
# Compute KS Bootstrap Comparing two samples
computeKSBootstrap = function(formula,data,contrast,contrastvar,samplevar,alpha=0.05,Nsim=1000,seed=1000){
vars=all.vars(formula)
response=vars[1]
x=vars[2]
#(x must be uniform)
sample1=data[data[,contrastvar]==contrast[1],]
sample2=data[data[,contrastvar]==contrast[2],]
set.seed(seed)
#initialize output
output=list(
poverlap=NA,
n1=length(unique(sample1[,samplevar])),
n2=length(unique(sample2[,samplevar])),
sav1=matrix(NA,nrow=length(unique(data[,x])),ncol=Nsim),
sav2=matrix(NA,nrow=length(unique(data[,x])),ncol=Nsim),
dalt=rep(NA,length=Nsim),
dnull=rep(NA,length=Nsim)
)
# Simulation loop
print("Running Sims")
for(i in 1:Nsim){
# Null samples
s1names=sample(unique(data[,samplevar]),size=output$n1,replace=TRUE)
s1=matrix(NA,nrow=length(unique(data[,x])),ncol=length(s1names))
for(s in 1:length(s1names)){
tmp=data[data[,samplevar]==s1names[s],]
tmp=tmp[order(tmp[,x],decreasing = FALSE),]
s1[,s]=tmp[,response]
}
s2names=sample(unique(data[,samplevar]),size=output$n2,replace=TRUE)
s2=matrix(NA,nrow=length(unique(data[,x])),ncol=length(s2names))
for(s in 1:length(s2names)){
tmp=data[data[,samplevar]==s2names[s],]
tmp=tmp[order(tmp[,x],decreasing = FALSE),]
s2[,s]=tmp[,response]
}
s1=rowMeans(s1)
s2=rowMeans(s2)
output$dnull[i] = max(abs(s2-s1))
#Alt Samples
s1names=sample(unique(sample1[,samplevar]),size=output$n1,replace=TRUE)
s1=matrix(NA,nrow=length(unique(sample1[,x])),ncol=length(s1names))
for(s in 1:length(s1names)){
tmp=data[data[,samplevar]==s1names[s],]
tmp=tmp[order(tmp[,x],decreasing = FALSE),]
s1[,s]=tmp[,response]
}
s2names=sample(unique(sample2[,samplevar]),size=output$n2,replace=TRUE)
s2=matrix(NA,nrow=length(unique(sample2[,x])),ncol=length(s2names))
for(s in 1:length(s2names)){
tmp=data[data[,samplevar]==s2names[s],]
tmp=tmp[order(tmp[,x],decreasing = FALSE),]
s2[,s]=tmp[,response]
}
s1=rowMeans(s1)
s2=rowMeans(s2)
output$sav1[,i]=s1
output$sav2[,i]=s2
output$dalt[i] = max(abs(s2-s1))
}
print("Finished Running Sims")
## Compute pOverlap: #dnull samples that exist within the 1-alpha confidence bounds of dalt
lwr=quantile(output$dalt,probs=alpha/2)
upr=quantile(output$dalt,probs=(1-alpha/2))
overlap = sum(output$dnull >= lwr & output$dnull <= upr)
output$poverlap = overlap/Nsim
## Compute two-tailed p-peak: number of samples that are as extreme or greater than the average of dalt.
pk=mean(output$dalt)
output$ppeak=((sum(output$dnull>=abs(pk))+sum(output$dnull<=-abs(pk)))/Nsim)
return(output)
}
######### Load Data ################################
d = as.data.frame(read_excel(rawdata,sheet=dataSheet))
d=d[d$Frame <=301,] # There are sometimes spurious additional +1 frame at the end of some recordings, trim to exactly 301 frames
######### Run Bootstrap ###########################
DistPCbootstrap = computeKSBootstrap(DistCenterMm ~ Frame, data=d,contrast = c("control","predator"),
contrastvar = "Condition",samplevar = "PlateID",Nsim = Nsim,seed = seedval, alpha=alpha)
###### Report #####################################
DistPCbootstrap.report = c(list(Nsim=Nsim),DistPCbootstrap[c("n1","n2","dnull","dalt","poverlap","ppeak")])
DistPCbootstrap.report$dalt = quantile(DistPCbootstrap.report$dalt,probs = c(loProb,hiProb))
DistPCbootstrap.report$dnull = quantile(DistPCbootstrap.report$dnull,probs = c(loProb,hiProb))
###### Write Report to Text File ##############################
capture.output(DistPCbootstrap.report,file=paste0(dataSheet,"_BootstrapReport.txt"))
### Write Output R Structure ###
filename=paste0(dataSheet,".RData")
save(d,DistPCbootstrap,DistPCbootstrap.report,file = filename) |
#############################################################################################################
##Density Analysis
#############################################################################################################
## Mae Rennick and Bart DiFiore
## Urchin Density Data
library(here)
library(tidyverse)
source(here("analysis", "Functions.R"))
library(car)
#Here, we use the data from the density dependent herbivory trials in order to create a model that predicts the influence of red and purple urchin density on herbivory rate on giant kelp.
# ------------------------------------------------------------------------------------------------
## Set up and visualization and clean data
# ------------------------------------------------------------------------------------------------

# Read the mesocosm density-trial data and derive per-trial response variables:
#   kelp_consumed  = kelp added minus kelp remaining per trial
#   herbivory_rate = consumption scaled by trial duration * 24
#                    (assumes total_time is in hours, giving a per-day rate -- TODO confirm)
#   biomass        = summed urchin mass per trial divided by 1.587
#                    (presumably the tank footprint in m^2 to express g m^-2 -- TODO confirm)
# NOTE(review): the result is left grouped (group_by() without ungroup()); downstream code
# treats it as a plain data frame, which works here but is fragile.
df <- read.csv("data/density_experiment/mesocosm_density_data.csv") %>%
  as_tibble() %>%
  select(kelp_in, kelp_out, urchin_density, tank, date, trial_number, p_r, trial_id, total_time, urchin_size, urchin_mass, mortality) %>%
  mutate(kelp_consumed=(kelp_in-kelp_out)) %>%
  mutate (herbivory_rate = (kelp_consumed/total_time)*24,
          abundance = urchin_density,
          urchin_density = NULL) %>%
  group_by(date, p_r, trial_number, trial_id, tank, total_time, kelp_in, kelp_out, mortality, kelp_consumed, abundance, herbivory_rate) %>%
  summarize(biomass= sum(urchin_mass )/1.587) %>%
  # urchin_size/urchin_mass are already dropped by summarize(); these NULL assignments
  # are harmless no-ops, kept from an earlier version of the pipeline.
  mutate(urchin_size = NULL,
         urchin_mass = NULL,
         sp = ifelse(p_r == "p", "Purple urchin", "Red urchin"))
### Summary statistics of herbivory rate by species.
# BUG FIX: the standard error was previously computed as sd/n(); the standard
# error of the mean is sd/sqrt(n).
df %>% group_by(sp) %>%
  summarize(mean = mean(herbivory_rate),
            sd = sd(herbivory_rate),
            se = sd(herbivory_rate)/sqrt(n()),
            min = min(herbivory_rate),
            max = max(herbivory_rate))
# ------------------------------------------------------------------------------------------------
## Purple Analysis
# ------------------------------------------------------------------------------------------------

# Here we test different candidate models to determine the best-fit model for the
# purple urchin density data (the original comment said "red"; this section is purple).
pf <- df[df$p_r == "p", ] # limit the dataset to purple urchin trials only

# Linear model through the origin: herbivory rate proportional to urchin biomass.
lm1 <- lm(herbivory_rate ~ 0 + biomass, pf)
summary(lm1)
modelassump(lm1) # residual diagnostics (custom helper sourced from analysis/Functions.R)
# Quadratic (second-order polynomial, no intercept) model of herbivory rate vs. biomass.
# (Named "exp1" historically; note it is polynomial, not exponential.)
exp1 <- lm(herbivory_rate ~ 0 + biomass + I(biomass^2), pf)
summary(exp1)

# Power law fit on the log-log scale. Fit this way it cannot be AIC-compared with the
# untransformed models, so pow2 below refits the power law by nonlinear least squares.
pow1 <- lm(log(herbivory_rate+1) ~ 0 + log(biomass), pf)
summary(pow1)

# Prediction grid spanning the observed purple-urchin biomass range.
pred <- data.frame(biomass = seq(min(pf$biomass), max(pf$biomass), length.out = 1000))
pred$lm <- predict(lm1, newdata = pred)    # linear model predictions
pred$exp1 <- predict(exp1, newdata = pred) # quadratic model predictions
# FIX: argument was misspelled `newdat` and only worked via partial matching.
# NOTE: these predictions are on the log(y + 1) scale, unlike the other columns.
pred$pow1 <- predict(pow1, newdata = pred) # power-law model predictions (log scale)
# Power-law model refit on the untransformed scale (nonlinear least squares) so it can
# be AIC-compared with the other untransformed fits; start values come from the log-log
# fit above (intercept exp(-1.377), slope 0.29).
pow2 <- nls(herbivory_rate ~ a*(biomass^b), data = pf, start = list(a = exp(-1.377), b = 0.29))
summary(pow2) # nonlinear regression model for the relationship between biomass and herbivory rate

# Sigmoidal (Hill-type) model; alternative parameterization based on Bolker et al. 2008,
# bestiary of functions p. 22: "a" is similar to handling time, and "b" is similar to
# attack rate in this parameterization.
sig <- nls(herbivory_rate ~ (a * biomass^2) / (b^2 + biomass^2), data = pf, start = list(a = 10, b = 1000))
summary(sig)

pred$sig <- predict(sig, newdata = pred)

AIC(lm1, exp1, pow2, sig) # comparing models (lower AIC = better; pow1 excluded: transformed response)
# So it seems that there is no evidence for any differences between curves.
# Overlay the fitted curves on the raw purple-urchin data: linear (red), quadratic (blue),
# sigmoidal (green). The dashed vertical line marks the sigmoid's half-saturation
# parameter estimate, coef(sig)[2].
model_compare <- ggplot(pf, aes(x = biomass, y = herbivory_rate))+
  geom_jitter(pch = 21, width =30)+
  geom_line(data = pred, aes(x = biomass, y = lm), color = "red")+
  geom_line(data = pred, aes(x = biomass, y = exp1), color = "blue")+
  geom_line(data = pred, aes(x = biomass, y = sig), color = "green")+
  #geom_line(data = pred2, aes(x = biomass, y = fit), color = "black")+
  geom_vline(xintercept = coef(sig)[2], lty = "dashed")+
  ggpubr::theme_pubclean()

# Per-unit-biomass consumption vs. biomass: a slope different from zero here would
# indicate density dependence (nonlinearity in total consumption).
ggplot(pf, aes(x = biomass, y = herbivory_rate/biomass))+
  geom_point()+
  geom_smooth(method = "lm")

# Linear model of per-unit-biomass herbivory rate as a function of biomass.
lin_mod <- lm(I(herbivory_rate/biomass) ~ biomass, pf)
summary(lin_mod) # High p-value and low R^2: cannot show the slope differs from zero,
                 # i.e. no evidence for a nonlinearity.

exp_mod <- lm(I(herbivory_rate/biomass) ~ biomass + I(biomass^2), pf) # quadratic alternative
summary(exp_mod)
# Does the same pattern hold with abundance (urchin counts) instead of biomass?
lm2 <- lm(herbivory_rate ~ abundance, pf) # linear regression (with intercept, unlike lm1)
summary(lm2)

exp2 <- lm(herbivory_rate ~ abundance + I(abundance^2), pf) # quadratic regression of herbivory rate on abundance
summary(exp2)

# Prediction grid spanning the observed abundance range.
pred2 <- data.frame(abundance = seq(min(pf$abundance), max(pf$abundance), length.out = 1000))
pred2$lm2 <- predict(lm2, newdata = pred2)   # linear model predictions
pred2$exp2 <- predict(exp2, newdata = pred2) # quadratic model predictions

# Sigmoidal model; alternative parameterization based on Bolker et al. 2008, bestiary of
# functions p. 22: "a" is similar to handling time, and "b" is similar to attack rate.
sig2 <- nls(herbivory_rate ~ (a * abundance^2) / (b^2 + abundance^2), data = pf, start = list(a = 10, b = 22))
summary(sig2)

pred2$sig2 <- predict(sig2, newdata = pred2)

AIC(lm2, exp2, sig2) # all of the model predictions are similar

# Overlay the abundance-based fits: linear (red), quadratic (blue), sigmoidal (green);
# dashed line marks the sigmoid half-saturation estimate coef(sig2)[2].
ggplot(pf, aes(x = abundance, y = herbivory_rate))+
  geom_jitter()+
  geom_line(data = pred2, aes(x = abundance, y = lm2), color = "red")+
  geom_line(data = pred2, aes(x = abundance, y = exp2), color = "blue")+
  geom_line(data = pred2, aes(x = abundance, y = sig2), color = "green")+
  geom_vline(xintercept = coef(sig2)[2], lty = "dashed")+
  ggpubr::theme_pubclean()
# ------------------------------------------------------------------------------------------------
## Red analysis
# ------------------------------------------------------------------------------------------------

# Here we test different candidate models to determine the best-fit model for the
# red urchin density data.
rf <- df[df$p_r == "r", ] # limit the dataset to red urchin trials only

# Linear model through the origin: herbivory rate proportional to urchin biomass.
lm1.r <- lm(herbivory_rate ~ 0 + biomass, rf)
summary(lm1.r)
modelassump(lm1.r) # residual diagnostics (custom helper sourced from analysis/Functions.R)

# Quadratic model of herbivory rate vs. biomass (with intercept, unlike lm1.r).
exp1.r <- lm(herbivory_rate ~ biomass + I(biomass^2), rf)
summary(exp1.r) # BUG FIX: was summary(exp1), which reported the purple-urchin model

# Prediction grid spanning the observed red-urchin biomass range.
pred3 <- data.frame(biomass = seq(min(rf$biomass), max(rf$biomass), length.out = 1000))
pred3$lm1.r <- predict(lm1.r, newdata = pred3)   # linear model predictions
pred3$exp1.r <- predict(exp1.r, newdata = pred3) # quadratic model predictions

# Sigmoidal (Hill-type) model; alternative parameterization based on Bolker et al. 2008,
# bestiary of functions p. 22: "a" is similar to handling time, and "b" is similar to
# attack rate in this parameterization.
sig.r <- nls(herbivory_rate ~ (a * biomass^2) / (b^2 + biomass^2), data = rf, start = list(a = 10, b = 1000))
summary(sig.r)

# Power-law model fit on the untransformed scale so it can be compared by AIC.
pow2.r <- nls(herbivory_rate ~ (a * biomass^b), data = rf, start = list(a = 10, b = 1))
summary(pow2.r)

pred3$sig.r <- predict(sig.r, newdata = pred3)

AIC(lm1.r, pow2.r, sig.r) # compare candidate models (lower AIC = better)
# No strong evidence for differences between the linear and sigmoidal curves.
# Overlay the red-urchin fits: linear (red), quadratic (blue), sigmoidal (green);
# dashed line marks the sigmoid half-saturation estimate.
# BUG FIX: the geom_line aesthetics previously referenced y = lm / exp1 / sig, which
# do not exist as columns of pred3 (they resolved to model objects / the base lm
# function in the global environment and would error when the plot is printed).
# The intended columns are pred3's lm1.r, exp1.r, and sig.r; likewise the vline
# should use the red model sig.r, not the purple sig.
model_compare3 <- ggplot(rf, aes(x = biomass, y = herbivory_rate))+
  geom_jitter(pch = 21, width =30)+
  geom_line(data = pred3, aes(x = biomass, y = lm1.r), color = "red")+
  geom_line(data = pred3, aes(x = biomass, y = exp1.r), color = "blue")+
  geom_line(data = pred3, aes(x = biomass, y = sig.r), color = "green")+
  geom_vline(xintercept = coef(sig.r)[2], lty = "dashed")+
  ggpubr::theme_pubclean()
# ------------------------------------------------------------------------------------------------
## Figure 2: The relationship between biomass and herbivory rate
# ------------------------------------------------------------------------------------------------

# Purple urchin prediction grid with fit and 95% confidence band from the linear model.
pred <- data.frame(biomass = seq(min(pf$biomass), max(pf$biomass), length.out = 1000))
pred$herbivory_rate <- predict(lm1, newdata = pred, interval = "confidence")[,1]
pred$low <- predict(lm1, newdata = pred, interval = "confidence")[,2]
pred$high <- predict(lm1, newdata = pred, interval = "confidence")[,3]
pred$sp <- "Purple urchin"

# Red urchin prediction grid. BUG FIX: these predictions previously used
# newdata = pred (the purple biomass grid), so the red fit/ribbon were evaluated
# at the wrong x values and then plotted against pred.r$biomass.
pred.r <- data.frame(biomass = seq(min(rf$biomass), max(rf$biomass), length.out = 1000))
pred.r$herbivory_rate <- predict(lm1.r, newdata = pred.r, interval = "confidence")[,1]
pred.r$low <- predict(lm1.r, newdata = pred.r, interval = "confidence")[,2]
pred.r$high <- predict(lm1.r, newdata = pred.r, interval = "confidence")[,3]
pred.r$sp <- "Red urchin"

# Stack the two species' prediction grids for faceted plotting.
pred <- bind_rows(pred, pred.r)
# Figure 2: observed herbivory rate vs. urchin biomass by species, with the fitted
# linear model (line) and its confidence ribbon. The shaded rectangle spans
# 668-1246 g m^-2 (presumably the barren-transition density range from the
# literature cited below -- TODO confirm source of 1246).
fig2 <- ggplot(df, aes(x = biomass, y = herbivory_rate))+
  geom_rect(aes(xmin= 668, xmax=1246, ymin=-Inf, ymax=Inf), fill = "gray90", alpha = 0.1)+
  #geom_vline(xintercept = 668, linetype = 4)+
  geom_point(aes(fill = sp), pch = 21, size=3)+
  # purple for purple urchins, red for red urchins
  scale_fill_manual(values = c("#550f7a", "#E3493B"))+
  geom_line(data = pred, aes( x= biomass, y = herbivory_rate), size = 1, show.legend = F)+
  geom_ribbon(data = pred, aes( ymin = low, ymax = high,fill=sp), alpha = 0.3, show.legend = F)+
  facet_wrap(~sp)+
  theme_classic()+
  theme(strip.text = element_text(size = 10))+
  labs(x = expression(paste("Urchin biomass (g m"^"-2"*")")), y = expression(paste("Herbivory rate (g m"^"-2"*"d"^"-1"*")")), color = "", linetype = "")+
  theme(strip.background = element_blank())+
  theme(legend.position = c(.85,.90))+
  theme(legend.title=element_blank())+
  # hide the facet strip labels entirely (species is shown via the legend instead)
  theme(
    strip.background = element_blank(),
    strip.text.x = element_blank()
  )+
  theme(axis.title.x= element_text(color= "black", size=14),
        axis.title.y= element_text(color= "black", size=14))+
  theme(legend.background = element_blank(), legend.box.background = element_blank() )+
  theme(axis.text = element_text(size = 12))+
  theme(legend.text=element_text(size=12))

fig2

# Write the figure in three formats: png, pdf (editable text), and a
# publication-resolution tiff.
ggsave("figures/herbivoryXdensity_fig2.png", fig2, device = "png",width=7,height=3.5)
ggsave("figures/herbivoryXdensity_fig2.pdf", fig2, device = "pdf", useDingbats = FALSE)

tiff(filename="figures/Fig2.tif",height=5600/2,width=5200,units="px",res=800,compression="lzw")
fig2
dev.off()
########################
## Summary stats
########################
predict(lm1, newdata = list(biomass = 668), se.fit = T) #Using the model to predict herbivory rate at the transition density cited in Ling et al. 2016
#------------------------------------------------
# Supplemental figure 1
#-----------------------------------------------
pf$con_per_g_biomass <- pf$herbivory_rate / pf$biomass #adding con_per_g_biomass to the purple urchin dataset
s1 <- lm(con_per_g_biomass ~ biomass, pf) #linear model of per capita consumption for purple urchins. what is con? concentration? concentration of what? This is percapita consumption?
summary(s1)
modelassump(s1) #Is this heterodacictic too?
rf$con_per_g_biomass <- rf$herbivory_rate / rf$biomass #adding con_per_g_biomass to the red urchin dataset
s2 <- lm(con_per_g_biomass ~ biomass, rf) #linear model of per capita consumption of red urchins.
summary(s2)
modelassump(s2) #This looks less conclusive. P value over .05 meaning we cannot confirm that the slope is not zero therefore there is a positve correlation between biomass and consumption?
plot(con_per_g_biomass ~ biomass, pf)
plot(con_per_g_biomass ~ biomass, rf) #This one looks really spread out
gg <- data.frame(biomass = seq(0, max(df$biomass, na.rm = T), length.out = 1000)) #biomass values from the dataset
gg$`Purple urchin` <- predict(s1, newdata = gg)
gg$`Red urchin` <- predict(s2, newdata = gg) #merging the linear model predictions of per capita consumption for purple and red urchins in the gg dataset.
gg <- gg %>% gather(sp, prediction, -biomass) #merging the purple and red predictions
df$con_per_g_biomass <- df$herbivory_rate / df$biomass
S1 <- ggplot(df, aes(x = biomass, y = con_per_g_biomass))+
geom_jitter(aes(fill = sp), pch = 21, show.legend = F)+
scale_fill_manual(values = c("#762a83", "#d73027"))+
facet_wrap(~sp, scales = "free")+
scale_x_continuous(limits = c(0, 2000))+
scale_y_continuous(limits = c(-0.02, 0.06))+
geom_hline(yintercept = 0, lty = "dashed", color = "gray")+
labs(x = expression(paste("Urchin biomass density (g m"^"-2"*")")),
y = expression(paste("Herbivory rate (g"["kelp"]*"g"["urc"]^"-1"*"m"^"-2"*"d"^"-1"*")")),
color = "", linetype = "")+
theme_classic()+
theme(strip.text = element_text(size = 10))+
theme(strip.background = element_blank())+
theme(legend.position = c(.90,.90))+
theme(legend.title=element_blank())+
theme(axis.title.x= element_text(color= "black", size=20),
axis.title.y= element_text(color= "black", size=20))+
theme(legend.text=element_text(size=10))+
theme(legend.background = element_rect(
size=0.5, linetype ="solid"))+
theme(axis.text = element_text(size = 15))+
theme(legend.text=element_text(size=15))
ggsave(here("figures", "percapconsumptionxbiomass.png"), S1, device = "png", width = 8.5, height = 5)
#----------------------------------------
# deltaAIC table
#----------------------------------------
pl <- c(
`(Intercept)` = "Intercept"
)
sjPlot::tab_model(lm1, pow2, sig,
show.aic = T,
show.icc = F,
show.loglik = T,
show.ngroups = F,
pred.labels = pl,
dv.labels = c("Linear", "Power-law", "Sigmoid"), file = here::here("figures/", "AICtablePurple.html"))
sjPlot::tab_model(lm1.r, pow2.r, sig.r,
show.aic = T,
show.icc = F,
show.loglik = T,
show.ngroups = F,
pred.labels = pl,
dv.labels = c("Linear", "Power-law", "Sigmoid"), file = here::here("figures/", "AICtableRed.html"))
#-----------------------------------------------------------------------------------------
## Test the effects of the tank dividers
#-----------------------------------------------------------------------------------------
bd <- df %>% separate(tank, into = c("tank", "side"), sep = "[-]")
aov <- aov(herbivory_rate ~ side, bd)
summary(aov)
TukeyHSD(aov)
# Per-tank ANOVAs: test the side (divider) effect within each tank.
out <- list()
tanks <- unique(bd$tank)
tanks <- tanks[tanks != 9]  # tank 9 excluded -- reason not documented here; TODO confirm
# Bug fix: iterate over the filtered `tanks` vector. The original indexed
# 1:length(unique(bd$tank)), which runs past the end of `tanks` after the
# filter above, giving tanks[i] = NA and an ANOVA on an empty subset.
for (i in seq_along(tanks)) {
  out[[i]] <- summary(aov(herbivory_rate ~ side, bd[bd$tank == tanks[i], ]))
}
table(bd$tank, bd$side)
ggplot(bd, aes(x = tank, group = side, y = herbivory_rate))+
geom_bar(aes(fill = side), stat = "identity", position = "dodge")
| /analysis/2_density_analysis.R | no_license | stier-lab/Rennick-2019-urchin-grazing | R | false | false | 14,949 | r | #############################################################################################################
##Density Analysis
#############################################################################################################
## Mae Rennick and Bart DiFiore
## Urchin Density Data
library(here)
library(tidyverse)
source(here("analysis", "Functions.R"))
library(car)
#Here, we use the data from the density dependent herbivory trials in order to create a model that predicts the influence of red and purple urchin denisty on herbivory rate on giant kelp.
# ------------------------------------------------------------------------------------------------
## Set up and visualization and clean data
# ------------------------------------------------------------------------------------------------
df <- read.csv("data/density_experiment/mesocosm_density_data.csv") %>%
as_tibble() %>%
select(kelp_in, kelp_out, urchin_density, tank, date, trial_number, p_r, trial_id, total_time, urchin_size, urchin_mass, mortality) %>%
mutate(kelp_consumed=(kelp_in-kelp_out)) %>%
mutate (herbivory_rate = (kelp_consumed/total_time)*24,
abundance = urchin_density,
urchin_density = NULL) %>%
group_by(date, p_r, trial_number, trial_id, tank, total_time, kelp_in, kelp_out, mortality, kelp_consumed, abundance, herbivory_rate) %>%
summarize(biomass= sum(urchin_mass )/1.587) %>%
mutate(urchin_size = NULL,
urchin_mass = NULL,
sp = ifelse(p_r == "p", "Purple urchin", "Red urchin"))
###summary statistics
df %>% group_by(sp) %>%
summarize(mean = mean(herbivory_rate),
sd = sd(herbivory_rate),
se = sd(herbivory_rate)/n(),
min = min(herbivory_rate),
max = max(herbivory_rate))
# ------------------------------------------------------------------------------------------------
## Purple Analaysis
# ------------------------------------------------------------------------------------------------
#here we are testing different models to determine the best fit model for the red density data
pf <- df[df$p_r == "p", ] #limiting our dataset to only purple urchins
lm1 <- lm(herbivory_rate ~ 0 + biomass, pf) #linear model representing herbivory rate as a function of biomass.
summary(lm1)
modelassump(lm1)
exp1 <- lm(herbivory_rate ~ 0 + biomass + I(biomass^2), pf) #testing the exponential relationship between the biomass of purple urchins and their herbivory rate.
summary(exp1)
pow1 <- lm(log(herbivory_rate+1) ~ 0 + log(biomass), pf) # this is fine but need to fit untransformed so that I can use AIC
summary(pow1)
pred <- data.frame(biomass = seq(min(pf$biomass), max(pf$biomass), length.out = 1000))
pred$lm <- predict(lm1, newdata = pred)#model prediction for a linear function
pred$exp1 <- predict(exp1, newdata = pred)#model prediction for an exponential function
pred$pow1 <- predict(pow1, newdat = pred)#model prediction for a power law function
pow2 <- nls(herbivory_rate ~ a*(biomass^b), data = pf, start = list(a = exp(-1.377), b = 0.29))
summary(pow2) #This is a nonlinear regression model for the realtionship between biomass and herbivory rate.
sig <- nls(herbivory_rate ~ (a * biomass^2) / (b^2 + biomass^2), data = pf, start = list(a = 10, b = 1000)) # this is an alternative parameterization based on Bolker et al. 2008, bestiary of functions p. 22. "a" is similar to handling time, and b is similar to attack rate in this parameterization.
summary(sig)
pred$sig <- predict(sig, newdata = pred)
AIC(lm1, exp1, pow2, sig) #comparing models
# So it seems that there is no evidence for any differences between curves.
model_compare <- ggplot(pf, aes(x = biomass, y = herbivory_rate))+
geom_jitter(pch = 21, width =30)+
geom_line(data = pred, aes(x = biomass, y = lm), color = "red")+
geom_line(data = pred, aes(x = biomass, y = exp1), color = "blue")+
geom_line(data = pred, aes(x = biomass, y = sig), color = "green")+
#geom_line(data = pred2, aes(x = biomass, y = fit), color = "black")+
geom_vline(xintercept = coef(sig)[2], lty = "dashed")+
ggpubr::theme_pubclean()
ggplot(pf, aes(x = biomass, y = herbivory_rate/biomass))+
geom_point()+
geom_smooth(method = "lm") #linear model of biomass v. herbivory
lin_mod <- lm(I(herbivory_rate/biomass) ~ biomass, pf) #Linear model tracking per capita herbivory rate?
summary(lin_mod) #High p value and low R2 value. Cannot prove that the slope is different from zero which means there is no evidence for a nonlinearity.
exp_mod <- lm(I(herbivory_rate/biomass) ~ biomass + I(biomass^2), pf)
summary(exp_mod)
# does the same pattern apply with abundance (not biomass)
lm2 <- lm(herbivory_rate ~ abundance, pf) #linear regression of abundance v herbivory rate
summary(lm2) #Why does the intercept have a p-value higher than 0.05?
exp2 <- lm(herbivory_rate ~ abundance + I(abundance^2), pf) #exponential regression of herbivory rate as a function of abundance.
summary(exp2)
pred2 <- data.frame(abundance = seq(min(pf$abundance), max(pf$abundance), length.out = 1000))
pred2$lm2 <- predict(lm2, newdata = pred2) #linear model predictions for herbivory rate as a function of biomass
pred2$exp2 <- predict(exp2, newdata = pred2) #exponential model predictions for herbivory rate as a function of biomass
sig2 <- nls(herbivory_rate ~ (a * abundance^2) / (b^2 + abundance^2), data = pf, start = list(a = 10, b = 22)) # this is an alternative parameterization based on Bolker et al. 2008, bestiary of functions p. 22. "a" is similar to handling time, and b is similar to attack rate in this parameterization.
summary(sig2)
pred2$sig2 <- predict(sig2, newdata = pred2)
AIC(lm2, exp2, sig2) #all of the model predictions are simillar
ggplot(pf, aes(x = abundance, y = herbivory_rate))+
geom_jitter()+
geom_line(data = pred2, aes(x = abundance, y = lm2), color = "red")+
geom_line(data = pred2, aes(x = abundance, y = exp2), color = "blue")+
geom_line(data = pred2, aes(x = abundance, y = sig2), color = "green")+
geom_vline(xintercept = coef(sig2)[2], lty = "dashed")+
ggpubr::theme_pubclean()
# ------------------------------------------------------------------------------------------------
## Red analysis
# ------------------------------------------------------------------------------------------------
#here we are testing different models to determine the best fit model for the red density data
rf <- df[df$p_r == "r", ] #limiting our dataset to only purple urchins
lm1.r <- lm(herbivory_rate ~ 0 + biomass, rf)#linear model representing herbivory rate as a function of biomass.
summary(lm1.r)
modelassump(lm1.r)
exp1.r <- lm(herbivory_rate ~ biomass + I(biomass^2), rf)
summary(exp1) #exponential regression of herbivory rate as a function of biomass
pred3 <- data.frame(biomass = seq(min(rf$biomass), max(rf$biomass), length.out = 1000))
pred3$lm1.r <- predict(lm1.r, newdata = pred3) #linear model predictions.
pred3$exp1.r <- predict(exp1.r, newdata = pred3) #exponential model predictions
sig.r <- nls(herbivory_rate ~ (a * biomass^2) / (b^2 + biomass^2), data = rf, start = list(a = 10, b = 1000)) # this is an alternative parameterization based on Bolker et al. 2008, bestiary of functions p. 22. "a" is similar to handling time, and b is similar to attack rate in this parameterization.
summary(sig.r)
pow2.r <- nls(herbivory_rate ~ (a * biomass^b), data = rf, start = list(a = 10, b = 1))
summary(pow2.r)
pred3$sig.r <- predict(sig.r, newdata = pred3)
AIC(lm1.r, pow2.r, sig.r)
# So it seems that there is no evidence for any differences between linear and sigmoidal curves.
# Visual comparison of candidate fits for red urchins.
# Bug fix: the original layers mapped y to `lm`, `exp1`, and `sig` -- columns
# that do not exist in pred3 (those names resolve to the purple-urchin model
# objects / the stats::lm function in the calling environment), and the vline
# used the purple sigmoid's coefficient. Use the red-model prediction columns
# (lm1.r, exp1.r, sig.r) and the red sigmoid's half-saturation coefficient.
model_compare3 <- ggplot(rf, aes(x = biomass, y = herbivory_rate)) +
  geom_jitter(pch = 21, width = 30) +
  geom_line(data = pred3, aes(x = biomass, y = lm1.r), color = "red") +
  geom_line(data = pred3, aes(x = biomass, y = exp1.r), color = "blue") +
  geom_line(data = pred3, aes(x = biomass, y = sig.r), color = "green") +
  geom_vline(xintercept = coef(sig.r)[2], lty = "dashed") +
  ggpubr::theme_pubclean()
# ------------------------------------------------------------------------------------------------
## Figure 2: The relationship between biomass and herbivory rate
# ------------------------------------------------------------------------------------------------
pred <- data.frame(biomass = seq(min(pf$biomass), max(pf$biomass), length.out = 1000))
pred$herbivory_rate <- predict(lm1, newdata = pred, interval = "confidence")[,1]
pred$low <- predict(lm1, newdata = pred, interval = "confidence")[,2]
pred$high <- predict(lm1, newdata = pred, interval = "confidence")[,3]
pred$sp <- "Purple urchin"
# Build the red-urchin prediction grid and confidence band, then stack it
# under the purple-urchin predictions for plotting.
pred.r <- data.frame(biomass = seq(min(rf$biomass), max(rf$biomass), length.out = 1000))
# Bug fix: the original called predict(lm1.r, newdata = pred, ...), evaluating
# the red-urchin model on the PURPLE biomass grid, so fitted values and CIs
# were attached to the wrong biomass values. Also call predict() once instead
# of three times.
ci.r <- predict(lm1.r, newdata = pred.r, interval = "confidence")
pred.r$herbivory_rate <- ci.r[, 1]  # fitted values
pred.r$low <- ci.r[, 2]             # lower confidence bound
pred.r$high <- ci.r[, 3]            # upper confidence bound
pred.r$sp <- "Red urchin"
pred <- bind_rows(pred, pred.r)
fig2 <- ggplot(df, aes(x = biomass, y = herbivory_rate))+
geom_rect(aes(xmin= 668, xmax=1246, ymin=-Inf, ymax=Inf), fill = "gray90", alpha = 0.1)+
#geom_vline(xintercept = 668, linetype = 4)+
geom_point(aes(fill = sp), pch = 21, size=3)+
scale_fill_manual(values = c("#550f7a", "#E3493B"))+
geom_line(data = pred, aes( x= biomass, y = herbivory_rate), size = 1, show.legend = F)+
geom_ribbon(data = pred, aes( ymin = low, ymax = high,fill=sp), alpha = 0.3, show.legend = F)+
facet_wrap(~sp)+
theme_classic()+
theme(strip.text = element_text(size = 10))+
labs(x = expression(paste("Urchin biomass (g m"^"-2"*")")), y = expression(paste("Herbivory rate (g m"^"-2"*"d"^"-1"*")")), color = "", linetype = "")+
theme(strip.background = element_blank())+
theme(legend.position = c(.85,.90))+
theme(legend.title=element_blank())+
theme(
strip.background = element_blank(),
strip.text.x = element_blank()
)+
theme(axis.title.x= element_text(color= "black", size=14),
axis.title.y= element_text(color= "black", size=14))+
theme(legend.background = element_blank(), legend.box.background = element_blank() )+
theme(axis.text = element_text(size = 12))+
theme(legend.text=element_text(size=12))
fig2
ggsave("figures/herbivoryXdensity_fig2.png", fig2, device = "png",width=7,height=3.5)
ggsave("figures/herbivoryXdensity_fig2.pdf", fig2, device = "pdf", useDingbats = FALSE)
tiff(filename="figures/Fig2.tif",height=5600/2,width=5200,units="px",res=800,compression="lzw")
fig2
dev.off()
########################
## Summary stats
########################
predict(lm1, newdata = list(biomass = 668), se.fit = T) #Using the model to predict herbivory rate at the transition density cited in Ling et al. 2016
#------------------------------------------------
# Supplemental figure 1
#-----------------------------------------------
# Mass-specific (per-gram) consumption: herbivory rate divided by urchin biomass.
pf$con_per_g_biomass <- pf$herbivory_rate / pf$biomass #adding con_per_g_biomass to the purple urchin dataset
# Does per-gram consumption change with biomass for purple urchins?
s1 <- lm(con_per_g_biomass ~ biomass, pf)
summary(s1)
modelassump(s1)  # NOTE(review): check the diagnostics for heteroscedasticity here too
rf$con_per_g_biomass <- rf$herbivory_rate / rf$biomass #adding con_per_g_biomass to the red urchin dataset
s2 <- lm(con_per_g_biomass ~ biomass, rf) #linear model of per-gram consumption of red urchins
summary(s2)
# p > 0.05: no evidence that the slope differs from zero for red urchins.
modelassump(s2)
plot(con_per_g_biomass ~ biomass, pf)
plot(con_per_g_biomass ~ biomass, rf)  # red urchin values are noticeably more dispersed
gg <- data.frame(biomass = seq(0, max(df$biomass, na.rm = T), length.out = 1000)) #biomass values spanning the dataset
gg$`Purple urchin` <- predict(s1, newdata = gg)
gg$`Red urchin` <- predict(s2, newdata = gg) #linear-model predictions of per-gram consumption for each species
gg <- gg %>% gather(sp, prediction, -biomass) #long format: one row per species x biomass value
df$con_per_g_biomass <- df$herbivory_rate / df$biomass
S1 <- ggplot(df, aes(x = biomass, y = con_per_g_biomass))+
geom_jitter(aes(fill = sp), pch = 21, show.legend = F)+
scale_fill_manual(values = c("#762a83", "#d73027"))+
facet_wrap(~sp, scales = "free")+
scale_x_continuous(limits = c(0, 2000))+
scale_y_continuous(limits = c(-0.02, 0.06))+
geom_hline(yintercept = 0, lty = "dashed", color = "gray")+
labs(x = expression(paste("Urchin biomass density (g m"^"-2"*")")),
y = expression(paste("Herbivory rate (g"["kelp"]*"g"["urc"]^"-1"*"m"^"-2"*"d"^"-1"*")")),
color = "", linetype = "")+
theme_classic()+
theme(strip.text = element_text(size = 10))+
theme(strip.background = element_blank())+
theme(legend.position = c(.90,.90))+
theme(legend.title=element_blank())+
theme(axis.title.x= element_text(color= "black", size=20),
axis.title.y= element_text(color= "black", size=20))+
theme(legend.text=element_text(size=10))+
theme(legend.background = element_rect(
size=0.5, linetype ="solid"))+
theme(axis.text = element_text(size = 15))+
theme(legend.text=element_text(size=15))
ggsave(here("figures", "percapconsumptionxbiomass.png"), S1, device = "png", width = 8.5, height = 5)
#----------------------------------------
# deltaAIC table
#----------------------------------------
pl <- c(
`(Intercept)` = "Intercept"
)
sjPlot::tab_model(lm1, pow2, sig,
show.aic = T,
show.icc = F,
show.loglik = T,
show.ngroups = F,
pred.labels = pl,
dv.labels = c("Linear", "Power-law", "Sigmoid"), file = here::here("figures/", "AICtablePurple.html"))
sjPlot::tab_model(lm1.r, pow2.r, sig.r,
show.aic = T,
show.icc = F,
show.loglik = T,
show.ngroups = F,
pred.labels = pl,
dv.labels = c("Linear", "Power-law", "Sigmoid"), file = here::here("figures/", "AICtableRed.html"))
#-----------------------------------------------------------------------------------------
## Test the effects of the tank dividers
#-----------------------------------------------------------------------------------------
bd <- df %>% separate(tank, into = c("tank", "side"), sep = "[-]")
aov <- aov(herbivory_rate ~ side, bd)
summary(aov)
TukeyHSD(aov)
# Per-tank ANOVAs: test the side (divider) effect within each tank.
out <- list()
tanks <- unique(bd$tank)
tanks <- tanks[tanks != 9]  # tank 9 excluded -- reason not documented here; TODO confirm
# Bug fix: iterate over the filtered `tanks` vector. The original indexed
# 1:length(unique(bd$tank)), which runs past the end of `tanks` after the
# filter above, giving tanks[i] = NA and an ANOVA on an empty subset.
for (i in seq_along(tanks)) {
  out[[i]] <- summary(aov(herbivory_rate ~ side, bd[bd$tank == tanks[i], ]))
}
table(bd$tank, bd$side)
ggplot(bd, aes(x = tank, group = side, y = herbivory_rate))+
geom_bar(aes(fill = side), stat = "identity", position = "dodge")
|
# Yige Wu @ WashU 2018 Apr
## check regulated pairs
## Assembles per-sample protein/phospho levels and mutation status for
## significant mutation-impact pairs and plots substrate phosphosite abundance.
# source ------------------------------------------------------------------
source('/Users/yigewu/Box Sync/cptac2p_analysis/phospho_network/phospho_network_plotting.R')
source('/Users/yigewu/Box Sync/cptac2p_analysis/phospho_network/phospho_network_shared.R')
library(ggrepel)
# devtools::install_github("tidyverse/ggplot2")
library(ggplot2)
# inputs ------------------------------------------------------------------
## input druggable gene list
# NOTE(review): despite the comment above, this reads clinical specimen metadata.
clinical <- fread(input = paste0(cptac_sharedD, "Specimen_Data_20161005_Yige_20180307.txt"), data.table = F)
## input enzyme_substrate table
ptms_site_pairs_sup <- read_csv(paste0(ppnD, "compile_enzyme_substrate/tables/compile_omnipath_networkin_depod/omnipath_networkin_enzyme_substrate_site_level_union_source_cleaned_addDEPOD_extended.csv"))
# Mutation-impact results; keep only phosphosite-level substrate rows.
mutimpact <- fread(input = paste0(ppnD, "genoalt/tables/merge_mutation_impact/mutation_impact.txt"), data.table = F)
mutimpact_pho <- mutimpact[mutimpact$substrate_type == "phosphosite",]
# set variables -----------------------------------------------------------
mut_p <- 0.05  # p-value cutoff for selecting mutation-impact pairs to plot
# For each cancer type and each significant mutation-impact pair (enzyme
# mutation -> substrate phosphosite), assemble a per-sample table of enzyme
# protein level, substrate phosphosite level, enzyme phospho level, and
# mutation status, then plot the substrate phosphosite abundance split by
# whether the sample carries an upstream (enzyme) mutation.
for (cancer in c("BRCA")) {
## input mutation impact table
mutimpact_pho_can <- mutimpact_pho[mutimpact_pho$Cancer == cancer & mutimpact_pho$p_value < mut_p,]
for (i in 1:nrow(mutimpact_pho_can)) {
enzyme <- as.character(mutimpact_pho_can[i, "Mutated_Gene"])
substrate <- as.character(mutimpact_pho_can[i, "Substrate_Gene"])
rsd <- as.character(mutimpact_pho_can[i, "SUB_MOD_RSD"])
# NOTE(review): the three fread() calls below re-read the same files on every
# iteration of the i-loop; hoisting them above the loop would avoid redundant I/O.
pro_data <- fread(input = paste(cptac_sharedD, cancer,"/",prefix[cancer], "_PRO_formatted_normalized_noControl.txt",sep=""), data.table = F)
pro_data <- pro_data[pro_data$Gene == enzyme,]
## input phospho level
pho_data <- fread(input = paste(cptac_sharedD, cancer,"/",prefix[cancer], "_PHO_formatted_normalized_noControl.txt",sep=""), data.table = F)
pho_data <- pho_data[pho_data$Gene == substrate,]
## input collapsed (gene-level) phospho for the enzyme
phog_data <- fread(input = paste(cptac_sharedD, cancer,"/",prefix[cancer], "_collapsed_PHO_formatted_normalized_replicate_averaged_Tumor.txt",sep=""), data.table = F)
phog_data <- phog_data[phog_data$Gene == enzyme,]
# Keep only the phosphosite row matching the residue of interest.
pho_head <- formatPhosphosite(phosphosite_vector = pho_data$Phosphosite, gene_vector = pho_data$Gene)
pho_data <- pho_data[pho_head$SUB_MOD_RSD == rsd,]
# Reshape wide (one column per sample) to long, then merge the three measures
# into one table keyed by sample ("variable").
pro.m <- melt(pro_data)
colnames(pro.m)[ncol(pro.m)] <- "pro_kin"
pho.m <- melt(pho_data)
colnames(pho.m)[ncol(pho.m)] <- "pho_sub"
phog.m <- melt(phog_data)
colnames(phog.m)[ncol(phog.m)] <- "pho_kin"
sup_tab <- merge(pro.m[, c("variable", "pro_kin")], pho.m[, c("variable", "pho_sub")], all = T)
sup_tab <- merge(sup_tab, phog.m[, c("variable", "pho_kin")], all = T)
sup_tab$partID <- sampID2partID(sampleID_vector = as.vector(sup_tab$variable), sample_map = clinical)
## input maf
maf <- loadMaf(cancer = cancer, maf_files = maf_files)
# Restrict to nonsilent mutations in either the enzyme or the substrate.
maf <- maf[(maf$Hugo_Symbol == enzyme | maf$Hugo_Symbol == substrate),]
# maf <- maf[(maf$Hugo_Symbol == enzyme | maf$Hugo_Symbol == substrate | maf$Hugo_Symbol %in% unique(ptms_site_pairs_sup$GENE[ptms_site_pairs_sup$SUB_GENE == substrate & ptms_site_pairs_sup$SUB_MOD_RSD == rsd])),]
maf <- maf[maf$Variant_Classification != "Silent",]
if (nrow(maf) > 0) {
maf$partID <- str_split_fixed(string = maf$Tumor_Sample_Barcode, pattern = "_", 2)[,1]
maf$aa_change <- paste0(maf$Hugo_Symbol, ":", maf$HGVSp_Short)
maf$is.upstream <- ifelse(maf$Hugo_Symbol == enzyme, TRUE, FALSE)
sup_tab <- merge(sup_tab, maf[, c("partID", "Variant_Classification", "aa_change", "is.upstream")], all.x = T)
}
rm(maf)
# Collapse to one row per sample, concatenating all of that sample's
# mutations; flag whether any mutation hits the upstream enzyme.
tab2p <- sup_tab
tab2p_sort <- NULL
for (sampID in unique(tab2p$variable)) {
tab_tmp <- unique(tab2p[tab2p$variable == sampID, c("partID", "variable", "pro_kin", "pho_sub", "pho_kin")])
muts <- unique(as.vector(tab2p$aa_change[tab2p$variable == sampID]))
muts <- muts[!is.na(muts)]
if (length(muts) > 0) {
tab_tmp$aa_change <- paste0(muts, collapse = "\n")
tab_tmp$mutated <- T
tab_tmp$is.upstream <- any(grepl(pattern = enzyme, x = muts))
} else {
tab_tmp$aa_change <- NA
tab_tmp$mutated <- F
tab_tmp$is.upstream <- F
}
tab2p_sort <- rbind(tab2p_sort, tab_tmp)
}
# Fixed-seed jitter so points and their repelled labels share positions.
pos <- position_jitter(width = 0.5, seed = 1)
p = ggplot(tab2p_sort, aes(x=is.upstream, y=pho_sub, color = mutated, label= as.character(aa_change)))
p = p + geom_point(aes(shape = ifelse(!is.na(is.upstream) & (is.upstream == TRUE), "b", "a")),
position = pos, stroke = 0, alpha = 0.8)
p = p + geom_violin(fill = "grey", color = NA, alpha = 0.2)
p = p + geom_text_repel(aes(segment.color = mutated),
force = 1,
segment.size = 0.5, segment.alpha = 0.2,
size=1.5,alpha=0.8, position = pos)
# NOTE(review): the y-axis label below is missing a closing ")" after "log2 ratio".
p = p + labs(y=paste0(substrate, " ", rsd, " phosphorylation abundance(log2 ratio"))
p = p + theme_nogrid()
p = p + theme(axis.title = element_text(size=6), legend.position = 'none',
axis.text.x = element_text(colour="black", size=8, vjust=0.5),
axis.text.y = element_text(colour="black", size=8))#element_text(colour="black", size=14))
p = p + theme(title = element_text(size = 8))
p = p + scale_color_manual(values = c("TRUE" = "firebrick1", "FALSE" = "black"))
p
# One PDF per enzyme/substrate/residue pair.
fn = paste0(makeOutDir(resultD = resultD), enzyme, "_", substrate, "_", rsd, "_pho_sub.pdf")
ggsave(file=fn, height=4, width=4, useDingbats=FALSE)
}
}
# Count, per cancer type, how many genes in the collapsed phospho data are
# known kinases vs known phosphatases (DEPOD).
library(readxl)
pks <- fread(input = "./Ding_Lab/Projects_Current/IDG/IDG_shared_data/gene_lists/All_Kinase_cleaned.txt", data.table = F)
pps <- read_xlsx(path = "./pan3can_shared_data/Phospho_databases/DEPOD/DEPOD_201410_human_phosphatases.xlsx")
for (cancer in cancers_sort) {
phog_data <- fread(input = paste(cptac_sharedD, cancer,"/",prefix[cancer], "_collapsed_PHO_formatted_normalized_replicate_averaged_Tumor.txt",sep=""), data.table = F)
print(nrow(phog_data[phog_data$Gene %in% pks$gene,]))  # kinases with phospho data
print(nrow(phog_data[phog_data$Gene %in% pps$`Gene symbol`,]))  # phosphatases with phospho data
}
| /phospho_network/genoalt/figures/scatterplot_regression_genoalt_overlap.R | no_license | ding-lab/phospho-signaling | R | false | false | 6,303 | r | # Yige Wu @ WashU 2018 Apr
## check regulated pairs
# source ------------------------------------------------------------------
source('/Users/yigewu/Box Sync/cptac2p_analysis/phospho_network/phospho_network_plotting.R')
source('/Users/yigewu/Box Sync/cptac2p_analysis/phospho_network/phospho_network_shared.R')
library(ggrepel)
# devtools::install_github("tidyverse/ggplot2")
library(ggplot2)
# inputs ------------------------------------------------------------------
## input druggable gene list
clinical <- fread(input = paste0(cptac_sharedD, "Specimen_Data_20161005_Yige_20180307.txt"), data.table = F)
## input enzyme_substrate table
ptms_site_pairs_sup <- read_csv(paste0(ppnD, "compile_enzyme_substrate/tables/compile_omnipath_networkin_depod/omnipath_networkin_enzyme_substrate_site_level_union_source_cleaned_addDEPOD_extended.csv"))
mutimpact <- fread(input = paste0(ppnD, "genoalt/tables/merge_mutation_impact/mutation_impact.txt"), data.table = F)
mutimpact_pho <- mutimpact[mutimpact$substrate_type == "phosphosite",]
# set variables -----------------------------------------------------------
mut_p <- 0.05
for (cancer in c("BRCA")) {
## input mutation impact table
mutimpact_pho_can <- mutimpact_pho[mutimpact_pho$Cancer == cancer & mutimpact_pho$p_value < mut_p,]
for (i in 1:nrow(mutimpact_pho_can)) {
enzyme <- as.character(mutimpact_pho_can[i, "Mutated_Gene"])
substrate <- as.character(mutimpact_pho_can[i, "Substrate_Gene"])
rsd <- as.character(mutimpact_pho_can[i, "SUB_MOD_RSD"])
pro_data <- fread(input = paste(cptac_sharedD, cancer,"/",prefix[cancer], "_PRO_formatted_normalized_noControl.txt",sep=""), data.table = F)
pro_data <- pro_data[pro_data$Gene == enzyme,]
## input phospho level
pho_data <- fread(input = paste(cptac_sharedD, cancer,"/",prefix[cancer], "_PHO_formatted_normalized_noControl.txt",sep=""), data.table = F)
pho_data <- pho_data[pho_data$Gene == substrate,]
## input phospho level
phog_data <- fread(input = paste(cptac_sharedD, cancer,"/",prefix[cancer], "_collapsed_PHO_formatted_normalized_replicate_averaged_Tumor.txt",sep=""), data.table = F)
phog_data <- phog_data[phog_data$Gene == enzyme,]
pho_head <- formatPhosphosite(phosphosite_vector = pho_data$Phosphosite, gene_vector = pho_data$Gene)
pho_data <- pho_data[pho_head$SUB_MOD_RSD == rsd,]
pro.m <- melt(pro_data)
colnames(pro.m)[ncol(pro.m)] <- "pro_kin"
pho.m <- melt(pho_data)
colnames(pho.m)[ncol(pho.m)] <- "pho_sub"
phog.m <- melt(phog_data)
colnames(phog.m)[ncol(phog.m)] <- "pho_kin"
sup_tab <- merge(pro.m[, c("variable", "pro_kin")], pho.m[, c("variable", "pho_sub")], all = T)
sup_tab <- merge(sup_tab, phog.m[, c("variable", "pho_kin")], all = T)
sup_tab$partID <- sampID2partID(sampleID_vector = as.vector(sup_tab$variable), sample_map = clinical)
## input maf
maf <- loadMaf(cancer = cancer, maf_files = maf_files)
maf <- maf[(maf$Hugo_Symbol == enzyme | maf$Hugo_Symbol == substrate),]
# maf <- maf[(maf$Hugo_Symbol == enzyme | maf$Hugo_Symbol == substrate | maf$Hugo_Symbol %in% unique(ptms_site_pairs_sup$GENE[ptms_site_pairs_sup$SUB_GENE == substrate & ptms_site_pairs_sup$SUB_MOD_RSD == rsd])),]
maf <- maf[maf$Variant_Classification != "Silent",]
if (nrow(maf) > 0) {
maf$partID <- str_split_fixed(string = maf$Tumor_Sample_Barcode, pattern = "_", 2)[,1]
maf$aa_change <- paste0(maf$Hugo_Symbol, ":", maf$HGVSp_Short)
maf$is.upstream <- ifelse(maf$Hugo_Symbol == enzyme, TRUE, FALSE)
sup_tab <- merge(sup_tab, maf[, c("partID", "Variant_Classification", "aa_change", "is.upstream")], all.x = T)
}
rm(maf)
tab2p <- sup_tab
tab2p_sort <- NULL
for (sampID in unique(tab2p$variable)) {
tab_tmp <- unique(tab2p[tab2p$variable == sampID, c("partID", "variable", "pro_kin", "pho_sub", "pho_kin")])
muts <- unique(as.vector(tab2p$aa_change[tab2p$variable == sampID]))
muts <- muts[!is.na(muts)]
if (length(muts) > 0) {
tab_tmp$aa_change <- paste0(muts, collapse = "\n")
tab_tmp$mutated <- T
tab_tmp$is.upstream <- any(grepl(pattern = enzyme, x = muts))
} else {
tab_tmp$aa_change <- NA
tab_tmp$mutated <- F
tab_tmp$is.upstream <- F
}
tab2p_sort <- rbind(tab2p_sort, tab_tmp)
}
pos <- position_jitter(width = 0.5, seed = 1)
p = ggplot(tab2p_sort, aes(x=is.upstream, y=pho_sub, color = mutated, label= as.character(aa_change)))
p = p + geom_point(aes(shape = ifelse(!is.na(is.upstream) & (is.upstream == TRUE), "b", "a")),
position = pos, stroke = 0, alpha = 0.8)
p = p + geom_violin(fill = "grey", color = NA, alpha = 0.2)
p = p + geom_text_repel(aes(segment.color = mutated),
force = 1,
segment.size = 0.5, segment.alpha = 0.2,
size=1.5,alpha=0.8, position = pos)
p = p + labs(y=paste0(substrate, " ", rsd, " phosphorylation abundance(log2 ratio"))
p = p + theme_nogrid()
p = p + theme(axis.title = element_text(size=6), legend.position = 'none',
axis.text.x = element_text(colour="black", size=8, vjust=0.5),
axis.text.y = element_text(colour="black", size=8))#element_text(colour="black", size=14))
p = p + theme(title = element_text(size = 8))
p = p + scale_color_manual(values = c("TRUE" = "firebrick1", "FALSE" = "black"))
p
fn = paste0(makeOutDir(resultD = resultD), enzyme, "_", substrate, "_", rsd, "_pho_sub.pdf")
ggsave(file=fn, height=4, width=4, useDingbats=FALSE)
}
}
# Count, per cancer type, how many genes in the collapsed phospho (PHO) table
# are annotated kinases vs. phosphatases.
library(readxl)
# Curated kinase gene list (tab-separated; 'gene' column holds symbols).
pks <- fread(input = "./Ding_Lab/Projects_Current/IDG/IDG_shared_data/gene_lists/All_Kinase_cleaned.txt", data.table = F)
# DEPOD human phosphatase annotation spreadsheet ('Gene symbol' column).
pps <- read_xlsx(path = "./pan3can_shared_data/Phospho_databases/DEPOD/DEPOD_201410_human_phosphatases.xlsx")
# NOTE(review): cancers_sort, cptac_sharedD and prefix are defined elsewhere in
# this project -- presumably a character vector of cancer types, a base
# directory, and a named filename-prefix lookup; confirm against the setup script.
for (cancer in cancers_sort) {
  phog_data <- fread(input = paste(cptac_sharedD, cancer,"/",prefix[cancer], "_collapsed_PHO_formatted_normalized_replicate_averaged_Tumor.txt",sep=""), data.table = F)
  print(nrow(phog_data[phog_data$Gene %in% pks$gene,]))  # rows matching a kinase
  print(nrow(phog_data[phog_data$Gene %in% pps$`Gene symbol`,]))  # rows matching a phosphatase
}
|
## Course notes: a walkthrough of basic R data types.
x <- 1
print(x)
msg <- "Hello"
## This is comment
x <- 1:120
## vectors and lists
## c() create vectors of obj
x <- c(0.4, 0.5)
x <- c(TRUE, FALSE)
x <- c(T, F)  # T/F are reassignable shortcuts; TRUE/FALSE are safer in real code
x <- vector('complex', length = 10)
## explicit coercion
x <- 0:6
as.character(x)
as.complex(x)
as.logical(x)  # 0 coerces to FALSE, nonzero to TRUE
x <- list(1, "a", TRUE, 9 + 8i)  # lists may mix element types
## matrices
m <- matrix(nrow = 2, ncol = 3)  # no data given, so filled with NA
dim(m)
attributes(m)
m <- matrix(1:6, nrow = 2, ncol = 3)  # filled column-wise by default
m <- 1:10
dim(m) <- c(2,5)  # assigning a dim attribute turns a vector into a matrix
print(m)
x <- 1:8
y <- 2:5
cbind(x, y) ## add two columns (shorter vector is recycled)
rbind(x, y) ## add two rows
## Factors
f <- factor(c('yes', 'yes', 'no', 'no', 'yes'))
print(f)
table(f) # tell us how many of each level there are
unclass(f)  # underlying integer codes plus the levels attribute
f <- factor(c('yes', 'yes', 'no', 'no', 'yes'), levels = c('yes', 'no'))
print(f)
table(f)
## Missing Values
x <- c(1, 2, NA, 4, NaN, 9)
is.na(x)   # TRUE for both NA and NaN
is.nan(x)  # TRUE only for NaN
## Data Frames
x <- data.frame(foo = 1:4, bar = c(T, T, F, F))
print(x)
nrow(x)
ncol(x)
## Names
x <- 1:3
names(x)  # NULL until names are assigned
names(x) <- c('foo', 'bar', 'norf')
names(x)
print(x)
x <- list(a = 1, b = 2, c = 3)
print(x)
m <- matrix(1:4, nrow = 2, ncol = 2)
dimnames(m) <- list(c('a', 'b'), c('c', 'd'))  # row names, then column names
print(m)
| /2. R Programming/data_types.R | no_license | martafd/datasciencecoursera | R | false | false | 1,126 | r | x <- 1
print(x)
msg <- "Hello"
## This is comment
x <- 1:120
## vectors and lists
## c() create vectors of obj
x <- c(0.4, 0.5)
x <- c(TRUE, FALSE)
x <- c(T, F)
x <- vector('complex', length = 10)
## explicit coercion
x <- 0:6
as.character(x)
as.complex(x)
as.logical(x)
x <- list(1, "a", TRUE, 9 + 8i)
## matrices
m <- matrix(nrow = 2, ncol = 3)
dim(m)
attributes(m)
m <- matrix(1:6, nrow = 2, ncol = 3)
m <- 1:10
dim(m) <- c(2,5)
print(m)
x <- 1:8
y <- 2:5
cbind(x, y) ## add two colunms
rbind(x, y) ## add two rows
## Factors
f <- factor(c('yes', 'yes', 'no', 'no', 'yes'))
print(f)
table(f) # tell us how many of each level there are
unclass(f)
f <- factor(c('yes', 'yes', 'no', 'no', 'yes'), levels = c('yes', 'no'))
print(f)
table(f)
## Missing Values
x <- c(1, 2, NA, 4, NaN, 9)
is.na(x)
is.nan(x)
## Data Frames
x <- data.frame(foo = 1:4, bar = c(T, T, F, F))
print(x)
nrow(x)
ncol(x)
## Names
x <- 1:3
names(x)
names(x) <- c('foo', 'bar', 'norf')
names(x)
print(x)
x <- list(a = 1, b = 2, c = 3)
print(x)
m <- matrix(1:4, nrow = 2, ncol = 2)
dimnames(m) <- list(c('a', 'b'), c('c', 'd'))
print(m)
|
#' @title Limpia un vector de texto, que suele contener los nombres de un objeto.
#'
#' @description Los vectores resultantes son unicos y estan formadas unicamente por el caracter
#' \code{_}, numeros y letras. Por defecto, solo consistiran en caracteres ASCII, pero se puede
#' permitir que no sean ASCII (por ejemplo, Unicode) configurando \code{ascii=FALSE}.
#' Las preferencias de mayusculas pueden especificarse utilizando el parametro \code{case}.
#'
#'
#' Cuando \code{ascii=TRUE} (el valor predeterminado), los caracteres acentuados se transliteran
#' a ASCII. Por ejemplo, una "o" con dieresis alemana se convierte en "o", y
#' el caracter español "enye" se convierte en "n".
#' Esta funcion fue tomada del paquete janitor.
#'
#'
#'
#' @param string Un vector de caracteres de nombres para limpiar.
#' @param case Preferencias de mayusculas
#' @param sep_in (abreviatura de entrada separadora) si es un carácter, se interpreta como una expresión regular (envuelta internamente en stringr::regex()). El valor por defecto es una expresión regular que coincide con cualquier secuencia de valores no alfanuméricos. Todas las coincidencias serán reemplazadas por guiones bajos (además de "_" y " ", para los que esto siempre es cierto, incluso si se proporciona NULL). Estos guiones bajos se utilizan internamente para dividir las cadenas en subcadenas y especificar los límites de las palabras.
#' @param transliterations Un vector de caracteres (si no es NULL). Las entradas de este argumento deben ser elementos de stringi::stri_trans_list() (como "Latin-ASCII", que suele ser útil) o nombres de tablas de búsqueda (actualmente sólo se admite "german").
#' @param parsing_option Un entero que determinará la parsing_option.
#' @param numerals Carácter que especifica la alineación de los numerales ("medio", izquierda, derecha, asis o apretado). Es decir, numerales = "izquierda" garantiza que no haya ningún separador de salida delante de un dígito.
#' @param ... ...
#'
#'
#' @param replace Un vector de caracteres con nombre en el que el nombre se sustituye por el
#' value.
#' @param ascii Convertir los nombres a ASCII (TRUE, por defecto) o no (FALSE).
#' @param use_make_names ¿Deberia aplicarse el codigo {make.names()} para asegurar que la sea utilizable como un nombre sin comillas? (Evitar \code{make.names()} asegura que la salida es independiente de la localizacion, pero las comillas pueden ser necesarias).
#'
#' @return Devuelve el vector de caracteres "limpio".
#' @export
#' @seealso \code{\link[snakecase]{to_any_case}()}
#' @examples
#'
#' # limpiar los nombres de un vector:
#' x <- structure(1:3, names = c("nombre con espacio", "DosPalabras", "total $ (2009)"))
#' x
#' names(x) <- limpiar_nombres2(names(x))
#' x # Ya tiene los nombres limpios
#'
#' @importFrom stringi stri_trans_general
#' @importFrom stringr str_replace str_replace_all
#' @importFrom snakecase to_any_case
#' @encoding UTF-8
limpiar_nombres2 <- function(string,
                             case = "snake",
                             replace=
                               c(
                                 "\'"="",
                                 "\""="",
                                 "%"="_percent_",
                                 "#"="_number_"
                               ),
                             ascii=TRUE,
                             use_make_names=TRUE,
                             # default arguments for snake_case::to_any_case
                             sep_in = "\\.",
                             transliterations = "Latin-ASCII",
                             parsing_option = 1,
                             numerals = "asis",
                             ...) {
  # Handling "old_janitor" case for backward compatibility
  if (case == "old_janitor") {
    return(old_make_clean_names(string))
  }
  # Side effect only: warn if a micro/mu character is about to be mangled.
  warn_micro_mu(string=string, replace=replace)
  # Step 1: apply the user-supplied literal replacements (quotes, %, #, ...).
  replaced_names <-
    stringr::str_replace_all(
      string=string,
      pattern=replace
    )
  # Step 2: optionally transliterate accented/Unicode characters to ASCII.
  transliterated_names <-
    if (ascii) {
      stringi::stri_trans_general(
        replaced_names,
        id=available_transliterators(c("Any-Latin", "Greek-Latin", "Any-NFKD", "Any-NFC", "Latin-ASCII"))
      )
    } else {
      replaced_names
    }
  # Remove starting spaces and punctuation
  good_start <-
    stringr::str_replace(
      string=transliterated_names,
      # Description of this regexp:
      # \A: beginning of the string (rather than beginning of the line as ^ would indicate)
      # \h: any horizontal whitespace character (spaces, tabs, and anything else that is a Unicode whitespace)
      # \s: non-unicode whitespace matching (it may overlap with \h)
      # \p{}: indicates a unicode class of characters, so these will also match punctuation, symbols, separators, and "other" characters
      # * means all of the above zero or more times (not + so that the capturing part of the regexp works)
      # (.*)$: captures everything else in the string for the replacement
      pattern="\\A[\\h\\s\\p{Punctuation}\\p{Symbol}\\p{Separator}\\p{Other}]*(.*)$",
      replacement="\\1"
    )
  # Convert all interior spaces and punctuation to single dots
  # NOTE(review): str_replace (not str_replace_all) only rewrites the first
  # matching run; the remaining separators appear to be handled later by
  # make.names()/to_any_case() -- confirm against upstream janitor.
  cleaned_within <-
    stringr::str_replace(
      string=good_start,
      pattern="[\\h\\s\\p{Punctuation}\\p{Symbol}\\p{Separator}\\p{Other}]+",
      replacement="."
    )
  # make.names() is dependent on the locale and therefore will return different
  # system-dependent values (e.g. as in issue #268 with Japanese characters).
  made_names <-
    if (use_make_names) {
      make.names(cleaned_within)
    } else {
      cleaned_within
    }
  # Step 3: delegate the actual case conversion (snake, camel, ...) to snakecase.
  cased_names <-
    snakecase::to_any_case(
      made_names,
      case = case,
      sep_in = sep_in,
      transliterations = transliterations,
      parsing_option = parsing_option,
      numerals = numerals,
      ...
    )
  # Handle duplicated names - they mess up dplyr pipelines. This appends the
  # column number to repeated instances of duplicate variable names.
  while (any(duplicated(cased_names))) {
    dupe_count <-
      vapply(
        seq_along(cased_names), function(i) {
          sum(cased_names[i] == cased_names[1:i])
        },
        1L
      )
    cased_names[dupe_count > 1] <-
      paste(
        cased_names[dupe_count > 1],
        dupe_count[dupe_count > 1],
        sep = "_"
      )
  }
  cased_names
}
#' Avisa si el micro o el mu van a ser sustituidos por limpiar_nombres2()
#'
#' @inheritParams limpiar_nombres2
#' @param character Que caracter debe comprobarse ("micro" o "mu", o ambos)
#' @return TRUE si se emitio una advertencia o FALSE si no se emitio ninguna advertencia
#' @keywords Internal
#' @noRd
warn_micro_mu <- function(string, replace) {
  # Characters to check: every known micro/mu code point (the names of the
  # package-internal mu_to_u lookup table).
  micro_mu <- names(mu_to_u)
  # The vector of characters that exist but are not handled at all
  warning_characters <- character()
  # The vector of characters that exist and may be handled by a specific replacement
  warning_characters_specific <- character()
  for (current_unicode in micro_mu) {
    # Does the character exist in any of the names?
    has_character <- any(grepl(x=string, pattern=current_unicode, fixed=TRUE))
    if (has_character) {
      # Is there a general replacement for any occurrence of the character?
      has_replacement_general <- any(names(replace) %in% current_unicode)
      # Is there a specific replacement for some form including the character,
      # but it may not cover all of replacements?
      has_replacement_specific <- any(grepl(x=names(replace), pattern=current_unicode, fixed=TRUE))
      warning_characters <-
        c(
          warning_characters,
          current_unicode[!has_replacement_general & !has_replacement_specific]
        )
      warning_characters_specific <-
        c(
          warning_characters_specific,
          current_unicode[!has_replacement_general & has_replacement_specific]
        )
    }
  }
  # Issue the consolidated warnings, if needed
  warning_message_general <- NULL
  if (length(warning_characters) > 0) {
    warning_characters_utf <-
      sprintf("\\u%04x", sapply(X=warning_characters, FUN=utf8ToInt))
    warning_message_general <-
      sprintf(
        "Los siguientes caracteres estan en los nombres a limpiar pero no son reemplazados: %s",
        paste(warning_characters_utf, collapse=", ")
      )
  }
  warning_message_specific <- NULL
  if (length(warning_characters_specific) > 0) {
    warning_characters_utf <-
      sprintf("\\u%04x", sapply(X=warning_characters_specific, FUN=utf8ToInt))
    warning_message_specific <-
      sprintf(
        "Los siguientes caracteres estan en los nombres a limpiar pero no pueden ser reemplazados, compruebe los nombres de salida cuidadosamente: %s",
        paste(warning_characters_utf, collapse=", ")
      )
  }
  # '||' rather than '|': both operands are scalar (NULL-or-string), so use the
  # short-circuiting scalar operator in this 'if' condition.
  if (!is.null(warning_message_general) || !is.null(warning_message_specific)) {
    warning_message <- paste(c(warning_message_general, warning_message_specific), collapse="\n")
    warning(
      # Fix: warning() concatenates its arguments with no separator, so the
      # original bare "Cuidado" ran straight into the next sentence.
      "Cuidado: ",
      "El simbolo mu o micro esta en el vector de entrada, y puede haber sido convertido a \'m\' mientras que \'u\' puede haber sido esperado. ",
      "Considere a\u00f1adir lo siguiente al argumento `replace`:\n",
      warning_message
    )
  }
  # Return value (used by callers/tests): TRUE if any warning was raised.
  length(c(warning_characters, warning_characters_specific)) > 0
}
# copy of clean_names from janitor v0.3 on CRAN, to preserve old behavior
old_make_clean_names <- function(string) {
# Takes a data.frame, returns the same data frame with cleaned names
old_names <- string
new_names <- old_names %>%
gsub("\'", "", .) %>% # remove quotation marks
gsub("\"", "", .) %>% # remove quotation marks
gsub("%", "percent", .) %>%
gsub("^[ ]+", "", .) %>%
make.names(.) %>%
gsub("[.]+", "_", .) %>% # convert 1+ periods to single _
gsub("[_]+", "_", .) %>% # fix rare cases of multiple consecutive underscores
tolower(.) %>%
gsub("_$", "", .) # remove string-final underscores
# Handle duplicated names - they mess up dplyr pipelines
# This appends the column number to repeated instances of duplicate variable names
dupe_count <- vapply(seq_along(new_names), function(i) {
sum(new_names[i] == new_names[1:i])
}, integer(1))
new_names[dupe_count > 1] <- paste(
new_names[dupe_count > 1],
dupe_count[dupe_count > 1],
sep = "_"
)
new_names
}
#' Detect the available transliterators for stri_trans_general
#' @param wanted The transliterators desired for translation
#' @return A semicolon-separated list of the transliterators that are available.
#' @noRd
#' @importFrom stringi stri_trans_list
available_transliterators <- function(wanted) {
  # Keep only the requested transliterators that this stringi build supports.
  desired_available <- intersect(wanted, stringi::stri_trans_list())
  # '&&' rather than '&': both operands are scalar flags, so short-circuit and
  # skip the getOption() lookup entirely when nothing is missing.
  if (!identical(wanted, desired_available) && getOption("janitor_warn_transliterators", default=TRUE)) {
    warning(
      "Algunos transliteradores para convertir caracteres en nombres no estan disponibles \n",
      "en este sistema. Los resultados pueden ser diferentes si se ejecuta en un sistema diferente.\n",
      "Los transliteradores que faltan son: ",
      paste0(setdiff(wanted, desired_available), collapse=", "),
      "\n\nEste aviso solo se muestra una vez por sesion.\n",
      "Para suprimirlo, utilice lo siguiente:\n `options(janitor_warn_transliterators=FALSE)`\n",
      "Para que todos los transliteradores esten disponibles en su sistema, reinstale el stringi con:\n",
      '`install.packages(\"stringi\", type=\"source\", configure.args=\"--disable-pkg-config\")`'
    )
    # Only warn once per session
    options(janitor_warn_transliterators=FALSE)
  }
  # ICU-style semicolon-separated transliterator id string.
  paste(desired_available, collapse=";")
}
| /R/limpiar_nombres2.R | permissive | mariosandovalmx/tlamatini | R | false | false | 11,673 | r | #' @title Limpia un vector de texto, que suele contener los nombres de un objeto.
#'
#' @description Los vectores resultantes son unicos y estan formadas unicamente por el caracter
#' \code{_}, numeros y letras. Por defecto, solo consistiran en caracteres ASCII, pero se puede
#' permitir que no sean ASCII (por ejemplo, Unicode) configurando \code{ascii=FALSE}.
#' Las preferencias de mayusculas pueden especificarse utilizando el parametro \code{case}.
#'
#'
#' Cuando \code{ascii=TRUE} (el valor predeterminado), los caracteres acentuados se transliteran
#' a ASCII. Por ejemplo, una "o" con dieresis alemana se convierte en "o", y
#' el caracter español "enye" se convierte en "n".
#' Esta funcion fue tomada del paquete janitor.
#'
#'
#'
#' @param string Un vector de caracteres de nombres para limpiar.
#' @param case Preferencias de mayusculas
#' @param sep_in (abreviatura de entrada separadora) si es un carácter, se interpreta como una expresión regular (envuelta internamente en stringr::regex()). El valor por defecto es una expresión regular que coincide con cualquier secuencia de valores no alfanuméricos. Todas las coincidencias serán reemplazadas por guiones bajos (además de "_" y " ", para los que esto siempre es cierto, incluso si se proporciona NULL). Estos guiones bajos se utilizan internamente para dividir las cadenas en subcadenas y especificar los límites de las palabras.
#' @param transliterations Un vector de caracteres (si no es NULL). Las entradas de este argumento deben ser elementos de stringi::stri_trans_list() (como "Latin-ASCII", que suele ser útil) o nombres de tablas de búsqueda (actualmente sólo se admite "german").
#' @param parsing_option Un entero que determinará la parsing_option.
#' @param numerals Carácter que especifica la alineación de los numerales ("medio", izquierda, derecha, asis o apretado). Es decir, numerales = "izquierda" garantiza que no haya ningún separador de salida delante de un dígito.
#' @param ... ...
#'
#'
#' @param replace Un vector de caracteres con nombre en el que el nombre se sustituye por el
#' value.
#' @param ascii Convertir los nombres a ASCII (TRUE, por defecto) o no (FALSE).
#' @param use_make_names ¿Deberia aplicarse el codigo {make.names()} para asegurar que la sea utilizable como un nombre sin comillas? (Evitar \code{make.names()} asegura que la salida es independiente de la localizacion, pero las comillas pueden ser necesarias).
#'
#' @return Devuelve el vector de caracteres "limpio".
#' @export
#' @seealso \code{\link[snakecase]{to_any_case}()}
#' @examples
#'
#' # limpiar los nombres de un vector:
#' x <- structure(1:3, names = c("nombre con espacio", "DosPalabras", "total $ (2009)"))
#' x
#' names(x) <- limpiar_nombres2(names(x))
#' x # Ya tiene los nombres limpios
#'
#' @importFrom stringi stri_trans_general
#' @importFrom stringr str_replace str_replace_all
#' @importFrom snakecase to_any_case
#' @encoding UTF-8
limpiar_nombres2 <- function(string,
case = "snake",
replace=
c(
"\'"="",
"\""="",
"%"="_percent_",
"#"="_number_"
),
ascii=TRUE,
use_make_names=TRUE,
# default arguments for snake_case::to_any_case
sep_in = "\\.",
transliterations = "Latin-ASCII",
parsing_option = 1,
numerals = "asis",
...) {
# Handling "old_janitor" case for backward compatibility
if (case == "old_janitor") {
return(old_make_clean_names(string))
}
warn_micro_mu(string=string, replace=replace)
replaced_names <-
stringr::str_replace_all(
string=string,
pattern=replace
)
transliterated_names <-
if (ascii) {
stringi::stri_trans_general(
replaced_names,
id=available_transliterators(c("Any-Latin", "Greek-Latin", "Any-NFKD", "Any-NFC", "Latin-ASCII"))
)
} else {
replaced_names
}
# Remove starting spaces and punctuation
good_start <-
stringr::str_replace(
string=transliterated_names,
# Description of this regexp:
# \A: beginning of the string (rather than beginning of the line as ^ would indicate)
# \h: any horizontal whitespace character (spaces, tabs, and anything else that is a Unicode whitespace)
# \s: non-unicode whitespace matching (it may overlap with \h)
# \p{}: indicates a unicode class of characters, so these will also match punctuation, symbols, separators, and "other" characters
# * means all of the above zero or more times (not + so that the capturing part of the regexp works)
# (.*)$: captures everything else in the string for the replacement
pattern="\\A[\\h\\s\\p{Punctuation}\\p{Symbol}\\p{Separator}\\p{Other}]*(.*)$",
replacement="\\1"
)
# Convert all interior spaces and punctuation to single dots
cleaned_within <-
stringr::str_replace(
string=good_start,
pattern="[\\h\\s\\p{Punctuation}\\p{Symbol}\\p{Separator}\\p{Other}]+",
replacement="."
)
# make.names() is dependent on the locale and therefore will return different
# system-dependent values (e.g. as in issue #268 with Japanese characters).
made_names <-
if (use_make_names) {
make.names(cleaned_within)
} else {
cleaned_within
}
cased_names <-
snakecase::to_any_case(
made_names,
case = case,
sep_in = sep_in,
transliterations = transliterations,
parsing_option = parsing_option,
numerals = numerals,
...
)
# Handle duplicated names - they mess up dplyr pipelines. This appends the
# column number to repeated instances of duplicate variable names.
while (any(duplicated(cased_names))) {
dupe_count <-
vapply(
seq_along(cased_names), function(i) {
sum(cased_names[i] == cased_names[1:i])
},
1L
)
cased_names[dupe_count > 1] <-
paste(
cased_names[dupe_count > 1],
dupe_count[dupe_count > 1],
sep = "_"
)
}
cased_names
}
#' Avisa si el micro o el mu van a ser sustituidos por limpiar_nombres2()
#'
#' @inheritParams limpiar_nombres2
#' @param character Que caracter debe comprobarse ("micro" o "mu", o ambos)
#' @return TRUE si se emitio una advertencia o FALSE si no se emitio ninguna advertencia
#' @keywords Internal
#' @noRd
warn_micro_mu <- function(string, replace) {
micro_mu <- names(mu_to_u)
# The vector of characters that exist but are not handled at all
warning_characters <- character()
# The vector of characters that exist and may be handled by a specific replacement
warning_characters_specific <- character()
for (current_unicode in micro_mu) {
# Does the character exist in any of the names?
has_character <- any(grepl(x=string, pattern=current_unicode, fixed=TRUE))
if (has_character) {
# Is there a general replacement for any occurrence of the character?
has_replacement_general <- any(names(replace) %in% current_unicode)
# Is there a specific replacement for some form including the character,
# but it may not cover all of replacements?
has_replacement_specific <- any(grepl(x=names(replace), pattern=current_unicode, fixed=TRUE))
warning_characters <-
c(
warning_characters,
current_unicode[!has_replacement_general & !has_replacement_specific]
)
warning_characters_specific <-
c(
warning_characters_specific,
current_unicode[!has_replacement_general & has_replacement_specific]
)
}
}
# Issue the consolidated warnings, if needed
warning_message_general <- NULL
if (length(warning_characters) > 0) {
warning_characters_utf <-
sprintf("\\u%04x", sapply(X=warning_characters, FUN=utf8ToInt))
warning_message_general <-
sprintf(
"Los siguientes caracteres estan en los nombres a limpiar pero no son reemplazados: %s",
paste(warning_characters_utf, collapse=", ")
)
}
warning_message_specific <- NULL
if (length(warning_characters_specific) > 0) {
warning_characters_utf <-
sprintf("\\u%04x", sapply(X=warning_characters_specific, FUN=utf8ToInt))
warning_message_specific <-
sprintf(
"Los siguientes caracteres estan en los nombres a limpiar pero no pueden ser reemplazados, compruebe los nombres de salida cuidadosamente: %s",
paste(warning_characters_utf, collapse=", ")
)
}
if (!is.null(warning_message_general) | !is.null(warning_message_specific)) {
warning_message <- paste(c(warning_message_general, warning_message_specific), collapse="\n")
warning(
"Cuidado",
"El simbolo mu o micro esta en el vector de entrada, y puede haber sido convertido a \'m\' mientras que \'u\' puede haber sido esperado. ",
"Considere a\u00f1adir lo siguiente al argumento `replace`:\n",
warning_message
)
}
length(c(warning_characters, warning_characters_specific)) > 0
}
# copy of clean_names from janitor v0.3 on CRAN, to preserve old behavior
old_make_clean_names <- function(string) {
# Takes a data.frame, returns the same data frame with cleaned names
old_names <- string
new_names <- old_names %>%
gsub("\'", "", .) %>% # remove quotation marks
gsub("\"", "", .) %>% # remove quotation marks
gsub("%", "percent", .) %>%
gsub("^[ ]+", "", .) %>%
make.names(.) %>%
gsub("[.]+", "_", .) %>% # convert 1+ periods to single _
gsub("[_]+", "_", .) %>% # fix rare cases of multiple consecutive underscores
tolower(.) %>%
gsub("_$", "", .) # remove string-final underscores
# Handle duplicated names - they mess up dplyr pipelines
# This appends the column number to repeated instances of duplicate variable names
dupe_count <- vapply(seq_along(new_names), function(i) {
sum(new_names[i] == new_names[1:i])
}, integer(1))
new_names[dupe_count > 1] <- paste(
new_names[dupe_count > 1],
dupe_count[dupe_count > 1],
sep = "_"
)
new_names
}
#' Detect the available transliterators for stri_trans_general
#' @param wanted The transliterators desired for translation
#' @return A semicolon-separated list of the transliterators that are available.
#' @noRd
#' @importFrom stringi stri_trans_list
available_transliterators <- function(wanted) {
desired_available <- intersect(wanted, stringi::stri_trans_list())
if (!identical(wanted, desired_available) & getOption("janitor_warn_transliterators", default=TRUE)) {
warning(
"Algunos transliteradores para convertir caracteres en nombres no estan disponibles \n",
"en este sistema. Los resultados pueden ser diferentes si se ejecuta en un sistema diferente.\n",
"Los transliteradores que faltan son: ",
paste0(setdiff(wanted, desired_available), collapse=", "),
"\n\nEste aviso solo se muestra una vez por sesion.\n",
"Para suprimirlo, utilice lo siguiente:\n `options(janitor_warn_transliterators=FALSE)`\n",
"Para que todos los transliteradores esten disponibles en su sistema, reinstale el stringi con:\n",
'`install.packages(\"stringi\", type=\"source\", configure.args=\"--disable-pkg-config\")`'
)
# Only warn once per session
options(janitor_warn_transliterators=FALSE)
}
paste(desired_available, collapse=";")
}
|
#' Deprecated functions in trip
#'
#' These functions will be declared defunct in a future release.
#'
#' @name trip.split.exact
#' @aliases trip-deprecated trip.split.exact as.trip.SpatialLinesDataFrame
#' tripTransform as.ltraj.trip
#' @param x see \code{\link{cut.trip}}
#' @param dates see \code{\link{cut.trip}}
#' @param from trip object
#' @seealso
#'
#' \code{\link{cut.trip}}, \code{\link{as.Other}}
#'
NULL
#' @rdname trip.split.exact
#' @export
as.SpatialLinesDataFrame.trip <- function (from)
{
  ## Deprecated alias; call the coercion method directly instead.
  .Deprecated('as(x, "SpatialLinesDataFrame")')
  as(from, "SpatialLinesDataFrame")
}
#' @rdname trip.split.exact
#' @export
trip.split.exact <- function(x, dates) {
  ## Deprecated alias for cut.trip(): splits the trip at the given dates.
  .Deprecated("cut.trip")
  cut(x, dates)
}
#' @rdname trip.split.exact
#' @param xy \code{trip} object
#' @export
as.ltraj.trip <- function(xy) {
  ## Deprecated alias; use the trip -> ltraj coercion.
  .Deprecated('as(x, "ltraj")')
  as(xy, "ltraj")
}
##' @rdname trip.split.exact
##' @export
as.trip.SpatialLinesDataFrame <- function(from) {
  ## Despite its name, this historical function coerces trip -> SpatialLinesDataFrame;
  ## the reverse coercion never existed (see the deprecation message).
  .Deprecated('as(x, "SpatialLinesDataFrame") or explode(x) ... the original definition was an error, there is no general coercion method available for SpatialLinesDataFrame=>trip')
  ##as.SpatialLinesDataFrame.trip(from)
  as(from, "SpatialLinesDataFrame")
}
| /R/trip-deprecated.R | no_license | cran/trip | R | false | false | 1,243 | r | #' Deprecated functions in trip
#'
#' These functions will be declared defunct in a future release.
#'
#' @name trip.split.exact
#' @aliases trip-deprecated trip.split.exact as.trip.SpatialLinesDataFrame
#' tripTransform as.ltraj.trip
#' @param x see \code{\link{cut.trip}}
#' @param dates see \code{\link{cut.trip}}
#' @param from trip object
#' @seealso
#'
#' \code{\link{cut.trip}}, \code{\link{as.Other}}
#'
NULL
#' @rdname trip.split.exact
#' @export
as.SpatialLinesDataFrame.trip <- function (from)
{
.Deprecated('as(x, "SpatialLinesDataFrame")')
as(from, "SpatialLinesDataFrame")
}
#' @rdname trip.split.exact
#' @export
trip.split.exact <- function(x, dates) {
.Deprecated("cut.trip")
cut(x, dates)
}
#' @rdname trip.split.exact
#' @param xy \code{trip} object
#' @export
as.ltraj.trip <- function(xy) {
.Deprecated('as(x, "ltraj")')
as(xy, "ltraj")
}
##' @rdname trip.split.exact
##' @export
as.trip.SpatialLinesDataFrame <- function(from) {
.Deprecated('as(x, "SpatialLinesDataFrame") or explode(x) ... the original definition was an error, there is no general coercion method available for SpatialLinesDataFrame=>trip')
##as.SpatialLinesDataFrame.trip(from)
as(from, "SpatialLinesDataFrame")
}
|
/Amicia_Canterbury_Doubs_Assignment.R | no_license | amiciacanterbury/QE2021 | R | false | false | 6,548 | r | ||
#' @title Generate threshold vs. performance(s) for 2-class classification.
#'
#' @description
#' Generates data on threshold vs. performance(s) for 2-class classification that can be used for plotting.
#'
#' @family generate_plot_data
#' @family thresh_vs_perf
#' @aliases ThreshVsPerfData
#'
#' @template arg_plotroc_obj
#' @template arg_measures
#' @param gridsize [\code{integer(1)}]\cr
#' Grid resolution for x-axis (threshold).
#' Default is 100.
#' @param aggregate [\code{logical(1)}]\cr
#' Whether to aggregate \code{\link{ResamplePrediction}}s or to plot the performance
#' of each iteration separately.
#' Default is \code{TRUE}.
#' @param task.id [\code{character(1)}]\cr
#' Selected task in \code{\link{BenchmarkResult}} to do plots for, ignored otherwise.
#' Default is first task.
#' @return [\code{ThreshVsPerfData}]. A named list containing the measured performance
#' across the threshold grid, the measures, and whether the performance estimates were
#' aggregated (only applicable for (list of) \code{\link{ResampleResult}}s).
#' @export
generateThreshVsPerfData = function(obj, measures, gridsize = 100L, aggregate = TRUE, task.id = NULL)
  UseMethod("generateThreshVsPerfData")  # S3 dispatch on the class of 'obj'
#' @export
generateThreshVsPerfData.Prediction = function(obj, measures, gridsize = 100L, aggregate = TRUE,
  task.id = NULL) {
  # Only binary classification with probability predictions is supported.
  checkPrediction(obj, task.type = "classif", binary = TRUE, predict.type = "prob")
  # Wrap the single prediction in a named list and delegate to the list method.
  generateThreshVsPerfData.list(namedList("prediction", obj), measures, gridsize, aggregate, task.id)
}
#' @export
generateThreshVsPerfData.ResampleResult = function(obj, measures, gridsize = 100L, aggregate = TRUE,
  task.id = NULL) {
  # Extract the pooled prediction object from the resample result.
  obj = getRRPredictions(obj)
  checkPrediction(obj, task.type = "classif", binary = TRUE, predict.type = "prob")
  # task.id is intentionally not forwarded: per the roxygen docs it only
  # applies to BenchmarkResult input.
  generateThreshVsPerfData.Prediction(obj, measures, gridsize, aggregate)
}
#' @export
generateThreshVsPerfData.BenchmarkResult = function(obj, measures, gridsize = 100L, aggregate = TRUE,
  task.id = NULL) {
  # Default to the first task in the benchmark if none was selected.
  tids = getBMRTaskIds(obj)
  if (is.null(task.id))
    task.id = tids[1L]
  else
    assertChoice(task.id, tids)
  # One prediction per learner for the chosen task.
  obj = getBMRPredictions(obj, task.ids = task.id, as.df = FALSE)[[1L]]
  for (x in obj)
    checkPrediction(x, task.type = "classif", binary = TRUE, predict.type = "prob")
  generateThreshVsPerfData.list(obj, measures, gridsize, aggregate, task.id)
}
#' @export
generateThreshVsPerfData.list = function(obj, measures, gridsize = 100L, aggregate = TRUE, task.id = NULL) {
  # Workhorse method: 'obj' is a (named) list of Prediction or ResampleResult objects.
  assertList(obj, c("Prediction", "ResampleResult"), min.len = 1L)
  ## unwrap ResampleResult to Prediction and set default names
  if (inherits(obj[[1L]], "ResampleResult")) {
    if (is.null(names(obj)))
      names(obj) = extractSubList(obj, "learner.id")
    obj = extractSubList(obj, "pred", simplify = FALSE)
  }
  assertList(obj, names = "unique")
  # All predictions are assumed to stem from the same task; use the first descriptor.
  td = extractSubList(obj, "task.desc", simplify = FALSE)[[1L]]
  measures = checkMeasures(measures, td)
  # Disambiguate measure ids that would otherwise collide as column names.
  mids = replaceDupeMeasureNames(measures, "id")
  names(measures) = mids
  # Evaluation grid of classification thresholds in [0, 1].
  grid = data.frame(threshold = seq(0, 1, length.out = gridsize))
  # Per-iteration output is only possible when *all* predictions are resampled;
  # vapply (type-stable) instead of sapply.
  resamp = all(vapply(obj, function(x) inherits(x, "ResamplePrediction"), logical(1L)))
  out = lapply(obj, function(x) {
    do.call("rbind", lapply(grid$threshold, function(th) {
      pp = setThreshold(x, threshold = th)
      # '&&' rather than '&': scalar condition in 'if'.
      if (!aggregate && resamp) {
        # One performance row per resampling iteration at this threshold.
        iter = seq_len(pp$instance$desc$iters)
        asMatrixRows(lapply(iter, function(i) {
          pp$data = pp$data[pp$data$iter == i, ]
          c(setNames(performance(pp, measures = measures), mids), "iter" = i, "threshold" = th)
        }))
      } else {
        # Single (aggregated) performance row per threshold.
        c(setNames(performance(pp, measures = measures), mids), "threshold" = th)
      }
    }))
  })
  if (length(obj) == 1L && inherits(obj[[1L]], "Prediction")) {
    out = out[[1L]]
    colnames(out)[!colnames(out) %in% c("iter", "threshold", "learner")] = mids
  } else {
    # Multiple learners: stack the per-learner tables, keyed by a 'learner' column.
    out = setDF(rbindlist(lapply(out, as.data.table), fill = TRUE, idcol = "learner"))
    colnames(out)[!colnames(out) %in% c("iter", "threshold", "learner")] = mids
  }
  makeS3Obj("ThreshVsPerfData",
    measures = measures,
    data = as.data.frame(out),
    aggregate = aggregate)
}
#' @title Plot threshold vs. performance(s) for 2-class classification using ggplot2.
#'
#' @description
#' Plots threshold vs. performance(s) data that has been generated with \code{\link{generateThreshVsPerfData}}.
#'
#' @family plot
#' @family thresh_vs_perf
#'
#' @param obj [\code{ThreshVsPerfData}]\cr
#' Result of \code{\link{generateThreshVsPerfData}}.
#' @param measures [\code{\link{Measure}} | list of \code{\link{Measure}}]\cr
#' Performance measure(s) to plot.
#' Must be a subset of those used in \code{\link{generateThreshVsPerfData}}.
#' Default is all the measures stored in \code{obj} generated by
#' \code{\link{generateThreshVsPerfData}}.
#' @param facet [\code{character(1)}]\cr
#' Selects \dQuote{measure} or \dQuote{learner} to be the facetting variable.
#' The variable mapped to \code{facet} must have more than one unique value, otherwise it will
#' be ignored. The variable not chosen is mapped to color if it has more than one unique value.
#' The default is \dQuote{measure}.
#' @param mark.th [\code{numeric(1)}]\cr
#' Mark given threshold with vertical line?
#' Default is \code{NA} which means not to do it.
#' @param pretty.names [\code{logical(1)}]\cr
#' Whether to use the \code{\link{Measure}} name instead of the id in the plot.
#' Default is \code{TRUE}.
#' @template arg_facet_nrow_ncol
#' @template ret_gg2
#' @export
#' @examples
#' lrn = makeLearner("classif.rpart", predict.type = "prob")
#' mod = train(lrn, sonar.task)
#' pred = predict(mod, sonar.task)
#' pvs = generateThreshVsPerfData(pred, list(acc, setAggregation(acc, train.mean)))
#' plotThreshVsPerf(pvs)
plotThreshVsPerf = function(obj, measures = obj$measures,
facet = "measure", mark.th = NA_real_,
pretty.names = TRUE, facet.wrap.nrow = NULL, facet.wrap.ncol = NULL) {
assertClass(obj, classes = "ThreshVsPerfData")
mappings = c("measure", "learner")
assertChoice(facet, mappings)
color = mappings[mappings != facet]
measures = checkMeasures(measures, obj)
checkSubset(extractSubList(measures, "id"), extractSubList(obj$measures, "id"))
mids = replaceDupeMeasureNames(measures, "id")
names(measures) = mids
id.vars = "threshold"
resamp = "iter" %in% colnames(obj$data)
if (resamp) id.vars = c(id.vars, "iter")
if ("learner" %in% colnames(obj$data)) id.vars = c(id.vars, "learner")
obj$data = obj$data[, c(id.vars, names(measures))]
if (pretty.names) {
mnames = replaceDupeMeasureNames(measures, "name")
colnames(obj$data) = mapValues(colnames(obj$data), names(measures), mnames)
} else {
mnames = names(measures)
}
data = setDF(melt(as.data.table(obj$data), measure.vars = mnames, variable.name = "measure", value.name = "performance", id.vars = id.vars))
if (!is.null(data$learner))
nlearn = length(unique(data$learner))
else
nlearn = 1L
nmeas = length(unique(data$measure))
if ((color == "learner" & nlearn == 1L) | (color == "measure" & nmeas == 1L))
color = NULL
if ((facet == "learner" & nlearn == 1L) | (facet == "measure" & nmeas == 1L))
facet = NULL
if (resamp & !obj$aggregate & is.null(color)) {
group = "iter"
} else if (resamp & !obj$aggregate & !is.null(color)) {
data$int = interaction(data[["iter"]], data[[color]])
group = "int"
} else {
group = NULL
}
plt = ggplot(data, aes_string(x = "threshold", y = "performance"))
plt = plt + geom_line(aes_string(group = group, color = color))
if (!is.na(mark.th))
plt = plt + geom_vline(xintercept = mark.th)
if (!is.null(facet)) {
plt = plt + facet_wrap(facet, scales = "free_y", nrow = facet.wrap.nrow,
ncol = facet.wrap.ncol)
}
else if (length(obj$measures) == 1L)
plt = plt + ylab(obj$measures[[1]]$name)
else
plt = plt + ylab("performance")
return(plt)
}
#' @title Plot threshold vs. performance(s) for 2-class classification using ggvis.
#'
#' @description
#' Plots threshold vs. performance(s) data that has been generated with \code{\link{generateThreshVsPerfData}}.
#'
#' @family plot
#' @family thresh_vs_perf
#'
#' @param obj [\code{ThreshVsPerfData}]\cr
#'   Result of \code{\link{generateThreshVsPerfData}}.
#' @param mark.th [\code{numeric(1)}]\cr
#'   Mark given threshold with vertical line?
#'   Default is \code{NA} which means not to do it.
#' @param interaction [\code{character(1)}]\cr
#'   Selects \dQuote{measure} or \dQuote{learner} to be used in a Shiny application
#'   making the \code{interaction} variable selectable via a drop-down menu.
#'   This variable must have more than one unique value, otherwise it will be ignored.
#'   The variable not chosen is mapped to color if it has more than one unique value.
#'   Note that if there are multiple learners and multiple measures interactivity is
#'   necessary as ggvis does not currently support facetting or subplots.
#'   The default is \dQuote{measure}.
#' @param pretty.names [\code{logical(1)}]\cr
#'   Whether to use the \code{\link{Measure}} name instead of the id in the plot.
#'   Default is \code{TRUE}.
#' @template ret_ggv
#' @export
#' @examples \dontrun{
#' lrn = makeLearner("classif.rpart", predict.type = "prob")
#' mod = train(lrn, sonar.task)
#' pred = predict(mod, sonar.task)
#' pvs = generateThreshVsPerfData(pred, list(tpr, fpr))
#' plotThreshVsPerfGGVIS(pvs)
#' }
plotThreshVsPerfGGVIS = function(obj, interaction = "measure", mark.th = NA_real_, pretty.names = TRUE) {
  assertClass(obj, classes = "ThreshVsPerfData")
  mappings = c("measure", "learner")
  assertChoice(interaction, mappings)
  assertFlag(pretty.names)
  # whichever mapping is not interactive is mapped to stroke color
  color = mappings[mappings != interaction]
  if (pretty.names) {
    mnames = replaceDupeMeasureNames(obj$measures, "name")
    colnames(obj$data) = mapValues(colnames(obj$data), names(obj$measures), mnames)
  } else {
    mnames = names(obj$measures)
  }
  id.vars = "threshold"
  resamp = "iter" %in% colnames(obj$data)
  if (resamp)
    id.vars = c(id.vars, "iter")
  if ("learner" %in% colnames(obj$data))
    id.vars = c(id.vars, "learner")
  data = setDF(data.table(melt(as.data.table(obj$data), measure.vars = mnames,
    variable.name = "measure", value.name = "performance", id.vars = id.vars)))
  nmeas = length(unique(data$measure))
  nlearn = if (is.null(data$learner)) 1L else length(unique(data$learner))
  # drop mappings with a single level (scalar conditions -> use && / ||)
  if ((color == "learner" && nlearn == 1L) || (color == "measure" && nmeas == 1L))
    color = NULL
  if ((interaction == "learner" && nlearn == 1L) || (interaction == "measure" && nmeas == 1L))
    interaction = NULL
  if (resamp && !obj$aggregate && is.null(color)) {
    group = "iter"
  } else if (resamp && !obj$aggregate && !is.null(color)) {
    group = c("iter", color)
  } else {
    group = NULL
  }
  create_plot = function(data, color = NULL, group = NULL, measures) {
    if (!is.null(color))
      plt = ggvis::ggvis(data, ggvis::prop("x", as.name("threshold")),
        ggvis::prop("y", as.name("performance")),
        ggvis::prop("stroke", as.name(color)))
    else
      plt = ggvis::ggvis(data, ggvis::prop("x", as.name("threshold")),
        ggvis::prop("y", as.name("performance")))
    if (!is.null(group))
      plt = ggvis::group_by(plt, .dots = group)
    plt = ggvis::layer_paths(plt)
    if (!is.na(mark.th) && is.null(interaction)) { ## cannot do vline with reactive data
      # BUG FIX: this previously read data$perf, which only worked via
      # partial matching of `$` against the "performance" column.
      vline_data = data.frame(x2 = rep(mark.th, 2),
        y2 = c(min(data$performance), max(data$performance)),
        measure = obj$measures[1])
      plt = ggvis::layer_lines(plt, ggvis::prop("x", as.name("x2")),
        ggvis::prop("y", as.name("y2")),
        ggvis::prop("stroke", "grey", scale = FALSE), data = vline_data)
    }
    plt = ggvis::add_axis(plt, "x", title = "threshold")
    if (length(measures) > 1L)
      plt = ggvis::add_axis(plt, "y", title = "performance")
    else
      plt = ggvis::add_axis(plt, "y", title = measures[[1L]]$name)
    plt
  }
  if (!is.null(interaction)) {
    # interactive case: shiny app with a drop-down for the interaction levels
    ui = shiny::shinyUI(
      shiny::pageWithSidebar(
        shiny::headerPanel("Threshold vs. Performance"),
        shiny::sidebarPanel(
          shiny::selectInput("interaction_select",
            stri_paste("choose a", interaction, sep = " "),
            levels(data[[interaction]]))
        ),
        shiny::mainPanel(
          shiny::uiOutput("ggvis_ui"),
          ggvis::ggvisOutput("ggvis")
        )
      ))
    server = shiny::shinyServer(function(input, output) {
      # reactive subset on the selected interaction level; ggvis accepts reactives
      data_sub = shiny::reactive(data[which(data[[interaction]] == input$interaction_select), ])
      plt = create_plot(data_sub, color, group, obj$measures)
      ggvis::bind_shiny(plt, "ggvis", "ggvis_ui")
    })
    shiny::shinyApp(ui, server)
  } else {
    create_plot(data, color, group, obj$measures)
  }
}
#' @title Plots a ROC curve using ggplot2.
#'
#' @description
#' Plots a ROC curve from predictions.
#'
#' @family plot
#' @family thresh_vs_perf
#'
#' @param obj [\code{ThreshVsPerfData}]\cr
#'   Result of \code{\link{generateThreshVsPerfData}}.
#' @param measures [\code{list(2)} of \code{\link{Measure}}]\cr
#'   Default is the first 2 measures passed to \code{\link{generateThreshVsPerfData}}.
#' @param diagonal [\code{logical(1)}]\cr
#'   Whether to plot a dashed diagonal line.
#'   Default is \code{TRUE}.
#' @param pretty.names [\code{logical(1)}]\cr
#'   Whether to use the \code{\link{Measure}} name instead of the id in the plot.
#'   Default is \code{TRUE}.
#' @template ret_gg2
#' @export
#' @examples
#' \donttest{
#' lrn = makeLearner("classif.rpart", predict.type = "prob")
#' fit = train(lrn, sonar.task)
#' pred = predict(fit, task = sonar.task)
#' roc = generateThreshVsPerfData(pred, list(fpr, tpr))
#' plotROCCurves(roc)
#'
#' r = bootstrapB632plus(lrn, sonar.task, iters = 3)
#' roc_r = generateThreshVsPerfData(r, list(fpr, tpr), aggregate = FALSE)
#' plotROCCurves(roc_r)
#'
#' r2 = crossval(lrn, sonar.task, iters = 3)
#' roc_l = generateThreshVsPerfData(list(boot = r, cv = r2), list(fpr, tpr), aggregate = FALSE)
#' plotROCCurves(roc_l)
#' }
plotROCCurves = function(obj, measures, diagonal = TRUE, pretty.names = TRUE) {
  assertClass(obj, "ThreshVsPerfData")
  if (missing(measures))
    measures = obj$measures[1:2]
  assertList(measures, "Measure", len = 2)
  assertFlag(diagonal)
  assertFlag(pretty.names)
  if (is.null(names(measures)))
    names(measures) = extractSubList(measures, "id")
  mnames = if (pretty.names) replaceDupeMeasureNames(measures, "name") else names(measures)
  # is there more than one learner in the data?
  mlearn = if (is.null(obj$data$learner)) FALSE else length(unique(obj$data$learner)) > 1L
  resamp = "iter" %in% colnames(obj$data)
  # scalar conditions below: use short-circuiting && instead of &
  if (!obj$aggregate && mlearn && resamp) {
    # one path per (learner, iteration) combination
    obj$data$int = interaction(obj$data$learner, obj$data$iter)
    p = ggplot(obj$data, aes_string(names(measures)[1L], names(measures)[2L], group = "int"))
    p = p + geom_path(alpha = .5)
  } else if (!obj$aggregate && !mlearn && resamp) {
    p = ggplot(obj$data, aes_string(names(measures)[1L], names(measures)[2L], group = "iter"))
    p = p + geom_path(alpha = .5)
  } else if (obj$aggregate && mlearn && !resamp) {
    # BUG FIX: group/color were previously passed to ggplot() outside the
    # aesthetic mapping and were silently ignored; they belong in aes_string().
    p = ggplot(obj$data, aes_string(names(measures)[1L], names(measures)[2L],
      group = "learner", color = "learner"))
    p = p + geom_path(alpha = .5)
  } else {
    # single aggregated curve: sort by threshold so the path is drawn in order
    obj$data = obj$data[order(obj$data$threshold), ]
    p = ggplot(obj$data, aes_string(names(measures)[1L], names(measures)[2L]))
    p = p + geom_path()
  }
  p = p + labs(x = mnames[1L], y = mnames[2L])
  if (length(unique(obj$data$learner)) > 1L)
    p = p + facet_wrap(~ learner)
  # only draw the unit diagonal if both measures live in [0, 1]
  in.unit.square = all(vapply(obj$data[, names(measures)],
    function(x) max(x, na.rm = TRUE), numeric(1L)) <= 1)
  if (diagonal && in.unit.square)
    p = p + geom_abline(aes(intercept = 0, slope = 1), linetype = "dashed", alpha = .5)
  p
}
| /R/generateThreshVsPerf.R | no_license | pherephobia/mlr | R | false | false | 16,126 | r | #' @title Generate threshold vs. performance(s) for 2-class classification.
#'
#' @description
#' Generates data on threshold vs. performance(s) for 2-class classification that can be used for plotting.
#'
#' @family generate_plot_data
#' @family thresh_vs_perf
#' @aliases ThreshVsPerfData
#'
#' @template arg_plotroc_obj
#' @template arg_measures
#' @param gridsize [\code{integer(1)}]\cr
#' Grid resolution for x-axis (threshold).
#' Default is 100.
#' @param aggregate [\code{logical(1)}]\cr
#' Whether to aggregate \code{\link{ResamplePrediction}}s or to plot the performance
#' of each iteration separately.
#' Default is \code{TRUE}.
#' @param task.id [\code{character(1)}]\cr
#' Selected task in \code{\link{BenchmarkResult}} to do plots for, ignored otherwise.
#' Default is first task.
#' @return [\code{ThreshVsPerfData}]. A named list containing the measured performance
#' across the threshold grid, the measures, and whether the performance estimates were
#' aggregated (only applicable for (list of) \code{\link{ResampleResult}}s).
#' @export
generateThreshVsPerfData = function(obj, measures, gridsize = 100L, aggregate = TRUE, task.id = NULL)
UseMethod("generateThreshVsPerfData")
#' @export
generateThreshVsPerfData.Prediction = function(obj, measures, gridsize = 100L, aggregate = TRUE,
  task.id = NULL) {
  # A single prediction must be a binary probability prediction; once
  # validated, wrap it into a one-element named list and delegate.
  checkPrediction(obj, task.type = "classif", binary = TRUE, predict.type = "prob")
  preds = namedList("prediction", obj)
  generateThreshVsPerfData.list(preds, measures, gridsize, aggregate, task.id)
}
#' @export
generateThreshVsPerfData.ResampleResult = function(obj, measures, gridsize = 100L, aggregate = TRUE,
  task.id = NULL) {
  # Extract the pooled predictions from the resample result, validate them,
  # and delegate to the Prediction method.
  # NOTE: task.id is accepted for interface compatibility but not used here.
  pred = getRRPredictions(obj)
  checkPrediction(pred, task.type = "classif", binary = TRUE, predict.type = "prob")
  generateThreshVsPerfData.Prediction(pred, measures, gridsize, aggregate)
}
#' @export
generateThreshVsPerfData.BenchmarkResult = function(obj, measures, gridsize = 100L, aggregate = TRUE,
  task.id = NULL) {
  # Select the requested task, defaulting to the first one in the benchmark.
  tids = getBMRTaskIds(obj)
  if (is.null(task.id)) {
    task.id = tids[1L]
  } else {
    assertChoice(task.id, tids)
  }
  preds = getBMRPredictions(obj, task.ids = task.id, as.df = FALSE)[[1L]]
  # Every learner's prediction must be a binary probability prediction.
  lapply(preds, checkPrediction, task.type = "classif", binary = TRUE, predict.type = "prob")
  generateThreshVsPerfData.list(preds, measures, gridsize, aggregate, task.id)
}
#' @export
generateThreshVsPerfData.list = function(obj, measures, gridsize = 100L, aggregate = TRUE, task.id = NULL) {
  # Workhorse method: all other methods normalize their input and end up here.
  assertList(obj, c("Prediction", "ResampleResult"), min.len = 1L)
  ## unwrap ResampleResult to Prediction and set default names
  if (inherits(obj[[1L]], "ResampleResult")) {
    if (is.null(names(obj)))
      names(obj) = extractSubList(obj, "learner.id")
    obj = extractSubList(obj, "pred", simplify = FALSE)
  }
  assertList(obj, names = "unique")
  # use the first prediction's task description to resolve/validate measures;
  # assumes all predictions stem from the same task -- TODO confirm upstream
  td = extractSubList(obj, "task.desc", simplify = FALSE)[[1L]]
  measures = checkMeasures(measures, td)
  # de-duplicate measure ids so they can serve as unique column names
  mids = replaceDupeMeasureNames(measures, "id")
  names(measures) = mids
  # equidistant threshold grid on [0, 1]
  grid = data.frame(threshold = seq(0, 1, length.out = gridsize))
  # per-iteration output is only possible if ALL inputs are resampled predictions
  resamp = all(sapply(obj, function(x) inherits(x, "ResamplePrediction")))
  out = lapply(obj, function(x) {
    # for each threshold: re-threshold the prediction and measure performance
    do.call("rbind", lapply(grid$threshold, function(th) {
      pp = setThreshold(x, threshold = th)
      if (!aggregate & resamp) {
        # one row per resampling iteration (performance on that iteration's subset)
        iter = seq_len(pp$instance$desc$iters)
        asMatrixRows(lapply(iter, function(i) {
          pp$data = pp$data[pp$data$iter == i, ]
          c(setNames(performance(pp, measures = measures), mids), "iter" = i, "threshold" = th)
        }))
      } else {
        # one aggregated row per threshold
        c(setNames(performance(pp, measures = measures), mids), "threshold" = th)
      }
    }))
  })
  if (length(obj) == 1L & inherits(obj[[1L]], "Prediction")) {
    # single plain prediction: no learner column needed
    out = out[[1L]]
    colnames(out)[!colnames(out) %in% c("iter", "threshold", "learner")] = mids
  } else {
    # multiple inputs: stack and record the originating learner via idcol
    out = setDF(rbindlist(lapply(out, as.data.table), fill = TRUE, idcol = "learner"))
    colnames(out)[!colnames(out) %in% c("iter", "threshold", "learner")] = mids
  }
  makeS3Obj("ThreshVsPerfData",
    measures = measures,
    data = as.data.frame(out),
    aggregate = aggregate)
}
#' @title Plot threshold vs. performance(s) for 2-class classification using ggplot2.
#'
#' @description
#' Plots threshold vs. performance(s) data that has been generated with \code{\link{generateThreshVsPerfData}}.
#'
#' @family plot
#' @family thresh_vs_perf
#'
#' @param obj [\code{ThreshVsPerfData}]\cr
#'   Result of \code{\link{generateThreshVsPerfData}}.
#' @param measures [\code{\link{Measure}} | list of \code{\link{Measure}}]\cr
#'   Performance measure(s) to plot.
#'   Must be a subset of those used in \code{\link{generateThreshVsPerfData}}.
#'   Default is all the measures stored in \code{obj} generated by
#'   \code{\link{generateThreshVsPerfData}}.
#' @param facet [\code{character(1)}]\cr
#'   Selects \dQuote{measure} or \dQuote{learner} to be the facetting variable.
#'   The variable mapped to \code{facet} must have more than one unique value, otherwise it will
#'   be ignored. The variable not chosen is mapped to color if it has more than one unique value.
#'   The default is \dQuote{measure}.
#' @param mark.th [\code{numeric(1)}]\cr
#'   Mark given threshold with vertical line?
#'   Default is \code{NA} which means not to do it.
#' @param pretty.names [\code{logical(1)}]\cr
#'   Whether to use the \code{\link{Measure}} name instead of the id in the plot.
#'   Default is \code{TRUE}.
#' @template arg_facet_nrow_ncol
#' @template ret_gg2
#' @export
#' @examples
#' lrn = makeLearner("classif.rpart", predict.type = "prob")
#' mod = train(lrn, sonar.task)
#' pred = predict(mod, sonar.task)
#' pvs = generateThreshVsPerfData(pred, list(acc, setAggregation(acc, train.mean)))
#' plotThreshVsPerf(pvs)
plotThreshVsPerf = function(obj, measures = obj$measures,
  facet = "measure", mark.th = NA_real_,
  pretty.names = TRUE, facet.wrap.nrow = NULL, facet.wrap.ncol = NULL) {
  assertClass(obj, classes = "ThreshVsPerfData")
  # consistency fix: validate the flag like plotThreshVsPerfGGVIS already does
  assertFlag(pretty.names)
  mappings = c("measure", "learner")
  assertChoice(facet, mappings)
  # whichever mapping is not used for facetting is mapped to color
  color = mappings[mappings != facet]
  measures = checkMeasures(measures, obj)
  checkSubset(extractSubList(measures, "id"), extractSubList(obj$measures, "id"))
  mids = replaceDupeMeasureNames(measures, "id")
  names(measures) = mids
  # id variables for melting: threshold, plus iter/learner when present
  id.vars = "threshold"
  resamp = "iter" %in% colnames(obj$data)
  if (resamp)
    id.vars = c(id.vars, "iter")
  if ("learner" %in% colnames(obj$data))
    id.vars = c(id.vars, "learner")
  obj$data = obj$data[, c(id.vars, names(measures))]
  if (pretty.names) {
    mnames = replaceDupeMeasureNames(measures, "name")
    colnames(obj$data) = mapValues(colnames(obj$data), names(measures), mnames)
  } else {
    mnames = names(measures)
  }
  # long format: one row per (threshold[, iter][, learner], measure)
  data = setDF(melt(as.data.table(obj$data), measure.vars = mnames,
    variable.name = "measure", value.name = "performance", id.vars = id.vars))
  nlearn = if (is.null(data$learner)) 1L else length(unique(data$learner))
  nmeas = length(unique(data$measure))
  # drop color/facet mappings that only have a single level
  # (scalar conditions, so use short-circuiting && / || instead of & / |)
  if ((color == "learner" && nlearn == 1L) || (color == "measure" && nmeas == 1L))
    color = NULL
  if ((facet == "learner" && nlearn == 1L) || (facet == "measure" && nmeas == 1L))
    facet = NULL
  if (resamp && !obj$aggregate && is.null(color)) {
    group = "iter"
  } else if (resamp && !obj$aggregate && !is.null(color)) {
    # one line per combination of resampling iteration and color level
    data$int = interaction(data[["iter"]], data[[color]])
    group = "int"
  } else {
    group = NULL
  }
  plt = ggplot(data, aes_string(x = "threshold", y = "performance"))
  plt = plt + geom_line(aes_string(group = group, color = color))
  if (!is.na(mark.th))
    plt = plt + geom_vline(xintercept = mark.th)
  if (!is.null(facet)) {
    plt = plt + facet_wrap(facet, scales = "free_y", nrow = facet.wrap.nrow,
      ncol = facet.wrap.ncol)
  } else if (length(obj$measures) == 1L) {
    plt = plt + ylab(obj$measures[[1L]]$name)
  } else {
    plt = plt + ylab("performance")
  }
  return(plt)
}
#' @title Plot threshold vs. performance(s) for 2-class classification using ggvis.
#'
#' @description
#' Plots threshold vs. performance(s) data that has been generated with \code{\link{generateThreshVsPerfData}}.
#'
#' @family plot
#' @family thresh_vs_perf
#'
#' @param obj [\code{ThreshVsPerfData}]\cr
#'   Result of \code{\link{generateThreshVsPerfData}}.
#' @param mark.th [\code{numeric(1)}]\cr
#'   Mark given threshold with vertical line?
#'   Default is \code{NA} which means not to do it.
#' @param interaction [\code{character(1)}]\cr
#'   Selects \dQuote{measure} or \dQuote{learner} to be used in a Shiny application
#'   making the \code{interaction} variable selectable via a drop-down menu.
#'   This variable must have more than one unique value, otherwise it will be ignored.
#'   The variable not chosen is mapped to color if it has more than one unique value.
#'   Note that if there are multiple learners and multiple measures interactivity is
#'   necessary as ggvis does not currently support facetting or subplots.
#'   The default is \dQuote{measure}.
#' @param pretty.names [\code{logical(1)}]\cr
#'   Whether to use the \code{\link{Measure}} name instead of the id in the plot.
#'   Default is \code{TRUE}.
#' @template ret_ggv
#' @export
#' @examples \dontrun{
#' lrn = makeLearner("classif.rpart", predict.type = "prob")
#' mod = train(lrn, sonar.task)
#' pred = predict(mod, sonar.task)
#' pvs = generateThreshVsPerfData(pred, list(tpr, fpr))
#' plotThreshVsPerfGGVIS(pvs)
#' }
plotThreshVsPerfGGVIS = function(obj, interaction = "measure", mark.th = NA_real_, pretty.names = TRUE) {
  assertClass(obj, classes = "ThreshVsPerfData")
  mappings = c("measure", "learner")
  assertChoice(interaction, mappings)
  assertFlag(pretty.names)
  # whichever mapping is not interactive is mapped to stroke color
  color = mappings[mappings != interaction]
  if (pretty.names) {
    mnames = replaceDupeMeasureNames(obj$measures, "name")
    colnames(obj$data) = mapValues(colnames(obj$data), names(obj$measures), mnames)
  } else {
    mnames = names(obj$measures)
  }
  id.vars = "threshold"
  resamp = "iter" %in% colnames(obj$data)
  if (resamp)
    id.vars = c(id.vars, "iter")
  if ("learner" %in% colnames(obj$data))
    id.vars = c(id.vars, "learner")
  data = setDF(data.table(melt(as.data.table(obj$data), measure.vars = mnames,
    variable.name = "measure", value.name = "performance", id.vars = id.vars)))
  nmeas = length(unique(data$measure))
  nlearn = if (is.null(data$learner)) 1L else length(unique(data$learner))
  # drop mappings with a single level (scalar conditions -> use && / ||)
  if ((color == "learner" && nlearn == 1L) || (color == "measure" && nmeas == 1L))
    color = NULL
  if ((interaction == "learner" && nlearn == 1L) || (interaction == "measure" && nmeas == 1L))
    interaction = NULL
  if (resamp && !obj$aggregate && is.null(color)) {
    group = "iter"
  } else if (resamp && !obj$aggregate && !is.null(color)) {
    group = c("iter", color)
  } else {
    group = NULL
  }
  create_plot = function(data, color = NULL, group = NULL, measures) {
    if (!is.null(color))
      plt = ggvis::ggvis(data, ggvis::prop("x", as.name("threshold")),
        ggvis::prop("y", as.name("performance")),
        ggvis::prop("stroke", as.name(color)))
    else
      plt = ggvis::ggvis(data, ggvis::prop("x", as.name("threshold")),
        ggvis::prop("y", as.name("performance")))
    if (!is.null(group))
      plt = ggvis::group_by(plt, .dots = group)
    plt = ggvis::layer_paths(plt)
    if (!is.na(mark.th) && is.null(interaction)) { ## cannot do vline with reactive data
      # BUG FIX: this previously read data$perf, which only worked via
      # partial matching of `$` against the "performance" column.
      vline_data = data.frame(x2 = rep(mark.th, 2),
        y2 = c(min(data$performance), max(data$performance)),
        measure = obj$measures[1])
      plt = ggvis::layer_lines(plt, ggvis::prop("x", as.name("x2")),
        ggvis::prop("y", as.name("y2")),
        ggvis::prop("stroke", "grey", scale = FALSE), data = vline_data)
    }
    plt = ggvis::add_axis(plt, "x", title = "threshold")
    if (length(measures) > 1L)
      plt = ggvis::add_axis(plt, "y", title = "performance")
    else
      plt = ggvis::add_axis(plt, "y", title = measures[[1L]]$name)
    plt
  }
  if (!is.null(interaction)) {
    # interactive case: shiny app with a drop-down for the interaction levels
    ui = shiny::shinyUI(
      shiny::pageWithSidebar(
        shiny::headerPanel("Threshold vs. Performance"),
        shiny::sidebarPanel(
          shiny::selectInput("interaction_select",
            stri_paste("choose a", interaction, sep = " "),
            levels(data[[interaction]]))
        ),
        shiny::mainPanel(
          shiny::uiOutput("ggvis_ui"),
          ggvis::ggvisOutput("ggvis")
        )
      ))
    server = shiny::shinyServer(function(input, output) {
      # reactive subset on the selected interaction level; ggvis accepts reactives
      data_sub = shiny::reactive(data[which(data[[interaction]] == input$interaction_select), ])
      plt = create_plot(data_sub, color, group, obj$measures)
      ggvis::bind_shiny(plt, "ggvis", "ggvis_ui")
    })
    shiny::shinyApp(ui, server)
  } else {
    create_plot(data, color, group, obj$measures)
  }
}
#' @title Plots a ROC curve using ggplot2.
#'
#' @description
#' Plots a ROC curve from predictions.
#'
#' @family plot
#' @family thresh_vs_perf
#'
#' @param obj [\code{ThreshVsPerfData}]\cr
#'   Result of \code{\link{generateThreshVsPerfData}}.
#' @param measures [\code{list(2)} of \code{\link{Measure}}]\cr
#'   Default is the first 2 measures passed to \code{\link{generateThreshVsPerfData}}.
#' @param diagonal [\code{logical(1)}]\cr
#'   Whether to plot a dashed diagonal line.
#'   Default is \code{TRUE}.
#' @param pretty.names [\code{logical(1)}]\cr
#'   Whether to use the \code{\link{Measure}} name instead of the id in the plot.
#'   Default is \code{TRUE}.
#' @template ret_gg2
#' @export
#' @examples
#' \donttest{
#' lrn = makeLearner("classif.rpart", predict.type = "prob")
#' fit = train(lrn, sonar.task)
#' pred = predict(fit, task = sonar.task)
#' roc = generateThreshVsPerfData(pred, list(fpr, tpr))
#' plotROCCurves(roc)
#'
#' r = bootstrapB632plus(lrn, sonar.task, iters = 3)
#' roc_r = generateThreshVsPerfData(r, list(fpr, tpr), aggregate = FALSE)
#' plotROCCurves(roc_r)
#'
#' r2 = crossval(lrn, sonar.task, iters = 3)
#' roc_l = generateThreshVsPerfData(list(boot = r, cv = r2), list(fpr, tpr), aggregate = FALSE)
#' plotROCCurves(roc_l)
#' }
plotROCCurves = function(obj, measures, diagonal = TRUE, pretty.names = TRUE) {
  assertClass(obj, "ThreshVsPerfData")
  if (missing(measures))
    measures = obj$measures[1:2]
  assertList(measures, "Measure", len = 2)
  assertFlag(diagonal)
  assertFlag(pretty.names)
  if (is.null(names(measures)))
    names(measures) = extractSubList(measures, "id")
  mnames = if (pretty.names) replaceDupeMeasureNames(measures, "name") else names(measures)
  # is there more than one learner in the data?
  mlearn = if (is.null(obj$data$learner)) FALSE else length(unique(obj$data$learner)) > 1L
  resamp = "iter" %in% colnames(obj$data)
  # scalar conditions below: use short-circuiting && instead of &
  if (!obj$aggregate && mlearn && resamp) {
    # one path per (learner, iteration) combination
    obj$data$int = interaction(obj$data$learner, obj$data$iter)
    p = ggplot(obj$data, aes_string(names(measures)[1L], names(measures)[2L], group = "int"))
    p = p + geom_path(alpha = .5)
  } else if (!obj$aggregate && !mlearn && resamp) {
    p = ggplot(obj$data, aes_string(names(measures)[1L], names(measures)[2L], group = "iter"))
    p = p + geom_path(alpha = .5)
  } else if (obj$aggregate && mlearn && !resamp) {
    # BUG FIX: group/color were previously passed to ggplot() outside the
    # aesthetic mapping and were silently ignored; they belong in aes_string().
    p = ggplot(obj$data, aes_string(names(measures)[1L], names(measures)[2L],
      group = "learner", color = "learner"))
    p = p + geom_path(alpha = .5)
  } else {
    # single aggregated curve: sort by threshold so the path is drawn in order
    obj$data = obj$data[order(obj$data$threshold), ]
    p = ggplot(obj$data, aes_string(names(measures)[1L], names(measures)[2L]))
    p = p + geom_path()
  }
  p = p + labs(x = mnames[1L], y = mnames[2L])
  if (length(unique(obj$data$learner)) > 1L)
    p = p + facet_wrap(~ learner)
  # only draw the unit diagonal if both measures live in [0, 1]
  in.unit.square = all(vapply(obj$data[, names(measures)],
    function(x) max(x, na.rm = TRUE), numeric(1L)) <= 1)
  if (diagonal && in.unit.square)
    p = p + geom_abline(aes(intercept = 0, slope = 1), linetype = "dashed", alpha = .5)
  p
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.