content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_mana.R
\name{compute_mana}
\alias{compute_mana}
\title{Compute Mana}
\usage{
compute_mana(int, base = 1093)
}
\arguments{
\item{int}{intellect of character}
\item{base}{base mana}
}
\description{
Computes the maximum mana based on intellect and base mana.
}
\examples{
compute_mana(267)
}
| /man/compute_mana.Rd | permissive | cphaarmeyer/warlockr | R | false | true | 376 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_mana.R
\name{compute_mana}
\alias{compute_mana}
\title{Compute Mana}
\usage{
compute_mana(int, base = 1093)
}
\arguments{
\item{int}{intellect of character}
\item{base}{base mana}
}
\description{
Computes the maximum mana based on intellect and base mana.
}
\examples{
compute_mana(267)
}
|
## The functions below cache and compute the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(mtx = matrix()) {
  # Wraps a matrix in a list of accessors so its inverse can be cached.
  # The cached inverse is invalidated whenever the matrix is replaced.
  inverse <- NULL
  list(
    set = function(x) {
      mtx <<- x
      inverse <<- NULL  # new matrix -> stale cache must be cleared
    },
    get = function() mtx,
    setinv = function(inv) inverse <<- inv,
    getinv = function() inverse
  )
}
## This function computes the inverse of the special "matrix" returned by the above cache function.
## If the inverse has already been calculated (and the matrix has not changed), then
## `cacheSolve` should retrieve the inverse from the cache.
cacheSolve <- function(mtx, ...) {
  # Returns the inverse of the special "matrix" built by makeCacheMatrix.
  # If the inverse was already computed (and the matrix unchanged), the
  # cached value is returned instead of recomputing it.
  # ... is forwarded to solve().
  inverse <- mtx$getinv()
  if (!is.null(inverse)) {
    message("Getting cached data...")
    return(inverse)
  }
  data <- mtx$get()
  # BUG FIX: the result was assigned to a misspelled name ('invserse'),
  # so NULL was cached via setinv() and NULL was returned to the caller.
  inverse <- solve(data, ...)
  mtx$setinv(inverse)
  inverse
}
| /cachematrix.R | no_license | christinekang/ProgrammingAssignment2 | R | false | false | 997 | r | ## The functions below cache and compute the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(mtx = matrix()) {
  # Special "matrix" object: stores a matrix plus a cached inverse,
  # exposed through set/get/setinv/getinv closures over this frame.
  inverse <- NULL
  set <- function(x) {
    mtx <<- x
    # replacing the matrix invalidates any previously cached inverse
    inverse <<- NULL
  }
  get <- function() mtx
  setinv <- function(inv) inverse <<- inv
  getinv <- function() inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function computes the inverse of the special "matrix" returned by the above cache function.
## If the inverse has already been calculated (and the matrix has not changed), then
## `cacheSolve` should retrieve the inverse from the cache.
cacheSolve <- function(mtx, ...) {
  # Returns the inverse of the special "matrix" built by makeCacheMatrix,
  # using the cached value when available. ... is forwarded to solve().
  inverse <- mtx$getinv()
  if (!is.null(inverse)) {
    message("Getting cached data...")
    return(inverse)
  }
  data <- mtx$get()
  # BUG FIX: 'invserse' typo meant the computed inverse was discarded,
  # NULL was cached, and NULL was returned; assign to 'inverse' instead.
  inverse <- solve(data, ...)
  mtx$setinv(inverse)
  inverse
}
|
# plot1.R: reads the UCI household power consumption data, keeps only
# 2007-02-01 and 2007-02-02, and saves a red histogram of
# Global_active_power as plot1.png.
# na.strings = "?" because missing values are coded as '?' in this file.
txt <- read.csv("household_power_consumption.txt",sep=";", na.strings="?",dec=".")
# myDate: parsed Date for filtering; myTime: full timestamp via strptime
# (POSIXlt) — note myTime is not used anywhere below in this script.
txt <- transform(txt,myDate=as.Date(Date,"%d/%m/%Y"), myTime=strptime(paste(Date,Time),"%d/%m/%Y %H:%M:%S"))
# restrict to the two target days (inclusive)
txt <- subset(txt, myDate >= as.Date("2007-02-01") & myDate <= as.Date("2007-02-02"))
## Plot 1
with(txt,hist(Global_active_power,col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)"))
# copy the on-screen plot to a png device, then close that device
dev.copy(png, file ="plot1.png")
dev.off()
| /plot1.R | no_license | rainerenglisch/ExData_Plotting1 | R | false | false | 451 | r | txt <- read.csv("household_power_consumption.txt",sep=";", na.strings="?",dec=".")
# (duplicate copy of plot1.R, continued from the read.csv above)
# myDate: parsed Date for filtering; myTime: strptime timestamp (POSIXlt),
# unused below in this script.
txt <- transform(txt,myDate=as.Date(Date,"%d/%m/%Y"), myTime=strptime(paste(Date,Time),"%d/%m/%Y %H:%M:%S"))
# restrict to the two target days (inclusive)
txt <- subset(txt, myDate >= as.Date("2007-02-01") & myDate <= as.Date("2007-02-02"))
## Plot 1
with(txt,hist(Global_active_power,col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)"))
# copy the on-screen histogram to plot1.png and close the png device
dev.copy(png, file ="plot1.png")
dev.off()
|
# this script will create graphs from
# the espn pre-season fantasy rankings
# and fantasy pros end of season rankings
# libraries
library(stringr)
library(dplyr)
library(ggplot2)
library(tidyr)
# reading files
# espn_rankings: pre-season ESPN ranks; columns used below include
# first, last, Pos, year, init_rank
espn_rankings <- read.csv(
'data/espn rankings_16_18.csv',
stringsAsFactors = F
)
# fp_finishes: FantasyPros end-of-season results; columns used below
# include first, last, Position, year, finish, Games, Points
fp_finishes <- read.csv(
'data/fp finishes_16_18.csv',
stringsAsFactors = F
)
# weekly_rankings: FantasyPros per-week finishes (finish, week, Points)
weekly_rankings <- read.csv(
'data/fp weekly finishes_16_18.csv',
stringsAsFactors = F
)
# weekly_projections is loaded but not used in the code visible below
weekly_projections <- read.csv(
'data/fp projections_16_18.csv',
stringsAsFactors = F
)
# joining rankings to finishes ======================================================================
# going to join based on year, first, and last name
# would do team, but fantasypros only has current team
# not the team they were on back during the season
# going to left join to espn rankings because not every player
# was ranked
rank_finish <- left_join(
espn_rankings, fp_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
# duplicate check: prints (to console only) any player/year joined
# to more than one finish row
rank_finish %>% group_by(first, last, year) %>%
summarize(count = n()) %>%
filter(count > 1)
# removing defense from rankings
# (kickers dropped here too, despite the comment)
espn_rankings <- espn_rankings %>% filter(
Pos != 'DST' & Pos !='D/ST' & Pos != 'K'
)
# re-join after the position filter
rank_finish <- left_join(
espn_rankings, fp_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
# removing missing players
# after research it appears most are instances where
# the player didn't play during the season
# or switched positions (ty monty)
rank_finish <- rank_finish %>% filter(
!is.na(finish)
)
# going to filter out players with less than 10 games
# going to make that a separate frame so that i can
# easily come back if needed
full_szn_rf <- rank_finish %>% filter(
Games > 9
)
# variance graph ===========================================================================================
# creating variable that is just
# difference between initial rank and final finish
# going to graph that over time to see if
# higher or lower rankings are more volatile
# also filtering out kickers, because who cares
# variance > 0 means the player finished BETTER than ranked
full_szn_rf <- full_szn_rf %>% mutate(
variance = init_rank - finish
)
# exploratory plot (not saved): variance by position panel, colored by year
full_szn_rf %>% ggplot(aes(init_rank, variance))+
geom_point(aes(color = as.character(year)))+
geom_hline(aes(yintercept = 0))+
facet_wrap(. ~ Pos, scales = 'free_x')+
scale_color_discrete(name = 'year')+
theme_bw()
# exploratory plot (not saved): variance by year panel, colored by position
full_szn_rf %>% ggplot(aes(init_rank, variance))+
geom_point(aes(color = Pos))+
geom_hline(aes(yintercept = 0))+
geom_abline(aes(intercept = -30, slope = 1), linetype = 'dashed')+
facet_grid(year ~ . , scales = 'free_x')+
scale_color_discrete(name = 'Pos')+
theme_bw()
# ten: bucket label for the pre-season rank decile, e.g. ranks 1-10 ->
# 'Top 10', 11-20 -> 'Top 20', etc.
full_szn_top <- full_szn_rf %>% mutate(
ten = paste('Top', 10*(1 + floor((init_rank-1) / 10)))
)
# graph of rb and WR initial ranks v final finishes
# dashed line indicates top-30 or better (above)
# or worse than top-30 finish (below)
# top-30 because in a ten-team 2 wr & rb and 1 flex
# league, top-30 would be startable
full_szn_top %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(init_rank, variance))+
geom_point(aes(color = Pos))+
geom_hline(aes(yintercept = 0))+
geom_abline(aes(intercept = -30, slope = 1), linetype = 'dashed')+
facet_grid(year ~ . , scales = 'free_x')+
scale_color_discrete(name = 'Position')+
ylab('Initial minus Finish')+
xlab('Initial ESPN Ranking')+
theme_bw()+
ggsave('graphs/espn rank variance years.png', width = 13, height = 6)
# same graph as above but faceted by position and colored by year
# dashed line indicates top-30 or better (above)
# or worse than top-30 finish (below)
# top-30 because in a ten-team 2 wr & rb and 1 flex
# league, top-30 would be startable
full_szn_top %>%filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(init_rank, variance))+
geom_point(aes(color = as.character(year)))+
geom_hline(aes(yintercept = 0))+
geom_abline(aes(intercept = -30, slope = 1), linetype = 'dashed')+
facet_grid(Pos ~ . , scales = 'free_x')+
scale_color_discrete(name = 'Year')+
ylab('Initial minus Finish')+
xlab('Initial ESPN Ranking')+
theme_bw()+
ggsave('graphs/espn rank variance pos.png', width = 13, height = 6)
# builds the summary data used by the per-decile graphs below:
# for each (year, rank decile, position), the number of top 30 finishes,
# average variance, average finish, net variance, and top 10 finishes
# so you can see expected finish from each ten
# compare across years and positions
# top_rank flag: 1 if season finish was top-30 (startable)
full_szn_top$top_rank <- if_else(
full_szn_top$finish <31, 1, 0
)
# top_ten flag: 1 if season finish was top-10
full_szn_top$top_ten <- if_else(
full_szn_top$finish <11, 1, 0
)
ten_ranks <- full_szn_top %>% group_by(
year, ten, Pos
) %>% summarize(
avg_var = mean(variance),
avg_finish = mean(finish),
top_players = sum(top_rank),
net_var = sum(variance),
top_ten = sum(top_ten)
) %>% ungroup() %>% complete(
year, ten, Pos, fill = list(
top_ten = 0, net_var = 0, top_players = 0,
avg_finish = 0, avg_var = 0
)
)
# order the decile labels numerically ('Top 10' ... 'Top 110') for plotting
ten_ranks$ten <- factor(
ten_ranks$ten,
levels = paste('Top', seq(10, 110, 10))
)
# count of top-30 SEASON finishes for rbs and wrs
# within the same 10 ranks (e.g. 1-10, 10-20)
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, top_players))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Count of Top 30 Finishes',
breaks = c(0:10)
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
ggsave(
'graphs/ESPN top finish counts.png', width = 13, height = 6
)
# count of top-10 season finishes for qbs and tes
# within the same 10 ranks (e.g. 1-10, 10-20)
ten_ranks %>% filter(
Pos %in% c('TE', 'QB') &
ten %in% c('Top 10', 'Top 20', 'Top 30', 'Top 40')
) %>% ggplot(aes(ten, top_ten))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c('TE' = 'dodgerblue3', 'QB' = 'forestgreen')
)+
scale_y_continuous(
name = 'Count of Top 10 Finishes',
breaks = c(0:10)
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
ggsave(
'graphs/ESPN top finish counts_te.png', width = 13, height = 6
)
# showing the average difference between initial ESPN rank
# and final fantasy finish for players
# within the same 10 ranks (e.g. 1-10, 10-20)
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, avg_var))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Average Variance between Rank and Finish'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
ggsave(
'graphs/ESPN ranks average variance.png',
height = 6, width = 13
)
# showing the average finish for players
# within the same 10 ranks (e.g. 1-10, 10-20)
# dashed line at 30 = startable threshold
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, avg_finish))+
geom_col(aes(fill = Pos), position = 'dodge')+
geom_hline(aes(yintercept = 30), linetype = 'dashed')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Average Finish'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+ggsave(
'graphs/ESPN rank average finish.png',
width = 13, height = 6
)
# showing the net (summed) difference between initial rank
# and final finish for players ranked within the same ten
# BUG FIX: this graph previously plotted avg_var, which duplicated the
# "average variance" graph above; the comment, the output filename, and
# the otherwise-unused net_var column all indicate net_var was intended.
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, net_var))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Net Variance between Rank and Finish'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
ggsave(
'graphs/ESPN ranks net variance.png',
height = 6, width = 13
)
# count of top-10 SEASON finishes for rbs and wrs
# within the same 10 ranks (e.g. 1-10, 10-20)
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, top_ten))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Count of Top 10 Finishes',
breaks = c(0:10)
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
ggsave(
'graphs/ESPN top ten finish counts.png', width = 13, height = 6
)
# fantasy finish graphs ==========================================================================================
# players who finished top-40 at their position for the season
top_40_finishes <- fp_finishes %>%
filter(finish < 41)
# density graph of point finishes by year by position
# shows that rb has a very wide distribution, but
# that the top scoring rb's always score more than the top scoring
# wide receivers
# tight ends are almost all worthless
# quarterbacks have a very flat distribution
top_40_finishes %>% filter(
Position %in% c('RB', 'WR', 'TE', 'QB')
) %>% ggplot(aes(Points))+
geom_density(aes(fill = Position), alpha = .2) +
facet_grid(year ~ .)+
theme_bw()+
ggsave(
'graphs/point distribution of positions.png',
width = 13, height = 6
)
# graph shows that top finishing rb's are
# more valuable than top finishing WR's
# but after the top they are equal
# vertical lines mark finish 10 and 30
top_40_finishes %>% filter(
Position %in% c('RB', 'WR', 'TE', 'QB')
) %>% ggplot(aes(finish, Points))+
geom_line(aes(color = Position))+
geom_vline(aes(xintercept = 10))+
geom_vline(aes(xintercept = 30))+
facet_grid(year ~ .)+
xlab('End of Season Finish')+
theme_bw()+
ggsave(
'graphs/points by finish and position.png',
width = 13, height = 6
)
# joining espn_ranks and weekly finishes ====================================================================
# top ten finishes
# a top ten finish is really good for WR and RB
# and startable for QB and TE
# count of top-10 WEEKS per player per season
top_10_finishes <- weekly_rankings %>% filter(
finish < 11
) %>% group_by(
first, last, Position, year
) %>% summarize(
count = n()
)
rank_top_10 <- left_join(
espn_rankings, top_10_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
# ten here is the NUMERIC decile index (1 = ranks 1-10, 2 = 11-20, ...),
# unlike the 'Top N' labels used earlier
rank_top_10 <- rank_top_10 %>% mutate(
ten = 1 + floor((init_rank-1) / 10)
) %>% filter(
!is.na(count)
) %>% group_by(year, ten, Pos) %>% summarize(
count = sum(count)
) %>% ungroup() %>% complete(
year, ten, Pos, fill = list(count = 0)
)
# going to do top-5 finishes
# for qb and te since you can only start one
# top 5 would be a really good week
top_5_finishes <- weekly_rankings %>% filter(
finish < 6
) %>% group_by(
first, last, Position, year
) %>% summarize(
count = n()
)
rank_top_5 <- left_join(
espn_rankings, top_5_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
rank_top_5 <- rank_top_5 %>% mutate(
ten = 1 + floor((init_rank-1) / 10)
) %>% filter(
!is.na(count)
) %>% group_by(year, ten, Pos) %>% summarize(
count = sum(count)
) %>% ungroup() %>% complete(
year, ten, Pos, fill = list(count = 0)
)
# going to do top 30 for WR and RB
# since that would be startable
# ten_finish buckets the WEEKLY finish into 'Top 10'/'Top 20'/'Top 30'
top_30_finishes <- weekly_rankings %>% mutate(
ten_finish = paste('Top', 10*(1 + floor((finish-1) / 10)))
) %>% filter(
finish < 31
) %>% group_by(
first, last, Position, year, ten_finish
) %>% summarize(
count = n()
)
top_30_finishes <- left_join(
espn_rankings, top_30_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
top_30_finishes <- top_30_finishes %>% mutate(
ten = 1 + floor((init_rank-1) / 10)
) %>% filter(
!is.na(count)
) %>% group_by(year, ten, Pos, ten_finish) %>% summarize(
count = sum(count)
) %>% ungroup() %>% complete(
year, ten, Pos, ten_finish, fill = list(count = 0)
)
# graphs of weekly finishes ==================================================================================
# graphs the number of top ten weekly finishes
# for players ranked in a ten range
# rbs and wrs
rank_top_10 %>% filter(Pos %in% c('WR', 'RB')) %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Top Ten Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:6),
labels = paste('Top ', seq(10, 60, 10), sep = '')
)+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
ggsave('graphs/count of top ten weeks.png', width = 13, height = 6)
# graphs the number of top ten weekly finishes
# for players ranked in a ten range
# qbs and tes (top-30 ranks only, since you start just one of each)
rank_top_10 %>% filter(
Pos %in% c('QB', 'TE') & ten < 4
) %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Top Ten Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:3),
labels = paste('Top ', seq(10, 30, 10), sep = '')
)+
scale_fill_manual(
name = 'Position',
values = c(
'TE' = 'dodgerblue3', 'QB' = 'forestgreen'
)
)+
ggsave('graphs/count of top ten weeks_qb_te.png', width = 13, height = 6)
# number of top five weekly finishes
# for players in a ten range
# tes and qbs
rank_top_5%>% filter(
Pos %in% c('TE', 'QB') & ten < 4
) %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Top Five Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:6),
labels = paste('Top ', seq(10, 60, 10), sep = '')
)+
scale_fill_manual(
name = 'Position',
values = c(
'TE' = 'dodgerblue3', 'QB' = 'forestgreen'
)
)+
ggsave(
'graphs/count of top weeks_qbte.png', width = 13, height = 6
)
# count of top 30 weekly finishes
# for WR colored by finish level
top_30_finishes %>% filter(Pos == 'WR') %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = ten_finish))+
facet_grid(year ~ .)+
scale_y_continuous(
name = 'Count of Top 30 Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:12),
labels = paste('Top ', seq(10, 120, 10), sep = '')
)+ scale_fill_manual(
name = 'Finish',
values = c(
'Top 10' = 'darksalmon', 'Top 20' = 'tomato2', 'Top 30' = 'firebrick'
)
)+
theme_bw()+
ggtitle(
'Top 30 Finishes for WR by Pre-season ESPN Rank'
)+
ggsave(
'graphs/wr top 30 finishes.png', width = 13, height = 6
)
# count of top 30 weekly finishes
# for RB colored by finish level
top_30_finishes %>% filter(Pos == 'RB' & ten < 10) %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = ten_finish))+
facet_grid(year ~ .)+
scale_y_continuous(
name = 'Count of Top 30 Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:9),
labels = paste('Top ', seq(10, 90, 10), sep = '')
)+ scale_fill_manual(
name = 'Finish',
values = c(
'Top 10' = 'cyan', 'Top 20' = 'dodgerblue', 'Top 30' = 'navy'
)
)+
theme_bw()+
ggtitle(
'Top 30 Finishes for RB by Pre-season ESPN Rank'
)+
ggsave(
'graphs/rb top 30 finishes.png', width = 13, height = 6
)
# creating weekly finish data =============================================================================
# data for weekly finish rankings
# to see if pre-season ranks are consistent over time
rank_weeks <- left_join(
espn_rankings, weekly_rankings,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
# per (year, week, rank decile, position, finish decile) counts,
# restricted to weekly finishes of 30 or better
rank_weeks <- rank_weeks %>% filter(
!is.na(finish) & finish < 31
) %>% mutate(
ten = paste('Top', 10*(1 + floor((init_rank-1) / 10))),
ten_finish = paste('Top', 10*(1 + floor((finish-1) / 10)))
) %>% group_by(year, week, ten, Pos, ten_finish) %>% summarize(
count = n()
) %>% ungroup() %>% complete(
year, ten, Pos, ten_finish, week, fill = list(count = 0)
)
# weekly finish graph ==============================================================================
# count of top ten running back finishes
# by week, by year, by pre-season rank
rank_weeks %>% filter(
ten_finish == 'Top 10' & Pos == 'RB'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top Ten Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Running Back Top Ten Finishes by Week')+
ggsave(
'graphs/rb top ten weekly count.png', width = 13, height = 6
)
# count of top ten wide receiver finishes
# by week, by year, by pre-season rank
rank_weeks %>% filter(
ten_finish == 'Top 10' & Pos == 'WR'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top Ten Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Wide Receiver Top Ten Finishes by Week')+
ggsave(
'graphs/wr top ten weekly count.png', width = 13, height = 6
)
# rb weekly count of 11-20 finishes
rank_weeks %>% filter(
ten_finish == 'Top 20' & Pos == 'RB'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top 11-20 Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Running Back Top 11-20 Finishes by Week')+
ggsave(
'graphs/rb top 20 weekly count.png', width = 13, height = 6
)
# wr weekly count of 11-20 finishes
rank_weeks %>% filter(
ten_finish == 'Top 20' & Pos == 'WR'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top 11-20 Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Wide Receiver Top 11-20 Finishes by Week')+
ggsave(
'graphs/wr top 20 weekly count.png', width = 13, height = 6
)
# rb weekly count of 21-30 finishes
rank_weeks %>% filter(
ten_finish == 'Top 30' & Pos == 'RB'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top 21-30 Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
# BUG FIX: title previously said 'Top 11-20' (copy-paste from the graph
# above); this graph plots 21-30 finishes, as the axis label and the
# output filename show
ggtitle('Count of Running Back Top 21-30 Finishes by Week')+
ggsave(
'graphs/rb top 30 weekly count.png', width = 13, height = 6
)
# wr weekly count of 21-30 finishes
rank_weeks %>% filter(
ten_finish == 'Top 30' & Pos == 'WR'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top 21-30 Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Wide Receiver Top 21-30 Finishes by Week')+
ggsave(
'graphs/wr top 30 weekly count.png', width = 13, height = 6
)
# creating consistency data =================================================================================
# counts of players with a certain number
# of usable games to see if certain ranks
# have more or less consistent players
week_consistency <- left_join(
espn_rankings, weekly_rankings,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
# per-player counts of weeks in each finish decile (top-30 weeks only)
# NOTE(review): complete() here runs on a still-grouped frame (no
# ungroup() before it, unlike the blocks above) — verify it expands
# combinations as intended
week_consistency <- week_consistency %>% filter(
!is.na(finish) & finish < 31
) %>% mutate(
ten = paste('Top', 10*(1 + floor((init_rank-1) / 10))),
ten_finish = paste('Top', 10*(1 + floor((finish-1) / 10)))
) %>% group_by(year, first, last, Pos, ten, ten_finish) %>% summarize(
count = n()
) %>% complete(
year, first, last, Pos, ten, ten_finish, fill = list(count = 0)
)
# order the decile labels numerically for plotting
week_consistency$ten <- factor(
week_consistency$ten,
levels = paste('Top', seq(10, 110, 10))
)
# players with 8+ top-10 weeks, counted per (year, Pos, rank decile)
# NOTE(review): unlike top_20/top_30 below, this spread() has no
# fill = 0, hence the !is.na() guard — confirm the asymmetry is intended
top_10_consistency <- week_consistency %>% spread(
key = ten_finish, value = count
) %>%
filter(
!is.na(`Top 10`) & `Top 10` > 7
) %>% group_by(year, Pos, ten) %>% summarize(
count = sum(`Top 10`)
) %>% ungroup() %>% complete(
year, Pos, ten, fill = list(count = 0)
)
# players with 8+ top-20 weeks (top-10 and 11-20 weeks combined)
top_20_consistency <- week_consistency %>% spread(
key = ten_finish, value = count, fill = 0
) %>% mutate(
top_20 = `Top 10` + `Top 20`
) %>%
filter(
!is.na(top_20) & top_20 > 7
) %>% group_by(year, Pos, ten) %>% summarize(
count = sum(top_20)
) %>% ungroup() %>% complete(
year, Pos, ten, fill = list(count = 0)
)
# players with 8+ top-30 weeks (all three deciles combined)
top_30_consistency <- week_consistency %>% spread(
key = ten_finish, value = count, fill = 0
) %>% mutate(
top_30 = `Top 10` + `Top 20` + `Top 30`
) %>%
filter(
!is.na(top_30) & top_30 > 7
) %>% group_by(year, Pos, ten) %>% summarize(
count = sum(top_30)
) %>% ungroup() %>% complete(
year, Pos, ten, fill = list(count = 0)
)
# consistency graphs ========================================================================================
top_10_consistency %>% filter(
Pos %in% c('RB', 'WR')
) %>% ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Players'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+ggtitle(
'Count of Players with 8+ Top Ten Games'
)+ggsave(
'graphs/top ten consistency_rb_wr.png', width = 13, height = 6
)
top_20_consistency %>% filter(
Pos %in% c('RB', 'WR')
) %>% ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Players'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+ggtitle(
'Count of Players with 8+ Top 20 Games'
)+ggsave(
'graphs/top 20 consistency_rb_wr.png', width = 13, height = 6
)
top_30_consistency %>% filter(
Pos %in% c('RB', 'WR')
) %>% ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Players'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+ggtitle(
'Count of Players with 8+ Top 30 Games'
)+ggsave(
'graphs/top 30 consistency_rb_wr.png', width = 13, height = 6
)
# point distribution of weekly top finishes ================================================================
# density of weekly points for top-30 weekly finishes, RB vs WR
weekly_rankings %>% filter(
Position %in% c('WR', 'RB') & finish < 31
) %>% ggplot(aes(Points))+
geom_density(aes(fill = Position), alpha = .2)+
facet_grid(year ~ .)+
theme_bw()+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
ggtitle('Distribution of Points Scored - Top 30')+
ggsave(
'graphs/weekly point distribution_top30_rb_wr.png', width = 13, height = 6
)
# same density, widened to top-60 weekly finishes
weekly_rankings %>% filter(
Position %in% c('WR', 'RB') & finish < 61
) %>% ggplot(aes(Points))+
geom_density(aes(fill = Position), alpha = .2)+
facet_grid(year ~ .)+
theme_bw()+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
ggtitle('Distribution of Points Scored - Top 60')+
ggsave(
'graphs/weekly point distribution_top60_rb_wr.png', width = 13, height = 6
)
# joining weekly projections to weekly finish data =============================================================
| /graphs.R | no_license | tklein18/ESPN_finishes | R | false | false | 24,558 | r | # this script will create graphs from
# the espn pre-season fantasy rankings
# and fantasy pros end of season rankings
# libraries
library(stringr)
library(dplyr)
library(ggplot2)
library(tidyr)
# reading files
espn_rankings <- read.csv(
'data/espn rankings_16_18.csv',
stringsAsFactors = F
)
fp_finishes <- read.csv(
'data/fp finishes_16_18.csv',
stringsAsFactors = F
)
weekly_rankings <- read.csv(
'data/fp weekly finishes_16_18.csv',
stringsAsFactors = F
)
weekly_projections <- read.csv(
'data/fp projections_16_18.csv',
stringsAsFactors = F
)
# joining rankings to finishes ======================================================================
# going to join based on year, first, and last name
# would do team, but fantasypros only has current team
# not the team they were on back during the season
# going to left join to espn rankings because not every player
# was ranked
rank_finish <- left_join(
espn_rankings, fp_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
rank_finish %>% group_by(first, last, year) %>%
summarize(count = n()) %>%
filter(count > 1)
# removing defense from rankings
espn_rankings <- espn_rankings %>% filter(
Pos != 'DST' & Pos !='D/ST' & Pos != 'K'
)
rank_finish <- left_join(
espn_rankings, fp_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
# removing missing players
# after research it appears most are instances where
# the player didn't play during the season
# or switched positions (ty monty)
rank_finish <- rank_finish %>% filter(
!is.na(finish)
)
# going to filter out players with less than 10 games
# going to make that a separate frame so that i can
# easily come back if needed
full_szn_rf <- rank_finish %>% filter(
Games > 9
)
# variance graph ===========================================================================================
# creating variable that is just
# difference between initial rank and final finish
# going to graph that over time to see if
# higher or lower rankings are more volatile
# also filtering out kickers, because who cares
full_szn_rf <- full_szn_rf %>% mutate(
variance = init_rank - finish
)
full_szn_rf %>% ggplot(aes(init_rank, variance))+
geom_point(aes(color = as.character(year)))+
geom_hline(aes(yintercept = 0))+
facet_wrap(. ~ Pos, scales = 'free_x')+
scale_color_discrete(name = 'year')+
theme_bw()
full_szn_rf %>% ggplot(aes(init_rank, variance))+
geom_point(aes(color = Pos))+
geom_hline(aes(yintercept = 0))+
geom_abline(aes(intercept = -30, slope = 1), linetype = 'dashed')+
facet_grid(year ~ . , scales = 'free_x')+
scale_color_discrete(name = 'Pos')+
theme_bw()
full_szn_top <- full_szn_rf %>% mutate(
ten = paste('Top', 10*(1 + floor((init_rank-1) / 10)))
)
# graph of rb and WR initial ranks v final finishes
# dashed line indicates top-30 or better (above)
# or worse than top-30 finish (below)
# top-30 because in a ten-team 2 wr & rb and 1 flex
# league, top-30 would be startable
full_szn_top %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(init_rank, variance))+
geom_point(aes(color = Pos))+
geom_hline(aes(yintercept = 0))+
geom_abline(aes(intercept = -30, slope = 1), linetype = 'dashed')+
facet_grid(year ~ . , scales = 'free_x')+
scale_color_discrete(name = 'Position')+
ylab('Initial minus Finish')+
xlab('Initial ESPN Ranking')+
theme_bw()+
ggsave('graphs/espn rank variance years.png', width = 13, height = 6)
# graph of rb and WR initial ranks v final finishes
# dashed line indicates top-30 or better (above)
# or worse than top-30 finish (below)
# top-30 because in a ten-team 2 wr & rb and 1 flex
# league, top-30 would be startable
full_szn_top %>%filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(init_rank, variance))+
geom_point(aes(color = as.character(year)))+
geom_hline(aes(yintercept = 0))+
geom_abline(aes(intercept = -30, slope = 1), linetype = 'dashed')+
facet_grid(Pos ~ . , scales = 'free_x')+
scale_color_discrete(name = 'Year')+
ylab('Initial minus Finish')+
xlab('Initial ESPN Ranking')+
theme_bw()+
ggsave('graphs/espn rank variance pos.png', width = 13, height = 6)
# graph of what ten (10, 20 , 30, etc)
# position and number of top 30 from that ten
# also average variance of that ten
# so you can see expected finish from each ten
# compare across years and positions
full_szn_top$top_rank <- if_else(
full_szn_top$finish <31, 1, 0
)
full_szn_top$top_ten <- if_else(
full_szn_top$finish <11, 1, 0
)
ten_ranks <- full_szn_top %>% group_by(
year, ten, Pos
) %>% summarize(
avg_var = mean(variance),
avg_finish = mean(finish),
top_players = sum(top_rank),
net_var = sum(variance),
top_ten = sum(top_ten)
) %>% ungroup() %>% complete(
year, ten, Pos, fill = list(
top_ten = 0, net_var = 0, top_players = 0,
avg_finish = 0, avg_var = 0
)
)
ten_ranks$ten <- factor(
ten_ranks$ten,
levels = paste('Top', seq(10, 110, 10))
)
# count of top ten finishes for rbs and wrs
# within the same 10 ranks (e.g. 1-10, 10-20)
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, top_players))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Count of Top 30 Finishes',
breaks = c(0:10)
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
ggsave(
'graphs/ESPN top finish counts.png', width = 13, height = 6
)
# count of top ten finishes for qbs and tes
# within the same 10 ranks (e.g. 1-10, 10-20)
ten_ranks %>% filter(
Pos %in% c('TE', 'QB') &
ten %in% c('Top 10', 'Top 20', 'Top 30', 'Top 40')
) %>% ggplot(aes(ten, top_ten))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c('TE' = 'dodgerblue3', 'QB' = 'forestgreen')
)+
scale_y_continuous(
name = 'Count of Top 10 Finishes',
breaks = c(0:10)
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
ggsave(
'graphs/ESPN top finish counts_te.png', width = 13, height = 6
)
# showing the average difference between initial ESPN rank
# and final fantasy finish for players
# within the same 10 ranks (e.g. 1-10, 10-20)
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, avg_var))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Average Variance between Rank and Finish'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
ggsave(
'graphs/ESPN ranks average variance.png',
height = 6, width = 13
)
# showing the average finish for players
# within the same 10 ranks (e.g. 1-10, 10-20)
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, avg_finish))+
geom_col(aes(fill = Pos), position = 'dodge')+
geom_hline(aes(yintercept = 30), linetype = 'dashed')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Average Finish'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+ggsave(
'graphs/ESPN rank average finish.png',
width = 13, height = 6
)
# showing the net difference between initial rank
# and final finish for players ranked within the same ten
# Bug fix: this chain previously plotted avg_var, which made it an exact
# duplicate of the 'average variance' graph above, even though the comment,
# the intent, and the output filename all refer to the NET variance.
# It now plots net_var (sum of variance per ten-rank band, see ten_ranks).
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, net_var))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Net Variance between Rank and Finish'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
# NOTE(review): chaining `+ ggsave(...)` relies on a deprecated side effect
# (ggsave defaults to last_plot()); prefer a separate ggsave() call.
ggsave(
'graphs/ESPN ranks net variance.png',
height = 6, width = 13
)
# count of top ten finishes for rbs and wrs
# within the same 10 ranks (e.g. 1-10, 10-20)
ten_ranks %>% filter(
Pos %in% c('WR', 'RB')
) %>% ggplot(aes(ten, top_ten))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ . )+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
scale_y_continuous(
name = 'Count of Top 10 Finishes',
breaks = c(0:10)
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
theme_bw()+
ggsave(
'graphs/ESPN top ten finish counts.png', width = 13, height = 6
)
# fantasy finish graphs ==========================================================================================
top_40_finishes <- fp_finishes %>%
filter(finish < 41)
# density graph of point finishes by year by position
# shows that rb has a very wide distribution, but
# that the top scoring rb's always score more than the top scoring
# wide receivers
# tight ends are almost all worthless
# quarterbacks have a very flat distribution
top_40_finishes %>% filter(
Position %in% c('RB', 'WR', 'TE', 'QB')
) %>% ggplot(aes(Points))+
geom_density(aes(fill = Position), alpha = .2) +
facet_grid(year ~ .)+
theme_bw()+
ggsave(
'graphs/point distribution of positions.png',
width = 13, height = 6
)
# graph shows that top finishing rb's are
# more valuable than top finishing WR's
# but after the top they are equal
top_40_finishes %>% filter(
Position %in% c('RB', 'WR', 'TE', 'QB')
) %>% ggplot(aes(finish, Points))+
geom_line(aes(color = Position))+
geom_vline(aes(xintercept = 10))+
geom_vline(aes(xintercept = 30))+
facet_grid(year ~ .)+
xlab('End of Season Finish')+
theme_bw()+
ggsave(
'graphs/points by finish and position.png',
width = 13, height = 6
)
# joining espn_ranks and weekly finishes ====================================================================
# top ten finishes
# a top ten finish is really good for WR and RB
# and startable for QB and TE
top_10_finishes <- weekly_rankings %>% filter(
finish < 11
) %>% group_by(
first, last, Position, year
) %>% summarize(
count = n()
)
rank_top_10 <- left_join(
espn_rankings, top_10_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
rank_top_10 <- rank_top_10 %>% mutate(
ten = 1 + floor((init_rank-1) / 10)
) %>% filter(
!is.na(count)
) %>% group_by(year, ten, Pos) %>% summarize(
count = sum(count)
) %>% ungroup() %>% complete(
year, ten, Pos, fill = list(count = 0)
)
# going to do top-5 finishes
# for qb and te since you can only start one
# top 5 would be a really good week
top_5_finishes <- weekly_rankings %>% filter(
finish < 6
) %>% group_by(
first, last, Position, year
) %>% summarize(
count = n()
)
rank_top_5 <- left_join(
espn_rankings, top_5_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
rank_top_5 <- rank_top_5 %>% mutate(
ten = 1 + floor((init_rank-1) / 10)
) %>% filter(
!is.na(count)
) %>% group_by(year, ten, Pos) %>% summarize(
count = sum(count)
) %>% ungroup() %>% complete(
year, ten, Pos, fill = list(count = 0)
)
# going to do top 30 for WR and RB
# since that would be startable
top_30_finishes <- weekly_rankings %>% mutate(
ten_finish = paste('Top', 10*(1 + floor((finish-1) / 10)))
) %>% filter(
finish < 31
) %>% group_by(
first, last, Position, year, ten_finish
) %>% summarize(
count = n()
)
top_30_finishes <- left_join(
espn_rankings, top_30_finishes,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
top_30_finishes <- top_30_finishes %>% mutate(
ten = 1 + floor((init_rank-1) / 10)
) %>% filter(
!is.na(count)
) %>% group_by(year, ten, Pos, ten_finish) %>% summarize(
count = sum(count)
) %>% ungroup() %>% complete(
year, ten, Pos, ten_finish, fill = list(count = 0)
)
# graphs of weekly finishes ==================================================================================
# graphs the number of top ten weekly finishes
# for players ranked in a ten range
# rbs and wrs
rank_top_10 %>% filter(Pos %in% c('WR', 'RB')) %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Top Ten Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:6),
labels = paste('Top ', seq(10, 60, 10), sep = '')
)+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
ggsave('graphs/count of top ten weeks.png', width = 13, height = 6)
# graphs the number of top ten weekly finishes
# for players ranked in a ten range
# qbs and tes
rank_top_10 %>% filter(
Pos %in% c('QB', 'TE') & ten < 4
) %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Top Ten Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:3),
labels = paste('Top ', seq(10, 30, 10), sep = '')
)+
scale_fill_manual(
name = 'Position',
values = c(
'TE' = 'dodgerblue3', 'QB' = 'forestgreen'
)
)+
ggsave('graphs/count of top ten weeks_qb_te.png', width = 13, height = 6)
# number of top five weekly finishes
# for players in a ten range
# tes and qbs
rank_top_5%>% filter(
Pos %in% c('TE', 'QB') & ten < 4
) %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Top Five Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:6),
labels = paste('Top ', seq(10, 60, 10), sep = '')
)+
scale_fill_manual(
name = 'Position',
values = c(
'TE' = 'dodgerblue3', 'QB' = 'forestgreen'
)
)+
ggsave(
'graphs/count of top weeks_qbte.png', width = 13, height = 6
)
# count of top 30 weekly finishes
# for WR colored by finish level
top_30_finishes %>% filter(Pos == 'WR') %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = ten_finish))+
facet_grid(year ~ .)+
scale_y_continuous(
name = 'Count of Top 30 Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:12),
labels = paste('Top ', seq(10, 120, 10), sep = '')
)+ scale_fill_manual(
name = 'Finish',
values = c(
'Top 10' = 'darksalmon', 'Top 20' = 'tomato2', 'Top 30' = 'firebrick'
)
)+
theme_bw()+
ggtitle(
'Top 30 Finishes for WR by Pre-season ESPN Rank'
)+
ggsave(
'graphs/wr top 30 finishes.png', width = 13, height = 6
)
# count of top 30 weekly finishes
# for RB colored by finish level
top_30_finishes %>% filter(Pos == 'RB' & ten < 10) %>%
ggplot(aes(ten, count))+
geom_col(aes(fill = ten_finish))+
facet_grid(year ~ .)+
scale_y_continuous(
name = 'Count of Top 30 Weekly Finishes'
)+
scale_x_continuous(
name = 'ESPN Ranks',
breaks = c(1:9),
labels = paste('Top ', seq(10, 90, 10), sep = '')
)+ scale_fill_manual(
name = 'Finish',
values = c(
'Top 10' = 'cyan', 'Top 20' = 'dodgerblue', 'Top 30' = 'navy'
)
)+
theme_bw()+
ggtitle(
'Top 30 Finishes for RB by Pre-season ESPN Rank'
)+
ggsave(
'graphs/rb top 30 finishes.png', width = 13, height = 6
)
# creating weekly finish data =============================================================================
# data for weekly finish rankings
# to see if pre-season ranks are consistent over time
# Attach each player's pre-season ESPN rank to his weekly finishes,
# matching on name, year and position.
rank_weeks <- left_join(
espn_rankings, weekly_rankings,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
# Keep startable weeks only (weekly finish in the top 30), then bin both the
# pre-season rank and the weekly finish into 10-wide bands:
# 10*(1 + floor((r-1)/10)) maps 1-10 -> 'Top 10', 11-20 -> 'Top 20', etc.
# Count weeks per year/week/rank-band/position/finish-band, and use
# complete() so combinations with zero finishes get count = 0 instead of
# being silently dropped from the weekly line graphs below.
rank_weeks <- rank_weeks %>% filter(
!is.na(finish) & finish < 31
) %>% mutate(
ten = paste('Top', 10*(1 + floor((init_rank-1) / 10))),
ten_finish = paste('Top', 10*(1 + floor((finish-1) / 10)))
) %>% group_by(year, week, ten, Pos, ten_finish) %>% summarize(
count = n()
) %>% ungroup() %>% complete(
year, ten, Pos, ten_finish, week, fill = list(count = 0)
)
# weekly finish graph ==============================================================================
# count of top ten running back finishes
# by week, by year, by pre-season rank
rank_weeks %>% filter(
ten_finish == 'Top 10' & Pos == 'RB'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top Ten Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Running Back Top Ten Finishes by Week')+
ggsave(
'graphs/rb top ten weekly count.png', width = 13, height = 6
)
# count of top ten wide receiver finishes
# by week, by year, by pre-season rank
rank_weeks %>% filter(
ten_finish == 'Top 10' & Pos == 'WR'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top Ten Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Wide Receiver Top Ten Finishes by Week')+
ggsave(
'graphs/wr top ten weekly count.png', width = 13, height = 6
)
# rb weekly count of 11-20 finishes
rank_weeks %>% filter(
ten_finish == 'Top 20' & Pos == 'RB'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top 11-20 Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Running Back Top 11-20 Finishes by Week')+
ggsave(
'graphs/rb top 20 weekly count.png', width = 13, height = 6
)
# wr top 20 finishes by week
rank_weeks %>% filter(
ten_finish == 'Top 20' & Pos == 'WR'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top 11-20 Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Wide Receiver Top 11-20 Finishes by Week')+
ggsave(
'graphs/wr top 20 weekly count.png', width = 13, height = 6
)
# rb weekly count of 21-30 finishes
# Line graph: weekly count of RB finishes in the 21-30 band, one panel per
# year, lines coloured by the player's pre-season ESPN rank band.
rank_weeks %>% filter(
ten_finish == 'Top 30' & Pos == 'RB'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top 21-30 Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
# Bug fix: the title previously read 'Top 11-20' (copy-paste from the
# preceding 11-20 graph); this graph shows the 21-30 band.
ggtitle('Count of Running Back Top 21-30 Finishes by Week')+
# NOTE(review): `+ ggsave(...)` is a deprecated side-effect idiom
# (ggsave defaults to last_plot()); kept as-is for consistency with the
# rest of this script.
ggsave(
'graphs/rb top 30 weekly count.png', width = 13, height = 6
)
# wr top 21-30 finishes by week
rank_weeks %>% filter(
ten_finish == 'Top 30' & Pos == 'WR'
) %>% ggplot(aes(week, count))+
geom_line(aes(group = ten, color = ten))+
facet_grid(year ~ . )+
scale_y_continuous(
name = 'Count of Top 21-30 Finishes'
)+
scale_x_continuous(
name = 'Week', breaks = c(1:16)
)+
scale_color_viridis_d(name = 'ESPN Rank')+
theme_bw()+
ggtitle('Count of Wide Receiver Top 21-30 Finishes by Week')+
ggsave(
'graphs/wr top 30 weekly count.png', width = 13, height = 6
)
# creating consistency data =================================================================================
# counts of players with a certain number
# of usable games to see if certain ranks
# have more or less consistent players
week_consistency <- left_join(
espn_rankings, weekly_rankings,
by = c('first', 'last', 'year', 'Pos' = 'Position')
)
week_consistency <- week_consistency %>% filter(
!is.na(finish) & finish < 31
) %>% mutate(
ten = paste('Top', 10*(1 + floor((init_rank-1) / 10))),
ten_finish = paste('Top', 10*(1 + floor((finish-1) / 10)))
) %>% group_by(year, first, last, Pos, ten, ten_finish) %>% summarize(
count = n()
) %>% complete(
year, first, last, Pos, ten, ten_finish, fill = list(count = 0)
)
week_consistency$ten <- factor(
week_consistency$ten,
levels = paste('Top', seq(10, 110, 10))
)
top_10_consistency <- week_consistency %>% spread(
key = ten_finish, value = count
) %>%
filter(
!is.na(`Top 10`) & `Top 10` > 7
) %>% group_by(year, Pos, ten) %>% summarize(
count = sum(`Top 10`)
) %>% ungroup() %>% complete(
year, Pos, ten, fill = list(count = 0)
)
top_20_consistency <- week_consistency %>% spread(
key = ten_finish, value = count, fill = 0
) %>% mutate(
top_20 = `Top 10` + `Top 20`
) %>%
filter(
!is.na(top_20) & top_20 > 7
) %>% group_by(year, Pos, ten) %>% summarize(
count = sum(top_20)
) %>% ungroup() %>% complete(
year, Pos, ten, fill = list(count = 0)
)
top_30_consistency <- week_consistency %>% spread(
key = ten_finish, value = count, fill = 0
) %>% mutate(
top_30 = `Top 10` + `Top 20` + `Top 30`
) %>%
filter(
!is.na(top_30) & top_30 > 7
) %>% group_by(year, Pos, ten) %>% summarize(
count = sum(top_30)
) %>% ungroup() %>% complete(
year, Pos, ten, fill = list(count = 0)
)
# consistency graphs ========================================================================================
top_10_consistency %>% filter(
Pos %in% c('RB', 'WR')
) %>% ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Players'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+ggtitle(
'Count of Players with 8+ Top Ten Games'
)+ggsave(
'graphs/top ten consistency_rb_wr.png', width = 13, height = 6
)
top_20_consistency %>% filter(
Pos %in% c('RB', 'WR')
) %>% ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Players'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+ggtitle(
'Count of Players with 8+ Top 20 Games'
)+ggsave(
'graphs/top 20 consistency_rb_wr.png', width = 13, height = 6
)
top_30_consistency %>% filter(
Pos %in% c('RB', 'WR')
) %>% ggplot(aes(ten, count))+
geom_col(aes(fill = Pos), position = 'dodge')+
facet_grid(year ~ .)+
theme_bw()+
scale_y_continuous(
name = 'Count of Players'
)+
scale_x_discrete(
name = 'ESPN Ranks'
)+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+ggtitle(
'Count of Players with 8+ Top 30 Games'
)+ggsave(
'graphs/top 30 consistency_rb_wr.png', width = 13, height = 6
)
# point distribution of weekly top finishes ================================================================
weekly_rankings %>% filter(
Position %in% c('WR', 'RB') & finish < 31
) %>% ggplot(aes(Points))+
geom_density(aes(fill = Position), alpha = .2)+
facet_grid(year ~ .)+
theme_bw()+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
ggtitle('Distribution of Points Scored - Top 30')+
ggsave(
'graphs/weekly point distribution_top30_rb_wr.png', width = 13, height = 6
)
weekly_rankings %>% filter(
Position %in% c('WR', 'RB') & finish < 61
) %>% ggplot(aes(Points))+
geom_density(aes(fill = Position), alpha = .2)+
facet_grid(year ~ .)+
theme_bw()+
scale_fill_manual(
name = 'Position',
values = c(
'RB' = 'dodgerblue4',
'WR' = 'tomato3'
)
)+
ggtitle('Distribution of Points Scored - Top 60')+
ggsave(
'graphs/weekly point distribution_top60_rb_wr.png', width = 13, height = 6
)
# joining weekly projections to weekly finish data =============================================================
|
## Library dependencies and plot theme --------------------------------------------
library(deSolve)
library(trust)
library(parallel)
library(ggplot2)
library(ggthemes)
library(cOde)
library(dMod)
# Wrap ggplot2's constructors so every plot produced by this template gets
# the 'few' theme and colour-blind-safe palettes by default.
# NOTE(review): these deliberately shadow ggplot2::ggplot()/qplot() for the
# rest of the session; the originals remain reachable via ggplot2::.
ggplot <- function(...) ggplot2::ggplot(...) + theme_few() + scale_color_colorblind() + scale_fill_colorblind()
qplot <- function(...) ggplot2::qplot(...) + theme_few() + scale_color_colorblind() + scale_fill_colorblind()
## Model Definition ------------------------------------------------------
# Read in model csv
reactionlist <- read.csv("topology.csv")
# Translate data.frame into equations
f <- generateEquations(reactionlist)
# Define new observables based on ODE states
observables <- c(
# y1 = "s*x1 + off"
)
# Set list of forcings
forcings <- c(
# "u1", "u2", "u3", ...
)
# List of fixed parameters which are known beforehand
fixed <- c(
# "fixed1", "fixed2", ...
)
# Add observable ODEs to the original ODEs or use an observation function
# Choose one of the three options, or combine them
f <- variableTransformation(observables, f)
f <- addObservable(observables, f)
g <- Y(observables, f)
# Generate the model C files, compile them and return a list with func and extended.
model0 <- generateModel(f, fixed = fixed, forcings = forcings, jacobian = "inz.lsodes")
## Parameter Transformations -------------------------------------------
# Define inner parameters (parameters occurring in the equations except forcings)
# Add names(observables) if addObservables(observables, f) is used
innerpars <- getSymbols(c(f, names(f), observables), exclude=c(forcings, "time"))
names(innerpars) <- innerpars
# Define additional parameter constraints, e.g. steady-state conditions
# Parameters (left-hand side) are replaced in the right-hand side of consecutive lines by resolveRecurrence()
constraints <- resolveRecurrence(c(
#p1 = "p2 + p3",
#p4 = "p1*p5"
))
# Build up a parameter transformation (constraints, log-transform, etc.)
# Start with replacing initial value parameters of the observables
trafo <- replaceSymbols(names(observables), observables, innerpars)
# Then employ the other parameter constraints
trafo <- replaceSymbols(names(constraints), constraints, trafo)
# Then do a log-transform of all parameters (if defined as positive numbers)
trafo <- replaceSymbols(innerpars, paste0("exp(log", innerpars, ")"), trafo)
## Specify different conditions -----------------------------------------------------
conditions <- c(
#"condition1", "condition2", ...
)
# Set condition-specific parameter transformations and generate p2p function
trafoL <- lapply(conditions, function(con) trafo); names(trafoL) <- conditions
specific <- c("")
trafoL <- lapply(conditions, function(con) {
replaceSymbols(specific, paste(specific, con, sep="_"), trafoL[[con]])
}); names(trafoL) <- conditions
pL <- lapply(conditions, function(con) P(trafoL[[con]])); names(pL) <- conditions
# Set different forcings per condition
timesF <- seq(0, 100, by=0.1)
uL <- list(
data.frame(name = "u1", time = timesF, value = 1*dnorm(timesF, 0, 5)),
data.frame(name = "u2", time = timesF, value = 3*dnorm(timesF, 0, 5)),
data.frame(name = "u3", time = timesF, value = 8*dnorm(timesF, 0, 5))
); names(uL) <- conditions
# Specify prediction functions for the different conditions (depends on different forces
# but not on different parameter transformations)
xL <- lapply(conditions, function(con) Xs(model0$func, model0$extended, uL[[con]])); names(xL) <- conditions
# Function for the total model prediction, returns a list of predictions (choose one of
# the two possibilities)
# Variant 1: raw ODE prediction per condition.
# Args: times   - numeric vector of prediction times
#       pouter  - named vector of outer (log-scale) parameters
#       fixed   - named vector of parameters held fixed (optional)
#       ...     - passed through to the condition-specific predictor xL[[cond]]
# Returns a list of predictions, one element per condition.
x <- function(times, pouter, fixed=NULL, ...) {
out <- lapply(conditions, function(cond) xL[[cond]](times, pL[[cond]](pouter, fixed), ...))
names(out) <- conditions
return(out)
}
# Variant 2: same as variant 1, but additionally applies the observation
# function g() to each prediction (attach = TRUE keeps the ODE states
# alongside the observables). Defining it second means this variant is the
# one in effect; delete one of the two as the template comment instructs.
x <- function(times, pouter, fixed=NULL, ...) {
out <- lapply(conditions, function(cond) {
pinner <- pL[[cond]](pouter, fixed)
prediction <- xL[[cond]](times, pinner, ...)
observation <- g(prediction, pinner, attach = TRUE)
return(observation)
}); names(out) <- conditions
return(out)
}
## Data ----------------------------------------------------------------------
datasheet <- read.table("datafile.csv") # with columns condition, name, time, value, sigma
data <- lapply(conditions, function(mycondition) subset(datasheet, condition == mycondition))
names(data) <- conditions
## Objective Functions -------------------------------------------------------
# Data times
timesD <- unique(sort(unlist(sapply(data, function(d) d$time))))
# Initalize parameters
outerpars <- getSymbols(do.call(c, trafoL[conditions]))
prior <- rep(0, length(outerpars)); names(prior) <- outerpars
pouter <- rnorm(length(prior), prior, 1); names(pouter) <- outerpars
# Objective function for trust()
# Computes the total weighted residual sum of squares of the model
# prediction against the data, plus an L2 prior penalty.
# Args: pouter - named vector of outer parameters being optimized
#       fixed  - named vector of parameters held fixed (optional)
#       deriv  - also compute derivatives (needed by the trust optimizer)
# Returns the combined objective (value/gradient/hessian via dMod's "+").
obj <- function(pouter, fixed=NULL, deriv=TRUE) {
prediction <- x(timesD, pouter, fixed = fixed, deriv = deriv)
out <- lapply(names(data), function(cn) wrss(res(data[[cn]], prediction[[cn]])))
# Working with weak prior (helps avoiding runaway solutions of the optimization problem)
cOuter <- constraintL2(pouter, prior, sigma = 10)
cOuter + Reduce("+", out)
}
## Howto proceed -------------------------------------------------
# Predicting and plotting
times <- seq(min(timesD), max(timesD), len=100)
prediction <- x(times, pouter)
plotPrediction(prediction)
plotPrediction(prediction, name %in% names(observables))
# Fitting
plotData(data)
myfit <- trust(obj, pouter, rinit=1, rmax=10, iterlim=500)
prediction <- x(times, myfit$argument)
plotCombined(prediction, data)
plotCombined(prediction, data, name%in%names(observables))
plotCombined(prediction, data, name%in%names(observables)) + facet_grid(name~condition, scales="free")
plotObjective(myfit)
# Fitting from random positions
center <- pouter
sink("output.txt")
fitlist <- mclapply(1:100, function(i) {
deviation <- rnorm(length(center), 0, 1)
pars <- center + deviation
out <- NULL
myfit <- try(trust(obj, pars, rinit=1, rmax=10, iterlim=1000), silent=TRUE)
if(!inherits(myfit, "try-error")) {
out <- data.frame(index = i,
chisquare = myfit$value,
converged = myfit$converged,
iterations = myfit$iterations,
as.data.frame(as.list(myfit$argument)))
cat("out", i, myfit$value, myfit$converged, myfit$iterations, "\n")
}
return(out)
}, mc.cores=24, mc.preschedule=FALSE)
sink()
fitlist <- do.call(rbind, fitlist[sapply(fitlist, class) == "data.frame"])
fitlist <- fitlist[order(fitlist$chisquare),]
save(fitlist, file="fitlist.rda")
bestfit <- unlist(fitlist[1,-(1:4)])
qplot(y = fitlist$chisquare)
# Profile likelihood
bestfit <- myfit$argument
profiles.approx <- do.call(c, mclapply(names(bestfit), function(n) profile.trust(obj, bestfit, n, limits=c(-3, 3), algoControl = list(gamma = 0)), mc.cores=4))
profiles.exact <- do.call(c, mclapply(names(bestfit), function(n) profile.trust(obj, bestfit, n, limits=c(-3, 3), algoControl = list(gamma = 0, reoptimize = TRUE), optControl = list(iterlim = 10)), mc.cores=4))
plotProfile(profiles.approx, profiles.exact)
plotPaths(profiles.approx[1])
plotPaths(profiles.approx[c(1,3)]) | /dMod/inst/templates/R2CTemplate.R | no_license | ingted/R-Examples | R | false | false | 7,260 | r | ## Library dependencies and plot theme --------------------------------------------
library(deSolve)
library(trust)
library(parallel)
library(ggplot2)
library(ggthemes)
library(cOde)
library(dMod)
ggplot <- function(...) ggplot2::ggplot(...) + theme_few() + scale_color_colorblind() + scale_fill_colorblind()
qplot <- function(...) ggplot2::qplot(...) + theme_few() + scale_color_colorblind() + scale_fill_colorblind()
## Model Definition ------------------------------------------------------
# Read in model csv
reactionlist <- read.csv("topology.csv")
# Translate data.frame into equations
f <- generateEquations(reactionlist)
# Define new observables based on ODE states
observables <- c(
# y1 = "s*x1 + off"
)
# Set list of forcings
forcings <- c(
# "u1", "u2", "u3", ...
)
# List of fixed parameters which are known beforehand
fixed <- c(
# "fixed1", "fixed2", ...
)
# Add observable ODEs to the original ODEs or use an observation function
# Choose one of the three options, or combine them
f <- variableTransformation(observables, f)
f <- addObservable(observables, f)
g <- Y(observables, f)
# Generate the model C files, compile them and return a list with func and extended.
model0 <- generateModel(f, fixed = fixed, forcings = forcings, jacobian = "inz.lsodes")
## Parameter Transformations -------------------------------------------
# Define inner parameters (parameters occurring in the equations except forcings)
# Add names(observables) if addObservables(observables, f) is used
innerpars <- getSymbols(c(f, names(f), observables), exclude=c(forcings, "time"))
names(innerpars) <- innerpars
# Define additional parameter constraints, e.g. steady-state conditions
# Parameters (left-hand side) are replaced in the right-hand side of consecutive lines by resolveRecurrence()
constraints <- resolveRecurrence(c(
#p1 = "p2 + p3",
#p4 = "p1*p5"
))
# Build up a parameter transformation (constraints, log-transform, etc.)
# Start with replacing initial value parameters of the observables
trafo <- replaceSymbols(names(observables), observables, innerpars)
# Then employ the other parameter constraints
trafo <- replaceSymbols(names(constraints), constraints, trafo)
# Then do a log-transform of all parameters (if defined as positive numbers)
trafo <- replaceSymbols(innerpars, paste0("exp(log", innerpars, ")"), trafo)
## Specify different conditions -----------------------------------------------------
conditions <- c(
#"condition1", "condition2", ...
)
# Set condition-specific parameter transformations and generate p2p function
trafoL <- lapply(conditions, function(con) trafo); names(trafoL) <- conditions
specific <- c("")
trafoL <- lapply(conditions, function(con) {
replaceSymbols(specific, paste(specific, con, sep="_"), trafoL[[con]])
}); names(trafoL) <- conditions
pL <- lapply(conditions, function(con) P(trafoL[[con]])); names(pL) <- conditions
# Set different forcings per condition
timesF <- seq(0, 100, by=0.1)
uL <- list(
data.frame(name = "u1", time = timesF, value = 1*dnorm(timesF, 0, 5)),
data.frame(name = "u2", time = timesF, value = 3*dnorm(timesF, 0, 5)),
data.frame(name = "u3", time = timesF, value = 8*dnorm(timesF, 0, 5))
); names(uL) <- conditions
# Specify prediction functions for the different conditions (depends on different forces
# but not on different parameter transformations)
xL <- lapply(conditions, function(con) Xs(model0$func, model0$extended, uL[[con]])); names(xL) <- conditions
# Function for the total model prediction, returns a list of predictions (choose one of
# the two possibilities)
x <- function(times, pouter, fixed=NULL, ...) {
out <- lapply(conditions, function(cond) xL[[cond]](times, pL[[cond]](pouter, fixed), ...))
names(out) <- conditions
return(out)
}
x <- function(times, pouter, fixed=NULL, ...) {
out <- lapply(conditions, function(cond) {
pinner <- pL[[cond]](pouter, fixed)
prediction <- xL[[cond]](times, pinner, ...)
observation <- g(prediction, pinner, attach = TRUE)
return(observation)
}); names(out) <- conditions
return(out)
}
## Data ----------------------------------------------------------------------
datasheet <- read.table("datafile.csv") # with columns condition, name, time, value, sigma
data <- lapply(conditions, function(mycondition) subset(datasheet, condition == mycondition))
names(data) <- conditions
## Objective Functions -------------------------------------------------------
# Data times
timesD <- unique(sort(unlist(sapply(data, function(d) d$time))))
# Initalize parameters
outerpars <- getSymbols(do.call(c, trafoL[conditions]))
prior <- rep(0, length(outerpars)); names(prior) <- outerpars
pouter <- rnorm(length(prior), prior, 1); names(pouter) <- outerpars
# Objective function for trust()
obj <- function(pouter, fixed=NULL, deriv=TRUE) {
prediction <- x(timesD, pouter, fixed = fixed, deriv = deriv)
out <- lapply(names(data), function(cn) wrss(res(data[[cn]], prediction[[cn]])))
# Working with weak prior (helps avoiding runaway solutions of the optimization problem)
cOuter <- constraintL2(pouter, prior, sigma = 10)
cOuter + Reduce("+", out)
}
## Howto proceed -------------------------------------------------
# Predicting and plotting
times <- seq(min(timesD), max(timesD), len=100)
prediction <- x(times, pouter)
plotPrediction(prediction)
plotPrediction(prediction, name %in% names(observables))
# Fitting
plotData(data)
myfit <- trust(obj, pouter, rinit=1, rmax=10, iterlim=500)
prediction <- x(times, myfit$argument)
plotCombined(prediction, data)
plotCombined(prediction, data, name%in%names(observables))
plotCombined(prediction, data, name%in%names(observables)) + facet_grid(name~condition, scales="free")
plotObjective(myfit)
# Fitting from random positions
center <- pouter
sink("output.txt")
fitlist <- mclapply(1:100, function(i) {
deviation <- rnorm(length(center), 0, 1)
pars <- center + deviation
out <- NULL
myfit <- try(trust(obj, pars, rinit=1, rmax=10, iterlim=1000), silent=TRUE)
if(!inherits(myfit, "try-error")) {
out <- data.frame(index = i,
chisquare = myfit$value,
converged = myfit$converged,
iterations = myfit$iterations,
as.data.frame(as.list(myfit$argument)))
cat("out", i, myfit$value, myfit$converged, myfit$iterations, "\n")
}
return(out)
}, mc.cores=24, mc.preschedule=FALSE)
sink()
fitlist <- do.call(rbind, fitlist[sapply(fitlist, class) == "data.frame"])
fitlist <- fitlist[order(fitlist$chisquare),]
save(fitlist, file="fitlist.rda")
bestfit <- unlist(fitlist[1,-(1:4)])
qplot(y = fitlist$chisquare)
# Profile likelihood
bestfit <- myfit$argument
profiles.approx <- do.call(c, mclapply(names(bestfit), function(n) profile.trust(obj, bestfit, n, limits=c(-3, 3), algoControl = list(gamma = 0)), mc.cores=4))
profiles.exact <- do.call(c, mclapply(names(bestfit), function(n) profile.trust(obj, bestfit, n, limits=c(-3, 3), algoControl = list(gamma = 0, reoptimize = TRUE), optControl = list(iterlim = 10)), mc.cores=4))
plotProfile(profiles.approx, profiles.exact)
plotPaths(profiles.approx[1])
plotPaths(profiles.approx[c(1,3)]) |
# NOTE(review): rm(list = ls()) and an absolute setwd() path are discouraged
# in scripts -- they wipe the user's workspace and break portability;
# consider an RStudio project / here::here() instead. Kept as-is.
rm(list = ls())
library(lme4)
library(MuMIn)
library(ggeffects)
# Run GLMM
setwd("D:/PhD/Fourth chapter/Congreso SEO/Data_SEO")
data <- read.csv ("covariates.csv", header = TRUE)
# Check correlation
data <- data[complete.cases(data), ] # quick fix: drop incomplete rows (TODO: revisit NA handling)
correlation <- cor(data[ ,c(8:21)])
correlation[which(correlation > 0.5 & correlation < 1)] # Correlated: Dist carreteras - Dist nucleos urbanos (0.6)
# Frut secano - Olivo y almendro (the other correlated pair)
names(data) # Remove: Dist nucleos urbanos, olivo y almendro
# Keep an unscaled copy without the correlated columns, then centre/scale
# the numeric covariates (columns 8:18) for the GLMM.
data_unscaled <- data[ , -c(10, 19, 20)]
data[ ,c(8:18)] <- scale(data[ ,c(8:18)], center = TRUE, scale = TRUE)
#### Separate by periods ####
ID_p <- unique(data$ID_p)
ID_p[grep("p1", ID_p)]
data_p1 <- data[which(data$ID_p %in% ID_p[grep("p1", ID_p)]), ]
data_p2 <- data[which(data$ID_p %in% ID_p[grep("p2", ID_p)]), ]
data_p3 <- data[which(data$ID_p %in% ID_p[grep("p3", ID_p)]), ]
# ---- Cereal ----
# Predict values of model 1
# Period 1
setwd("C:/Users/ana.sanz/Documents/PhD_20_sept/Fourth chapter/Data/Results")
load("dredge_p1.RData")
topmodels_p1 <- get.models(models_p1,subset = delta < 2)
avg_p1 <- model.avg(topmodels_p1)
newdata <- as.data.frame(lapply(lapply(data_p1[, -c(1:7,13, 14)], mean), rep, 25))
newdata$cereal <- seq(min(data_p1$cereal), max(data_p1$cereal), length.out = 25)
pred <- predict(avg_p1, newdata = newdata, type = "response", se.fit = TRUE )
lcl <- pred$fit - 1.96*pred$se.fit
lch <- pred$fit + 1.96*pred$se.fit
plot(pred$fit ~ newdata$cereal, ylim = c(-1, 1), main = "p1", type = "l")
polygon( x = c(newdata$cereal, rev(newdata$cereal)),
y = c(lcl, rev(lch)),
col = adjustcolor(c("grey"),alpha.f = 0.6),
border = NA)
newx <- seq(min(newdata$cereal),max(newdata$cereal), length.out = 25)
lines(newx, lcl, col = "red")
lines(newx, lch, col = "red")
m <- ggpredict(avg_p1, "cereal", type = "fe")
# Period 2
setwd("D:/PhD/Fourth chapter/Congreso SEO/Data_SEO/Results")
load("dredge_p2.RData")
topmodels_p2 <- get.models(models_p2,subset = delta < 2)
avg_p2 <- model.avg(topmodels_p2)
newdata <- as.data.frame(lapply(lapply(data_p2[, -c(1:7,13, 14)], mean), rep, 25))
newdata$cereal <- seq(min(data_p2$cereal), max(data_p2$cereal), length.out = 25)
newdata$Logger_ID <- rep(unique(data_p2$Logger_ID), 5)
pred <- predict(avg_p2, newdata = newdata, type = "response", re.form = NA ) # It doesn't work
plot(pred ~ newdata$cereal, ylim = c(0, 0.3), main = "p2", type = "l")
# Try with full model (best model)
p2_3 <- glmer(used ~ dist_caminos + dist_carreteras + pendiente + # Without forestal, frut regadio and herb_secano: It CONVERGES
pastos + cereal + barbecho + herb_regadio + frut_secano + (1|Logger_ID),
family = binomial (link = "logit"),
data = data_p2)
summary(p2_3)
pred2 <- predict(p2_3, newdata = newdata, type = "response", re.form = NA)
plot(pred2 ~ newdata$cereal, ylim = c(0, 0.1), main = "p2", type = "l")
m <- ggpredict(p2_3, "cereal", type = "fe")
plot(m)
m2 <- ggpredict(p2_3, "cereal", type = "re", back.transform = TRUE)
plot(m2)
pred3 <- predict(p2_3, type = "response", re.form = NA)
cereal <- seq(min(data_p2$cereal), max(data_p2$cereal), length.out = 23015)
plot(pred3 ~ cereal, ylim = c(0, 0.1), main = "p2")
# This doesn't work because there are too many datapoints, it has to be with the new data frame
# Try with other model that converges
p2_4 <- glmer(used ~ dist_caminos +
#dist_carreteras +
pendiente + # Without forestal, frut regadio and herb_secano: It CONVERGES
pastos + cereal + barbecho + herb_regadio + frut_secano + (1|Logger_ID),
family = binomial (link = "logit"),
data = data_p2)
m2 <- ggpredict(p2_4, "cereal", type = "re")
plot(m2)
summary(p2_4)
# ---- Pendiente ----
# Period 1
setwd("C:/Users/ana.sanz/Documents/PhD_20_sept/Fourth chapter/Data/Results")
setwd("C:/Users/Ana/Documents/PhD/PhD_12_Nov/Fourth chapter/Data/Results")
load("dredge_p1.RData")
topmodels_p1 <- get.models(models_p1,subset = delta < 2)
avg_p1 <- model.avg(topmodels_p1)
newdata_pend<- as.data.frame(lapply(lapply(data_p1[, -c(1:7,11, 13)], mean), rep, 25))
newdata_pend$pendiente <- seq(min(data_p1$pendiente), max(data_p1$pendiente), length.out = 25)
pred <- predict(avg_p1, newdata = newdata_pend, type = "response", se.fit = TRUE )
lcl <- pred$fit - 1.96*pred$se.fit
lch <- pred$fit + 1.96*pred$se.fit
plot(pred$fit ~ newdata_pend$pendiente, ylim = c(0, 0.05), main = "p1", type = "l")
polygon( x = c(newdata_pend$pendiente, rev(newdata_pend$pendiente)),
y = c(lcl, rev(lch)),
col = adjustcolor(c("grey"),alpha.f = 0.6),
border = NA)
newx <- seq(min(newdata_pend$pendiente),max(newdata_pend$pendiente), length.out = 25)
lines(newx, lcl, col = "red")
lines(newx, lch, col = "red")
| /Ch. 4/Análisis SEO/4.GLMM_results.r | no_license | anasanz/MyScripts | R | false | false | 4,906 | r |
rm(list = ls())
library(lme4)
library(MuMIn)
library(ggeffects)
# Run GLMM
setwd("D:/PhD/Fourth chapter/Congreso SEO/Data_SEO")
data <- read.csv ("covariates.csv", header = TRUE)
# Check correlation
data <- data[complete.cases(data), ] # I don't know but I delete because I don't have time
correlation <- cor(data[ ,c(8:21)])
correlation[which(correlation > 0.5 & correlation < 1)] # Correlated: Dist carreteras - Dist nucleos urbanos (0.6)
# Frut secano - Olivo y almendro
names(data) # Quitar: Dist nucleos urbanos, olivo y almendro
data_unscaled <- data[ , -c(10, 19, 20)]
data[ ,c(8:18)] <- scale(data[ ,c(8:18)], center = TRUE, scale = TRUE)
#### Separate by periods ####
ID_p <- unique(data$ID_p)
ID_p[grep("p1", ID_p)]
data_p1 <- data[which(data$ID_p %in% ID_p[grep("p1", ID_p)]), ]
data_p2 <- data[which(data$ID_p %in% ID_p[grep("p2", ID_p)]), ]
data_p3 <- data[which(data$ID_p %in% ID_p[grep("p3", ID_p)]), ]
# ---- Cereal ----
# Predict values of model 1
# Period 1
setwd("C:/Users/ana.sanz/Documents/PhD_20_sept/Fourth chapter/Data/Results")
load("dredge_p1.RData")
topmodels_p1 <- get.models(models_p1,subset = delta < 2)
avg_p1 <- model.avg(topmodels_p1)
newdata <- as.data.frame(lapply(lapply(data_p1[, -c(1:7,13, 14)], mean), rep, 25))
newdata$cereal <- seq(min(data_p1$cereal), max(data_p1$cereal), length.out = 25)
pred <- predict(avg_p1, newdata = newdata, type = "response", se.fit = TRUE )
lcl <- pred$fit - 1.96*pred$se.fit
lch <- pred$fit + 1.96*pred$se.fit
plot(pred$fit ~ newdata$cereal, ylim = c(-1, 1), main = "p1", type = "l")
polygon( x = c(newdata$cereal, rev(newdata$cereal)),
y = c(lcl, rev(lch)),
col = adjustcolor(c("grey"),alpha.f = 0.6),
border = NA)
newx <- seq(min(newdata$cereal),max(newdata$cereal), length.out = 25)
lines(newx, lcl, col = "red")
lines(newx, lch, col = "red")
m <- ggpredict(avg_p1, "cereal", type = "fe")
# Period 2
setwd("D:/PhD/Fourth chapter/Congreso SEO/Data_SEO/Results")
load("dredge_p2.RData")
topmodels_p2 <- get.models(models_p2,subset = delta < 2)
avg_p2 <- model.avg(topmodels_p2)
newdata <- as.data.frame(lapply(lapply(data_p2[, -c(1:7,13, 14)], mean), rep, 25))
newdata$cereal <- seq(min(data_p2$cereal), max(data_p2$cereal), length.out = 25)
newdata$Logger_ID <- rep(unique(data_p2$Logger_ID), 5)
pred <- predict(avg_p2, newdata = newdata, type = "response", re.form = NA ) # It doesn't work
plot(pred ~ newdata$cereal, ylim = c(0, 0.3), main = "p2", type = "l")
# Try with full model (best model)
p2_3 <- glmer(used ~ dist_caminos + dist_carreteras + pendiente + # Without forestal, frut regadio and herb_secano: It CONVERGES
pastos + cereal + barbecho + herb_regadio + frut_secano + (1|Logger_ID),
family = binomial (link = "logit"),
data = data_p2)
summary(p2_3)
pred2 <- predict(p2_3, newdata = newdata, type = "response", re.form = NA)
plot(pred2 ~ newdata$cereal, ylim = c(0, 0.1), main = "p2", type = "l")
m <- ggpredict(p2_3, "cereal", type = "fe")
plot(m)
m2 <- ggpredict(p2_3, "cereal", type = "re", back.transform = TRUE)
plot(m2)
pred3 <- predict(p2_3, type = "response", re.form = NA)
cereal <- seq(min(data_p2$cereal), max(data_p2$cereal), length.out = 23015)
plot(pred3 ~ cereal, ylim = c(0, 0.1), main = "p2")
# This doesn't work because there are too many datapoints, it has to be with the new data frame
# Try with other model that converges
p2_4 <- glmer(used ~ dist_caminos +
#dist_carreteras +
pendiente + # Without forestal, frut regadio and herb_secano: It CONVERGES
pastos + cereal + barbecho + herb_regadio + frut_secano + (1|Logger_ID),
family = binomial (link = "logit"),
data = data_p2)
m2 <- ggpredict(p2_4, "cereal", type = "re")
plot(m2)
summary(p2_4)
# ---- Pendiente ----
# Period 1
setwd("C:/Users/ana.sanz/Documents/PhD_20_sept/Fourth chapter/Data/Results")
setwd("C:/Users/Ana/Documents/PhD/PhD_12_Nov/Fourth chapter/Data/Results")
load("dredge_p1.RData")
topmodels_p1 <- get.models(models_p1,subset = delta < 2)
avg_p1 <- model.avg(topmodels_p1)
newdata_pend<- as.data.frame(lapply(lapply(data_p1[, -c(1:7,11, 13)], mean), rep, 25))
newdata_pend$pendiente <- seq(min(data_p1$pendiente), max(data_p1$pendiente), length.out = 25)
pred <- predict(avg_p1, newdata = newdata_pend, type = "response", se.fit = TRUE )
lcl <- pred$fit - 1.96*pred$se.fit
lch <- pred$fit + 1.96*pred$se.fit
plot(pred$fit ~ newdata_pend$pendiente, ylim = c(0, 0.05), main = "p1", type = "l")
polygon( x = c(newdata_pend$pendiente, rev(newdata_pend$pendiente)),
y = c(lcl, rev(lch)),
col = adjustcolor(c("grey"),alpha.f = 0.6),
border = NA)
newx <- seq(min(newdata_pend$pendiente),max(newdata_pend$pendiente), length.out = 25)
lines(newx, lcl, col = "red")
lines(newx, lch, col = "red")
|
# load and save Shiny map data
library(ggplot2)
library(dplyr)
library(usmap)
library(maps)
library(sf)
library(leaflet)
library(leaflet.extras)
library(leaflet.esri)
library(RColorBrewer)
library(htmlwidgets)
library(htmltools)
library(geojsonio)
library(tigris)
state_list = c('AL','AZ','AR','CA','CO','CT','DE','DC','FL','GA','ID',
'IL','IN','IA','KS','KY','LA','ME','MD','MA','MI','MN','MS','MO','MT',
'NE','NV','NH','NJ','NM','NY','NC','ND','OH','OK','OR','PA','RI','SC',
'SD','TN','TX','UT','VT','VA','WA','WV','WI','WY')
usm = map_data('usa')
usstates = map_data("state")
states = unique(usstates$region)
############################################################
############### MEAN POLICY COST BY STATE ###############
############################################################
for (i in 1:length(state_list)){
eval(parse(text = paste("load('data/", state_list[i], ".Rda')", sep = '')))
eval(parse(text = paste("DD = ", state_list[i], sep = '')))
eval(parse(text = paste("remove(", state_list[i],")", sep = "")))
DD = mean(DD$policycost)
if (i == 1){
df = data.frame(states[i], DD)
df['state'] = state_list[i]
colnames(df) = c('region', 'mean_cost', 'state')
} else {
dfdf = data.frame(states[i],DD)
dfdf['state'] = state_list[i]
colnames(dfdf) = c('region', 'mean_cost', 'state')
df = rbind(df,dfdf)
}
print(i)
}
remove(states)
states = states(cb = T)
states = states %>% filter(STUSPS %in% state_list)
states_merged_sb = geo_join(states, df, "STUSPS", "state")
save(states_merged_sb, file = 'data/states_merged_sb.Rda')
| /not used/shiny_save_map_data.R | no_license | LenaChretien/National_flood_insurance | R | false | false | 1,724 | r | # load and save Shiny map data
library(ggplot2)
library(dplyr)
library(usmap)
library(maps)
library(sf)
library(leaflet)
library(leaflet.extras)
library(leaflet.esri)
library(RColorBrewer)
library(htmlwidgets)
library(htmltools)
library(geojsonio)
library(tigris)
state_list = c('AL','AZ','AR','CA','CO','CT','DE','DC','FL','GA','ID',
'IL','IN','IA','KS','KY','LA','ME','MD','MA','MI','MN','MS','MO','MT',
'NE','NV','NH','NJ','NM','NY','NC','ND','OH','OK','OR','PA','RI','SC',
'SD','TN','TX','UT','VT','VA','WA','WV','WI','WY')
usm = map_data('usa')
usstates = map_data("state")
states = unique(usstates$region)
############################################################
############### MEAN POLICY COST BY STATE ###############
############################################################
for (i in 1:length(state_list)){
eval(parse(text = paste("load('data/", state_list[i], ".Rda')", sep = '')))
eval(parse(text = paste("DD = ", state_list[i], sep = '')))
eval(parse(text = paste("remove(", state_list[i],")", sep = "")))
DD = mean(DD$policycost)
if (i == 1){
df = data.frame(states[i], DD)
df['state'] = state_list[i]
colnames(df) = c('region', 'mean_cost', 'state')
} else {
dfdf = data.frame(states[i],DD)
dfdf['state'] = state_list[i]
colnames(dfdf) = c('region', 'mean_cost', 'state')
df = rbind(df,dfdf)
}
print(i)
}
remove(states)
states = states(cb = T)
states = states %>% filter(STUSPS %in% state_list)
states_merged_sb = geo_join(states, df, "STUSPS", "state")
save(states_merged_sb, file = 'data/states_merged_sb.Rda')
|
\dontrun{
# need to set a font containing these values
show_shapes(tableau_shape_pal()(5))
}
| /library/ggthemes/examples/ex-tableau_shape_pal.R | permissive | mayousif/Turbidity-Cleaner | R | false | false | 97 | r | \dontrun{
# need to set a font containing these values
show_shapes(tableau_shape_pal()(5))
}
|
\alias{gdkScreenGetDefault}
\name{gdkScreenGetDefault}
\title{gdkScreenGetDefault}
\description{Gets the default screen for the default display. (See
\code{\link{gdkDisplayGetDefault}}).}
\usage{gdkScreenGetDefault()}
\details{Since 2.2}
\value{[\code{\link{GdkScreen}}] a \code{\link{GdkScreen}}, or \code{NULL} if there is no default display. \emph{[ \acronym{transfer none} ]}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gdkScreenGetDefault.Rd | no_license | lawremi/RGtk2 | R | false | false | 454 | rd | \alias{gdkScreenGetDefault}
\name{gdkScreenGetDefault}
\title{gdkScreenGetDefault}
\description{Gets the default screen for the default display. (See
\code{\link{gdkDisplayGetDefault}}).}
\usage{gdkScreenGetDefault()}
\details{Since 2.2}
\value{[\code{\link{GdkScreen}}] a \code{\link{GdkScreen}}, or \code{NULL} if there is no default display. \emph{[ \acronym{transfer none} ]}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
library(shiny)
library(plotly)
shinyUI(fluidPage(
titlePanel("Animal species structure and diversity"),
checkboxInput(inputId = "large",
label = strong("Show interspecies values"),
value = FALSE),
checkboxInput(inputId = "pics",
label = strong("Show density estimate"),
value = FALSE),
),
mainPanel(
plotlyOutput("popphyl_data")
)
))
| /UI.R | no_license | loire/interactive_graph | R | false | false | 430 | r | library(shiny)
library(plotly)
shinyUI(fluidPage(
titlePanel("Animal species structure and diversity"),
checkboxInput(inputId = "large",
label = strong("Show interspecies values"),
value = FALSE),
checkboxInput(inputId = "pics",
label = strong("Show density estimate"),
value = FALSE),
),
mainPanel(
plotlyOutput("popphyl_data")
)
))
|
context("intercept")
test_that("intercept issue is solved", {
set.seed(1)
n.per.group <- 4
n <- n.per.group * 2
m <- 100
condition <- factor(rep(letters[1:2], each=n.per.group))
w <- rnorm(n)
# with this design, no sample has the intercept coefficient alone
x <- model.matrix(~w + condition)
beta.sd <- 2
beta.cond <- rnorm(m, 0, beta.sd)
beta.intercept <- runif(m, 2, 6)
beta.mat <- cbind(beta.intercept, rep(0, m), beta.cond)
mu <- exp(t(x %*% t(beta.mat)))
Y <- matrix(rnbinom(m*n, mu=mu, size=1/.1), ncol=n)
param <- matrix(0.1, nrow=m, ncol=1)
offset <- matrix(0, nrow=m, ncol=n)
fit <- apeglm(Y=Y, x=x, log.lik=logLikNB, offset=offset, param=param, coef=2)
})
| /tests/testthat/test_intercept.R | no_license | azhu513/apeglm | R | false | false | 702 | r | context("intercept")
test_that("intercept issue is solved", {
set.seed(1)
n.per.group <- 4
n <- n.per.group * 2
m <- 100
condition <- factor(rep(letters[1:2], each=n.per.group))
w <- rnorm(n)
# with this design, no sample has the intercept coefficient alone
x <- model.matrix(~w + condition)
beta.sd <- 2
beta.cond <- rnorm(m, 0, beta.sd)
beta.intercept <- runif(m, 2, 6)
beta.mat <- cbind(beta.intercept, rep(0, m), beta.cond)
mu <- exp(t(x %*% t(beta.mat)))
Y <- matrix(rnbinom(m*n, mu=mu, size=1/.1), ncol=n)
param <- matrix(0.1, nrow=m, ncol=1)
offset <- matrix(0, nrow=m, ncol=n)
fit <- apeglm(Y=Y, x=x, log.lik=logLikNB, offset=offset, param=param, coef=2)
})
|
library(RQuantLib)
library(ggplot2)
library(dplyr)
rm(list=ls())
source('./ESourceRCode.R',encoding = 'UTF-8')
source('./EDataProvisionLib.R',encoding = 'UTF-8')
#switch: only for today or multiple days for skew calculation
ProcessFileName=paste("_OPChain_PreForSkew.csv",sep="")
isSkewCalc=T
#set TRUE if this opchain is for Future Option
isFOP=F
#set TRUE if this is today's new position, or (already holding position) set FALSE,
isNewPosition=T
#set TRUE if you filter the position,otherwise set FALSE
isFiltered=T
#Price is Interpolated or not
isInterpolatedPrice=F
#Saved File Name
TargetFileName=paste("_Positions_Pre_",format(Sys.time(),"%Y%b%d_%H%M%S"),".csv",sep="")
#for recording the ATMIV adjusted opchain, set isSkewCalc=F AND isFiltered=F
if(isSkewCalc){
TargetFileName=paste("_OPChain_Pos_",format(Sys.time(),"%Y%b%d_%H%M%S"),".csv",sep="")
}else if(isFiltered==F)
TargetFileName=paste("_OPChain_RECORD_",format(Sys.time(),"%Y%b%d_%H%M%S"),".csv",sep="")
#create Option Chain Container
if(!isFOP){
opchain<-makeOpchainContainer()
}else{
opchain <- makeFOPChainContainer()
cat("(:divYld_G)",divYld_G,"\n")
}
na.omit(opchain)->opchain
#inconsistent data purged
opchain %>% filter(Price!=0) -> opchain
opchain$Strike=as.numeric(opchain$Strike)
opchain$TYPE=as.numeric(opchain$TYPE)
opchain %>% filter((Strike-UDLY)*TYPE<Price) -> opchain
#spread<ASK*k
k<-0.4
opchain$Ask=as.numeric(opchain$Ask)
opchain$Bid=as.numeric(opchain$Bid)
opchain %>% filter(!((Ask-Bid)>(Ask*k))) -> opchain
rm(k)
#Implied Volatility Set
delete<-c(-1)
N<-nrow(opchain)
for(i in 1:N){
tryCatch(a<-set.IVOrig(xT=opchain[i,]),
error=function(e){
message(e)
print(i)
delete<<-c(delete,-i)
})
}
#要素を削除
(delete)
delete<-delete[-1]
(delete)
nrow(opchain)
nrow(opchain[delete,])
if(length(delete)>0)
opchain<-opchain[delete,]
nrow(opchain)
rm(delete,N,i,a)
#IVの計算とOption PriceとGreeksの設定
opchain$OrigIV<-set.IVOrig(xT=opchain)
tmp<-set.EuropeanOptionValueGreeks(opchain)
opchain$Price<-tmp$Price
opchain$Delta<-tmp$Delta
opchain$Gamma<-tmp$Gamma
opchain$Vega<-tmp$Vega
opchain$Theta<-tmp$Theta
opchain$Rho<-tmp$Rho
#opchain$Vomma<-get.EuropeanOptionVomma(opchain)
rm(tmp)
#renumber row names
rownames(opchain) <- c(1:nrow(opchain))
#not adjusted atmiv
# atmiv_na=makeNotAdjustedBySkewATMiv(opchain)
## opchain is created
opchain<-makePosition(opchain,isSkewCalc=isSkewCalc)
#just check the result before going throught the rest
if(isFiltered)
opchain_check<-filterPosition(opchain,TARGET_EXPDATE=TARGET_EXPDATE,TARGET_EXPDATE_FRONT=TARGET_EXPDATE_FRONT,TARGET_EXPDATE_BACK=TARGET_EXPDATE_BACK)
if(!isSkewCalc){
if(isNewPosition)
if(isFiltered)
if(!isFOP)
opchain<-filterPosition(opchain,TARGET_EXPDATE=TARGET_EXPDATE,TARGET_EXPDATE_FRONT=TARGET_EXPDATE_FRONT,TARGET_EXPDATE_BACK=TARGET_EXPDATE_BACK)
else
opchain<-filterPosition(opchain,TARGET_EXPDATE=TARGET_EXPDATE,TARGET_EXPDATE_FRONT=TARGET_EXPDATE_FRONT,TARGET_EXPDATE_BACK=TARGET_EXPDATE_BACK)
}
#select and sort
opchain %>% dplyr::select(Date,ExpDate,TYPE,Strike,ContactName,Position,UDLY,Price,
Delta,Gamma,Vega,Theta,Rho,OrigIV,ATMIV,IVIDX,
HowfarOOM,TimeToExpDate,Moneyness.Nm) %>%
dplyr::arrange(as.Date(Date,format="%Y/%m/%d"),as.Date(ExpDate,format="%Y/%m/%d"),desc(TYPE),Strike) -> opchain
opchain$Position<-ifelse(is.na(opchain$Position), 0, opchain$Position)
#Write to a file (RUT_Positions_Pre)
wf_<-paste(DataFiles_Path_G,Underying_Symbol_G,TargetFileName,sep="")
write.table(opchain,wf_,quote=T,row.names=F,sep=",")
rm(list=ls())
| /EDataProvision.R | no_license | Sdoof/edthrpnm | R | false | false | 3,687 | r | library(RQuantLib)
library(ggplot2)
library(dplyr)
rm(list=ls())
source('./ESourceRCode.R',encoding = 'UTF-8')
source('./EDataProvisionLib.R',encoding = 'UTF-8')
#switch: only for today or multiple days for skew calculation
ProcessFileName=paste("_OPChain_PreForSkew.csv",sep="")
isSkewCalc=T
#set TRUE if this opchain is for Future Option
isFOP=F
#set TRUE if this is today's new position, or (already holding position) set FALSE,
isNewPosition=T
#set TRUE if you filter the position,otherwise set FALSE
isFiltered=T
#Price is Interpolated or not
isInterpolatedPrice=F
#Saved File Name
TargetFileName=paste("_Positions_Pre_",format(Sys.time(),"%Y%b%d_%H%M%S"),".csv",sep="")
#for recording the ATMIV adjusted opchain, set isSkewCalc=F AND isFiltered=F
if(isSkewCalc){
TargetFileName=paste("_OPChain_Pos_",format(Sys.time(),"%Y%b%d_%H%M%S"),".csv",sep="")
}else if(isFiltered==F)
TargetFileName=paste("_OPChain_RECORD_",format(Sys.time(),"%Y%b%d_%H%M%S"),".csv",sep="")
#create Option Chain Container
if(!isFOP){
opchain<-makeOpchainContainer()
}else{
opchain <- makeFOPChainContainer()
cat("(:divYld_G)",divYld_G,"\n")
}
na.omit(opchain)->opchain
#inconsistent data purged
opchain %>% filter(Price!=0) -> opchain
opchain$Strike=as.numeric(opchain$Strike)
opchain$TYPE=as.numeric(opchain$TYPE)
opchain %>% filter((Strike-UDLY)*TYPE<Price) -> opchain
#spread<ASK*k
k<-0.4
opchain$Ask=as.numeric(opchain$Ask)
opchain$Bid=as.numeric(opchain$Bid)
opchain %>% filter(!((Ask-Bid)>(Ask*k))) -> opchain
rm(k)
#Implied Volatility Set
delete<-c(-1)
N<-nrow(opchain)
for(i in 1:N){
tryCatch(a<-set.IVOrig(xT=opchain[i,]),
error=function(e){
message(e)
print(i)
delete<<-c(delete,-i)
})
}
#要素を削除
(delete)
delete<-delete[-1]
(delete)
nrow(opchain)
nrow(opchain[delete,])
if(length(delete)>0)
opchain<-opchain[delete,]
nrow(opchain)
rm(delete,N,i,a)
#IVの計算とOption PriceとGreeksの設定
opchain$OrigIV<-set.IVOrig(xT=opchain)
tmp<-set.EuropeanOptionValueGreeks(opchain)
opchain$Price<-tmp$Price
opchain$Delta<-tmp$Delta
opchain$Gamma<-tmp$Gamma
opchain$Vega<-tmp$Vega
opchain$Theta<-tmp$Theta
opchain$Rho<-tmp$Rho
#opchain$Vomma<-get.EuropeanOptionVomma(opchain)
rm(tmp)
#renumber row names
rownames(opchain) <- c(1:nrow(opchain))
#not adjusted atmiv
# atmiv_na=makeNotAdjustedBySkewATMiv(opchain)
## opchain is created
opchain<-makePosition(opchain,isSkewCalc=isSkewCalc)
#just check the result before going throught the rest
if(isFiltered)
opchain_check<-filterPosition(opchain,TARGET_EXPDATE=TARGET_EXPDATE,TARGET_EXPDATE_FRONT=TARGET_EXPDATE_FRONT,TARGET_EXPDATE_BACK=TARGET_EXPDATE_BACK)
if(!isSkewCalc){
if(isNewPosition)
if(isFiltered)
if(!isFOP)
opchain<-filterPosition(opchain,TARGET_EXPDATE=TARGET_EXPDATE,TARGET_EXPDATE_FRONT=TARGET_EXPDATE_FRONT,TARGET_EXPDATE_BACK=TARGET_EXPDATE_BACK)
else
opchain<-filterPosition(opchain,TARGET_EXPDATE=TARGET_EXPDATE,TARGET_EXPDATE_FRONT=TARGET_EXPDATE_FRONT,TARGET_EXPDATE_BACK=TARGET_EXPDATE_BACK)
}
#select and sort
opchain %>% dplyr::select(Date,ExpDate,TYPE,Strike,ContactName,Position,UDLY,Price,
Delta,Gamma,Vega,Theta,Rho,OrigIV,ATMIV,IVIDX,
HowfarOOM,TimeToExpDate,Moneyness.Nm) %>%
dplyr::arrange(as.Date(Date,format="%Y/%m/%d"),as.Date(ExpDate,format="%Y/%m/%d"),desc(TYPE),Strike) -> opchain
opchain$Position<-ifelse(is.na(opchain$Position), 0, opchain$Position)
#Write to a file (RUT_Positions_Pre)
wf_<-paste(DataFiles_Path_G,Underying_Symbol_G,TargetFileName,sep="")
write.table(opchain,wf_,quote=T,row.names=F,sep=",")
rm(list=ls())
|
# Damon LaPoint
# DSC 450 HW5 Lecture 6
# Part 3 Ensemble
# Clear workspace
rm(list=ls())
# Clear console
cat("\014")
date()
setwd("~/Personal/School/Data Science 450 Summer 2015/Cap Stone")
# load data sets for test and predictions
test <- read.csv("data_holdout_test.csv")
#Damon LaPoint's model using feature engineering
pred_dl1 <- read.csv("prediction_holdout_dl_lr.csv")
#RJ's SVM Model
pred_dl2 <- read.csv("predictions_holdout_svm_rj.csv")
#Sandeep Pridshetti's model using Average Random Forest
pred_sp1 <- read.csv("predictions_RandomForrestAvg_holdout_Sandeep.csv")
#Marc Lauzon's model using greater than 0 features binarized and Random Forrest
pred_ml1 <- read.csv("predictions_from_holdback_testing_Marc_Lauzon.csv")
#update names
n <- names(pred_sp1)
names(pred_dl1) <- n
names(pred_dl2) <- n
# test accuracy of each data set
mean(pred_dl1$Label == test$label)
#[1] 0.9642857
mean(pred_dl2$Label == test$label)
#[1] 0.9830952
mean(pred_sp1$Label == test$label)
#[1] 0.9654762
mean(pred_ml1$Label == test$label)
#[1] 0.9552381
vote.dl1 <- c()
vote.dl2 <- c()
vote.sp1 <- c()
vote.ml1 <- c()
###############
# Loop: Takes forever to run, use Guess to run one-off tests.
range <- 5
i <- 0
f <- 0
g <- 0
h <- 0
pred.iter <- data.frame()
pred.total <- data.frame()
for(i in 0:range)
{
w.sp1 <- i
#print(w.sp1)
for(f in 0:range)
{
w.dl1 <- f
#print(w.dl1)
for(g in 0:range)
{
w.ml1 <- g
for(h in 3:range)
{
w.dl2 <- h
for(j in 1:nrow(pred_dl1))
{
if(w.sp1>0){
vote.sp1[1:w.sp1] <- pred_sp1[j, 2]}
else{
vote.sp1 <- c()}
if(w.dl1>0){
vote.dl1[1:w.dl1] <- pred_dl1[j, 2]}
else{
vote.dl1 <- c()}
if(w.ml1>0){
vote.ml1[1:w.ml1] <- pred_ml1[j, 2]}
else{
vote.ml1 <- c()}
if(w.dl2>0){
vote.dl2[1:w.dl2] <- pred_dl2[j, 2]}
else{
vote.dl2 <- c()
}
vote.tot <- c(vote.dl1, vote.dl2, vote.sp1, vote.ml1 )
vote.table <- table(vote.tot)
iter <- c(j, as.numeric(names(which.max(vote.table))))
pred.iter <- rbind(pred.iter, iter )
}
names(pred.iter) <- n
# score for pred.iter
score.iter <- mean(pred.iter$Label == test$label)
pred.total <- rbind(pred.total, c(score.iter, w.sp1, w.dl1, w.ml1, w.dl2) )
pred.iter <- data.frame()
}
}
}
}
names(pred.total) <- c("accuracy", "w.sp1", "w.dl1", "w.ml1", "w.rj1")
#################
# Guess rather than Loop
##################
w.sp1 <- 1
w.dl1 <- 2
w.ml1 <- 1
w.dl2 <- 2
# clear every time!
pred.iter <- data.frame()
for(j in 1:nrow(pred_dl1))
{
if(w.sp1>0){
vote.sp1[1:w.sp1] <- pred_sp1[j, 2]}
else{
vote.sp1 <- c()}
if(w.dl1>0){
vote.dl1[1:w.dl1] <- pred_dl1[j, 2]}
else{
vote.dl1 <- c()}
if(w.ml1>0){
vote.ml1[1:w.ml1] <- pred_ml1[j, 2]}
else{
vote.ml1 <- c()}
if(w.dl2>0){
vote.dl2[1:w.dl2] <- pred_dl2[j, 2]}
else{
vote.dl2 <- c()
}
vote.tot <- c(vote.dl1, vote.dl2, vote.sp1, vote.ml1 )
vote.table <- table(vote.tot)
iter <- c(j, as.numeric(names(which.max(vote.table))))
pred.iter <- rbind(pred.iter, iter )
}
names(pred.iter) <- n
# score for pred.iter
score.iter <- mean(pred.iter$Label == test$label)
pred.total <- rbind(pred.total, c(score.iter, w.sp1, w.dl1, w.ml1, w.dl2) )
##################################################################
# Get Best scores by weight and output:
max.accuracy <- max(pred.total$accuracy)
pred.total[pred.total$accuracy==max(pred.total$accuracy) ,]
write.csv(pred.total, "prediction_weights_final.csv")
| /Data/Holdout/voting_weights-final.R | no_license | icday1/Kaggle-DigitRecognizer- | R | false | false | 3,875 | r | # Damon LaPoint
# DSC 450 HW5 Lecture 6
# Part 3 Ensemble
# Clear workspace
rm(list=ls())
# Clear console
cat("\014")
date()
setwd("~/Personal/School/Data Science 450 Summer 2015/Cap Stone")
# load data sets for test and predictions
test <- read.csv("data_holdout_test.csv")
#Damon LaPoint's model using feature engineering
pred_dl1 <- read.csv("prediction_holdout_dl_lr.csv")
#RJ's SVM Model
pred_dl2 <- read.csv("predictions_holdout_svm_rj.csv")
#Sandeep Pridshetti's model using Average Random Forest
pred_sp1 <- read.csv("predictions_RandomForrestAvg_holdout_Sandeep.csv")
#Marc Lauzon's model using greater than 0 features binarized and Random Forrest
pred_ml1 <- read.csv("predictions_from_holdback_testing_Marc_Lauzon.csv")
#update names
n <- names(pred_sp1)
names(pred_dl1) <- n
names(pred_dl2) <- n
# test accuracy of each data set
mean(pred_dl1$Label == test$label)
#[1] 0.9642857
mean(pred_dl2$Label == test$label)
#[1] 0.9830952
mean(pred_sp1$Label == test$label)
#[1] 0.9654762
mean(pred_ml1$Label == test$label)
#[1] 0.9552381
vote.dl1 <- c()
vote.dl2 <- c()
vote.sp1 <- c()
vote.ml1 <- c()
###############
# Loop: Takes forever to run, use Guess to run one-off tests.
range <- 5
i <- 0
f <- 0
g <- 0
h <- 0
pred.iter <- data.frame()
pred.total <- data.frame()
for(i in 0:range)
{
w.sp1 <- i
#print(w.sp1)
for(f in 0:range)
{
w.dl1 <- f
#print(w.dl1)
for(g in 0:range)
{
w.ml1 <- g
for(h in 3:range)
{
w.dl2 <- h
for(j in 1:nrow(pred_dl1))
{
if(w.sp1>0){
vote.sp1[1:w.sp1] <- pred_sp1[j, 2]}
else{
vote.sp1 <- c()}
if(w.dl1>0){
vote.dl1[1:w.dl1] <- pred_dl1[j, 2]}
else{
vote.dl1 <- c()}
if(w.ml1>0){
vote.ml1[1:w.ml1] <- pred_ml1[j, 2]}
else{
vote.ml1 <- c()}
if(w.dl2>0){
vote.dl2[1:w.dl2] <- pred_dl2[j, 2]}
else{
vote.dl2 <- c()
}
vote.tot <- c(vote.dl1, vote.dl2, vote.sp1, vote.ml1 )
vote.table <- table(vote.tot)
iter <- c(j, as.numeric(names(which.max(vote.table))))
pred.iter <- rbind(pred.iter, iter )
}
names(pred.iter) <- n
# score for pred.iter
score.iter <- mean(pred.iter$Label == test$label)
pred.total <- rbind(pred.total, c(score.iter, w.sp1, w.dl1, w.ml1, w.dl2) )
pred.iter <- data.frame()
}
}
}
}
names(pred.total) <- c("accuracy", "w.sp1", "w.dl1", "w.ml1", "w.rj1")
#################
# Guess rather than Loop
##################
w.sp1 <- 1
w.dl1 <- 2
w.ml1 <- 1
w.dl2 <- 2
# clear every time!
pred.iter <- data.frame()
for(j in 1:nrow(pred_dl1))
{
if(w.sp1>0){
vote.sp1[1:w.sp1] <- pred_sp1[j, 2]}
else{
vote.sp1 <- c()}
if(w.dl1>0){
vote.dl1[1:w.dl1] <- pred_dl1[j, 2]}
else{
vote.dl1 <- c()}
if(w.ml1>0){
vote.ml1[1:w.ml1] <- pred_ml1[j, 2]}
else{
vote.ml1 <- c()}
if(w.dl2>0){
vote.dl2[1:w.dl2] <- pred_dl2[j, 2]}
else{
vote.dl2 <- c()
}
vote.tot <- c(vote.dl1, vote.dl2, vote.sp1, vote.ml1 )
vote.table <- table(vote.tot)
iter <- c(j, as.numeric(names(which.max(vote.table))))
pred.iter <- rbind(pred.iter, iter )
}
names(pred.iter) <- n
# score for pred.iter
score.iter <- mean(pred.iter$Label == test$label)
pred.total <- rbind(pred.total, c(score.iter, w.sp1, w.dl1, w.ml1, w.dl2) )
##################################################################
# Get Best scores by weight and output:
max.accuracy <- max(pred.total$accuracy)
pred.total[pred.total$accuracy==max(pred.total$accuracy) ,]
write.csv(pred.total, "prediction_weights_final.csv")
|
library(shiny)
ui <- fluidPage(
sidebarPanel(),
mainPanel (textOutput("date"),
plotOutput("demand")
)
) | /ui.R | no_license | abghari/ShinyApp | R | false | false | 128 | r | library(shiny)
ui <- fluidPage(
sidebarPanel(),
mainPanel (textOutput("date"),
plotOutput("demand")
)
) |
#Julian Ramirez-Villegas
#UoL / CCAFS / CIAT
#December 2011
#Modified by Carlos Navarro
# April 2016
#########################
## 01- Read GSOD files ##
#########################
stop("error")
src.dir <- "Z:/DATA/WP2/00_scripts"
source(paste(src.dir,"/GHCND-GSOD-functions.R",sep=""))
#base dir
bDir <- "S:/observed/weather_station/gsod"; setwd(bDir)
gsodDir <- paste(bDir,"/organized-data",sep="")
# odir <- "D:/CIAT/Projects/col-cormacarena"
odir <- "Z:/DATA/WP2/01_Weather_Stations/GSOD"
reg <- "amz"
#gsod stations
stations.gsod <- read.csv(paste(gsodDir,"/ish-history.csv",sep=""))
stations.gsod$LON <- stations.gsod$LON/1000; stations.gsod$LAT <- stations.gsod$LAT/1000
stations.gsod$ELEV..1M. <- stations.gsod$ELEV..1M./10
#1. create extents
require(raster); require(maptools); require(rgdal)
#projection extents
# reg.xt <- extent(-90,-30,-40,24) #Lat
# reg.xt <- extent(-79.5, -72, -11.9, 3) #Napo region
reg.xt <- extent(-80, -66, -16, 5) #Study Region region
#plot the extents (for reference -commented!)
#rs <- raster(); rs[] <- rnorm(1:ncell(rs))
#data(wrld_simpl)
#plot(rs,col=colorRampPalette(c("grey10","grey90"))(100)); plot(wrld_simpl,add=T,col='white')
#plot(waf.xt,add=T,col='red'); plot(eaf.xt,add=T,col='blue'); plot(igp.xt,add=T,col='orange')
#plot(afr.xt,add=T,col='black',lty=1); plot(sas.xt,add=T,col='black',lty=2)
#2. define working gridcell
# cellSize <- 1
#3. Make inventory of data (points / day / fit region)
#define initial and final year
yearSeries <- c(1960:2009)
#select stations within 3+degree of interpolation extents
gsod.reg <- stations.gsod[which(stations.gsod$LON>=(reg.xt@xmin-3) & stations.gsod$LON<=(reg.xt@xmax+3)
& stations.gsod$LAT>=(reg.xt@ymin-3) & stations.gsod$LAT<=(reg.xt@ymax+3)),]
st_ids <- paste(gsod.reg$USAF,"-",gsod.reg$WBAN,sep="")
usaf_ids <- gsod.reg$USAF
st_loc <- as.data.frame(cbind("Station"=gsod.reg$USAF, "Name"=gsod.reg$STATION.NAME, "Lon"=gsod.reg$LON, "Lat"=gsod.reg$LAT, "Alt"=gsod.reg$ELEV..1M.))
write.csv(st_loc, paste0(odir, "/stations_names.csv"), row.names=F)
# gsod.sas <- stations.gsod[which(stations.gsod$LON>=(sas.xt@xmin-3) & stations.gsod$LON<=(sas.xt@xmax+3)
# & stations.gsod$LAT>=(sas.xt@ymin-3) & stations.gsod$LAT<=(sas.xt@ymax+3)),]
#clean gsod stations of Africa
# gsod.reg <- gsod.reg[-which(gsod.reg$LON == 0 | gsod.reg$LAT == 0),]
#do the snowfall stuff here
library(snowfall)
sfInit(parallel=T,cpus=32) #initiate cluster
#export functions
sfExport("convertGSOD")
sfExport("createDateGrid")
sfExport("leap")
#export variables
sfExport("bDir")
IDs <- paste("USAF",gsod.reg$USAF,"_WBAN",gsod.reg$WBAN,sep="")
count <- 1
for (yr in yearSeries) {
cat(yr,paste("(",count," out of ",length(yearSeries),")",sep=""),"\n")
gdir <- paste(gsodDir,"/",yr,sep="")
ogdir <- paste(odir,"/daily", sep=""); if (!file.exists(ogdir)) {dir.create(ogdir, recursive=T)}
controlConvert <- function(i) { #define a new function
convertGSOD(i,yr,gdir,ogdir)
}
sfExport("yr"); sfExport("gdir"); sfExport("ogdir")
system.time(sfSapply(as.vector(IDs), controlConvert))
count <- count+1
}
# Join all year in one file per station
mergeDailyGSOD(odir, ogdir, st_ids, usaf_ids)
# Monthly aggregation
varList <- c("prec", "tmax", "tmin")
for (var in varList){
monthly_agg(var, odir, odir)
}
## Add coordinates to the climatologies files
varList <- c("prec", "tmin", "tmax")
st_loc <- paste0(odir, "/stations_names.csv")
sY=1976
fY=2005
for (var in varList){
clim_calc(var, odir, odir, st_loc, sY, fY)
}
| /SAL_BMU_Amazon/04_GSOD-read.R | no_license | CIAT-DAPA/dapa-climate-change | R | false | false | 3,570 | r | #Julian Ramirez-Villegas
#UoL / CCAFS / CIAT
#December 2011
#Modified by Carlos Navarro
# April 2016
#########################
## 01- Read GSOD files ##
#########################
stop("error")
src.dir <- "Z:/DATA/WP2/00_scripts"
source(paste(src.dir,"/GHCND-GSOD-functions.R",sep=""))
#base dir
bDir <- "S:/observed/weather_station/gsod"; setwd(bDir)
gsodDir <- paste(bDir,"/organized-data",sep="")
# odir <- "D:/CIAT/Projects/col-cormacarena"
odir <- "Z:/DATA/WP2/01_Weather_Stations/GSOD"
reg <- "amz"
#gsod stations
stations.gsod <- read.csv(paste(gsodDir,"/ish-history.csv",sep=""))
stations.gsod$LON <- stations.gsod$LON/1000; stations.gsod$LAT <- stations.gsod$LAT/1000
stations.gsod$ELEV..1M. <- stations.gsod$ELEV..1M./10
#1. create extents
require(raster); require(maptools); require(rgdal)
#projection extents
# reg.xt <- extent(-90,-30,-40,24) #Lat
# reg.xt <- extent(-79.5, -72, -11.9, 3) #Napo region
reg.xt <- extent(-80, -66, -16, 5) #Study Region region
#plot the extents (for reference -commented!)
#rs <- raster(); rs[] <- rnorm(1:ncell(rs))
#data(wrld_simpl)
#plot(rs,col=colorRampPalette(c("grey10","grey90"))(100)); plot(wrld_simpl,add=T,col='white')
#plot(waf.xt,add=T,col='red'); plot(eaf.xt,add=T,col='blue'); plot(igp.xt,add=T,col='orange')
#plot(afr.xt,add=T,col='black',lty=1); plot(sas.xt,add=T,col='black',lty=2)
#2. define working gridcell
# cellSize <- 1
#3. Make inventory of data (points / day / fit region)
#define initial and final year
yearSeries <- c(1960:2009)
#select stations within 3+degree of interpolation extents
gsod.reg <- stations.gsod[which(stations.gsod$LON>=(reg.xt@xmin-3) & stations.gsod$LON<=(reg.xt@xmax+3)
& stations.gsod$LAT>=(reg.xt@ymin-3) & stations.gsod$LAT<=(reg.xt@ymax+3)),]
st_ids <- paste(gsod.reg$USAF,"-",gsod.reg$WBAN,sep="")
usaf_ids <- gsod.reg$USAF
st_loc <- as.data.frame(cbind("Station"=gsod.reg$USAF, "Name"=gsod.reg$STATION.NAME, "Lon"=gsod.reg$LON, "Lat"=gsod.reg$LAT, "Alt"=gsod.reg$ELEV..1M.))
write.csv(st_loc, paste0(odir, "/stations_names.csv"), row.names=F)
# gsod.sas <- stations.gsod[which(stations.gsod$LON>=(sas.xt@xmin-3) & stations.gsod$LON<=(sas.xt@xmax+3)
# & stations.gsod$LAT>=(sas.xt@ymin-3) & stations.gsod$LAT<=(sas.xt@ymax+3)),]
#clean gsod stations of Africa
# gsod.reg <- gsod.reg[-which(gsod.reg$LON == 0 | gsod.reg$LAT == 0),]
#do the snowfall stuff here
library(snowfall)
sfInit(parallel=T,cpus=32) #initiate cluster
#export functions
sfExport("convertGSOD")
sfExport("createDateGrid")
sfExport("leap")
#export variables
sfExport("bDir")
IDs <- paste("USAF",gsod.reg$USAF,"_WBAN",gsod.reg$WBAN,sep="")
count <- 1
for (yr in yearSeries) {
cat(yr,paste("(",count," out of ",length(yearSeries),")",sep=""),"\n")
gdir <- paste(gsodDir,"/",yr,sep="")
ogdir <- paste(odir,"/daily", sep=""); if (!file.exists(ogdir)) {dir.create(ogdir, recursive=T)}
controlConvert <- function(i) { #define a new function
convertGSOD(i,yr,gdir,ogdir)
}
sfExport("yr"); sfExport("gdir"); sfExport("ogdir")
system.time(sfSapply(as.vector(IDs), controlConvert))
count <- count+1
}
# Join all year in one file per station
mergeDailyGSOD(odir, ogdir, st_ids, usaf_ids)
# Monthly aggregation
varList <- c("prec", "tmax", "tmin")
for (var in varList){
monthly_agg(var, odir, odir)
}
## Add coordinates to the climatologies files
varList <- c("prec", "tmin", "tmax")
st_loc <- paste0(odir, "/stations_names.csv")
sY=1976
fY=2005
for (var in varList){
clim_calc(var, odir, odir, st_loc, sY, fY)
}
|
# server.R
# Shiny server logic: forwards every UI input to LRPowerCorr() (defined in
# helpers.R, not in this file) and prints the returned power-analysis
# result. renderPrint() re-runs the simulation whenever any referenced
# input changes.
source("helpers.R")

shinyServer(function(input, output) {
  output$poweroutput <- renderPrint({
    LRPowerCorr(
      sampsize = input$n, nsims = input$nsims, p = input$rho,
      a = input$a, b = input$b, c = input$c, d = input$d,
      A = input$A, B = input$B, C = input$C, D = input$D,
      or4 = input$or4, or5 = input$or5, or6 = input$or6,
      or7 = input$or7, or8 = input$or8, or9 = input$or9, or10 = input$or10,
      fullmodel = input$fullmodel,
      reducedmodel = input$reducedmodel,
      alpha = input$alpha,
      dftest = input$df, pcx1 = input$pcx1, pcx2 = input$pcx2
    )
  })
})
| /server.R | no_license | heathgauss/LRPowerCorrRev | R | false | false | 735 | r | # server.R
source("helpers.R")
shinyServer(
function(input, output) {
output$poweroutput <- renderPrint({
LRPowerCorr(sampsize = input$n, nsims = input$nsims, p = input$rho,
a = input$a, b = input$b, c = input$c, d = input$d,
A = input$A, B = input$B, C = input$C, D = input$D,
or4 = input$or4, or5 = input$or5, or6 = input$or6,
or7 = input$or7, or8 = input$or8, or9 = input$or9, or10 = input$or10,
fullmodel = input$fullmodel,
reducedmodel = input$reducedmodel,
alpha = input$alpha,
dftest = input$df, pcx1 = input$pcx1, pcx2 = input$pcx2)
})
}
)
|
################################# MITH#############################################
rm(list=ls())
##### Setting the Working Directory
setwd("~/Insofe/Decision Modeling/MITH/MITH_Data")
dir()
#### Reading the data files
traindata <- read.csv("train.csv",header = T)
testdata <- read.csv("test.csv",header = T)
buildingownership<- read.csv("buildingownership.csv",header = T)
buildingstructure<- read.csv("buildingstructure.csv",header = T)
####### Visualization of the data
str(traindata)
summary(traindata)
str(testdata)
summary(testdata)
str(buildingownership)
str(buildingstructure)
summary(buildingstructure)
####### Type Conversions
#### Train Data
library(dplyr)
traindata_cat<-select(traindata,starts_with("h"))
names(traindata_cat)
traindata_cat <- data.frame(apply(traindata_cat,2,as.factor))
traindata_catg<-subset(traindata,select = c(1,2,3,13))
names(traindata_catg)
traindata1<-data.frame(c(traindata_catg,traindata_cat))
str(traindata1)
names(traindata1)
names(testdata1)
#### Test Data
library(dplyr)
testdata_cat<-select(testdata,starts_with("h"))
names(testdata_cat)
testdata_cat <- data.frame(apply(testdata_cat,2,as.factor))
testdata_catg<-subset(testdata,select = c(1:4))
names(testdata_catg)
testdata1<-data.frame(c(testdata_catg,testdata_cat))
str(testdata1)
rm(testdata_cat,testdata_catg)
#### Buildiing Ownership Data
buildingownership_cat <- select(buildingownership,starts_with("h"))
names(buildingownership_cat)
buildingownership_catg <- select(buildingownership,1:5)
names(buildingownership_catg)
buildingownership_cat <- data.frame(apply(buildingownership_cat,2,as.factor))
buildingownership1<-data.frame(c(buildingownership_catg,buildingownership_cat))
names(buildingownership)
names(buildingownership1)
str(buildingownership1)
rm(buildingownership_cat,buildingownership_catg)
#### Building structure Data
buildingstructure_cat <- select(buildingstructure,starts_with("h"),c(26,27))
names(buildingstructure_cat)
buildingstructure_catg <- select(buildingstructure,1:14,28,29)
names(buildingstructure_catg)
buildingstructure_cat <- data.frame(apply(buildingstructure_cat,2,as.factor))
buildingstructure1<-data.frame(c(buildingstructure_catg,buildingstructure_cat))
names(buildingstructure)
names(buildingstructure1)
str(buildingstructure1)
rm(buildingstructure_cat,buildingstructure_catg)
#### Checking For Null Values
sum(is.na(traindata1))
sum(is.na(testdata1))
table(traindata1$has_repair_started)
table(testdata1$has_repair_started)
sum(is.na(buildingownership1))
sum(is.na(buildingstructure1))
table(traindata1$has_repair_started)
table(testdata1$has_repair_started)
str(traindata1)
### Imputing Missing Values
library(DMwR)
traindata1 <- centralImputation(traindata1)
table(traindata1$has_repair_started)
names(testdata1)
## Removing Target As it has all missing Values
testdata2 <- subset(testdata1,select=-2)
names(testdata2)
testdata3 <- subset(testdata1,select=2)
testdata2 <- centralImputation(testdata2)
sum(is.na(testdata2))
table(testdata2$has_repair_started)
testdata1 <-data.frame(c(testdata2,testdata3))
names(testdata1)
### Data Processing
names(testdata1)
traindata3 <- subset(traindata1,select=-2)
traindata2 <- subset(traindata1,select=2)
traindata1 <- data.frame(c(traindata3,traindata2))
names(traindata1)
library(caret)
nearZeroVar(buildingownership1)
nearZeroVar(buildingstructure1)
## The zero variance attributes in Building Ownership
### 5 8 9 10 11 12 13 14 15 16
## The zero variance attributes in Building Structure
### 14 19 20 25 26 27 28
## Removing Zero Variance attributes in both Building OwnerShip And Building Structure
buildingownership2 <- subset(buildingownership1,select=-c(5,8,9,10,11,12,13,14,15,16))
buildingstructure2 <- subset(buildingstructure1,select= -c(14,19,20,25,26,27,28))
names(buildingownership2)
names(buildingstructure2)
## Merging Of the Datasets
mergeddata <- merge(buildingownership2,buildingstructure2,by.x = c("building_id"),
by.y = c("building_id"),all.x = T)
train_merge <- merge(traindata1,mergeddata,by.x = c("building_id"),
by.y = c("building_id"),all.x = T)
test_merge <- merge(testdata1,mergeddata,by.x = c("building_id"),
by.y = c("building_id"),all.x = T)
names(test_merge)
names(train_merge)
str(train_merge)
str(test_merge)
summary(train_merge)
summary(test_merge)
## Removing target from the dataset
test_merge1<- subset(test_merge,select=-c(13))
test_merge2<- subset(test_merge,select=c(13))
train_merge1<- subset(train_merge,select=-c(13))
train_merge2<- subset(train_merge,select=c(13))
## Imputation Of missing Values in the new datasets
train <- centralImputation(train_merge1)
test <- centralImputation(test_merge1)
test1 <- data.frame(c(test,test_merge2))
names(test1)
train1<- data.frame(c(train,train_merge2))
names(train1)
sum(is.na(train1))
sum(is.na(test1))
## Converting the levels of the target to numbers
train1$damage_grade <- ifelse(train1$damage_grade == "High",yes = 3,ifelse(train1$damage_grade=="Medium",yes = 2,no=1))
## Checking for Class Imbalance in the data
prop.table(table(train1$damage_grade))
train1$damage_grade <- as.factor(train1$damage_grade)
str(train1)
## Removing Varaibles Which are not needed for prediction
names(train1)
train2 <- subset(train1,select = -c(13,14,15,18,19,20))
names(test1)
test2 <- subset(test1,select = -c(13,14,15,18,19,20))
names(train2)
names(test2)
## Writing the dataframes to the disk
write.csv(train2,"trainfinal.csv",row.names = FALSE)
write.csv(test2,"testfinal.csv",row.names = FALSE)
## Removing the columns not necessary for prediction
names(train2)
train3 <- subset(train2,select = -c(1,2,3,24,32))
test3<- subset(test2,select = -c(1,2,3,24,32))
str(train3)
### Building Random Forest
library(randomForest)
model_rf <- randomForest(damage_grade ~ .,data=train3,ntree = 100,mtry = 5)
importance(model_rf)
varImpPlot(model_rf)
plot(model_rf)
# Predict on the train data
preds_train_rf <- predict(model_rf)
confusionMatrix(preds_train_rf, train3$damage_grade)
## Predicting on the testdata
preds_test_rf <- predict(model_rf,newdata = test3)
preds_test_rf<- ifelse(preds_test_rf== 3,yes= "High",ifelse(preds_test_rf== 2,yes = "Medium",no="Low" ))
sub_data_1 <- data.frame(test2[1],preds_test_rf)
str(sub_data_1)
## Writing the submission file into csv
write.csv(sub_data_1,"submission.csv",row.names = FALSE)
## Writing the train and test to the dataframe
write.csv(train3,"traindata.csv",row.names = F)
write.csv(test3,"testdata.csv",row.names = F)
### Building Random Forest
library(randomForest)
model_rf1<- randomForest(damage_grade ~ .,data=train3,ntree = 200,mtry =5)
importance(model_rf1)
varImpPlot(model_rf1)
plot(model_rf1)
# Predict on the train data
preds_train_rf1 <- predict(model_rf1)
confusionMatrix(preds_train_rf1, train3$damage_grade)
## Predicting on the testdata
preds_test_rf1 <- predict(model_rf1, newdata = test3)
## Bug fix: the original recoded `preds_test_rf2`, which is undefined at
## this point (error), and then wrote the model-1 predictions
## (`preds_test_rf` via `sub_data_1`) to submission2.csv. Recode and export
## the 200-tree model's predictions instead.
preds_test_rf1 <- ifelse(preds_test_rf1 == 3, yes = "High",
                         ifelse(preds_test_rf1 == 2, yes = "Medium", no = "Low"))
sub_data_2 <- data.frame(test2[1], preds_test_rf1)
str(sub_data_2)
## Writing the submission file into csv
write.csv(sub_data_2, "submission2.csv", row.names = FALSE)
### Building Random Forest
library(randomForest)
model_rf3<- randomForest(damage_grade ~ .,data=train3,ntree = 200,mtry =10)
importance(model_rf3)
varImpPlot(model_rf3)
plot(model_rf3)
# Predict on the train data
preds_train_rf3 <- predict(model_rf3)
confusionMatrix(preds_train_rf3, train3$damage_grade)
## Predicting on the testdata
preds_test_rf3 <- predict(model_rf3,newdata = test3)
preds_test_rf3<- ifelse(preds_test_rf3== 3,yes= "High",ifelse(preds_test_rf3== 2,yes = "Medium",no="Low" ))
sub_data_3 <- data.frame(test2[1],preds_test_rf3)
str(sub_data_3)
## Writing the submission file into csv
write.csv(sub_data_3,"submission3.csv",row.names = FALSE)
| /MITH.R | no_license | AnkitaS-29/Determining-degree-of-damage-to-buildings-during-earthquakes- | R | false | false | 8,527 | r | ################################# MITH#############################################
rm(list=ls())
##### Setting the Working Directory
setwd("~/Insofe/Decision Modeling/MITH/MITH_Data")
dir()
#### Reading the data files
traindata <- read.csv("train.csv",header = T)
testdata <- read.csv("test.csv",header = T)
buildingownership<- read.csv("buildingownership.csv",header = T)
buildingstructure<- read.csv("buildingstructure.csv",header = T)
####### Visualization of the data
str(traindata)
summary(traindata)
str(testdata)
summary(testdata)
str(buildingownership)
str(buildingstructure)
summary(buildingstructure)
####### Type Conversions
#### Train Data
library(dplyr)
traindata_cat<-select(traindata,starts_with("h"))
names(traindata_cat)
traindata_cat <- data.frame(apply(traindata_cat,2,as.factor))
traindata_catg<-subset(traindata,select = c(1,2,3,13))
names(traindata_catg)
traindata1<-data.frame(c(traindata_catg,traindata_cat))
str(traindata1)
names(traindata1)
names(testdata1)
#### Test Data
library(dplyr)
testdata_cat<-select(testdata,starts_with("h"))
names(testdata_cat)
testdata_cat <- data.frame(apply(testdata_cat,2,as.factor))
testdata_catg<-subset(testdata,select = c(1:4))
names(testdata_catg)
testdata1<-data.frame(c(testdata_catg,testdata_cat))
str(testdata1)
rm(testdata_cat,testdata_catg)
#### Buildiing Ownership Data
buildingownership_cat <- select(buildingownership,starts_with("h"))
names(buildingownership_cat)
buildingownership_catg <- select(buildingownership,1:5)
names(buildingownership_catg)
buildingownership_cat <- data.frame(apply(buildingownership_cat,2,as.factor))
buildingownership1<-data.frame(c(buildingownership_catg,buildingownership_cat))
names(buildingownership)
names(buildingownership1)
str(buildingownership1)
rm(buildingownership_cat,buildingownership_catg)
#### Building structure Data
buildingstructure_cat <- select(buildingstructure,starts_with("h"),c(26,27))
names(buildingstructure_cat)
buildingstructure_catg <- select(buildingstructure,1:14,28,29)
names(buildingstructure_catg)
buildingstructure_cat <- data.frame(apply(buildingstructure_cat,2,as.factor))
buildingstructure1<-data.frame(c(buildingstructure_catg,buildingstructure_cat))
names(buildingstructure)
names(buildingstructure1)
str(buildingstructure1)
rm(buildingstructure_cat,buildingstructure_catg)
#### Checking For Null Values
sum(is.na(traindata1))
sum(is.na(testdata1))
table(traindata1$has_repair_started)
table(testdata1$has_repair_started)
sum(is.na(buildingownership1))
sum(is.na(buildingstructure1))
table(traindata1$has_repair_started)
table(testdata1$has_repair_started)
str(traindata1)
### Imputing Missing Values
library(DMwR)
traindata1 <- centralImputation(traindata1)
table(traindata1$has_repair_started)
names(testdata1)
## Removing Target As it has all missing Values
testdata2 <- subset(testdata1,select=-2)
names(testdata2)
testdata3 <- subset(testdata1,select=2)
testdata2 <- centralImputation(testdata2)
sum(is.na(testdata2))
table(testdata2$has_repair_started)
testdata1 <-data.frame(c(testdata2,testdata3))
names(testdata1)
### Data Processing
names(testdata1)
traindata3 <- subset(traindata1,select=-2)
traindata2 <- subset(traindata1,select=2)
traindata1 <- data.frame(c(traindata3,traindata2))
names(traindata1)
library(caret)
nearZeroVar(buildingownership1)
nearZeroVar(buildingstructure1)
## The zero variance attributes in Building Ownership
### 5 8 9 10 11 12 13 14 15 16
## The zero variance attributes in Building Structure
### 14 19 20 25 26 27 28
## Removing Zero Variance attributes in both Building OwnerShip And Building Structure
buildingownership2 <- subset(buildingownership1,select=-c(5,8,9,10,11,12,13,14,15,16))
buildingstructure2 <- subset(buildingstructure1,select= -c(14,19,20,25,26,27,28))
names(buildingownership2)
names(buildingstructure2)
## Merging Of the Datasets
mergeddata <- merge(buildingownership2,buildingstructure2,by.x = c("building_id"),
by.y = c("building_id"),all.x = T)
train_merge <- merge(traindata1,mergeddata,by.x = c("building_id"),
by.y = c("building_id"),all.x = T)
test_merge <- merge(testdata1,mergeddata,by.x = c("building_id"),
by.y = c("building_id"),all.x = T)
names(test_merge)
names(train_merge)
str(train_merge)
str(test_merge)
summary(train_merge)
summary(test_merge)
## Removing target from the dataset
test_merge1<- subset(test_merge,select=-c(13))
test_merge2<- subset(test_merge,select=c(13))
train_merge1<- subset(train_merge,select=-c(13))
train_merge2<- subset(train_merge,select=c(13))
## Imputation Of missing Values in the new datasets
train <- centralImputation(train_merge1)
test <- centralImputation(test_merge1)
test1 <- data.frame(c(test,test_merge2))
names(test1)
train1<- data.frame(c(train,train_merge2))
names(train1)
sum(is.na(train1))
sum(is.na(test1))
## Converting the levels of the target to numbers
train1$damage_grade <- ifelse(train1$damage_grade == "High",yes = 3,ifelse(train1$damage_grade=="Medium",yes = 2,no=1))
## Checking for Class Imbalance in the data
prop.table(table(train1$damage_grade))
train1$damage_grade <- as.factor(train1$damage_grade)
str(train1)
## Removing Varaibles Which are not needed for prediction
names(train1)
train2 <- subset(train1,select = -c(13,14,15,18,19,20))
names(test1)
test2 <- subset(test1,select = -c(13,14,15,18,19,20))
names(train2)
names(test2)
## Writing the dataframes to the disk
write.csv(train2,"trainfinal.csv",row.names = FALSE)
write.csv(test2,"testfinal.csv",row.names = FALSE)
## Removing the columns not necessary for prediction
names(train2)
train3 <- subset(train2,select = -c(1,2,3,24,32))
test3<- subset(test2,select = -c(1,2,3,24,32))
str(train3)
### Building Random Forest
library(randomForest)
model_rf <- randomForest(damage_grade ~ .,data=train3,ntree = 100,mtry = 5)
importance(model_rf)
varImpPlot(model_rf)
plot(model_rf)
# Predict on the train data
preds_train_rf <- predict(model_rf)
confusionMatrix(preds_train_rf, train3$damage_grade)
## Predicting on the testdata
preds_test_rf <- predict(model_rf,newdata = test3)
preds_test_rf<- ifelse(preds_test_rf== 3,yes= "High",ifelse(preds_test_rf== 2,yes = "Medium",no="Low" ))
sub_data_1 <- data.frame(test2[1],preds_test_rf)
str(sub_data_1)
## Writing the submission file into csv
write.csv(sub_data_1,"submission.csv",row.names = FALSE)
## Writing the train and test to the dataframe
write.csv(train3,"traindata.csv",row.names = F)
write.csv(test3,"testdata.csv",row.names = F)
### Building Random Forest
library(randomForest)
model_rf1<- randomForest(damage_grade ~ .,data=train3,ntree = 200,mtry =5)
importance(model_rf1)
varImpPlot(model_rf1)
plot(model_rf1)
# Predict on the train data
preds_train_rf1 <- predict(model_rf1)
confusionMatrix(preds_train_rf1, train3$damage_grade)
## Predicting on the testdata
preds_test_rf1 <- predict(model_rf1, newdata = test3)
## Bug fix: the original recoded `preds_test_rf2`, which is undefined at
## this point (error), and then wrote the model-1 predictions
## (`preds_test_rf` via `sub_data_1`) to submission2.csv. Recode and export
## the 200-tree model's predictions instead.
preds_test_rf1 <- ifelse(preds_test_rf1 == 3, yes = "High",
                         ifelse(preds_test_rf1 == 2, yes = "Medium", no = "Low"))
sub_data_2 <- data.frame(test2[1], preds_test_rf1)
str(sub_data_2)
## Writing the submission file into csv
write.csv(sub_data_2, "submission2.csv", row.names = FALSE)
### Building Random Forest
library(randomForest)
model_rf3<- randomForest(damage_grade ~ .,data=train3,ntree = 200,mtry =10)
importance(model_rf3)
varImpPlot(model_rf3)
plot(model_rf3)
# Predict on the train data
preds_train_rf3 <- predict(model_rf3)
confusionMatrix(preds_train_rf3, train3$damage_grade)
## Predicting on the testdata
preds_test_rf3 <- predict(model_rf3,newdata = test3)
preds_test_rf3<- ifelse(preds_test_rf3== 3,yes= "High",ifelse(preds_test_rf3== 2,yes = "Medium",no="Low" ))
sub_data_3 <- data.frame(test2[1],preds_test_rf3)
str(sub_data_3)
## Writing the submission file into csv
write.csv(sub_data_3,"submission3.csv",row.names = FALSE)
|
require(Matrix)
##' Determine if a variance components model is identified
##'
##' @param ... Comma-separated relatedness component matrices.
##' @param silent logical. Whether to print messages about identification.
##' @export
##'
##' @details
##' Returns of list of length 2. The first element is a single logical value:
##' TRUE if the model is identified, FALSE otherwise. The second list element
##' is the vector of non-identified parameters. For instance, a model might
##' have 5 components with 3 of them identified and 2 of them not. The second
##' list element will give the names of the components that are not
##' simultaneously identified.
##'
##' @examples
##'
##' identifyComponentModel(A=list(matrix(1, 2, 2)), C=list(matrix(1, 2, 2)), E= diag(1, 2))
##'
##'
identifyComponentModel <- function(..., silent=FALSE){
  # Collect the relatedness component matrices; unnamed arguments get the
  # default labels Comp1, Comp2, ...
  comps <- list(...)
  comp_names <- names(comps)
  if (is.null(comp_names)) {
    comp_names <- paste0("Comp", seq_along(comps))
  }
  # Stack the half-vectorization of each component (zeros included) as one
  # column of a design matrix; the model is identified iff that matrix has
  # full column rank.
  design <- do.call(cbind, lapply(comps, comp2vech, include.zeros = TRUE))
  if (qr(design)$rank == length(comps)) {
    if (!silent) cat("Component model is identified.\n")
    return(list(identified = TRUE, nidp = character(0)))
  }
  if (!silent) cat("Component model is not identified.\n")
  # Rank-deficient case: rows of the null space of t(design) carrying
  # non-trivial mass flag the components that are not simultaneously
  # identified.
  null_space <- Null(t(design))
  not_identified <- comp_names[apply(null_space, 1, function(r) sum(r^2)) > 1e-17]
  if (!silent) {
    cat("Non-identified parameters are ",
        paste(not_identified, collapse = ", "), "\n")
  }
  list(identified = FALSE, nidp = not_identified)
}
##' Fit the estimated variance components of a model to covariance data
##'
##' @param covmat the covariance matrix of the raw data, possibly blockwise.
##' @param ... Comma-separated relatedness component matrices.
##' @export
##'
##' @details
##' Returns a regression (linear model fitted with \code{lm}).
##' The coefficients of the regression are the estimated variance components.
##'
##' @examples
##'
##' \dontrun{
##' # install.packages("OpenMX")
##' data(twinData, package = "OpenMx")
##' sellVars <- c("ht1", "ht2")
##' mzData <- subset(twinData, zyg %in% c(1), c(selVars, 'zyg'))
##' dzData <- subset(twinData, zyg %in% c(3), c(selVars, 'zyg'))
##'
##' fitComponentModel(
##' covmat = list(cov(mzData[,selVars], use = "pair"), cov(dzData[,selVars], use = "pair")),
##' A = list(matrix(1, nrow = 2, ncol = 2), matrix(c(1, 0.5, 0.5, 1), nrow = 2, ncol = 2)),
##' C = list(matrix(1, nrow = 2, ncol = 2), matrix(1, nrow = 2, ncol = 2)),
##' E = list(diag(1, nrow = 2), diag(1, nrow = 2))
##' )
##' }
##'
fitComponentModel <- function(covmat, ...){
  # Design matrix: one half-vectorized (zeros included) component per
  # column. `compm` and `y` keep their names so the fitted coefficients
  # are labelled the same way as before.
  comps <- list(...)
  compm <- do.call(cbind, lapply(comps, function(comp) comp2vech(comp, include.zeros = TRUE)))
  # Response: half-vectorized observed covariance matrix (blockwise allowed).
  y <- comp2vech(covmat, include.zeros = TRUE)
  rnk <- qr(compm)$rank
  # A rank-deficient design means the components cannot all be estimated
  # simultaneously.
  if (rnk != length(comps)) {
    stop(paste("Variance components are not all identified.",
               "Try identifyComponentModel()."))
  }
  # More free components than observed (co)variances: warn but still fit.
  if (rnk > length(y)) {
    warning(paste0("Trying to estimate ", rnk,
                   " variance components when at most ", length(y),
                   " are possible with the data given.\n"))
  }
  # Regression through the origin; the coefficients are the estimated
  # variance components.
  stats::lm(y ~ 0 + compm)
}
##' Create the half-vectorization of a matrix
##'
##' @param x a matrix, the half-vectorization of which is desired
##' @export
##'
##' @details
##' Returns the vector of the lower triangle of a matrix, including the diagonal.
##' The upper triangle is ignored with no checking that the provided matrix
##' is symmetric.
##'
##' @examples
##'
##' vech(matrix(c(1, 0.5, 0.5, 1), nrow = 2, ncol = 2))
##'
vech <- function(x){
  # Column-major extraction of the on-and-below-diagonal entries
  # (row index >= column index is exactly lower.tri(x, diag = TRUE)).
  x[row(x) >= col(x)]
}
##' Turn a variance component relatedness matrix into its half-vectorization
##'
##' @param x relatedness component matrix
##' @param include.zeros logical. Whether to include all-zero rows.
##' @export
##'
##' @details
##' This is a wrapper around the \code{vech} function for producing the
##' half-vectorization of a matrix. The extension here is to allow for
##' blockwise matrices.
##'
##' @examples comp2vech(list(matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2)))
##'
comp2vech <- function(x, include.zeros=FALSE){
  # Local half-vectorization (same operation as vech()), kept local so the
  # branches below read uniformly.
  half_vec <- function(m) m[lower.tri(m, diag = TRUE)]
  if (is.matrix(x)) {
    return(half_vec(x))
  }
  if (is.list(x)) {
    # Blockwise input: either embed the blocks in one block-diagonal
    # matrix first (include.zeros = TRUE) or concatenate per-block vechs.
    if (include.zeros) {
      return(half_vec(as.matrix(Matrix::bdiag(x))))
    }
    return(unlist(lapply(x, half_vec)))
  }
  if (inherits(x, "Matrix")) {
    # Matrix-package (possibly sparse) object: densify, then half-vectorize.
    return(half_vec(as.matrix(x)))
  }
  stop(paste("Can't make component into a half vectorization:",
             "x is neither a list nor a matrix."))
}
| /R/identification.R | no_license | cran/BGmisc | R | false | false | 4,420 | r | require(Matrix)
##' Determine if a variance components model is identified
##'
##' @param ... Comma-separated relatedness component matrices.
##' @param silent logical. Whether to print messages about identification.
##' @export
##'
##' @details
##' Returns of list of length 2. The first element is a single logical value:
##' TRUE if the model is identified, FALSE otherwise. The second list element
##' is the vector of non-identified parameters. For instance, a model might
##' have 5 components with 3 of them identified and 2 of them not. The second
##' list element will give the names of the components that are not
##' simultaneously identified.
##'
##' @examples
##'
##' identifyComponentModel(A=list(matrix(1, 2, 2)), C=list(matrix(1, 2, 2)), E= diag(1, 2))
##'
##'
identifyComponentModel <- function(..., silent=FALSE){
  # Relatedness components supplied by the caller; fall back to the
  # labels Comp1, Comp2, ... when the arguments are unnamed.
  mats <- list(...)
  labels <- names(mats)
  if (is.null(labels)) {
    labels <- paste0("Comp", seq_along(mats))
  }
  # One column per half-vectorized component (zeros included); full
  # column rank <=> all components are simultaneously identified.
  vech_mat <- do.call(cbind, lapply(mats, comp2vech, include.zeros = TRUE))
  if (qr(vech_mat)$rank == length(mats)) {
    if (!silent) cat("Component model is identified.\n")
    return(list(identified = TRUE, nidp = character(0)))
  }
  if (!silent) cat("Component model is not identified.\n")
  # Components with non-trivial weight in the null space of t(vech_mat)
  # are the ones that cannot be estimated simultaneously.
  basis <- Null(t(vech_mat))
  culprits <- labels[apply(basis, 1, function(v) sum(v^2)) > 1e-17]
  if (!silent) {
    cat("Non-identified parameters are ",
        paste(culprits, collapse = ", "), "\n")
  }
  list(identified = FALSE, nidp = culprits)
}
##' Fit the estimated variance components of a model to covariance data
##'
##' @param covmat the covariance matrix of the raw data, possibly blockwise.
##' @param ... Comma-separated relatedness component matrices.
##' @export
##'
##' @details
##' Returns a regression (linear model fitted with \code{lm}).
##' The coefficients of the regression are the estimated variance components.
##'
##' @examples
##'
##' \dontrun{
##' # install.packages("OpenMX")
##' data(twinData, package = "OpenMx")
##' sellVars <- c("ht1", "ht2")
##' mzData <- subset(twinData, zyg %in% c(1), c(selVars, 'zyg'))
##' dzData <- subset(twinData, zyg %in% c(3), c(selVars, 'zyg'))
##'
##' fitComponentModel(
##' covmat = list(cov(mzData[,selVars], use = "pair"), cov(dzData[,selVars], use = "pair")),
##' A = list(matrix(1, nrow = 2, ncol = 2), matrix(c(1, 0.5, 0.5, 1), nrow = 2, ncol = 2)),
##' C = list(matrix(1, nrow = 2, ncol = 2), matrix(1, nrow = 2, ncol = 2)),
##' E = list(diag(1, nrow = 2), diag(1, nrow = 2))
##' )
##' }
##'
fitComponentModel <- function(covmat, ...){
  comps <- list(...)
  # Design matrix: one half-vectorized (zeros included) component per
  # column. `compm` and `y` keep their names so the fitted coefficients
  # are labelled the same way as before.
  compm <- do.call(cbind, lapply(comps, comp2vech, include.zeros = TRUE))
  # Response: half-vectorized observed covariance matrix (blockwise allowed).
  y <- comp2vech(covmat, include.zeros = TRUE)
  n_identified <- qr(compm)$rank
  if (n_identified != length(comps)) {
    stop(paste("Variance components are not all identified.",
               "Try identifyComponentModel()."))
  }
  if (n_identified > length(y)) {
    # More free components than data points: warn but still fit.
    warning(paste0("Trying to estimate ", n_identified,
                   " variance components when at most ", length(y),
                   " are possible with the data given.\n"))
  }
  # Regression through the origin; coefficients = variance components.
  stats::lm(y ~ 0 + compm)
}
##' Create the half-vectorization of a matrix
##'
##' @param x a matrix, the half-vectorization of which is desired
##' @export
##'
##' @details
##' Returns the vector of the lower triangle of a matrix, including the diagonal.
##' The upper triangle is ignored with no checking that the provided matrix
##' is symmetric.
##'
##' @examples
##'
##' vech(matrix(c(1, 0.5, 0.5, 1), nrow = 2, ncol = 2))
##'
vech <- function(x){
  # Logical mask of the lower triangle (diagonal included); subsetting
  # with it returns the entries in column-major order.
  keep <- lower.tri(x, diag = TRUE)
  x[keep]
}
##' Turn a variance component relatedness matrix into its half-vectorization
##'
##' @param x relatedness component matrix
##' @param include.zeros logical. Whether to include all-zero rows.
##' @export
##'
##' @details
##' This is a wrapper around the \code{vech} function for producing the
##' half-vectorization of a matrix. The extension here is to allow for
##' blockwise matrices.
##'
##' @examples comp2vech(list(matrix(c(1, .5, .5, 1), 2, 2), matrix(1, 2, 2)))
##'
comp2vech <- function(x, include.zeros=FALSE){
  if (is.matrix(x)) {
    # Plain matrix: delegate straight to vech().
    vech(x)
  } else if (is.list(x)) {
    # Blockwise component: either half-vectorize the full block-diagonal
    # embedding (include.zeros = TRUE) or concatenate per-block vechs.
    if (include.zeros) {
      vech(as.matrix(Matrix::bdiag(x)))
    } else {
      unlist(lapply(x, vech))
    }
  } else if (inherits(x, "Matrix")) {
    # Matrix-package (possibly sparse) object: densify first.
    vech(as.matrix(x))
  } else {
    stop(paste("Can't make component into a half vectorization:",
               "x is neither a list nor a matrix."))
  }
}
|
# Fuzz-test fixture: one numeric input vector (a few extreme/subnormal
# values followed by zeros) for the internal compiled geometric-mean routine.
testlist <- list(x = c(4.17267197626434e-309, 2.27610522232642e-159, 1.05463693025179e-163, 1.92263648223069e+53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Call the unexported C++ function with the fixture arguments.
result <- do.call(myTAI:::cpp_geom_mean,testlist)
str(result) | /myTAI/inst/testfiles/cpp_geom_mean/AFL_cpp_geom_mean/cpp_geom_mean_valgrind_files/1615839500-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 317 | r | testlist <- list(x = c(4.17267197626434e-309, 2.27610522232642e-159, 1.05463693025179e-163, 1.92263648223069e+53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(myTAI:::cpp_geom_mean,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/0-global.R
\name{progress}
\alias{progress}
\title{Make Progress Bar}
\usage{
progress(i, k, numTicks)
}
\arguments{
\item{i}{The current iteration.}
\item{k}{Total iterations.}
\item{numTicks}{The result of \code{progress}.}
}
\value{
The next \code{numTicks} argument.
}
\description{
Make Progress Bar
}
| /man/progress.Rd | no_license | zhenxuanzhang/rarsim | R | false | true | 387 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/0-global.R
\name{progress}
\alias{progress}
\title{Make Progress Bar}
\usage{
progress(i, k, numTicks)
}
\arguments{
\item{i}{The current iteration.}
\item{k}{Total iterations.}
\item{numTicks}{The result of \code{progress}.}
}
\value{
The next \code{numTicks} argument.
}
\description{
Make Progress Bar
}
|
# Plot to make the Log2FC plot comparing DE genes.
# Data for two DE comparisons is in the table "overlap", in long/tidy format.
# "Comparison" is the column containing the DE list (e.g. LPSvsVehicle or LPSLPSvsVehicle).
# NOTE(review): assumes ggplot2 is loaded and `overlap` exists in the
# workspace; scale_colour_manual() supplies exactly two colours, so
# `Comparison` is expected to have two levels -- confirm with the caller.
ggplot(overlap, aes(Log2FC, gene, group = gene)) +
  geom_line(alpha = 0.5) +
  geom_point(aes(colour = Comparison), alpha = 0.8) +
  theme(axis.text.y = element_blank(), axis.ticks.y = element_blank()) +
  scale_colour_manual(values = c("firebrick", "springgreen4"))
| /R_scripts/log2FC_geneOverlap_plot.R | no_license | hancockinformatics/misc_R_scripts | R | false | false | 492 | r | # Plot to make the Log2FC plot comparing DE genes.
# Data for two DE comparisons is in the table "overlap", in long/tidy format.
# "Comparison" is the column containing the DE list (e.g. LPSvsVehicle or LPSLPSvsVehicle).
ggplot(overlap, aes(Log2FC, gene, group = gene)) +
geom_line(alpha = 0.5) +
geom_point(aes(colour = Comparison), alpha = 0.8) +
theme(axis.text.y = element_blank(), axis.ticks.y = element_blank()) +
scale_colour_manual(values = c("firebrick", "springgreen4"))
|
library("Ecdat")
?CPSch3
data(CPSch3)
dimnames(CPSch3)[[2]]
male.earnings = CPSch3[CPSch3[, 3]=="male", 2]
sqrt.male.earnings = sqrt(male.earnings)
log.male.earnings = log(male.earnings)
par(mfrow=c(2,2))
qqnorm(male.earnings, datax=T, main="untransformed")
qqnorm(sqrt.male.earnings, datax=T, main="square-root transformed")
qqnorm(log.male.earnings, datax=T, main="log-transformed")
par(mfrow=c(2,2))
boxplot(male.earnings, main="untransformed")
boxplot(sqrt.male.earnings, main="square-root transformed")
boxplot(log.male.earnings, main="log transformed")
# Kernel density estimates under each transformation (2x2 panel layout).
par(mfrow=c(2,2))
plot(density(male.earnings), main="untransformed")
plot(density(sqrt.male.earnings), main="square-root transformed")
# Bug fix: the original omitted density()'s closing parenthesis, making
# `main` an argument of density() and leaving plot( unclosed -- a syntax
# error that broke the rest of the script.
plot(density(log.male.earnings), main="log transformed")
library("MASS")
windows()
boxcox(male.earnings~1)
boxcox(male.earnings~1,lambda = seq(.3, .45, 1/100))
bc = boxcox(male.earnings~1,lambda = seq(.3, .45, by=1/100),interp=F)
ind = (bc$y==max(bc$y))
ind2 = (bc$y > max(bc$y) - qchisq(.95,df=1)/2)
bc$x[ind]
bc$x[ind2]
library("fGarch")
stdFit = sstdFit(male.earnings,hessian=T)
stdEst = stdFit$estimate
gedFit = sgedFit(male.earnings,hessian=T)
gedEst = gedFit$par
plot(density(male.earnings), xlim=c(-1,54), ylim=c(0.0, 0.06), main="")
par(new=T)
plot(density(rsstd(length(male.earnings), mean=stdEst[[1]], sd=stdEst[[2]],
nu=stdEst[[3]], xi=stdEst[[4]])), xlim=c(-1,54), ylim=c(0.0, 0.06), col="blue", main="")
par(new=T)
plot(density(rsged(length(male.earnings), mean=gedEst[[1]], sd=gedEst[[2]],
nu=gedEst[[3]], xi=gedEst[[4]])), xlim=c(-1,54), ylim=c(0.0, 0.06), col="red", main="")
data(Garch, package="Ecdat")
library("fGarch")
data(EuStockMarkets)
logDAX = diff(log(EuStockMarkets[, 1]))
loglikStd = function(x){
f = -sum(log(dstd(logDAX, x[1], x[2], x[3])))
f
}
start = c(mean(logDAX), sd(logDAX), 4)
fitStd = optim(start, loglikStd, method="L-BFGS-B",
lower=c(-.1, .001, 2.1), upper=c(.1, 1, 20))
aicStd = 2 * fitStd$value + 2 * length(fitStd$par)
print(c("MLE=", round(fitStd$par, digits=5)))
loglikSstd = function(x){
f = -sum(log(dsstd(logDAX, x[1], x[2], x[3], x[4])))
f
}
start = c(mean(logDAX), sd(logDAX), 4, 1)
fitSstd = optim(start, loglikSstd, method="L-BFGS-B",
lower=c(-.1, .001, 2.1), upper=c(.1, 1, 20))
aicSstd = 2 * fitSstd$value + 2 * length(fitSstd$par)
print(c("MLE=", round(fitSstd$par, digits=5)))
transLogDAX = qnorm(pstd(logDAX, mean=fitStd$par[1], sd=fitStd$par[2], nu=fitStd$par[3]))
plot(density(transLogDAX))
plot(density(logDAX))
library('fGarch')
gasFlow = read.csv('F:\\R\\SaDAfFE\\data\\GasFlowData.csv')
xlimit = c(3e5,13.5e5)
ylimit = c(0.0, 3.5e-6)
stdEst = sstdFit(gasFlow[[1]])$estimate
plot(density(gasFlow[[1]]), xlim=xlimit, ylim=ylimit, main="")
par(new=T)
plot(density(rsstd(length(gasFlow[[1]]), mean=stdEst[[1]], sd=stdEst[[2]],
nu=stdEst[[3]], xi=stdEst[[4]])), xlim=xlimit, ylim=ylimit, col="blue", main="")
start = c(1, 1)
loglikPos = function(theta) {
- sum(log(dpois(y, lamda=theta[1] + theta[2] * x)))
}
mle = optim(start, loglikPos, hessian=T)
invFishInfo = solve(mle$hessian)
options(digits=4)
mle$par
mle$value
mle$convergence
sqrt(diag(invFishInfo))
library(evir)
library(fGarch)
data(bmw)
start_bmw = c(mean(bmw), sd(bmw), 4)
loglik_bmw = function(theta){
-sum(log(dstd(bmw, mean=theta[1], sd=theta[2], nu=theta[3])))
}
mle_bmw = optim(start_bmw, loglik_bmw, hessian=T)
fishInfo_bmw = solve(mle_bmw$hessian)
data(siemens)
n=length(siemens)
par(mfrow=c(3,2))
qqplot(siemens,qt(((1:n)-.5)/n,2),ylab="t(2) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,3),ylab="t(3) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,4),ylab="t(4) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,5),ylab="t(5) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,8),ylab="t(8) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,12),ylab="t(12) quantiles",
xlab="data quantiles")
loglik_siemens = function(theta){
-sum(log(dt(siemens, df=theta)))
}
mle_siemens = stdFit(siemens) | /SaDAfFE/src/Chapter5.r | no_license | StiffLiu/codes | R | false | false | 4,193 | r | library("Ecdat")
?CPSch3
data(CPSch3)
dimnames(CPSch3)[[2]]
male.earnings = CPSch3[CPSch3[, 3]=="male", 2]
sqrt.male.earnings = sqrt(male.earnings)
log.male.earnings = log(male.earnings)
par(mfrow=c(2,2))
qqnorm(male.earnings, datax=T, main="untransformed")
qqnorm(sqrt.male.earnings, datax=T, main="square-root transformed")
qqnorm(log.male.earnings, datax=T, main="log-transformed")
par(mfrow=c(2,2))
boxplot(male.earnings, main="untransformed")
boxplot(sqrt.male.earnings, main="square-root transformed")
boxplot(log.male.earnings, main="log transformed")
par(mfrow=c(2,2))
plot(density(male.earnings), main="untransformed")
plot(density(sqrt.male.earnings), main="square-root transformed")
plot(density(log.male.earnings, main="log transformed")
library("MASS")
windows()
boxcox(male.earnings~1)
boxcox(male.earnings~1,lambda = seq(.3, .45, 1/100))
bc = boxcox(male.earnings~1,lambda = seq(.3, .45, by=1/100),interp=F)
ind = (bc$y==max(bc$y))
ind2 = (bc$y > max(bc$y) - qchisq(.95,df=1)/2)
bc$x[ind]
bc$x[ind2]
library("fGarch")
stdFit = sstdFit(male.earnings,hessian=T)
stdEst = stdFit$estimate
gedFit = sgedFit(male.earnings,hessian=T)
gedEst = gedFit$par
plot(density(male.earnings), xlim=c(-1,54), ylim=c(0.0, 0.06), main="")
par(new=T)
plot(density(rsstd(length(male.earnings), mean=stdEst[[1]], sd=stdEst[[2]],
nu=stdEst[[3]], xi=stdEst[[4]])), xlim=c(-1,54), ylim=c(0.0, 0.06), col="blue", main="")
par(new=T)
plot(density(rsged(length(male.earnings), mean=gedEst[[1]], sd=gedEst[[2]],
nu=gedEst[[3]], xi=gedEst[[4]])), xlim=c(-1,54), ylim=c(0.0, 0.06), col="red", main="")
data(Garch, package="Ecdat")
library("fGarch")
data(EuStockMarkets)
logDAX = diff(log(EuStockMarkets[, 1]))
loglikStd = function(x){
f = -sum(log(dstd(logDAX, x[1], x[2], x[3])))
f
}
start = c(mean(logDAX), sd(logDAX), 4)
fitStd = optim(start, loglikStd, method="L-BFGS-B",
lower=c(-.1, .001, 2.1), upper=c(.1, 1, 20))
aicStd = 2 * fitStd$value + 2 * length(fitStd$par)
print(c("MLE=", round(fitStd$par, digits=5)))
loglikSstd = function(x){
f = -sum(log(dsstd(logDAX, x[1], x[2], x[3], x[4])))
f
}
start = c(mean(logDAX), sd(logDAX), 4, 1)
fitSstd = optim(start, loglikSstd, method="L-BFGS-B",
lower=c(-.1, .001, 2.1), upper=c(.1, 1, 20))
aicSstd = 2 * fitSstd$value + 2 * length(fitSstd$par)
print(c("MLE=", round(fitSstd$par, digits=5)))
transLogDAX = qnorm(pstd(logDAX, mean=fitStd$par[1], sd=fitStd$par[2], nu=fitStd$par[3]))
plot(density(transLogDAX))
plot(density(logDAX))
library('fGarch')
gasFlow = read.csv('F:\\R\\SaDAfFE\\data\\GasFlowData.csv')
xlimit = c(3e5,13.5e5)
ylimit = c(0.0, 3.5e-6)
stdEst = sstdFit(gasFlow[[1]])$estimate
plot(density(gasFlow[[1]]), xlim=xlimit, ylim=ylimit, main="")
par(new=T)
plot(density(rsstd(length(gasFlow[[1]]), mean=stdEst[[1]], sd=stdEst[[2]],
nu=stdEst[[3]], xi=stdEst[[4]])), xlim=xlimit, ylim=ylimit, col="blue", main="")
start = c(1, 1)
loglikPos = function(theta) {
- sum(log(dpois(y, lamda=theta[1] + theta[2] * x)))
}
mle = optim(start, loglikPos, hessian=T)
invFishInfo = solve(mle$hessian)
options(digits=4)
mle$par
mle$value
mle$convergence
sqrt(diag(invFishInfo))
library(evir)
library(fGarch)
data(bmw)
start_bmw = c(mean(bmw), sd(bmw), 4)
loglik_bmw = function(theta){
-sum(log(dstd(bmw, mean=theta[1], sd=theta[2], nu=theta[3])))
}
mle_bmw = optim(start_bmw, loglik_bmw, hessian=T)
fishInfo_bmw = solve(mle_bmw$hessian)
data(siemens)
n=length(siemens)
par(mfrow=c(3,2))
qqplot(siemens,qt(((1:n)-.5)/n,2),ylab="t(2) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,3),ylab="t(3) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,4),ylab="t(4) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,5),ylab="t(5) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,8),ylab="t(8) quantiles",
xlab="data quantiles")
qqplot(siemens,qt(((1:n)-.5)/n,12),ylab="t(12) quantiles",
xlab="data quantiles")
loglik_siemens = function(theta){
-sum(log(dt(siemens, df=theta)))
}
mle_siemens = stdFit(siemens) |
# date and time in base R (Ken p. 190 ff)
current_date <- Sys.Date()
Sys.time()
my_date <- as.Date("2018-02-12")
my_date
date1 <- my_date + 3
date2 <- my_date - 24
(date3 <- date2 - date1)
(my_time <- as.POSIXlt("2016-02-10 10:25:31"))
(my_time2 <- as.POSIXct("2016-02-10 10:25:31"))
# using format argument
(my_date <- as.Date("2019.01.03", format = "%Y.%d.%m"))
(my_datetime <- as.POSIXlt("7/25/2015 09:30:25", format = "%m/%d/%Y %H:%M:%S"))
?strptime
| /Base_R_Date and Time.R | no_license | KimF1/R-Training-BaseR | R | false | false | 475 | r | # date and time in base R (Ken p. 190 ff)
current_date <- Sys.Date()
Sys.time()
my_date <- as.Date("2018-02-12")
my_date
date1 <- my_date + 3
date2 <- my_date - 24
(date3 <- date2 - date1)
(my_time <- as.POSIXlt("2016-02-10 10:25:31"))
(my_time2 <- as.POSIXct("2016-02-10 10:25:31"))
# using format argument
(my_date <- as.Date("2019.01.03", format = "%Y.%d.%m"))
(my_datetime <- as.POSIXlt("7/25/2015 09:30:25", format = "%m/%d/%Y %H:%M:%S"))
?strptime
|
library(GLMsData)
### Name: heatcap
### Title: Heat capacity of hydrobromic acid
### Aliases: heatcap
### Keywords: datasets
### ** Examples
data(heatcap)
plot(heatcap)
| /data/genthat_extracted_code/GLMsData/examples/heatcap.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 176 | r | library(GLMsData)
### Name: heatcap
### Title: Heat capacity of hydrobromic acid
### Aliases: heatcap
### Keywords: datasets
### ** Examples
data(heatcap)
plot(heatcap)
|
currentLevelsSubs2 = function(subs.o, FUN = "VaR", p=0.98, width = 126, method = "modified", omissions = NULL, reportDate = NULL, mode = "change")
{
if(is.null(reportDate)){reportDate = end(subs.o$LSE)}
levels = list(
LSE = apply(subs.o$LSE[paste("/", reportDate, sep = ""), which(!(names(subs.o$LSE) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode)),
LSD = apply(subs.o$LSD[paste("/", reportDate, sep = ""), which(!(names(subs.o$LSD) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode)),
ED = apply(subs.o$ED[paste("/", reportDate, sep = ""), which(!(names(subs.o$ED) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode)),
MN = apply(subs.o$MN[paste("/", reportDate, sep = ""), which(!(names(subs.o$MN) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode)),
MF = apply(subs.o$MF[paste("/", reportDate, sep = ""), which(!(names(subs.o$MF) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode))
)
result = lapply(levels, FUN = function(l)return(as.data.frame(matrix(unlist(l), nrow = length(l), byrow = T, dimnames = list(names(l), colnames(l[[1]]))))))
names(result) = paste(names(result), FUN, p, sep = " ")
return(result)
}
| /Functions/currentLevelsSubs2.R | no_license | bplloyd/R-risk-mgmt | R | false | false | 1,509 | r | currentLevelsSubs2 = function(subs.o, FUN = "VaR", p=0.98, width = 126, method = "modified", omissions = NULL, reportDate = NULL, mode = "change")
{
if(is.null(reportDate)){reportDate = end(subs.o$LSE)}
levels = list(
LSE = apply(subs.o$LSE[paste("/", reportDate, sep = ""), which(!(names(subs.o$LSE) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode)),
LSD = apply(subs.o$LSD[paste("/", reportDate, sep = ""), which(!(names(subs.o$LSD) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode)),
ED = apply(subs.o$ED[paste("/", reportDate, sep = ""), which(!(names(subs.o$ED) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode)),
MN = apply(subs.o$MN[paste("/", reportDate, sep = ""), which(!(names(subs.o$MN) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode)),
MF = apply(subs.o$MF[paste("/", reportDate, sep = ""), which(!(names(subs.o$MF) %in% omissions))], 2, function(x)compareLevels2(as.xts(x), FUN = FUN, p = p, width = width, method = method, mode = mode))
)
result = lapply(levels, FUN = function(l)return(as.data.frame(matrix(unlist(l), nrow = length(l), byrow = T, dimnames = list(names(l), colnames(l[[1]]))))))
names(result) = paste(names(result), FUN, p, sep = " ")
return(result)
}
|
# plot1.R
# Display Histogram for Global Active Power
# Read in dataset, reduce to necessary working set.
# Note: we have pleanty of RAM, we can import the whole thing
# Prepare conversion functions
setClass('hpcDate')
setAs("character", "hpcDate", function(from) as.Date(from, format="%d/%m/%Y") )
setClass('hpcTime')
setAs("character", "hpcTime", function(from) { return (as.numeric(strptime(from, format="%H:%M:%S"))
- as.numeric(trunc(Sys.time(), units=c("days"))))
})
# Source data
data <- read.table("data/household_power_consumption.txt", sep=";",
header=TRUE, stringsAsFactors = FALSE, na.strings="?",
colClasses = c("hpcDate", "hpcTime", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"))
# Trim to what we will use
dateRange <- c(as.Date("2007-02-01"), as.Date("2007-02-02"))
data <- data[data$Date %in% dateRange,]
# Generate index by day incrementing times
data$Epoc <- ifelse(data$Date == dateRange[2], data$Time+86400, data$Time)
# Generate plot
hist(data$Global_active_power, xlab="Global Active Power (kilowatts)", col="red", main="Global Active Power")
# Save plot to file
dev.copy(png, file="plot1.png")
dev.off() # close the PNG device
| /plot1.R | no_license | coursera-UikL5CoY9xI/ExData_Plotting1 | R | false | false | 1,391 | r | # plot1.R
# Display Histogram for Global Active Power
# Read in dataset, reduce to necessary working set.
# Note: we have pleanty of RAM, we can import the whole thing
# Prepare conversion functions
setClass('hpcDate')
setAs("character", "hpcDate", function(from) as.Date(from, format="%d/%m/%Y") )
setClass('hpcTime')
setAs("character", "hpcTime", function(from) { return (as.numeric(strptime(from, format="%H:%M:%S"))
- as.numeric(trunc(Sys.time(), units=c("days"))))
})
# Source data
data <- read.table("data/household_power_consumption.txt", sep=";",
header=TRUE, stringsAsFactors = FALSE, na.strings="?",
colClasses = c("hpcDate", "hpcTime", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"))
# Trim to what we will use
dateRange <- c(as.Date("2007-02-01"), as.Date("2007-02-02"))
data <- data[data$Date %in% dateRange,]
# Generate index by day incrementing times
data$Epoc <- ifelse(data$Date == dateRange[2], data$Time+86400, data$Time)
# Generate plot
hist(data$Global_active_power, xlab="Global Active Power (kilowatts)", col="red", main="Global Active Power")
# Save plot to file
dev.copy(png, file="plot1.png")
dev.off() # close the PNG device
|
#############
## IQ TREE JACKNIFING
############
#The R packages needed
library(Biostrings)
library(data.table)
library(ape)
library(phangorn)
library(phytools)
options(stringsAsFactors = FALSE)
###### PARAMETER SETUP ###########
###### Directory Setup ###########
jack.reps = "/Volumes/Armored/Mantellidae_Subfamily/Sequence_capture/Jacknife_Trees/IQTrees_all"
jack.reps = "/Volumes/Armored/Mantellidae_Subfamily/Transcriptome-full/Jacknife_Trees"
#Outgroup selection
outgroup.taxa<-c("Rana_kukunoris", "Pelophylax_nigromaculatus")
#########################################
# 1. Create majority rule consensus thing
#########################################
#Creates majority rule consensus tree from jacknifed trees
setwd(jack.reps)
tree.files = list.files(pattern = ".", full.names = F, recursive = F)
#Loop through trees and collapse poorly supported nodes into polytomies
system("rm ../alltrees.tre")
for (i in 1:length(tree.files)){
#read in tree file
temp.tree<-read.tree(tree.files[i])
root.tree<-root(temp.tree, outgroup = outgroup.taxa, resolve.root = T)
write.tree(root.tree, file = "../alltrees.tre", append = T)
}#end i loop
#Load back in the trees
all.trees<-read.tree(file = "../alltrees.tre")
#Computes the tree that most likely represents the topology of everything.
rf.tree = averageTree(all.trees,method="symmetric.difference")
plotTree(root(rf.tree,outgroup=outgroup.taxa,resolve.root=TRUE))
#Consensus edges
t1<-consensus.edges(all.trees)
plotTree(t1,fsize=0.4)
t2<-consensus.edges(all.trees,if.absent="ignore")
plotTree(t2,fsize=0.4)
t3<-consensus.edges(all.trees,method="least.squares")
plotTree(t3,fsize=0.4)
### Older
#Much bette
plotTree(compute.brlen(root(all.trees[[ii]],outgroup=outgroup.taxa,
resolve.root=TRUE)))
densiTree(all.trees[1:500], type = "cladogram")
#Makes a majority rule consensus tree from the trees
cons.tree<-consensus(all.trees, p = 0.5, check.labels = TRUE)
write.tree(cons.tree, file = paste0(out.dir, "/majority_consensus.tre"))
unlink(paste0(out.dir, "/alltrees.tre"))
#
# ##### END SCRIPT
#
| /4-Phylogeny-estimation/05_summarize-jacknife.R | no_license | chutter/murinae-seq | R | false | false | 2,106 | r | #############
## IQ TREE JACKNIFING
############
#The R packages needed
library(Biostrings)
library(data.table)
library(ape)
library(phangorn)
library(phytools)
options(stringsAsFactors = FALSE)
###### PARAMETER SETUP ###########
###### Directory Setup ###########
jack.reps = "/Volumes/Armored/Mantellidae_Subfamily/Sequence_capture/Jacknife_Trees/IQTrees_all"
jack.reps = "/Volumes/Armored/Mantellidae_Subfamily/Transcriptome-full/Jacknife_Trees"
#Outgroup selection
outgroup.taxa<-c("Rana_kukunoris", "Pelophylax_nigromaculatus")
#########################################
# 1. Create majority rule consensus thing
#########################################
#Creates majority rule consensus tree from jacknifed trees
setwd(jack.reps)
tree.files = list.files(pattern = ".", full.names = F, recursive = F)
#Loop through trees and collapse poorly supported nodes into polytomies
system("rm ../alltrees.tre")
for (i in 1:length(tree.files)){
#read in tree file
temp.tree<-read.tree(tree.files[i])
root.tree<-root(temp.tree, outgroup = outgroup.taxa, resolve.root = T)
write.tree(root.tree, file = "../alltrees.tre", append = T)
}#end i loop
#Load back in the trees
all.trees<-read.tree(file = "../alltrees.tre")
#Computes the tree that most likely represents the topology of everything.
rf.tree = averageTree(all.trees,method="symmetric.difference")
plotTree(root(rf.tree,outgroup=outgroup.taxa,resolve.root=TRUE))
#Consensus edges
t1<-consensus.edges(all.trees)
plotTree(t1,fsize=0.4)
t2<-consensus.edges(all.trees,if.absent="ignore")
plotTree(t2,fsize=0.4)
t3<-consensus.edges(all.trees,method="least.squares")
plotTree(t3,fsize=0.4)
### Older
#Much bette
plotTree(compute.brlen(root(all.trees[[ii]],outgroup=outgroup.taxa,
resolve.root=TRUE)))
densiTree(all.trees[1:500], type = "cladogram")
#Makes a majority rule consensus tree from the trees
cons.tree<-consensus(all.trees, p = 0.5, check.labels = TRUE)
write.tree(cons.tree, file = paste0(out.dir, "/majority_consensus.tre"))
unlink(paste0(out.dir, "/alltrees.tre"))
#
# ##### END SCRIPT
#
|
#Windows
Dropbox = "C:\\users/Jake/Dropbox"
#Mac/Unix
Dropbox = "~/Dropbox"
path= "/Booth/Winter 2014/Financial Econometrics/Week 8/"
setwd(paste(Dropbox, path, sep=""))
spdaily = read.csv("sp500_1990.csv")
sp = read.csv("sp.csv")
vix = read.csv("vix.csv")
library(fGarch)
library(nortest)
library(tseries)
#1
summary(sp)
sp$Adj.Close = rev(sp$Adj.Close)
sp$AR1 = c(sp$Adj.Close[1], sp$Adj.Close[1:length(sp$Adj.Close)-1])
sp$returns = log(sp$Adj.Close) - log(sp$AR1)
spfit = garch(sp$returns, order = c(c(1,1),0))
all_r = vector()
all_r
for(j in 1:2000)
{
last_r = sp$returns[length(sp$returns)]
last_h = abs(spfit$fitted[length(spfit$fitted)-1])^2
h = spfit$coef[1] + spfit$coef[2] * last_r^2 + spfit$coef[3] * last_h
last_r = rnorm(n=1, m=0, sd=1) * h^0.5
last_h = h
total = last_r
for(i in 1:29)
{
h = spfit$coef[1] + spfit$coef[2] * last_r^2 + spfit$coef[3] * last_h
last_r = rnorm(n=1, m=0, sd=1) * h^0.5
last_h = h
total = total + last_r
}
all_r <- c(all_r, total)
}
hist(all_r, breaks=19)
mean(all_r)
max(all_r)
min(all_r)
all_r2 = vector()
all_r2
for(j in 1:2000)
{
rands = sample(2:length(sp$returns), 30, replace=T)
last_r = sp$returns[length(sp$returns)]
last_h = abs(spfit$fitted[length(spfit$fitted)-1])^2
h = spfit$coef[1] + spfit$coef[2] * last_r^2 + spfit$coef[3] * last_h
last_r = rnorm(n=1, m=0, sd=1) * h^0.5
last_h = h
total = last_r
for(i in 2:30)
{
h = spfit$coef[1] + spfit$coef[2] * last_r^2 + spfit$coef[3] * last_h
last_r =(sp$returns[rands[i]] / spfit$fitted[rands[i]] ) * h^0.5
last_h = h
total = total + last_r
}
all_r2 <- c(all_r2, total)
}
hist(all_r2, breaks=19)
mean(all_r2)
max(all_r2)
min(all_r2)
skewness(all_r)
skewness(all_r2)
kurtosis(all_r)
kurtosis(all_r2)
#1% value at risk is value below which 1% of cases falls
sorted_r = sort(all_r)
sorted_r[length(sorted_r)/100]
sorted_r2 = sort(all_r2)
sorted_r2[length(sorted_r2)/100]
#15%
x15 = length(sorted_r);
for(i in 1:length(sorted_r))
{
if(sorted_r[i] <= -0.15)
{
x15 = i;
}
else
{
break;
}
}
x15/length(sorted_r)
x152 = length(sorted_r2);
for(i in 1:length(sorted_r2))
{
if(sorted_r2[i] <= -0.15)
{
x152 = i;
}
else
{
break;
}
}
x152/length(sorted_r2)
x15plus = 0;
for(i in 1:length(sorted_r))
{
if(sorted_r[i] >= 0.15)
{
x15plus = i;
break;
}
}
x15plus/length(sorted_r)
x15plus2 = 0;
for(i in 1:length(sorted_r2))
{
if(sorted_r2[i] >= 0.15)
{
x15plus2 = i;
break;
}
}
x15plus2/length(sorted_r2)
#2
summary(spdaily)
spdaily$Adj.Close = rev(spdaily$Adj.Close)
spdaily$AR1 = c(spdaily$Adj.Close[1], spdaily$Adj.Close[1:length(spdaily$Adj.Close)-1])
spdaily$returns = log(spdaily$Adj.Close) - log(spdaily$AR1)
spdaily$crash = spdaily$returns < -.03
spdaily$crash_numeric = spdaily$crash + 0
mean(spdaily$crash_numeric)
hist(spdaily$crash_numeric)
plot(spdaily$crash_numeric)
#gfit = garchFit(formula = ~aparch(1, 1), delta=1, include.delta=FALSE, include.mean=FALSE, data = spdaily$returns)
gfit = garch(spdaily$returns, order = c(c(1,1),0))
summary(gfit)
plot(predict(gfit)[,1], type="l")
spdaily$ht = predict(gfit)[,1]^2
model=lm(crash ~ ht, data=spdaily)
summary(model)
#3
vix$Adj.Close = rev(vix$Adj.Close)
spdaily$prev_vix = c(vix$Adj.Close[1], vix$Adj.Close[1:length(spdaily$returns) - 1])
model=lm(crash ~ ht + prev_vix, data=spdaily)
summary(model)
plot(model$fit, type="l")
total_r = c(0);
for(i in 2:length(spdaily$returns))
{
if(model$fit[i - 1] <= 0.05)
{
total_r = c(total_r, spdaily$returns[i]);
}
}
sort(total_r)[length(total_r)/100]
sort(spdaily$returns)[length(spdaily$returns)/100] | /scripts/financial_econometrics/FE_8_script.R | no_license | jakewalker56/ml-lab | R | false | false | 3,634 | r | #Windows
Dropbox = "C:\\users/Jake/Dropbox"
#Mac/Unix
Dropbox = "~/Dropbox"
path= "/Booth/Winter 2014/Financial Econometrics/Week 8/"
setwd(paste(Dropbox, path, sep=""))
spdaily = read.csv("sp500_1990.csv")
sp = read.csv("sp.csv")
vix = read.csv("vix.csv")
library(fGarch)
library(nortest)
library(tseries)
#1
summary(sp)
sp$Adj.Close = rev(sp$Adj.Close)
sp$AR1 = c(sp$Adj.Close[1], sp$Adj.Close[1:length(sp$Adj.Close)-1])
sp$returns = log(sp$Adj.Close) - log(sp$AR1)
spfit = garch(sp$returns, order = c(c(1,1),0))
all_r = vector()
all_r
for(j in 1:2000)
{
last_r = sp$returns[length(sp$returns)]
last_h = abs(spfit$fitted[length(spfit$fitted)-1])^2
h = spfit$coef[1] + spfit$coef[2] * last_r^2 + spfit$coef[3] * last_h
last_r = rnorm(n=1, m=0, sd=1) * h^0.5
last_h = h
total = last_r
for(i in 1:29)
{
h = spfit$coef[1] + spfit$coef[2] * last_r^2 + spfit$coef[3] * last_h
last_r = rnorm(n=1, m=0, sd=1) * h^0.5
last_h = h
total = total + last_r
}
all_r <- c(all_r, total)
}
hist(all_r, breaks=19)
mean(all_r)
max(all_r)
min(all_r)
all_r2 = vector()
all_r2
for(j in 1:2000)
{
rands = sample(2:length(sp$returns), 30, replace=T)
last_r = sp$returns[length(sp$returns)]
last_h = abs(spfit$fitted[length(spfit$fitted)-1])^2
h = spfit$coef[1] + spfit$coef[2] * last_r^2 + spfit$coef[3] * last_h
last_r = rnorm(n=1, m=0, sd=1) * h^0.5
last_h = h
total = last_r
for(i in 2:30)
{
h = spfit$coef[1] + spfit$coef[2] * last_r^2 + spfit$coef[3] * last_h
last_r =(sp$returns[rands[i]] / spfit$fitted[rands[i]] ) * h^0.5
last_h = h
total = total + last_r
}
all_r2 <- c(all_r2, total)
}
hist(all_r2, breaks=19)
mean(all_r2)
max(all_r2)
min(all_r2)
skewness(all_r)
skewness(all_r2)
kurtosis(all_r)
kurtosis(all_r2)
#1% value at risk is value below which 1% of cases falls
sorted_r = sort(all_r)
sorted_r[length(sorted_r)/100]
sorted_r2 = sort(all_r2)
sorted_r2[length(sorted_r2)/100]
#15%
x15 = length(sorted_r);
for(i in 1:length(sorted_r))
{
if(sorted_r[i] <= -0.15)
{
x15 = i;
}
else
{
break;
}
}
x15/length(sorted_r)
x152 = length(sorted_r2);
for(i in 1:length(sorted_r2))
{
if(sorted_r2[i] <= -0.15)
{
x152 = i;
}
else
{
break;
}
}
x152/length(sorted_r2)
x15plus = 0;
for(i in 1:length(sorted_r))
{
if(sorted_r[i] >= 0.15)
{
x15plus = i;
break;
}
}
x15plus/length(sorted_r)
x15plus2 = 0;
for(i in 1:length(sorted_r2))
{
if(sorted_r2[i] >= 0.15)
{
x15plus2 = i;
break;
}
}
x15plus2/length(sorted_r2)
#2
summary(spdaily)
spdaily$Adj.Close = rev(spdaily$Adj.Close)
spdaily$AR1 = c(spdaily$Adj.Close[1], spdaily$Adj.Close[1:length(spdaily$Adj.Close)-1])
spdaily$returns = log(spdaily$Adj.Close) - log(spdaily$AR1)
spdaily$crash = spdaily$returns < -.03
spdaily$crash_numeric = spdaily$crash + 0
mean(spdaily$crash_numeric)
hist(spdaily$crash_numeric)
plot(spdaily$crash_numeric)
#gfit = garchFit(formula = ~aparch(1, 1), delta=1, include.delta=FALSE, include.mean=FALSE, data = spdaily$returns)
gfit = garch(spdaily$returns, order = c(c(1,1),0))
summary(gfit)
plot(predict(gfit)[,1], type="l")
spdaily$ht = predict(gfit)[,1]^2
model=lm(crash ~ ht, data=spdaily)
summary(model)
#3
vix$Adj.Close = rev(vix$Adj.Close)
spdaily$prev_vix = c(vix$Adj.Close[1], vix$Adj.Close[1:length(spdaily$returns) - 1])
model=lm(crash ~ ht + prev_vix, data=spdaily)
summary(model)
plot(model$fit, type="l")
total_r = c(0);
for(i in 2:length(spdaily$returns))
{
if(model$fit[i - 1] <= 0.05)
{
total_r = c(total_r, spdaily$returns[i]);
}
}
sort(total_r)[length(total_r)/100]
sort(spdaily$returns)[length(spdaily$returns)/100] |
#cargar/instalar librerias
install.packages('igraph')
install.packages('network')
install.packages('sna')
install.packages('ndtv')
install.packages('visNetwork')
devtools::install_github("analyxcompany/ForceAtlas2")
library(readr)
library(dplyr)
library(tidyr)
library(tidyverse)
library(ggplot2)
library(igraph)
library(network)
library(sna)
library(ndtv)
library(visNetwork)
library(ForceAtlas2)
#mapas de color
library(viridis)
library(RColorBrewer)
#cargar datos
#rm(list = ls()) # Remove all the objects we created so far.
# DATASETS
head(MLTExpos1956_2016)
head(agente_agente_relacion)
head(artistas_artistas_acumulado)
head(curador_artistas_acumulado)
head(expo_agentes_curadore_artistas)
head(grafoMLTar_art_1956_2016)
head(grafoMLTcur_art_1956_2016)
head(expo_agentes)
head(expo_agentes_expandido)
head(MLT_expos)
#estadisticas
#expos curadas con año
expos_curadas<- expo_curadores %>%
mutate(curada=if_else(!is.na(nombre_registro_agente),"curada","no-curada",NA_character_)) %>%
select(id_expo,curada) %>%
group_by(id_expo,curada) %>%
summarise(num_expos=n()) %>%
left_join(MLT_expos,., by="id_expo" )%>%
#select(id_expo,ano,tipo_participacion,curada) %>%
#group_by(ano,tipo_participacion,curada) %>%
#summarise(num_curadores=if_el) %>%
rename(num_curadores=num_expos) %>%
transform(num_curadores=if_else(curada=="no-curada",0,1))
#calculamos solo los que han participado más de 4 veces
ex_curadores_ano<-inner_join( MLT_expos,expo_curadores,by="id_expo")
stats_curadores<-ex_curadores_ano %>%group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>% arrange(desc(num_expos))
stats_curadores$num_expos<-as.numeric(stats_curadores$num_expos)
curtop<-filter(stats_curadores,num_expos>4) %>% select(nombre_registro_agente)
expo_curadores_importancia<-expo_curadores %>%
mutate(cur_top=if_else(
nombre_registro_agente %in% as.vector(curtop$nombre_registro_agente),
nombre_registro_agente,"Otros"))
ex_curadores_ano_imp<-inner_join( MLT_expos,expo_curadores_importancia,by="id_expo")
#grafica de expos por tipo de participacion
p_ex <- ggplot(MLT_expos, aes( x=ano ) )
p_ex + geom_histogram(aes(fill=factor(tipo_participacion)), color="white",binwidth=1)+
labs(title ="Exposiciones por año por tipo de participacion",x="años", y="cantidad")
#grafica de expos con sin curador
p_ex_curaduria_ano<-ggplot(expos_curadas, aes(x=ano))
p_ex_curaduria_ano + geom_histogram(aes(fill=factor(curada)), color="white",binwidth=1)+
labs(title ="Exposiciones por año con curador(es)",x="años", y="cantidad")
#grafica curadores
p_ex_curadores_ano<-ggplot(ex_curadores_ano, aes(x=ano))
#todos los que han participado
p_ex_curadores_ano + geom_histogram(aes(fill=factor(nombre_registro_agente)),color="white",binwidth=1)+
theme(legend.position="none")+
labs(title ="Exposiciones no curadas y curadores que participaron en el año",x="años", y="cantidad")
p_ex_curadores_ano_imp<-ggplot(ex_curadores_ano_imp, aes(x=ano))
#todos los que han participado
p_ex_curadores_ano_imp + geom_histogram(aes(fill=factor(nombre_registro_agente)),color="white",binwidth=1)+
theme(legend.position="none")+
labs(title ="Exposiciones no curadas y curadores que participaron en el año",x="años", y="cantidad")
#solo los que han particpado más de 4 veces
p_ex_curadores_ano_imp + geom_histogram(aes(fill=factor(cur_top)),color="white",binwidth=1)+
labs(title ="Exposiciones no curadas y curadores que participaron en el año",x="años", y="cantidad")
##############################################
#Preparar datos para grafos con igraph
enlacesMLT<-MLTExpos1956_2016 %>%
rename(from = nombre_expo_order_ano,to=nombre_registro_agente) %>%
select(from,to,ano,rol_expo) %>% na.omit()
#enlacesMLT %>% filter(is.na(rol_expo))
nodosExpos<-MLT_expos %>%
mutate(tipo_agente="expocisión") %>%
select(nombre_expo_order_ano,ano,tipo_agente,nombre_espacio,id_expo) %>%
rename(nombre_registro_agente=nombre_expo_order_ano,ano_agente_inicia=ano,
nacionalidad_agente = nombre_espacio,id_agente=id_expo)
#funcio para evaluar si hay NA en una columna
completeFun <- function(data, desiredCols) {
completeVec <- complete.cases(data[, desiredCols])
return(data[completeVec, ])
}
nodosAgentes<-MLTExpos1956_2016 %>%
select(nombre_registro_agente,ano_agente_inicia,tipo_agente,nacionalidad_agente) %>%
distinct(nombre_registro_agente,.keep_all = T)
nodosAgentes<-completeFun(nodosAgentes,c("nombre_registro_agente"))
id_agente<-length(nodosExpos$id_agente)+1:length(nodosAgentes$nombre_registro_agente)
nodosAgentes$id_agente<-id_agente
nodosMLT<-bind_rows(nodosExpos,nodosAgentes)
#buscar duliplicados
nodosMLT%>%
group_by(nombre_registro_agente) %>%
filter(n()>1)
#curadores y artistas
enlacesArtCur<-curador_artistas_acumulado %>%
rename(from=a1,to=a2) %>%
select(from,to,veces,relacion)
nodosArtCur<-MLTExpos1956_2016 %>%
select(nombre_registro_agente,rol_expo, ano_agente_inicia,tipo_agente,nacionalidad_agente) %>%
filter(rol_expo %in% c("curador","artista")) %>%
distinct(nombre_registro_agente,.keep_all = T)
#revisar que las listas de nodos y enlaces esten bien formadas
nrow(nodosMLT);length(unique(nodosMLT$id_agente))
nrow(enlacesMLT); nrow(unique(enlacesMLT[,c("from", "to")]))
nodosverificacion<-unique(c(enlacesMLT$from,enlacesMLT$to))
verificacion<-nodosverificacion %in% nodosMLT$nombre_registro_agente
nodosverificacion[!verificacion]
nrow(nodosArtCur);length(unique(nodosArtCur$nombre_registro_agente))
nrow(enlacesArtCur); nrow(unique(enlacesArtCur[,c("from", "to")]))
nodosverificacion<-unique(c(enlacesArtCur$from,enlacesArtCur$to))
verificacion<-nodosverificacion %in% nodosArtCur$nombre_registro_agente
nodosverificacion[!verificacion]
#crear grafos con igraph
gMLT <- graph_from_data_frame(d=enlacesMLT, vertices=nodosMLT, directed=F)
gArtCur<- graph_from_data_frame(d=enlacesArtCur,vertices = nodosArtCur,directed = T)
#gArtCur.el<-graph_(as.matrix(enlacesArtCurAcumulado[,c("from","to")]), directed = T)
# gExArt<-graph_from_data_frame(d=Expos.Artistas, vertices=nodosExArt, directed=F)
# gExCur<-graph_from_data_frame(d=Expos.Curadores, vertices=nodosExCur, directed=F)
# gExAus<-graph_from_data_frame(d=Expos.Auspiciadores, vertices=nodosExAus, directed=F)
# gExPre<-graph_from_data_frame(d=Expos.Presentadores, vertices=nodosExPre, directed=F)
# gExObras<-graph_from_data_frame(d=Expos.Obras, vertices=nodosExObras, directed=F)
# gArtCur<-graph_from_data_frame(d=Artista.Curador, vertices=nodosArtCur, directed=F)
#metricas de los grafos
V(gArtCur)$grado_in<-deg_in<-degree(gArtCur,mode = "in")
V(gArtCur)$grado_out<-deg_out<-degree(gArtCur,mode = "out")
V(gArtCur)$grado_total<-deg_total<-degree(gArtCur,mode = "total")
#
V(gArtCur)$intermediacion_undir<-betweenness(graph = gArtCur , directed = F)
V(gArtCur)$intermediacion_dir<-betweenness(graph = gArtCur , directed = T)
V(gArtCur)$eigenvectores_undir<-evcent(gArtCur,directed = F)$vector
V(gArtCur)$eigenvectores_dir<-evcent(gArtCur,directed = T)$vector
V(gArtCur)$rank_undir<-page_rank(gArtCur,directed = F)$vector
V(gArtCur)$rank_dir<-page_rank(gArtCur,directed = T)$vector
V(gArtCur)$rank<-page_rank(gArtCur)$vector
V(gArtCur)$rank_undir_weighted<-page_rank(gArtCur,weights = E(gArtCur)$veces)$vector
V(gArtCur)$cercania_total<-closeness(gArtCur,mode = "total")
V(gArtCur)$cercania_out<-closeness(gArtCur,mode = "out")
V(gArtCur)$cercania_in<-closeness(gArtCur,mode = "in")
V(gMLT)$grado_total<-degree(gMLT,mode = "total")
V(gMLT)$intermediacion_undir<-betweenness(graph = gMLT , directed = F)
V(gMLT)$eigenvectores_undir<-evcent(gMLT,directed = F)$vector
V(gMLT)$rank_undir<-page_rank(gMLT,directed = F)$vector
#explorar datos graficas
hist(V(gArtCur)$grado_out, breaks=1:vcount(gArtCur)-1, main="Histogram of node degree")
sort(V(gArtCur)$grado,decreasing = T)[1:20]
sort(V(gArtCur)$grado_in,decreasing = T)[1:30]
sort(V(gArtCur)$grado_out,decreasing = T)[1:20]
sort(V(gArtCur)$eigenvectores_undir,decreasing = T)[1:20]
sort(V(gArtCur)$rank_dir,decreasing = T)[1:20]
sort(V(gArtCur)$intermediacion,decreasing = T)[1:20]
#examinar enlaces y vertices(nodos)
E(gMLT) # The edges of the "net" object
V(gMLT) # The vertices of the "net" object
E(gArtCur) # The edges of the "net" object
V(gArtCur) # The vertices of the "net" object
# Generate colors based on media type:
layouts <- grep("^layout_", ls("package:igraph"), value=TRUE)[-1]
colrs <- c("tomato", "blue","green","orange","purple","pink")
#asigna color y tamaño de vértice
# Assign plotting aesthetics: vertex colour by category, vertex size by degree.
V(gMLT)$color <- colrs[as.integer(factor(V(gMLT)$tipo_agente))]
V(gMLT)$size<-sqrt(V(gMLT)$grado_total+1)
V(gArtCur)$color <- colrs[as.integer(factor(V(gArtCur)$rol_expo))]
V(gArtCur)$size<-sqrt(V(gArtCur)$grado_total)
E(gArtCur)$edge.color <- "gray80"
E(gArtCur)$width <- 1+E(gArtCur)$veces/12
# Fix: attribute name was misspelled "weigtht", so igraph never saw an edge
# "weight" attribute. With the correct name, weight-aware routines later in
# the script (e.g. layout_with_kk) will use `veces` as intended.
E(gArtCur)$weight<-E(gArtCur)$veces
#
# V(gExArt)$color <- colrs[as.integer(factor(V(gExArt)$class_nodo))]
# #V(gExArt)$size<- degree(gExArt)/max(degree(gExArt))*10+2
# V(gExArt)$size<-sqrt(degree(gExArt))+2
#
# V(gExCur)$color <- colrs[as.integer(factor(V(gExCur)$class_nodo))]
# #V(gExCur)$size<- degree(gExCur)/max(degree(gExCur))*10+2
# V(gExCur)$size<-sqrt(degree(gExCur))+2
#
# V(gExAus)$color <- colrs[as.integer(factor(V(gExAus)$class_nodo))]
# #V(gExAus)$size<- degree(gExCur)/max(degree(gExCur))*10+2
# V(gExAus)$size<-sqrt(degree(gExAus))+2
#
# V(gExPre)$color <- colrs[as.integer(factor(V(gExPre)$class_nodo))]
# V(gExPre)$size<-sqrt(degree(gExPre))+2
#
# V(gExObras)$color <- colrs[as.integer(factor(V(gExObras)$class_nodo))]
# V(gExObras)$size<-sqrt(degree(gExObras))+2
#plot pelado
plot.igraph(gMLT,vertex.label=NA,vertex.frame.color='white')
#layout mds
plot(gMLT, edge.arrow.size=.4,vertex.label=NA,
vertex.frame.color='white',layout=layout_with_mds)
legend(x=-1.5, y=-0.5, levels(factor(V(gMLT)$tipo_agente)), pch=21,col="#777777", pt.bg=colrs, pt.cex=2.5, bty="n", ncol=1)
# plot(gExArt, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gExCur, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gExAus, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gExPre, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gExObras, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gArtCur, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
plot(gMLT, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_kk)
plot(gMLT, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_graphopt)
legend(x=-1.5, y=-0.5, levels(factor(V(gMLT)$tipo_agente)), pch=21,col="#777777", pt.bg=colrs, pt.cex=2.5, bty="n", ncol=1)
plot(gMLT, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_on_grid)
l<-layout_with_graphopt(gArtCur, start = NULL, niter = 900, charge = 0.1,
mass = 30, spring.length = 2, spring.constant = 0.1,
max.sa.movement = 5)
l <- norm_coords(l, ymin=-1, ymax=1, xmin=-1, xmax=1)
plot(gArtCur, edge.arrow.size=.1,vertex.label=NA, rescale=F, vertex.frame.color='white',layout=l*2)
plot(gArtCur, edge.arrow.size=.1,vertex.label=NA, vertex.frame.color='white',layout=layout_with_kk)
## filtros
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="artista") %>%
dplyr::filter(nombre_registro_agente != "Varios")%>%
group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>%
filter(num_expos>4) %>%
arrange(desc(num_expos)) %>%
.$nombre_registro_agente %>% as.vector()->artistas_sel1
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="artista") %>%
dplyr::filter(nombre_registro_agente != "Varios")%>%
dplyr::filter(obra_en_coleccion_MTL>0) %>%
group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>%
arrange(desc(num_expos)) %>%
.$nombre_registro_agente %>% as.vector()->artistas_sel2
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="curador") %>%
group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>%
arrange(desc(num_expos)) %>%
.$nombre_registro_agente %>% as.vector()->curadores_sel
artistas_sel<-c(artistas_sel1,artistas_sel2) %>% unique()
curador_artistasel<-artistas_sel[artistas_sel %in% curadores_sel]
artistas_selu<-artistas_sel[!(artistas_sel %in% curador_artistasel)]
curadores_sel<-curadores_sel[!(curadores_sel %in% curador_artistasel)]
MLTExpos1956_2016 %>%
dplyr::filter(obra_en_coleccion_MTL>0) %>%
dplyr::filter(nombre_registro_agente %in% artistas_sel)%>%
group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>%
arrange(desc(num_expos)) %>%
.$nombre_registro_agente %>% as.vector()->artistas_obrasMLT
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="artista",
(id_expo %in% expos_curadas[expos_curadas$curada=="curada",]$id_expo)) %>%
dplyr::filter(nombre_registro_agente %in% artistas_selu) %>%
mutate(nombre_s=stringi::stri_paste(nombre_expo," - ",ano),tipo_s="expo",
nombre_t=nombre_registro_agente, tipo_t="artista") %>%
select(nombre_s,tipo_s,nombre_t,tipo_t,ano)->enlacesMLTsel1
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="artista",
(id_expo %in% expos_curadas[expos_curadas$curada=="curada",]$id_expo)) %>%
dplyr::filter(nombre_registro_agente %in% curador_artistasel) %>%
mutate(nombre_s=stringi::stri_paste(nombre_expo," - ",ano),tipo_s="expo",
nombre_t=nombre_registro_agente, tipo_t="artista-curador") %>%
select(nombre_s,tipo_s,nombre_t,tipo_t,ano)->enlacesMLTsel2
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="curador",
(id_expo %in% expos_curadas[expos_curadas$curada=="curada",]$id_expo)) %>%
dplyr::filter(nombre_registro_agente %in% curador_artistasel) %>%
mutate(nombre_s=stringi::stri_paste(nombre_expo," - ",ano),tipo_s="expo",
nombre_t=nombre_registro_agente, tipo_t="artista-curador") %>%
select(nombre_s,tipo_s,nombre_t,tipo_t,ano)->enlacesMLTsel3
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="curador",
(id_expo %in% expos_curadas[expos_curadas$curada=="curada",]$id_expo)) %>%
dplyr::filter(nombre_registro_agente %in% curadores_sel) %>%
mutate(nombre_s=stringi::stri_paste(nombre_expo," - ",ano),tipo_s="expo",
nombre_t=nombre_registro_agente, tipo_t="curador") %>%
select(nombre_s,tipo_s,nombre_t,tipo_t,ano)->enlacesMLTsel4
MLT_obras %>% filter(nombre_artista_registro_std %in% artistas_obrasMLT) %>%
transmute(nombre_s=nombre_artista_registro_std,
tipo_s=if_else(nombre_artista_registro_std %in% curador_artistasel,"artista-curador","artista"),
nombre_t=stringi::stri_paste(titulo_obra," - ",cod_registro),
tipo_t="obra", ano=ano_creacion)->enlacesMLTsel5
enlacesMLTsel<-bind_rows(enlacesMLTsel1,
enlacesMLTsel2,
enlacesMLTsel3,
# enlacesMLTsel5
enlacesMLTsel4) %>% distinct()
write_csv(enlacesMLTsel,"enlacesMTLsel.csv")
| /MLT-grafos-stats.R | no_license | correajfc/R-MLT | R | false | false | 15,266 | r | #cargar/instalar librerias
install.packages('igraph')
install.packages('network')
install.packages('sna')
install.packages('ndtv')
install.packages('visNetwork')
devtools::install_github("analyxcompany/ForceAtlas2")
library(readr)
library(dplyr)
library(tidyr)
library(tidyverse)
library(ggplot2)
library(igraph)
library(network)
library(sna)
library(ndtv)
library(visNetwork)
library(ForceAtlas2)
#mapas de color
library(viridis)
library(RColorBrewer)
#cargar datos
#rm(list = ls()) # Remove all the objects we created so far.
# DATASETS
head(MLTExpos1956_2016)
head(agente_agente_relacion)
head(artistas_artistas_acumulado)
head(curador_artistas_acumulado)
head(expo_agentes_curadore_artistas)
head(grafoMLTar_art_1956_2016)
head(grafoMLTcur_art_1956_2016)
head(expo_agentes)
head(expo_agentes_expandido)
head(MLT_expos)
#estadisticas
#expos curadas con año
expos_curadas<- expo_curadores %>%
mutate(curada=if_else(!is.na(nombre_registro_agente),"curada","no-curada",NA_character_)) %>%
select(id_expo,curada) %>%
group_by(id_expo,curada) %>%
summarise(num_expos=n()) %>%
left_join(MLT_expos,., by="id_expo" )%>%
#select(id_expo,ano,tipo_participacion,curada) %>%
#group_by(ano,tipo_participacion,curada) %>%
#summarise(num_curadores=if_el) %>%
rename(num_curadores=num_expos) %>%
transform(num_curadores=if_else(curada=="no-curada",0,1))
#calculamos solo los que han participado más de 4 veces
ex_curadores_ano<-inner_join( MLT_expos,expo_curadores,by="id_expo")
stats_curadores<-ex_curadores_ano %>%group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>% arrange(desc(num_expos))
stats_curadores$num_expos<-as.numeric(stats_curadores$num_expos)
curtop<-filter(stats_curadores,num_expos>4) %>% select(nombre_registro_agente)
expo_curadores_importancia<-expo_curadores %>%
mutate(cur_top=if_else(
nombre_registro_agente %in% as.vector(curtop$nombre_registro_agente),
nombre_registro_agente,"Otros"))
ex_curadores_ano_imp<-inner_join( MLT_expos,expo_curadores_importancia,by="id_expo")
#grafica de expos por tipo de participacion
p_ex <- ggplot(MLT_expos, aes( x=ano ) )
p_ex + geom_histogram(aes(fill=factor(tipo_participacion)), color="white",binwidth=1)+
labs(title ="Exposiciones por año por tipo de participacion",x="años", y="cantidad")
#grafica de expos con sin curador
p_ex_curaduria_ano<-ggplot(expos_curadas, aes(x=ano))
p_ex_curaduria_ano + geom_histogram(aes(fill=factor(curada)), color="white",binwidth=1)+
labs(title ="Exposiciones por año con curador(es)",x="años", y="cantidad")
#grafica curadores
p_ex_curadores_ano<-ggplot(ex_curadores_ano, aes(x=ano))
#todos los que han participado
p_ex_curadores_ano + geom_histogram(aes(fill=factor(nombre_registro_agente)),color="white",binwidth=1)+
theme(legend.position="none")+
labs(title ="Exposiciones no curadas y curadores que participaron en el año",x="años", y="cantidad")
p_ex_curadores_ano_imp<-ggplot(ex_curadores_ano_imp, aes(x=ano))
#todos los que han participado
p_ex_curadores_ano_imp + geom_histogram(aes(fill=factor(nombre_registro_agente)),color="white",binwidth=1)+
theme(legend.position="none")+
labs(title ="Exposiciones no curadas y curadores que participaron en el año",x="años", y="cantidad")
#solo los que han particpado más de 4 veces
p_ex_curadores_ano_imp + geom_histogram(aes(fill=factor(cur_top)),color="white",binwidth=1)+
labs(title ="Exposiciones no curadas y curadores que participaron en el año",x="años", y="cantidad")
##############################################
#Preparar datos para grafos con igraph
enlacesMLT<-MLTExpos1956_2016 %>%
rename(from = nombre_expo_order_ano,to=nombre_registro_agente) %>%
select(from,to,ano,rol_expo) %>% na.omit()
#enlacesMLT %>% filter(is.na(rol_expo))
nodosExpos<-MLT_expos %>%
mutate(tipo_agente="expocisión") %>%
select(nombre_expo_order_ano,ano,tipo_agente,nombre_espacio,id_expo) %>%
rename(nombre_registro_agente=nombre_expo_order_ano,ano_agente_inicia=ano,
nacionalidad_agente = nombre_espacio,id_agente=id_expo)
#función para evaluar si hay NA en una columna
# Keep only the rows of `data` with no NA in the columns named/indexed by
# `desiredCols`; other columns may still contain NA. Returns a data frame
# with the original column structure (row subset only).
completeFun <- function(data, desiredCols) {
  data[complete.cases(data[, desiredCols]), ]
}
nodosAgentes<-MLTExpos1956_2016 %>%
select(nombre_registro_agente,ano_agente_inicia,tipo_agente,nacionalidad_agente) %>%
distinct(nombre_registro_agente,.keep_all = T)
nodosAgentes<-completeFun(nodosAgentes,c("nombre_registro_agente"))
id_agente<-length(nodosExpos$id_agente)+1:length(nodosAgentes$nombre_registro_agente)
nodosAgentes$id_agente<-id_agente
nodosMLT<-bind_rows(nodosExpos,nodosAgentes)
#buscar duliplicados
nodosMLT%>%
group_by(nombre_registro_agente) %>%
filter(n()>1)
#curadores y artistas
enlacesArtCur<-curador_artistas_acumulado %>%
rename(from=a1,to=a2) %>%
select(from,to,veces,relacion)
nodosArtCur<-MLTExpos1956_2016 %>%
select(nombre_registro_agente,rol_expo, ano_agente_inicia,tipo_agente,nacionalidad_agente) %>%
filter(rol_expo %in% c("curador","artista")) %>%
distinct(nombre_registro_agente,.keep_all = T)
#revisar que las listas de nodos y enlaces esten bien formadas
nrow(nodosMLT);length(unique(nodosMLT$id_agente))
nrow(enlacesMLT); nrow(unique(enlacesMLT[,c("from", "to")]))
nodosverificacion<-unique(c(enlacesMLT$from,enlacesMLT$to))
verificacion<-nodosverificacion %in% nodosMLT$nombre_registro_agente
nodosverificacion[!verificacion]
nrow(nodosArtCur);length(unique(nodosArtCur$nombre_registro_agente))
nrow(enlacesArtCur); nrow(unique(enlacesArtCur[,c("from", "to")]))
nodosverificacion<-unique(c(enlacesArtCur$from,enlacesArtCur$to))
verificacion<-nodosverificacion %in% nodosArtCur$nombre_registro_agente
nodosverificacion[!verificacion]
#crear grafos con igraph
gMLT <- graph_from_data_frame(d=enlacesMLT, vertices=nodosMLT, directed=F)
gArtCur<- graph_from_data_frame(d=enlacesArtCur,vertices = nodosArtCur,directed = T)
#gArtCur.el<-graph_(as.matrix(enlacesArtCurAcumulado[,c("from","to")]), directed = T)
# gExArt<-graph_from_data_frame(d=Expos.Artistas, vertices=nodosExArt, directed=F)
# gExCur<-graph_from_data_frame(d=Expos.Curadores, vertices=nodosExCur, directed=F)
# gExAus<-graph_from_data_frame(d=Expos.Auspiciadores, vertices=nodosExAus, directed=F)
# gExPre<-graph_from_data_frame(d=Expos.Presentadores, vertices=nodosExPre, directed=F)
# gExObras<-graph_from_data_frame(d=Expos.Obras, vertices=nodosExObras, directed=F)
# gArtCur<-graph_from_data_frame(d=Artista.Curador, vertices=nodosArtCur, directed=F)
#metricas de los grafos
V(gArtCur)$grado_in<-deg_in<-degree(gArtCur,mode = "in")
V(gArtCur)$grado_out<-deg_out<-degree(gArtCur,mode = "out")
V(gArtCur)$grado_total<-deg_total<-degree(gArtCur,mode = "total")
#
V(gArtCur)$intermediacion_undir<-betweenness(graph = gArtCur , directed = F)
V(gArtCur)$intermediacion_dir<-betweenness(graph = gArtCur , directed = T)
V(gArtCur)$eigenvectores_undir<-evcent(gArtCur,directed = F)$vector
V(gArtCur)$eigenvectores_dir<-evcent(gArtCur,directed = T)$vector
V(gArtCur)$rank_undir<-page_rank(gArtCur,directed = F)$vector
V(gArtCur)$rank_dir<-page_rank(gArtCur,directed = T)$vector
V(gArtCur)$rank<-page_rank(gArtCur)$vector
V(gArtCur)$rank_undir_weighted<-page_rank(gArtCur,weights = E(gArtCur)$veces)$vector
V(gArtCur)$cercania_total<-closeness(gArtCur,mode = "total")
V(gArtCur)$cercania_out<-closeness(gArtCur,mode = "out")
V(gArtCur)$cercania_in<-closeness(gArtCur,mode = "in")
V(gMLT)$grado_total<-degree(gMLT,mode = "total")
V(gMLT)$intermediacion_undir<-betweenness(graph = gMLT , directed = F)
V(gMLT)$eigenvectores_undir<-evcent(gMLT,directed = F)$vector
V(gMLT)$rank_undir<-page_rank(gMLT,directed = F)$vector
#explorar datos graficas
hist(V(gArtCur)$grado_out, breaks=1:vcount(gArtCur)-1, main="Histogram of node degree")
sort(V(gArtCur)$grado,decreasing = T)[1:20]
sort(V(gArtCur)$grado_in,decreasing = T)[1:30]
sort(V(gArtCur)$grado_out,decreasing = T)[1:20]
sort(V(gArtCur)$eigenvectores_undir,decreasing = T)[1:20]
sort(V(gArtCur)$rank_dir,decreasing = T)[1:20]
sort(V(gArtCur)$intermediacion,decreasing = T)[1:20]
#examinar enlaces y vertices(nodos)
E(gMLT) # The edges of the "net" object
V(gMLT) # The vertices of the "net" object
E(gArtCur) # The edges of the "net" object
V(gArtCur) # The vertices of the "net" object
# Generate colors based on media type:
layouts <- grep("^layout_", ls("package:igraph"), value=TRUE)[-1]
colrs <- c("tomato", "blue","green","orange","purple","pink")
#asigna color y tamaño de vértice
# Assign plotting aesthetics: vertex colour by category, vertex size by degree.
V(gMLT)$color <- colrs[as.integer(factor(V(gMLT)$tipo_agente))]
V(gMLT)$size<-sqrt(V(gMLT)$grado_total+1)
V(gArtCur)$color <- colrs[as.integer(factor(V(gArtCur)$rol_expo))]
V(gArtCur)$size<-sqrt(V(gArtCur)$grado_total)
E(gArtCur)$edge.color <- "gray80"
E(gArtCur)$width <- 1+E(gArtCur)$veces/12
# Fix: attribute name was misspelled "weigtht", so igraph never saw an edge
# "weight" attribute. With the correct name, weight-aware routines later in
# the script (e.g. layout_with_kk) will use `veces` as intended.
E(gArtCur)$weight<-E(gArtCur)$veces
#
# V(gExArt)$color <- colrs[as.integer(factor(V(gExArt)$class_nodo))]
# #V(gExArt)$size<- degree(gExArt)/max(degree(gExArt))*10+2
# V(gExArt)$size<-sqrt(degree(gExArt))+2
#
# V(gExCur)$color <- colrs[as.integer(factor(V(gExCur)$class_nodo))]
# #V(gExCur)$size<- degree(gExCur)/max(degree(gExCur))*10+2
# V(gExCur)$size<-sqrt(degree(gExCur))+2
#
# V(gExAus)$color <- colrs[as.integer(factor(V(gExAus)$class_nodo))]
# #V(gExAus)$size<- degree(gExCur)/max(degree(gExCur))*10+2
# V(gExAus)$size<-sqrt(degree(gExAus))+2
#
# V(gExPre)$color <- colrs[as.integer(factor(V(gExPre)$class_nodo))]
# V(gExPre)$size<-sqrt(degree(gExPre))+2
#
# V(gExObras)$color <- colrs[as.integer(factor(V(gExObras)$class_nodo))]
# V(gExObras)$size<-sqrt(degree(gExObras))+2
#plot pelado
plot.igraph(gMLT,vertex.label=NA,vertex.frame.color='white')
#layout mds
plot(gMLT, edge.arrow.size=.4,vertex.label=NA,
vertex.frame.color='white',layout=layout_with_mds)
legend(x=-1.5, y=-0.5, levels(factor(V(gMLT)$tipo_agente)), pch=21,col="#777777", pt.bg=colrs, pt.cex=2.5, bty="n", ncol=1)
# plot(gExArt, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gExCur, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gExAus, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gExPre, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gExObras, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
# plot(gArtCur, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_mds)
plot(gMLT, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_kk)
plot(gMLT, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_with_graphopt)
legend(x=-1.5, y=-0.5, levels(factor(V(gMLT)$tipo_agente)), pch=21,col="#777777", pt.bg=colrs, pt.cex=2.5, bty="n", ncol=1)
plot(gMLT, edge.arrow.size=.4,vertex.label=NA, vertex.frame.color='white',layout=layout_on_grid)
l<-layout_with_graphopt(gArtCur, start = NULL, niter = 900, charge = 0.1,
mass = 30, spring.length = 2, spring.constant = 0.1,
max.sa.movement = 5)
l <- norm_coords(l, ymin=-1, ymax=1, xmin=-1, xmax=1)
plot(gArtCur, edge.arrow.size=.1,vertex.label=NA, rescale=F, vertex.frame.color='white',layout=l*2)
plot(gArtCur, edge.arrow.size=.1,vertex.label=NA, vertex.frame.color='white',layout=layout_with_kk)
## filtros
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="artista") %>%
dplyr::filter(nombre_registro_agente != "Varios")%>%
group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>%
filter(num_expos>4) %>%
arrange(desc(num_expos)) %>%
.$nombre_registro_agente %>% as.vector()->artistas_sel1
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="artista") %>%
dplyr::filter(nombre_registro_agente != "Varios")%>%
dplyr::filter(obra_en_coleccion_MTL>0) %>%
group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>%
arrange(desc(num_expos)) %>%
.$nombre_registro_agente %>% as.vector()->artistas_sel2
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="curador") %>%
group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>%
arrange(desc(num_expos)) %>%
.$nombre_registro_agente %>% as.vector()->curadores_sel
artistas_sel<-c(artistas_sel1,artistas_sel2) %>% unique()
curador_artistasel<-artistas_sel[artistas_sel %in% curadores_sel]
artistas_selu<-artistas_sel[!(artistas_sel %in% curador_artistasel)]
curadores_sel<-curadores_sel[!(curadores_sel %in% curador_artistasel)]
MLTExpos1956_2016 %>%
dplyr::filter(obra_en_coleccion_MTL>0) %>%
dplyr::filter(nombre_registro_agente %in% artistas_sel)%>%
group_by(nombre_registro_agente) %>%
summarise(num_expos=n()) %>%
arrange(desc(num_expos)) %>%
.$nombre_registro_agente %>% as.vector()->artistas_obrasMLT
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="artista",
(id_expo %in% expos_curadas[expos_curadas$curada=="curada",]$id_expo)) %>%
dplyr::filter(nombre_registro_agente %in% artistas_selu) %>%
mutate(nombre_s=stringi::stri_paste(nombre_expo," - ",ano),tipo_s="expo",
nombre_t=nombre_registro_agente, tipo_t="artista") %>%
select(nombre_s,tipo_s,nombre_t,tipo_t,ano)->enlacesMLTsel1
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="artista",
(id_expo %in% expos_curadas[expos_curadas$curada=="curada",]$id_expo)) %>%
dplyr::filter(nombre_registro_agente %in% curador_artistasel) %>%
mutate(nombre_s=stringi::stri_paste(nombre_expo," - ",ano),tipo_s="expo",
nombre_t=nombre_registro_agente, tipo_t="artista-curador") %>%
select(nombre_s,tipo_s,nombre_t,tipo_t,ano)->enlacesMLTsel2
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="curador",
(id_expo %in% expos_curadas[expos_curadas$curada=="curada",]$id_expo)) %>%
dplyr::filter(nombre_registro_agente %in% curador_artistasel) %>%
mutate(nombre_s=stringi::stri_paste(nombre_expo," - ",ano),tipo_s="expo",
nombre_t=nombre_registro_agente, tipo_t="artista-curador") %>%
select(nombre_s,tipo_s,nombre_t,tipo_t,ano)->enlacesMLTsel3
MLTExpos1956_2016 %>%
dplyr::filter(rol_expo=="curador",
(id_expo %in% expos_curadas[expos_curadas$curada=="curada",]$id_expo)) %>%
dplyr::filter(nombre_registro_agente %in% curadores_sel) %>%
mutate(nombre_s=stringi::stri_paste(nombre_expo," - ",ano),tipo_s="expo",
nombre_t=nombre_registro_agente, tipo_t="curador") %>%
select(nombre_s,tipo_s,nombre_t,tipo_t,ano)->enlacesMLTsel4
MLT_obras %>% filter(nombre_artista_registro_std %in% artistas_obrasMLT) %>%
transmute(nombre_s=nombre_artista_registro_std,
tipo_s=if_else(nombre_artista_registro_std %in% curador_artistasel,"artista-curador","artista"),
nombre_t=stringi::stri_paste(titulo_obra," - ",cod_registro),
tipo_t="obra", ano=ano_creacion)->enlacesMLTsel5
enlacesMLTsel<-bind_rows(enlacesMLTsel1,
enlacesMLTsel2,
enlacesMLTsel3,
# enlacesMLTsel5
enlacesMLTsel4) %>% distinct()
write_csv(enlacesMLTsel,"enlacesMTLsel.csv")
|
url = "household_power_consumption.txt"
data <- read.table(url, header=TRUE, nrows=2100000, sep=";", stringsAsFactors =FALSE)
subset <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
date <- as.character(as.Date(subset$Date, "%d/%m/%Y"))
x <- paste(date, subset$Time)
dateTime <- strptime(x, "%Y-%m-%d %H:%M:%S")
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
plot(dateTime, as.numeric(subset$Global_active_power), type="l",
ylab="Global Active Power", xlab="", cex=0.2)
plot(dateTime, as.numeric(subset$Voltage), type="l",
ylab="Voltage", xlab="datetime")
plot(dateTime, as.numeric(subset$Sub_metering_1), type="l",
ylab="Energy Submetering", xlab="")
lines(dateTime, as.numeric(subset$Sub_metering_2), type="l", col="red")
lines(dateTime, as.numeric(subset$Sub_metering_3), type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd =2.5,
col=c("black", "red", "blue"), bty = "n")
plot(dateTime, as.numeric(subset$Global_reactive_power), type="l", xlab="datetime",
ylab="Global_reactive_power")
dev.off() | /plot4.R | no_license | suswaram/ExData_Plotting1 | R | false | false | 1,132 | r | url = "household_power_consumption.txt"
data <- read.table(url, header=TRUE, nrows=2100000, sep=";", stringsAsFactors =FALSE)
subset <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
date <- as.character(as.Date(subset$Date, "%d/%m/%Y"))
x <- paste(date, subset$Time)
dateTime <- strptime(x, "%Y-%m-%d %H:%M:%S")
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
plot(dateTime, as.numeric(subset$Global_active_power), type="l",
ylab="Global Active Power", xlab="", cex=0.2)
plot(dateTime, as.numeric(subset$Voltage), type="l",
ylab="Voltage", xlab="datetime")
plot(dateTime, as.numeric(subset$Sub_metering_1), type="l",
ylab="Energy Submetering", xlab="")
lines(dateTime, as.numeric(subset$Sub_metering_2), type="l", col="red")
lines(dateTime, as.numeric(subset$Sub_metering_3), type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd =2.5,
col=c("black", "red", "blue"), bty = "n")
plot(dateTime, as.numeric(subset$Global_reactive_power), type="l", xlab="datetime",
ylab="Global_reactive_power")
dev.off() |
# Command-line driver for Louvain (multilevel) community detection.
# Usage: Rscript run_louvain.R <workdir> <fileinput> <fileoutput> <sparse_flag>
args = commandArgs(trailingOnly = TRUE)
workdir = args[1]
fileinput = args[2]
fileoutput = args[3]
sparse_flag = strtoi(args[4])

setwd(workdir)
suppressWarnings(suppressMessages(library("igraph")))
suppressWarnings(suppressMessages(library("Matrix")))

# Read the input and build an undirected, weighted graph (self-loops dropped).
m = as.matrix(read.table(fileinput, header = FALSE, sep = " "))
if (sparse_flag == 1) {
  # Rows are (i, j, weight) triplets: assemble a symmetric sparse matrix first.
  x = sparseMatrix(m[, 1], m[, 2], x = m[, 3], symmetric = TRUE)
  g = graph.adjacency(x, mode = "undirected", weighted = TRUE, diag = FALSE)
} else {
  # Input is a full (dense) adjacency matrix.
  g = graph.adjacency(m, mode = "undirected", weighted = TRUE, diag = FALSE)
}

# Run Louvain and keep the membership matrix with its row order reversed,
# matching the original script's output convention.
comm = (multilevel.community(g))$memberships
comm = comm[nrow(comm):1, ]
write.table(comm, file=fileoutput, row.names=FALSE, col.names=FALSE) | /real_perturbed/code/scripts/louvain/run_louvain.R | no_license | biomedical-cybernetics/LGI-MCL | R | false | false | 779 | r | args = commandArgs(trailingOnly=TRUE)
workdir = args[1]
fileinput = args[2]
fileoutput = args[3]
sparse_flag = strtoi(args[4])
setwd(workdir)
suppressWarnings(suppressMessages(library("igraph")))
suppressWarnings(suppressMessages(library("Matrix")))
# create unweighted matrix
m = as.matrix(read.table(fileinput,header=FALSE,sep=" "))
if (sparse_flag == 1)
{
x = sparseMatrix(m[,1], m[,2], x=m[,3], symmetric=TRUE)
g = graph.adjacency(x, mode="undirected", weighted=TRUE, diag=FALSE)
} else
{
g = graph.adjacency(m, mode="undirected", weighted=TRUE, diag=FALSE)
}
# run louvain and save communities
comm = (multilevel.community(g))$memberships
comm = comm[nrow(comm):1, ]
write.table(comm, file=fileoutput, row.names=FALSE, col.names=FALSE) |
#Create table of top dif ex genes sorted by p-value or logFC
# Shiny app setup: load dependencies, read database credentials from an
# ini-style file, and pull the precomputed per-genome stats tables into memory.
library(RMySQL)
library(DBI)
library(shiny)
library(ggplot2)
library(limma)
library(data.table)
#Load ini file from specified location
# Expected format: one key=value pair per line, read positionally below.
ini<-read.table("~/Desktop/Group_Project/config_file.ini", sep="=", col.names=c("key","value"), as.is=c(1,2))
#Read contents of .ini file (Can easily break if file format is wrong)
# Rows are accessed by index, so the line order in the ini file matters:
# 1=host, 2=dbname, 3=user, 4=password.
# NOTE(review): trimWhiteSpace() is presumably provided by limma (loaded
# above) -- confirm, since it is not base R.
myhost <- trimWhiteSpace(ini[1, "value"])
myname <- trimWhiteSpace(ini[2, "value"])
myuser <- trimWhiteSpace(ini[3, "value"])
mypass <- trimWhiteSpace(ini[4, "value"])
#Connect to database using values from ini file
con <- dbConnect(RMySQL::MySQL(), host = myhost, user = myuser, password = mypass, dbname = myname)
#Get tables from database
# Column names are overwritten with a fixed schema after each read; the app's
# sorting code relies on the "P" and "logFC" columns existing under these names.
rn4_stats <- dbReadTable(con, "rn4_stats")
colnames(rn4_stats) <- c("gene_id", "logFC", "P", "adjP", "t", "B", "dif_ex")
rn6_stats <- dbReadTable(con, "rn6_stats")
colnames(rn6_stats) <- c("gene_id", "logFC", "P", "adjP", "t", "B", "dif_ex")
gene_id_and_name <- dbReadTable(con, "gene_id_and_name")
colnames(gene_id_and_name) <- c("gene_id", "gene_name")
#close database connection
dbDisconnect(con)
# Define UI for application that displays table
# UI: a sidebar of input controls plus a main panel showing the result table.
ui <- fluidPage(
  titlePanel("Most Differentially Expressed Genes"),
  sidebarLayout(
    # Controls: which genome build to load, the sort column, and row count.
    sidebarPanel(
      selectInput("genome", "Choose a genome:", choices = c("rn4", "rn6")),
      selectInput("order", "Order by:", choices = c("p.value", "logFC")),
      numericInput("obs", "Number of genes to view:", 10)
    ),
    # Output slot filled by the server's renderTable().
    mainPanel(
      tableOutput("view")
    )
  )
)
# Define server logic required to draw a histogram
# Server logic: join the chosen genome's stats with gene names, sort by the
# chosen statistic (ascending), and render the first `input$obs` rows.
server <- function(input, output) {
  # Reactive: stats table for the selected genome, joined with gene names.
  datasetInput <- reactive({
    switch(input$genome,
           "rn4" = merge(gene_id_and_name, rn4_stats, by = "gene_id"),
           "rn6" = merge(gene_id_and_name, rn6_stats, by = "gene_id"))
  })
  # Reactive: the joined table sorted by the selected column.
  # Fix: the original used attach(datasetInput()) with no matching detach();
  # inside a reactive that re-attaches a copy of the data frame on every
  # invalidation, growing the search path and masking names globally.
  # Index the data frame directly instead.
  sortedData <- reactive({
    d <- datasetInput()
    switch(input$order,
           "p.value" = d[order(d$P), ],
           "logFC" = d[order(d$logFC), ])
  })
  # Show the first "n" observations.
  output$view <- renderTable({
    head(sortedData(), n = input$obs)
  })
}
# Run the application
shinyApp(ui = ui, server = server) | /shiny_summary_table.R | no_license | MonkeyFish189/SteeredResearchProject | R | false | false | 2,460 | r | #Create table of top dif ex genes sorted by p-value or logFC
# Shiny app setup: load dependencies, read database credentials from an
# ini-style file, and pull the precomputed per-genome stats tables into memory.
library(RMySQL)
library(DBI)
library(shiny)
library(ggplot2)
library(limma)
library(data.table)
#Load ini file from specified location
# Expected format: one key=value pair per line, read positionally below.
ini<-read.table("~/Desktop/Group_Project/config_file.ini", sep="=", col.names=c("key","value"), as.is=c(1,2))
#Read contents of .ini file (Can easily break if file format is wrong)
# Rows are accessed by index, so the line order in the ini file matters:
# 1=host, 2=dbname, 3=user, 4=password.
# NOTE(review): trimWhiteSpace() is presumably provided by limma (loaded
# above) -- confirm, since it is not base R.
myhost <- trimWhiteSpace(ini[1, "value"])
myname <- trimWhiteSpace(ini[2, "value"])
myuser <- trimWhiteSpace(ini[3, "value"])
mypass <- trimWhiteSpace(ini[4, "value"])
#Connect to database using values from ini file
con <- dbConnect(RMySQL::MySQL(), host = myhost, user = myuser, password = mypass, dbname = myname)
#Get tables from database
# Column names are overwritten with a fixed schema after each read; the app's
# sorting code relies on the "P" and "logFC" columns existing under these names.
rn4_stats <- dbReadTable(con, "rn4_stats")
colnames(rn4_stats) <- c("gene_id", "logFC", "P", "adjP", "t", "B", "dif_ex")
rn6_stats <- dbReadTable(con, "rn6_stats")
colnames(rn6_stats) <- c("gene_id", "logFC", "P", "adjP", "t", "B", "dif_ex")
gene_id_and_name <- dbReadTable(con, "gene_id_and_name")
colnames(gene_id_and_name) <- c("gene_id", "gene_name")
#close database connection
dbDisconnect(con)
# Define UI for application that displays table
# UI: a sidebar of input controls plus a main panel showing the result table.
ui <- fluidPage(
  titlePanel("Most Differentially Expressed Genes"),
  sidebarLayout(
    # Controls: which genome build to load, the sort column, and row count.
    sidebarPanel(
      selectInput("genome", "Choose a genome:", choices = c("rn4", "rn6")),
      selectInput("order", "Order by:", choices = c("p.value", "logFC")),
      numericInput("obs", "Number of genes to view:", 10)
    ),
    # Output slot filled by the server's renderTable().
    mainPanel(
      tableOutput("view")
    )
  )
)
# Define server logic required to draw a histogram
# Server logic: join the chosen genome's stats with gene names, sort by the
# chosen statistic (ascending), and render the first `input$obs` rows.
server <- function(input, output) {
  # Reactive: stats table for the selected genome, joined with gene names.
  datasetInput <- reactive({
    switch(input$genome,
           "rn4" = merge(gene_id_and_name, rn4_stats, by = "gene_id"),
           "rn6" = merge(gene_id_and_name, rn6_stats, by = "gene_id"))
  })
  # Reactive: the joined table sorted by the selected column.
  # Fix: the original used attach(datasetInput()) with no matching detach();
  # inside a reactive that re-attaches a copy of the data frame on every
  # invalidation, growing the search path and masking names globally.
  # Index the data frame directly instead.
  sortedData <- reactive({
    d <- datasetInput()
    switch(input$order,
           "p.value" = d[order(d$P), ],
           "logFC" = d[order(d$logFC), ])
  })
  # Show the first "n" observations.
  output$view <- renderTable({
    head(sortedData(), n = input$obs)
  })
}
# Run the application
shinyApp(ui = ui, server = server) |
context("Read osw files.")
test_that("test_fetchAnalytesInfo",{
filenames <- data.frame("filename" = c("data/raw/hroest_K120808_Strep10%PlasmaBiolRepl1_R03_SW_filt.mzML.gz",
"data/raw/hroest_K120809_Strep0%PlasmaBiolRepl2_R04_SW_filt.mzML.gz",
"data/raw/hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt.mzML.gz"),
"runs" = c("hroest_K120808_Strep10%PlasmaBiolRepl1_R03_SW_filt",
"hroest_K120809_Strep0%PlasmaBiolRepl2_R04_SW_filt",
"hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
oswName <- file.path(system.file("extdata", package = "DIAlignR"), "osw", "merged.osw")
expOutput <- data.frame("transition_group_id" = rep("19051_KLIVTSEGC[160]FK/2", 6),
"filename" = rep("data/raw/hroest_K120809_Strep0%PlasmaBiolRepl2_R04_SW_filt.mzML.gz", 6),
"RT" = rep(2586.12, 6),
"delta_rt" = rep(78.9663, 6),
"assay_RT" = rep(13.5, 6),
"Intensity" = rep(26.2182, 6),
"leftWidth" = rep(2571.738, 6),
"rightWidth" = rep(2609.288, 6),
"peak_group_rank" = rep(1, 6),
"m_score" = rep(0.001041916, 6),
"transition_id" = c(58312, 58313, 58314, 58315, 58316, 58317),
stringsAsFactors=FALSE)
outData <- fetchAnalytesInfo(oswName, maxFdrQuery = 0.05, oswMerged = TRUE,
analytes = c("19051_KLIVTSEGC[160]FK/2"), filename = filenames$filename[2],
runType = "DIA_Proteomics", analyteInGroupLabel = TRUE)
expect_equal(outData, expOutput, tolerance=1e-6)
outData <- fetchAnalytesInfo(oswName, maxFdrQuery = 0.5, oswMerged = TRUE,
analytes = c("IHFLSPVRPFTLTPGDEEESFIQLITPVR_3"), filename = filenames$filename[3],
runType = "DIA_Proteomics", analyteInGroupLabel = FALSE)
expOutput <- data.frame("transition_group_id" = rep("IHFLSPVRPFTLTPGDEEESFIQLITPVR_3", 12),
"filename" = rep("data/raw/hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt.mzML.gz", 12),
"RT" = c(rep(6483.50, 6), rep(6597.54, 6)),
"delta_rt" = c(rep(78.8163, 6), rep(192.8560, 6)),
"assay_RT" = rep(126.7, 12),
"Intensity" = c(rep(61.0299, 6), rep(16.7115, 6)),
"leftWidth" = c(rep(6468.855, 6), rep(6574.684, 6)),
"rightWidth" = c(rep(6499.579, 6), rep(6615.649, 6)),
"peak_group_rank" = c(rep(1, 6), rep(2, 6)),
"m_score" = c(rep(5.692077e-05, 6), rep(3.690986e-01,6)),
"transition_id" = rep(c(14843, 14844, 14845, 14846, 14847, 14848), 2),
stringsAsFactors=FALSE)
expect_equal(outData, expOutput, tolerance=1e-6)
})
test_that("test_getOswAnalytes",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- getRunNames(dataPath, oswMerged = TRUE)
outData <- getOswAnalytes(fileInfo, oswMerged = TRUE,
maxFdrQuery = 0.01, runType = "DIA_Proteomics")
expData <- data.frame("transition_group_id" = rep("AAMIGGADATSNVR_2", 2),
"filename" = rep("data/raw/hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt.mzML.gz", 2),
"peak_group_rank" = c(1L, 1L),
"m_score" = rep(5.692077e-05, 2),
"transition_id" = c(81958L, 81959L),
stringsAsFactors=FALSE)
expect_identical(dim(outData[["run0"]]), c(1026L, 5L))
expect_identical(dim(outData[["run1"]]), c(1152L, 5L))
expect_identical(dim(outData[["run2"]]), c(1086L, 5L))
expect_equal(outData[["run2"]][1:2,], expData, tolerance=1e-6)
})
test_that("test_fetchPrecursorsInfo", {
  # Query every precursor from the merged OSW file (maxPeptideFdr = 1.0
  # means no peptide-level filtering).
  oswFile <- file.path(system.file("extdata", package = "DIAlignR"), "osw", "merged.osw")
  precursors <- fetchPrecursorsInfo(oswFile, runType = "DIA_Proteomics", NULL,
                                    context = "experiment-wide", maxPeptideFdr = 1.0)
  # Row 108 should hold this known precursor with its fragment-ion ids.
  expected <- data.table(
    "transition_group_id" = 32L,
    "peptide_id" = 7040L,
    "sequence" = "GNNSVYMNNFLNLILQNER",
    "charge" = 3L,
    "group_label" = "10030_GNNSVYMNNFLNLILQNER/3",
    "transition_ids" = list(c(192L, 193L, 194L, 195L, 196L, 197L))
  )
  expect_identical(precursors[108, ], expected)
  expect_identical(dim(precursors), c(302L, 6L))
})
test_that("test_getPrecursors",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
outData <- getPrecursors(fileInfo, oswMerged = TRUE, runType = "DIA_Proteomics",
context = "experiment-wide", maxPeptideFdr = 0.05)
expData <- data.table("transition_group_id" = 32L,
"peptide_id" = 7040L,
"sequence" = "GNNSVYMNNFLNLILQNER",
"charge" = 3L,
"group_label" = "10030_GNNSVYMNNFLNLILQNER/3",
"transition_ids" = list(c(192L, 193L, 194L, 195L, 196L, 197L)),
key = c("peptide_id", "transition_group_id"))
expect_identical(outData[79,], expData)
expect_identical(dim(outData), c(234L, 6L))
})
test_that("test_getPrecursorByID",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
outData <- getPrecursorByID(c(32L, 2474L), fileInfo, oswMerged = TRUE, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = c(32L, 2474L),
"peptide_id" = c(7040L, 8496L),
"sequence" = c("GNNSVYMNNFLNLILQNER", "IHFLSPVRPFTLTPGDEEESFIQLITPVR"),
"charge" = c(3L, 3L),
"group_label" = c("10030_GNNSVYMNNFLNLILQNER/3", "12300_IHFLSPVRPFTLTPGDEEESFIQLITPVR/3"),
"transition_ids" = list(c(192L, 193L, 194L, 195L, 196L, 197L),
c(14843L, 14844L, 14845L, 14846L, 14847L, 14848L)),
key = c("peptide_id", "transition_group_id"))
expect_identical(outData, expData)
})
test_that("test_fetchFeaturesFromRun",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
"spectraFileID" = c("125704171604355508", "6752973645981403097", "2234664662238281994"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
fileInfo$featureFile <- as.factor(fileInfo$featureFile)
outData <- fetchFeaturesFromRun(fileInfo$featureFile[1], runID = "125704171604355508", maxFdrQuery = 0.05, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 32L, "feature_id" = bit64::as.integer64(484069199212214166),
"RT" = 6528.23, "intensity" = 26.7603,
"leftWidth" = 6518.602, "rightWidth" = 6535.67,
"peak_group_rank" = 1L, "m_score" = 0.0264475,
key = "transition_group_id")
expect_equal(outData[1,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(211L, 8L))
outData <- fetchFeaturesFromRun(fileInfo$featureFile[2], runID = "6752973645981403097", maxFdrQuery = 0.01, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 19954L, "feature_id" = bit64::as.integer64(3189052421957813097),
"RT" = 5226.47, "intensity" = 104.944,
"leftWidth" = 5215.051, "rightWidth" = 5228.706,
"peak_group_rank" = 3L, "m_score" = 0.0009634075,
key = "transition_group_id")
expect_equal(outData[192,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(192L, 8L))
outData <- fetchFeaturesFromRun(fileInfo$featureFile[3], runID = "2234664662238281994", maxFdrQuery = 1.00, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 10918L, "feature_id" = bit64::as.integer64(4248772434739795103),
"RT" = 6019.18, "intensity" = 78.4294,
"leftWidth" = 6006.667, "rightWidth" = 6044.217,
"peak_group_rank" = 3L, "m_score" = 0.3225775,
key = "transition_group_id")
expect_equal(outData[500,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(926L, 8L))
})
test_that("test_getFeatures", {
  # Three pseudo-runs all backed by the same merged OSW file.
  oswPath <- file.path(system.file("extdata", package = "DIAlignR"), "osw", "merged.osw")
  runIDs <- c("125704171604355508", "6752973645981403097", "2234664662238281994")
  runInfo <- data.frame("featureFile" = rep(oswPath, 3),
                        "spectraFileID" = runIDs,
                        row.names = c("run0", "run1", "run2"),
                        stringsAsFactors = FALSE)
  # getFeatures() should also cope with a factor-valued featureFile column.
  runInfo$featureFile <- as.factor(runInfo$featureFile)
  features <- getFeatures(runInfo, maxFdrQuery = 0.05, runType = "DIA_Proteomics")
  # One element per run, with the expected feature count for run1.
  expect_identical(length(features), 3L)
  expect_identical(dim(features[["run1"]]), c(227L, 8L))
})
test_that("test_fetchPeptidesInfo", {
dataPath <- system.file("extdata", package = "DIAlignR")
filename <- paste0(dataPath,"/osw/merged.osw")
outData <- fetchPeptidesInfo(oswName = filename, runType = "DIA_Proteomics", context = "experiment-wide")
expData <- data.table("peptide_id" = c(19046L),
"run" = bit64::as.integer64(c(6752973645981403097, 2234664662238281994, 125704171604355508)),
"score" = c(7.182150, 7.664316, 7.588328),
"pvalue" = 5.603183e-05,
"qvalue" = 5.204949e-05)
expect_identical(dim(outData), c(896L, 5L))
expect_equal(tail(outData,3), expData, tolerance = 1e-06)
outData2 <- fetchPeptidesInfo(oswName = filename, runType = "DIA_Proteomics", context = "global")
expData <- data.table("peptide_id" = as.integer(), "run" = as.numeric(),
"score" = as.numeric(), "pvalue" = as.numeric(), "qvalue" = as.numeric(),
stringsAsFactors = FALSE)
expect_equal(outData2, expData)
})
test_that("test_getPeptideScores", {
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
"spectraFileID" = c("125704171604355508", "6752973645981403097", "2234664662238281994"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
peptides <- c(7260L, 3L, 4L)
outData <- getPeptideScores(fileInfo, peptides, oswMerged = TRUE, runType = "DIA_Proteomics", context = "experiment-wide")
expData <- data.table("peptide_id" = c(3L, 4L, rep(7260L, 3)),
"run" =c(NA_character_, NA_character_, "run0", "run1", "run2"),
"score" = c(NA_real_, NA_real_, 7.779751, 7.404515, 7.324655),
"pvalue" = c(NA_real_, NA_real_, rep(5.603183e-05, 3)),
"qvalue" = c(NA_real_, NA_real_, rep(5.204949e-05, 3)), key = c("peptide_id"))
expect_equal(outData, expData, tolerance = 1e-06)
outData2 <- getPeptideScores(fileInfo, peptides = 7260L, oswMerged = TRUE, runType = "DIA_Proteomics", context = "run-specific")
expect_equal(outData2, data.table("peptide_id" = 7260L,
"run" =NA_character_,
"score" = NA_real_, "pvalue" = NA_real_, "qvalue" = NA_real_, key = "peptide_id"))
})
test_that("test_fetchTransitionsFromRun",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
"spectraFileID" = c("125704171604355508", "6752973645981403097", "2234664662238281994"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
outData <- fetchTransitionsFromRun(fileInfo$featureFile[1], runID = "125704171604355508", maxFdrQuery = 0.05, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 32L,
"feature_id" = bit64::as.integer64(484069199212214166),
"RT" = 6528.23, "intensity" = list(c(10.232500, 0.133768, 9.743950, 0.987916, 4.298210, 1.363980)),
"leftWidth" = 6518.602, "rightWidth" = 6535.67, "peak_group_rank" = 1L,
"m_score" = 0.0264475, key = c("transition_group_id", "peak_group_rank"))
expect_equal(outData[1,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(211L, 8L))
outData <- fetchTransitionsFromRun(fileInfo$featureFile[2], runID = "6752973645981403097", maxFdrQuery = 0.01, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 19954L, "feature_id" = bit64::as.integer64(3189052421957813097),
"RT" = 5226.47, "intensity" = list(c(41.11890, 19.45290, 12.51970, 11.41050, 8.10003, 12.34190)),
"leftWidth" = 5215.051, "rightWidth" = 5228.706, "peak_group_rank" = 3L,
"m_score" = 0.0009634075, key = c("transition_group_id", "peak_group_rank"))
expect_equal(outData[192,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(192L, 8L))
})
test_that("test_getTransitions", {
  # Three pseudo-runs all backed by the same merged OSW file.
  oswPath <- file.path(system.file("extdata", package = "DIAlignR"), "osw", "merged.osw")
  runInfo <- data.frame(
    "featureFile" = rep(oswPath, 3),
    "spectraFileID" = c("125704171604355508", "6752973645981403097", "2234664662238281994"),
    row.names = c("run0", "run1", "run2"),
    stringsAsFactors = FALSE
  )
  transitions <- getTransitions(runInfo, maxFdrQuery = 0.05, runType = "DIA_Proteomics")
  # One element per run, with the expected transition count for run1.
  expect_identical(length(transitions), 3L)
  expect_identical(dim(transitions[["run1"]]), c(227L, 8L))
})
| /tests/testthat/test_read_osw.R | no_license | bgruening/DIAlignR | R | false | false | 14,796 | r | context("Read osw files.")
test_that("test_fetchAnalytesInfo",{
filenames <- data.frame("filename" = c("data/raw/hroest_K120808_Strep10%PlasmaBiolRepl1_R03_SW_filt.mzML.gz",
"data/raw/hroest_K120809_Strep0%PlasmaBiolRepl2_R04_SW_filt.mzML.gz",
"data/raw/hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt.mzML.gz"),
"runs" = c("hroest_K120808_Strep10%PlasmaBiolRepl1_R03_SW_filt",
"hroest_K120809_Strep0%PlasmaBiolRepl2_R04_SW_filt",
"hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
oswName <- file.path(system.file("extdata", package = "DIAlignR"), "osw", "merged.osw")
expOutput <- data.frame("transition_group_id" = rep("19051_KLIVTSEGC[160]FK/2", 6),
"filename" = rep("data/raw/hroest_K120809_Strep0%PlasmaBiolRepl2_R04_SW_filt.mzML.gz", 6),
"RT" = rep(2586.12, 6),
"delta_rt" = rep(78.9663, 6),
"assay_RT" = rep(13.5, 6),
"Intensity" = rep(26.2182, 6),
"leftWidth" = rep(2571.738, 6),
"rightWidth" = rep(2609.288, 6),
"peak_group_rank" = rep(1, 6),
"m_score" = rep(0.001041916, 6),
"transition_id" = c(58312, 58313, 58314, 58315, 58316, 58317),
stringsAsFactors=FALSE)
outData <- fetchAnalytesInfo(oswName, maxFdrQuery = 0.05, oswMerged = TRUE,
analytes = c("19051_KLIVTSEGC[160]FK/2"), filename = filenames$filename[2],
runType = "DIA_Proteomics", analyteInGroupLabel = TRUE)
expect_equal(outData, expOutput, tolerance=1e-6)
outData <- fetchAnalytesInfo(oswName, maxFdrQuery = 0.5, oswMerged = TRUE,
analytes = c("IHFLSPVRPFTLTPGDEEESFIQLITPVR_3"), filename = filenames$filename[3],
runType = "DIA_Proteomics", analyteInGroupLabel = FALSE)
expOutput <- data.frame("transition_group_id" = rep("IHFLSPVRPFTLTPGDEEESFIQLITPVR_3", 12),
"filename" = rep("data/raw/hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt.mzML.gz", 12),
"RT" = c(rep(6483.50, 6), rep(6597.54, 6)),
"delta_rt" = c(rep(78.8163, 6), rep(192.8560, 6)),
"assay_RT" = rep(126.7, 12),
"Intensity" = c(rep(61.0299, 6), rep(16.7115, 6)),
"leftWidth" = c(rep(6468.855, 6), rep(6574.684, 6)),
"rightWidth" = c(rep(6499.579, 6), rep(6615.649, 6)),
"peak_group_rank" = c(rep(1, 6), rep(2, 6)),
"m_score" = c(rep(5.692077e-05, 6), rep(3.690986e-01,6)),
"transition_id" = rep(c(14843, 14844, 14845, 14846, 14847, 14848), 2),
stringsAsFactors=FALSE)
expect_equal(outData, expOutput, tolerance=1e-6)
})
test_that("test_getOswAnalytes",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- getRunNames(dataPath, oswMerged = TRUE)
outData <- getOswAnalytes(fileInfo, oswMerged = TRUE,
maxFdrQuery = 0.01, runType = "DIA_Proteomics")
expData <- data.frame("transition_group_id" = rep("AAMIGGADATSNVR_2", 2),
"filename" = rep("data/raw/hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt.mzML.gz", 2),
"peak_group_rank" = c(1L, 1L),
"m_score" = rep(5.692077e-05, 2),
"transition_id" = c(81958L, 81959L),
stringsAsFactors=FALSE)
expect_identical(dim(outData[["run0"]]), c(1026L, 5L))
expect_identical(dim(outData[["run1"]]), c(1152L, 5L))
expect_identical(dim(outData[["run2"]]), c(1086L, 5L))
expect_equal(outData[["run2"]][1:2,], expData, tolerance=1e-6)
})
test_that("test_fetchPrecursorsInfo",{
dataPath <- system.file("extdata", package = "DIAlignR")
filename <- paste0(dataPath,"/osw/merged.osw")
outData <- fetchPrecursorsInfo(filename, runType = "DIA_Proteomics", NULL,
context = "experiment-wide", maxPeptideFdr = 1.0)
expData <- data.table("transition_group_id" = 32L,
"peptide_id" = 7040L,
"sequence" = "GNNSVYMNNFLNLILQNER",
"charge" = 3L,
"group_label" = "10030_GNNSVYMNNFLNLILQNER/3",
"transition_ids" = list(c(192L, 193L, 194L, 195L, 196L, 197L)))
expect_identical(outData[108,], expData)
expect_identical(dim(outData), c(302L, 6L))
})
test_that("test_getPrecursors",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
outData <- getPrecursors(fileInfo, oswMerged = TRUE, runType = "DIA_Proteomics",
context = "experiment-wide", maxPeptideFdr = 0.05)
expData <- data.table("transition_group_id" = 32L,
"peptide_id" = 7040L,
"sequence" = "GNNSVYMNNFLNLILQNER",
"charge" = 3L,
"group_label" = "10030_GNNSVYMNNFLNLILQNER/3",
"transition_ids" = list(c(192L, 193L, 194L, 195L, 196L, 197L)),
key = c("peptide_id", "transition_group_id"))
expect_identical(outData[79,], expData)
expect_identical(dim(outData), c(234L, 6L))
})
test_that("test_getPrecursorByID",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
outData <- getPrecursorByID(c(32L, 2474L), fileInfo, oswMerged = TRUE, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = c(32L, 2474L),
"peptide_id" = c(7040L, 8496L),
"sequence" = c("GNNSVYMNNFLNLILQNER", "IHFLSPVRPFTLTPGDEEESFIQLITPVR"),
"charge" = c(3L, 3L),
"group_label" = c("10030_GNNSVYMNNFLNLILQNER/3", "12300_IHFLSPVRPFTLTPGDEEESFIQLITPVR/3"),
"transition_ids" = list(c(192L, 193L, 194L, 195L, 196L, 197L),
c(14843L, 14844L, 14845L, 14846L, 14847L, 14848L)),
key = c("peptide_id", "transition_group_id"))
expect_identical(outData, expData)
})
test_that("test_fetchFeaturesFromRun",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
"spectraFileID" = c("125704171604355508", "6752973645981403097", "2234664662238281994"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
fileInfo$featureFile <- as.factor(fileInfo$featureFile)
outData <- fetchFeaturesFromRun(fileInfo$featureFile[1], runID = "125704171604355508", maxFdrQuery = 0.05, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 32L, "feature_id" = bit64::as.integer64(484069199212214166),
"RT" = 6528.23, "intensity" = 26.7603,
"leftWidth" = 6518.602, "rightWidth" = 6535.67,
"peak_group_rank" = 1L, "m_score" = 0.0264475,
key = "transition_group_id")
expect_equal(outData[1,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(211L, 8L))
outData <- fetchFeaturesFromRun(fileInfo$featureFile[2], runID = "6752973645981403097", maxFdrQuery = 0.01, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 19954L, "feature_id" = bit64::as.integer64(3189052421957813097),
"RT" = 5226.47, "intensity" = 104.944,
"leftWidth" = 5215.051, "rightWidth" = 5228.706,
"peak_group_rank" = 3L, "m_score" = 0.0009634075,
key = "transition_group_id")
expect_equal(outData[192,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(192L, 8L))
outData <- fetchFeaturesFromRun(fileInfo$featureFile[3], runID = "2234664662238281994", maxFdrQuery = 1.00, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 10918L, "feature_id" = bit64::as.integer64(4248772434739795103),
"RT" = 6019.18, "intensity" = 78.4294,
"leftWidth" = 6006.667, "rightWidth" = 6044.217,
"peak_group_rank" = 3L, "m_score" = 0.3225775,
key = "transition_group_id")
expect_equal(outData[500,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(926L, 8L))
})
test_that("test_getFeatures",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
"spectraFileID" = c("125704171604355508", "6752973645981403097", "2234664662238281994"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
fileInfo$featureFile <- as.factor(fileInfo$featureFile)
outData <- getFeatures(fileInfo, maxFdrQuery = 0.05, runType = "DIA_Proteomics")
expect_identical(length(outData), 3L)
expect_identical(dim(outData[["run1"]]), c(227L, 8L))
})
test_that("test_fetchPeptidesInfo", {
dataPath <- system.file("extdata", package = "DIAlignR")
filename <- paste0(dataPath,"/osw/merged.osw")
outData <- fetchPeptidesInfo(oswName = filename, runType = "DIA_Proteomics", context = "experiment-wide")
expData <- data.table("peptide_id" = c(19046L),
"run" = bit64::as.integer64(c(6752973645981403097, 2234664662238281994, 125704171604355508)),
"score" = c(7.182150, 7.664316, 7.588328),
"pvalue" = 5.603183e-05,
"qvalue" = 5.204949e-05)
expect_identical(dim(outData), c(896L, 5L))
expect_equal(tail(outData,3), expData, tolerance = 1e-06)
outData2 <- fetchPeptidesInfo(oswName = filename, runType = "DIA_Proteomics", context = "global")
expData <- data.table("peptide_id" = as.integer(), "run" = as.numeric(),
"score" = as.numeric(), "pvalue" = as.numeric(), "qvalue" = as.numeric(),
stringsAsFactors = FALSE)
expect_equal(outData2, expData)
})
test_that("test_getPeptideScores", {
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
"spectraFileID" = c("125704171604355508", "6752973645981403097", "2234664662238281994"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
peptides <- c(7260L, 3L, 4L)
outData <- getPeptideScores(fileInfo, peptides, oswMerged = TRUE, runType = "DIA_Proteomics", context = "experiment-wide")
expData <- data.table("peptide_id" = c(3L, 4L, rep(7260L, 3)),
"run" =c(NA_character_, NA_character_, "run0", "run1", "run2"),
"score" = c(NA_real_, NA_real_, 7.779751, 7.404515, 7.324655),
"pvalue" = c(NA_real_, NA_real_, rep(5.603183e-05, 3)),
"qvalue" = c(NA_real_, NA_real_, rep(5.204949e-05, 3)), key = c("peptide_id"))
expect_equal(outData, expData, tolerance = 1e-06)
outData2 <- getPeptideScores(fileInfo, peptides = 7260L, oswMerged = TRUE, runType = "DIA_Proteomics", context = "run-specific")
expect_equal(outData2, data.table("peptide_id" = 7260L,
"run" =NA_character_,
"score" = NA_real_, "pvalue" = NA_real_, "qvalue" = NA_real_, key = "peptide_id"))
})
test_that("test_fetchTransitionsFromRun",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
"spectraFileID" = c("125704171604355508", "6752973645981403097", "2234664662238281994"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
outData <- fetchTransitionsFromRun(fileInfo$featureFile[1], runID = "125704171604355508", maxFdrQuery = 0.05, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 32L,
"feature_id" = bit64::as.integer64(484069199212214166),
"RT" = 6528.23, "intensity" = list(c(10.232500, 0.133768, 9.743950, 0.987916, 4.298210, 1.363980)),
"leftWidth" = 6518.602, "rightWidth" = 6535.67, "peak_group_rank" = 1L,
"m_score" = 0.0264475, key = c("transition_group_id", "peak_group_rank"))
expect_equal(outData[1,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(211L, 8L))
outData <- fetchTransitionsFromRun(fileInfo$featureFile[2], runID = "6752973645981403097", maxFdrQuery = 0.01, runType = "DIA_Proteomics")
expData <- data.table("transition_group_id" = 19954L, "feature_id" = bit64::as.integer64(3189052421957813097),
"RT" = 5226.47, "intensity" = list(c(41.11890, 19.45290, 12.51970, 11.41050, 8.10003, 12.34190)),
"leftWidth" = 5215.051, "rightWidth" = 5228.706, "peak_group_rank" = 3L,
"m_score" = 0.0009634075, key = c("transition_group_id", "peak_group_rank"))
expect_equal(outData[192,], expData, tolerance = 1e-04)
expect_identical(dim(outData), c(192L, 8L))
})
test_that("test_getTransitions",{
dataPath <- system.file("extdata", package = "DIAlignR")
fileInfo <- data.frame("featureFile" = rep(file.path(dataPath, "osw", "merged.osw"),3),
"spectraFileID" = c("125704171604355508", "6752973645981403097", "2234664662238281994"),
row.names = c("run0", "run1", "run2"),
stringsAsFactors=FALSE)
outData <- getTransitions(fileInfo, maxFdrQuery = 0.05, runType = "DIA_Proteomics")
expect_identical(length(outData), 3L)
expect_identical(dim(outData[["run1"]]), c(227L, 8L))
})
|
# Analysis of temporal-order memory for Experiments 1a/1b of the boundaryVR
# study: loads per-participant memory-task CSVs, scores accuracy and plots
# within- vs across-boundary performance split by room type.
# Room coding used throughout:
# Room 1 = O-room
# Room 2 = M-room
# Split by whether the cue object was in M or O-room
# Libraries
library(plyr)
library(ggplot2)
library(cowplot)
library(gridExtra)
library(grid)
library(knitr)
library(assortedRFunctions)
library(kableExtra)
library(MRColour)
library(reshape2)
library(latex2exp)
library(BayesFactor)
# Use the plain grey ggplot2 theme for all plots below.
theme_set(theme_grey())
# /*
# ----------------------------- Load data ---------------------------
# */

# Map jsPsych key codes for the digit keys onto response options:
# 49 -> 1, 50 -> 2, 51 -> 3. Unrecognised codes stay NA.
recodeKeyPress <- function(key_press) {
  response <- rep(NA, length(key_press))
  response[key_press == 49] <- 1
  response[key_press == 50] <- 2
  response[key_press == 51] <- 3
  response
}

# Score responses against the correct answers (1 = correct, 0 = incorrect).
# Missing responses propagate as NA, matching the original scoring.
scoreAccuracy <- function(response, corr_resp) {
  accuracy <- rep(NA, length(response))
  accuracy[response == corr_resp] <- 1
  accuracy[response != corr_resp] <- 0
  accuracy
}

# Load all per-participant data files for Exp 1a (batch 1).
prefix <- "data/Exp1/batch1/memoryTask/"
allFiles <- list.files(prefix)
allFiles_paths <- paste(prefix, allFiles, sep = '')
n <- length(allFiles_paths)

# Collect each participant's trials in pre-allocated lists instead of growing
# data frames with rbind() inside the loop (quadratic copying in the
# original i == 1 / rbind pattern). seq_len(n) also behaves correctly if no
# files are found, unlike 1:n.
orderList <- vector("list", n)
roomList <- vector("list", n)
tableList <- vector("list", n)
for (i in seq_len(n)) {
  ############
  # Load data file and recode key presses
  tempDF <- read.csv(allFiles_paths[i], header = TRUE, na.strings = '')
  tempDF$response <- recodeKeyPress(tempDF$key_press)

  ############
  # Temporal order question
  temporalOrder <- subset(tempDF, test_part == 'temporalOrder')
  temporalOrder$rt <- as.numeric(as.character(temporalOrder$rt))
  temporalOrder$accuracy <- scoreAccuracy(temporalOrder$response, temporalOrder$corr_resp)
  temporalOrder$id <- i
  orderList[[i]] <- temporalOrder

  ############
  # Room type question
  roomType <- subset(tempDF, test_part == 'roomType')
  roomType$rt <- as.numeric(as.character(roomType$rt))
  roomType$accuracy <- scoreAccuracy(roomType$response, roomType$corr_resp)
  roomType$id <- i
  roomList[[i]] <- roomType

  ############
  # Table question: tables are named 2 and 3 in the input data, so the generic
  # recode is overridden here (key press 1 -> table 3, key press 2 -> table 2).
  tableNum <- subset(tempDF, test_part == 'tableNum')
  tableNum$rt <- as.numeric(as.character(tableNum$rt))
  tableNum$response[tableNum$key_press == 49] <- 3 # for key press 1
  tableNum$response[tableNum$key_press == 50] <- 2 # for key press 2
  tableNum$accuracy <- scoreAccuracy(tableNum$response, tableNum$corr_resp)
  tableNum$id <- i
  tableList[[i]] <- tableNum
}

# Concatenate all participants into one data frame per question type.
df_order <- do.call(rbind, orderList)
df_room <- do.call(rbind, roomList)
df_table <- do.call(rbind, tableList)
# Rename to batch-1 (Exp 1a) specific names
df_order_b1 <- df_order
df_room_b1 <- df_room
df_table_b1 <- df_table
# Attach each trial's room type to the temporal-order data.
# NOTE(review): this pairs rows of df_order_b1 and df_room_b1 purely by
# position, without sorting either; the Exp 1b code below orders both data
# frames by id and trial_index before doing the same assignment -- confirm
# the batch-1 rows are already aligned.
df_order_b1_roomInfo <- df_order_b1
df_order_b1_roomInfo$roomType <- df_room_b1$corr_resp
# corr_resp == 1 means O-room, otherwise M-room (note 'M-Room' capitalisation
# here vs 'M-room' used for Condition below).
df_order_b1_roomInfo$roomType <- ifelse(df_order_b1_roomInfo$roomType == 1, 'O-room', 'M-Room')
# Mean accuracy and RT per participant x context x room type.
agg_order_b1_roomInfo <- ddply(df_order_b1_roomInfo, c('id', 'context', 'roomType'), summarise, acc = mean(accuracy), rt = mean(rt))
# Collapse context into a within/across boundary factor plus a 3-level
# Condition (across / O-room / M-room).
agg_order_b1_roomInfo$boundary <- ifelse(agg_order_b1_roomInfo$context == 'across', 'across', 'within')
agg_order_b1_roomInfo$Condition <- 'across'
agg_order_b1_roomInfo$Condition[agg_order_b1_roomInfo$context == 'within-open plane'] <- 'O-room'
agg_order_b1_roomInfo$Condition[agg_order_b1_roomInfo$context == 'within-M-shape'] <- 'M-room'
# /*
# ----------------------------- Plot ---------------------------
# */
# Exp 1a temporal-order accuracy: boxplots per boundary condition filled by
# room type, with per-participant points, condition means (shape 24 =
# filled triangle) and the 1/3 chance level marked.
plt1 <- ggplot(agg_order_b1_roomInfo, aes(x = boundary, y = acc, fill = interaction(boundary,roomType))) +
  geom_boxplot(alpha = 0.5,outlier.shape = NA, key_glyph = "rect") +
  # One jittered point per participant within each box
  geom_point(position = position_jitterdodge(jitter.width = 0.2)) +
  # Chance level for a 3-alternative forced choice
  geom_hline(yintercept = 1/3) +
  # Condition means plotted over the boxes
  stat_summary(geom = "point", fun = "mean", col = 'black', size = 3, shape = 24, aes(fill = interaction(boundary,roomType)),
               position=position_dodge(width = 0.75),
               key_glyph = "rect") +
  # Arrow and label pointing at the chance line
  geom_segment(aes(x = 1.5, xend = 1.5, y= 0.1, yend= 1/3),colour = 'black',
               arrow = arrow(length = unit(0.30,"cm"), type = "closed"), show.legend = FALSE) +
  annotate('text', x = 1.5, y = 0.1 - 0.03, label = 'Chance') +
  scale_color_mrc(palette = 'secondary') +
  scale_fill_mrc(palette = 'secondary') +
  labs(y = '3AFC accuracy', x = "Boundary", title = 'Temporal Order (Exp 1a)') +
  # Legend tucked into the top-left corner, slightly compressed
  theme(legend.justification = c(0, 1),
        legend.position = c(0, 1),
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 9),
        legend.key.size = unit(0.5,"line")) +
  coord_cartesian(ylim = c(0, 1))
# /*
# ----------------------------- Load data Exp1b ---------------------------
# */
# Load trial information
load("~/boundaryVR/experiments/Exp1/batch2/r_supportFiles/trialData_20200522_182214.RData")
# Note that counterbalancing in that images goes from 1 to 8, while it goes from 0 to 7 in the javascript
# files.
# Order trial information
trials_cond5 <- trials_cond5[order(trials_cond5$objNum),]
trials_cond6 <- trials_cond6[order(trials_cond6$objNum),]
trials_cond7 <- trials_cond7[order(trials_cond7$objNum),]
trials_cond8 <- trials_cond8[order(trials_cond8$objNum),]
# Load all data
prefix <- '~/boundaryVR/data/Exp1/batch2/memoryTask/'
allFiles <- list.files(paste(prefix, sep = ''))
allFiles_paths <- paste(prefix, allFiles, sep = '')
n <- length(allFiles_paths)
for(i in 1:n){
############
# Loading data
tempDF <- read.csv(allFiles_paths[i], header = TRUE, na.strings = '')
# Recode key presses
response <- rep(NA, dim(tempDF)[1])
response[tempDF$key_press == 49] <- 1
response[tempDF$key_press == 50] <- 2
response[tempDF$key_press == 51] <- 3
tempDF$response <- response
############
# Temporal order memory
temporalOrder <- subset(tempDF, test_part == 'temporalOrder')
# Sort by objectNumber
temporalOrder <- temporalOrder[order(temporalOrder$probe),]
# get trialinfo and add to temporalOrder
cond <- temporalOrder$condition[1] + 1 # to correct for difference
temporalOrder$foil1Pos <- get(paste0("trials_cond", cond))$foil1Pos
temporalOrder$foil2Pos <- get(paste0("trials_cond", cond))$foil2Pos
temporalOrder$rt <- as.numeric(as.character(temporalOrder$rt))
  # Calculate accuracy
accuracy <- rep(NA, dim(temporalOrder)[1])
accuracy[temporalOrder$response == temporalOrder$corr_resp] <- 1
accuracy[temporalOrder$response != temporalOrder$corr_resp] <- 0
temporalOrder$accuracy <- accuracy
  # Create variable that describes whether the target, foil 1, or foil 2 was chosen
choice <- rep('Target', dim(temporalOrder)[1])
choice[temporalOrder$response == temporalOrder$foil1Pos] <- 'Foil 1'
choice[temporalOrder$response == temporalOrder$foil2Pos] <- 'Foil 2'
temporalOrder$choice <- choice
############
# Room type question
roomType <- subset(tempDF, test_part == 'roomType')
roomType$rt <- as.numeric(as.character(roomType$rt))
# get trialinfo and add to roomType
cond <- roomType$condition[1] + 1 # to correct for difference
roomType$roomType <- get(paste0("trials_cond", cond))$roomType
corr_room <- rep(NA, nrow(roomType))
corr_room[roomType$roomType == "nw"] <- 1
corr_room[roomType$roomType == "ww"] <- 2
roomType$corr_room <- corr_room
# Calculate accuracy
accuracy <- rep(NA, dim(roomType)[1])
accuracy[roomType$response == roomType$corr_room] <- 1
accuracy[roomType$response != roomType$corr_room] <- 0
roomType$accuracy <- accuracy
############
# Table question
tableNum <- subset(tempDF, test_part == 'tableNum')
tableNum$rt <- as.numeric(as.character(tableNum$rt))
# Recode keypresses
response <- rep(NA, dim(tableNum)[1])
response[tableNum$key_press == 49] <- 3 # for key press 1
response[tableNum$key_press == 50] <- 2 # for key press 2
tableNum$response <- response
# Calculate accuracy
accuracy <- rep(NA, dim(tableNum)[1])
accuracy[tableNum$response == tableNum$corr_resp] <- 1
accuracy[tableNum$response != tableNum$corr_resp] <- 0
tableNum$accuracy <- accuracy
# Create or bind to data.frame
if(i == 1){
df_order_b2 <- temporalOrder
df_order_b2$id <- i
df_room_b2 <- roomType
df_room_b2$id <- i
df_table_b2 <- tableNum
df_table_b2$id <- i
} else {
temporalOrder$id <- i
df_order_b2 <- rbind(df_order_b2, temporalOrder)
roomType$id <- i
df_room_b2 <- rbind(df_room_b2, roomType)
tableNum$id <- i
df_table_b2 <- rbind(df_table_b2, tableNum)
}
}
# Convert to id factor
df_order_b2$id <- as.factor(df_order_b2$id)
df_room_b2$id <- as.factor(df_room_b2$id)
df_table_b2$id <- as.factor(df_table_b2$id)
# Attach room-type information (from the room-type question) to the temporal-order
# trials of Exp 1b, then aggregate accuracy and RT per participant x context x room type.
df_order_b2_roomInfo <- df_order_b2
# Sort both data frames by id and trial_index so rows line up positionally.
# NOTE(review): this positional assignment assumes every participant has the same
# number of temporal-order and room-type trials in matching order -- TODO confirm.
df_order_b2_roomInfo <- df_order_b2_roomInfo[order(df_order_b2_roomInfo$id,df_order_b2_roomInfo$trial_index),]
df_room_b2 <- df_room_b2[order(df_room_b2$id, df_room_b2$trial_index),]
# corr_resp == 1 codes room 1 (O-room), 2 codes room 2 (M-room)
df_order_b2_roomInfo$roomType <- df_room_b2$corr_resp
# NOTE(review): label here is 'M-Room' (capital R), but Exp 1c and the Condition
# labels below use 'M-room' -- legend keys and later string filters on this column
# will not match across experiments; confirm intended.
df_order_b2_roomInfo$roomType <- ifelse(df_order_b2_roomInfo$roomType == 1, 'O-room', 'M-Room')
agg_order_b2_roomInfo <- ddply(df_order_b2_roomInfo, c('id', 'context', 'roomType'), summarise, acc = mean(accuracy), rt = mean(rt))
# Collapse the three context levels into a binary within/across boundary factor ...
agg_order_b2_roomInfo$boundary <- ifelse(agg_order_b2_roomInfo$context == 'across', 'across', 'within')
# ... and a three-level Condition factor (across / O-room / M-room)
agg_order_b2_roomInfo$Condition <- 'across'
agg_order_b2_roomInfo$Condition[agg_order_b2_roomInfo$context == 'within-open plane'] <- 'O-room'
agg_order_b2_roomInfo$Condition[agg_order_b2_roomInfo$context == 'within-M-shape'] <- 'M-room'
# /*
# ----------------------------- Plot Exp1b ---------------------------
# */
plt2 <- ggplot(agg_order_b2_roomInfo, aes(x = boundary, y = acc, fill = interaction(boundary,roomType))) +
geom_boxplot(alpha = 0.5,outlier.shape = NA, key_glyph = "rect") +
geom_point(position = position_jitterdodge(jitter.width = 0.2)) +
geom_hline(yintercept = 1/3) +
stat_summary(geom = "point", fun = "mean", col = 'black', size = 3, shape = 24, aes(fill = interaction(boundary,roomType)),
position=position_dodge(width = 0.75),
key_glyph = "rect") +
geom_segment(aes(x = 1.5, xend = 1.5, y= 0.1, yend= 1/3),colour = 'black',
arrow = arrow(length = unit(0.30,"cm"), type = "closed"), show.legend = FALSE) +
annotate('text', x = 1.5, y = 0.1 - 0.03, label = 'Chance') +
scale_color_mrc(palette = 'secondary') +
scale_fill_mrc(palette = 'secondary') +
labs(y = '3AFC accuracy', x = "Boundary", title = 'Temporal Order (Exp 1b)') +
theme(legend.justification = c(0, 1),
legend.position = c(0, 1),
legend.title = element_text(size = 10),
legend.text = element_text(size = 9),
legend.key.size = unit(0.5,"line")) +
coord_cartesian(ylim = c(0, 1))
# /*
# ----------------------------- Plot Exp1c ---------------------------
# */
# Load all data
prefix <- '~/boundaryVR/data/Exp1/batch3/memoryTask/'
allFiles <- list.files(paste(prefix, sep = ''))
allFiles_paths <- paste(prefix, allFiles, sep = '')
n <- length(allFiles_paths)
# Load trial information
load("~/boundaryVR/experiments/Exp1/batch3/r_supportFiles/trialData_randomFoils.RData")
# Note that the counterbalancing of the images goes from 1 to 8 here, while it goes
# from 0 to 7 in the javascript files.
# Order trial information
# Due to an error, only 78 trials were tested during the experiment.
trials_cond5 <- trials_cond5[order(trials_cond5$objNum),][1:78,]
trials_cond6 <- trials_cond6[order(trials_cond6$objNum),][1:78,]
trials_cond7 <- trials_cond7[order(trials_cond7$objNum),][1:78,]
trials_cond8 <- trials_cond8[order(trials_cond8$objNum),][1:78,]
for(i in 1:n){
############
# Loading data
tempDF <- read.csv(allFiles_paths[i], header = TRUE, na.strings = '')
# Recode key presses
response <- rep(NA, dim(tempDF)[1])
response[tempDF$key_press == 49] <- 1
response[tempDF$key_press == 50] <- 2
response[tempDF$key_press == 51] <- 3
tempDF$response <- response
############
# Temporal order memory
temporalOrder <- subset(tempDF, test_part == 'temporalOrder')
# Sort by objectNumber
temporalOrder <- temporalOrder[order(temporalOrder$probe),]
# get trialinfo and add to temporalOrder
cond <- temporalOrder$condition[1] + 1 # to correct for difference
temporalOrder$foil1Pos <- get(paste0("trials_cond", cond))$foil1Pos
temporalOrder$foil2Pos <- get(paste0("trials_cond", cond))$foil2Pos
temporalOrder$rt <- as.numeric(as.character(temporalOrder$rt))
# Calculate accuracy
accuracy <- rep(NA, dim(temporalOrder)[1])
accuracy[temporalOrder$response == temporalOrder$corr_resp] <- 1
accuracy[temporalOrder$response != temporalOrder$corr_resp] <- 0
temporalOrder$accuracy <- accuracy
# Create variable that describe whether target, foil1, foil2 was chosen
choice <- rep('Target', dim(temporalOrder)[1])
choice[temporalOrder$response == temporalOrder$foil1Pos] <- 'Foil 1'
choice[temporalOrder$response == temporalOrder$foil2Pos] <- 'Foil 2'
temporalOrder$choice <- choice
############
# Room type question
roomType <- subset(tempDF, test_part == 'roomType')
roomType$rt <- as.numeric(as.character(roomType$rt))
roomType$roomType <- NA
# Assign room type
cond <- roomType$condition[1] + 1 # to correct for difference
for(j in 1:nrow(roomType)){
temp <- get(paste0("trials_cond", cond, '_full'))
roomType$roomType[j] <- temp[temp$room == roomType$roomNum[j], 'roomType'][1]
}
corr_room <- rep(NA, nrow(roomType))
corr_room[roomType$roomType == "nw"] <- 1
corr_room[roomType$roomType == "ww"] <- 2
roomType$corr_room <- corr_room
# Calculate accuracy
accuracy <- rep(NA, dim(roomType)[1])
accuracy[roomType$response == roomType$corr_room] <- 1
accuracy[roomType$response != roomType$corr_room] <- 0
roomType$accuracy <- accuracy
# Adding table information to temporal order memory
# Get right information and create temp variable
tempInfo <- get(paste0("trials_cond", temporalOrder$condition[1]))
tempInfo_full <- get(paste0("trials_cond", temporalOrder$condition[1], '_full'))
# Order both data frames by objNum/probe
tempInfo <- tempInfo[1:78, ] # Because of an error in the code only 78 trials exist per participant
tempInfo <- tempInfo[order(tempInfo$objNum),]
temporalOrder <- temporalOrder[order(temporalOrder$probe),]
# Transferring information between dfs
temporalOrder$probeTable <- tempInfo$table
# Loop through df to get table of target, foil1 and foil2
targetTable <- c()
foil1Table <- c()
foil2Table <- c()
for(j in 1:dim(tempInfo)[1]){
targetTable[j] <- tempInfo_full[tempInfo_full$objNum == tempInfo$target[j], 'table']
foil1Table[j] <- tempInfo_full[tempInfo_full$objNum == tempInfo$foil1[j], 'table']
foil2Table[j] <- tempInfo_full[tempInfo_full$objNum == tempInfo$foil2[j], 'table']
}
# Add the information to main data.frame
temporalOrder$targetTable <- targetTable
temporalOrder$foil1Table <- foil1Table
temporalOrder$foil2Table <- foil2Table
############
# Table question
tableNum <- subset(tempDF, test_part == 'tableNum')
tableNum$rt <- as.numeric(as.character(tableNum$rt))
# Recode keypresses
response <- rep(NA, dim(tableNum)[1])
response[tableNum$key_press == 49] <- 3 # for key press 1
response[tableNum$key_press == 50] <- 2 # for key press 2
tableNum$response <- response
# Calculate accuracy
accuracy <- rep(NA, dim(tableNum)[1])
accuracy[tableNum$response == tableNum$corr_resp] <- 1
accuracy[tableNum$response != tableNum$corr_resp] <- 0
tableNum$accuracy <- accuracy
# Create or bind to data.frame
if(i == 1){
df_order_b3 <- temporalOrder
df_order_b3$id <- i
df_room_b3 <- roomType
df_room_b3$id <- i
df_table_b3 <- tableNum
df_table_b3$id <- i
} else {
temporalOrder$id <- i
df_order_b3 <- rbind(df_order_b3, temporalOrder)
roomType$id <- i
df_room_b3 <- rbind(df_room_b3, roomType)
tableNum$id <- i
df_table_b3 <- rbind(df_table_b3, tableNum)
}
}
# Convert to id factor
df_order_b3$id <- as.factor(df_order_b3$id)
df_room_b3$id <- as.factor(df_room_b3$id)
df_table_b3$id <- as.factor(df_table_b3$id)
# Attach room-type information to the temporal-order trials of Exp 1c, then
# aggregate accuracy and RT per participant x context x room type x condition.
df_order_b3_roomInfo <- df_order_b3
# Sort both data frames by id and trial_index so rows line up positionally.
# NOTE(review): as in Exp 1b, this assumes matching trial counts and order per
# participant -- TODO confirm.
df_order_b3_roomInfo <- df_order_b3_roomInfo[order(df_order_b3_roomInfo$id, df_order_b3_roomInfo$trial_index),]
df_room_b3 <- df_room_b3[order(df_room_b3$id, df_room_b3$trial_index),]
df_order_b3_roomInfo$roomType <- df_room_b3$roomType
# 'nw' maps onto room 1 (O-room) and 'ww' onto room 2 (M-room), matching the
# corr_room coding above (nw -> 1, ww -> 2) and the Room 1/Room 2 header note.
df_order_b3_roomInfo$roomType <- ifelse(df_order_b3_roomInfo$roomType == 'nw', 'O-room', 'M-room')
agg_order_b3_roomInfo <- ddply(df_order_b3_roomInfo, c('id', 'context', 'roomType', 'condition'), summarise, acc = mean(accuracy), rt = mean(rt))
# Collapse the three context levels into a binary within/across boundary factor ...
agg_order_b3_roomInfo$boundary <- ifelse(agg_order_b3_roomInfo$context == 'across', 'across', 'within')
# ... and a three-level Condition factor (across / O-room / M-room)
agg_order_b3_roomInfo$Condition <- 'across'
agg_order_b3_roomInfo$Condition[agg_order_b3_roomInfo$context == 'within-open plane'] <- 'O-room'
agg_order_b3_roomInfo$Condition[agg_order_b3_roomInfo$context == 'within-M-shape'] <- 'M-room'
# /*
# ----------------------------- Plot Exp1c ---------------------------
# */
plt3 <- ggplot(agg_order_b3_roomInfo, aes(x = boundary, y = acc, fill = interaction(boundary,roomType))) +
geom_boxplot(alpha = 0.5,outlier.shape = NA, key_glyph = "rect") +
geom_point(position = position_jitterdodge(jitter.width = 0.2)) +
geom_hline(yintercept = 1/3) +
stat_summary(geom = "point", fun = "mean", col = 'black', size = 3, shape = 24, aes(fill = interaction(boundary,roomType)),
position=position_dodge(width = 0.75),
key_glyph = "rect") +
geom_segment(aes(x = 1.5, xend = 1.5, y= 0.1, yend= 1/3),colour = 'black',
arrow = arrow(length = unit(0.30,"cm"), type = "closed"), show.legend = FALSE) +
annotate('text', x = 1.5, y = 0.1 - 0.03, label = 'Chance') +
scale_color_mrc(palette = 'secondary') +
scale_fill_mrc(palette = 'secondary') +
labs(y = '3AFC accuracy', x = "Boundary", title = 'Temporal Order (Exp 1c)') +
theme(legend.justification = c(0, 1),
legend.position = c(0, 1),
legend.title = element_text(size = 10),
legend.text = element_text(size = 9),
legend.key.size = unit(0.5,"line"))+
coord_cartesian(ylim = c(0, 1))
# /*
# ----------------------------- Plot all ---------------------------
# */
all_plots <- plot_grid(plt1, plt2, plt3, ncol = 3)
save_plot("splitting_across_trials.png", all_plots,
base_height = 10/cm(1)*1.5,
base_width = 19/cm(1)*1.5,
base_aspect_ratio = 1)
# Paired Bayesian t-tests comparing across-boundary accuracy between M-room and
# O-room cue trials.
# NOTE(review): `a` and `b` are first filled from Exp 1c (agg_order_b3_roomInfo)
# but are immediately overwritten with the Exp 1a values (agg_order_b1_roomInfo)
# on the next two lines, so the batch-3 selection has no effect -- confirm which
# experiment these tests were meant to run on. Also note that batch 1 appears to
# label the room type 'M-Room' (capital R), so the 'M-room' filter below would
# select zero rows -- verify.
a <- agg_order_b3_roomInfo[agg_order_b3_roomInfo$context == 'across' & agg_order_b3_roomInfo$roomType == 'M-room', 'acc']
b <- agg_order_b3_roomInfo[agg_order_b3_roomInfo$context == 'across' & agg_order_b3_roomInfo$roomType == 'O-room', 'acc']
a <- agg_order_b1_roomInfo[agg_order_b1_roomInfo$context == 'across' & agg_order_b1_roomInfo$roomType == 'M-room', 'acc']
b <- agg_order_b1_roomInfo[agg_order_b1_roomInfo$context == 'across' & agg_order_b1_roomInfo$roomType == 'O-room', 'acc']
# Bayes-factor paired t-test on arcsine-transformed accuracies ...
ttestBF(arcsine_transform(a), arcsine_transform(b), paired = TRUE)
# ... and on the raw accuracies for comparison.
ttestBF(a, b, paired = TRUE)
| /analysis/additional_analysis_roomtype_for_across.R | no_license | JAQuent/boundaryVR | R | false | false | 19,694 | r | # Room 1 = O-room
# Room 2 = M-room
# Split by whether the cue object was in M or O-room
# Libraries
library(plyr)
library(ggplot2)
library(cowplot)
library(gridExtra)
library(grid)
library(knitr)
library(assortedRFunctions)
library(kableExtra)
library(MRColour)
library(reshape2)
library(latex2exp)
library(BayesFactor)
theme_set(theme_grey())
# /*
# ----------------------------- Load data ---------------------------
# */
# Load all data
prefix <- "data/Exp1/batch1/memoryTask/"
allFiles <- list.files(paste(prefix, sep = ''))
allFiles_paths <- paste(prefix, allFiles, sep = '')
n <- length(allFiles_paths)
for(i in 1:n){
############
# Load data files
tempDF <- read.csv(allFiles_paths[i], header = TRUE, na.strings = '')
# Recode key presses
response <- rep(NA, dim(tempDF)[1])
response[tempDF$key_press == 49] <- 1
response[tempDF$key_press == 50] <- 2
response[tempDF$key_press == 51] <- 3
tempDF$response <- response
############
# Temporal order
temporalOrder <- subset(tempDF, test_part == 'temporalOrder')
temporalOrder$rt <- as.numeric(as.character(temporalOrder$rt))
# Calculate accuracy
accuracy <- rep(NA, dim(temporalOrder)[1])
accuracy[temporalOrder$response == temporalOrder$corr_resp] <- 1
accuracy[temporalOrder$response != temporalOrder$corr_resp] <- 0
temporalOrder$accuracy <- accuracy
############
# Room type question
roomType <- subset(tempDF, test_part == 'roomType')
roomType$rt <- as.numeric(as.character(roomType$rt))
# Calculate accuracy
accuracy <- rep(NA, dim(roomType)[1])
accuracy[roomType$response == roomType$corr_resp] <- 1
accuracy[roomType$response != roomType$corr_resp] <- 0
roomType$accuracy <- accuracy
############
# Table question
tableNum <- subset(tempDF, test_part == 'tableNum')
tableNum$rt <- as.numeric(as.character(tableNum$rt))
# Recode because tables are named 2 and 3 in input data
tableNum$response[tableNum$key_press == 49] <- 3 # for key press 1
tableNum$response[tableNum$key_press == 50] <- 2 # for key press 2
# Calculate accuracy
accuracy <- rep(NA, dim(tableNum)[1])
accuracy[tableNum$response == tableNum$corr_resp] <- 1
accuracy[tableNum$response != tableNum$corr_resp] <- 0
tableNum$accuracy <- accuracy
# Add subject ID and concatenate to 1 data.frame
if(i == 1){
df_order <- temporalOrder
df_order$id <- i
df_room <- roomType
df_room$id <- i
df_table <- tableNum
df_table$id <- i
} else {
temporalOrder$id <- i
df_order <- rbind(df_order, temporalOrder)
roomType$id <- i
df_room <- rbind(df_room, roomType)
tableNum$id <- i
df_table <- rbind(df_table, tableNum)
}
}
# Rename according for batch1
df_order_b1 <- df_order
df_room_b1 <- df_room
df_table_b1 <- df_table
df_order_b1_roomInfo <- df_order_b1
df_order_b1_roomInfo$roomType <- df_room_b1$corr_resp
df_order_b1_roomInfo$roomType <- ifelse(df_order_b1_roomInfo$roomType == 1, 'O-room', 'M-Room')
agg_order_b1_roomInfo <- ddply(df_order_b1_roomInfo, c('id', 'context', 'roomType'), summarise, acc = mean(accuracy), rt = mean(rt))
agg_order_b1_roomInfo$boundary <- ifelse(agg_order_b1_roomInfo$context == 'across', 'across', 'within')
agg_order_b1_roomInfo$Condition <- 'across'
agg_order_b1_roomInfo$Condition[agg_order_b1_roomInfo$context == 'within-open plane'] <- 'O-room'
agg_order_b1_roomInfo$Condition[agg_order_b1_roomInfo$context == 'within-M-shape'] <- 'M-room'
# /*
# ----------------------------- Plot ---------------------------
# */
plt1 <- ggplot(agg_order_b1_roomInfo, aes(x = boundary, y = acc, fill = interaction(boundary,roomType))) +
geom_boxplot(alpha = 0.5,outlier.shape = NA, key_glyph = "rect") +
geom_point(position = position_jitterdodge(jitter.width = 0.2)) +
geom_hline(yintercept = 1/3) +
stat_summary(geom = "point", fun = "mean", col = 'black', size = 3, shape = 24, aes(fill = interaction(boundary,roomType)),
position=position_dodge(width = 0.75),
key_glyph = "rect") +
geom_segment(aes(x = 1.5, xend = 1.5, y= 0.1, yend= 1/3),colour = 'black',
arrow = arrow(length = unit(0.30,"cm"), type = "closed"), show.legend = FALSE) +
annotate('text', x = 1.5, y = 0.1 - 0.03, label = 'Chance') +
scale_color_mrc(palette = 'secondary') +
scale_fill_mrc(palette = 'secondary') +
labs(y = '3AFC accuracy', x = "Boundary", title = 'Temporal Order (Exp 1a)') +
theme(legend.justification = c(0, 1),
legend.position = c(0, 1),
legend.title = element_text(size = 10),
legend.text = element_text(size = 9),
legend.key.size = unit(0.5,"line")) +
coord_cartesian(ylim = c(0, 1))
# /*
# ----------------------------- Load data Exp1b ---------------------------
# */
# Load trial information
load("~/boundaryVR/experiments/Exp1/batch2/r_supportFiles/trialData_20200522_182214.RData")
# Note that the counterbalancing of the images goes from 1 to 8 here, while it goes
# from 0 to 7 in the javascript files.
# Order trial information
trials_cond5 <- trials_cond5[order(trials_cond5$objNum),]
trials_cond6 <- trials_cond6[order(trials_cond6$objNum),]
trials_cond7 <- trials_cond7[order(trials_cond7$objNum),]
trials_cond8 <- trials_cond8[order(trials_cond8$objNum),]
# Load all data
prefix <- '~/boundaryVR/data/Exp1/batch2/memoryTask/'
allFiles <- list.files(paste(prefix, sep = ''))
allFiles_paths <- paste(prefix, allFiles, sep = '')
n <- length(allFiles_paths)
for(i in 1:n){
############
# Loading data
tempDF <- read.csv(allFiles_paths[i], header = TRUE, na.strings = '')
# Recode key presses
response <- rep(NA, dim(tempDF)[1])
response[tempDF$key_press == 49] <- 1
response[tempDF$key_press == 50] <- 2
response[tempDF$key_press == 51] <- 3
tempDF$response <- response
############
# Temporal order memory
temporalOrder <- subset(tempDF, test_part == 'temporalOrder')
# Sort by objectNumber
temporalOrder <- temporalOrder[order(temporalOrder$probe),]
# get trialinfo and add to temporalOrder
cond <- temporalOrder$condition[1] + 1 # to correct for difference
temporalOrder$foil1Pos <- get(paste0("trials_cond", cond))$foil1Pos
temporalOrder$foil2Pos <- get(paste0("trials_cond", cond))$foil2Pos
temporalOrder$rt <- as.numeric(as.character(temporalOrder$rt))
  # Calculate accuracy
accuracy <- rep(NA, dim(temporalOrder)[1])
accuracy[temporalOrder$response == temporalOrder$corr_resp] <- 1
accuracy[temporalOrder$response != temporalOrder$corr_resp] <- 0
temporalOrder$accuracy <- accuracy
  # Create a variable that describes whether the target, foil 1, or foil 2 was chosen
choice <- rep('Target', dim(temporalOrder)[1])
choice[temporalOrder$response == temporalOrder$foil1Pos] <- 'Foil 1'
choice[temporalOrder$response == temporalOrder$foil2Pos] <- 'Foil 2'
temporalOrder$choice <- choice
############
# Room type question
roomType <- subset(tempDF, test_part == 'roomType')
roomType$rt <- as.numeric(as.character(roomType$rt))
# get trialinfo and add to roomType
cond <- roomType$condition[1] + 1 # to correct for difference
roomType$roomType <- get(paste0("trials_cond", cond))$roomType
corr_room <- rep(NA, nrow(roomType))
corr_room[roomType$roomType == "nw"] <- 1
corr_room[roomType$roomType == "ww"] <- 2
roomType$corr_room <- corr_room
# Calculate accuracy
accuracy <- rep(NA, dim(roomType)[1])
accuracy[roomType$response == roomType$corr_room] <- 1
accuracy[roomType$response != roomType$corr_room] <- 0
roomType$accuracy <- accuracy
############
# Table question
tableNum <- subset(tempDF, test_part == 'tableNum')
tableNum$rt <- as.numeric(as.character(tableNum$rt))
# Recode keypresses
response <- rep(NA, dim(tableNum)[1])
response[tableNum$key_press == 49] <- 3 # for key press 1
response[tableNum$key_press == 50] <- 2 # for key press 2
tableNum$response <- response
# Calculate accuracy
accuracy <- rep(NA, dim(tableNum)[1])
accuracy[tableNum$response == tableNum$corr_resp] <- 1
accuracy[tableNum$response != tableNum$corr_resp] <- 0
tableNum$accuracy <- accuracy
# Create or bind to data.frame
if(i == 1){
df_order_b2 <- temporalOrder
df_order_b2$id <- i
df_room_b2 <- roomType
df_room_b2$id <- i
df_table_b2 <- tableNum
df_table_b2$id <- i
} else {
temporalOrder$id <- i
df_order_b2 <- rbind(df_order_b2, temporalOrder)
roomType$id <- i
df_room_b2 <- rbind(df_room_b2, roomType)
tableNum$id <- i
df_table_b2 <- rbind(df_table_b2, tableNum)
}
}
# Convert to id factor
df_order_b2$id <- as.factor(df_order_b2$id)
df_room_b2$id <- as.factor(df_room_b2$id)
df_table_b2$id <- as.factor(df_table_b2$id)
df_order_b2_roomInfo <- df_order_b2
df_order_b2_roomInfo <- df_order_b2_roomInfo[order(df_order_b2_roomInfo$id,df_order_b2_roomInfo$trial_index),]
df_room_b2 <- df_room_b2[order(df_room_b2$id, df_room_b2$trial_index),]
df_order_b2_roomInfo$roomType <- df_room_b2$corr_resp
df_order_b2_roomInfo$roomType <- ifelse(df_order_b2_roomInfo$roomType == 1, 'O-room', 'M-Room')
agg_order_b2_roomInfo <- ddply(df_order_b2_roomInfo, c('id', 'context', 'roomType'), summarise, acc = mean(accuracy), rt = mean(rt))
agg_order_b2_roomInfo$boundary <- ifelse(agg_order_b2_roomInfo$context == 'across', 'across', 'within')
agg_order_b2_roomInfo$Condition <- 'across'
agg_order_b2_roomInfo$Condition[agg_order_b2_roomInfo$context == 'within-open plane'] <- 'O-room'
agg_order_b2_roomInfo$Condition[agg_order_b2_roomInfo$context == 'within-M-shape'] <- 'M-room'
# /*
# ----------------------------- Plot Exp1b ---------------------------
# */
plt2 <- ggplot(agg_order_b2_roomInfo, aes(x = boundary, y = acc, fill = interaction(boundary,roomType))) +
geom_boxplot(alpha = 0.5,outlier.shape = NA, key_glyph = "rect") +
geom_point(position = position_jitterdodge(jitter.width = 0.2)) +
geom_hline(yintercept = 1/3) +
stat_summary(geom = "point", fun = "mean", col = 'black', size = 3, shape = 24, aes(fill = interaction(boundary,roomType)),
position=position_dodge(width = 0.75),
key_glyph = "rect") +
geom_segment(aes(x = 1.5, xend = 1.5, y= 0.1, yend= 1/3),colour = 'black',
arrow = arrow(length = unit(0.30,"cm"), type = "closed"), show.legend = FALSE) +
annotate('text', x = 1.5, y = 0.1 - 0.03, label = 'Chance') +
scale_color_mrc(palette = 'secondary') +
scale_fill_mrc(palette = 'secondary') +
labs(y = '3AFC accuracy', x = "Boundary", title = 'Temporal Order (Exp 1b)') +
theme(legend.justification = c(0, 1),
legend.position = c(0, 1),
legend.title = element_text(size = 10),
legend.text = element_text(size = 9),
legend.key.size = unit(0.5,"line")) +
coord_cartesian(ylim = c(0, 1))
# /*
# ----------------------------- Plot Exp1c ---------------------------
# */
# Load all data
prefix <- '~/boundaryVR/data/Exp1/batch3/memoryTask/'
allFiles <- list.files(paste(prefix, sep = ''))
allFiles_paths <- paste(prefix, allFiles, sep = '')
n <- length(allFiles_paths)
# Load trial information
load("~/boundaryVR/experiments/Exp1/batch3/r_supportFiles/trialData_randomFoils.RData")
# Note that the counterbalancing of the images goes from 1 to 8 here, while it goes
# from 0 to 7 in the javascript files.
# Order trial information
# Due to an error, only 78 trials were tested during the experiment.
trials_cond5 <- trials_cond5[order(trials_cond5$objNum),][1:78,]
trials_cond6 <- trials_cond6[order(trials_cond6$objNum),][1:78,]
trials_cond7 <- trials_cond7[order(trials_cond7$objNum),][1:78,]
trials_cond8 <- trials_cond8[order(trials_cond8$objNum),][1:78,]
for(i in 1:n){
############
# Loading data
tempDF <- read.csv(allFiles_paths[i], header = TRUE, na.strings = '')
# Recode key presses
response <- rep(NA, dim(tempDF)[1])
response[tempDF$key_press == 49] <- 1
response[tempDF$key_press == 50] <- 2
response[tempDF$key_press == 51] <- 3
tempDF$response <- response
############
# Temporal order memory
temporalOrder <- subset(tempDF, test_part == 'temporalOrder')
# Sort by objectNumber
temporalOrder <- temporalOrder[order(temporalOrder$probe),]
# get trialinfo and add to temporalOrder
cond <- temporalOrder$condition[1] + 1 # to correct for difference
temporalOrder$foil1Pos <- get(paste0("trials_cond", cond))$foil1Pos
temporalOrder$foil2Pos <- get(paste0("trials_cond", cond))$foil2Pos
temporalOrder$rt <- as.numeric(as.character(temporalOrder$rt))
# Calculate accuracy
accuracy <- rep(NA, dim(temporalOrder)[1])
accuracy[temporalOrder$response == temporalOrder$corr_resp] <- 1
accuracy[temporalOrder$response != temporalOrder$corr_resp] <- 0
temporalOrder$accuracy <- accuracy
# Create variable that describe whether target, foil1, foil2 was chosen
choice <- rep('Target', dim(temporalOrder)[1])
choice[temporalOrder$response == temporalOrder$foil1Pos] <- 'Foil 1'
choice[temporalOrder$response == temporalOrder$foil2Pos] <- 'Foil 2'
temporalOrder$choice <- choice
############
# Room type question
roomType <- subset(tempDF, test_part == 'roomType')
roomType$rt <- as.numeric(as.character(roomType$rt))
roomType$roomType <- NA
# Assign room type
cond <- roomType$condition[1] + 1 # to correct for difference
for(j in 1:nrow(roomType)){
temp <- get(paste0("trials_cond", cond, '_full'))
roomType$roomType[j] <- temp[temp$room == roomType$roomNum[j], 'roomType'][1]
}
corr_room <- rep(NA, nrow(roomType))
corr_room[roomType$roomType == "nw"] <- 1
corr_room[roomType$roomType == "ww"] <- 2
roomType$corr_room <- corr_room
# Calculate accuracy
accuracy <- rep(NA, dim(roomType)[1])
accuracy[roomType$response == roomType$corr_room] <- 1
accuracy[roomType$response != roomType$corr_room] <- 0
roomType$accuracy <- accuracy
# Adding table information to temporal order memory
# Get right information and create temp variable
tempInfo <- get(paste0("trials_cond", temporalOrder$condition[1]))
tempInfo_full <- get(paste0("trials_cond", temporalOrder$condition[1], '_full'))
# Order both data frames by objNum/probe
tempInfo <- tempInfo[1:78, ] # Because of an error in the code only 78 trials exist per participant
tempInfo <- tempInfo[order(tempInfo$objNum),]
temporalOrder <- temporalOrder[order(temporalOrder$probe),]
# Transferring information between dfs
temporalOrder$probeTable <- tempInfo$table
# Loop through df to get table of target, foil1 and foil2
targetTable <- c()
foil1Table <- c()
foil2Table <- c()
for(j in 1:dim(tempInfo)[1]){
targetTable[j] <- tempInfo_full[tempInfo_full$objNum == tempInfo$target[j], 'table']
foil1Table[j] <- tempInfo_full[tempInfo_full$objNum == tempInfo$foil1[j], 'table']
foil2Table[j] <- tempInfo_full[tempInfo_full$objNum == tempInfo$foil2[j], 'table']
}
# Add the information to main data.frame
temporalOrder$targetTable <- targetTable
temporalOrder$foil1Table <- foil1Table
temporalOrder$foil2Table <- foil2Table
############
# Table question
tableNum <- subset(tempDF, test_part == 'tableNum')
tableNum$rt <- as.numeric(as.character(tableNum$rt))
# Recode keypresses
response <- rep(NA, dim(tableNum)[1])
response[tableNum$key_press == 49] <- 3 # for key press 1
response[tableNum$key_press == 50] <- 2 # for key press 2
tableNum$response <- response
# Calculate accuracy
accuracy <- rep(NA, dim(tableNum)[1])
accuracy[tableNum$response == tableNum$corr_resp] <- 1
accuracy[tableNum$response != tableNum$corr_resp] <- 0
tableNum$accuracy <- accuracy
# Create or bind to data.frame
if(i == 1){
df_order_b3 <- temporalOrder
df_order_b3$id <- i
df_room_b3 <- roomType
df_room_b3$id <- i
df_table_b3 <- tableNum
df_table_b3$id <- i
} else {
temporalOrder$id <- i
df_order_b3 <- rbind(df_order_b3, temporalOrder)
roomType$id <- i
df_room_b3 <- rbind(df_room_b3, roomType)
tableNum$id <- i
df_table_b3 <- rbind(df_table_b3, tableNum)
}
}
# Convert to id factor
df_order_b3$id <- as.factor(df_order_b3$id)
df_room_b3$id <- as.factor(df_room_b3$id)
df_table_b3$id <- as.factor(df_table_b3$id)
df_order_b3_roomInfo <- df_order_b3
df_order_b3_roomInfo <- df_order_b3_roomInfo[order(df_order_b3_roomInfo$id, df_order_b3_roomInfo$trial_index),]
df_room_b3 <- df_room_b3[order(df_room_b3$id, df_room_b3$trial_index),]
df_order_b3_roomInfo$roomType <- df_room_b3$roomType
df_order_b3_roomInfo$roomType <- ifelse(df_order_b3_roomInfo$roomType == 'nw', 'O-room', 'M-room')
agg_order_b3_roomInfo <- ddply(df_order_b3_roomInfo, c('id', 'context', 'roomType', 'condition'), summarise, acc = mean(accuracy), rt = mean(rt))
agg_order_b3_roomInfo$boundary <- ifelse(agg_order_b3_roomInfo$context == 'across', 'across', 'within')
agg_order_b3_roomInfo$Condition <- 'across'
agg_order_b3_roomInfo$Condition[agg_order_b3_roomInfo$context == 'within-open plane'] <- 'O-room'
agg_order_b3_roomInfo$Condition[agg_order_b3_roomInfo$context == 'within-M-shape'] <- 'M-room'
# /*
# ----------------------------- Plot Exp1c ---------------------------
# */
plt3 <- ggplot(agg_order_b3_roomInfo, aes(x = boundary, y = acc, fill = interaction(boundary,roomType))) +
geom_boxplot(alpha = 0.5,outlier.shape = NA, key_glyph = "rect") +
geom_point(position = position_jitterdodge(jitter.width = 0.2)) +
geom_hline(yintercept = 1/3) +
stat_summary(geom = "point", fun = "mean", col = 'black', size = 3, shape = 24, aes(fill = interaction(boundary,roomType)),
position=position_dodge(width = 0.75),
key_glyph = "rect") +
geom_segment(aes(x = 1.5, xend = 1.5, y= 0.1, yend= 1/3),colour = 'black',
arrow = arrow(length = unit(0.30,"cm"), type = "closed"), show.legend = FALSE) +
annotate('text', x = 1.5, y = 0.1 - 0.03, label = 'Chance') +
scale_color_mrc(palette = 'secondary') +
scale_fill_mrc(palette = 'secondary') +
labs(y = '3AFC accuracy', x = "Boundary", title = 'Temporal Order (Exp 1c)') +
theme(legend.justification = c(0, 1),
legend.position = c(0, 1),
legend.title = element_text(size = 10),
legend.text = element_text(size = 9),
legend.key.size = unit(0.5,"line"))+
coord_cartesian(ylim = c(0, 1))
# /*
# ----------------------------- Plot all ---------------------------
# */
all_plots <- plot_grid(plt1, plt2, plt3, ncol = 3)
save_plot("splitting_across_trials.png", all_plots,
base_height = 10/cm(1)*1.5,
base_width = 19/cm(1)*1.5,
base_aspect_ratio = 1)
# Paired Bayesian t-tests comparing across-boundary accuracy between M-room and
# O-room cue trials.
# NOTE(review): `a` and `b` are first filled from Exp 1c (agg_order_b3_roomInfo)
# but are immediately overwritten with the Exp 1a values (agg_order_b1_roomInfo)
# on the next two lines, so the batch-3 selection has no effect -- confirm which
# experiment these tests were meant to run on. Also note that batch 1 codes the
# room type as 'M-Room' (capital R, see the ifelse above), so the 'M-room' filter
# below selects zero rows -- verify.
a <- agg_order_b3_roomInfo[agg_order_b3_roomInfo$context == 'across' & agg_order_b3_roomInfo$roomType == 'M-room', 'acc']
b <- agg_order_b3_roomInfo[agg_order_b3_roomInfo$context == 'across' & agg_order_b3_roomInfo$roomType == 'O-room', 'acc']
a <- agg_order_b1_roomInfo[agg_order_b1_roomInfo$context == 'across' & agg_order_b1_roomInfo$roomType == 'M-room', 'acc']
b <- agg_order_b1_roomInfo[agg_order_b1_roomInfo$context == 'across' & agg_order_b1_roomInfo$roomType == 'O-room', 'acc']
# Bayes-factor paired t-test on arcsine-transformed accuracies ...
ttestBF(arcsine_transform(a), arcsine_transform(b), paired = TRUE)
# ... and on the raw accuracies for comparison.
ttestBF(a, b, paired = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesisvideo_operations.R
\name{kinesisvideo_get_signaling_channel_endpoint}
\alias{kinesisvideo_get_signaling_channel_endpoint}
\title{Provides an endpoint for the specified signaling channel to send and
receive messages}
\usage{
kinesisvideo_get_signaling_channel_endpoint(ChannelARN,
SingleMasterChannelEndpointConfiguration)
}
\arguments{
\item{ChannelARN}{[required] The Amazon Resource Name (ARN) of the signaling channel for which you
want to get an endpoint.}
\item{SingleMasterChannelEndpointConfiguration}{A structure containing the endpoint configuration for the
\code{SINGLE_MASTER} channel type.}
}
\description{
Provides an endpoint for the specified signaling channel to send and
receive messages. This API uses the
\code{SingleMasterChannelEndpointConfiguration} input parameter, which
consists of the \code{Protocols} and \code{Role} properties.
}
\details{
\code{Protocols} is used to determine the communication mechanism. For
example, if you specify \code{WSS} as the protocol, this API produces a
secure websocket endpoint. If you specify \code{HTTPS} as the protocol, this
API generates an HTTPS endpoint.
\code{Role} determines the messaging permissions. A \code{MASTER} role results in
this API generating an endpoint that a client can use to communicate
with any of the viewers on the channel. A \code{VIEWER} role results in this
API generating an endpoint that a client can use to communicate only
with a \code{MASTER}.
}
\section{Request syntax}{
\preformatted{svc$get_signaling_channel_endpoint(
ChannelARN = "string",
SingleMasterChannelEndpointConfiguration = list(
Protocols = list(
"WSS"|"HTTPS"
),
Role = "MASTER"|"VIEWER"
)
)
}
}
\keyword{internal}
| /paws/man/kinesisvideo_get_signaling_channel_endpoint.Rd | permissive | jcheng5/paws | R | false | true | 1,792 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesisvideo_operations.R
\name{kinesisvideo_get_signaling_channel_endpoint}
\alias{kinesisvideo_get_signaling_channel_endpoint}
\title{Provides an endpoint for the specified signaling channel to send and
receive messages}
\usage{
kinesisvideo_get_signaling_channel_endpoint(ChannelARN,
SingleMasterChannelEndpointConfiguration)
}
\arguments{
\item{ChannelARN}{[required] The Amazon Resource Name (ARN) of the signaling channel for which you
want to get an endpoint.}
\item{SingleMasterChannelEndpointConfiguration}{A structure containing the endpoint configuration for the
\code{SINGLE_MASTER} channel type.}
}
\description{
Provides an endpoint for the specified signaling channel to send and
receive messages. This API uses the
\code{SingleMasterChannelEndpointConfiguration} input parameter, which
consists of the \code{Protocols} and \code{Role} properties.
}
\details{
\code{Protocols} is used to determine the communication mechanism. For
example, if you specify \code{WSS} as the protocol, this API produces a
secure websocket endpoint. If you specify \code{HTTPS} as the protocol, this
API generates an HTTPS endpoint.
\code{Role} determines the messaging permissions. A \code{MASTER} role results in
this API generating an endpoint that a client can use to communicate
with any of the viewers on the channel. A \code{VIEWER} role results in this
API generating an endpoint that a client can use to communicate only
with a \code{MASTER}.
}
\section{Request syntax}{
\preformatted{svc$get_signaling_channel_endpoint(
ChannelARN = "string",
SingleMasterChannelEndpointConfiguration = list(
Protocols = list(
"WSS"|"HTTPS"
),
Role = "MASTER"|"VIEWER"
)
)
}
}
\keyword{internal}
|
# Example 1: newly added rural postal delivery routes (time series analysis).
# Read the data and plot the raw series.
a <- read.table("D:\\书籍资料整理\\时间序列分析_王燕\\file8.csv", sep = ",", header = TRUE)
x <- ts(a$kilometer, start = 1950)
plot(x)
# White-noise (pure randomness) test at lags 6 and 12.
for (i in 1:2) print(Box.test(x, type = "Ljung-Box", lag = 6 * i))
# ACF / PACF plots to choose the AR order.
acf(x)
pacf(x)
library(zoo)
library(forecast)
# Automatic order selection (also reports the s.e. of each estimate).
auto.arima(x)
# Fit AR(2) by maximum likelihood.
x.fit <- arima(x, order = c(2, 0, 0), method = "ML")
x.fit
# Ljung-Box test on the residuals: rejecting the null means the residuals
# still carry autocorrelation, i.e. the fitted model is not adequate.
# (Uses the full component name; the original relied on $residual partial
# matching.)
for (i in 1:2) print(Box.test(x.fit$residuals, lag = 6 * i))
# t tests of the coefficients: estimate / s.e., df = n - m (m = number of
# estimated parameters). Upper-tail p-values, matching the textbook example.
# Significance of the ar1 coefficient.
t1 <- 0.7185 / 0.1083
pt(t1, df = 56, lower.tail = FALSE)
# Significance of the ar2 coefficient.
# BUG FIX: the original passed lower.tail = T here, which returns
# P(T <= t) ~ 1 for a positive t statistic instead of the upper-tail
# p-value used for the other coefficients.
t2 <- 0.5294 / 0.1067
pt(t2, df = 56, lower.tail = FALSE)
# Significance of the intercept (the original comment mislabelled this
# "ar3"; 11.0223 and 3.0906 are the intercept estimate and its s.e.).
t0 <- 11.0223 / 3.0906
pt(t0, df = 56, lower.tail = FALSE)
# Forecast 5 steps ahead and plot.
x.fore <- forecast(x.fit, h = 5)
x.fore
plot(x.fore)
# 95% in-sample band around the fitted values (L1/U1) and the 95%
# out-of-sample forecast band (L2/U2).
L1 <- x.fore$fitted - 1.96 * sqrt(x.fit$sigma2)
U1 <- x.fore$fitted + 1.96 * sqrt(x.fit$sigma2)
L2 <- ts(x.fore$lower[, 2], start = 2009)
U2 <- ts(x.fore$upper[, 2], start = 2009)
c1 <- min(x, L1, L2)
c2 <- max(x, L2, U2)
# Observed points plus fitted/forecast lines and confidence bands.
plot(x, type = "p", pch = 8, xlim = c(1950, 2013), ylim = c(c1, c2))
lines(x.fore$fitted, col = 2, lwd = 2)
lines(x.fore$mean, col = 2, lwd = 2)
lines(L1, col = 4, lty = 2)
lines(L2, col = 4, lty = 2)
lines(U1, col = 4, lty = 2)
lines(U2, col = 4, lty = 2)
| /r/时间序列分析/平稳时间序列分析/农村快递线路发展的完整例子.R | no_license | qingfengliu/statics_use | R | false | false | 1,575 | r | #例1。农村投递线路新增主题数量
# Read the data and plot the raw time series.
a<-read.table("D:\\书籍资料整理\\时间序列分析_王燕\\file8.csv",sep=",",header = T)
x<-ts(a$kilometer,start = 1950)
plot(x)
# White-noise (pure randomness) test at lags 6 and 12.
for(i in 1:2) print(Box.test(x,type = "Ljung-Box",lag=6*i))
# Autocorrelation and partial autocorrelation plots for order selection.
acf(x)
pacf(x)
library(zoo)
library(forecast)
# Automatic order selection (also reports the s.e. of each estimate).
auto.arima(x)
# Fit AR(2) by maximum likelihood.
x.fit<-arima(x,order = c(2,0,0),method = "ML")
x.fit
# Ljung-Box test on the residuals: rejecting the null means the residuals
# still carry autocorrelation, i.e. the fitted model is not adequate.
# NOTE(review): $residual relies on partial matching of the "residuals"
# component of the arima fit; prefer x.fit$residuals.
for(i in 1:2) print(Box.test(x.fit$residual,lag=6*i))
# t tests of the coefficients: estimate / s.e., with df = n - m where m is
# the number of estimated parameters.
# Significance of the ar1 coefficient (upper-tail p-value).
t1<-0.7185/0.1083
pt(t1,df=56,lower.tail = F)
# Significance of the ar2 coefficient.
# NOTE(review): lower.tail = T looks wrong — for a positive t statistic it
# returns P(T <= t) ~ 1 rather than the upper-tail p-value used for the
# other coefficients; confirm against the source textbook.
t2<-0.5294/0.1067
pt(t2,df=56,lower.tail = T)
# Third coefficient test (labelled "ar3" in the original; 11.0223/3.0906
# presumably belong to the intercept — verify against the fit summary).
t0=11.0223/3.0906
pt(t0,df=56,lower.tail = F)
# Forecast 5 steps ahead.
x.fore<-forecast(x.fit,h=5)
x.fore
plot(x.fore)
# 95% in-sample band around fitted values and 95% forecast band.
L1<-x.fore$fitted-1.96*sqrt(x.fit$sigma2)
U1<-x.fore$fitted+1.96*sqrt(x.fit$sigma2)
L2<-ts(x.fore$lower[,2],start = 2009)
U2<-ts(x.fore$upper[,2],start = 2009)
c1<-min(x,L1,L2)
c2<-max(x,L2,U2)
# Observed points plus fitted line, point forecasts and confidence bands.
plot(x,type = "p",pch=8,xlim = c(1950,2013),ylim = c(c1,c2))
lines(x.fore$fitted,col=2,lwd=2)
lines(x.fore$mean,col=2,lwd=2)
lines(L1,col=4,lty=2)
lines(L2,col=4,lty=2)
lines(U1,col=4,lty=2)
lines(U2,col=4,lty=2)
|
rm(list=ls())
#Code for Alpine insect diversity, Glacier National Park
#Timothy Cline
#8/28/2019
library(dplyr)
library(topicmodels)
library(ldatuning)
library(slam)
library(tidytext)
library(ggplot2)
library(DirichletReg)
library(tidyr)
library(lme4)
library(MuMIn)
library(merTools)
library(doParallel)
library(parallel)
library(pscl)
library(RColorBrewer)
library(AICcmodavg)
#Required functions
# Log-odds transform: maps a proportion in (0, 1) onto the real line.
logit <- function(x) {
  odds <- x / (1 - x)
  log(odds)
}
# Inverse logit: maps a real value back onto the (0, 1) proportion scale.
iLogit <- function(x) {
  e <- exp(x)
  e / (1 + e)
}
# Standardise a numeric vector to mean 0 and (sample) standard deviation 1.
Zscore <- function(x) {
  centred <- x - mean(x)
  centred / sd(x)
}
# Load site-level field data and Little Ice Age (LIA) / current ice-cover
# areas, then derive per-site glacier metrics.
setwd('~/Documents/AlpineBugs')
AlpineMaster<-read.csv('AlpineMasterData20191122_FewerGroups_GlacialSprings.csv',header=T,stringsAsFactors=FALSE)
IceAndSnow<-read.csv('LIA calcs_20191122_2.csv',header=T,stringsAsFactors=F)
# Species columns run from Liodessus_affinis to the last column; drop
# species that were never observed at any site.
SpeciesAbundance<-AlpineMaster[,which(colnames(AlpineMaster)=="Liodessus_affinis"):ncol(AlpineMaster)]
SpeciesAbundance<-SpeciesAbundance[,colSums(SpeciesAbundance)>0]
# Presence/absence (0/1) version of the abundance table.
SpeciesPresence <- data.frame(matrix(as.numeric(SpeciesAbundance>0),nrow=nrow(SpeciesAbundance),ncol=ncol(SpeciesAbundance)))
colnames(SpeciesPresence) <- colnames(SpeciesAbundance)
# Percent of each watershed currently covered by glacier ice.
# NOTE(review): match(IceAndSnow$Site_Name, AlpineMaster$Site_Name) returns
# the positions of IceAndSnow sites within AlpineMaster and then indexes
# IceAndSnow by them; the conventional alignment for an AlpineMaster column
# would be match(AlpineMaster$Site_Name, IceAndSnow$Site_Name). The result
# is only correctly aligned if both tables list sites in the same order —
# confirm.
AlpineMaster$Pcnt_Just_Ice <- 100 * (IceAndSnow$just_ice_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)])
# Proportion of the watershed glaciated at the LIA maximum.
AlpineMaster$LIAprop <- IceAndSnow$LIA_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]
# Proportional loss of ice cover since the LIA: 1 - current/LIA.
AlpineMaster$PropLoss <- 1-((IceAndSnow$just_ice_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)])/(IceAndSnow$LIA_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]))#LIA$JG.prop.loss[LIAsiteInd]
AlpineMaster$PropLoss[is.na(AlpineMaster$PropLoss)]<-0 #zeros for sites that had no glaciers at LIA
# Absolute difference in glaciated proportion: LIA minus current.
AlpineMaster$PropDiff <- (IceAndSnow$LIA_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]) - (IceAndSnow$just_ice_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)])
#### Using Latent Dirichlet Allocation to separate communities.
#Function to create integer values
# Truncate every cell of a numeric matrix (or data frame) to integer
# storage, preserving dimensions and dimnames.
#
# The original implementation ran as.integer() cell-by-cell via
# apply(m, c(1, 2), ...), allocating one closure call per element.
# Coercing the whole matrix at once is equivalent — as.integer() truncates
# toward zero either way — and far faster. as.matrix() keeps data-frame
# inputs working, just as apply()'s internal coercion did.
forceMatrixToInteger <- function(m){
  m <- as.matrix(m)
  matrix(as.integer(m), nrow = nrow(m), ncol = ncol(m), dimnames = dimnames(m))
}
#LDA takes counts: relative abundances are scaled by 1e3 and rounded so the
# topic model receives integer pseudo-counts.
rndSpecAbund<-forceMatrixToInteger(round(SpeciesAbundance*1E3))
# Sparse triplet representation.
# NOTE(review): intSpeciesAbundance appears unused downstream — verify and
# consider removing.
intSpeciesAbundance<-as.simple_triplet_matrix(rndSpecAbund)
#RUN LDA with 2 groups (topics = communities); fixed seed for
# reproducibility.
SEED<-55
nComm<-2
VEM<-LDA(rndSpecAbund,k=nComm, control = list(seed = SEED,best=TRUE))
# Per-community species probabilities (the beta matrix) in tidy long form.
PerCommunity_PerSpecies_Prob<-tidy(VEM,matrix='beta')
# The 20 most probable species within each community, ordered by
# probability.
topSpecies<-PerCommunity_PerSpecies_Prob %>%
group_by(topic)%>%
top_n(20,beta) %>%
ungroup() %>%
arrange(topic,-beta)
#plot topSpecies (quartz() opens a graphics device on macOS only)
quartz()
topSpecies %>%
mutate(term = reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip()
# Posterior: per-site community proportions (topics) and per-community
# species probabilities (terms).
z=posterior(VEM)
commun.plot=z$topics
commun.spp=z$terms
#creates a table of species that differ most between communities (topics)
beta_spread <- PerCommunity_PerSpecies_Prob %>%
mutate(topic = paste0("topic", topic)) %>%
spread(topic, beta) %>%
filter(topic1 > .001 | topic2 > .001) %>%
mutate(log_ratio = log2(topic2 / topic1))
#10 most different species from cold and warm
BetaRat.Cold<-beta_spread %>% arrange(log_ratio) %>% slice(1:10)
BetaRat.Warm<-beta_spread %>% arrange(desc(log_ratio)) %>% slice(1:10)
#Cleaning up names
Names.BetaRat.Cold<-c('Stygobromus glacialis','Gymnopais','Allomyia bifosa','Gonomyodes','Lednia tumana',
'Pseudokiefferiella','Prosimulium','Rhyacophila ebria','Diamesa','Thienemanniella')
Names.BetaRat.Warm<-c('Parapsyche elsis','Syndiamesa','Paraperla','Roederiodes','Diplocladius cultriger',
'Epeorus grandis','Pedicia','Drunella coloradensis','Epeorus deceptivus','Zapada glacier')
#ColdWater and WarmWater communities
CWC<-rev(sort(commun.spp[1,]))
WWC<-rev(sort(commun.spp[2,]))
Names.WWC<-c('Orthocladius','Tvetenia','Pagastia','Rheocricotopus','Sweltsa','Eukiefferiella','Diamesa','Megarcys',
'Rhyacophila belona','Rhithrogena','Zapada_glacier','Zapada_columbiana','Diplocladius cultriger','Tokunagaia',
'Cinygmula','Clinocera','Setvena bradleyi','Chaetocladius','Parorthocladius','Ameletus')
Names.CWC<-c('Diamesa','Lednia tumana','Prosimulium','Allomyia bifosa','Orthocladius','Tokunagaia','Rhyacophila ebria',
'Polycelis','Thienemanniella','Gonomyodes','Chaetocladius','Gymnopais','Pseudodiamesa','Ameletus','Pseudokiefferiella',
'Stilocladius','Tvetenia', 'Allomyia tripunctata','Corynoneura','Dicranota')
#Binary as to whether glacial stream or not
AlpineMaster$Glac_Y_N <- ifelse(AlpineMaster$Pcnt_Just_Ice>0,'Y','N')
#Add communities to data table
# Column 1 of the posterior topic matrix = cold-water community proportion
# per site; column 2 = warm-water community.
ColdComm<-commun.plot[,1]
AlpineMaster$ColdComm <- ColdComm
AlpineMaster$logitColdComm<-logit(ColdComm)
WarmComm<-commun.plot[,2]
# Short-hand vectors used as model terms in the regressions below.
LCC <-AlpineMaster$logitColdComm
DIST <- AlpineMaster$Distance
ELEV<-AlpineMaster$Elevation
LDIST <- log(DIST)
GCC <- AlpineMaster$Pcnt_Just_Ice
# The +0.01 offset keeps the logit finite for sites with 0% glacier cover.
LGCC <- logit(GCC/100+0.01)
TEMP <- AlpineMaster$ts8_base
GYN <- as.numeric(AlpineMaster$Glac_Y_N=='Y')
DRAIN <- AlpineMaster$DrainageCode
STREAM <- AlpineMaster$Stream_Name
nL<-length(LCC)
#Regression — predictor index j used in the loop below:
#1Distance
#2Elev
#3GCC
#4Temp
# Fit, dredge and model-average mixed models of the (logit) cold-community
# proportion against each of four predictors in turn:
#   j = 1 log distance, 2 elevation, 3 logit glacier cover, 4 temperature.
# Predictors 1, 2 and 4 also get a glacier-present indicator (GYN) and its
# interaction; predictor 3 (glacier cover itself) does not.
if(TRUE){
for(j in c(1,2,3,4)){
cl <- makeCluster(detectCores())
registerDoParallel(cl)
VAR <- switch(j,LDIST,ELEV,LGCC,TEMP)
if(j!=3) lmIn<-lmer(LCC ~ VAR + GYN + VAR:GYN + (1|DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
if(j==3) lmIn<-lmer(LCC ~ VAR + (1|DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
# All-subsets model selection; keep models within 2 AICc units of the best.
VarDredge<-dredge(lmIn);VarDredge
TopModels<-get.models(VarDredge,delta<2)
TopModTab<-aictab(TopModels)
# Prediction grid spanning the observed predictor range, with the glacier
# indicator forced to 1 (dfY) or 0 (dfN).
pVAR <- seq(min(VAR),max(VAR),length=nL)
# NOTE(review): pLGR is only referenced by the commented-out lines below —
# vestigial.
pLGR<-seq(min(LGCC),max(LGCC),length=nL)
pY <- rep(1,nL)
pN <- rep(0,nL)
dfY<-data.frame(VAR=pVAR,GYN=pY)
dfN<-data.frame(VAR=pVAR,GYN=pN)
#dfY<-data.frame(VAR=pVAR,LGCC=rep(mean(LGCC),nL))
#dfN<-data.frame(VAR=pVAR,LGCC=rep(min(LGCC,nL)))
# Simulation-based fixed-effect prediction intervals for every top model.
thisPredG<-list()
thisPredNG<-list()
for(i in 1:length(TopModels)){
thisPredG[[i]]<-predictInterval(TopModels[[i]],newdata=dfY,n.sims=10000,level=0.95,which='fixed',.parallel=TRUE,include.resid.var = FALSE)
thisPredNG[[i]]<-predictInterval(TopModels[[i]],newdata=dfN,n.sims=10000,level=0.95,which='fixed',.parallel=TRUE,include.resid.var = FALSE)
}
# Collect fits and interval bounds: one row per top model, one column per
# grid point.
thisPredG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisPredNG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisUpperG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisUpperNG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisLowerG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisLowerNG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
for(i in 1:length(TopModels)){
thisPredG_W[i,]<-thisPredG[[i]]$fit
thisPredNG_W[i,]<-thisPredNG[[i]]$fit
thisUpperG_W[i,]<-thisPredG[[i]]$upr
thisUpperNG_W[i,]<-thisPredNG[[i]]$upr
thisLowerG_W[i,]<-thisPredG[[i]]$lwr
thisLowerNG_W[i,]<-thisPredNG[[i]]$lwr
}
# AICc-weighted average across top models, back-transformed to proportions.
# NOTE(review): this assumes aictab() lists the models in the same order as
# TopModels; aictab sorts by AICc, so confirm the two orderings agree.
FullPred_G<-iLogit(t(thisPredG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullPred_NG<-iLogit(t(thisPredNG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullLower_G<-iLogit(t(thisLowerG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullLower_NG<-iLogit(t(thisLowerNG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullUpper_G<-iLogit(t(thisUpperG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullUpper_NG<-iLogit(t(thisUpperNG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
# Stash the per-predictor results under predictor-specific names.
assign(x=switch(j,'PredG_Dist','PredG_Elev','PredG_GCC','PredG_Temp'),value=cbind(FullLower_G,FullPred_G,FullUpper_G))
assign(x=switch(j,'PredNG_Dist','PredNG_Elev','PredNG_GCC','PredNG_Temp'),value=cbind(FullLower_NG,FullPred_NG,FullUpper_NG))
assign(x=switch(j,'TopModels_Dist','TopModels_Elev','TopModels_GCC','TopModels_Temp'),value=TopModels)
assign(x=switch(j,'Lm_Dist','Lm_Elev','Lm_GCC','Lm_Temp'),value=lmIn)
assign(x=switch(j,'Dredge_Dist','Dredge_Elev','Dredge_GCC','Dredge_Temp'),value=VarDredge)
stopCluster(cl)
}
}
###### #Regression analysis for sites without glaciers
# Site subsets by glacial history; only p00 (no current glacier, whether or
# not one existed at the LIA) is used below in this section.
p0<-AlpineMaster %>% filter(LIAprop < 0.01) #Sites that had NO glacier at LIA
p100<-AlpineMaster %>% filter(LIAprop > 0.01 & Pcnt_Just_Ice==0) #Sites that LOST glacier since LIA
p00<-AlpineMaster %>% filter((LIAprop < 0.01 & Pcnt_Just_Ice==0) | (LIAprop > 0.01 & Pcnt_Just_Ice == 0)) #Sites that either lost or had no glacier (combination of previous 2)
pElse<-AlpineMaster %>% filter(Pcnt_Just_Ice>0) #Sites with Glaciers
cl <- makeCluster(detectCores())
registerDoParallel(cl)
# Predictors for the no-glacier subset; permanent snow cover gets the same
# +0.01 logit offset used for glacier cover above.
G0set<-p00
P00LCC<-G0set$logitColdComm
P00ELEV<-G0set$Elevation/1000
P00DIST<-log(G0set$Distance)
P00SNOW<-logit((IceAndSnow$permanent_snow_area_m[match(G0set$Site_Name,IceAndSnow$Site_Name)]/IceAndSnow$watershed_area_m[match(G0set$Site_Name,IceAndSnow$Site_Name)])+0.01)
P00TEMP<-G0set$ts8_base
P00DRAIN<-G0set$DrainageCode
P00STREAM <- G0set$Stream_Name
#Test for random effects
# REML fits used only to compare the two candidate random intercepts
# (drainage vs stream) by AICc.
p00full_DRAIN<-lmer(P00LCC ~ P00DIST+P00ELEV+P00TEMP+P00SNOW + (1|P00DRAIN),REML=T,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
p00full_STREAM<-lmer(P00LCC ~ P00DIST+P00ELEV+P00TEMP+P00SNOW + (1|P00STREAM),REML=T,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
AICc(p00full_DRAIN)
AICc(p00full_STREAM)
#DRAINAGE
# ML fits: intercept-only baseline, then one single-predictor model each
# for elevation, distance and temperature, all with a random drainage
# intercept.
lmBase<-lmer(P00LCC ~ 1 +(1|P00DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
summary(lmBase)
lmElv<-lmer(P00LCC ~ P00ELEV + (1|P00DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
summary(lmElv)
dfP00elev<-data.frame(P00ELEV=seq(min(P00ELEV),max(P00ELEV),length=nrow(G0set)))
# NOTE(review): P00PRED is assigned three times in this section but never
# used — vestigial.
P00PRED<-dfP00elev$P00ELEV
# if(F)/if(FALSE) blocks below hold disabled (slow) prediction-interval
# code.
if(F){
PP_ELV<-iLogit(predictInterval(lmElv,newdata=dfP00elev,n.sims=10000,level=0.95,which='fixed',.parallel=TRUE))
}
plot(G0set$Elevation,G0set$ColdComm,pch=16,cex=1.25)
lmDist<-lmer(P00LCC ~ P00DIST + (1|P00DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
summary(lmDist)
dfP00dist<-data.frame(P00DIST=seq(min(P00DIST),max(P00DIST),length=nrow(G0set)))
P00PRED<-dfP00dist$P00DIST
if(FALSE){
PP_DIST<-iLogit(predictInterval(lmDist,newdata=dfP00dist,n.sims=100000,level=0.50,which='fixed',.parallel=TRUE))
}
plot(G0set$Distance,G0set$ColdComm,pch=16,cex=1.25)
lmTemp<-lmer(P00LCC ~ P00TEMP + (1|P00DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
summary(lmTemp)
dfP00temp<-data.frame(P00TEMP=seq(min(P00TEMP),max(P00TEMP),length=nrow(G0set)))
P00PRED<-dfP00temp$P00TEMP
PP_TEMP<-iLogit(predictInterval(lmTemp,newdata=dfP00temp,n.sims=10000,level=0.95,which='fixed',.parallel=TRUE))
stopCluster(cl)
# Snapshot the entire workspace for downstream figures/analyses.
save(list=ls(),file='AlpineBugs_LDA_Clean.Rdata')
| /AlpineBugs_LDA_and_NoGlacierAnalysis_PNAS.R | no_license | tjcline/AlpineBugs_PNAS2020 | R | false | false | 11,331 | r | rm(list=ls())
#Code for Alpine insect diversity, Glacier National Park
#Timothy Cline
#8/28/2019
library(dplyr)
library(topicmodels)
library(ldatuning)
library(slam)
library(tidytext)
library(ggplot2)
library(DirichletReg)
library(tidyr)
library(lme4)
library(MuMIn)
library(merTools)
library(doParallel)
library(parallel)
library(pscl)
library(RColorBrewer)
library(AICcmodavg)
#Required functions
# Log-odds transform: maps a proportion in (0, 1) onto the real line.
logit <- function(x) {
  odds <- x / (1 - x)
  log(odds)
}
# Inverse logit: maps a real value back onto the (0, 1) proportion scale.
iLogit <- function(x) {
  e <- exp(x)
  e / (1 + e)
}
# Standardise a numeric vector to mean 0 and (sample) standard deviation 1.
Zscore <- function(x) {
  centred <- x - mean(x)
  centred / sd(x)
}
setwd('~/Documents/AlpineBugs')
AlpineMaster<-read.csv('AlpineMasterData20191122_FewerGroups_GlacialSprings.csv',header=T,stringsAsFactors=FALSE)
IceAndSnow<-read.csv('LIA calcs_20191122_2.csv',header=T,stringsAsFactors=F)
SpeciesAbundance<-AlpineMaster[,which(colnames(AlpineMaster)=="Liodessus_affinis"):ncol(AlpineMaster)]
SpeciesAbundance<-SpeciesAbundance[,colSums(SpeciesAbundance)>0]
SpeciesPresence <- data.frame(matrix(as.numeric(SpeciesAbundance>0),nrow=nrow(SpeciesAbundance),ncol=ncol(SpeciesAbundance)))
colnames(SpeciesPresence) <- colnames(SpeciesAbundance)
AlpineMaster$Pcnt_Just_Ice <- 100 * (IceAndSnow$just_ice_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)])
AlpineMaster$LIAprop <- IceAndSnow$LIA_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]
AlpineMaster$PropLoss <- 1-((IceAndSnow$just_ice_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)])/(IceAndSnow$LIA_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]))#LIA$JG.prop.loss[LIAsiteInd]
AlpineMaster$PropLoss[is.na(AlpineMaster$PropLoss)]<-0 #zeros for sites that had no glaciers at LIA
AlpineMaster$PropDiff <- (IceAndSnow$LIA_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]) - (IceAndSnow$just_ice_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)]/IceAndSnow$watershed_area_m[match(IceAndSnow$Site_Name,AlpineMaster$Site_Name)])
#### Using Latent Dirichlet Allocation to separate communities.
#Function to create integer values
# Truncate every cell of a numeric matrix (or data frame) to integer
# storage, preserving dimensions and dimnames.
#
# The original ran as.integer() cell-by-cell via apply(m, c(1, 2), ...),
# allocating one closure call per element. Whole-matrix coercion is
# equivalent (as.integer() truncates toward zero either way) and far
# faster; as.matrix() keeps data-frame inputs working, as apply()'s
# internal coercion did.
forceMatrixToInteger <- function(m){
  m <- as.matrix(m)
  matrix(as.integer(m), nrow = nrow(m), ncol = ncol(m), dimnames = dimnames(m))
}
#LDA takes counts.
rndSpecAbund<-forceMatrixToInteger(round(SpeciesAbundance*1E3))
intSpeciesAbundance<-as.simple_triplet_matrix(rndSpecAbund)
#RUN LDA with 2 groups
SEED<-55
nComm<-2
VEM<-LDA(rndSpecAbund,k=nComm, control = list(seed = SEED,best=TRUE))
PerCommunity_PerSpecies_Prob<-tidy(VEM,matrix='beta')
topSpecies<-PerCommunity_PerSpecies_Prob %>%
group_by(topic)%>%
top_n(20,beta) %>%
ungroup() %>%
arrange(topic,-beta)
#plot topSpecies
quartz()
topSpecies %>%
mutate(term = reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip()
z=posterior(VEM)
commun.plot=z$topics
commun.spp=z$terms
#creates a table of species that differ most between communities (topics)
beta_spread <- PerCommunity_PerSpecies_Prob %>%
mutate(topic = paste0("topic", topic)) %>%
spread(topic, beta) %>%
filter(topic1 > .001 | topic2 > .001) %>%
mutate(log_ratio = log2(topic2 / topic1))
#10 most different species from cold and warm
BetaRat.Cold<-beta_spread %>% arrange(log_ratio) %>% slice(1:10)
BetaRat.Warm<-beta_spread %>% arrange(desc(log_ratio)) %>% slice(1:10)
#Cleaning up names
Names.BetaRat.Cold<-c('Stygobromus glacialis','Gymnopais','Allomyia bifosa','Gonomyodes','Lednia tumana',
'Pseudokiefferiella','Prosimulium','Rhyacophila ebria','Diamesa','Thienemanniella')
Names.BetaRat.Warm<-c('Parapsyche elsis','Syndiamesa','Paraperla','Roederiodes','Diplocladius cultriger',
'Epeorus grandis','Pedicia','Drunella coloradensis','Epeorus deceptivus','Zapada glacier')
#ColdWater and WarmWater communities
CWC<-rev(sort(commun.spp[1,]))
WWC<-rev(sort(commun.spp[2,]))
Names.WWC<-c('Orthocladius','Tvetenia','Pagastia','Rheocricotopus','Sweltsa','Eukiefferiella','Diamesa','Megarcys',
'Rhyacophila belona','Rhithrogena','Zapada_glacier','Zapada_columbiana','Diplocladius cultriger','Tokunagaia',
'Cinygmula','Clinocera','Setvena bradleyi','Chaetocladius','Parorthocladius','Ameletus')
Names.CWC<-c('Diamesa','Lednia tumana','Prosimulium','Allomyia bifosa','Orthocladius','Tokunagaia','Rhyacophila ebria',
'Polycelis','Thienemanniella','Gonomyodes','Chaetocladius','Gymnopais','Pseudodiamesa','Ameletus','Pseudokiefferiella',
'Stilocladius','Tvetenia', 'Allomyia tripunctata','Corynoneura','Dicranota')
#Binary as to whether glacial stream or not
AlpineMaster$Glac_Y_N <- ifelse(AlpineMaster$Pcnt_Just_Ice>0,'Y','N')
#Add communities to data table
ColdComm<-commun.plot[,1]
AlpineMaster$ColdComm <- ColdComm
AlpineMaster$logitColdComm<-logit(ColdComm)
WarmComm<-commun.plot[,2]
LCC <-AlpineMaster$logitColdComm
DIST <- AlpineMaster$Distance
ELEV<-AlpineMaster$Elevation
LDIST <- log(DIST)
GCC <- AlpineMaster$Pcnt_Just_Ice
LGCC <- logit(GCC/100+0.01)
TEMP <- AlpineMaster$ts8_base
GYN <- as.numeric(AlpineMaster$Glac_Y_N=='Y')
DRAIN <- AlpineMaster$DrainageCode
STREAM <- AlpineMaster$Stream_Name
nL<-length(LCC)
#Regression
#1Distance
#2Elev
#3GCC
#4Temp
if(TRUE){
for(j in c(1,2,3,4)){
cl <- makeCluster(detectCores())
registerDoParallel(cl)
VAR <- switch(j,LDIST,ELEV,LGCC,TEMP)
if(j!=3) lmIn<-lmer(LCC ~ VAR + GYN + VAR:GYN + (1|DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
if(j==3) lmIn<-lmer(LCC ~ VAR + (1|DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
VarDredge<-dredge(lmIn);VarDredge
TopModels<-get.models(VarDredge,delta<2)
TopModTab<-aictab(TopModels)
pVAR <- seq(min(VAR),max(VAR),length=nL)
pLGR<-seq(min(LGCC),max(LGCC),length=nL)
pY <- rep(1,nL)
pN <- rep(0,nL)
dfY<-data.frame(VAR=pVAR,GYN=pY)
dfN<-data.frame(VAR=pVAR,GYN=pN)
#dfY<-data.frame(VAR=pVAR,LGCC=rep(mean(LGCC),nL))
#dfN<-data.frame(VAR=pVAR,LGCC=rep(min(LGCC,nL)))
thisPredG<-list()
thisPredNG<-list()
for(i in 1:length(TopModels)){
thisPredG[[i]]<-predictInterval(TopModels[[i]],newdata=dfY,n.sims=10000,level=0.95,which='fixed',.parallel=TRUE,include.resid.var = FALSE)
thisPredNG[[i]]<-predictInterval(TopModels[[i]],newdata=dfN,n.sims=10000,level=0.95,which='fixed',.parallel=TRUE,include.resid.var = FALSE)
}
thisPredG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisPredNG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisUpperG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisUpperNG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisLowerG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
thisLowerNG_W<-matrix(NA,nrow=length(TopModels),ncol=length(thisPredG[[1]][,1]))
for(i in 1:length(TopModels)){
thisPredG_W[i,]<-thisPredG[[i]]$fit
thisPredNG_W[i,]<-thisPredNG[[i]]$fit
thisUpperG_W[i,]<-thisPredG[[i]]$upr
thisUpperNG_W[i,]<-thisPredNG[[i]]$upr
thisLowerG_W[i,]<-thisPredG[[i]]$lwr
thisLowerNG_W[i,]<-thisPredNG[[i]]$lwr
}
FullPred_G<-iLogit(t(thisPredG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullPred_NG<-iLogit(t(thisPredNG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullLower_G<-iLogit(t(thisLowerG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullLower_NG<-iLogit(t(thisLowerNG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullUpper_G<-iLogit(t(thisUpperG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
FullUpper_NG<-iLogit(t(thisUpperNG_W) %*% matrix(TopModTab$AICcWt[1:length(TopModels)],ncol=1))
assign(x=switch(j,'PredG_Dist','PredG_Elev','PredG_GCC','PredG_Temp'),value=cbind(FullLower_G,FullPred_G,FullUpper_G))
assign(x=switch(j,'PredNG_Dist','PredNG_Elev','PredNG_GCC','PredNG_Temp'),value=cbind(FullLower_NG,FullPred_NG,FullUpper_NG))
assign(x=switch(j,'TopModels_Dist','TopModels_Elev','TopModels_GCC','TopModels_Temp'),value=TopModels)
assign(x=switch(j,'Lm_Dist','Lm_Elev','Lm_GCC','Lm_Temp'),value=lmIn)
assign(x=switch(j,'Dredge_Dist','Dredge_Elev','Dredge_GCC','Dredge_Temp'),value=VarDredge)
stopCluster(cl)
}
}
###### #Regression analysis for sites without glaciers
p0<-AlpineMaster %>% filter(LIAprop < 0.01) #Sites that had NO glacier at LIA
p100<-AlpineMaster %>% filter(LIAprop > 0.01 & Pcnt_Just_Ice==0) #Sites that LOST glacier since LIA
p00<-AlpineMaster %>% filter((LIAprop < 0.01 & Pcnt_Just_Ice==0) | (LIAprop > 0.01 & Pcnt_Just_Ice == 0)) #Sites that either lost or had no glacier (combination of previous 2)
pElse<-AlpineMaster %>% filter(Pcnt_Just_Ice>0) #Sites with Glaciers
cl <- makeCluster(detectCores())
registerDoParallel(cl)
G0set<-p00
P00LCC<-G0set$logitColdComm
P00ELEV<-G0set$Elevation/1000
P00DIST<-log(G0set$Distance)
P00SNOW<-logit((IceAndSnow$permanent_snow_area_m[match(G0set$Site_Name,IceAndSnow$Site_Name)]/IceAndSnow$watershed_area_m[match(G0set$Site_Name,IceAndSnow$Site_Name)])+0.01)
P00TEMP<-G0set$ts8_base
P00DRAIN<-G0set$DrainageCode
P00STREAM <- G0set$Stream_Name
#Test for random effects
p00full_DRAIN<-lmer(P00LCC ~ P00DIST+P00ELEV+P00TEMP+P00SNOW + (1|P00DRAIN),REML=T,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
p00full_STREAM<-lmer(P00LCC ~ P00DIST+P00ELEV+P00TEMP+P00SNOW + (1|P00STREAM),REML=T,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
AICc(p00full_DRAIN)
AICc(p00full_STREAM)
#DRAINAGE
lmBase<-lmer(P00LCC ~ 1 +(1|P00DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
summary(lmBase)
lmElv<-lmer(P00LCC ~ P00ELEV + (1|P00DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
summary(lmElv)
dfP00elev<-data.frame(P00ELEV=seq(min(P00ELEV),max(P00ELEV),length=nrow(G0set)))
P00PRED<-dfP00elev$P00ELEV
if(F){
PP_ELV<-iLogit(predictInterval(lmElv,newdata=dfP00elev,n.sims=10000,level=0.95,which='fixed',.parallel=TRUE))
}
plot(G0set$Elevation,G0set$ColdComm,pch=16,cex=1.25)
lmDist<-lmer(P00LCC ~ P00DIST + (1|P00DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
summary(lmDist)
dfP00dist<-data.frame(P00DIST=seq(min(P00DIST),max(P00DIST),length=nrow(G0set)))
P00PRED<-dfP00dist$P00DIST
if(FALSE){
PP_DIST<-iLogit(predictInterval(lmDist,newdata=dfP00dist,n.sims=100000,level=0.50,which='fixed',.parallel=TRUE))
}
plot(G0set$Distance,G0set$ColdComm,pch=16,cex=1.25)
lmTemp<-lmer(P00LCC ~ P00TEMP + (1|P00DRAIN),REML=F,na.action=na.fail,control=lmerControl(optimizer="Nelder_Mead"))
summary(lmTemp)
dfP00temp<-data.frame(P00TEMP=seq(min(P00TEMP),max(P00TEMP),length=nrow(G0set)))
P00PRED<-dfP00temp$P00TEMP
PP_TEMP<-iLogit(predictInterval(lmTemp,newdata=dfP00temp,n.sims=10000,level=0.95,which='fixed',.parallel=TRUE))
stopCluster(cl)
save(list=ls(),file='AlpineBugs_LDA_Clean.Rdata')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/info.R
\name{clsAdd}
\alias{clsAdd}
\alias{clsAdd,AnalysisData-method}
\alias{clsAdd,Analysis-method}
\alias{clsArrange}
\alias{clsArrange,AnalysisData-method}
\alias{clsArrange,Analysis-method}
\alias{clsAvailable}
\alias{clsAvailable,AnalysisData-method}
\alias{clsAvailable,Analysis-method}
\alias{clsExtract}
\alias{clsExtract,AnalysisData-method}
\alias{clsExtract,Analysis-method}
\alias{clsRemove}
\alias{clsRemove,AnalysisData-method}
\alias{clsRemove,Analysis-method}
\alias{clsRename}
\alias{clsRename,AnalysisData-method}
\alias{clsRename,Analysis-method}
\alias{clsReplace}
\alias{clsReplace,AnalysisData-method}
\alias{clsReplace,Analysis-method}
\title{Sample meta information wrangling}
\usage{
clsAdd(d, cls, value, ...)
\S4method{clsAdd}{AnalysisData}(d, cls, value)
\S4method{clsAdd}{Analysis}(d, cls, value, type = c("pre-treated", "raw"))
clsArrange(d, cls = "class", descending = FALSE, ...)
\S4method{clsArrange}{AnalysisData}(d, cls = "class", descending = FALSE)
\S4method{clsArrange}{Analysis}(
d,
cls = "class",
descending = FALSE,
type = c("pre-treated", "raw")
)
clsAvailable(d, ...)
\S4method{clsAvailable}{AnalysisData}(d)
\S4method{clsAvailable}{Analysis}(d, type = c("pre-treated", "raw"))
clsExtract(d, cls = "class", ...)
\S4method{clsExtract}{AnalysisData}(d, cls = "class")
\S4method{clsExtract}{Analysis}(d, cls = "class", type = c("pre-treated", "raw"))
clsRemove(d, cls, ...)
\S4method{clsRemove}{AnalysisData}(d, cls)
\S4method{clsRemove}{Analysis}(d, cls, type = c("pre-treated", "raw"))
clsRename(d, cls, newName, ...)
\S4method{clsRename}{AnalysisData}(d, cls, newName)
\S4method{clsRename}{Analysis}(d, cls, newName, type = c("pre-treated", "raw"))
clsReplace(d, value, cls = "class", ...)
\S4method{clsReplace}{AnalysisData}(d, value, cls = "class")
\S4method{clsReplace}{Analysis}(d, value, cls = "class", type = c("pre-treated", "raw"))
}
\arguments{
\item{d}{S4 object of class Analysis or AnalysisData}
\item{cls}{sample info column to extract}
\item{value}{vector of new sample information for replacement}
\item{...}{arguments to pass to specific method}
\item{type}{\code{raw} or \code{pre-treated} sample information}
\item{descending}{TRUE/FALSE, arrange samples in descending order}
\item{newName}{new column name}
}
\description{
Query or alter sample meta information in \code{AnalysisData} or \code{Analysis} class objects.
Replace a given sample info column from an Analysis or
AnalysisData object.
}
\section{Methods}{
\itemize{
\item \code{clsAdd}: Add a sample information column.
\item \code{clsArrange}: Arrange sample row order by a specified sample information column.
\item \code{clsAvailable}: Retrieve the names of the available sample information columns.
\item \code{clsExtract}: Extract the values of a specified sample information column.
\item \code{clsRemove}: Remove a sample information column.
\item \code{clsRename}: Rename a sample information column.
\item \code{clsReplace}: Replace a sample information column.
}
}
\examples{
library(metaboData)
d <- analysisData(abr1$neg,abr1$fact)
## Add a sample information column named 'new'
d <- clsAdd(d,'new',1:nSamples(d))
print(d)
## Arrange the row orders by the 'day' column
d <- clsArrange(d,'day')
clsExtract(d,'day')
## Retrieve the available sample information column names
clsAvailable(d)
## Extract the values of the 'day' column
clsExtract(d,'day')
## Remove the 'class' column
d <- clsRemove(d,'class')
clsAvailable(d)
## Rename the 'day' column to 'treatment'
d <- clsRename(d,'day','treatment')
clsAvailable(d)
## Replace the values of the 'treatment' column
d <- clsReplace(d,rep(1,nSamples(d)),'treatment')
clsExtract(d,'treatment')
}
| /man/cls.Rd | no_license | jasenfinch/metabolyseR | R | false | true | 3,803 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/info.R
\name{clsAdd}
\alias{clsAdd}
\alias{clsAdd,AnalysisData-method}
\alias{clsAdd,Analysis-method}
\alias{clsArrange}
\alias{clsArrange,AnalysisData-method}
\alias{clsArrange,Analysis-method}
\alias{clsAvailable}
\alias{clsAvailable,AnalysisData-method}
\alias{clsAvailable,Analysis-method}
\alias{clsExtract}
\alias{clsExtract,AnalysisData-method}
\alias{clsExtract,Analysis-method}
\alias{clsRemove}
\alias{clsRemove,AnalysisData-method}
\alias{clsRemove,Analysis-method}
\alias{clsRename}
\alias{clsRename,AnalysisData-method}
\alias{clsRename,Analysis-method}
\alias{clsReplace}
\alias{clsReplace,AnalysisData-method}
\alias{clsReplace,Analysis-method}
\title{Sample meta information wrangling}
\usage{
clsAdd(d, cls, value, ...)
\S4method{clsAdd}{AnalysisData}(d, cls, value)
\S4method{clsAdd}{Analysis}(d, cls, value, type = c("pre-treated", "raw"))
clsArrange(d, cls = "class", descending = FALSE, ...)
\S4method{clsArrange}{AnalysisData}(d, cls = "class", descending = FALSE)
\S4method{clsArrange}{Analysis}(
d,
cls = "class",
descending = FALSE,
type = c("pre-treated", "raw")
)
clsAvailable(d, ...)
\S4method{clsAvailable}{AnalysisData}(d)
\S4method{clsAvailable}{Analysis}(d, type = c("pre-treated", "raw"))
clsExtract(d, cls = "class", ...)
\S4method{clsExtract}{AnalysisData}(d, cls = "class")
\S4method{clsExtract}{Analysis}(d, cls = "class", type = c("pre-treated", "raw"))
clsRemove(d, cls, ...)
\S4method{clsRemove}{AnalysisData}(d, cls)
\S4method{clsRemove}{Analysis}(d, cls, type = c("pre-treated", "raw"))
clsRename(d, cls, newName, ...)
\S4method{clsRename}{AnalysisData}(d, cls, newName)
\S4method{clsRename}{Analysis}(d, cls, newName, type = c("pre-treated", "raw"))
clsReplace(d, value, cls = "class", ...)
\S4method{clsReplace}{AnalysisData}(d, value, cls = "class")
\S4method{clsReplace}{Analysis}(d, value, cls = "class", type = c("pre-treated", "raw"))
}
\arguments{
\item{d}{S4 object of class Analysis or AnalysisData}
\item{cls}{sample info column to extract}
\item{value}{vactor of new sample information for replacement}
\item{...}{arguments to pass to specific method}
\item{type}{\code{raw} or \code{pre-treated} sample information}
\item{descending}{TRUE/FALSE, arrange samples in descending order}
\item{newName}{new column name}
}
\description{
Query or alter sample meta information in \code{AnalysisData} or \code{Analysis} class objects.
Replace a given sample info column from an Analysis or
AnalysisData object.
}
\section{Methods}{
\itemize{
\item \code{clsAdd}: Add a sample information column.
\item \code{clsArrange}: Arrange sample row order by a specified sample information column.
\item \code{clsAvailable}: Retrieve the names of the available sample information columns.
\item \code{clsExtract}: Extract the values of a specified sample information column.
\item \code{clsRemove}: Remove a sample information column.
\item \code{clsRename}: Rename a sample information column.
\item \code{clsReplace}: Replace a sample information column.
}
}
\examples{
library(metaboData)
d <- analysisData(abr1$neg,abr1$fact)
## Add a sample information column named 'new'
d <- clsAdd(d,'new',1:nSamples(d))
print(d)
## Arrange the row orders by the 'day' column
d <- clsArrange(d,'day')
clsExtract(d,'day')
## Retreive the available sample information column names
clsAvailable(d)
## Extract the values of the 'day' column
clsExtract(d,'day')
## Remove the 'class' column
d <- clsRemove(d,'class')
clsAvailable(d)
## Rename the 'day' column to 'treatment'
d <- clsRename(d,'day','treatment')
clsAvailable(d)
## Replace the values of the 'treatment' column
d <- clsReplace(d,rep(1,nSamples(d)),'treatment')
clsExtract(d,'treatment')
}
|
# Differences between old and new aggregated data files ----
agg_new <- readRDS(file = "./Cache/Re-analysis_cache/agg_data.rds")
agg_old <- readRDS(file = "./Cache/Re-analysis_cache/agg_data_old.rds")
# Big differences seem to arise for
# - Oligochaeta
# - Agraylea
# - Hydroptila
# - Ithytrichia
# -> why?
diff_dat <- agg_new$weighted - agg_old$weighted
diff_dat$taxon <- rownames(diff_dat)
setDT(diff_dat)
melt(diff_dat, id.vars = "taxon") %>%
.[value >= 0.5| value <= -0.5, ]
agg_old$weighted[rownames(agg_old$weighted) == "Oligochaeta", ]
agg_new$weighted[rownames(agg_new$weighted) == "Oligochaeta", ]
# Differences in old and new trait data ----
# New version uses first fwe and than tachet,
# + problem with sum(c()) solved
trait_dat <-
load_data(path = "./Data/", pattern = "harmonized.*\\.rds")
# European trait data are already normalized
trait_eu <- trait_dat[["Trait_freshecol_2020_pp_harmonized.rds"]]
trait_eu_old <- trait_dat[["Trait_freshecol_2020_pp_harmonized_old.rds"]]
trait_datasets <- list("new" = trait_eu,
"old" = trait_eu_old)
output <- list()
for(nam in names(trait_datasets)) {
dat <- trait_datasets[[nam]]
# Select traits
# taxon_cp includes taxa names from the raw data
cols <-
"feed|locom|resp|volt|ovip|size|order|family|genus|species|taxon_cp"
trait_eu_subset <- dat[, .SD, .SDcols = names(trait_eu) %like% cols]
# change col order
setcolorder(
x = trait_eu_subset,
neworder = c(
"order",
"family",
"genus",
"species",
"feed_herbivore",
"feed_gatherer",
"feed_predator",
"feed_shredder",
"feed_filter",
"feed_parasite",
"locom_swim",
"locom_burrow",
"locom_crawl",
"locom_sessil",
"resp_teg",
"resp_gil",
"resp_pls_spi"
)
)
# clean genus and family column from sp. (only for merging later!)
trait_eu_subset[is.na(species), genus := sub("(?i) Gen\\. sp\\.| sp\\.", "", genus)]
trait_eu_subset[is.na(species) &
is.na(genus), family := sub("(?i) Gen\\. sp\\.| sp\\.", "", family)]
#____________________________________________________________________________________________
# Preparation for re-analysis ----
# few taxa have to be re-named or re-assigned
#____________________________________________________________________________________________
# Chaetopterygini to genus column
trait_eu_subset[taxon_cp %like% "Chaetopterygini", genus := "Chaetopterygini"]
# ORTHOCLADIINAE/DIAMESINAE is also PRODIAMESINAE
trait_eu_subset[taxon_cp %like% "Orthocladiinae", genus := "Prodiamesinae"]
# Some taxa on subfamily level re-assigned to genus-lvl to merge them
# later to ecor_Q
search <- paste0(
c(
"Chironomini",
"Limnephilini",
"Tanypodinae",
"Tanytarsini",
"Hemerodromiinae",
"Clinocerinae",
"Pediciinae"
),
collapse = "|"
)
trait_eu_subset[taxon_cp %like% search, genus := sub(" Gen\\. sp\\.", "", taxon_cp)]
# add taxa column for merge
trait_eu_subset[, taxa := coalesce(species, genus, family, order)]
# taxa that are on sub-family/family-lvl in ecor_L:
# (and genus or species-level in tachet/freshwaterecol)
# need to be aggregated via median (likewise in Szöcs et al. 2014)
trait_cols <- grep(
"order|family|genus|species|tax.*",
names(trait_eu_subset),
value = TRUE,
invert = TRUE
)
agg_traits <- trait_eu_subset[family %in% c(
"Ceratopogonidae",
"Empididae",
"Lepidostomatidae",
"Limoniidae",
"Perlodidae",
"Prodiamesinae",
"Psychomyiidae",
"Spongillidae",
"Chironomidae",
"Tubificidae",
"Limnephilidae",
"Coenagrionidae"
), lapply(.SD, median, na.rm = TRUE),
.SDcols = trait_cols,
by = "family"] %>%
normalize_by_rowSum(
x = .,
non_trait_cols = c("order",
"family",
"genus",
"species",
"taxa",
"taxon_cp")
)
# create taxa column
agg_traits[, taxa := family]
# aggregate all Oligochaeta taxa according to Szöcs et al. 2014
# -> are actually (sub)class
trait_eu_subset[family %in% c(
"Haplotaxidae",
"Tubificidae",
"Enchytraeidae",
"Propappidae",
"Lumbriculidae",
"Dorydrilidae",
"Lumbricidae",
"Sparganophilidae",
"Branchiobdellidae"
), sub_class := "Oligochaeta"]
output[[nam]] <- trait_eu_subset
}
# Here, the difference between the falsely (old) and newly harmonised trait dataset
# becomes apparent
# -> in "old" sometimes 0 has been inserted instead of NA (sum(c()) = 0 in R)
# Therefore, e.g. all locom traits are assigned 0 for Oligochaeta.
# Instead, new, locom_burrow == 1, the others == 0
output$old[sub_class == "Oligochaeta", lapply(.SD, median, na.rm = TRUE),
.SDcols = trait_cols,
by = "sub_class"] %>%
normalize_by_rowSum(x = .,
non_trait_cols = "sub_class") %>% print()
output$new[sub_class == "Oligochaeta", lapply(.SD, median, na.rm = TRUE),
.SDcols = trait_cols,
by = "sub_class"]
#####
trait_eu_old[species == "Cernosvitoviella atrata", ]
trait_eu[species == "Cernosvitoviella atrata", ]
trait_eu_old[genus %like% "Agraylea", ]
trait_eu[genus %like% "Agraylea", ] | /R/test_agg.R | no_license | KunzstLD/Trait-aggregation | R | false | false | 5,397 | r | # Differences between old and new aggregated data files ----
agg_new <- readRDS(file = "./Cache/Re-analysis_cache/agg_data.rds")
agg_old <- readRDS(file = "./Cache/Re-analysis_cache/agg_data_old.rds")
# Big differences seem to arise for
# - Oligochaeta
# - Agraylea
# - Hydroptila
# - Ithytrichia
# -> why?
diff_dat <- agg_new$weighted - agg_old$weighted
diff_dat$taxon <- rownames(diff_dat)
setDT(diff_dat)
melt(diff_dat, id.vars = "taxon") %>%
.[value >= 0.5| value <= -0.5, ]
agg_old$weighted[rownames(agg_old$weighted) == "Oligochaeta", ]
agg_new$weighted[rownames(agg_new$weighted) == "Oligochaeta", ]
# Differences in old and new trait data ----
# New version uses first fwe and than tachet,
# + problem with sum(c()) solved
trait_dat <-
load_data(path = "./Data/", pattern = "harmonized.*\\.rds")
# European trait data are already normalized
trait_eu <- trait_dat[["Trait_freshecol_2020_pp_harmonized.rds"]]
trait_eu_old <- trait_dat[["Trait_freshecol_2020_pp_harmonized_old.rds"]]
trait_datasets <- list("new" = trait_eu,
"old" = trait_eu_old)
output <- list()
for(nam in names(trait_datasets)) {
dat <- trait_datasets[[nam]]
# Select traits
# taxon_cp includes taxa names from the raw data
cols <-
"feed|locom|resp|volt|ovip|size|order|family|genus|species|taxon_cp"
trait_eu_subset <- dat[, .SD, .SDcols = names(trait_eu) %like% cols]
# change col order
setcolorder(
x = trait_eu_subset,
neworder = c(
"order",
"family",
"genus",
"species",
"feed_herbivore",
"feed_gatherer",
"feed_predator",
"feed_shredder",
"feed_filter",
"feed_parasite",
"locom_swim",
"locom_burrow",
"locom_crawl",
"locom_sessil",
"resp_teg",
"resp_gil",
"resp_pls_spi"
)
)
# clean genus and family column from sp. (only for merging later!)
trait_eu_subset[is.na(species), genus := sub("(?i) Gen\\. sp\\.| sp\\.", "", genus)]
trait_eu_subset[is.na(species) &
is.na(genus), family := sub("(?i) Gen\\. sp\\.| sp\\.", "", family)]
#____________________________________________________________________________________________
# Preparation for re-analysis ----
# few taxa have to be re-named or re-assigned
#____________________________________________________________________________________________
# Chaetopterygini to genus column
trait_eu_subset[taxon_cp %like% "Chaetopterygini", genus := "Chaetopterygini"]
# ORTHOCLADIINAE/DIAMESINAE is also PRODIAMESINAE
trait_eu_subset[taxon_cp %like% "Orthocladiinae", genus := "Prodiamesinae"]
# Some taxa on subfamily level re-assigned to genus-lvl to merge them
# later to ecor_Q
search <- paste0(
c(
"Chironomini",
"Limnephilini",
"Tanypodinae",
"Tanytarsini",
"Hemerodromiinae",
"Clinocerinae",
"Pediciinae"
),
collapse = "|"
)
trait_eu_subset[taxon_cp %like% search, genus := sub(" Gen\\. sp\\.", "", taxon_cp)]
# add taxa column for merge
trait_eu_subset[, taxa := coalesce(species, genus, family, order)]
# taxa that are on sub-family/family-lvl in ecor_L:
# (and genus or species-level in tachet/freshwaterecol)
# need to be aggregated via median (likewise in Szöcs et al. 2014)
trait_cols <- grep(
"order|family|genus|species|tax.*",
names(trait_eu_subset),
value = TRUE,
invert = TRUE
)
agg_traits <- trait_eu_subset[family %in% c(
"Ceratopogonidae",
"Empididae",
"Lepidostomatidae",
"Limoniidae",
"Perlodidae",
"Prodiamesinae",
"Psychomyiidae",
"Spongillidae",
"Chironomidae",
"Tubificidae",
"Limnephilidae",
"Coenagrionidae"
), lapply(.SD, median, na.rm = TRUE),
.SDcols = trait_cols,
by = "family"] %>%
normalize_by_rowSum(
x = .,
non_trait_cols = c("order",
"family",
"genus",
"species",
"taxa",
"taxon_cp")
)
# create taxa column
agg_traits[, taxa := family]
# aggregate all Oligochaeta taxa according to Szöcs et al. 2014
# -> are actually (sub)class
trait_eu_subset[family %in% c(
"Haplotaxidae",
"Tubificidae",
"Enchytraeidae",
"Propappidae",
"Lumbriculidae",
"Dorydrilidae",
"Lumbricidae",
"Sparganophilidae",
"Branchiobdellidae"
), sub_class := "Oligochaeta"]
output[[nam]] <- trait_eu_subset
}
# Here, the difference between the falsely (old) and newly harmonised trait dataset
# becomes apparent
# -> in "old" sometimes 0 has been inserted instead of NA (sum(c()) = 0 in R)
# Therefore, e.g. all locom traits are assigned 0 for Oligochaeta.
# Instead, new, locom_burrow == 1, the others == 0
output$old[sub_class == "Oligochaeta", lapply(.SD, median, na.rm = TRUE),
.SDcols = trait_cols,
by = "sub_class"] %>%
normalize_by_rowSum(x = .,
non_trait_cols = "sub_class") %>% print()
output$new[sub_class == "Oligochaeta", lapply(.SD, median, na.rm = TRUE),
.SDcols = trait_cols,
by = "sub_class"]
#####
trait_eu_old[species == "Cernosvitoviella atrata", ]
trait_eu[species == "Cernosvitoviella atrata", ]
trait_eu_old[genus %like% "Agraylea", ]
trait_eu[genus %like% "Agraylea", ] |
#plus11<- sapply(plus11sample, function(x){x$getElementText()})
#plus11<- c(plus11, unlist(repl))
#more$clickElement()
#plus11sample$clickElement()
library(stringr)
library(RSelenium)
library(dplyr)
remDr <- remoteDriver(remoteServerAddr = "localhost" , port = 4445, browserName = "chrome")
remDr$open()
#pb 상품
url <- 'http://gs25.gsretail.com/gscvs/ko/products/youus-main'
remDr$navigate(url)
#FRESH 상품 더보기
more<-remDr$findElement(using='css','#contents > div.yCmsComponent.span-24.section1.cms_disp-img_slot > div > div > div.cnt_section.mt50 > div > div.prod_section.differentiation > div > span > a')
more$getElementTagName()
more$getElementText()
more$clickElement()
plusPBSselector <- NULL
plusPBSsample<-NULL
plusPBSname<-NULL
plusPBSmanuf<-NULL
plusPBSprice <- NULL
plusPBSdate <- NULL
plusPBSstore <- NULL
plusPBSphoto <- NULL
names<-NULL
eventGoodPBS<-NULL
plusPBSnameSample<-NULL
k<-1
finished <- FALSE
for (i in 1:39){
for(n in 1:16){
#상품명
plusPBSsample <- NULL
plusPBSsample <- remDr$findElement(using='css',paste0("#contents > div.yCmsComponent.span-24.section1.cms_disp-img_slot > div > div > div > div > div > div.tblwrap.mt20 > div.tab_cont.on > ul > li:nth-child(",n,") > div > p.tit"))
if(is.null(plusPBSsample)){
print("finish")
finished <- TRUE
break;
}
plusPBSname<- plusPBSsample$getElementText()
plusPBSname<-as.character(plusPBSname)
plusPBSnameSample<-append(plusPBSnameSample, plusPBSname)
plusPBSname%>%strsplit(.,")")%>%unlist -> names
# eventGood
# 롯데)김밥세트(김치) 라면 // 롯데)김밥세트 // 롯데)김밥세트(김치)
#제조사
if(names[1]=='더큰'|names[1]=='삼각'|names[1]=='NEW'){
eventGoodPBS<-append(eventGoodPBS,paste0(names2[1],")",names2[2]))
plusPBSmanuf<-append(plusPBSmanuf, "GSPB")
}
else{
#for(j in 1:1){
if(length(names)==3) {
eventGoodPBS<-append(eventGoodPBS,paste0(names[2],names[3],sep=")"))
plusPBSmanuf <- append(plusPBSmanuf,names[1])
}
else if(length(names)==2&length(grep('\\(',names))==0){
eventGoodPBS<-append(eventGoodPBS,names[2])
plusPBSmanuf <- append(plusPBSmanuf,names[1])
}
else if(length(names)==2&length(grep('\\(', names))==2){
eventGoodPBS<-append(eventGoodPBS, paste0(names[1],names[2],")"))
plusPBSmanuf<-append(plusPBSmanuf, "GSPB")
}
else if(length(names)==1){
eventGoodPBS<-append(eventGoodPBS,names[1])
plusPBSmanuf<-append(plusPBSmanuf, "GSPB")
}else{
eventGoodPBS<-append(eventGoodPBS,plusPBSname)
plusPBSmanuf<-append(plusPBSmanuf, "GSPB")
# print(plusPBSname)
# print("another")
}
#}
}
cat(k)
k<-k+1
#가격
plusPBSsample <- remDr$findElement(using='css', paste0("#contents > div.yCmsComponent.span-24.section1.cms_disp-img_slot > div > div > div > div > div > div.tblwrap.mt20 > div.tab_cont.on > ul > li:nth-child(",n,") > div > p.price > span"))
plusPBSprice<- append(plusPBSprice,plusPBSsample$getElementText())
#날짜
plusPBSdate <- append(plusPBSdate, Sys.Date())
#유통업체
plusPBSstore <- append(plusPBSstore, "GS리테일")
}
#if(finished == TRUE) break;
more<-remDr$findElement(using='css','#contents > div.yCmsComponent.span-24.section1.cms_disp-img_slot > div > div > div > div > div > div.tblwrap.mt20 > div.paging > a.next')
more$getElementTagName()
more$getElementText()
more$clickElement()
Sys.sleep(1)
}
#가격
plusPBSprice%>% gsub("원","",.) ->plusPBSprice
#cbind
gsplusPBSproduct <- data.frame(plusPBSdate, eventGoodPBS, plusPBSstore, plusPBSprice, plusPBSmanuf) #157 152 157 157 152
View(gsplusPBSproduct)
names(gsplusPBSproduct)=c("기준날짜","상품명","판매업소","판매가격","제조사")
write.csv(gsplusPBSsproduct,paste0(Sys.Date(),"_GSPB_Special.csv"))
| /R/gs_pb_spe.R | no_license | GyuyoungEom/pmkim-project | R | false | false | 3,971 | r | #plus11<- sapply(plus11sample, function(x){x$getElementText()})
#plus11<- c(plus11, unlist(repl))
#more$clickElement()
#plus11sample$clickElement()
library(stringr)
library(RSelenium)
library(dplyr)
remDr <- remoteDriver(remoteServerAddr = "localhost" , port = 4445, browserName = "chrome")
remDr$open()
#pb 상품
url <- 'http://gs25.gsretail.com/gscvs/ko/products/youus-main'
remDr$navigate(url)
#FRESH 상품 더보기
more<-remDr$findElement(using='css','#contents > div.yCmsComponent.span-24.section1.cms_disp-img_slot > div > div > div.cnt_section.mt50 > div > div.prod_section.differentiation > div > span > a')
more$getElementTagName()
more$getElementText()
more$clickElement()
plusPBSselector <- NULL
plusPBSsample<-NULL
plusPBSname<-NULL
plusPBSmanuf<-NULL
plusPBSprice <- NULL
plusPBSdate <- NULL
plusPBSstore <- NULL
plusPBSphoto <- NULL
names<-NULL
eventGoodPBS<-NULL
plusPBSnameSample<-NULL
k<-1
finished <- FALSE
for (i in 1:39){
for(n in 1:16){
#상품명
plusPBSsample <- NULL
plusPBSsample <- remDr$findElement(using='css',paste0("#contents > div.yCmsComponent.span-24.section1.cms_disp-img_slot > div > div > div > div > div > div.tblwrap.mt20 > div.tab_cont.on > ul > li:nth-child(",n,") > div > p.tit"))
if(is.null(plusPBSsample)){
print("finish")
finished <- TRUE
break;
}
plusPBSname<- plusPBSsample$getElementText()
plusPBSname<-as.character(plusPBSname)
plusPBSnameSample<-append(plusPBSnameSample, plusPBSname)
plusPBSname%>%strsplit(.,")")%>%unlist -> names
# eventGood
# 롯데)김밥세트(김치) 라면 // 롯데)김밥세트 // 롯데)김밥세트(김치)
#제조사
if(names[1]=='더큰'|names[1]=='삼각'|names[1]=='NEW'){
eventGoodPBS<-append(eventGoodPBS,paste0(names2[1],")",names2[2]))
plusPBSmanuf<-append(plusPBSmanuf, "GSPB")
}
else{
#for(j in 1:1){
if(length(names)==3) {
eventGoodPBS<-append(eventGoodPBS,paste0(names[2],names[3],sep=")"))
plusPBSmanuf <- append(plusPBSmanuf,names[1])
}
else if(length(names)==2&length(grep('\\(',names))==0){
eventGoodPBS<-append(eventGoodPBS,names[2])
plusPBSmanuf <- append(plusPBSmanuf,names[1])
}
else if(length(names)==2&length(grep('\\(', names))==2){
eventGoodPBS<-append(eventGoodPBS, paste0(names[1],names[2],")"))
plusPBSmanuf<-append(plusPBSmanuf, "GSPB")
}
else if(length(names)==1){
eventGoodPBS<-append(eventGoodPBS,names[1])
plusPBSmanuf<-append(plusPBSmanuf, "GSPB")
}else{
eventGoodPBS<-append(eventGoodPBS,plusPBSname)
plusPBSmanuf<-append(plusPBSmanuf, "GSPB")
# print(plusPBSname)
# print("another")
}
#}
}
cat(k)
k<-k+1
#가격
plusPBSsample <- remDr$findElement(using='css', paste0("#contents > div.yCmsComponent.span-24.section1.cms_disp-img_slot > div > div > div > div > div > div.tblwrap.mt20 > div.tab_cont.on > ul > li:nth-child(",n,") > div > p.price > span"))
plusPBSprice<- append(plusPBSprice,plusPBSsample$getElementText())
#날짜
plusPBSdate <- append(plusPBSdate, Sys.Date())
#유통업체
plusPBSstore <- append(plusPBSstore, "GS리테일")
}
#if(finished == TRUE) break;
more<-remDr$findElement(using='css','#contents > div.yCmsComponent.span-24.section1.cms_disp-img_slot > div > div > div > div > div > div.tblwrap.mt20 > div.paging > a.next')
more$getElementTagName()
more$getElementText()
more$clickElement()
Sys.sleep(1)
}
#가격
plusPBSprice%>% gsub("원","",.) ->plusPBSprice
#cbind
gsplusPBSproduct <- data.frame(plusPBSdate, eventGoodPBS, plusPBSstore, plusPBSprice, plusPBSmanuf) #157 152 157 157 152
View(gsplusPBSproduct)
names(gsplusPBSproduct)=c("기준날짜","상품명","판매업소","판매가격","제조사")
write.csv(gsplusPBSsproduct,paste0(Sys.Date(),"_GSPB_Special.csv"))
|
#'MaxPrecip function
#'
#'Generated data
#'
#'@format A data frame with 365 rows and 2 columns
#'\describe{
#'\item{rain}-{Generated precipitation values in inches per day}
#'\item{date}-{Generated dates from January 1, 1995 to December 31, 1995}
#'}
"precip"
| /R/precip.R | no_license | nburola/climateimpacts | R | false | false | 260 | r | #'MaxPrecip function
#'
#'Generated data
#'
#'@format A data frame with 365 rows and 2 columns
#'\describe{
#'\item{rain}-{Generated precipitation values in inches per day}
#'\item{date}-{Generated dates from January 1, 1995 to December 31, 1995}
#'}
"precip"
|
l = list(
a = c('1', '2', '3'),
b = c('4', '5', '6'),
m = matrix (
c(100, 101, 102, 103,
104, 105, 106, 107),
ncol = 2,
byrow = TRUE
)
)
options(width=300)
ul = unlist(l)
ul
# a1 a2 a3 b1 b2 b3 m1 m2 m3 m4 m5 m6 m7 m8
# "1" "2" "3" "4" "5" "6" "100" "102" "104" "106" "101" "103" "105" "107"
typeof(ul)
# [1] "character"
sum(as.numeric(ul))
# [1] 849
| /functions/unlist.R | no_license | ReneNyffenegger/about-r | R | false | false | 474 | r | l = list(
a = c('1', '2', '3'),
b = c('4', '5', '6'),
m = matrix (
c(100, 101, 102, 103,
104, 105, 106, 107),
ncol = 2,
byrow = TRUE
)
)
options(width=300)
ul = unlist(l)
ul
# a1 a2 a3 b1 b2 b3 m1 m2 m3 m4 m5 m6 m7 m8
# "1" "2" "3" "4" "5" "6" "100" "102" "104" "106" "101" "103" "105" "107"
typeof(ul)
# [1] "character"
sum(as.numeric(ul))
# [1] 849
|
\name{occuTTD}
\alias{occuTTD}
\title{Fit Single-Season and Dynamic Time-to-detection Occupancy Models}
\usage{occuTTD(psiformula= ~1, gammaformula = ~ 1, epsilonformula = ~ 1,
detformula = ~ 1, data, ttdDist = c("exp", "weibull"),
linkPsi = c("logit", "cloglog"), starts, method="BFGS", se=TRUE,
engine = c("C", "R"), ...)}
\arguments{
\item{psiformula}{Right-hand sided formula for the initial probability of
occupancy at each site.}
\item{gammaformula}{Right-hand sided formula for colonization probability.}
\item{epsilonformula}{Right-hand sided formula for extinction probability.}
\item{detformula}{Right-hand sided formula for mean time-to-detection.}
\item{data}{\code{unmarkedFrameOccuTTD} object that supplies the data
(see \code{\link{unmarkedFrameOccuTTD}}).}
\item{ttdDist}{Distribution to use for time-to-detection; either
\code{"exp"} for the exponential, or \code{"weibull"} for the Weibull,
which adds an additional shape parameter \eqn{k}.}
\item{linkPsi}{Link function for the occupancy model. Options are
\code{"logit"} for the standard occupancy model or \code{"cloglog"}
for the complimentary log-log link, which relates occupancy
to site-level abundance.}
\item{starts}{optionally, initial values for parameters in the optimization.}
\item{method}{Optimization method used by \code{\link{optim}}.}
\item{se}{logical specifying whether or not to compute standard errors.}
\item{engine}{Either "C" or "R" to use fast C++ code or native R
code during the optimization.}
\item{\dots}{Additional arguments to optim, such as lower and upper bounds}
}
\description{Fit time-to-detection occupancy models of Garrard et al.
(2008, 2013), either single-season or dynamic. Time-to-detection can be
modeled with either an exponential or Weibull distribution.}
\value{unmarkedFitOccuTTD object describing model fit.}
\details{
Estimates site occupancy and detection probability from time-to-detection
(TTD) data, e.g. time to first detection of a particular bird species
during a point count or time-to-detection of a plant species while searching
a quadrat (Garrard et al. 2008). Time-to-detection can be modeled
as an exponential (\code{ttdDist="exp"}) or Weibull (\code{ttdDist="weibull"})
random variable with rate parameter \eqn{\lambda} and, for the Weibull,
an additional shape parameter \eqn{k}. Note that \code{occuTTD} puts covariates
on \eqn{\lambda} and not \eqn{1/\lambda}, i.e., the expected time between events.
In the case where there are no detections before the maximum sample time at
a site (\code{surveyLength}) is reached, we are not sure if the site is
unoccupied or if we just didn't wait long enough for a detection. We therefore
must censor the exponential or Weibull distribution at the maximum survey
length, \eqn{Tmax}. Thus, assuming true site occupancy at site \eqn{i} is
\eqn{z_i}, an exponential distribution for the TTD \eqn{y_i}, and that
\eqn{d_i = 1} indicates \eqn{y_i} is censored (Kery and Royle 2016):
\deqn{d_i = z_i * I(y_i > Tmax_i) + (1 - z_i)}
and
\deqn{y_i|z_i \sim Exponential(\lambda_i), d_i = 0}
\deqn{y_i|z_i = Missing, d_i = 1}
Because in \code{unmarked} values of \code{NA} are typically used to indicate
missing values that were a result of the sampling structure (e.g., lost data),
we indicate a censored \eqn{y_i} in \code{occuTTD} instead by setting
\eqn{y_i = Tmax_i} in the \code{y} matrix provided to
\code{\link{unmarkedFrameOccuTTD}}. You can provide either a single value of
\eqn{Tmax} to the \code{surveyLength} argument of \code{unmarkedFrameOccuTTD},
or provide a matrix, potentially with a unique value of \eqn{Tmax} for each
value of \code{y}. Note that in the latter case the value of \code{y} that will
be interpreted by \code{occutTTD} as a censored observation (i.e., \eqn{Tmax})
will differ between observations!
Occupancy and detection can be estimated with only a single survey per site,
unlike a traditional occupancy model that requires at least two replicated
surveys at at least some sites. However, \code{occuTTD} also supports
multiple surveys per site using the model described in Garrard et al. (2013).
Furthermore, multi-season dynamic models are supported, using the same basic
structure as for standard occupancy models (see \code{\link{colext}}).
When \code{linkPsi = "cloglog"}, the complimentary log-log link
function is used for \eqn{psi} instead of the logit link. The cloglog link
relates occupancy probability to the intensity parameter of an underlying
Poisson process (Kery and Royle 2016). Thus, if abundance at a site is
can be modeled as \eqn{N_i ~ Poisson(\lambda_i)}, where
\eqn{log(\lambda_i) = \alpha + \beta*x}, then presence/absence data at the
site can be modeled as \eqn{Z_i ~ Binomial(\psi_i)} where
\eqn{cloglog(\psi_i) = \alpha + \beta*x}.
}
\references{
Garrard, G.E., Bekessy, S.A., McCarthy, M.A. and Wintle, B.A. 2008. When have
we looked hard enough? A novel method for setting minimum survey effort
protocols for flora surveys. Austral Ecology 33: 986-998.
Garrard, G.E., McCarthy, M.A., Williams, N.S., Bekessy, S.A. and Wintle,
B.A. 2013. A general model of detectability using species traits. Methods in
Ecology and Evolution 4: 45-52.
Kery, Marc, and J. Andrew Royle. 2016. \emph{Applied Hierarchical Modeling in
Ecology}, Volume 1. Academic Press.
}
\author{Ken Kellner \email{contact@kenkellner.com}}
\seealso{\code{\link{unmarked}}, \code{\link{unmarkedFrameOccuTTD}}}
\keyword{models}
\examples{
\dontrun{
### Single season model
N <- 500; J <- 1
#Simulate occupancy
scovs <- data.frame(elev=c(scale(runif(N, 0,100))),
forest=runif(N,0,1),
wind=runif(N,0,1))
beta_psi <- c(-0.69, 0.71, -0.5)
psi <- plogis(cbind(1, scovs$elev, scovs$forest) \%*\% beta_psi)
z <- rbinom(N, 1, psi)
#Simulate detection
Tmax <- 10 #Same survey length for all observations
beta_lam <- c(-2, -0.2, 0.7)
rate <- exp(cbind(1, scovs$elev, scovs$wind) \%*\% beta_lam)
ttd <- rexp(N, rate)
ttd[z==0] <- Tmax #Censor at unoccupied sites
ttd[ttd>Tmax] <- Tmax #Censor when ttd was greater than survey length
#Build unmarkedFrame
umf <- unmarkedFrameOccuTTD(y=ttd, surveyLength=Tmax, siteCovs=scovs)
#Fit model
fit <- occuTTD(psiformula=~elev+forest, detformula=~elev+wind, data=umf)
#Predict psi values
predict(fit, type='psi', newdata=data.frame(elev=0.5, forest=1))
#Predict lambda values
predict(fit, type='det', newdata=data.frame(elev=0.5, wind=0))
#Calculate p, probability species is detected at a site given it is present
#for a value of lambda. This is equivalent to eq 4 of Garrard et al. 2008
lam <- predict(fit, type='det', newdata=data.frame(elev=0.5, wind=0))$Predicted
pexp(Tmax, lam)
#Estimated p for all observations
head(getP(fit))
### Dynamic model
N <- 1000; J <- 2; T <- 2
scovs <- data.frame(elev=c(scale(runif(N, 0,100))),
forest=runif(N,0,1),
wind=runif(N,0,1))
beta_psi <- c(-0.69, 0.71, -0.5)
psi <- plogis(cbind(1, scovs$elev, scovs$forest) \%*\% beta_psi)
z <- matrix(NA, N, T)
z[,1] <- rbinom(N, 1, psi)
#Col/ext process
ysc <- data.frame(forest=rep(scovs$forest, each=T),
elev=rep(scovs$elev, each=T))
c_b0 <- -0.4; c_b1 <- 0.3
gam <- plogis(c_b0 + c_b1 * scovs$forest)
e_b0 <- -0.7; e_b1 <- 0.4
ext <- plogis(e_b0 + e_b1 * scovs$elev)
for (i in 1:N){
for (t in 1:(T-1)){
if(z[i,t]==1){
#ext
z[i,t+1] <- rbinom(1, 1, (1-ext[i]))
} else {
#col
z[i,t+1] <- rbinom(1,1, gam[i])
}
}
}
#Simulate detection
ocovs <- data.frame(obs=rep(c('A','B'),N*T))
Tmax <- 10
beta_lam <- c(-2, -0.2, 0.7)
rate <- exp(cbind(1, scovs$elev, scovs$wind) \%*\% beta_lam)
#Add second observer at each site
rateB <- exp(cbind(1, scovs$elev, scovs$wind) \%*\% beta_lam - 0.5)
#Across seasons
rate2 <- as.numeric(t(cbind(rate, rateB, rate, rateB)))
ttd <- rexp(N*T*2, rate2)
ttd <- matrix(ttd, nrow=N, byrow=T)
ttd[ttd>Tmax] <- Tmax
ttd[z[,1]==0,1:2] <- Tmax
ttd[z[,2]==0,3:4] <- Tmax
umf <- unmarkedFrameOccuTTD(y = ttd, surveyLength = Tmax,
siteCovs = scovs, obsCovs=ocovs,
yearlySiteCovs=ysc, numPrimary=2)
dim(umf@y) #num sites, (num surveys x num primary periods)
fit <- occuTTD(psiformula=~elev+forest,detformula=~elev+wind+obs,
gammaformula=~forest, epsilonformula=~elev,
data=umf,se=T,engine="C")
truth <- c(beta_psi, c_b0, c_b1, e_b0, e_b1, beta_lam, -0.5)
#Compare to truth
cbind(coef(fit), truth)
}
}
| /fuzzedpackages/unmarked/man/occuTTD.Rd | no_license | akhikolla/testpackages | R | false | false | 8,628 | rd | \name{occuTTD}
\alias{occuTTD}
\title{Fit Single-Season and Dynamic Time-to-detection Occupancy Models}
\usage{occuTTD(psiformula= ~1, gammaformula = ~ 1, epsilonformula = ~ 1,
detformula = ~ 1, data, ttdDist = c("exp", "weibull"),
linkPsi = c("logit", "cloglog"), starts, method="BFGS", se=TRUE,
engine = c("C", "R"), ...)}
\arguments{
\item{psiformula}{Right-hand sided formula for the initial probability of
occupancy at each site.}
\item{gammaformula}{Right-hand sided formula for colonization probability.}
\item{epsilonformula}{Right-hand sided formula for extinction probability.}
\item{detformula}{Right-hand sided formula for mean time-to-detection.}
\item{data}{\code{unmarkedFrameOccuTTD} object that supplies the data
(see \code{\link{unmarkedFrameOccuTTD}}).}
\item{ttdDist}{Distribution to use for time-to-detection; either
\code{"exp"} for the exponential, or \code{"weibull"} for the Weibull,
which adds an additional shape parameter \eqn{k}.}
\item{linkPsi}{Link function for the occupancy model. Options are
\code{"logit"} for the standard occupancy model or \code{"cloglog"}
for the complimentary log-log link, which relates occupancy
to site-level abundance.}
\item{starts}{optionally, initial values for parameters in the optimization.}
\item{method}{Optimization method used by \code{\link{optim}}.}
\item{se}{logical specifying whether or not to compute standard errors.}
\item{engine}{Either "C" or "R" to use fast C++ code or native R
code during the optimization.}
\item{\dots}{Additional arguments to optim, such as lower and upper bounds}
}
\description{Fit time-to-detection occupancy models of Garrard et al.
(2008, 2013), either single-season or dynamic. Time-to-detection can be
modeled with either an exponential or Weibull distribution.}
\value{unmarkedFitOccuTTD object describing model fit.}
\details{
Estimates site occupancy and detection probability from time-to-detection
(TTD) data, e.g. time to first detection of a particular bird species
during a point count or time-to-detection of a plant species while searching
a quadrat (Garrard et al. 2008). Time-to-detection can be modeled
as an exponential (\code{ttdDist="exp"}) or Weibull (\code{ttdDist="weibull"})
random variable with rate parameter \eqn{\lambda} and, for the Weibull,
an additional shape parameter \eqn{k}. Note that \code{occuTTD} puts covariates
on \eqn{\lambda} and not \eqn{1/\lambda}, i.e., the expected time between events.
In the case where there are no detections before the maximum sample time at
a site (\code{surveyLength}) is reached, we are not sure if the site is
unoccupied or if we just didn't wait long enough for a detection. We therefore
must censor the exponential or Weibull distribution at the maximum survey
length, \eqn{Tmax}. Thus, assuming true site occupancy at site \eqn{i} is
\eqn{z_i}, an exponential distribution for the TTD \eqn{y_i}, and that
\eqn{d_i = 1} indicates \eqn{y_i} is censored (Kery and Royle 2016):
\deqn{d_i = z_i * I(y_i > Tmax_i) + (1 - z_i)}
and
\deqn{y_i|z_i \sim Exponential(\lambda_i), d_i = 0}
\deqn{y_i|z_i = Missing, d_i = 1}
Because in \code{unmarked} values of \code{NA} are typically used to indicate
missing values that were a result of the sampling structure (e.g., lost data),
we indicate a censored \eqn{y_i} in \code{occuTTD} instead by setting
\eqn{y_i = Tmax_i} in the \code{y} matrix provided to
\code{\link{unmarkedFrameOccuTTD}}. You can provide either a single value of
\eqn{Tmax} to the \code{surveyLength} argument of \code{unmarkedFrameOccuTTD},
or provide a matrix, potentially with a unique value of \eqn{Tmax} for each
value of \code{y}. Note that in the latter case the value of \code{y} that will
be interpreted by \code{occutTTD} as a censored observation (i.e., \eqn{Tmax})
will differ between observations!
Occupancy and detection can be estimated with only a single survey per site,
unlike a traditional occupancy model that requires at least two replicated
surveys at at least some sites. However, \code{occuTTD} also supports
multiple surveys per site using the model described in Garrard et al. (2013).
Furthermore, multi-season dynamic models are supported, using the same basic
structure as for standard occupancy models (see \code{\link{colext}}).
When \code{linkPsi = "cloglog"}, the complimentary log-log link
function is used for \eqn{psi} instead of the logit link. The cloglog link
relates occupancy probability to the intensity parameter of an underlying
Poisson process (Kery and Royle 2016). Thus, if abundance at a site
can be modeled as \eqn{N_i ~ Poisson(\lambda_i)}, where
\eqn{log(\lambda_i) = \alpha + \beta*x}, then presence/absence data at the
site can be modeled as \eqn{Z_i ~ Binomial(\psi_i)} where
\eqn{cloglog(\psi_i) = \alpha + \beta*x}.
}
\references{
Garrard, G.E., Bekessy, S.A., McCarthy, M.A. and Wintle, B.A. 2008. When have
we looked hard enough? A novel method for setting minimum survey effort
protocols for flora surveys. Austral Ecology 33: 986-998.
Garrard, G.E., McCarthy, M.A., Williams, N.S., Bekessy, S.A. and Wintle,
B.A. 2013. A general model of detectability using species traits. Methods in
Ecology and Evolution 4: 45-52.
Kery, Marc, and J. Andrew Royle. 2016. \emph{Applied Hierarchical Modeling in
Ecology}, Volume 1. Academic Press.
}
\author{Ken Kellner \email{contact@kenkellner.com}}
\seealso{\code{\link{unmarked}}, \code{\link{unmarkedFrameOccuTTD}}}
\keyword{models}
\examples{
\dontrun{
### Single season model
N <- 500; J <- 1
#Simulate occupancy
scovs <- data.frame(elev=c(scale(runif(N, 0,100))),
forest=runif(N,0,1),
wind=runif(N,0,1))
beta_psi <- c(-0.69, 0.71, -0.5)
psi <- plogis(cbind(1, scovs$elev, scovs$forest) \%*\% beta_psi)
z <- rbinom(N, 1, psi)
#Simulate detection
Tmax <- 10 #Same survey length for all observations
beta_lam <- c(-2, -0.2, 0.7)
rate <- exp(cbind(1, scovs$elev, scovs$wind) \%*\% beta_lam)
ttd <- rexp(N, rate)
ttd[z==0] <- Tmax #Censor at unoccupied sites
ttd[ttd>Tmax] <- Tmax #Censor when ttd was greater than survey length
#Build unmarkedFrame
umf <- unmarkedFrameOccuTTD(y=ttd, surveyLength=Tmax, siteCovs=scovs)
#Fit model
fit <- occuTTD(psiformula=~elev+forest, detformula=~elev+wind, data=umf)
#Predict psi values
predict(fit, type='psi', newdata=data.frame(elev=0.5, forest=1))
#Predict lambda values
predict(fit, type='det', newdata=data.frame(elev=0.5, wind=0))
#Calculate p, probability species is detected at a site given it is present
#for a value of lambda. This is equivalent to eq 4 of Garrard et al. 2008
lam <- predict(fit, type='det', newdata=data.frame(elev=0.5, wind=0))$Predicted
pexp(Tmax, lam)
#Estimated p for all observations
head(getP(fit))
### Dynamic model
N <- 1000; J <- 2; T <- 2
scovs <- data.frame(elev=c(scale(runif(N, 0,100))),
forest=runif(N,0,1),
wind=runif(N,0,1))
beta_psi <- c(-0.69, 0.71, -0.5)
psi <- plogis(cbind(1, scovs$elev, scovs$forest) \%*\% beta_psi)
z <- matrix(NA, N, T)
z[,1] <- rbinom(N, 1, psi)
#Col/ext process
ysc <- data.frame(forest=rep(scovs$forest, each=T),
elev=rep(scovs$elev, each=T))
c_b0 <- -0.4; c_b1 <- 0.3
gam <- plogis(c_b0 + c_b1 * scovs$forest)
e_b0 <- -0.7; e_b1 <- 0.4
ext <- plogis(e_b0 + e_b1 * scovs$elev)
for (i in 1:N){
for (t in 1:(T-1)){
if(z[i,t]==1){
#ext
z[i,t+1] <- rbinom(1, 1, (1-ext[i]))
} else {
#col
z[i,t+1] <- rbinom(1,1, gam[i])
}
}
}
#Simulate detection
ocovs <- data.frame(obs=rep(c('A','B'),N*T))
Tmax <- 10
beta_lam <- c(-2, -0.2, 0.7)
rate <- exp(cbind(1, scovs$elev, scovs$wind) \%*\% beta_lam)
#Add second observer at each site
rateB <- exp(cbind(1, scovs$elev, scovs$wind) \%*\% beta_lam - 0.5)
#Across seasons
rate2 <- as.numeric(t(cbind(rate, rateB, rate, rateB)))
ttd <- rexp(N*T*2, rate2)
ttd <- matrix(ttd, nrow=N, byrow=T)
ttd[ttd>Tmax] <- Tmax
ttd[z[,1]==0,1:2] <- Tmax
ttd[z[,2]==0,3:4] <- Tmax
umf <- unmarkedFrameOccuTTD(y = ttd, surveyLength = Tmax,
siteCovs = scovs, obsCovs=ocovs,
yearlySiteCovs=ysc, numPrimary=2)
dim(umf@y) #num sites, (num surveys x num primary periods)
fit <- occuTTD(psiformula=~elev+forest,detformula=~elev+wind+obs,
gammaformula=~forest, epsilonformula=~elev,
data=umf,se=T,engine="C")
truth <- c(beta_psi, c_b0, c_b1, e_b0, e_b1, beta_lam, -0.5)
#Compare to truth
cbind(coef(fit), truth)
}
}
|
# Author: Brady Engelke
# Propensity score matching (PSM) of US states on demographic/economic
# controls, comparing states that legalized recreational marijuana in 2016
# (treatment) against non-legalizing states (control).
library(tidyverse)
library("Hmisc")  # rcorr() for the correlation matrix
library(MatchIt)  # matchit() for nearest-neighbor matching
library(readxl)   # NOTE(review): not used below -- candidate for removal
library(ggpubr)   # NOTE(review): not used below -- candidate for removal
setwd("~/MSBA/spring/Econometrics/project")
# load data
master <- read.csv('clean_data/normalized_m.csv', header = TRUE)
# perform necessary transformations: drop bookkeeping columns, remove states
# that legalized before 2016, and flag the 2016 legalizers as treated
master <- master %>%
  select(-X, -X.1, -legalization, -PScore) %>%
  filter(State != 'Colorado' & State != 'Washington' & State != 'Alaska' & State != 'Oregon') %>%
  mutate(legality = ifelse(State %in% c('Nevada', 'California', 'Massachusetts', 'Maine'), 1, 0))
master_pre <- master %>% select(-State)
master$legality <- as.factor(master$legality)
master_pre$legality <- as.factor(master_pre$legality)
# correlation matrix of the candidate control variables
controls <- master_pre %>% select(-legality, -pp)
rcorr(as.matrix(controls))
# psm: fit a logit, then a probit propensity score model for comparison
ps_model <- glm(legality ~ age + pp + prop_male + ps + rev_ratio + beverage + population_density,
                data = master_pre, family = 'binomial')
summary(ps_model)
ps_model <- glm(legality ~ age + pp + prop_male + ps + rev_ratio + beverage + population_density,
                data = master_pre, family = binomial(link = 'probit'))
summary(ps_model)
# check results: overlay propensity score densities by treatment status
master$propensity_score <- ps_model$fitted.values
ggplot(master) + geom_density(aes(x = propensity_score, color = legality))
# FIX: the original selected a nonexistent `PS` column (the fitted values
# were stored as `propensity_score`), which errored at runtime.
master <- master %>% select(State, legality, propensity_score, age:population_density)
# run MatchIt algo: 1:1 nearest-neighbor matching on the logit propensity
# score with a tight caliper and no replacement
match_output <- matchit(legality ~ age + pp + prop_male + ps + rev_ratio + beverage + population_density,
                        data = master_pre, method = "nearest", distance = "logit",
                        caliper = 0.01, replace = FALSE, ratio = 1)
summary(match_output)
data_matched <- match.data(match_output)
# NOTE(review): state names are assigned by position and assume the match
# always yields exactly these four rows in this order -- fragile.
data_matched$state <- c('Maine', 'Minnesota', 'Nevada', 'Texas')
pscores <- master %>%
  filter(State %in% c('Maine', 'Minnesota', 'Nevada', 'Texas')) %>%
  rename(state = State) %>%
  select(state, propensity_score)
data_matched <- merge(data_matched, pscores, by = 'state')
data_matched <- data_matched %>% select(state, legality, propensity_score, age:population_density)
# visualizations
# FIX: the legend is mapped to `fill`, so label `fill`; the original labelled
# an unmapped `color` aesthetic with a leftover 'Search Engine' title.
ggplot(data_matched) + geom_col(aes(x = state, y = propensity_score, fill = legality)) +
  labs(fill = 'Legality') +
  ylab('Propensity Score') + xlab('State') + theme_minimal()
master$states <- c(1:46)
master <- master %>% filter(State != 'California')
# blank labels everywhere except the four matched states
master$state_symb <- c('', '', '', '', '', '', '', '', '' , '', '',
                       '', '', '', '', 'ME', '', '', '', 'MN', '' , '', '',
                       '', 'NV', '', '', '', '', '', '', '', '' , '', '',
                       '', '', '', 'TX', '', '', '', '', '', '' )
# FIX: points are distinguished by `color`, not `shape`, so label `color`;
# the original labs(shape = ...) had no effect on the legend.
ggplot(master, aes(x = states, y = propensity_score, color = legality)) + geom_point() +
  geom_text(aes(label = state_symb), hjust = -0.15, vjust = -0.15) +
  labs(color = 'Legality') +
  ylab('Propensity Score') + xlab('State') + theme_minimal() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank())
| /econometrics/car_accidents/scripts/(3)_psm.R | no_license | BradyEngelke/msba | R | false | false | 2,983 | r | # Author: Brady Engelke
# Propensity score matching of US states: treated = states that legalized
# recreational marijuana in 2016; controls = non-legalizing states.
library(tidyverse)
library("Hmisc")
library(MatchIt)
library(readxl)   # NOTE(review): not used in this script
library(ggpubr)   # NOTE(review): not used in this script
setwd("~/MSBA/spring/Econometrics/project")
# load data
master <- read.csv('clean_data/normalized_m.csv', header = TRUE)
# perform necessary transformations: drop bookkeeping columns, remove states
# that legalized before 2016, flag 2016 legalizers as treated (legality = 1)
master <- master %>%
  select(-X, -X.1, -legalization, -PScore) %>%
  filter(State != 'Colorado' & State != 'Washington' & State != 'Alaska' & State != 'Oregon') %>%
  mutate(legality = ifelse(State %in% c('Nevada', 'California', 'Massachusetts', 'Maine'), 1, 0))
master_pre <- master %>% select(-State)
master$legality <- as.factor(master$legality)
master_pre$legality <- as.factor(master_pre$legality)
# correlation matrix of the candidate control variables
controls <- master_pre %>% select(-legality, -pp)
rcorr(as.matrix(controls))
# psm: logit propensity score model, then probit for comparison
ps_model <- glm(legality ~ age + pp + prop_male + ps + rev_ratio + beverage + population_density,
                data = master_pre, family = 'binomial')
summary(ps_model)
ps_model <- glm(legality ~ age + pp + prop_male + ps + rev_ratio + beverage + population_density,
                data = master_pre, family = binomial(link = 'probit'))
summary(ps_model)
# check results: density of fitted propensity scores by treatment status
master$propensity_score <- ps_model$fitted.values
ggplot(master) + geom_density(aes(x = propensity_score, color = legality))
# NOTE(review): `PS` does not exist in `master` at this point (the scores
# were stored as `propensity_score`), so this select() errors -- likely typo.
master <- master %>% select(State, legality, PS, age:population_density)
# run MatchIt algo: 1:1 nearest-neighbor logit matching with a tight caliper
match_output <- matchit(legality ~ age + pp + prop_male + ps + rev_ratio + beverage + population_density,
                        data = master_pre, method = "nearest", distance = "logit",
                        caliper = 0.01, replace = FALSE, ratio = 1)
summary(match_output)
data_matched <- match.data(match_output)
# NOTE(review): state names assigned by position -- assumes the match always
# returns exactly these four rows in this order.
data_matched$state <- c('Maine', 'Minnesota', 'Nevada', 'Texas')
pscores <- master %>%
  filter(State %in% c('Maine', 'Minnesota', 'Nevada', 'Texas')) %>%
  rename(state = State) %>%
  select(state, propensity_score)
data_matched <- merge(data_matched, pscores, by ='state')
data_matched <- data_matched %>% select(state, legality, propensity_score, age:population_density)
# visualizations
# NOTE(review): the legend is mapped to `fill`; labs(color = ...) has no
# effect, and 'Search Engine' looks like a leftover from another project.
ggplot(data_matched) + geom_col(aes(x = state, y = propensity_score, fill = legality)) +
  labs(color = 'Search Engine') +
  ylab('Propensity Score') + xlab('State') + theme_minimal()
master$states <- c(1:46)
master <- master %>% filter(State != 'California')
# blank labels everywhere except the four matched states
master$state_symb <- c('', '', '', '', '', '', '', '', '' , '', '',
                       '', '', '', '', 'ME', '', '', '', 'MN', '' , '', '',
                       '', 'NV', '', '', '', '', '', '', '', '' , '', '',
                       '', '', '', 'TX', '', '', '', '', '', '' )
# NOTE(review): points vary by `color`, so labs(shape = ...) has no effect.
ggplot(master, aes(x = states, y = propensity_score, color = legality)) + geom_point() +
  geom_text(aes(label= state_symb), hjust = -0.15, vjust = -0.15) +
  labs(shape = 'Legality') +
  ylab('Propensity Score') + xlab('State') + theme_minimal() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank())
#' Calculates index scores for Number Line Estimation game.
#'
#' Now the mean absolute error (mean_err) is calculated. Future work will
#' be to do model fitting.
#'
#' @param data Raw data of class \code{data.frame}.
#' @param ... Other input argument for future expansion.
#' @return A \code{data.frame} contains following values:
#' \describe{
#' \item{mean_err}{Mean absolute error.}
#' \item{is_normal}{Checking result whether the data is normal.}
#' }
#' @importFrom magrittr %>%
#' @importFrom rlang .data
#' @export
nle <- function(data, ...) {
  # Both the target number and the participant's response must be present
  # before any scoring can happen.
  required_vars <- c("Number", "Resp")
  if (!all(utils::hasName(data, required_vars))) {
    warning("`Number` and `Resp` variables are required.")
    # Sentinel result: NA score, flagged as not normal.
    return(data.frame(mean_err = NA_real_, is_normal = FALSE))
  }
  # Score = mean absolute deviation between the presented number and the
  # response. NOTE(review): the docs call this "percent absolute error",
  # but no division by Number occurs here -- confirm which is intended.
  data %>%
    dplyr::summarise(
      mean_err = mean(abs(.data$Number - .data$Resp)),
      is_normal = TRUE
    )
}
| /R/nle.R | permissive | Blockhead-yj/cognitive_training | R | false | false | 925 | r | #' Calculates index scores for Number Line Estimation game.
#'
#' Now the mean absolute error (mean_err) is calculated. Future work will
#' be to do model fitting.
#'
#' @param data Raw data of class \code{data.frame}.
#' @param ... Other input argument for future expansion.
#' @return A \code{data.frame} contains following values:
#' \describe{
#' \item{mean_err}{Mean absolute error.}
#' \item{is_normal}{Checking result whether the data is normal.}
#' }
#' @importFrom magrittr %>%
#' @importFrom rlang .data
#' @export
nle <- function(data, ...) {
  # Guard: both the presented number and the response are needed to score.
  if (!all(utils::hasName(data, c("Number", "Resp")))) {
    warning("`Number` and `Resp` variables are required.")
    # Sentinel result: NA score, flagged as not normal.
    return(
      data.frame(
        mean_err = NA_real_,
        is_normal = FALSE
      )
    )
  }
  # Mean absolute error between the presented number and the response.
  # NOTE(review): the roxygen docs say "percent absolute error", but no
  # division by Number happens here -- confirm which is intended.
  data %>%
    dplyr::mutate(err = abs(.data$Number - .data$Resp)) %>%
    dplyr::summarise(mean_err = mean(.data$err), is_normal = TRUE)
}
|
#' Construct plotmath labels for time lines
#'
#' Internal, called by `plot_timeline`.
#'
#' @param description The text for the items to be shown beneath the images.
#' Should be a character vector of the same length as `image_path`.
#' @param description_width Width of the description text in characters to wrap
#' the text in the labels.
#' @param ref_number A string of the number for reference. If NA, then the
#' reference will not be included in the plot.
#' @return A character vector of plotmath, to enable superscripts, italics, and
#' text wrapping.
#' @importFrom stringr str_replace_all
make_labs <- function(description, description_width, ref_number) {
  # Wrap the description to the requested width and join the wrapped pieces
  # with HTML line breaks, so the rich-text geom renders a multi-line label.
  wrapped <- paste(strwrap(description, width = description_width),
                   collapse = "<br>")
  if (is.na(ref_number)) {
    return(wrapped)
  }
  # Append the reference number as a superscript after the description.
  paste0(wrapped, "<sup>", ref_number, "</sup>")
}
#' Plot time line with images and descriptions of each event
#'
#' The time line is plotted horizontally with ticks for years. The labels with
#' descriptions are above and/or below that line.
#'
#' @param events_df A data frame with at least these columns:
#' \describe{
#' \item{date_published}{A vector of Date objects for dates when the event of
#' interest was published. Note that the actual time when those events occurred
#' is most likely earlier, sometimes much earlier than the publication date,
#' and the events might have become quite influential before their publication.
#' But the publication date is the easiest way to get an actual date.}
#' \item{description}{Short descriptions of the events. The plot won't look
#' good if the descriptions are too long.}
#' }
#' @param ys A numeric vector of the y coordinates of the items. Since I don't
#' know how to implement the ggrepel algorithm to make sure that the labels don't
#' overlap for geom_richtext, I have to manually set the y coordinates to make
#' sure that the labels don't overlap and look good.
#' @param description_width Width of the description text in characters to wrap
#' the text in the labels.
#' @param expand_x A numeric vector of length 2 of the proportion to expand the
#' x axis on the left and the right. This is a way to manually make sure that the
#' labels are not cropped off at the edge of the plot.
#' @param expand_y Same as expand_x, but for the y axis.
#' @param include_refs Logical, indicating whether to include references in the
#' text box.
#' @return A ggplot2 object for the plot.
#' @importFrom dplyr case_when arrange between
#' @importFrom purrr map2_chr
#' @importFrom lubridate floor_date ceiling_date
#' @importFrom ggplot2 ggplot geom_point aes geom_hline geom_segment scale_x_date
#' expansion scale_y_continuous theme_void annotate scale_fill_manual
#' @importFrom rlang %||%
#' @export
plot_timeline <- function(events_df, ys, description_width = 20,
                          expand_x = c(0.1, 0.1), expand_y = c(0.05, 0.05),
                          include_refs = TRUE) {
  # Suggested packages required for rich-text labels; fail early if missing.
  .pkg_check("grid")
  .pkg_check("gridtext")
  # Dummy assignments to silence R CMD check notes about NSE column names.
  image <- date_published <- description <- lab <- vjusts <- NULL
  # Order `sheet` by the package-internal `sheet_fill` palette and prefix
  # each description with its publication year.
  events_df <- events_df %>%
    mutate(sheet = factor(sheet, levels = names(sheet_fill)),
           description = paste0(year(date_published), " ", description))
  # Build the wrapped HTML label text, with or without reference superscripts.
  if (include_refs) {
    events_df <- events_df %>%
      mutate(lab = map2_chr(description, ref_number,
                            ~ make_labs(.x, description_width, .y)))
  } else {
    events_df <- events_df %>%
      mutate(lab = map_chr(description,
                           ~ make_labs(.x, description_width,
                                       ref_number = NA)))
  }
  # Labels above the baseline (ys >= 0) hang down (vjust = 1); labels below
  # hang up (vjust = 0). `ys` is assigned after sorting by date, so it must
  # be supplied in date order.
  events_df <- events_df %>%
    arrange(date_published) %>%
    mutate(vjusts = case_when(ys >= 0 ~ 1,
                              TRUE ~ 0),
           ys = ys)
  # Choose a year-tick spacing appropriate to the span of the time line.
  yrs_range <- max(year(events_df$date_published)) - min(year(events_df$date_published))
  date_brks <- case_when(yrs_range <= 20 ~ "1 year",
                         between(yrs_range, 20, 50) ~ "2 years",
                         between(yrs_range, 50, 100) ~ "5 years",
                         TRUE ~ "10 years")
  axis <- seq(floor_date(min(events_df$date_published), "year"),
              ceiling_date(max(events_df$date_published), "year"),
              by = date_brks)
  # Keep only palette entries actually present so the legend stays tidy.
  sheet_fill <- sheet_fill[names(sheet_fill) %in% unique(events_df$sheet)]
  # Horizontal baseline with event points, stems, and rich-text labels.
  # NOTE(review): geom_richtext() presumably comes from ggtext -- confirm it
  # is imported elsewhere in the package.
  p <- ggplot(events_df) +
    geom_point(aes(x = date_published), y = 0) +
    geom_hline(yintercept = 0) +
    geom_segment(aes(x = date_published, y = 0, xend = date_published, yend = ys)) +
    geom_richtext(aes(x = date_published, y = ys, fill = sheet,
                      label = lab, vjust = vjusts)) +
    scale_x_date(expand = expansion(expand_x)) +
    scale_y_continuous(expand = expansion(expand_y)) +
    scale_fill_manual(values = sheet_fill, name = "Type") +
    theme_void() +
    # Year tick marks/labels drawn manually because theme_void() removes axes.
    annotate("point", x = axis, y = 0, shape = 3) +
    annotate("text", x = axis, y = 0, label = format(axis, "%Y"),
             color = "gray50", vjust = 1.4)
  p
}
#' Number of publications per year
#'
#' Plot bar plot of the number of publications per year. I find facetting
#' makes the plot easier to read than filling with different colors.
#'
#' @param pubs A data frame with at least these columns:
#' \describe{
#' \item{journal}{Name of the journal of the paper.}
#' \item{year}{Year when the paper was published.}
#' }
#' There must be one row per publication or per method or species for each title
#' if faceting by those. If facetting, then a column whose name is the value in
#' `facet_by` must be present.
#' @param facet_by Name of a column for facetting.
#' @param fill_by Name of a column of a categorical variable with which to color
#' the histogram.
#' @param binwidth Width of bins for the histogram in days.
#' @param preprints Logical, whether preprints should be included. Defaults to
#' `TRUE` to include preprints.
#' @param n_top Number of categories with the most publications to plot in facets;
#' the other categories are lumped into "other".
#' @param n_top_fill Number of categories with the most publications to be
#' differentiated by color.
#' @param sort_by How to sort the facets. first_appeared means the category that
#' appeared earlier will be nearer to the top. count means the category with more
#' count (number of publications) will be nearer to the top. Ignored if not
#' facetting.
#' @return A ggplot2 object.
#' @importFrom dplyr filter select
#' @importFrom forcats fct_reorder fct_lump_n
#' @importFrom rlang !! sym
#' @importFrom ggplot2 geom_bar scale_x_continuous labs theme facet_wrap ggproto
#' layer
#' @importFrom scales breaks_pretty
#' @export
pubs_per_year <- function(pubs, facet_by = NULL, fill_by = NULL, binwidth = 365,
                          preprints = TRUE, n_top = Inf, n_top_fill = Inf,
                          sort_by = c("first_appeared", "count", "recent_count")) {
  # Dummy assignments to silence R CMD check notes about NSE column names.
  journal <- date_published <- facets <- NULL
  sort_by <- match.arg(sort_by)
  if (!preprints) {
    pubs <- pubs %>%
      filter(!journal %in% c("bioRxiv", "arXiv"))
  }
  if (!is.null(facet_by)) {
    pubs <- pubs %>%
      mutate(facets = !!sym(facet_by))
    # Order facet levels per `sort_by`; `w` is the per-row weight later used
    # by fct_lump_n (uniform for the first two modes, recency-based for the
    # third).
    if (sort_by == "first_appeared") {
      pubs <- pubs %>%
        mutate(facets = fct_reorder(facets, date_published, .fun = "min"),
               w = 1)
    } else if (sort_by == "count") {
      pubs <- pubs %>%
        mutate(facets = fct_infreq(facets),
               w = 1)
    } else {
      # "recent_count": weight only publications from the last two bins.
      date_thresh <- lubridate::as_date(max(pubs$date_published) - lubridate::ddays(binwidth) * 2)
      pubs <- pubs %>%
        mutate(w = as.numeric(date_published > date_thresh),
               facets = fct_reorder(facets, w, .fun = "sum", .desc = TRUE))
    }
    # Keep the n_top heaviest facet levels; lump the rest into "Other".
    pubs <- pubs %>%
      mutate(facets = fct_lump_n(facets, n = n_top, ties.method = "first", w = w))
  }
  if (!is.null(fill_by)) {
    if (fill_by == "species") {
      # Use fixed colors for species (package-internal `species_cols`);
      # anything outside the palette is lumped into "Other".
      pubs <- pubs %>%
        mutate(fill = case_when(species %in% names(species_cols) ~ species,
                                TRUE ~ "Other"),
               fill = fct_infreq(fill) %>% fct_relevel("Other", after = Inf))
    } else {
      # Generic (non-species) fill variable: branch on whether it is
      # discrete (character/factor) or numeric.
      # NOTE(review): `is_discrete` and `use_int` are reused further down;
      # that works only because the later branches are guarded by the same
      # fill_by conditions -- fragile ordering, confirm before refactoring.
      is_discrete <- is.character(pubs[[fill_by]]) | is.factor(pubs[[fill_by]])
      if (is_discrete) {
        if (n_top_fill > 11) {
          warning("Maximum of 12 colors are supported for colorblind friendly palette, ",
                  "less common categories are lumped into Other.")
          n_top_fill <- 11
        }
        pubs <- pubs %>%
          mutate(fill = fct_infreq(!!sym(fill_by)),
                 fill = fct_lump_n(fill, n = n_top_fill, ties.method = "first"))
        if ("Other" %in% pubs$fill) {
          pubs <- pubs %>%
            mutate(fill = fct_infreq(fill) %>% fct_relevel("Other", after = Inf))
        }
      } else {
        # Small integer ranges get one factor level per integer; otherwise
        # the numeric variable is binned with cut().
        use_int <- is.integer(pubs[[fill_by]]) & length(unique(pubs[[fill_by]])) < 10
        if (use_int) {
          pubs <- pubs %>%
            mutate(fill = factor(!!sym(fill_by), levels = seq.int(min(pubs[[fill_by]]),
                                                                  max(pubs[[fill_by]]), 1)))
        } else {
          if (n_top_fill > 9) {
            warning("Maximum of 9 colors are supported for binned palette.")
            n_top_fill <- 9
          }
          pubs <- pubs %>%
            mutate(fill = cut(!!sym(fill_by), breaks = n_top_fill))
        }
      }
    }
  }
  p <- ggplot(pubs, aes(date_published))
  if (!is.null(facet_by)) {
    # Gray background histogram of ALL publications repeated in each facet,
    # for visual comparison against the facet's own counts.
    p <- p +
      geom_histogram(aes(fill = "all"), binwidth = binwidth,
                     data = select(pubs, -facets),
                     fill = "gray70", alpha = 0.5, show.legend = FALSE)
  }
  if (!is.null(fill_by)) {
    p <- p +
      geom_histogram(aes(fill = fill), binwidth = binwidth)
    if (fill_by != "species") {
      # Palette choice mirrors the fill-variable branch above.
      if (is_discrete) {
        pal_use <- ifelse(n_top_fill > 7, "Paired", "Set2")
        p <- p +
          scale_fill_brewer(palette = pal_use)
      } else if (use_int) {
        # Fixed viridis color per integer value, subset to levels present.
        n_viridis <- max(pubs[[fill_by]]) - min(pubs[[fill_by]]) + 1
        pal_use <- scales::viridis_pal()(n_viridis)
        names(pal_use) <- as.character(seq.int(min(pubs[[fill_by]]),
                                               max(pubs[[fill_by]]), 1))
        pal_use <- pal_use[as.character(sort(unique(pubs$fill)))]
        p <- p + scale_fill_manual(values = pal_use, drop = TRUE)
      } else {
        p <- p +
          scale_fill_viridis_d()
      }
    } else {
      # Species names are italicized in the legend, per biology convention.
      species_cols <- species_cols[names(species_cols) %in% unique(pubs$fill)]
      p <- p +
        scale_fill_manual(values = species_cols, name = "", drop = TRUE) +
        theme(legend.text = element_text(face = "italic"))
    }
  } else {
    p <- p +
      geom_histogram(binwidth = binwidth)
  }
  p <- p +
    scale_y_continuous(expand = expansion(mult = c(0, 0.05)),
                       breaks = breaks_pretty()) +
    scale_x_date(breaks = breaks_pretty(10)) +
    labs(y = "Number of publications", x = "Date published") +
    theme(panel.grid.minor = element_blank())
  if (!is.null(facet_by)) {
    p <- p +
      facet_wrap(~ facets, ncol = 1)
  }
  p
}
#' Number of publications per category
#'
#' I think it looks better when the bars are horizontal to make the category
#' names easier to read, as the names can be quite long. This will plot a bar
#' chart for the number of publications per category, sorted according to the
#' number of publications.
#'
#' These are the sources of images that are not stated to be under public domain
#' found online with filter "free to share and use".
#' No changes were made to the images unless indicated. The author and license,
#' if found, are listed here as well.
#'
#' \describe{
#' \item{drosophila.jpg}{http://gompel.org/images-2/drosophilidae}
#' \item{zebrafish.jpg}{https://thumbs.dreamstime.com/m/zebrafish-zebra-barb-danio-rerio-freshwater-aquarium-fish-isolated-white-background-50201849.jpg}
#' \item{ciona.jpg}{http://www.habitas.org.uk/marinelife/tunicata/cioints.jpg}
#' \item{xenopus.jpg}{https://en.wikipedia.org/wiki/African_clawed_frog#/media/File:Xenopus_laevis_02.jpg
#' by Brian Gratwicke. License: https://creativecommons.org/licenses/by/2.0/
#' }
#' \item{celegans.jpg}{https://en.wikipedia.org/wiki/Caenorhabditis_elegans#/media/File:Adult_Caenorhabditis_elegans.jpg
#' by Kbradnam at English Wikipedia. License: https://creativecommons.org/licenses/by-sa/2.5/
#' A smaller version of the original is used here.
#' }
#' \item{arabidopsis.jpg}{http://parts.igem.org/wiki/images/b/bd/Plants_Arabidopsis_thaliana_400px.jpg}
#' \item{skull.jpg}{http://pngimg.com/download/42558 License: https://creativecommons.org/licenses/by-nc/4.0/
#' The original was compressed and converted to jpg here.
#' }
#' \item{platynereis.jpg}{https://en.wikipedia.org/wiki/Epitoky#/media/File:PlatynereisDumeriliiFemaleEpitoke.tif
#' By Martin Gühmann. A smaller jpg version of the original is used here.
#' License: https://creativecommons.org/licenses/by-sa/4.0/
#' }
#' \item{yeast.jpg}{https://en.wikipedia.org/wiki/Shmoo#/media/File:Shmoos_s_cerevisiae.jpg
#' By Pilarbini. This is a smaller version of the original.
#' License: https://creativecommons.org/licenses/by-sa/4.0/deed.en
#' }
#' }
#'
#' @param pubs A data frame at least with a column for the category of interest.
#' @param category Column name to plot. Tidyeval is supported. If it's species
#' or language, then img_df does not have to be supplied for isotype plot since
#' the images are supplied internally.
#' @param n_top Number of top entries to plot. Especially useful for isotype.
#' @param isotype Logical, whether to make isotype plot, like one icon stands for
#' a certain number of publications.
#' @param img_df A data frame with one column with the same name as `category`
#' and another column called `image_paths` for path to the images. Relative path
#' is fine since it will be internally converted to absolute path. This argument
#' can be left as NULL if category is species or language, since in these two
#' cases, the `img_df` is provided internally.
#' @param img_unit Integer, how many publications for one icon.
#' @return A ggplot2 object.
#' @importFrom rlang enquo as_name
#' @importFrom forcats fct_infreq fct_rev
#' @importFrom grid unit
#' @importFrom ggplot2 coord_flip theme element_blank
#' @importFrom purrr map_chr map
#' @importFrom dplyr row_number desc inner_join
#' @importFrom stringr str_to_sentence
#' @export
pubs_per_cat <- function(pubs, category, n_top = NULL, isotype = FALSE, img_df = NULL,
                         img_unit = NULL) {
  # Dummy assignments to silence R CMD check notes about NSE column names.
  n <- reordered <- image <- NULL
  # Capture the bare column name so it can be used in dplyr verbs below.
  category <- enquo(category)
  if (!is.null(n_top)) {
    # Keep only the n_top categories with the most publications.
    top <- pubs %>%
      count(!!category) %>%
      filter(row_number(desc(n)) <= n_top) %>%
      pull(!!category)
    pubs <- pubs %>%
      filter(!!category %in% top)
  }
  if (isotype) {
    # Isotype bars need image reading and drawing; both are suggested deps.
    .pkg_check("magick")
    .pkg_check("ggtextures")
    image_paths <- NULL
    # For species/language, use the icon tables bundled with the package;
    # otherwise the caller must supply `img_df`.
    if (quo_name(category) == "species") {
      img_df <- species_img %>%
        mutate(image_paths = map_chr(image_paths, system.file, package = "museumst"))
    } else if (quo_name(category) == "language") {
      img_df <- lang_img %>%
        mutate(image_paths = map_chr(image_paths, system.file, package = "museumst"))
    }
    # Resolve to absolute paths and load the images into memory.
    img_df <- img_df %>%
      mutate(image_paths = map_chr(image_paths, normalizePath, mustWork = TRUE),
             image = map(image_paths, magick::image_read))
    # inner_join drops categories without an icon.
    pubs <- pubs %>%
      inner_join(img_df, by = as_name(category))
    if (is.null(img_unit)) {
      # Heuristic: aim for roughly 20 icons across the whole plot.
      img_unit <- round(nrow(pubs)/20)
      message("img_unit not supplied. Using heuristic value ", img_unit)
    }
    # Most frequent category on top after coord_flip (hence fct_rev).
    pubs <- pubs %>%
      mutate(reordered = fct_infreq(!!category) %>% fct_rev())
    p <- ggplot(pubs, aes(reordered)) +
      ggtextures::geom_isotype_bar(aes(image = image),
                                   img_width = grid::unit(img_unit, "native"),
                                   img_height = NULL,
                                   nrow = 1, ncol = NA,
                                   hjust = 0, vjust = 0.5)
  } else {
    # Plain horizontal bar chart, most frequent category on top.
    pubs <- pubs %>%
      mutate(reordered = fct_infreq(!!category) %>% fct_rev())
    p <- ggplot(pubs, aes(reordered)) + geom_bar()
  }
  p <- p +
    scale_y_continuous(expand = expansion(mult = c(0, 0.05)),
                       breaks = breaks_pretty()) +
    labs(y = "Number of publications", x = str_to_sentence(quo_name(category))) +
    coord_flip() +
    theme(panel.grid.minor = element_blank(), panel.grid.major.y = element_blank())
  p
}
#' Plot number of publications at each location
#'
#' Plots points on a map, and the areas of the points are proportional to the
#' number of publications at the location. Can facet by some category like
#' method or species.
#'
#' World map will use the Robinson projection. European map uses the LAEA Europe
#' projection (EPSG:3035), and the American map uses the US National Atlas Equal Area
#' projection (EPSG:2163) and Alaska and Hawaii are moved and included. The option
#' to zoom in on Europe and the US is available because those are the two regions
#' of the world with the most publications and it's hard to see the data when
#' plotted on a world map.
#'
#' @inheritParams pubs_per_year
#' @param city_gc From geocode_inst_city
#' @param zoom Whether to plot the world map or only Europe (centered on Western
#' Europe and some Eastern European countries are partially cropped off) or only
#' the US or only northeast Asia.
#' @param ncol Number of columns in facetted plot.
#' @param label_insts Logical, whether to label institutions.
#' @param label_cities Logical, whether to label cities.
#' @param n_label Number of top cities to label, so the labels won't clutter the
#' plot.
#' @param per_year Logical, whether to do the count for each year separately.
#' This is for making animations with gganimate.
#' @param plot Whether to plot points, rectangular bins (bin2d), or hexagonal
#' bins (hexbin). The binned options are useful when there's overplotting.
#' @param bins Numeric vector of length 2, the number of bins for bin2d or hex
#' in the x and y directions. Ignored if plotting points.
#' @return A ggplot2 object
#' @importFrom rlang !!!
#' @importFrom dplyr left_join count semi_join vars
#' @importFrom ggplot2 geom_sf scale_size_area scale_color_viridis_c coord_sf
#' @importFrom scales breaks_width
#' @importFrom ggrepel geom_label_repel
#' @export
pubs_on_map <- function(pubs, city_gc,
zoom = c("world", "europe", "usa", "ne_asia"),
plot = c("point", "bin2d", "hexbin"),
facet_by = NULL, n_top = Inf,
ncol = 3, label_insts = TRUE, label_cities = FALSE,
n_label = 10,
per_year = FALSE, bins = c(70, 70)) {
zoom <- match.arg(zoom)
plot <- match.arg(plot)
.pkg_check("sf")
.pkg_check("rnaturalearth")
.pkg_check("rnaturalearthdata")
.pkg_check("rgeos")
if (zoom == "usa") .pkg_check("urbnmapr")
if (plot == "hexbin") {
.pkg_check("hexbin")
}
if (per_year) {
.pkg_check("gganimate")
vars_count <- c("country", "state/province", "city", "year")
label_cities <- FALSE
label_insts <- FALSE
} else if (!is.null(facet_by)) {
vars_count <- c("country", "state/province", "city", "facets")
} else {
vars_count <- c("country", "state/province", "city")
}
if (!is.null(facet_by)) {
pubs <- pubs %>%
mutate(facets = fct_lump_n(!!sym(facet_by), n = n_top),
facets = fct_infreq(facets, ordered = TRUE))
if (!is.infinite(n_top)) {
pubs <- pubs %>%
mutate(facets = fct_relevel(facets, "Other", after = Inf))
}
}
inst_count <- pubs %>%
count(!!!syms(vars_count))
suppressWarnings(sf::st_crs(city_gc) <- 4326)
inst_count <- inst_count %>%
left_join(city_gc, by = c("country", "state/province", "city"))
country <- geometry <- NULL
if (zoom == "world") {
map_use <- rnaturalearth::ne_countries(scale = "small", returnclass = "sf")
# use Robinson projection
robin <- "+proj=robin +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m no_defs"
map_use <- sf::st_transform(map_use, robin)
inst_count <- inst_count %>%
mutate(geometry = sf::st_transform(geometry, robin))
# Work around an issue with gdal backward compatibility
suppressWarnings(sf::st_crs(one_world_small) <- 4326)
map_all <- sf::st_transform(one_world_small, robin)
} else if (zoom == "europe") {
map_use <- rnaturalearth::ne_countries(scale = "medium", returnclass = "sf")
crs_europe <- 3035
inst_count <- inst_count %>%
filter(country %in% europe_countries) %>%
mutate(geometry = sf::st_transform(geometry, crs = crs_europe))
# project on European transformation
map_use <- sf::st_transform(map_use, crs = crs_europe)
suppressWarnings(sf::st_crs(one_world_medium) <- 4326)
map_all <- sf::st_transform(one_world_medium, crs = crs_europe)
} else if (zoom == "usa") {
map_use <- na_w_pop
crs_usa <- 2163
inst_count <- inst_count %>%
filter(country %in% c("USA", "US", "United States",
"United States of America", "Canada", "Mexico")) %>%
mutate(geometry = sf::st_transform(geometry, crs = crs_usa))
suppressWarnings(sf::st_crs(one_world_medium) <- 4326)
map_all <- sf::st_transform(one_world_medium, crs = crs_usa)
} else if (zoom == "ne_asia") {
map_use <- ne
inst_count <- inst_count %>%
filter(country %in% c("China", "Taiwan", "Korea", "Japan", "Mongolia",
"Vietnam"))
map_all <- one_world_medium
}
if (max(inst_count$n, na.rm = TRUE) < 4) {
size_break_width <- 1
} else {
size_break_width1 <- ceiling((max(inst_count$n, na.rm = TRUE) -
min(inst_count$n, na.rm = TRUE))/3)
size_break_width2 <- ceiling((max(inst_count$n, na.rm = TRUE) -
min(inst_count$n, na.rm = TRUE))/4)
}
n <- NULL
if (plot == "point") {
p <- ggplot() +
geom_sf(data = map_use, linetype = "dotted") +
geom_sf(data = map_all, fill = NA) +
scale_size_area(breaks = breaks_width(size_break_width1),
name = "Number of\npublications") +
theme(panel.border = element_blank(), axis.title = element_blank()) +
scale_color_viridis_c(breaks_width(size_break_width2), name = "")
city2 <- city <- NULL
if (is.null(facet_by)) {
if (per_year) {
p <- p +
geom_sf(data = inst_count, aes(geometry = geometry, size = n, color = n,
group = city2),
alpha = 0.7, show.legend = "point")
} else {
p <- p +
geom_sf(data = inst_count, aes(geometry = geometry, size = n, color = n),
alpha = 0.7, show.legend = "point")
}
} else {
if (per_year) {
p <- p +
geom_sf(data = inst_count, aes(geometry = geometry, size = n,
group = city2,
color = facets),
alpha = 0.7, show.legend = "point")
} else {
inst_count_all <- inst_count %>%
group_by(country, `state/province`, city) %>%
summarize(n_all = sum(n)) %>%
left_join(inst_count[, c("country", "state/province", "city", "geometry")],
by = c("country", "state/province", "city"))
p <- p +
geom_sf(data = inst_count_all,
aes(geometry = geometry, size = n_all, color = "all"),
alpha = 0.5, color = "gray50", show.legend = "point") +
geom_sf(data = inst_count, aes(geometry = geometry, size = n,
color = n),
alpha = 0.7, show.legend = "point")
}
p <- p +
facet_wrap(vars(facets), ncol = ncol) #+
#theme(legend.position = "none")
}
if (zoom != "world") {
# Limit to that box
xylims_use <- switch (zoom,
europe = xylims,
usa = xylims_us,
ne_asia = xylims_ne
)
crs_use <- switch (zoom,
europe = crs_europe,
usa = crs_usa,
ne_asia = 4326
)
p <- p +
coord_sf(xlim = xylims_use[c("xmin", "xmax")], ylim = xylims_use[c("ymin", "ymax")],
crs = crs_use)
}
if (per_year) {
year <- NULL
p <- p +
gganimate::transition_states(year, state_length = 5, transition_length = 1) +
labs(title = "{closest_state}") +
gganimate::enter_fade() +
gganimate::exit_fade()
}
} else {
inst_count2 <- uncount(inst_count, n)
coords <- sf::st_coordinates(inst_count2$geometry)
colnames(coords) <- c("lon", "lat")
coords <- as_tibble(coords)
inst_count2 <- cbind(inst_count2, coords)
p <- ggplot() +
geom_sf(data = map_use, linetype = "dotted") +
geom_sf(data = map_all, fill = NA) +
#scale_fill_distiller(palette = "Blues", direction = 1) +
scale_fill_viridis_c() +
theme(panel.border = element_blank(), axis.title = element_blank())
if (zoom != "world") {
# Limit to that box
xylims_use <- if (zoom == "europe") xylims else xylims_us
crs_use <- if (zoom == "europe") crs_europe else crs_usa
p <- p +
coord_sf(xlim = xylims_use[c("xmin", "xmax")], ylim = xylims_use[c("ymin", "ymax")],
crs = crs_use)
}
if (plot == "hexbin") {
p <- p +
geom_hex(data = inst_count2, aes(lon, lat), bins = bins)
} else if (plot == "bin2d") {
p <- p +
geom_bin2d(data = inst_count2, aes(lon, lat), bins = bins)
}
if (!is.null(facet_by)) {
p <- p +
facet_wrap(vars(facets), ncol = ncol)
}
}
if (!per_year) {
if (label_cities) {
if (!is.null(facet_by)) {
inst_count <- inst_count %>%
group_by(facets)
}
inst_count <- inst_count %>%
mutate(city_rank = row_number(desc(n)),
city_label = case_when(city_rank <= n_label ~ city,
TRUE ~ ""))
p <- p +
geom_label_repel(data = inst_count, aes(geometry = geometry, label = city_label),
alpha = 0.7, stat = "sf_coordinates", max.overlaps = Inf)
} else if (label_insts) {
# Find out what the "top" institutions are
inst_sn <- pubs %>%
count(!!!syms(c(vars_count, "short_name")), name = "nn")
inst_count <- inst_count %>%
left_join(inst_sn, by = vars_count)
if (!is.null(facet_by)) {
inst_count <- inst_count %>%
group_by(facets)
}
inst_count <- inst_count %>%
mutate(inst_rank = row_number(desc(nn)),
inst_label = case_when(inst_rank <= n_label ~ short_name,
TRUE ~ ""))
p <- p +
geom_label_repel(data = inst_count, aes(geometry = geometry, label = inst_label),
alpha = 0.7, stat = "sf_coordinates", max.overlaps = Inf)
}
}
p
}
#' Plot per capita data as choropleth or bar plot
#'
#' For the entire world, Europe (for European countries tend to be smaller) and
#' states within the US.
#'
#' @param pubs A data frame with one row per publication and columns country and
#' for the US, also a column "state/province".
#' @param zoom Whether to plot the world map or only Europe (centered on Western
#' Europe and some Eastern European countries are partially cropped off) or only
#' the US.
#' @param plot Whether to plot choropleth or bar plot.
#' @param label_states If plotting the US, whether to label the states.
#' @return A ggplot2 object.
#' @importFrom ggplot2 scale_fill_distiller geom_col geom_sf_text
#' @importFrom stringr str_length
#' @export
pubs_per_capita <- function(pubs, zoom = c("world", "europe", "usa"),
                            plot = c("choropleth", "bar"),
                            label_states = TRUE) {
  .pkg_check("sf")
  .pkg_check("rnaturalearth")
  .pkg_check("rnaturalearthdata")
  .pkg_check("rgeos")
  zoom <- match.arg(zoom)
  plot <- match.arg(plot)
  # Consistent with pubs_on_map: urbnmapr is only needed when labeling US
  # states, so check for it up front instead of failing inside the plot code.
  if (zoom == "usa" && label_states) .pkg_check("urbnmapr")
  if (zoom != "usa") {
    if (zoom == "world") {
      map_use <- rnaturalearth::ne_countries(scale = "small", returnclass = "sf")
      if (plot == "choropleth") {
        # Robinson projection for the world map. Bug fix: the option is
        # "+no_defs"; the original string had a bare "no_defs" token without
        # the "+" prefix, which PROJ does not recognize as an option.
        robin <- "+proj=robin +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
        map_use <- sf::st_transform(map_use, robin)
        map_all <- sf::st_transform(one_world_small, robin)
      }
    } else {
      # Europe: higher-resolution country outlines, LAEA Europe projection
      # (EPSG:3035).
      map_use <- rnaturalearth::ne_countries(scale = "medium", returnclass = "sf")
      if (plot == "choropleth") {
        map_use <- sf::st_transform(map_use, 3035)
        map_all <- sf::st_transform(one_world_medium, 3035)
      }
    }
    # Silence R CMD check notes about non-standard evaluation columns.
    country <- per_capita <- pop_est <- country_full <- n <- `state/province` <-
      `2019` <- state_name <- area <- NULL
    # Harmonize country names with Natural Earth's "name" column before
    # counting publications per country.
    pubs_count <- pubs %>%
      mutate(country_full = case_when(country == "USA" ~ "United States",
                                      country == "UK" ~ "United Kingdom",
                                      TRUE ~ country)) %>%
      count(country_full, country)
    if (zoom == "europe") {
      pubs_count <- pubs_count %>%
        filter(country %in% europe_countries)
    }
    # pop_est comes from the Natural Earth attribute table.
    map_use <- map_use %>%
      left_join(pubs_count, by = c("name" = "country_full")) %>%
      mutate(per_capita = n/pop_est)
    if (plot == "bar") {
      map_use <- map_use %>%
        filter(!is.na(per_capita)) %>%
        mutate(area = fct_reorder(country, per_capita))
    }
  } else {
    # US: internal map with 2019 population estimates per state (column
    # `2019`), US National Atlas Equal Area projection (EPSG:2163).
    map_use <- na_w_pop
    map_all <- sf::st_transform(one_world_medium, 2163)
    pubs_count <- pubs %>%
      filter(country %in% c("USA", "US", "United States", "United States of America")) %>%
      count(`state/province`)
    # Convert full state names to postal abbreviations; entries already given
    # as abbreviations pass through unchanged.
    pubs_count <- pubs_count %>%
      mutate(`state/province` = case_when(
        `state/province` %in% map_use$postal ~ `state/province`,
        TRUE ~ map_use$postal[match(`state/province`, map_use$state_name)]
      ))
    map_use <- map_use %>%
      left_join(pubs_count,
                by = c("postal" = "state/province")) %>%
      mutate(per_capita = n/`2019`)
    if (plot == "bar") {
      map_use <- map_use %>%
        filter(!is.na(per_capita)) %>%
        mutate(area = fct_reorder(state_name, per_capita))
    }
  }
  if (plot == "choropleth") {
    # log10 because publication counts per capita span orders of magnitude.
    p <- ggplot(map_use) +
      geom_sf(aes(fill = log10(per_capita)), linetype = "dotted") +
      geom_sf(data = map_all, fill = NA) +
      scale_fill_distiller(palette = "Blues", na.value = "white", direction = 1,
                           name = "# pub.\nper capita\n(log10)") +
      theme(panel.border = element_blank(), axis.title = element_blank())
    if (zoom != "world") {
      # Crop to the region's bounding box (internal data) in its projection.
      xylims_use <- if (zoom == "europe") xylims else xylims_us
      crs_use <- if (zoom == "europe") 3035 else 2163
      p <- p +
        coord_sf(xlim = xylims_use[c("xmin", "xmax")], ylim = xylims_use[c("ymin", "ymax")],
                 crs = crs_use)
    }
    if (zoom == "usa" && label_states) {
      # Label the states; AK and HI are excluded since they're repositioned.
      state_labels <- urbnmapr::get_urbn_labels("states", sf = TRUE) %>%
        filter(!state_abbv %in% c("AK", "HI"))
      p <- p +
        geom_sf_text(data = state_labels, aes(geometry = geometry, label = state_abbv))
    }
  } else {
    # Bar plot: one bar per country/state, sorted by per-capita count.
    area_lab <- if (zoom == "usa") "state" else "country"
    if (zoom == "europe") {
      map_use <- map_use %>%
        filter(country %in% europe_countries)
    }
    p <- ggplot(map_use, aes(per_capita, area)) +
      geom_col() +
      scale_x_continuous(expand = expansion(mult = c(0, 0.05))) +
      labs(y = area_lab)
  }
  p
}
#' Plot heatmap to show relationship between two categorical variables
#'
#' For instance, are papers for certain techniques more likely to be in certain
#' journals? Is there an association between species and journal? This is just
#' for visualization. Use `fisher.test` to see if it's significant. I still wonder
#' if I should rewrite this with ggplot2, which is more work than base R in this
#' case.
#'
#' @inheritParams pubs_per_year
#' @param row_var Variable for rows of the heatmap. Tidyeval is supported.
#' @param col_var Variable for columns of the heatmap.
#' @param ... Extra arguments to pass to `heatmap`
#' @return A base R heatmap is plotted to the current device.
#' @importFrom dplyr pull
#' @importFrom tidyr pivot_wider
#' @importFrom stats heatmap
#' @export
cat_heatmap <- function(pubs, row_var, col_var, ...) {
  rv <- enquo(row_var)
  cv <- enquo(col_var)
  # Cross-tabulate the two categorical variables into a wide data frame:
  # one row per level of row_var, one column per level of col_var.
  counts_wide <- pubs %>%
    count(!!rv, !!cv) %>%
    pivot_wider(names_from = !!cv, values_from = "n")
  # The first column holds the row labels; the rest becomes the matrix body.
  counts_mat <- as.matrix(counts_wide[, -1])
  rownames(counts_mat) <- pull(counts_wide, !!rv)
  # Combinations absent from the data come out of pivot_wider as NA,
  # which here means a count of zero.
  counts_mat[is.na(counts_mat)] <- 0
  heatmap(counts_mat, ...)
}
#' Plot histogram for each value of a logical variable
#'
#' Plots 3 histograms showing the number of publications per year for TRUE, FALSE,
#' and NA, with the histogram overlaid on top of a translucent one for all
#' values. There's one facet per row so it's easy to compare how things change
#' with time.
#'
#' @inheritParams pubs_per_year
#' @param col_use Which logical variable to plot. Tidyeval is supported.
#' @return A ggplot2 object
#' @importFrom ggplot2 geom_histogram facet_grid scale_fill_brewer
#' @export
hist_bool <- function(pubs, col_use, binwidth = 365, preprints = TRUE) {
  # Silence R CMD check notes about non-standard evaluation columns.
  date_published <- journal <- v <- NULL
  col_use <- enquo(col_use)
  if (!preprints) {
    # Drop preprint servers when only peer-reviewed output is wanted.
    pubs <- filter(pubs, !journal %in% c("bioRxiv", "arXiv"))
  }
  # v holds the logical variable being summarized; one facet per value.
  pubs <- mutate(pubs, v = !!col_use)
  # Translucent gray histogram of *all* publications, repeated in every facet
  # (its data drops v, so facetting does not split it).
  background <- geom_histogram(aes(fill = 'all'), alpha = 0.7, fill = "gray70",
                               data = select(pubs, -v), binwidth = binwidth)
  p <- ggplot(pubs, aes(date_published)) +
    background +
    geom_histogram(aes(fill = v), binwidth = binwidth) +
    facet_grid(rows = vars(v)) +
    scale_y_continuous(breaks = breaks_pretty(), expand = expansion(c(0, 0.05))) +
    scale_x_date(breaks = breaks_pretty(10)) +
    scale_fill_brewer(palette = "Set1", na.value = "gray50") +
    theme(panel.grid.minor = element_blank(), legend.position = "none") +
    labs(x = "date published")
  p
}
#' Plot outlines of histogram for a logical variable
#'
#' Kind of like `hist_bool`, but instead of plotting TRUE, FALSE, and NA in 3
#' separate facets, it plots them as an outline overlaid on a translucent
#' histogram for all values. This is useful when facetting with another categorical
#' variable, such as programming language.
#'
#' @inheritParams hist_bool
#' @inheritParams pubs_per_year
#' @inheritParams pubs_on_map
#' @importFrom tidyr complete
#' @importFrom ggplot2 scale_x_continuous
#' @importFrom rlang quo_name
#' @importFrom stringr str_to_sentence
#' @export
hist_bool_line <- function(pubs, col_use, facet_by = NULL, ncol = 3, n_top = Inf,
                           binwidth = 365, preprints = TRUE) {
  # Silence R CMD check notes about non-standard evaluation columns.
  date_published <- journal <- v <- NULL
  col_use <- enquo(col_use)
  if (!preprints) {
    pubs <- filter(pubs, !journal %in% c("bioRxiv", "arXiv"))
  }
  # v holds the logical variable whose TRUE/FALSE/NA counts are stacked.
  pubs <- mutate(pubs, v = !!col_use)
  if (is.null(facet_by)) {
    pubs <- group_by(pubs, v)
  } else {
    # Keep the n_top most frequent categories, lump the rest into "Other",
    # and order facets by frequency with "Other" last.
    pubs <- pubs %>%
      mutate(facets = fct_lump_n(!!sym(facet_by), n = n_top,
                                 ties.method = "first"),
             facets = fct_infreq(facets))
    if ("Other" %in% pubs$facets) {
      pubs <- mutate(pubs, facets = fct_relevel(facets, "Other", after = Inf))
    }
    pubs <- group_by(pubs, v, facets)
  }
  p <- ggplot(pubs, aes(date_published, after_stat(count))) +
    geom_histogram(aes(fill = v), alpha = 0.7, binwidth = binwidth) +
    scale_y_continuous(breaks = breaks_pretty(), expand = expansion(c(0, 0.05))) +
    scale_x_date(breaks = breaks_pretty(10)) +
    scale_fill_brewer(name = str_to_sentence(quo_name(col_use)),
                      palette = "Set1", na.value = "gray50") +
    theme(panel.grid.minor = element_blank(), legend.position = "top") +
    labs(y = "count", x = "date published")
  if (!is.null(facet_by)) {
    p <- p + facet_wrap(~ facets, ncol = ncol)
  }
  p
}
#' Test whether something is associated with time
#'
#' Fits a logistic regression model with glm to use year to predict proportion
#' of a logical variable is TRUE, and tests whether beta is 0.
#'
#' @inheritParams hist_bool
#' @return A glm object is returned invisibly. The summary is printed to screen
#' @importFrom dplyr group_by summarize
#' @importFrom stats glm
#' @export
test_year_bool <- function(pubs, col_use, preprints = TRUE) {
  # Silence R CMD check note about the journal column.
  journal <- NULL
  col_use <- enquo(col_use)
  if (!preprints) {
    pubs <- filter(pubs, !journal %in% c("bioRxiv", "arXiv"))
  }
  # Logistic regression of the logical variable on publication date; the
  # date_published coefficient tests for a trend over time.
  model_df <- mutate(pubs, bool_use = !!col_use)
  fit <- glm(bool_use ~ date_published, data = model_df, family = "binomial")
  print(summary(fit))
  invisible(fit)
}
#' Prequel vs current binned over time
#'
#' Plots freqpoly for prequel vs current, with an option to start both at the
#' date when the first publication in the category appeared. The point here is
#' not to compare to the distribution of everything, like in hist_bool, or to
#' compare when things started, like when I plotted a histogram of different
#' methods over time, but to compare the height of the histograms and how steeply
#' they rise and fall. So I think freqpoly may be better than blocky histograms
#' for this purposes.
#'
#' @inheritParams pubs_per_year
#' @inheritParams hist_bool
#' @param since_first Logical. Whether to plot days after the first publication
#' appeared.
#' @param do_smooth Logical. Whether to plot smoothed curve for the trend rather
#' than freqpoly.
#' @param smooth_method Method of smoothing, passed to \code{geom_smooth}.
#' @param smooth_formula Formula of smoothing, passed to \code{geom_smooth}.
#' @return A ggplot2 object
#' @importFrom ggplot2 scale_color_brewer geom_freqpoly scale_fill_discrete
#' @importFrom ggplot2 after_stat
#' @export
era_freqpoly <- function(pubs, col_use, since_first = FALSE, binwidth = 365,
                         preprints = TRUE, do_smooth = FALSE,
                         smooth_method = NULL, smooth_formula = NULL) {
  # Silence R CMD check notes about non-standard evaluation columns.
  journal <- date_published <- days_since_first <- NULL
  col_use <- enquo(col_use)
  if (!preprints) {
    pubs <- pubs %>%
      filter(!journal %in% c("bioRxiv", "arXiv"))
  }
  if (since_first) {
    # Align each category at day 0 of its own first publication, then bin
    # the day offsets into binwidth-day intervals.
    df_plt <- pubs %>%
      group_by(!!col_use) %>%
      mutate(days_since_first = as.numeric(date_published - min(date_published)))
    breaks_use <- seq(-binwidth, max(df_plt$days_since_first), by = binwidth)
    df_plt <- df_plt %>%
      mutate(date_bin = cut(days_since_first, breaks_use, right = TRUE,
                            labels = FALSE)) %>%
      group_by(!!col_use, date_bin, .drop = FALSE) %>%
      count() %>%
      # x is the right edge (in days) of each bin. Flag each group's last
      # (typically incomplete) bin so it can be drawn as a point rather than
      # joined into the line/smooth, mirroring the calendar-date branch below.
      # Bug fix: the original compared x (a day offset) to the bin *index*
      # (date_bin[which.max(date_bin)]), which was almost never TRUE.
      mutate(x = breaks_use[date_bin+1],
             is_last = date_bin == max(date_bin, na.rm = TRUE))
    p <- ggplot(df_plt, aes(x, n, color = !!col_use)) +
      labs(x = "Days since the first publication")
  } else {
    # Bin calendar dates directly; the last level of the cut is the final,
    # typically incomplete, bin.
    df_plt <- pubs %>%
      mutate(x = cut(date_published, paste(binwidth, "days"), right = TRUE,
                     include.lowest = TRUE)) %>%
      group_by(!!col_use, x, .drop = FALSE) %>%
      count() %>%
      mutate(is_last = x == tail(levels(x), 1),
             x = as.Date(x))
    p <- ggplot(df_plt, aes(x, n, color = !!col_use)) +
      labs(x = "Date published")
  }
  if (do_smooth) {
    # Smooth the complete bins; mark the incomplete last bin with an X shape.
    p <- p +
      geom_smooth(data = df_plt %>% filter(!is_last), se = FALSE,
                  method = smooth_method, formula = smooth_formula) +
      geom_point(aes(shape = is_last)) +
      scale_shape_manual(values = c(16, 4))
  } else {
    p <- p +
      geom_line(data = df_plt %>% filter(!is_last)) +
      geom_point(data = df_plt %>% filter(is_last), shape = 4)
  }
  p <- p +
    scale_color_brewer(name = str_to_sentence(quo_name(col_use)),
                       palette = "Set1", na.value = "gray50") +
    labs(y = "Number of publications")
  p
}
# From https://stackoverflow.com/a/44090582/8916916
gtable_stack <- function(g1, g2) {
  # Overlay gtable g2 on top of g1: append g2's grobs, push g1's layout rows
  # to non-positive z (so the new grobs draw above them), relabel them "g2",
  # and concatenate the layout tables.
  stacked <- g1
  stacked$grobs <- append(g1$grobs, g2$grobs)
  base_layout <- g1$layout
  base_layout$z <- base_layout$z - max(base_layout$z)
  base_layout$name <- "g2"
  stacked$layout <- rbind(base_layout, g2$layout)
  stacked
}
gtable_select <- function (x, ...) {
  # Subset a gtable: keep only the layout rows and grobs selected by the
  # logical index passed through `...`.
  keep <- c(...)
  x$layout <- x$layout[keep, , drop = FALSE]
  x$grobs <- x$grobs[keep]
  x
}
#' Color facet strips by a variable
#'
#' @param p A ggplot object for the original facetted plot.
#' @param strip_color Categorical column in the data for the original plot to
#' use to color the facet strips. Tidyeval is used here.
#' @param palette A character vector of colors. Can be named to assign each
#' color to a value in `strip_color`.
#' @return Nothing, the plot is printed to device.
#' @importFrom ggplot2 ggplotGrob
#' @importFrom grid grid.newpage grid.draw
#' @export
plot_facets_color <- function(p, strip_color, palette) {
  strip_color <- enquo(strip_color)
  # Build a "dummy" plot with the same data and facetting as p, whose panels
  # are just full-panel rectangles filled by strip_color. Those panel grobs
  # are later shifted up into the strip row to act as colored facet strips.
  dummy <- ggplot(p$data, p$mapping) +
    facet_wrap(vars(!!!p$facet$params$facets),
               ncol = p$facet$params$ncol,
               labeller = p$facet$params$labeller) +
    geom_rect(aes(fill = !!strip_color), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
    theme_minimal() +
    scale_fill_manual(values = palette)
  if ("legend.position" %in% names(p$theme)) {
    # Mirror the original plot's legend placement so the two gtable layouts
    # line up cell-for-cell when stacked.
    dummy <- dummy +
      theme(legend.position = p$theme$legend.position)
  }
  g1 <- ggplotGrob(p)
  g2 <- ggplotGrob(dummy)
  # Move dummy panels one cell up, into the top-strip row of the layout.
  panels <- grepl(pattern="panel", g2$layout$name)
  strips <- grepl(pattern="strip-t", g2$layout$name)
  g2$layout$t[panels] <- g2$layout$t[panels] - 1
  g2$layout$b[panels] <- g2$layout$b[panels] - 1
  new_strips <- gtable_select(g2, panels | strips)
  # Stack new strips on top of the original gtable.
  # Ideally you'd remove the old strips; for now they're just covered.
  new_plot <- gtable_stack(g1, new_strips)
  grid.newpage()
  grid.draw(new_plot)
}
| /R/plot.R | permissive | huangsunan/museumst | R | false | false | 42,333 | r | #' Construct plotmath labels for time lines
#'
#' Internal, called by `plot_timeline`.
#'
#' @param description The text for one item, to be shown beneath its image.
#' A single string; long text is wrapped to `description_width` characters.
#' @param description_width Width of the description text in characters to wrap
#' the text in the labels.
#' @param ref_number A string of the number for reference. If NA, then the
#' reference will not be included in the plot.
#' @return A character vector of plotmath, to enable superscripts, italics, and
#' text wrapping.
#' @importFrom stringr str_replace_all
make_labs <- function(description, description_width, ref_number) {
  # Wrap long descriptions and join the pieces with HTML line breaks, which
  # gridtext renders inside rich-text timeline labels.
  wrapped <- paste0(strwrap(description, width = description_width),
                    collapse = "<br>")
  if (is.na(ref_number)) {
    return(wrapped)
  }
  # Append the reference number as a superscript.
  paste0(wrapped, "<sup>", ref_number, "</sup>")
}
#' Plot time line with images and descriptions of each event
#'
#' The time line is plotted horizontally with ticks for years. The labels with
#' descriptions are above and/or below that line.
#'
#' @param events_df A data frame with at least these columns:
#' \describe{
#' \item{date_published}{A vector of Date objects for dates when the event of
#' interest was published. Note that the actual time when those events occurred
#' is most likely earlier, sometimes much earlier than the publication date,
#' and the events might have become quite influential before their publication.
#' But the publication date is the easiest way to get an actual date.}
#' \item{description}{Short descriptions of the events. The plot won't look
#' good if the descriptions are too long.}
#' }
#' @param ys A numeric vector of the y coordinates of the items. Since I don't
#' know how to implement the ggrepel algorithm to make sure that the labels don't
#' overlap for geom_richtext, I have to manually set the y coordinates to make
#' sure that the labels don't overlap and look good.
#' @param description_width Width of the description text in characters to wrap
#' the text in the labels.
#' @param expand_x A numeric vector of length 2 of the proportion to expand the
#' x axis on the left and the right. This is a way to manually make sure that the
#' labels are not cropped off at the edge of the plot.
#' @param expand_y Same as expand_x, but for the y axis.
#' @param include_refs Logical, indicating whether to include references in the
#' text box.
#' @return A ggplot2 object for the plot.
#' @importFrom dplyr case_when arrange between
#' @importFrom purrr map2_chr
#' @importFrom lubridate floor_date ceiling_date
#' @importFrom ggplot2 ggplot geom_point aes geom_hline geom_segment scale_x_date
#' expansion scale_y_continuous theme_void annotate scale_fill_manual
#' @importFrom rlang %||%
#' @export
plot_timeline <- function(events_df, ys, description_width = 20,
                          expand_x = c(0.1, 0.1), expand_y = c(0.05, 0.05),
                          include_refs = TRUE) {
  .pkg_check("grid")
  .pkg_check("gridtext")
  # Silence R CMD check notes about non-standard evaluation columns.
  image <- date_published <- description <- lab <- vjusts <- NULL
  # sheet_fill is a package-internal named vector mapping sheet type to a fill
  # color (used below in scale_fill_manual); prefix each description with its
  # publication year.
  events_df <- events_df %>%
    mutate(sheet = factor(sheet, levels = names(sheet_fill)),
           description = paste0(year(date_published), " ", description))
  if (include_refs) {
    # One rich-text label per event, with the reference number as superscript.
    # NOTE(review): assumes events_df has a ref_number column — confirm.
    events_df <- events_df %>%
      mutate(lab = map2_chr(description, ref_number,
                            ~ make_labs(.x, description_width, .y)))
  } else {
    events_df <- events_df %>%
      mutate(lab = map_chr(description,
                           ~ make_labs(.x, description_width,
                                       ref_number = NA)))
  }
  # Sort by date first; ys is then assigned positionally, so the caller's ys
  # vector must correspond to events in chronological order. Labels above the
  # line (ys >= 0) hang down from their anchor (vjust 1), those below sit up.
  events_df <- events_df %>%
    arrange(date_published) %>%
    mutate(vjusts = case_when(ys >= 0 ~ 1,
                              TRUE ~ 0),
           ys = ys)
  # Choose a tick spacing so roughly 10-20 year ticks appear regardless of the
  # total span of the timeline.
  yrs_range <- max(year(events_df$date_published)) - min(year(events_df$date_published))
  date_brks <- case_when(yrs_range <= 20 ~ "1 year",
                         between(yrs_range, 20, 50) ~ "2 years",
                         between(yrs_range, 50, 100) ~ "5 years",
                         TRUE ~ "10 years")
  axis <- seq(floor_date(min(events_df$date_published), "year"),
              ceiling_date(max(events_df$date_published), "year"),
              by = date_brks)
  # Only show legend entries for sheet types actually present.
  sheet_fill <- sheet_fill[names(sheet_fill) %in% unique(events_df$sheet)]
  # Horizontal timeline at y = 0 with segments connecting each event to its
  # label; ticks and year labels are drawn manually with annotate since
  # theme_void removes the axes.
  p <- ggplot(events_df) +
    geom_point(aes(x = date_published), y = 0) +
    geom_hline(yintercept = 0) +
    geom_segment(aes(x = date_published, y = 0, xend = date_published, yend = ys)) +
    geom_richtext(aes(x = date_published, y = ys, fill = sheet,
                      label = lab, vjust = vjusts)) +
    scale_x_date(expand = expansion(expand_x)) +
    scale_y_continuous(expand = expansion(expand_y)) +
    scale_fill_manual(values = sheet_fill, name = "Type") +
    theme_void() +
    annotate("point", x = axis, y = 0, shape = 3) +
    annotate("text", x = axis, y = 0, label = format(axis, "%Y"),
             color = "gray50", vjust = 1.4)
  p
}
#' Number of publications per year
#'
#' Plot bar plot of the number of publications per year. I find facetting
#' makes the plot easier to read than filling with different colors.
#'
#' @param pubs A data frame with at least these columns:
#' \describe{
#' \item{journal}{Name of the journal of the paper.}
#' \item{year}{Year when the paper was published.}
#' }
#' There must be one row per publication or per method or species for each title
#' if faceting by those. If facetting, then a column whose name is the value in
#' `facet_by` must be present.
#' @param facet_by Name of a column for facetting.
#' @param fill_by Name of a column of a categorical variable with which to color
#' the histogram.
#' @param binwidth Width of bins for the histogram in days.
#' @param preprints Logical, whether preprints should be included. Defaults to
#' `TRUE` to include preprints.
#' @param n_top Number of categories with the most publications to plot in facets;
#' the other categories are lumped into "other".
#' @param n_top_fill Number of categories with the most publications to be
#' differentiated by color.
#' @param sort_by How to sort the facets. first_appeared means the category that
#' appeared earlier will be nearer to the top. count means the category with more
#' count (number of publications) will be nearer to the top. Ignored if not
#' facetting.
#' @return A ggplot2 object.
#' @importFrom dplyr filter select
#' @importFrom forcats fct_reorder fct_lump_n
#' @importFrom rlang !! sym
#' @importFrom ggplot2 geom_bar scale_x_continuous labs theme facet_wrap ggproto
#' layer
#' @importFrom scales breaks_pretty
#' @export
pubs_per_year <- function(pubs, facet_by = NULL, fill_by = NULL, binwidth = 365,
                          preprints = TRUE, n_top = Inf, n_top_fill = Inf,
                          sort_by = c("first_appeared", "count", "recent_count")) {
  # Silence R CMD check notes about non-standard evaluation columns.
  journal <- date_published <- facets <- NULL
  sort_by <- match.arg(sort_by)
  if (!preprints) {
    pubs <- pubs %>%
      filter(!journal %in% c("bioRxiv", "arXiv"))
  }
  if (!is.null(facet_by)) {
    pubs <- pubs %>%
      mutate(facets = !!sym(facet_by))
    # Order facet levels per sort_by; w is the lumping weight used below so
    # that "recent_count" also lumps by recent activity rather than raw count.
    if (sort_by == "first_appeared") {
      pubs <- pubs %>%
        mutate(facets = fct_reorder(facets, date_published, .fun = "min"),
               w = 1)
    } else if (sort_by == "count") {
      pubs <- pubs %>%
        mutate(facets = fct_infreq(facets),
               w = 1)
    } else {
      # recent_count: weight only publications within the last two bins.
      date_thresh <- lubridate::as_date(max(pubs$date_published) - lubridate::ddays(binwidth) * 2)
      pubs <- pubs %>%
        mutate(w = as.numeric(date_published > date_thresh),
               facets = fct_reorder(facets, w, .fun = "sum", .desc = TRUE))
    }
    pubs <- pubs %>%
      mutate(facets = fct_lump_n(facets, n = n_top, ties.method = "first", w = w))
  }
  if (!is.null(fill_by)) {
    if (fill_by == "species") {
      # Use fixed colors for species; species_cols is a package-internal named
      # color vector. Species without a fixed color are lumped into "Other".
      pubs <- pubs %>%
        mutate(fill = case_when(species %in% names(species_cols) ~ species,
                                TRUE ~ "Other"),
               fill = fct_infreq(fill) %>% fct_relevel("Other", after = Inf))
    } else {
      # Branch on whether fill_by is categorical or numeric.
      is_discrete <- is.character(pubs[[fill_by]]) | is.factor(pubs[[fill_by]])
      if (is_discrete) {
        # Paired palette tops out at 12 colors: 11 categories + "Other".
        if (n_top_fill > 11) {
          warning("Maximum of 12 colors are supported for colorblind friendly palette, ",
                  "less common categories are lumped into Other.")
          n_top_fill <- 11
        }
        pubs <- pubs %>%
          mutate(fill = fct_infreq(!!sym(fill_by)),
                 fill = fct_lump_n(fill, n = n_top_fill, ties.method = "first"))
        if ("Other" %in% pubs$fill) {
          pubs <- pubs %>%
            mutate(fill = fct_infreq(fill) %>% fct_relevel("Other", after = Inf))
        }
      } else {
        # Small integer ranges get one discrete color per value; other numeric
        # columns are cut into at most 9 bins (Blues-style binned palette cap).
        use_int <- is.integer(pubs[[fill_by]]) & length(unique(pubs[[fill_by]])) < 10
        if (use_int) {
          pubs <- pubs %>%
            mutate(fill = factor(!!sym(fill_by), levels = seq.int(min(pubs[[fill_by]]),
                                                                  max(pubs[[fill_by]]), 1)))
        } else {
          if (n_top_fill > 9) {
            warning("Maximum of 9 colors are supported for binned palette.")
            n_top_fill <- 9
          }
          pubs <- pubs %>%
            mutate(fill = cut(!!sym(fill_by), breaks = n_top_fill))
        }
      }
    }
  }
  p <- ggplot(pubs, aes(date_published))
  if (!is.null(facet_by)) {
    # Gray background histogram of all publications, shown in every facet
    # (its data drops the facets column so facetting doesn't split it).
    p <- p +
      geom_histogram(aes(fill = "all"), binwidth = binwidth,
                     data = select(pubs, -facets),
                     fill = "gray70", alpha = 0.5, show.legend = FALSE)
  }
  if (!is.null(fill_by)) {
    p <- p +
      geom_histogram(aes(fill = fill), binwidth = binwidth)
    if (fill_by != "species") {
      if (is_discrete) {
        # Set2 reads better for few categories; Paired supports up to 12.
        pal_use <- ifelse(n_top_fill > 7, "Paired", "Set2")
        p <- p +
          scale_fill_brewer(palette = pal_use)
      } else if (use_int) {
        # Fixed viridis color per integer value across the full range, so
        # colors stay comparable even when some values are absent.
        n_viridis <- max(pubs[[fill_by]]) - min(pubs[[fill_by]]) + 1
        pal_use <- scales::viridis_pal()(n_viridis)
        names(pal_use) <- as.character(seq.int(min(pubs[[fill_by]]),
                                               max(pubs[[fill_by]]), 1))
        pal_use <- pal_use[as.character(sort(unique(pubs$fill)))]
        p <- p + scale_fill_manual(values = pal_use, drop = TRUE)
      } else {
        p <- p +
          scale_fill_viridis_d()
      }
    } else {
      # Species names are italicized per biological naming convention.
      species_cols <- species_cols[names(species_cols) %in% unique(pubs$fill)]
      p <- p +
        scale_fill_manual(values = species_cols, name = "", drop = TRUE) +
        theme(legend.text = element_text(face = "italic"))
    }
  } else {
    p <- p +
      geom_histogram(binwidth = binwidth)
  }
  p <- p +
    scale_y_continuous(expand = expansion(mult = c(0, 0.05)),
                       breaks = breaks_pretty()) +
    scale_x_date(breaks = breaks_pretty(10)) +
    labs(y = "Number of publications", x = "Date published") +
    theme(panel.grid.minor = element_blank())
  if (!is.null(facet_by)) {
    p <- p +
      facet_wrap(~ facets, ncol = 1)
  }
  p
}
#' Number of publications per category
#'
#' I think it looks better when the bars are horizontal to make the category
#' names easier to read, as the names can be quite long. This will plot a bar
#' chart for the number of publications per category, sorted according to the
#' number of publications.
#'
#' These are the sources of images that are not stated to be under public domain
#' found online with filter "free to share and use".
#' No changes were made to the images unless indicated. The author and license,
#' if found, are listed here as well.
#'
#' \describe{
#' \item{drosophila.jpg}{http://gompel.org/images-2/drosophilidae}
#' \item{zebrafish.jpg}{https://thumbs.dreamstime.com/m/zebrafish-zebra-barb-danio-rerio-freshwater-aquarium-fish-isolated-white-background-50201849.jpg}
#' \item{ciona.jpg}{http://www.habitas.org.uk/marinelife/tunicata/cioints.jpg}
#' \item{xenopus.jpg}{https://en.wikipedia.org/wiki/African_clawed_frog#/media/File:Xenopus_laevis_02.jpg
#' by Brian Gratwicke. License: https://creativecommons.org/licenses/by/2.0/
#' }
#' \item{celegans.jpg}{https://en.wikipedia.org/wiki/Caenorhabditis_elegans#/media/File:Adult_Caenorhabditis_elegans.jpg
#' by Kbradnam at English Wikipedia. License: https://creativecommons.org/licenses/by-sa/2.5/
#' A smaller version of the original is used here.
#' }
#' \item{arabidopsis.jpg}{http://parts.igem.org/wiki/images/b/bd/Plants_Arabidopsis_thaliana_400px.jpg}
#' \item{skull.jpg}{http://pngimg.com/download/42558 License: https://creativecommons.org/licenses/by-nc/4.0/
#' The original was compressed and converted to jpg here.
#' }
#' \item{platynereis.jpg}{https://en.wikipedia.org/wiki/Epitoky#/media/File:PlatynereisDumeriliiFemaleEpitoke.tif
#' By Martin Gühmann. A smaller jpg version of the original is used here.
#' License: https://creativecommons.org/licenses/by-sa/4.0/
#' }
#' \item{yeast.jpg}{https://en.wikipedia.org/wiki/Shmoo#/media/File:Shmoos_s_cerevisiae.jpg
#' By Pilarbini. This is a smaller version of the original.
#' License: https://creativecommons.org/licenses/by-sa/4.0/deed.en
#' }
#' }
#'
#' @param pubs A data frame at least with a column for the category of interest.
#' @param category Column name to plot. Tidyeval is supported. If it's species
#' or language, then img_df does not have to be supplied for isotype plot since
#' the images are supplied internally.
#' @param n_top Number of top entries to plot. Especially useful for isotype.
#' @param isotype Logical, whether to make isotype plot, like one icon stands for
#' a certain number of publications.
#' @param img_df A data frame with one column with the same name as `category`
#' and another column called `image_paths` for path to the images. Relative path
#' is fine since it will be internally converted to absolute path. This argument
#' can be left as NULL if category is species or language, since in these two
#' cases, the `img_df` is provided internally.
#' @param img_unit Integer, how many publications for one icon.
#' @return A ggplot2 object.
#' @importFrom rlang enquo as_name
#' @importFrom forcats fct_infreq fct_rev
#' @importFrom grid unit
#' @importFrom ggplot2 coord_flip theme element_blank
#' @importFrom purrr map_chr map
#' @importFrom dplyr row_number desc inner_join
#' @importFrom stringr str_to_sentence
#' @export
pubs_per_cat <- function(pubs, category, n_top = NULL, isotype = FALSE, img_df = NULL,
                         img_unit = NULL) {
  # Silence R CMD check notes about non-standard evaluation columns.
  n <- reordered <- image <- NULL
  category <- enquo(category)
  if (!is.null(n_top)) {
    # Keep only the n_top most frequent categories.
    top <- pubs %>%
      count(!!category) %>%
      filter(row_number(desc(n)) <= n_top) %>%
      pull(!!category)
    pubs <- pubs %>%
      filter(!!category %in% top)
  }
  if (isotype) {
    .pkg_check("magick")
    .pkg_check("ggtextures")
    image_paths <- NULL
    # For species and language, use the image tables bundled with the package;
    # otherwise the caller must supply img_df.
    if (quo_name(category) == "species") {
      img_df <- species_img %>%
        mutate(image_paths = map_chr(image_paths, system.file, package = "museumst"))
    } else if (quo_name(category) == "language") {
      img_df <- lang_img %>%
        mutate(image_paths = map_chr(image_paths, system.file, package = "museumst"))
    }
    # Load each icon once; mustWork = TRUE errors early on a missing file.
    img_df <- img_df %>%
      mutate(image_paths = map_chr(image_paths, normalizePath, mustWork = TRUE),
             image = map(image_paths, magick::image_read))
    # inner_join drops categories that have no icon.
    pubs <- pubs %>%
      inner_join(img_df, by = as_name(category))
    if (is.null(img_unit)) {
      # Heuristic: about 20 icons across the whole plot.
      img_unit <- round(nrow(pubs)/20)
      message("img_unit not supplied. Using heuristic value ", img_unit)
    }
    # Most frequent category at the top once coord_flip is applied.
    pubs <- pubs %>%
      mutate(reordered = fct_infreq(!!category) %>% fct_rev())
    p <- ggplot(pubs, aes(reordered)) +
      ggtextures::geom_isotype_bar(aes(image = image),
                                   img_width = grid::unit(img_unit, "native"),
                                   img_height = NULL,
                                   nrow = 1, ncol = NA,
                                   hjust = 0, vjust = 0.5)
  } else {
    pubs <- pubs %>%
      mutate(reordered = fct_infreq(!!category) %>% fct_rev())
    p <- ggplot(pubs, aes(reordered)) + geom_bar()
  }
  # Horizontal bars so long category names stay readable.
  p <- p +
    scale_y_continuous(expand = expansion(mult = c(0, 0.05)),
                       breaks = breaks_pretty()) +
    labs(y = "Number of publications", x = str_to_sentence(quo_name(category))) +
    coord_flip() +
    theme(panel.grid.minor = element_blank(), panel.grid.major.y = element_blank())
  p
}
#' Plot number of publications at each location
#'
#' Plots points on a map, and the areas of the points are proportional to the
#' number of publications at the location. Can facet by some category like
#' method or species.
#'
#' World map will use the Robinson projection. European map uses the LAEA Europe
#' projection (EPSG:3035), and the American map uses the US National Atlas Equal Area
#' projection (EPSG:2163) and Alaska and Hawaii are moved and included. The option
#' to zoom in on Europe and the US is available because those are the two regions
#' of the world with the most publications and it's hard to see the data when
#' plotted on a world map.
#'
#' @inheritParams pubs_per_year
#' @param city_gc From geocode_inst_city
#' @param zoom Whether to plot the world map or only Europe (centered on Western
#' Europe and some Eastern European countries are partially cropped off) or only
#' the US or only northeast Asia.
#' @param ncol Number of columns in facetted plot.
#' @param label_insts Logical, whether to label institutions.
#' @param label_cities Logical, whether to label cities.
#' @param n_label Number of top cities to label, so the labels won't clutter the
#' plot.
#' @param per_year Logical, whether to do the count for each year separately.
#' This is for making animations with gganimate.
#' @param plot Whether to plot points, rectangular bins (bin2d), or hexagonal
#' bins (hexbin). The binned options are useful when there's overplotting.
#' @param bins Numeric vector of length 2, the number of bins for bin2d or hex
#' in the x and y directions. Ignored if plotting points.
#' @return A ggplot2 object
#' @importFrom rlang !!!
#' @importFrom dplyr left_join count semi_join vars
#' @importFrom ggplot2 geom_sf scale_size_area scale_color_viridis_c coord_sf
#' @importFrom scales breaks_width
#' @importFrom ggrepel geom_label_repel
#' @export
pubs_on_map <- function(pubs, city_gc,
                        zoom = c("world", "europe", "usa", "ne_asia"),
                        plot = c("point", "bin2d", "hexbin"),
                        facet_by = NULL, n_top = Inf,
                        ncol = 3, label_insts = TRUE, label_cities = FALSE,
                        n_label = 10,
                        per_year = FALSE, bins = c(70, 70)) {
  zoom <- match.arg(zoom)
  plot <- match.arg(plot)
  # The mapping stack consists of suggested packages; fail early and clearly.
  .pkg_check("sf")
  .pkg_check("rnaturalearth")
  .pkg_check("rnaturalearthdata")
  .pkg_check("rgeos")
  if (zoom == "usa") .pkg_check("urbnmapr")
  if (plot == "hexbin") {
    .pkg_check("hexbin")
  }
  # Columns that identify one "location" when counting publications.
  # Animations additionally count per year; facetted plots per facet level.
  if (per_year) {
    .pkg_check("gganimate")
    vars_count <- c("country", "state/province", "city", "year")
    # Static labels don't make sense on animated frames.
    label_cities <- FALSE
    label_insts <- FALSE
  } else if (!is.null(facet_by)) {
    vars_count <- c("country", "state/province", "city", "facets")
  } else {
    vars_count <- c("country", "state/province", "city")
  }
  if (!is.null(facet_by)) {
    # Lump rare facet levels into "Other" and order levels by frequency,
    # keeping "Other" last when lumping actually happened.
    pubs <- pubs %>%
      mutate(facets = fct_lump_n(!!sym(facet_by), n = n_top),
             facets = fct_infreq(facets, ordered = TRUE))
    if (!is.infinite(n_top)) {
      pubs <- pubs %>%
        mutate(facets = fct_relevel(facets, "Other", after = Inf))
    }
  }
  inst_count <- pubs %>%
    count(!!!syms(vars_count))
  # Work around a GDAL backward-compatibility warning when assigning the CRS.
  suppressWarnings(sf::st_crs(city_gc) <- 4326)
  inst_count <- inst_count %>%
    left_join(city_gc, by = c("country", "state/province", "city"))
  # Silence R CMD check notes about NSE column names.
  country <- geometry <- NULL
  if (zoom == "world") {
    map_use <- rnaturalearth::ne_countries(scale = "small", returnclass = "sf")
    # Use the Robinson projection for the world map.
    robin <- "+proj=robin +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m no_defs"
    map_use <- sf::st_transform(map_use, robin)
    inst_count <- inst_count %>%
      mutate(geometry = sf::st_transform(geometry, robin))
    # Work around an issue with gdal backward compatibility
    suppressWarnings(sf::st_crs(one_world_small) <- 4326)
    map_all <- sf::st_transform(one_world_small, robin)
  } else if (zoom == "europe") {
    map_use <- rnaturalearth::ne_countries(scale = "medium", returnclass = "sf")
    # LAEA Europe projection (EPSG:3035).
    crs_europe <- 3035
    inst_count <- inst_count %>%
      filter(country %in% europe_countries) %>%
      mutate(geometry = sf::st_transform(geometry, crs = crs_europe))
    # project on European transformation
    map_use <- sf::st_transform(map_use, crs = crs_europe)
    suppressWarnings(sf::st_crs(one_world_medium) <- 4326)
    map_all <- sf::st_transform(one_world_medium, crs = crs_europe)
  } else if (zoom == "usa") {
    # na_w_pop is package data (North America with population estimates).
    map_use <- na_w_pop
    # US National Atlas Equal Area projection (EPSG:2163).
    crs_usa <- 2163
    inst_count <- inst_count %>%
      filter(country %in% c("USA", "US", "United States",
                            "United States of America", "Canada", "Mexico")) %>%
      mutate(geometry = sf::st_transform(geometry, crs = crs_usa))
    suppressWarnings(sf::st_crs(one_world_medium) <- 4326)
    map_all <- sf::st_transform(one_world_medium, crs = crs_usa)
  } else if (zoom == "ne_asia") {
    # ne is package data for northeast Asia; plotted in plain lon/lat (4326).
    map_use <- ne
    inst_count <- inst_count %>%
      filter(country %in% c("China", "Taiwan", "Korea", "Japan", "Mongolia",
                            "Vietnam"))
    map_all <- one_world_medium
  }
  # Legend break widths for the size (1) and color (2) scales.
  if (max(inst_count$n, na.rm = TRUE) < 4) {
    # Bug fix: the original assigned `size_break_width`, a name that is never
    # used below, so small datasets failed later with
    # "object 'size_break_width1' not found". Assign the names actually used.
    size_break_width1 <- size_break_width2 <- 1
  } else {
    size_break_width1 <- ceiling((max(inst_count$n, na.rm = TRUE) -
                                    min(inst_count$n, na.rm = TRUE))/3)
    size_break_width2 <- ceiling((max(inst_count$n, na.rm = TRUE) -
                                    min(inst_count$n, na.rm = TRUE))/4)
  }
  n <- NULL
  if (plot == "point") {
    p <- ggplot() +
      geom_sf(data = map_use, linetype = "dotted") +
      geom_sf(data = map_all, fill = NA) +
      scale_size_area(breaks = breaks_width(size_break_width1),
                      name = "Number of\npublications") +
      theme(panel.border = element_blank(), axis.title = element_blank()) +
      # Bug fix: `breaks` must be passed by name; the original passed it
      # positionally, where it bound to the first formal argument of
      # scale_color_viridis_c rather than to `breaks`.
      scale_color_viridis_c(breaks = breaks_width(size_break_width2), name = "")
    city2 <- city <- NULL
    if (is.null(facet_by)) {
      if (per_year) {
        # `group = city2` keeps each city's point identity across animation
        # frames so gganimate can tween positions.
        p <- p +
          geom_sf(data = inst_count, aes(geometry = geometry, size = n, color = n,
                                         group = city2),
                  alpha = 0.7, show.legend = "point")
      } else {
        p <- p +
          geom_sf(data = inst_count, aes(geometry = geometry, size = n, color = n),
                  alpha = 0.7, show.legend = "point")
      }
    } else {
      if (per_year) {
        p <- p +
          geom_sf(data = inst_count, aes(geometry = geometry, size = n,
                                         group = city2,
                                         color = facets),
                  alpha = 0.7, show.legend = "point")
      } else {
        # Gray background points show the total per city in every facet so
        # each facet's subset can be compared to the overall pattern.
        inst_count_all <- inst_count %>%
          group_by(country, `state/province`, city) %>%
          summarize(n_all = sum(n)) %>%
          left_join(inst_count[, c("country", "state/province", "city", "geometry")],
                    by = c("country", "state/province", "city"))
        p <- p +
          geom_sf(data = inst_count_all,
                  aes(geometry = geometry, size = n_all, color = "all"),
                  alpha = 0.5, color = "gray50", show.legend = "point") +
          geom_sf(data = inst_count, aes(geometry = geometry, size = n,
                                         color = n),
                  alpha = 0.7, show.legend = "point")
      }
      p <- p +
        facet_wrap(vars(facets), ncol = ncol)
    }
    if (zoom != "world") {
      # Crop to the region's bounding box (xylims* are package data).
      xylims_use <- switch(zoom,
        europe = xylims,
        usa = xylims_us,
        ne_asia = xylims_ne
      )
      crs_use <- switch(zoom,
        europe = crs_europe,
        usa = crs_usa,
        ne_asia = 4326
      )
      p <- p +
        coord_sf(xlim = xylims_use[c("xmin", "xmax")], ylim = xylims_use[c("ymin", "ymax")],
                 crs = crs_use)
    }
    if (per_year) {
      year <- NULL
      # Animate over years, with the current year shown as the plot title.
      p <- p +
        gganimate::transition_states(year, state_length = 5, transition_length = 1) +
        labs(title = "{closest_state}") +
        gganimate::enter_fade() +
        gganimate::exit_fade()
    }
  } else {
    # Binned plots: expand counts to one row per publication and bin the raw
    # longitude/latitude coordinates.
    inst_count2 <- uncount(inst_count, n)
    coords <- sf::st_coordinates(inst_count2$geometry)
    colnames(coords) <- c("lon", "lat")
    coords <- as_tibble(coords)
    inst_count2 <- cbind(inst_count2, coords)
    p <- ggplot() +
      geom_sf(data = map_use, linetype = "dotted") +
      geom_sf(data = map_all, fill = NA) +
      scale_fill_viridis_c() +
      theme(panel.border = element_blank(), axis.title = element_blank())
    if (zoom != "world") {
      # Bug fix: the original only distinguished "europe" vs "usa" here, so
      # zoom = "ne_asia" errored on the undefined `crs_usa`. Use the same
      # lookup as the point branch.
      xylims_use <- switch(zoom,
        europe = xylims,
        usa = xylims_us,
        ne_asia = xylims_ne
      )
      crs_use <- switch(zoom,
        europe = crs_europe,
        usa = crs_usa,
        ne_asia = 4326
      )
      p <- p +
        coord_sf(xlim = xylims_use[c("xmin", "xmax")], ylim = xylims_use[c("ymin", "ymax")],
                 crs = crs_use)
    }
    if (plot == "hexbin") {
      p <- p +
        geom_hex(data = inst_count2, aes(lon, lat), bins = bins)
    } else if (plot == "bin2d") {
      p <- p +
        geom_bin2d(data = inst_count2, aes(lon, lat), bins = bins)
    }
    if (!is.null(facet_by)) {
      p <- p +
        facet_wrap(vars(facets), ncol = ncol)
    }
  }
  if (!per_year) {
    if (label_cities) {
      # Label the n_label cities with the most publications (per facet when
      # facetting); other cities get an empty label.
      if (!is.null(facet_by)) {
        inst_count <- inst_count %>%
          group_by(facets)
      }
      inst_count <- inst_count %>%
        mutate(city_rank = row_number(desc(n)),
               city_label = case_when(city_rank <= n_label ~ city,
                                      TRUE ~ ""))
      p <- p +
        geom_label_repel(data = inst_count, aes(geometry = geometry, label = city_label),
                         alpha = 0.7, stat = "sf_coordinates", max.overlaps = Inf)
    } else if (label_insts) {
      # Find out what the "top" institutions are (by short_name) and label
      # those instead of cities.
      inst_sn <- pubs %>%
        count(!!!syms(c(vars_count, "short_name")), name = "nn")
      inst_count <- inst_count %>%
        left_join(inst_sn, by = vars_count)
      if (!is.null(facet_by)) {
        inst_count <- inst_count %>%
          group_by(facets)
      }
      inst_count <- inst_count %>%
        mutate(inst_rank = row_number(desc(nn)),
               inst_label = case_when(inst_rank <= n_label ~ short_name,
                                      TRUE ~ ""))
      p <- p +
        geom_label_repel(data = inst_count, aes(geometry = geometry, label = inst_label),
                         alpha = 0.7, stat = "sf_coordinates", max.overlaps = Inf)
    }
  }
  p
}
#' Plot per capita data as choropleth or bar plot
#'
#' For the entire world, Europe (for European countries tend to be smaller) and
#' states within the US.
#'
#' @param pubs A data frame with one row per publication and columns country and
#' for the US, also a column "state/province".
#' @param zoom Whether to plot the world map or only Europe (centered on Western
#' Europe and some Eastern European countries are partially cropped off) or only
#' the US.
#' @param plot Whether to plot choropleth or bar plot.
#' @param label_states If plotting the US, whether to label the states.
#' @return A ggplot2 object.
#' @importFrom ggplot2 scale_fill_distiller geom_col geom_sf_text
#' @importFrom stringr str_length
#' @export
pubs_per_capita <- function(pubs, zoom = c("world", "europe", "usa"),
                            plot = c("choropleth", "bar"),
                            label_states = TRUE) {
  # Optional mapping dependencies; fail early with a clear message.
  .pkg_check("sf")
  .pkg_check("rnaturalearth")
  .pkg_check("rnaturalearthdata")
  .pkg_check("rgeos")
  # NOTE(review): the usa branch below also uses urbnmapr (for state labels)
  # but it is not checked here — confirm whether .pkg_check("urbnmapr") is
  # needed when zoom == "usa".
  zoom <- match.arg(zoom)
  plot <- match.arg(plot)
  if (zoom != "usa") {
    # Country-level maps: world uses the low-res map and Robinson projection,
    # Europe the medium-res map and LAEA Europe (EPSG:3035).
    if (zoom == "world") {
      map_use <- rnaturalearth::ne_countries(scale = "small", returnclass = "sf")
      if (plot == "choropleth") {
        robin <- "+proj=robin +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m no_defs"
        map_use <- sf::st_transform(map_use, robin)
        map_all <- sf::st_transform(one_world_small, robin)
      }
    } else {
      map_use <- rnaturalearth::ne_countries(scale = "medium", returnclass = "sf")
      if (plot == "choropleth") {
        map_use <- sf::st_transform(map_use, 3035)
        map_all <- sf::st_transform(one_world_medium, 3035)
      }
    }
    # Silence R CMD check notes about NSE column names.
    country <- per_capita <- pop_est <- country_full <- n <- `state/province` <-
      `2019` <- state_name <- area <- NULL
    # Match the country names used in `pubs` to Natural Earth's `name` column.
    pubs_count <- pubs %>%
      mutate(country_full = case_when(country == "USA" ~ "United States",
                                      country == "UK" ~ "United Kingdom",
                                      TRUE ~ country)) %>%
      count(country_full, country)
    if (zoom == "europe") {
      pubs_count <- pubs_count %>%
        filter(country %in% europe_countries)
    }
    # pop_est is Natural Earth's population estimate column.
    map_use <- map_use %>%
      left_join(pubs_count, by = c("name" = "country_full")) %>%
      mutate(per_capita = n/pop_est)
    if (plot == "bar") {
      # Bars are ordered by per-capita publication count.
      map_use <- map_use %>%
        filter(!is.na(per_capita)) %>%
        mutate(area = fct_reorder(country, per_capita))
    }
  } else {
    # US states: na_w_pop is package data with a 2019 population column;
    # EPSG:2163 is the US National Atlas Equal Area projection.
    map_use <- na_w_pop
    map_all <- sf::st_transform(one_world_medium, 2163)
    pubs_count <- pubs %>%
      filter(country %in% c("USA", "US", "United States", "United States of America")) %>%
      count(`state/province`)
    # Convert state names to abbreviations
    pubs_count <- pubs_count %>%
      mutate(`state/province` = case_when(
        `state/province` %in% map_use$postal ~ `state/province`,
        TRUE ~ map_use$postal[match(`state/province`, map_use$state_name)]
      ))
    map_use <- map_use %>%
      left_join(pubs_count,
                by = c("postal" = "state/province")) %>%
      mutate(per_capita = n/`2019`)
    if (plot == "bar") {
      map_use <- map_use %>%
        filter(!is.na(per_capita)) %>%
        mutate(area = fct_reorder(state_name, per_capita))
    }
  }
  if (plot == "choropleth") {
    # Fill on a log10 scale since per-capita counts span orders of magnitude.
    p <- ggplot(map_use) +
      geom_sf(aes(fill = log10(per_capita)), linetype = "dotted") +
      geom_sf(data = map_all, fill = NA) +
      scale_fill_distiller(palette = "Blues", na.value = "white", direction = 1,
                           name = "# pub.\nper capita\n(log10)") +
      theme(panel.border = element_blank(), axis.title = element_blank())
    if (zoom != "world") {
      # Limit to that box
      xylims_use <- if (zoom == "europe") xylims else xylims_us
      crs_use <- if (zoom == "europe") 3035 else 2163
      p <- p +
        coord_sf(xlim = xylims_use[c("xmin", "xmax")], ylim = xylims_use[c("ymin", "ymax")],
                 crs = crs_use)
    }
    if (zoom == "usa" && label_states) {
      # Label the states
      # AK and HI are dropped because they are repositioned in na_w_pop.
      state_labels <- urbnmapr::get_urbn_labels("states", sf = TRUE) %>%
        filter(!state_abbv %in% c("AK", "HI"))
      p <- p +
        geom_sf_text(data = state_labels, aes(geometry = geometry, label = state_abbv))
    }
  } else {
    # Bar plot branch (map_all is intentionally unused here).
    area_lab <- if (zoom == "usa") "state" else "country"
    if (zoom == "europe") {
      map_use <- map_use %>%
        filter(country %in% europe_countries)
    }
    p <- ggplot(map_use, aes(per_capita, area)) +
      geom_col() +
      scale_x_continuous(expand = expansion(mult = c(0, 0.05))) +
      labs(y = area_lab)
  }
  p
}
#' Plot heatmap to show relationship between two categorical variables
#'
#' For instance, are papers for certain techniques more likely to be in certain
#' journals? Is there an association between species and journal? This is just
#' for visualization. Use `fisher.test` to see if it's significant. I still wonder
#' if I should rewrite this with ggplot2, which is more work than base R in this
#' case.
#'
#' @inheritParams pubs_per_year
#' @param row_var Variable for rows of the heatmap. Tidyeval is supported.
#' @param col_var Variable for columns of the heatmap.
#' @param ... Extra arguments to pass to `heatmap`
#' @return A base R heatmap is plotted to the current device.
#' @importFrom dplyr pull
#' @importFrom tidyr pivot_wider
#' @importFrom stats heatmap
#' @export
cat_heatmap <- function(pubs, row_var, col_var, ...) {
  # Cross-tabulate two categorical columns of `pubs` and draw the result as a
  # base R heatmap on the current device. `...` is forwarded to `heatmap()`.
  row_q <- enquo(row_var)
  col_q <- enquo(col_var)
  counts_wide <- pubs %>%
    count(!!row_q, !!col_q) %>%
    pivot_wider(names_from = !!col_q, values_from = "n")
  # The first column holds the row labels; the remaining columns are counts.
  count_mat <- as.matrix(counts_wide[, -1])
  rownames(count_mat) <- pull(counts_wide, !!row_q)
  # Combinations absent from the data come out of pivot_wider as NA;
  # treat them as zero counts.
  count_mat[is.na(count_mat)] <- 0
  heatmap(count_mat, ...)
}
#' Plot histogram for each value of a logical variable
#'
#' Plots 3 histograms showing the number of publications per year for TRUE, FALSE,
#' and NA, with the histogram overlaid on top of a translucent one for all
#' values. There's one facet per row so it's easy to compare how things change
#' with time.
#'
#' @inheritParams pubs_per_year
#' @param col_use Which logical variable to plot. Tidyeval is supported.
#' @return A ggplot2 object
#' @importFrom ggplot2 geom_histogram facet_grid scale_fill_brewer
#' @export
hist_bool <- function(pubs, col_use, binwidth = 365, preprints = TRUE) {
  # Faceted histograms (TRUE / FALSE / NA) of publications over time, each
  # overlaid on a gray histogram of all publications for comparison.
  # Silence R CMD check notes about NSE column names.
  date_published <- journal <- v <- NULL
  col_use <- enquo(col_use)
  # Optionally exclude preprint servers.
  if (!preprints) {
    pubs <- filter(pubs, !journal %in% c("bioRxiv", "arXiv"))
  }
  pubs <- mutate(pubs, v = !!col_use)
  # Background layer: all publications (v dropped so it appears in every
  # facet), drawn in translucent gray underneath the per-value histogram.
  background <- geom_histogram(aes(fill = 'all'), alpha = 0.7, fill = "gray70",
                               data = select(pubs, -v), binwidth = binwidth)
  p <- ggplot(pubs, aes(date_published)) +
    background +
    geom_histogram(aes(fill = v), binwidth = binwidth) +
    facet_grid(rows = vars(v)) +
    scale_y_continuous(breaks = breaks_pretty(), expand = expansion(c(0, 0.05))) +
    scale_x_date(breaks = breaks_pretty(10)) +
    scale_fill_brewer(palette = "Set1", na.value = "gray50") +
    theme(panel.grid.minor = element_blank(), legend.position = "none") +
    labs(x = "date published")
  p
}
#' Plot outlines of histogram for a logical variable
#'
#' Kind of like `hist_bool`, but instead of plotting TRUE, FALSE, and NA in 3
#' separate facets, it plots them as an outline overlaid on a translucent
#' histogram for all values. This is useful when facetting with another categorical
#' variable, such as programming language.
#'
#' @inheritParams hist_bool
#' @inheritParams pubs_per_year
#' @inheritParams pubs_on_map
#' @importFrom tidyr complete
#' @importFrom ggplot2 scale_x_continuous
#' @importFrom rlang quo_name
#' @importFrom stringr str_to_sentence
#' @export
hist_bool_line <- function(pubs, col_use, facet_by = NULL, ncol = 3, n_top = Inf,
                           binwidth = 365, preprints = TRUE) {
  # Overlaid (stacked-fill) histograms of publications over time for each
  # value of a logical column, optionally facetted by another variable.
  # Silence R CMD check notes about NSE column names.
  date_published <- journal <- v <- NULL
  col_use <- enquo(col_use)
  if (!preprints) {
    pubs <- filter(pubs, !journal %in% c("bioRxiv", "arXiv"))
  }
  pubs <- mutate(pubs, v = !!col_use)
  use_facets <- !is.null(facet_by)
  if (use_facets) {
    # Lump rare facet levels into "Other", order by frequency, and (if any
    # lumping happened) put "Other" last.
    pubs <- mutate(pubs,
                   facets = fct_lump_n(!!sym(facet_by), n = n_top,
                                       ties.method = "first"),
                   facets = fct_infreq(facets))
    if ("Other" %in% pubs$facets) {
      pubs <- mutate(pubs, facets = fct_relevel(facets, "Other", after = Inf))
    }
    pubs <- group_by(pubs, v, facets)
  } else {
    pubs <- group_by(pubs, v)
  }
  plt <- ggplot(pubs, aes(date_published, after_stat(count))) +
    geom_histogram(aes(fill = v), alpha = 0.7, binwidth = binwidth) +
    scale_y_continuous(breaks = breaks_pretty(), expand = expansion(c(0, 0.05))) +
    scale_x_date(breaks = breaks_pretty(10)) +
    scale_fill_brewer(name = str_to_sentence(quo_name(col_use)),
                      palette = "Set1", na.value = "gray50") +
    theme(panel.grid.minor = element_blank(), legend.position = "top") +
    labs(y = "count", x = "date published")
  if (use_facets) {
    plt <- plt + facet_wrap(~ facets, ncol = ncol)
  }
  plt
}
#' Test whether something is associated with time
#'
#' Fits a logistic regression model with glm that uses the publication date to
#' predict the proportion of a logical variable that is TRUE, and tests whether
#' the slope (beta) is 0.
#'
#' @inheritParams hist_bool
#' @return A glm object is returned invisibly. The summary is printed to screen
#' @importFrom dplyr group_by summarize
#' @importFrom stats glm
#' @export
test_year_bool <- function(pubs, col_use, preprints = TRUE) {
  # Logistic regression of a logical column on publication date; a
  # significant slope means the proportion TRUE changes over time.
  # Prints the model summary and returns the glm fit invisibly.
  journal <- NULL
  col_use <- enquo(col_use)
  if (!preprints) {
    pubs <- filter(pubs, !journal %in% c("bioRxiv", "arXiv"))
  }
  model_data <- mutate(pubs, bool_use = !!col_use)
  fit <- glm(bool_use ~ date_published, data = model_data, family = "binomial")
  print(summary(fit))
  invisible(fit)
}
#' Prequel vs current binned over time
#'
#' Plots freqpoly for prequel vs current, with an option to start both at the
#' date when the first publication in the category appeared. The point here is
#' not to compare to the distribution of everything, like in hist_bool, or to
#' compare when things started, like when I plotted a histogram of different
#' methods over time, but to compare the height of the histograms and how steeply
#' they rise and fall. So I think freqpoly may be better than blocky histograms
#' for this purpose.
#'
#' @inheritParams pubs_per_year
#' @inheritParams hist_bool
#' @param since_first Logical. Whether to plot days after the first publication
#' appeared.
#' @param do_smooth Logical. Whether to plot smoothed curve for the trend rather
#' than freqpoly.
#' @param smooth_method Method of smoothing, passed to \code{geom_smooth}.
#' @param smooth_formula Formula of smoothing, passed to \code{geom_smooth}.
#' @return A ggplot2 object
#' @importFrom ggplot2 scale_color_brewer geom_freqpoly scale_fill_discrete
#' @importFrom ggplot2 after_stat
#' @export
era_freqpoly <- function(pubs, col_use, since_first = FALSE, binwidth = 365,
                         preprints = TRUE, do_smooth = FALSE,
                         smooth_method = NULL, smooth_formula = NULL) {
  # Silence R CMD check notes about NSE column names.
  journal <- date_published <- days_since_first <- NULL
  col_use <- enquo(col_use)
  # Optionally exclude preprint servers.
  if (!preprints) {
    pubs <- pubs %>%
      filter(!journal %in% c("bioRxiv", "arXiv"))
  }
  if (since_first) {
    # Align each group so day 0 is that group's first publication, then bin
    # the elapsed days into binwidth-wide intervals.
    df_plt <- pubs %>%
      group_by(!!col_use) %>%
      mutate(days_since_first = as.numeric(date_published - min(date_published)))
    # Left-open breaks starting at -binwidth so day 0 falls in the first bin.
    breaks_use <- seq(-binwidth, max(df_plt$days_since_first), by = binwidth)
    df_plt <- df_plt %>%
      mutate(date_bin = cut(days_since_first, breaks_use, right = TRUE,
                            labels = FALSE)) %>%
      group_by(!!col_use, date_bin, .drop = FALSE) %>%
      count() %>%
      # x is the left edge (in days) of each bin; is_last is meant to flag
      # the (incomplete) final bin so it can be drawn differently.
      # NOTE(review): `x == date_bin[which.max(date_bin)]` compares a break
      # position (days) to a bin *index* — this looks like it should be
      # `date_bin == max(date_bin, na.rm = TRUE)`. Confirm intent.
      mutate(x = breaks_use[date_bin+1],
             is_last = x == date_bin[which.max(date_bin)])
    p <- ggplot(df_plt, aes(x, n, color = !!col_use)) +
      labs(x = "Days since the first publication")
  } else {
    # Calendar-time version: cut dates into binwidth-day intervals.
    df_plt <- pubs %>%
      mutate(x = cut(date_published, paste(binwidth, "days"), right = TRUE,
                     include.lowest = TRUE)) %>%
      group_by(!!col_use, x, .drop = FALSE) %>%
      count() %>%
      # The last (incomplete) bin is flagged so it is plotted as a point
      # rather than connected by the line.
      mutate(is_last = x == tail(levels(x), 1),
             x = as.Date(x))
    p <- ggplot(df_plt, aes(x, n, color = !!col_use)) +
      labs(x = "Date published")
  }
  if (do_smooth) {
    # Smooth trend over complete bins; all bins shown as points, with the
    # incomplete last bin getting a distinct shape (4 = cross).
    p <- p +
      geom_smooth(data = df_plt %>% filter(!is_last), se = FALSE,
                  method = smooth_method, formula = smooth_formula) +
      geom_point(aes(shape = is_last)) +
      scale_shape_manual(values = c(16, 4))
  } else {
    # Line through complete bins; the incomplete last bin drawn as a cross.
    p <- p +
      geom_line(data = df_plt %>% filter(!is_last)) +
      geom_point(data = df_plt %>% filter(is_last), shape = 4)
  }
  p <- p +
    scale_color_brewer(name = str_to_sentence(quo_name(col_use)),
                       palette = "Set1", na.value = "gray50") +
    labs(y = "Number of publications")
  p
}
# From https://stackoverflow.com/a/44090582/8916916
gtable_stack <- function(g1, g2) {
  # Overlay gtable g2 on top of g1: combine the grobs, push g1's layout rows
  # below z = 0 (renaming them "g2"), and append g2's layout on top.
  combined_grobs <- c(g1$grobs, g2$grobs)
  shifted_layout <- transform(g1$layout, z = z - max(z), name = "g2")
  g1$grobs <- combined_grobs
  g1$layout <- rbind(shifted_layout, g2$layout)
  g1
}
gtable_select <- function(x, ...) {
  # Subset a gtable, keeping only the grobs and layout rows picked out by
  # the (logical or integer) index vectors supplied in `...`.
  keep <- c(...)
  x$grobs <- x$grobs[keep]
  x$layout <- x$layout[keep, , drop = FALSE]
  x
}
#' Color facet strips by a variable
#'
#' @param p A ggplot object for the original facetted plot.
#' @param strip_color Categorical column in the data for the original plot to
#' use to color the facet strips. Tidyeval is used here.
#' @param palette A character vector of colors. Can be named to assign each
#' color to a value in `strip_color`.
#' @return Nothing, the plot is printed to device.
#' @importFrom ggplot2 ggplotGrob
#' @importFrom grid grid.newpage grid.draw
#' @export
plot_facets_color <- function(p, strip_color, palette) {
  strip_color <- enquo(strip_color)
  # Build a dummy plot with the same data and facetting as `p`, whose panels
  # are just full-panel rectangles filled by `strip_color`. These rectangles
  # will be repositioned to cover the facet strips of the real plot.
  dummy <- ggplot(p$data, p$mapping) +
    facet_wrap(vars(!!!p$facet$params$facets),
               ncol = p$facet$params$ncol,
               labeller = p$facet$params$labeller) +
    geom_rect(aes(fill = !!strip_color), xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
    theme_minimal() +
    scale_fill_manual(values = palette)
  # Mirror the original plot's legend position, if it set one.
  if ("legend.position" %in% names(p$theme)) {
    dummy <- dummy +
      theme(legend.position = p$theme$legend.position)
  }
  g1 <- ggplotGrob(p)
  g2 <- ggplotGrob(dummy)
  # move dummy panels one cell up (into the strip row above each panel)
  panels <- grepl(pattern="panel", g2$layout$name)
  strips <- grepl(pattern="strip-t", g2$layout$name)
  g2$layout$t[panels] <- g2$layout$t[panels] - 1
  g2$layout$b[panels] <- g2$layout$b[panels] - 1
  new_strips <- gtable_select(g2, panels | strips)
  # stack new strips on top of gtable
  # ideally you'd remove the old strips, for now they're just covered
  new_plot <- gtable_stack(g1, new_strips)
  # Draws to the current device; nothing is returned.
  grid.newpage()
  grid.draw(new_plot)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_execute_provisioned_product_plan}
\alias{servicecatalog_execute_provisioned_product_plan}
\title{Provisions or modifies a product based on the resource changes for the
specified plan}
\usage{
servicecatalog_execute_provisioned_product_plan(AcceptLanguage, PlanId,
IdempotencyToken)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{PlanId}{[required] The plan identifier.}
\item{IdempotencyToken}{[required] A unique identifier that you provide to ensure idempotency. If multiple
requests differ only by the idempotency token, the same response is
returned for each repeated request.}
}
\value{
A list with the following syntax:\preformatted{list(
RecordDetail = list(
RecordId = "string",
ProvisionedProductName = "string",
Status = "CREATED"|"IN_PROGRESS"|"IN_PROGRESS_IN_ERROR"|"SUCCEEDED"|"FAILED",
CreatedTime = as.POSIXct(
"2015-01-01"
),
UpdatedTime = as.POSIXct(
"2015-01-01"
),
ProvisionedProductType = "string",
RecordType = "string",
ProvisionedProductId = "string",
ProductId = "string",
ProvisioningArtifactId = "string",
PathId = "string",
RecordErrors = list(
list(
Code = "string",
Description = "string"
)
),
RecordTags = list(
list(
Key = "string",
Value = "string"
)
),
LaunchRoleArn = "string"
)
)
}
}
\description{
Provisions or modifies a product based on the resource changes for the
specified plan.
}
\section{Request syntax}{
\preformatted{svc$execute_provisioned_product_plan(
AcceptLanguage = "string",
PlanId = "string",
IdempotencyToken = "string"
)
}
}
\keyword{internal}
| /cran/paws.management/man/servicecatalog_execute_provisioned_product_plan.Rd | permissive | TWarczak/paws | R | false | true | 1,897 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_execute_provisioned_product_plan}
\alias{servicecatalog_execute_provisioned_product_plan}
\title{Provisions or modifies a product based on the resource changes for the
specified plan}
\usage{
servicecatalog_execute_provisioned_product_plan(AcceptLanguage, PlanId,
IdempotencyToken)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{PlanId}{[required] The plan identifier.}
\item{IdempotencyToken}{[required] A unique identifier that you provide to ensure idempotency. If multiple
requests differ only by the idempotency token, the same response is
returned for each repeated request.}
}
\value{
A list with the following syntax:\preformatted{list(
RecordDetail = list(
RecordId = "string",
ProvisionedProductName = "string",
Status = "CREATED"|"IN_PROGRESS"|"IN_PROGRESS_IN_ERROR"|"SUCCEEDED"|"FAILED",
CreatedTime = as.POSIXct(
"2015-01-01"
),
UpdatedTime = as.POSIXct(
"2015-01-01"
),
ProvisionedProductType = "string",
RecordType = "string",
ProvisionedProductId = "string",
ProductId = "string",
ProvisioningArtifactId = "string",
PathId = "string",
RecordErrors = list(
list(
Code = "string",
Description = "string"
)
),
RecordTags = list(
list(
Key = "string",
Value = "string"
)
),
LaunchRoleArn = "string"
)
)
}
}
\description{
Provisions or modifies a product based on the resource changes for the
specified plan.
}
\section{Request syntax}{
\preformatted{svc$execute_provisioned_product_plan(
AcceptLanguage = "string",
PlanId = "string",
IdempotencyToken = "string"
)
}
}
\keyword{internal}
|
func_4769_file <- function(dataframe) {
  # Keep only rows whose event class id ends in 4769 (Kerberos service ticket
  # events). `%like%` comes from DescTools; '%' is the wildcard.
  # The original stored the result in a dead local and returned it invisibly
  # (assignment as the last expression); return the subset directly instead.
  dataframe[dataframe$deviceEventClassId %like% "%4769", ]
}
func_4769_date <- function(dataframe) {
  # Unique calendar dates (Date class) found in the Event.Time column, which
  # is formatted "YYYY/MM/DD hh:mm:ss" (only the first 10 chars are parsed).
  # The original stored the result in a dead local and returned it invisibly;
  # return the vector directly instead.
  unique(as.Date(substr(dataframe[, "Event.Time"], 1, 10), "%Y/%m/%d"))
}
func_4769_users<-function(data_frame,dates){
  # For each date, collect destination user names (the part before '@') that
  # are shorter than 5 characters and start with t/u/h/z, and return them as
  # a list of 2-column matrices (date, user). Dates with no matching users
  # are dropped. Returns 0 when no date yields any user (sentinel value the
  # callers test for with class checks).
  my_list<-0
  user_function<-function(x){
    # NOTE(review): %like% against a vector of patterns recycles the pattern
    # across rows (with a warning unless lengths divide) — confirm this
    # matches the intended "starts with any of t/u/h/z" semantics.
    user_vector<-data_frame[(as.Date(substr(data_frame$Event.Time,1,10)) %like% x)
                            &(nchar(regmatches(data_frame$destinationUserName,regexpr("^[^@]+",data_frame$destinationUserName)))<5)
                            &(data_frame$destinationUserName %like% c("t%","u%","h%","z%")),"destinationUserName"]
    # Strip the realm ('@...') and keep unique short names only.
    final_user_vector<-unique(regmatches(user_vector,regexpr("^[^@]+",user_vector)))
    fv<-final_user_vector[nchar(final_user_vector)<5]
  }
  file_users<-lapply(dates,user_function)
  for (i in 1:length(dates)){
    if (!length(file_users[[i]])==0){
      # One row per user: column 1 = date (as character), column 2 = user.
      r2<-cbind(rep(as.character.Date(dates[i]),as.integer(length(file_users[[i]]))),file_users[[i]])
      # First hit replaces the numeric sentinel; later hits are appended.
      if (class(my_list)=="numeric"){my_list<-list(r2) }else{
        my_list<-c(my_list,list(r2)) }
    }
  }
  my_list
}
func_4769_src<-function(df_bs,usr_lst){
  # For every (date, user) pair in usr_lst, look up the unique destination
  # services and source hosts in df_bs (excluding krbtgt ticket-granting
  # entries) and return a list of data frames with columns
  # date / user / dst / src. Returns 0 when nothing is found.
  src_lst<-0
  r1<-1
  #uv<-regmatches(df_bs$destinationUserName,regexpr("^[^@]+",df_bs$destinationUserName))
  for (i in 1:length(usr_lst)){
    # df_src accumulates rows for this date; 0 is the "empty" sentinel.
    df_src<-0
    for (j in 1:nrow(usr_lst[[i]])){
      # usr_lst[[i]][j,1] is the date, [j,2] the user name prefix.
      src<-unique(df_bs[ ((as.Date(substr(df_bs[,"Event.Time"],1,10),"%Y/%m/%d") %like% usr_lst[[i]][j,1]) &
                            (df_bs$destinationUserName %like% paste0(usr_lst[[i]][j,2],"%"))
                          &! (df_bs$destinationServiceName %like% "krbtgt%"))
                         ,c("destinationServiceName" ,"deviceCustomString3")])
      # Numeric 10 marks "no rows found" for this pair.
      if (length(src[[1]])==0){
        src<-10
      }
      if(!(class(src)=="numeric")){
        for (k in 1:nrow(src)){
          # substring(..., 8) strips a fixed 7-char prefix from the source
          # field — presumably "::ffff:" or similar; confirm against data.
          r1<-cbind(usr_lst[[i]][j,1],usr_lst[[i]][j,2],src[k,1],substring(src[k,2],8))
          # Skip rows where both dst and src are missing.
          if (!(is.na(r1[,4])&is.na(r1[,3]))){
            if (class(df_src)=="numeric"){
              df_src<-r1 }else {
                df_src<-merge(df_src,r1,all=TRUE) }
          }
        }
      }
    }
    # NOTE(review): once df_src holds collected rows, `df_src==0` is
    # element-wise (length > 1); `if` on a non-scalar condition is an error
    # in R >= 4.2 — confirm intended check is `!identical(df_src, 0)`.
    if(!(df_src==0)){
      df_src<-data.frame(cbind((as.character(df_src[,1])),(as.character(df_src[,2])),(as.character(df_src[,3])),(as.character(df_src[,4]))),stringsAsFactors = FALSE)
      colnames(df_src)<-c("date","user","dst","src")
      # Same sentinel pattern as above: first result replaces 0, later
      # results are appended to the list.
      if (class(src_lst)=="numeric"){
        src_lst<-list(df_src)}else {
          src_lst<-c(src_lst,list(df_src))}
    }
  }
  src_lst
}
func_4769_file_src <- function(file_nm) {
  # Extract 4769 (Kerberos service ticket) source/destination records from one
  # CSV log file. Returns 0 when the file contains no 4769 events, otherwise
  # the list produced by func_4769_src(). Appends a timing line to file1.log.
  file1_src <- 0
  require(parallel)  # NOTE(review): 'parallel' does not appear to be used here
  logf <- file("file1.log", "at")
  # Always release the log connection — the original only closed it inside the
  # if-branch, leaking the connection whenever the file had no 4769 rows.
  on.exit(close(logf), add = TRUE)
  file1 <- read.csv(file_nm, stringsAsFactors = FALSE)
  file1_base <- func_4769_file(file1)
  if (nrow(file1_base) > 0) {
    file1_dates <- func_4769_date(file1_base)
    file1_users <- func_4769_users(file1_base, file1_dates)
    # Time the expensive extraction step and log "file,event,elapsed seconds".
    st <- system.time(file1_src <- func_4769_src(file1_base, file1_users))
    writeLines(paste(file_nm, "4769", st[3], sep = ","), logf)
  }
  file1_src
}
func_4769_output<-function(src_lst,file_nm){
  # Write each per-date data frame in src_lst to ../output/4769_<date>.csv,
  # merging with an existing file for that date if one is already present,
  # then move the processed input file into ../processed/.
  # Existing daily output files; dates are chars 6-15 of "4769_YYYY-MM-DD...".
  trgt_files<-list.files(path="../output", pattern= "^4769_[0-9]{4}-[0-9]{2}-[0-9]{2}")
  trgt_dates<-substr(trgt_files,6,15)
  # NOTE(review): assumes src_lst is a non-empty list of data frames (the 0
  # sentinel case is filtered out by the caller) — 1:length() would misfire
  # on an empty list.
  for(i in 1:length(src_lst)){
    if(!(src_lst[[i]][1,"date"] %in% trgt_dates)){
      # No file for this date yet: write a fresh one.
      write.csv(src_lst[[i]],paste("../output/4769_",src_lst[[i]][1,"date"],".csv",sep = ""),row.names = FALSE)
    } else {
      # Merge with the existing file for this date (deduplicating via merge).
      src11<-read.csv(paste("../output/",trgt_files[match(src_lst[[i]][1,"date"],trgt_dates)],sep=""),stringsAsFactors = FALSE)
      src11<-merge(src11,src_lst[[i]],all=TRUE)
      write.csv(src11,paste("../output/",trgt_files[match(src_lst[[i]][1,"date"],trgt_dates)],sep=""),row.names = FALSE)
    }
  }
  # Archive the input so it is not processed again.
  file.rename(file_nm,paste("../processed/",file_nm,sep=""))
}
func_4769 <- function(input_filter) {
  # Driver: process every CSV log file in the working directory whose name
  # matches `input_filter` — extract 4769 records and write/merge the daily
  # output files.
  library(DescTools)  # provides %like% for the helper functions
  csv <- list.files(pattern = "*.csv")
  csv <- grep(input_filter, csv, value = TRUE)
  # seq_along() is safe with zero matches; the original `1:length(csv)`
  # iterated over c(1, 0) and crashed when no file matched the filter.
  for (i in seq_along(csv)) {
    src <- func_4769_file_src(csv[i])
    # A numeric result (0) means "no 4769 events in this file" — skip it.
    if (!is.numeric(src)) {
      func_4769_output(src, csv[i])
    }
  }
}
| /event_4769_functions.R | no_license | jpl33/adv_stat_5091 | R | false | false | 4,226 | r | func_4769_file<-function(dataframe){
df_base<-dataframe[dataframe$deviceEventClassId %like% "%4769",]
}
func_4769_date<-function(dataframe){
file_dates<-unique(as.Date(substr(dataframe[,"Event.Time"],1,10),"%Y/%m/%d"))
}
func_4769_users<-function(data_frame,dates){
my_list<-0
user_function<-function(x){
user_vector<-data_frame[(as.Date(substr(data_frame$Event.Time,1,10)) %like% x)
&(nchar(regmatches(data_frame$destinationUserName,regexpr("^[^@]+",data_frame$destinationUserName)))<5)
&(data_frame$destinationUserName %like% c("t%","u%","h%","z%")),"destinationUserName"]
final_user_vector<-unique(regmatches(user_vector,regexpr("^[^@]+",user_vector)))
fv<-final_user_vector[nchar(final_user_vector)<5]
}
file_users<-lapply(dates,user_function)
for (i in 1:length(dates)){
if (!length(file_users[[i]])==0){
r2<-cbind(rep(as.character.Date(dates[i]),as.integer(length(file_users[[i]]))),file_users[[i]])
if (class(my_list)=="numeric"){my_list<-list(r2) }else{
my_list<-c(my_list,list(r2)) }
}
}
my_list
}
func_4769_src<-function(df_bs,usr_lst){
src_lst<-0
r1<-1
#uv<-regmatches(df_bs$destinationUserName,regexpr("^[^@]+",df_bs$destinationUserName))
for (i in 1:length(usr_lst)){
df_src<-0
for (j in 1:nrow(usr_lst[[i]])){
src<-unique(df_bs[ ((as.Date(substr(df_bs[,"Event.Time"],1,10),"%Y/%m/%d") %like% usr_lst[[i]][j,1]) &
(df_bs$destinationUserName %like% paste0(usr_lst[[i]][j,2],"%"))
&! (df_bs$destinationServiceName %like% "krbtgt%"))
,c("destinationServiceName" ,"deviceCustomString3")])
if (length(src[[1]])==0){
src<-10
}
if(!(class(src)=="numeric")){
for (k in 1:nrow(src)){
r1<-cbind(usr_lst[[i]][j,1],usr_lst[[i]][j,2],src[k,1],substring(src[k,2],8))
if (!(is.na(r1[,4])&is.na(r1[,3]))){
if (class(df_src)=="numeric"){
df_src<-r1 }else {
df_src<-merge(df_src,r1,all=TRUE) }
}
}
}
}
if(!(df_src==0)){
df_src<-data.frame(cbind((as.character(df_src[,1])),(as.character(df_src[,2])),(as.character(df_src[,3])),(as.character(df_src[,4]))),stringsAsFactors = FALSE)
colnames(df_src)<-c("date","user","dst","src")
if (class(src_lst)=="numeric"){
src_lst<-list(df_src)}else {
src_lst<-c(src_lst,list(df_src))}
}
}
src_lst
}
func_4769_file_src<-function(file_nm){
file1_src<-0
require(parallel)
logf<-file("file1.log","at")
file1<-read.csv(file_nm,stringsAsFactors = FALSE)
file1_base<-func_4769_file(file1)
if (nrow(file1_base)>0){
file1_dates<-func_4769_date(file1_base)
file1_users<-func_4769_users(file1_base,file1_dates)
# file1_src<-func_4769_src(file1_base,file1_users)
st<-system.time(file1_src<-func_4769_src(file1_base,file1_users))
writeLines(paste(file_nm,"4769",st[3],sep=","),logf)
close(logf)
}
file1_src
}
func_4769_output<-function(src_lst,file_nm){
trgt_files<-list.files(path="../output", pattern= "^4769_[0-9]{4}-[0-9]{2}-[0-9]{2}")
trgt_dates<-substr(trgt_files,6,15)
for(i in 1:length(src_lst)){
if(!(src_lst[[i]][1,"date"] %in% trgt_dates)){
write.csv(src_lst[[i]],paste("../output/4769_",src_lst[[i]][1,"date"],".csv",sep = ""),row.names = FALSE)
} else {
src11<-read.csv(paste("../output/",trgt_files[match(src_lst[[i]][1,"date"],trgt_dates)],sep=""),stringsAsFactors = FALSE)
src11<-merge(src11,src_lst[[i]],all=TRUE)
write.csv(src11,paste("../output/",trgt_files[match(src_lst[[i]][1,"date"],trgt_dates)],sep=""),row.names = FALSE)
}
}
file.rename(file_nm,paste("../processed/",file_nm,sep=""))
}
func_4769<-function(input_filter){
library(DescTools)
csv<-list.files(pattern= "*.csv")
csv<-grep(input_filter,csv,value = TRUE)
for (i in 1:length(csv)){
src<-func_4769_file_src(csv[i])
if (!(class(src)=="numeric")){
func_4769_output(src,csv[i])}
}
}
|
# Gene analysis for liver expression data for B6,S1,S2 <- maybe should be changed so the loop runs for all tissues ?
### load in normalized expression data ###
# NOTE(review): hard-coded absolute path; consider passing it in or using a
# project-relative path instead of setwd().
setwd("/home/aimee/Aimee Test Data")
expressions <- read.csv("Expressionlevels_DeikeClarionS.norm.samples.txt", sep = "\t", header = TRUE)
ids = colnames(expressions)
#extract liver expression data <- adjust this so any tissue can be used for that (P - Pankreas , SM - skeletal Muscle, GF - Gonadal fat)
# NOTE(review): grepl("L", ids) matches an "L" anywhere in the column name —
# confirm the sample naming scheme guarantees only liver columns contain "L".
tissue <- which(grepl("L", ids))
# make heatmap to see if correlation looks normal and no samples are switched (quality control step)
corM <- cor(expressions)
png("heatmap.png", width = 2000, height = 2000)
heatmap(corM, scale = "none")
dev.off()
#test for significant differences in tissue expression data (S1/S2/B6)
tissueexpr = expressions[, tissue]
expressions = expressions[, -tissue]
tissueexpr[1:4,]
# Strain is encoded in the first two characters of the sample name.
strain = substr(colnames(tissueexpr), 0,2)
# Per probe: ANOVA p-value for a strain effect, plus per-strain mean
# expression from the intercept-free fit (Y ~ strain + 0).
results = t(apply(tissueexpr, 1, function(Y){
  pval = anova(lm(Y ~ strain))[[5]][1]
  return(c(coef(lm(Y ~ strain + 0)), pval))
}))
# Strip the array-probe suffix so row names are Ensembl gene ids.
rownames(results) <- gsub("_at", "", rownames(results))
results[1:5,]
# load in table of fibrosis genes
setwd("/home/aimee/Aimee Test Data")
fibgenes <- read.table("fibrosisgenes.txt", sep="\t", header=TRUE, row.names=1, fill=TRUE)
# subset for the fibrosis genes
fibresults = results[which(rownames(results) %in% fibgenes[, "ensembl_gene_id"]), ]
colnames(fibresults) = c("meanB6", "meanS1", "meanS2", "p.value")
fibresults[1:5, ]
#annotate file with ensembl data (biomaRt; requires network access)
annotate <- function(significant){
  library(biomaRt)
  bio.mart <- useMart("ensembl", dataset="mmusculus_gene_ensembl")
  res.biomart <- getBM(attributes = c("ensembl_gene_id", "mgi_id", "mgi_symbol", "mgi_description", "chromosome_name", "start_position", "end_position", "strand"),
                       filters = c("ensembl_gene_id"),
                       values = rownames(significant), mart = bio.mart)
  # Reindex the biomaRt result by gene id so it lines up with `significant`.
  rownames(res.biomart) <- res.biomart[,1]
  annotated <- cbind(res.biomart[rownames(significant), ], significant)
  return(annotated)
}
fibresults = annotate(fibresults)
fibresults[1:5, ]
# extract data table for the tissue -> change file name for specific tissue
write.table(fibresults, "ResultsLiver_Expression_DeikeClariomS.txt", sep="\t", row.names = TRUE)
### load in normalized expression data ###
setwd("/home/aimee/Aimee Test Data")
expressions <- read.csv("Expressionlevels_DeikeClarionS.norm.samples.txt", sep = "\t", header = TRUE)
ids = colnames(expressions)
#extract liver expression data <- adjust this so any tissue can be used for that (P - Pankreas , SM skeletal Mucle, GF - Gonadal fat)
tissue <- which(grepl("L", ids))
# make heatmat to see if correlation looks normal and no samples are switched (quality control switch)
corM <- cor(expressions)
png("heatmap.png", width = 2000, height = 2000)
heatmap(corM, scale = "none")
dev.off()
#test for significant differences in tissue expression data (S1/S2/B6)
tissueexpr = expressions[, tissue]
expressions = expressions[, -tissue]
tissueexpr[1:4,]
strain = substr(colnames(tissueexpr), 0,2)
results = t(apply(tissueexpr, 1, function(Y){
pval = anova(lm(Y ~ strain))[[5]][1]
return(c(coef(lm(Y ~ strain + 0)), pval))
}))
rownames(results) <- gsub("_at", "", rownames(results))
results[1:5,]
# load in table of fibrosis genes
setwd("/home/aimee/Aimee Test Data")
fibgenes <- read.table("fibrosisgenes.txt", sep="\t", header=TRUE, row.names=1, fill=TRUE)
# subset for the fibrosis genes
fibresults = results[which(rownames(results) %in% fibgenes[, "ensembl_gene_id"]), ]
colnames(fibresults) = c("meanB6", "meanS1", "meanS2", "p.value")
fibresults[1:5, ]
#annotate file with ensemble data
# Attach Ensembl/MGI annotation to a results matrix.
#
# Queries the Ensembl biomart for the mouse genome (mmusculus), looks up
# every gene id found in rownames(significant), and column-binds the
# retrieved annotation (gene id, MGI id/symbol/description, genomic
# location, strand) in front of the original columns. Annotation rows are
# reindexed so they line up with the row order of `significant`.
annotate <- function(significant) {
  library(biomaRt)
  ens_ids <- rownames(significant)
  mouse_mart <- useMart("ensembl", dataset = "mmusculus_gene_ensembl")
  wanted <- c("ensembl_gene_id", "mgi_id", "mgi_symbol", "mgi_description",
              "chromosome_name", "start_position", "end_position", "strand")
  lookup <- getBM(attributes = wanted,
                  filters = c("ensembl_gene_id"),
                  values = ens_ids, mart = mouse_mart)
  rownames(lookup) <- lookup[, 1]
  # Reindex by the input ids so annotation rows match `significant`.
  cbind(lookup[ens_ids, ], significant)
}
fibresults = annotate(fibresults)
fibresults[1:5, ]
# extract data table for the tissue -> change file name for specific tissue
write.table(fibresults, "ResultsLiver_Expression_DeikeClariomS.txt", sep="\t", row.names = TRUE) |
library(rcd)
library(igraph)
library("maps")
library("geosphere")
source('../clinet/utils_network.R')
library(scatterplot3d)
library(ggplot2)
library(glmnet)
library(e1071)
library(kernlab)
library(maptools)
library(fields)
data(wrld_simpl)
#https://www.esrl.noaa.gov/psd/data/gridded/data.cmap.html
#load("./dat/precip_mon_mean_mon_mean_removed_sub.RData")
load("./dat/precip_mon_mean_mon_mean_not_removed_sub.RData")
#load("../clinet/data/air_mon_mean_mon_mean_removed_sub.RData")
#load("../clinet/data/air_mon_mean_mon_mean_not_removed_sub.RData")
#load("../clinet/data/cor_rcd_matrix.RData")
source('utils_lscm.R')
library(mvtnorm)
library(tikzDevice)
idlon=1:72#1:144#1:72#(1:36)*2
idlat=7:32#16:63#7:32#(2:17)*2
NLON=length(idlon); NLAT=length(idlat)
LON=LON[idlon]
LAT=LAT[idlat]
X=X[idlon,idlat,]
dfv=NULL
count=0
for(i in 1:NLON){ # stack by row
for(j in 1:NLAT){
count=count+1
dfv=rbind(dfv,c(count,i,j,LON[i],LAT[j],X[i,j,1]))#372+360+1
}
}
colnames(dfv)=c("vertex","idlon","idlat","lon","lat","x")
dfv=data.frame(dfv)
p=dim(dfv)[1]
X1=NULL # Long Data (2D)
for(i in 1:NLON){
for(j in 1:NLAT){
X1=cbind(X1,X[i,j,])
}
} # X1 is the final data matrix to work on. Vertex data frame is in dfv. Edge data frame need analysis with correlation
#plot_lonlat_df(dfv,vcol="x",region="world",CEX=2)
# sum(abs(X1)>1e10)
# which(abs(X)>1e3,arr.ind = T)
# plot(apply(X1,2,function(x){sum(abs(x)>1e3)==0}))
# X1=apply(X1,2,function(x){if(any(abs(x)>1e3)){x[abs(x)>1e3]=mean(x[abs(x)<1e3]);return(x)}else{return(x)}})
# X1[abs(X1)>1e3]=NA
#X1n=scale(X1)
t0=proc.time()
# Step 1: Estimate model parameters
#S=cor(X1,use="pairwise.complete.obs")#+1e-5*diag(1:dim(X1)[2])
S=cor(X1)
hatfit=a_efratio_hat(S=S,p=nrow(S),d=2,la=1)
a_hat=hatfit[-length(hatfit)]
efratio_hat=hatfit[length(hatfit)]
#lstar=LS5m(a=a_hat,ef_ratio=efratio_hat,d=2,dd=9,sf=1,r=1)$ls5 # length(a)=1
# Step 2: Reconstruct the model with the estimated parameters (empirical Shat will be useless)
para1=list(n=dim(X1)[1],p=dim(X1)[2],a=a_hat,r=0.8,sf=1,se=efratio_hat,i0=NULL,j0=NULL,M=NULL,d=1,dd=5,py=NLAT,method="equalvar",N=100)
para1$i0=round(para1$p*0.23); para1$j0=round(para1$p*0.77)
para2=para1; para2$d=2; para2$dd=9;
covmodel=TCM(para2) # generating true covariance model
lstar=LS5(covmodel$S1,para2,Amat=NULL)$ls5
lstar
# Step 3: use model optimal lambda to find modified estimator (need empirical Shat and optimal lambda)
Sbar=sigmabar(S=S,mu=lstar,bandwidth=NLAT,dd=9,py=NLAT)
t1=proc.time()-t0
# par(mfrow=c(1,1))
# printM(S*(S>0.8))
# printM(Sbar*(Sbar>0.8))#Sbar[232,441] # 100 vs 207 225
# dfv[100,]
# dfv[207,]
# plot(X[6,10,],X[12,9,])
#
# plot(X1[,232],X1[,441]) # Indian vs Brazil part
# plot(X1[,40],X1[,137]) # El nino part
# plot arc map
thres=5
S0=nn_ind(NLON,NLAT,thres,thres) # nearest neighbour indicator matrix
par(mfrow=c(1,1))
#net1=graph_from_adjacency_matrix((abs(S)>0.8)*(!S0),mode = "undirected")
net1=graph_from_adjacency_matrix(((abs(S)*(!S0))>0.83) & ((abs(S)*(!S0))<0.832),mode = "undirected") #0.525precip 0.515/150 # 0.8485 to avoid tele
dfe1=as_edgelist(net1)
plot_arc(dfv,dfe1,cap="Precipitation")
net1=graph_from_adjacency_matrix(((abs(S)*(!S0))>0.8485) ,mode = "undirected") #0.525precip 0.515/150 # 0.8485 to avoid tele
dfe1=as_edgelist(net1)
plot_arc(dfv,dfe1,cap="Precipitation")
#dfv[dfe1[,1],5]< -10 # 79 261 (14th row)
#net2=graph_from_adjacency_matrix((abs(Sbar)>0.8)*(!S0),mode = "undirected")
### paper plot ******
net2=graph_from_adjacency_matrix((abs(Sbar)*(!S0))>1.1,mode = "undirected") #0.675/100precip 0.61305/150; 1.056
dfe2=as_edgelist(net2)
plot_arc(dfv,dfe2,cap="Precipitation")
nrow(dfe2)
net3=graph_from_adjacency_matrix((abs(S)*(!S0)-abs(Sbar)*(!S0))>0.08,mode = "undirected")
net3=graph_from_adjacency_matrix((S-Sbar)>0.2 & S>0.8 & !S0,mode = "undirected")
dfe3=as_edgelist(net3)
plot_arc(dfv,dfe3,cap="Precipitation")
#net3=graph_from_adjacency_matrix((abs(S)*(!S0)-abs(Sbar)*(!S0))>0.08,mode = "undirected")
### paper plot ******
net3=graph_from_adjacency_matrix((S-Sbar)>0.49 & S>0.4 & !S0,mode = "undirected")
dfe3=as_edgelist(net3)
plot_arc(dfv,dfe3,cap="Precipitation")
for(i in 1:nrow(dfe3)){
cat(i,S[dfe3[i,1],dfe3[i,2]],Sbar[dfe3[i,1],dfe3[i,2]],"\n")
}
for(i in 1:nrow(dfe1)){
cat(i,S[dfe1[i,1],dfe1[i,2]],Sbar[dfe1[i,1],dfe1[i,2]],"\n")
}
############### rank S as a long vector ###############
#### list of S Sbar S0
LS=matrix(0,sum(S0==0),3)
count=0
for(i in 1:p){
for(j in i:p){
if(!S0[i,j]){
count=count+1
LS[count,]=c(S[i,j],abs(S[i,j])-abs(Sbar[i,j]),Sbar[i,j])
}
}
}
#i00=688;j00=1398; i00=899; j00=1133; i00=1473; j00=1479
i00=660; j00=1077
i00=1078; j00=1398
q1=ecdf(LS[,1])
q2=ecdf(LS[,3])
q1(S[i00,j00])
q2(Sbar[i00,j00])
sum(LS[,1]>S[i00,j00])
sum(LS[,3]>Sbar[i00,j00])
tikz("./fig/real_plots.tex", width = 6.5, height = 3.25)
par(mfrow=c(1,2))
xx=X1[,232]; yy=X1[,441]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$5^\\circ$S, $35^\\circ$W",
ylab="$5^\\circ$N, $95^\\circ$E",pch=19,main="Temperature")
abline(lm(xx~yy), col="blue",lty=2)
#text(c(-1,1),paste0("$\\rho=$",round(cor(xx,yy),2)),col="blue")
xx=X1[,40]; yy=X1[,137]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$5^\\circ$S, $155^\\circ$W",
ylab="$5^\\circ$N, $95^\\circ$W",pch=19,main="Temperature")
abline(lm(xx~yy), col="blue",lty=2)
#text(c(-1,1),paste0("$\\rho=$",round(cor(xx,yy),2)),col="blue")
dev.off()
tikz("./fig/real_plots2.tex", width = 6.5, height = 6.5)
par(mfrow=c(2,2))
ii=79;jj=261
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$58.75^\\circ$S, $163.75^\\circ$W",
ylab="$58.75^\\circ$S, $128.75^\\circ$W",pch=19,main="Precipitation")
abline(lm(xx~yy), col="blue",lty=2)
ii=79;jj=262
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$58.75^\\circ$S, $163.75^\\circ$W",
ylab="$53.75^\\circ$S, $128.75^\\circ$W",pch=19,main="Precipitation")
abline(lm(xx~yy), col="blue",lty=2)
ii=79+NLAT; jj=261
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$58.75^\\circ$S, $158.75^\\circ$W",
ylab="$58.75^\\circ$S, $128.75^\\circ$W",pch=19,main="Precipitation")
abline(lm(xx~yy), col="blue",lty=2)
ii=79+NLAT; jj=262
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$58.75^\\circ$S, $158.75^\\circ$W",
ylab="$53.75^\\circ$S, $128.75^\\circ$W",pch=19,main="Precipitation")
abline(lm(xx~yy), col="blue",lty=2)
dev.off()
#### screening features ####
dfs=NULL
N=dim(S)[1]
for(i in 1:(N-2)){
#print(i)
for(j in (i+1):(N-1)){
if((i%%NLON>=2)){
if((abs(S[i,j])-mean(abs(S[i,j+1]),abs(S[i,j-1])))>0.3){
dfs=rbind(dfs,c(i,j,S[i,j],S[i,j-1],S[i,j+1]))
}
}
}
}
# Temp, removed, 5*5 deg, model length(a)=2, 2d model
i00=173; j00=7070
i00=57; j00=4559
i00=2544; j00=4028
i00=981; j00=1136
i00=871; j00=1398
i00=1078; j00=1398
i00=660; j00=1077
S[i00+c(-1,0,1),j00+c(-1,0,1)]
S[i00+c(-NLAT,0,NLAT),j00+c(-NLAT,0,NLAT)]
plot(X1[,i00],X1[,j00+NLAT])#,xlim=c(-10,10),ylim=c(-10,10)
plot(rank(X1[,i00]),rank(X1[,j00-1]))
############### paper plots ###############
###### real_plot3 ######
tikz("./fig/real_plots3.tex", width = 6.5, height = 6.5)
i00=1078; j00=1398;
#i00=688; j00=1398
par(mfrow=c(2,2))
ii=i00;jj=j00
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$3.75^\\circ$S, $26.25^\\circ$W",
ylab="$36.25^\\circ$N, $86.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00+1;jj=j00
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$1.25^\\circ$N, $26.25^\\circ$W",
ylab="$36.25^\\circ$N, $86.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00;jj=j00+NLAT
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$3.75^\\circ$S, $26.25^\\circ$W",
ylab="$36.25^\\circ$N, $91.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,4,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00;jj=j00-NLAT
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$3.75^\\circ$S, $26.25^\\circ$W",
ylab="$36.25^\\circ$N, $81.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,2,paste0("cor=",round(cor(xx,yy),2)),col="blue")
dev.off()
###### real_plot4 ######
tikz("./fig/real_plots4.tex", width = 6.5, height = 6.5)
i00=660; j00=1077; #i00=688; j00=1398
par(mfrow=c(2,2))
ii=i00;jj=j00
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$13.75^\\circ$S, $53.75^\\circ$W",
ylab="$8.75^\\circ$S, $26.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00+1;jj=j00
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$8.75^\\circ$S, $53.75^\\circ$W",
ylab="$8.75^\\circ$S, $26.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00;jj=j00+NLAT
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$13.75^\\circ$S, $53.75^\\circ$W",
ylab="$8.75^\\circ$S, $31.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00;jj=j00-NLAT
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$13.75^\\circ$S, $53.75^\\circ$W",
ylab="$8.75^\\circ$S, $21.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
dev.off()
############ Add: find null cutoff ############
nk=1000
lmax=rep(0,nk)
for(k in 1:nk){
#t0=proc.time()
print(k)
data = mvrnorm(dim(X)[3], mu = rep(0,p), Sigma = covmodel$S0)
Shat=cor(data)
lmax[k]=maxd.r(abs(Shat)*(!S0),6)
rm(data)
#proc.time()-t0
}
save(lmax,file="results/real_H0_lmax.RData")
load("results/real_H0_lmax.RData")
quantile(lmax,0.999) #0.4935794
max(abs(LS[,1])) #0.889298
sum(abs(LS[,1])>quantile(lmax,0.999)) #55387
55387/length(LS[,1])
| /real_data_clim.R | no_license | liyi-1989/covtest | R | false | false | 10,473 | r | library(rcd)
library(igraph)
library("maps")
library("geosphere")
source('../clinet/utils_network.R')
library(scatterplot3d)
library(ggplot2)
library(glmnet)
library(e1071)
library(kernlab)
library(maptools)
library(fields)
data(wrld_simpl)
#https://www.esrl.noaa.gov/psd/data/gridded/data.cmap.html
#load("./dat/precip_mon_mean_mon_mean_removed_sub.RData")
load("./dat/precip_mon_mean_mon_mean_not_removed_sub.RData")
#load("../clinet/data/air_mon_mean_mon_mean_removed_sub.RData")
#load("../clinet/data/air_mon_mean_mon_mean_not_removed_sub.RData")
#load("../clinet/data/cor_rcd_matrix.RData")
source('utils_lscm.R')
library(mvtnorm)
library(tikzDevice)
idlon=1:72#1:144#1:72#(1:36)*2
idlat=7:32#16:63#7:32#(2:17)*2
NLON=length(idlon); NLAT=length(idlat)
LON=LON[idlon]
LAT=LAT[idlat]
X=X[idlon,idlat,]
dfv=NULL
count=0
for(i in 1:NLON){ # stack by row
for(j in 1:NLAT){
count=count+1
dfv=rbind(dfv,c(count,i,j,LON[i],LAT[j],X[i,j,1]))#372+360+1
}
}
colnames(dfv)=c("vertex","idlon","idlat","lon","lat","x")
dfv=data.frame(dfv)
p=dim(dfv)[1]
X1=NULL # Long Data (2D)
for(i in 1:NLON){
for(j in 1:NLAT){
X1=cbind(X1,X[i,j,])
}
} # X1 is the final data matrix to work on. Vertex data frame is in dfv. Edge data frame need analysis with correlation
#plot_lonlat_df(dfv,vcol="x",region="world",CEX=2)
# sum(abs(X1)>1e10)
# which(abs(X)>1e3,arr.ind = T)
# plot(apply(X1,2,function(x){sum(abs(x)>1e3)==0}))
# X1=apply(X1,2,function(x){if(any(abs(x)>1e3)){x[abs(x)>1e3]=mean(x[abs(x)<1e3]);return(x)}else{return(x)}})
# X1[abs(X1)>1e3]=NA
#X1n=scale(X1)
t0=proc.time()
# Step 1: Estimate model parameters
#S=cor(X1,use="pairwise.complete.obs")#+1e-5*diag(1:dim(X1)[2])
S=cor(X1)
hatfit=a_efratio_hat(S=S,p=nrow(S),d=2,la=1)
a_hat=hatfit[-length(hatfit)]
efratio_hat=hatfit[length(hatfit)]
#lstar=LS5m(a=a_hat,ef_ratio=efratio_hat,d=2,dd=9,sf=1,r=1)$ls5 # length(a)=1
# Step 2: Reconstruct the model with the estimated parameters (empirical Shat will be useless)
para1=list(n=dim(X1)[1],p=dim(X1)[2],a=a_hat,r=0.8,sf=1,se=efratio_hat,i0=NULL,j0=NULL,M=NULL,d=1,dd=5,py=NLAT,method="equalvar",N=100)
para1$i0=round(para1$p*0.23); para1$j0=round(para1$p*0.77)
para2=para1; para2$d=2; para2$dd=9;
covmodel=TCM(para2) # generating true covariance model
lstar=LS5(covmodel$S1,para2,Amat=NULL)$ls5
lstar
# Step 3: use model optimal lambda to find modified estimator (need empirical Shat and optimal lambda)
Sbar=sigmabar(S=S,mu=lstar,bandwidth=NLAT,dd=9,py=NLAT)
t1=proc.time()-t0
# par(mfrow=c(1,1))
# printM(S*(S>0.8))
# printM(Sbar*(Sbar>0.8))#Sbar[232,441] # 100 vs 207 225
# dfv[100,]
# dfv[207,]
# plot(X[6,10,],X[12,9,])
#
# plot(X1[,232],X1[,441]) # Indian vs Brazil part
# plot(X1[,40],X1[,137]) # El nino part
# plot arc map
thres=5
S0=nn_ind(NLON,NLAT,thres,thres) # nearest neighbour indicator matrix
par(mfrow=c(1,1))
#net1=graph_from_adjacency_matrix((abs(S)>0.8)*(!S0),mode = "undirected")
net1=graph_from_adjacency_matrix(((abs(S)*(!S0))>0.83) & ((abs(S)*(!S0))<0.832),mode = "undirected") #0.525precip 0.515/150 # 0.8485 to avoid tele
dfe1=as_edgelist(net1)
plot_arc(dfv,dfe1,cap="Precipitation")
net1=graph_from_adjacency_matrix(((abs(S)*(!S0))>0.8485) ,mode = "undirected") #0.525precip 0.515/150 # 0.8485 to avoid tele
dfe1=as_edgelist(net1)
plot_arc(dfv,dfe1,cap="Precipitation")
#dfv[dfe1[,1],5]< -10 # 79 261 (14th row)
#net2=graph_from_adjacency_matrix((abs(Sbar)>0.8)*(!S0),mode = "undirected")
### paper plot ******
net2=graph_from_adjacency_matrix((abs(Sbar)*(!S0))>1.1,mode = "undirected") #0.675/100precip 0.61305/150; 1.056
dfe2=as_edgelist(net2)
plot_arc(dfv,dfe2,cap="Precipitation")
nrow(dfe2)
net3=graph_from_adjacency_matrix((abs(S)*(!S0)-abs(Sbar)*(!S0))>0.08,mode = "undirected")
net3=graph_from_adjacency_matrix((S-Sbar)>0.2 & S>0.8 & !S0,mode = "undirected")
dfe3=as_edgelist(net3)
plot_arc(dfv,dfe3,cap="Precipitation")
#net3=graph_from_adjacency_matrix((abs(S)*(!S0)-abs(Sbar)*(!S0))>0.08,mode = "undirected")
### paper plot ******
net3=graph_from_adjacency_matrix((S-Sbar)>0.49 & S>0.4 & !S0,mode = "undirected")
dfe3=as_edgelist(net3)
plot_arc(dfv,dfe3,cap="Precipitation")
for(i in 1:nrow(dfe3)){
cat(i,S[dfe3[i,1],dfe3[i,2]],Sbar[dfe3[i,1],dfe3[i,2]],"\n")
}
for(i in 1:nrow(dfe1)){
cat(i,S[dfe1[i,1],dfe1[i,2]],Sbar[dfe1[i,1],dfe1[i,2]],"\n")
}
############### rank S as a long vector ###############
#### list of S Sbar S0
LS=matrix(0,sum(S0==0),3)
count=0
for(i in 1:p){
for(j in i:p){
if(!S0[i,j]){
count=count+1
LS[count,]=c(S[i,j],abs(S[i,j])-abs(Sbar[i,j]),Sbar[i,j])
}
}
}
#i00=688;j00=1398; i00=899; j00=1133; i00=1473; j00=1479
i00=660; j00=1077
i00=1078; j00=1398
q1=ecdf(LS[,1])
q2=ecdf(LS[,3])
q1(S[i00,j00])
q2(Sbar[i00,j00])
sum(LS[,1]>S[i00,j00])
sum(LS[,3]>Sbar[i00,j00])
tikz("./fig/real_plots.tex", width = 6.5, height = 3.25)
par(mfrow=c(1,2))
xx=X1[,232]; yy=X1[,441]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$5^\\circ$S, $35^\\circ$W",
ylab="$5^\\circ$N, $95^\\circ$E",pch=19,main="Temperature")
abline(lm(xx~yy), col="blue",lty=2)
#text(c(-1,1),paste0("$\\rho=$",round(cor(xx,yy),2)),col="blue")
xx=X1[,40]; yy=X1[,137]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$5^\\circ$S, $155^\\circ$W",
ylab="$5^\\circ$N, $95^\\circ$W",pch=19,main="Temperature")
abline(lm(xx~yy), col="blue",lty=2)
#text(c(-1,1),paste0("$\\rho=$",round(cor(xx,yy),2)),col="blue")
dev.off()
tikz("./fig/real_plots2.tex", width = 6.5, height = 6.5)
par(mfrow=c(2,2))
ii=79;jj=261
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$58.75^\\circ$S, $163.75^\\circ$W",
ylab="$58.75^\\circ$S, $128.75^\\circ$W",pch=19,main="Precipitation")
abline(lm(xx~yy), col="blue",lty=2)
ii=79;jj=262
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$58.75^\\circ$S, $163.75^\\circ$W",
ylab="$53.75^\\circ$S, $128.75^\\circ$W",pch=19,main="Precipitation")
abline(lm(xx~yy), col="blue",lty=2)
ii=79+NLAT; jj=261
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$58.75^\\circ$S, $158.75^\\circ$W",
ylab="$58.75^\\circ$S, $128.75^\\circ$W",pch=19,main="Precipitation")
abline(lm(xx~yy), col="blue",lty=2)
ii=79+NLAT; jj=262
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$58.75^\\circ$S, $158.75^\\circ$W",
ylab="$53.75^\\circ$S, $128.75^\\circ$W",pch=19,main="Precipitation")
abline(lm(xx~yy), col="blue",lty=2)
dev.off()
#### screening features ####
dfs=NULL
N=dim(S)[1]
for(i in 1:(N-2)){
#print(i)
for(j in (i+1):(N-1)){
if((i%%NLON>=2)){
if((abs(S[i,j])-mean(abs(S[i,j+1]),abs(S[i,j-1])))>0.3){
dfs=rbind(dfs,c(i,j,S[i,j],S[i,j-1],S[i,j+1]))
}
}
}
}
# Temp, removed, 5*5 deg, model length(a)=2, 2d model
i00=173; j00=7070
i00=57; j00=4559
i00=2544; j00=4028
i00=981; j00=1136
i00=871; j00=1398
i00=1078; j00=1398
i00=660; j00=1077
S[i00+c(-1,0,1),j00+c(-1,0,1)]
S[i00+c(-NLAT,0,NLAT),j00+c(-NLAT,0,NLAT)]
plot(X1[,i00],X1[,j00+NLAT])#,xlim=c(-10,10),ylim=c(-10,10)
plot(rank(X1[,i00]),rank(X1[,j00-1]))
############### paper plots ###############
###### real_plot3 ######
tikz("./fig/real_plots3.tex", width = 6.5, height = 6.5)
i00=1078; j00=1398;
#i00=688; j00=1398
par(mfrow=c(2,2))
ii=i00;jj=j00
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$3.75^\\circ$S, $26.25^\\circ$W",
ylab="$36.25^\\circ$N, $86.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00+1;jj=j00
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$1.25^\\circ$N, $26.25^\\circ$W",
ylab="$36.25^\\circ$N, $86.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00;jj=j00+NLAT
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$3.75^\\circ$S, $26.25^\\circ$W",
ylab="$36.25^\\circ$N, $91.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,4,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00;jj=j00-NLAT
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$3.75^\\circ$S, $26.25^\\circ$W",
ylab="$36.25^\\circ$N, $81.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,2,paste0("cor=",round(cor(xx,yy),2)),col="blue")
dev.off()
###### real_plot4 ######
tikz("./fig/real_plots4.tex", width = 6.5, height = 6.5)
i00=660; j00=1077; #i00=688; j00=1398
par(mfrow=c(2,2))
ii=i00;jj=j00
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$13.75^\\circ$S, $53.75^\\circ$W",
ylab="$8.75^\\circ$S, $26.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00+1;jj=j00
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$8.75^\\circ$S, $53.75^\\circ$W",
ylab="$8.75^\\circ$S, $26.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00;jj=j00+NLAT
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$13.75^\\circ$S, $53.75^\\circ$W",
ylab="$8.75^\\circ$S, $31.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
ii=i00;jj=j00-NLAT
xx=X1[,ii]; yy=X1[,jj]
plot(xx,yy,col=rgb(1,0.5,0,alpha=0.5),xlab="$13.75^\\circ$S, $53.75^\\circ$W",
ylab="$8.75^\\circ$S, $21.25^\\circ$E",pch=19,main="Precipitation")
#abline(lm(xx~yy), col="blue",lty=2)
text(2,8,paste0("cor=",round(cor(xx,yy),2)),col="blue")
dev.off()
############ Add: find null cutoff ############
nk=1000
lmax=rep(0,nk)
for(k in 1:nk){
#t0=proc.time()
print(k)
data = mvrnorm(dim(X)[3], mu = rep(0,p), Sigma = covmodel$S0)
Shat=cor(data)
lmax[k]=maxd.r(abs(Shat)*(!S0),6)
rm(data)
#proc.time()-t0
}
save(lmax,file="results/real_H0_lmax.RData")
load("results/real_H0_lmax.RData")
quantile(lmax,0.999) #0.4935794
max(abs(LS[,1])) #0.889298
sum(abs(LS[,1])>quantile(lmax,0.999)) #55387
55387/length(LS[,1])
|
# Defuzzify three numeric columns of `treningowa` into category labels by
# picking, for each row, the fuzzy set with the highest membership degree.
#
# The raw numeric columns are snapshotted before the loop: the original code
# overwrote each column with character labels while still reading the same
# column on later rows, which coerced the column to character after the
# first row and broke the fuzzy membership lookups.
alc_raw  <- treningowa$alcohol
acid_raw <- treningowa$malic_acid
int_raw  <- treningowa$color_intensity
for (i in seq_len(nrow(treningowa))) {
  # Alcohol: two fuzzy sets, keep the more strongly matched one.
  val1 <- attr(alc_var$alc_niski[alc_raw[i]], "memberships")
  val2 <- attr(alc_var$alc_wysoki[alc_raw[i]], "memberships")
  print(val1)
  print(val2)
  treningowa$alcohol[i] <- if (val1 < val2) "alc_wysoki" else "alc_niski"

  # Malic acid: three fuzzy sets; which.max picks the true maximum.
  # (The original if/else-if chain could label a row "acid_niski"
  # whenever val1 > val2, even when val3 was the largest membership.)
  val1 <- attr(acid_var$acid_niski[acid_raw[i]], "memberships")
  val2 <- attr(acid_var$acid_sredni[acid_raw[i]], "memberships")
  val3 <- attr(acid_var$acid_wysoki[acid_raw[i]], "memberships")
  treningowa$malic_acid[i] <-
    c("acid_niski", "acid_sredni", "acid_wysoki")[which.max(c(val1, val2, val3))]

  # Colour intensity: same three-way maximum-membership rule.
  val1 <- attr(intensity_var$intensity_niskie[int_raw[i]], "memberships")
  val2 <- attr(intensity_var$intensity_srednie[int_raw[i]], "memberships")
  val3 <- attr(intensity_var$intensity_wysokie[int_raw[i]], "memberships")
  treningowa$color_intensity[i] <-
    c("intensity_niskie", "intensity_srednie", "intensity_wysokie")[which.max(c(val1, val2, val3))]
}
| /R_changing_loop.R | no_license | Macu92/uczelnia-zssi | R | false | false | 1,214 | r | for(i in 1:nrow(treningowa)){
val1<-attr(alc_var$alc_niski[treningowa$alcohol[i]],"memberships");
val2<-attr(alc_var$alc_wysoki[treningowa$alcohol[i]],"memberships");
print(val1);
print(val2);
if(val1<val2){
treningowa$alcohol[i]="alc_wysoki"
}else{
treningowa$alcohol[i]="alc_niski"
}
val1<-attr(acid_var$acid_niski[treningowa$malic_acid[i]],"memberships");
val2<-attr(acid_var$acid_sredni[treningowa$malic_acid[i]],"memberships");
val3<-attr(acid_var$acid_wysoki[treningowa$malic_acid[i]],"memberships");
if(val1>val2){
treningowa$malic_acid[i]="acid_niski"
}else if(val2>val3){
treningowa$malic_acid[i]="acid_sredni"
}else{
treningowa$malic_acid[i]="acid_wysoki"
}
val1<-attr(intensity_var$intensity_niskie[treningowa$color_intensity[i]],"memberships");
val2<-attr(intensity_var$intensity_srednie[treningowa$color_intensity[i]],"memberships");
val3<-attr(intensity_var$intensity_wysokie[treningowa$color_intensity[i]],"memberships");
if(val1>val2){
treningowa$color_intensity[i]="intensity_niskie"
}else if(val2>val3){
treningowa$color_intensity[i]="intensity_srednie"
}else{
treningowa$color_intensity[i]="intensity_wysokie"
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_facility_bo_excel.R
\name{write_facility_excel}
\alias{write_facility_excel}
\title{Write NSSP BioSense Platform Data Quality Summary Reports for one Facility, Use excel as input}
\usage{
write_facility_excel(
input,
facility = NA,
receiver = NA,
table = NA,
mft = NA,
username = NA,
password = NA,
start = NA,
end = NA,
directory = NA,
field = NA,
exclude = NA,
optional = T,
email = NA,
sender = NA,
email_password = NA,
personname = NA,
title = NA,
phone = NA,
message = NA
)
}
\arguments{
\item{input}{location of input.xlsx file.}
\item{facility}{The C_Biosense_Facility_ID for the facility that you wish to generate and write the report for.}
\item{receiver}{Email address of receiver.}
\item{table}{The table that you want to retrieve the data from, as a string.}
\item{mft}{The MFT (master facilities table) from where the facility name will be retrieved, as a string.}
\item{username}{Your BioSense username, as a string. This is the same username you may use to log into RStudio or Adminer.}
\item{password}{Your BioSense password, as a string. This is the same password you may use to log into RStudio or Adminer.}
\item{start}{The start date time that you wish to begin pulling data from, as a string.}
\item{end}{The end data time that you wish to stop pulling data from, as a string.}
\item{directory}{The directory where you would like to write the reports to (i.e., "~/Documents/MyReports"), as a string.}
\item{field}{Default NA. Can add a string with delimiter of ':'. Only fields that countain those words will be included in the final report.}
\item{exclude}{Default NA. Can add a string with delimiter of ':'. Exclude fields with certain keywords in the final report.}
\item{optional}{Default True. If False then remove all optional fields}
\item{email}{Default False. If True, then the function will attempt to send out a form}
\item{sender}{Email address of sender. Make sure it's kdhe.KS.gov}
\item{email_password}{Your Email Password}
\item{personname}{Your Name to be used in your email text}
\item{title}{Your job title to be used in your email text}
\item{phone}{Your phone number to be used in your email text
@param message The email message to be sent. Allows for composition of custom messages.}
}
\description{
This performs `write_facilty_report` function for all . It will generate summary report for all specified facility.
This function uses excel generated information although one can override the information from the input.xlsx
The summary workbook shows percents and counts of nulls and invalids, Additionally it generates a timeliness
report and creates a table. The program can send out a report to designated email address
}
\examples{
library(biosensequality)
write_facility_excel("Input.xlsx")
## You can override options from input by filling in the parameters you need to override
write_facility_excel("Input.xlsx", email=F)
}
| /man/write_facility_excel.Rd | no_license | andrew-simpson/KSSPReport | R | false | true | 3,019 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_facility_bo_excel.R
\name{write_facility_excel}
\alias{write_facility_excel}
\title{Write NSSP BioSense Platform Data Quality Summary Reports for one Facility, Use excel as input}
\usage{
write_facility_excel(
input,
facility = NA,
receiver = NA,
table = NA,
mft = NA,
username = NA,
password = NA,
start = NA,
end = NA,
directory = NA,
field = NA,
exclude = NA,
optional = T,
email = NA,
sender = NA,
email_password = NA,
personname = NA,
title = NA,
phone = NA,
message = NA
)
}
\arguments{
\item{input}{location of input.xlsx file.}
\item{facility}{The C_Biosense_Facility_ID for the facility that you wish to generate and write the report for.}
\item{receiver}{Email address of receiver.}
\item{table}{The table that you want to retrieve the data from, as a string.}
\item{mft}{The MFT (master facilities table) from where the facility name will be retrieved, as a string.}
\item{username}{Your BioSense username, as a string. This is the same username you may use to log into RStudio or Adminer.}
\item{password}{Your BioSense password, as a string. This is the same password you may use to log into RStudio or Adminer.}
\item{start}{The start date time that you wish to begin pulling data from, as a string.}
\item{end}{The end data time that you wish to stop pulling data from, as a string.}
\item{directory}{The directory where you would like to write the reports to (i.e., "~/Documents/MyReports"), as a string.}
\item{field}{Default NA. Can add a string with delimiter of ':'. Only fields that countain those words will be included in the final report.}
\item{exclude}{Default NA. Can add a string with delimiter of ':'. Exclude fields with certain keywords in the final report.}
\item{optional}{Default TRUE. If FALSE, then all optional fields are removed.}
\item{email}{Default FALSE. If TRUE, then the function will attempt to send out an email report.}
\item{sender}{Email address of sender. Make sure it is a kdhe.KS.gov address.}
\item{email_password}{Your email password.}
\item{personname}{Your name, to be used in your email text.}
\item{title}{Your job title, to be used in your email text.}
\item{phone}{Your phone number, to be used in your email text.}
\item{message}{The email message to be sent. Allows for composition of custom messages.}
}
\description{
This runs the `write_facility_report` function using an Excel file as input, generating a summary report for the specified facility.
The function reads its settings from the input.xlsx file, although any of them can be overridden by supplying the corresponding parameters directly.
The summary workbook shows percents and counts of nulls and invalids. Additionally, it generates a timeliness
report and creates a table. The program can send the report to a designated email address.
}
\examples{
library(biosensequality)
write_facility_excel("Input.xlsx")
## You can override options from input by filling in the parameters you need to override
write_facility_excel("Input.xlsx", email=F)
}
|
testlist <- list(b = c(0, 0, 0, 0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) | /metacoder/inst/testfiles/intersect_line_rectangle/AFL_intersect_line_rectangle/intersect_line_rectangle_valgrind_files/1615768919-test.R | permissive | akhikolla/updatedatatype-list3 | R | false | false | 630 | r | testlist <- list(b = c(0, 0, 0, 0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) |
\name{portfolio-class}
\docType{class}
\alias{portfolio-class}
\alias{plot,portfolio,missing-method}
\alias{position_add,portfolio,character,ANY,ANY,matrix-method}
\alias{position_add,portfolio,character,ANY,ANY,missing-method}
\alias{position_add,portfolio,character,ANY,missing,matrix-method}
\alias{position_add,portfolio,character,ANY,missing,missing-method}
\alias{portfolio_create,portfolio,missing,missing,missing-method}
\alias{show,portfolio-method}
\title{Class \code{"portfolio"}}
\description{
Container class for storing portfolio parameters
}
\section{Slots}{
\describe{
\item{\code{java}:}{Object of class \code{"jobjRef"} ~~ }
\item{\code{optimization_info}:}{Object of class \code{"ANY"} ~~ }
}
}
\section{Methods}{
\describe{
\item{plot}{\code{signature(x = "portfolio", y = "missing")}: ... }
\item{position_add}{\code{signature(portfolio = "portfolio",
symbol = "character", quantity = "ANY", time = "ANY", priceData = "matrix")}: ... }
\item{position_add}{\code{signature(portfolio = "portfolio",
symbol = "character", quantity = "ANY", time = "ANY", priceData = "missing")}: ... }
\item{position_add}{\code{signature(portfolio = "portfolio",
symbol = "character", quantity = "ANY", time = "missing", priceData = "matrix")}: ... }
\item{position_add}{\code{signature(portfolio = "portfolio",
symbol = "character", quantity = "ANY", time = "missing", priceData = "missing")}: ... }
\item{show}{\code{signature(object = "portfolio")}: ... }
\item{expected_return}{\code{signature("portfolio")}: ... }
}
}
\author{Kostin Andrey <andrey.kostin@portfolioeffect.com>}
\examples{
showClass("portfolio")
}
\keyword{PortfolioEffectHFT}
%\concept{high frequency, intraday analytics, market data, portfolio, portfolio management,realtime analytics, risk, risk management, toolbox tools, trading, trading strategies}
\keyword{classes}
| /man/portfolio-class.Rd | no_license | githubfun/PortfolioEffectHFT | R | false | false | 1,952 | rd | \name{portfolio-class}
\docType{class}
\alias{portfolio-class}
\alias{plot,portfolio,missing-method}
\alias{position_add,portfolio,character,ANY,ANY,matrix-method}
\alias{position_add,portfolio,character,ANY,ANY,missing-method}
\alias{position_add,portfolio,character,ANY,missing,matrix-method}
\alias{position_add,portfolio,character,ANY,missing,missing-method}
\alias{portfolio_create,portfolio,missing,missing,missing-method}
\alias{show,portfolio-method}
\title{Class \code{"portfolio"}}
\description{
Container class for storing portfolio parameters
}
\section{Slots}{
\describe{
\item{\code{java}:}{Object of class \code{"jobjRef"} ~~ }
\item{\code{optimization_info}:}{Object of class \code{"ANY"} ~~ }
}
}
\section{Methods}{
\describe{
\item{plot}{\code{signature(x = "portfolio", y = "missing")}: ... }
\item{position_add}{\code{signature(portfolio = "portfolio",
symbol = "character", quantity = "ANY", time = "ANY", priceData = "matrix")}: ... }
\item{position_add}{\code{signature(portfolio = "portfolio",
symbol = "character", quantity = "ANY", time = "ANY", priceData = "missing")}: ... }
\item{position_add}{\code{signature(portfolio = "portfolio",
symbol = "character", quantity = "ANY", time = "missing", priceData = "matrix")}: ... }
\item{position_add}{\code{signature(portfolio = "portfolio",
symbol = "character", quantity = "ANY", time = "missing", priceData = "missing")}: ... }
\item{show}{\code{signature(object = "portfolio")}: ... }
\item{expected_return}{\code{signature("portfolio")}: ... }
}
}
\author{Kostin Andrey <andrey.kostin@portfolioeffect.com>}
\examples{
showClass("portfolio")
}
\keyword{PortfolioEffectHFT}
%\concept{high frequency, intraday analytics, market data, portfolio, portfolio management,realtime analytics, risk, risk management, toolbox tools, trading, trading strategies}
\keyword{classes}
|
# Leave-one-out PCA feature extraction for the atp7d multi-target dataset.
# For every sample i, a PCA is fitted on all *other* samples and sample i
# is projected onto the first `num.comp` components; the projections plus
# the original targets are written out as a new dataset.

# Fix: utils_MT.R defines remove.unique(), so it must be sourced *before*
# remove.unique() is called below (the original sourced it afterwards).
source("~/mtr-toolkit/utils_and_includes/utils_MT.R")

dataset <- read.csv("~/MEGA/MT_datasets/atp7d.csv", stringsAsFactors = FALSE)
X <- dataset[, -c(412:ncol(dataset))]  # input attributes (columns before 412)
X <- remove.unique(X)                  # drop constant/unique columns (helper from utils_MT.R)
Y <- dataset[, 412:ncol(dataset)]      # target attributes

# pca.m <- prcomp(X, center = TRUE, scale. = TRUE)
# # Component choice
# summary(pca.m)
num.comp <- 60  # number of principal components kept per sample

output <- matrix(nrow = nrow(dataset), ncol = num.comp, 0)
for (i in seq_len(nrow(dataset))) {
  # Refit PCA without sample i, then project sample i onto its components.
  # NOTE(review): PC signs/order can differ between refits, so projected
  # coordinates are not guaranteed to be comparable across rows -- confirm
  # this leave-one-out scheme is intended.
  pca.m <- prcomp(X[-i, ], center = TRUE, scale. = TRUE)
  output[i, ] <- predict(pca.m, X[i, ])[, 1:num.comp]
}

colnames(output) <- paste0("PC", 1:num.comp)
output <- as.data.frame(output)
final <- cbind(output, Y)
write.csv(final, "~/Desktop/atp7d_PCA.csv", row.names = FALSE)
| /analysis_and_plots/pca_extractor.R | no_license | hugoabonizio/mtr-toolkit | R | false | false | 690 | r | dataset <- read.csv("~/MEGA/MT_datasets/atp7d.csv", stringsAsFactors = F)
X <- dataset[, -c(412:ncol(dataset))]
X <- remove.unique(X)
Y <- dataset[, 412:ncol(dataset)]
source("~/mtr-toolkit/utils_and_includes/utils_MT.R")
# pca.m <- prcomp(X, center = TRUE, scale. = TRUE)
# # Component choice
# summary(pca.m)
num.comp <- 60
output <- matrix(nrow=nrow(dataset), ncol=num.comp, 0)
for(i in 1:nrow(dataset)) {
pca.m <- prcomp(X[-i,], center = TRUE, scale. = TRUE)
output[i,] <- predict(pca.m, X[i,])[,1:num.comp]
}
colnames(output) <- paste0("PC", 1:num.comp)
output <- as.data.frame(output)
final <- cbind(output, Y)
write.csv(final, "~/Desktop/atp7d_PCA.csv", row.names = F)
|
## originally by Yulong Niu
## yulong.niu@hotmail.com

###########################Raw reads######################
## Count the raw reads of every R1 fastq.gz file in `rawpath` (one fastq
## record = 4 lines, so reads = line count / 4), in parallel, and write a
## sample -> read-count table to 'raw_seqnumber_1stadd.csv'.

rawpath <- '/netscratch/dep_psl/grp_rgo/yniu/CJFe/raw_data_1stadd'

setwd(rawpath)

library('magrittr')
library('doParallel')
library('foreach')
library('tibble')
library('readr')

ncore <- 45  # number of parallel workers

fqs <- dir(rawpath,
           pattern = 'R1.fq.gz',
           full.names = TRUE)

registerDoParallel(cores = ncore)

rn <- foreach(i = seq_along(fqs), .combine = c) %dopar% {
  ## Decompress the fastq and let awk print the record count (lines / 4).
  ## Fix: spell out `intern = TRUE` -- the original `inter = TRUE` only
  ## worked through R's partial argument matching for system().
  eachrn <- paste('zcat', fqs[i], '| awk "END{print NR/4}"') %>%
    system(intern = TRUE) %>%
    as.numeric
  return(eachrn)
}

stopImplicitCluster()

## Sample names: take the 8th path element (the file name), drop the
## extension, then strip the last 3 characters (the '_R1' suffix).
## NOTE(review): the hard-coded index 8 depends on the depth of
## `rawpath` -- verify if the directory layout ever changes.
snames <- fqs %>%
  strsplit(split = '/', fixed = TRUE) %>%
  sapply('[[', 8) %>%
  strsplit(split = '.', fixed = TRUE) %>%
  sapply('[[', 1) %>%
  {substr(., 1, nchar(.) - 3)}

tibble(sample = snames,
       rawfq = rn) %>%
  write_csv('raw_seqnumber_1stadd.csv')
##########################################################
#################extract Kallisto and HISAT2 output########
setwd('/extDisk1/RESEARCH/MPIPZ_CJ_RNASeq/results/')
library('dplyr')
library('readr')
## Parse a combined HISAT2 + Kallisto alignment log into per-sample counts.
##
## INPUT:  'op' is a character vector of log lines (e.g. from readLines()).
##         'type' is 'PE' (pair-end) or 'SE' (single-end).
##         'org' is the organism name, copied verbatim into the result.
## OUTPUT: A tibble with one row per sample: sample name, trimmed input
##         read number ('trimfq'), HISAT2-aligned reads ('hmap'),
##         Kallisto pseudoaligned reads ('kmap'), and 'org'.
## USAGE:  Extract the number of aligned reads.
KHoutput <- function(op, type = 'PE', org = 'hsa') {
  ## NOTE(review): require() returns FALSE instead of erroring when a
  ## package is missing; library() would fail loudly and is safer here.
  require('stringr')
  require('magrittr')
  require('tibble')
  ## input reads number: "<n> reads; of these:" lines, one per sample;
  ## the count is the first whitespace-separated field.
  fqnum <- op %>%
    str_detect('\\d+ reads; of these:') %>%
    op[.] %>%
    strsplit(split = ' ', fixed = TRUE) %>%
    sapply('[[', 1) %>%
    as.numeric
  ## HISAT2 aligned = input reads minus the "aligned 0 times" count.
  ## The unaligned count is taken from field 9 (PE logs) or field 5 (SE
  ## logs) and halved for PE -- presumably because PE logs count mates.
  ## NOTE(review): these field positions are tied to the exact HISAT2
  ## summary format; confirm against the log if HISAT2 is upgraded.
  hmapnum <- op %>%
    str_detect('.* aligned 0 times$') %>%
    op[.] %>%
    strsplit(split = ' ', fixed = TRUE) %>%
    { if (type == 'PE') {
        sapply(., '[[', 9)
      } else {
        sapply(., '[[', 5)
      }} %>%
    as.numeric %>%
    { if (type == 'PE') {
        ./2
      } else .} %>%
    {fqnum - .}
  ## Kallisto aligned: "<n> reads pseudoaligned" lines; field 5 holds a
  ## comma-separated count, so commas are stripped before conversion.
  kmapnum <- op %>%
    str_detect('.* reads pseudoaligned') %>%
    op[.] %>%
    strsplit(split = ' ', fixed = TRUE) %>%
    sapply('[[', 5) %>%
    str_replace_all(',', '') %>%
    as.numeric
  ## sample names: field 6 of the "HISAT2 using" lines, with the trailing
  ## character stripped (presumably ':' -- verify against the log format).
  snames <- op %>%
    str_detect('HISAT2 using') %>%
    op[.] %>%
    strsplit(split = ' ', fixed = TRUE) %>%
    sapply('[[', 6) %>%
    {substr(., 1, nchar(.) - 1)}
  ## Assemble one row per sample; recycling repeats 'org' for every row.
  res <- tibble(sample = snames,
                trimfq = fqnum,
                hmap = hmapnum,
                kmap = kmapnum,
                org = org)
  return(res)
}
##~~~~~~~~~~~~~~~~~~~~~~~test contamination~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Sample annotation: map "Library number" -> ID and "Library Name" ->
## SampleAnno, normalising '.' and '-' to '_'.
## NOTE(review): str_replace() comes from stringr, which this script never
## attaches with library(); it is only require()'d inside KHoutput().
## Confirm stringr is on the search path before this line runs.
## NOTE(review): sampleAnno appears unused in the rest of this section.
sampleAnno <- read_delim('/extDisk1/RESEARCH/MPIPZ_CJ_RNASeq/results/sample_anno.txt', delim = '\t') %>%
  rename(ID = `Library number`, SampleAnno = `Library Name`) %>%
  select(ID, SampleAnno) %>%
  mutate(ID = ID %>% str_replace('\\.', '_'), SampleAnno = SampleAnno %>% str_replace('-', '_')) %>%
  arrange(ID)

## Parse the 'ath' alignment log and convert the HISAT2 (hmap) and
## Kallisto (kmap) aligned counts into fractions of the trimmed reads.
athout <- 'alignment_nohup_1stadd.out' %>%
  readLines %>%
  KHoutput(type = 'PE', org = 'ath') %>%
  mutate(H_ath = round(hmap/trimfq, 3), K_ath = round(kmap/trimfq, 3)) %>%
  select(c(-hmap, -kmap, -org))

## raw reads counted by the "Raw reads" section above
rawrd <- read_csv('raw_seqnumber_1stadd.csv')

## Join raw read counts with alignment rates (by the shared 'sample' column).
contam <- rawrd %>%
  inner_join(athout)

write_csv(contam, 'ath_alignment_1stadd.csv')
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| /scripts/script_extractalign.R | no_license | YulongNiu/MPIPZ_CJ_RNASeq | R | false | false | 3,528 | r |
## originally by Yulong Niu
## yulong.niu@hotmail.com
###########################Raw reads######################
rawpath <- '/netscratch/dep_psl/grp_rgo/yniu/CJFe/raw_data_1stadd'
setwd(rawpath)
library('magrittr')
library('doParallel')
library('foreach')
library('tibble')
library('readr')
ncore <- 45
fqs <- dir(rawpath,
pattern = 'R1.fq.gz',
full.names = TRUE)
registerDoParallel(cores = ncore)
rn <- foreach(i = seq_along(fqs), .combine = c) %dopar% {
eachrn <- paste('zcat', fqs[i], '| awk "END{print NR/4}"') %>%
system(inter = TRUE) %>%
as.numeric
return(eachrn)
}
stopImplicitCluster()
snames <- fqs %>%
strsplit(split = '/', fixed = TRUE) %>%
sapply('[[', 8) %>%
strsplit(split = '.', fixed = TRUE) %>%
sapply('[[', 1) %>%
{substr(., 1, nchar(.) - 3)}
tibble(sample = snames,
rawfq = rn) %>%
write_csv('raw_seqnumber_1stadd.csv')
##########################################################
#################extract Kallisto and HISAT2 output########
setwd('/extDisk1/RESEARCH/MPIPZ_CJ_RNASeq/results/')
library('dplyr')
library('readr')
KHoutput <- function(op, type = 'PE', org = 'hsa') {
## INPUT: 'op' is a character vector. 'type' is 'PE' (pair-end) or 'SE' (single-end). 'org' is the organism name.
## OUTPUT: A tibble, 1st column is input reads number, 2nd column is Kallisto aligned read number, and 3rd column is the HISAT2 aligned read number.
## USAGE: Extract the number of aligned reads.
require('stringr')
require('magrittr')
require('tibble')
## input reads number
fqnum <- op %>%
str_detect('\\d+ reads; of these:') %>%
op[.] %>%
strsplit(split = ' ', fixed = TRUE) %>%
sapply('[[', 1) %>%
as.numeric
## HISAT2 aligned
hmapnum <- op %>%
str_detect('.* aligned 0 times$') %>%
op[.] %>%
strsplit(split = ' ', fixed = TRUE) %>%
{ if (type == 'PE') {
sapply(., '[[', 9)
} else {
sapply(., '[[', 5)
}} %>%
as.numeric %>%
{ if (type == 'PE') {
./2
} else .} %>%
{fqnum - .}
## Kallisto aligned
kmapnum <- op %>%
str_detect('.* reads pseudoaligned') %>%
op[.] %>%
strsplit(split = ' ', fixed = TRUE) %>%
sapply('[[', 5) %>%
str_replace_all(',', '') %>%
as.numeric
## sample names
snames <- op %>%
str_detect('HISAT2 using') %>%
op[.] %>%
strsplit(split = ' ', fixed = TRUE) %>%
sapply('[[', 6) %>%
{substr(., 1, nchar(.) - 1)}
res <- tibble(sample = snames,
trimfq = fqnum,
hmap = hmapnum,
kmap = kmapnum,
org = org)
return(res)
}
##~~~~~~~~~~~~~~~~~~~~~~~test contamination~~~~~~~~~~~~~~~~~~~~~~~~~~~
sampleAnno <- read_delim('/extDisk1/RESEARCH/MPIPZ_CJ_RNASeq/results/sample_anno.txt', delim = '\t') %>%
rename(ID = `Library number`, SampleAnno = `Library Name`) %>%
select(ID, SampleAnno) %>%
mutate(ID = ID %>% str_replace('\\.', '_'), SampleAnno = SampleAnno %>% str_replace('-', '_')) %>%
arrange(ID)
athout <- 'alignment_nohup_1stadd.out' %>%
readLines %>%
KHoutput(type = 'PE', org = 'ath') %>%
mutate(H_ath = round(hmap/trimfq, 3), K_ath = round(kmap/trimfq, 3)) %>%
select(c(-hmap, -kmap, -org))
## raw reads
rawrd <- read_csv('raw_seqnumber_1stadd.csv')
contam <- rawrd %>%
inner_join(athout)
write_csv(contam, 'ath_alignment_1stadd.csv')
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
######################################################
|
# Load packages and data
library(dplyr)
library(ggplot2)
library(stargazer)

Soil <- read.csv("Soil Lab 2.csv")

# Convert the design variables to factors so they are treated as
# categorical predictors (not numeric) in the models below.
Soil$Site <- as.factor(Soil$Site)
Soil$Round <- as.factor(Soil$Round)
Soil$Side <- as.factor(Soil$Side)
Soil$Distance <- as.factor(Soil$Distance)
Soil$unprotected <- as.factor(Soil$unprotected)

# Inspect the distribution of the response variable.
# NOTE(review): logSC is presumably a log-transformed conductance
# column prepared upstream -- confirm.
hist(Soil$logSC)
# histogram looks more normal than before

# Fixed-effects model: logSC as a function of protection status,
# sampling round, road side, and distance.
logSCmodel <- lm(logSC ~ unprotected + Round + Side + Distance, data = Soil)
summary(logSCmodel)
plot(logSCmodel)  # standard lm diagnostic plots

# Normality check on the residuals (Shapiro-Wilk).
logSC.resid <- resid(logSCmodel)
shapiro.test(logSC.resid)
# a non-significant p-value means the residual distribution is not
# significantly different from normal

# Mixed-effects model: same fixed effects, with Site as a random
# intercept to account for repeated measurements within sites.
library(lme4)
mixed.logSC <- lmer(logSC ~ unprotected + Round + Side + Distance + (1|Site), data = Soil)
summary(mixed.logSC)
plot(mixed.logSC)
qqnorm(resid(mixed.logSC))
qqline(resid(mixed.logSC))

# Formatted coefficient table: variable names, coefficients, p-values.
stargazer(mixed.logSC, type="text", report=('vc*p'))
| /Fieldwork Soil regression mixed effects.R | no_license | skanabar27/Road-salt | R | false | false | 1,044 | r | # Load packages and data
library(dplyr)
library(ggplot2)
library(stargazer)
Soil <- read.csv("Soil Lab 2.csv")
#make the variables into factors so they will be recognized in the linear model
Soil$Site <- as.factor(Soil$Site)
Soil$Round <- as.factor(Soil$Round)
Soil$Side <- as.factor(Soil$Side)
Soil$Distance <- as.factor(Soil$Distance)
Soil$unprotected <- as.factor(Soil$unprotected)
hist(Soil$logSC)
#histogram looks more normal than before
#fixed effects model
logSCmodel <- lm(logSC ~ unprotected + Round + Side + Distance, data = Soil)
summary(logSCmodel)
plot(logSCmodel)
logSC.resid <- resid(logSCmodel)
shapiro.test(logSC.resid)
#not significant means distribution is not significantly different from normal
#mixed effects model, with site as a random effect
library(lme4)
mixed.logSC <- lmer(logSC ~ unprotected + Round + Side + Distance + (1|Site), data = Soil)
summary(mixed.logSC)
plot(mixed.logSC)
qqnorm(resid(mixed.logSC))
qqline(resid(mixed.logSC))
stargazer(mixed.logSC, type="text", report=('vc*p'))
|
# Fixture data for the plot_scatterRIC tests.
# Fix: seed the RNG so the runif() draws (and therefore which p.adj
# branches the colour test exercises) are reproducible across runs;
# without a seed the fixtures differ on every test invocation.
set.seed(42)

tabletoplotWCLsmall <-
  data.frame(
    ENSGid = c("ENSG00000100030", "ENSG00000100836", "ENSG00000104131"),
    symbol = c("MAPK1", "PABPN1", "EIF3J"),
    Know_RBP = c("no", "known_RBP", "known_RBP"),
    log2FC = runif(3),
    p.value = runif(3),
    p.adj = runif(3),
    sig = runif(3)
  )

intensitiestoploWCLsmall <-
  matrix(c(runif(3), runif(3), runif(3)), nrow = 3)

test_that("plot_scatterRIC throws error without valid input", {
  # Non-dataframe / non-matrix inputs, and a dataframe missing the
  # p.adj column (column 6), must all be rejected.
  expect_error (plot_scatterRIC("tabletoplotWCLsmall", intensitiestoploWCLsmall))
  expect_error (plot_scatterRIC(tabletoplotWCLsmall, "intensitiestoploWCLsmall"))
  expect_error (plot_scatterRIC(tabletoplotWCLsmall[, -6], intensitiestoploWCLsmall))
})

test_that("plot_scatterRIC assigns colors based on p.adj values", {
  # NOTE(review): this test re-derives the colour vector locally instead
  # of inspecting plot_scatterRIC's output, so it only validates the
  # ifelse() recipe -- consider asserting on the function's return value.
  col <- ifelse(tabletoplotWCLsmall$p.adj <= 0.1, "orange", "gray")
  expect_type(col, "character")
  expect_type(col[which(tabletoplotWCLsmall$p.adj <= 0.05)], "character")
  expect_type(col[which(tabletoplotWCLsmall$p.adj <= 0.01)], "character")
  expect_type(col[which(tabletoplotWCLsmall$p.adj <= 0.1 &
                          tabletoplotWCLsmall$log2FC < 0)], "character")
  # the colour vector must map one-to-one onto the significance cut-off
  expect_identical(col == "orange", tabletoplotWCLsmall$p.adj <= 0.1)
})
test_that("plot_scatterRIC returns a plot", {
expect_null(plot_scatterRIC(tabletoplotWCLsmall, intensitiestoploWCLsmall))
}) | /tests/testthat/test-plot_scatterRIC.R | no_license | demar01/RIC | R | false | false | 1,414 | r | tabletoplotWCLsmall <-
data.frame(
ENSGid = c("ENSG00000100030", "ENSG00000100836" , "ENSG00000104131"),
symbol = c("MAPK1", "PABPN1", "EIF3J"),
Know_RBP = c("no", "known_RBP", "known_RBP"),
log2FC = runif(3),
p.value = runif(3),
p.adj = runif(3),
sig = runif(3)
)
intensitiestoploWCLsmall <-
matrix(c(runif(3), runif(3), runif(3)), nrow = 3)
test_that("plot_scatterRIC throws error without valid input", {
expect_error (plot_scatterRIC("tabletoplotWCLsmall", intensitiestoploWCLsmall))
expect_error (plot_scatterRIC(tabletoplotWCLsmall, "intensitiestoploWCLsmall"))
expect_error (plot_scatterRIC(tabletoplotWCLsmall[, -6], intensitiestoploWCLsmall))
})
test_that("plot_scatterRIC assigns colors based on p.adj values", {
col <- ifelse(tabletoplotWCLsmall$p.adj <= 0.1, "orange", "gray")
expect_type(col, "character")
expect_type(col[which(tabletoplotWCLsmall$p.adj <= 0.05)], "character")
expect_type(col[which(tabletoplotWCLsmall$p.adj <= 0.01)], "character")
expect_type(col[which(tabletoplotWCLsmall$p.adj <= 0.1 &
tabletoplotWCLsmall$log2FC < 0)] , "character")
expect_type(col[which(tabletoplotWCLsmall$p.adj <= 0.1 &
tabletoplotWCLsmall$log2FC < 0)] , "character")
})
test_that("plot_scatterRIC returns a plot", {
expect_null(plot_scatterRIC(tabletoplotWCLsmall, intensitiestoploWCLsmall))
}) |
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# Fix the RNG seed so the CV folds (and thus the audit measures) are reproducible.
set.seed(1)

#:# data
# Download the 'chscase_funds' dataset from OpenML.
dataset <- getOMLDataSet(data.name = "chscase_funds")
head(dataset$data)

#:# preprocessing
# No preprocessing is applied; the data is used exactly as downloaded.
head(dataset$data)

#:# model
# Binary classification task with a conditional inference tree learner
# configured to output class probabilities.
# NOTE(review): `=` is used for top-level assignment here; `<-` is conventional.
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.ctree", par.vals = list(), predict.type = "prob")

#:# hash
#:# df4e41a3659c8352d840fd3b7dda8a13
# Digest of (task, learner) used as the model identifier; expected to
# match the hash recorded in the comment above.
hash <- digest(list(task, lrn))
hash

#:# audit
# 5-fold cross-validation collecting accuracy, AUC and confusion-based measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC

#:# session info
# Record package versions for reproducibility.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_chscase_funds/classification_binaryClass/df4e41a3659c8352d840fd3b7dda8a13/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 692 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "chscase_funds")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.ctree", par.vals = list(), predict.type = "prob")
#:# hash
#:# df4e41a3659c8352d840fd3b7dda8a13
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
## A facility for caching the inverse of a matrix
## the following was a big help understanding scoping:
## http://www.r-bloggers.com/environments-in-r/
## as well as the original Ihaka/Gentleman paper, which I found here:
## http://www.tandfonline.com/doi/pdf/10.1080/10618600.1996.10474713
## makeCacheMatrix() takes a matrix as its parameter and returns
## a List of functions for manipulating the matrix and its
## cached inversed, which is set or retrieved by cacheSolve()
## Wrap a matrix in a caching container.
##
## Returns a list of four accessor closures sharing one environment:
##   set(value)     -- replace the stored matrix and drop the cached inverse
##   get()          -- return the stored matrix
##   setinv(inv)    -- store a computed inverse
##   getinv()       -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  ## cached inverse; invalidated whenever the matrix is replaced
  cached_inverse <- NULL
  list(
    set = function(value) {
      x <<- value
      cached_inverse <<- NULL
    },
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
## cacheSolve() takes a matrix cache (produced by makeCacheMatrix()) and
## returns either a cached inverse or a newly calculated one
## Return the inverse of the matrix held by a makeCacheMatrix() container.
##
## If the container already holds a cached inverse it is returned as-is;
## otherwise the inverse is computed with solve() (extra arguments are
## forwarded) and stored back into the container before being returned.
cacheSolve <- function(x, ...) {
  result <- x$getinv()
  if (is.null(result)) {
    message("calculating inverse")
    result <- solve(x$get(), ...)
    x$setinv(result)
  } else {
    message("getting cached inverse")
  }
  result
}
| /cachematrix.R | no_license | stevelovelace/cachematrix | R | false | false | 1,306 | r | ## A facility for caching the inverse of a matrix
## the following was a big help understanding scoping:
## http://www.r-bloggers.com/environments-in-r/
## as well as the original Ihaka/Gentleman paper, which I found here:
## http://www.tandfonline.com/doi/pdf/10.1080/10618600.1996.10474713
## makeCacheMatrix() takes a matrix as its parameter and returns
## a List of functions for manipulating the matrix and its
## cached inversed, which is set or retrieved by cacheSolve()
makeCacheMatrix <- function(x = matrix()) {
## clear cache and create set/get functions
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(xinv) inv <<- xinv
getinv <- function() inv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## cacheSolve() takes a matrix cache (produced by makeCacheMatrix()) and
## returns either a cached inverse or a newly calculated one
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if (!is.null(inv)) {
message("getting cached inverse")
return(inv)
}
message("calculating inverse")
mat <- x$get()
inv <- solve(mat, ...)
x$setinv(inv)
inv
}
|
###############################################################
## Species Distribution Modeling for Western Massasauga ##
## by: Michelle Lawing and Danielle Walkup ##
## Updated: 30 July 2020 ##
## ##
## This is the main script document for species distribution ##
## modeling of Massasaugas. This is a wrapper script and ##
## sources all secondary scripts located in the scripts ##
## folder. Before attempting to run this script, make sure ##
## to open and read the other scripts in numerical order. ##
## The data included here are publicly sourced data and do ##
## not include the potentially sensive data included in the ##
## full analysis. Those data will be made available on ##
## request and in consulation with data constributers, and ##
## when relevant, state and federal authorities. ##
###############################################################
####### Caution, everything is set up to run from scratch, not load!!! #########
### This means that the run time will be very long for some of the scripts. ####
# Scripts 01_input through 07_evaluate build on each other and need to be run in
# order to setup and run the sdm model.
# Scripts 08_currentProjections through 10_futureProjections rely on scripts 01 and
# 02, but may need to be run separately because of issues with memory in rJava.
# See the warnings at the top of each individual script for more information
# Scripts 11_variableResponse and 12_manuscriptFigures are stand alone scripts
# that load the needed data and packages.
# Generally packages are loaded in the scripts as they are needed.
# Loading dismo and java here because we want to give java more memory to run
# all the models
options(java.parameters = "-Xmx2048m")
library(dismo)
library(rJava)
## We provide example data to run these scripts. These include inaturalist, gbif, and vertnet
## occurrences, administrative boundaries for the U.S. and Mexico, Bioclimatic data from WorldClim
## database, and derived Envirem data.
##############################
###--- Preliminary Setup -----
##############################
#Setup the future envirem data set
#source("scripts/make.future.envirem.R")
##############################
###------- Input Data --------
##############################
source("scripts/01_input.R")
##############################
###--- Variable Selection ----
##############################
source("scripts/02_variableEnv.R")
##############################
###------- Filtering ---------
##############################
#script should be commented/uncommented depending on whether you are running
#new analyses or loading old
source("scripts/03_filter.R")
##############################
###--------- Folding ---------
##############################
source("scripts/04_folding.R")
##############################
###---- Background Points ----
##############################
source("scripts/05_backgroundPts.R")
##############################
###--------- Maxent ----------
##############################
#script should be commented/uncommented depending on whether you are running
#new analyses or loading old
source("scripts/06_runModels.R")
##############################
###---- Model Evaluation -----
##############################
#script should be commented/uncommented depending on whether you are running
#new analyses or loading old
source("scripts/07_evaluate.R")
##############################
###---- Model Projections ----
##############################
#script should be commented/uncommented depending on whether you are running
#new analyses or loading old
source("scripts/08_currentProjections.R")
#read the warning at the top of the script about rJava memory error.
#script should be commented/uncommented depending on whether you are running
#new analyses or loading old
source("scripts/09_historicProjections.R")
#read the warning at the top of the script about rJava memory error.
#script should be commented/uncommented depending on whether you are running
#new analyses or loading old
source("scripts/10_futureProjections.R")
##############################
###---- Variable Response ----
##############################
source("scripts/11_variableResponse.R")
###############################
###---- Manuscript Figures ----
###############################
source("scripts/12_manuscriptFigures.R") | /massasaugaSDM.R | no_license | michellelawing/massasaugaSDM | R | false | false | 4,495 | r | ###############################################################
## Species Distribution Modeling for Western Massasauga ##
## by: Michelle Lawing and Danielle Walkup ##
## Updated: 30 July 2020 ##
## ##
## This is the main script document for species distribution ##
## modeling of Massasaugas. This is a wrapper script and ##
## sources all secondary scripts located in the scripts ##
## folder. Before attempting to run this script, make sure ##
## to open and read the other scripts in numerical order. ##
## The data included here are publicly sourced data and do ##
## not include the potentially sensive data included in the ##
## full analysis. Those data will be made available on ##
## request and in consulation with data constributers, and ##
## when relevant, state and federal authorities. ##
###############################################################
####### Caution, everything is set up to run from scratch, not load!!! #########
### This means that the run time will be very long for some of the scripts. ####
# Scripts 01_input through 07_evaluate build on each other and need to be run in
# order to setup and run the sdm model.
# Scripts 08_currentProjections through 10_futureProjections rely on scripts 01 and
# 02, but may need to be run separately because of issues with memory in rJava.
# See the warnings at the top of each individual script for more information
# Scripts 11_variableResponse and 12_manuscriptFigures are stand alone scripts
# that load the needed data and packages.
# Generally packages are loaded in the scripts as they are needed.
# Loading dismo and java here because we want to give java more memory to run
# all the models
options(java.parameters = "-Xmx2048m")
library(dismo)
library(rJava)
## We provide example data to run these scripts. These include inaturalist, gbif, and vertnet
## occurrences, administrative boundaries for the U.S. and Mexico, Bioclimatic data from WorldClim
## database, and derived Envirem data.
##
## Driver script: each numbered script below builds on objects created by the
## scripts before it, so they must be sourced in order (01 -> 12).
##############################
###--- Preliminary Setup -----
##############################
# Set up the future envirem data set (one-off preprocessing; normally left
# commented out because its output is already saved to disk).
#source("scripts/make.future.envirem.R")
##############################
###------- Input Data --------
##############################
source("scripts/01_input.R")
##############################
###--- Variable Selection ----
##############################
source("scripts/02_variableEnv.R")
##############################
###------- Filtering ---------
##############################
# Comment/uncomment inside this script depending on whether you are running
# new analyses or loading previously saved results.
source("scripts/03_filter.R")
##############################
###--------- Folding ---------
##############################
source("scripts/04_folding.R")
##############################
###---- Background Points ----
##############################
source("scripts/05_backgroundPts.R")
##############################
###--------- Maxent ----------
##############################
# Comment/uncomment inside this script depending on whether you are running
# new analyses or loading previously saved results.
source("scripts/06_runModels.R")
##############################
###---- Model Evaluation -----
##############################
# Comment/uncomment inside this script depending on whether you are running
# new analyses or loading previously saved results.
source("scripts/07_evaluate.R")
##############################
###---- Model Projections ----
##############################
# Comment/uncomment inside this script depending on whether you are running
# new analyses or loading previously saved results.
source("scripts/08_currentProjections.R")
# Read the warning at the top of the script about the rJava memory error.
# Comment/uncomment inside this script depending on whether you are running
# new analyses or loading previously saved results.
source("scripts/09_historicProjections.R")
# Read the warning at the top of the script about the rJava memory error.
# Comment/uncomment inside this script depending on whether you are running
# new analyses or loading previously saved results.
source("scripts/10_futureProjections.R")
##############################
###---- Variable Response ----
##############################
source("scripts/11_variableResponse.R")
###############################
###---- Manuscript Figures ----
###############################
source("scripts/12_manuscriptFigures.R") |
# Extra functions that help me work inside dataframes.
#' Replace all matching values in a dataframe with something else
#'
#' Uses regex to match and replace cell values. This function is meant for formatting
#' printed data; it's often useful to blank out NAs or other values to minimise visual
#' clutter when you are reading a table. For actual data-tidying applications, it's
#' safer to use `dplyr::recode()` or `dplyr::recode_factor()`.
#'
#' By default, this function will replace cells consisting of NAs, whitespace, empty
#' strings, zeros ("0"), dashes, and underscores with an empty string.
#'
#' @param df (Dataframe) A dataframe.
#' @param find (Character) A regex search pattern.
#' @param replace (Character) The string used to overwrite the matching cells.
#' @param replace_na (Logical) If `TRUE`, also overwrite R's built-in `NA` values.
#'
#' @return A dataframe.
#' @export
#'
#' @examples
#' test_df <-
#' data.frame(stringsAsFactors = FALSE,
#' name = c("insect1", "insect2", "insect3", "insect4", "insect5",
#' "insect6", "insect7", "insect8", "insect9", "insect10"),
#' family = c("Belidae", "Belidae", " ", "Coccinelidae", NA, "Coccinelidae",
#' "Braconidae", "_", "-", "Curculionidae"),
#' is_cool = c("TRUE", "TRUE", NA, "TRUE", "", "TRUE", "TRUE", "-", "_",
#' "TRUE")
#' )
#'
#' test_df
#' overwrite_df(test_df)
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
overwrite_df <- function(df, find = "^(NA||\\s+|0|-+|_+)$", replace = "", replace_na = TRUE) {
    # Work on a copy so the caller's dataframe is never modified.
    working <- df
    if (replace_na == TRUE) {
        # gsub() cannot match R's real NA values, so swap them for the
        # replacement string before pattern matching.
        working[is.na(working)] <- replace
    }
    # Coerce every column to character and rewrite the cells matching `find`.
    rewritten <- lapply(working, function(col) gsub(find, replace, as.character(col)))
    # Rebuild the dataframe without mangling the original column names.
    data.frame(rewritten,
               stringsAsFactors = FALSE,
               check.rows = FALSE,
               check.names = FALSE,
               fix.empty.names = FALSE)
}
#' Drop 'empty' columns in a dataframe
#'
#' Deletes columns from a dataframe if they are 'empty'. A column is empty when every
#' single row is `NA`, `NULL`, `""`, or matches a regular expression.
#'
#' @param df (Dataframe) A dataframe.
#' @param from,to (Numeric or `NULL`) The start and end of a continuous range of columns
#' that will be subsetted from `df`. If `to` is `NULL`, it defaults to the last
#' column in `df` so that `from = 2, to = NULL` is the same as `2:length(df)`.
#' @param cols (Numeric or `NULL`) A numeric vector of the columns to consider. This
#' allows you to select non-contiguous columns. If the `cols` argument is being used
#' (not-`NULL`), `from` and `to` will be ignored.
#' @param regex (Character) A regex pattern that matches a value that should be considered
#' 'empty'.
#' @param report (Logical) If `TRUE`, print a Message with the names of the empty columns
#' that were dropped.
#'
#' @return A subset of `df` with all empty columns removed.
#' @export
#'
#' @examples
#' data <- data.frame(a = c(1, 2, 3),
#' b = c(0, 0, 0),
#' c = c(1, 1, 0),
#' d = c("", "", ""),
#' e = c("moo", "baa", "woof"))
#'
#' #> a b c d e
#' #> 1 0 1 moo
#' #> 2 0 1 baa
#' #> 3 0 0 woof
#'
#' drop_empty_cols(data)
#'
#' #> a b c e
#' #> 1 0 1 moo
#' #> 2 0 1 baa
#' #> 3 0 0 woof
#'
#' drop_empty_cols(data, regex = "moo|baa|woof")
#'
#' #> a b c
#' #> 1 0 1
#' #> 2 0 1
#' #> 3 0 0
#'
#' drop_empty_cols(data, regex = "moo|baa|woof", report = TRUE)
#'
#' #> Empty cols dropped: d, e
#' #> a b c
#' #> 1 0 1
#' #> 2 0 1
#' #> 3 0 0
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
drop_empty_cols <- function(df, from = 1, to = NULL, cols = NULL, regex = "^$",
                            report = FALSE) {
    # Resolve the column range/indices to inspect (construct_cols() is a
    # helper defined elsewhere in this package).
    inspect <- construct_cols(df, from = from, to = to, cols = cols)
    candidates <- df[inspect]
    # A column is 'empty' when every cell is NA, NULL, "", or matches `regex`.
    is_filled <- function(col) {
        !all(is.na(col) | is.null(col) | col == "" | grepl(regex, col))
    }
    kept <- base::Filter(is_filled, candidates)
    if (report == TRUE) {
        # fold() (package helper) abbreviates the list of dropped names.
        message("Dropped empty cols: ", fold(diff_cols(df, kept), n = Inf))
    }
    return(dplyr::select(df, any_of(colnames(kept))))
}
#' Drop 'empty' rows in a dataframe
#'
#' Deletes rows from a dataframe if they are 'empty'. A row is empty when every single
#' cell is `NA`, `NULL`, `""`, or matches a regular expression.
#'
#' @param df (Dataframe) A dataframe.
#' @param from,to (Numeric or `NULL`) The start and end of a continuous range of columns
#' that will be subsetted from `df`. For example, columns that are always filled
#' should be omitted (see examples). If `to` is `NULL`, it defaults to the last
#' column in `df` so that `from = 2, to = NULL` is the same as `2:length(df)`.
#' @param cols (Numeric or `NULL`) A numeric vector of the columns to consider. This
#' allows you to select non-contiguous columns. If the `cols` argument is being used
#' (not-`NULL`), `from` and `to` will be ignored.
#' @param regex (Character) A regex pattern that matches a value that should be considered
#' 'empty'.
#' @param report (Logical) If `TRUE`, print a Message with the number of empty rows
#' that were dropped.
#'
#' @return A subset of `df` with all empty rows removed.
#' @export
#'
#' @examples
#' data <- data.frame(name = c("Jim", "Jane", "Janice", "Joe", "Jay"),
#' a = c(0, "", 1, NA, 0),
#' b = c(1, "", 1, NA, 0),
#' c = c(1, NA, 2, 0, 0),
#' d = c(0, NA, 4, 0, 0),
#' e = c(0, "", 5, 0, 0),
#' f = c(3, "", 0, 0, 0),
#' stringsAsFactors = FALSE)
#'
#' data
#'
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 2 Jane NA NA
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' drop_empty_rows(data)
#'
#' # Returns the whole dataframe because column 1 ('name') is never empty.
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 2 Jane NA NA
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' drop_empty_rows(data, from = 2)
#'
#' # We get the desired result when 'name' is omitted.
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' drop_empty_rows(data, from = 2, regex = "^0$")
#'
#' # Regex can be used to match cells that should be 'empty'.
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 3 Janice 1 1 2 4 5 0
#'
#' drop_empty_rows(data, cols = c(2, 6))
#'
#' # Non-contiguous columns can be selected with 'cols'.
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' drop_empty_rows(data, cols = c(2, 6), report = TRUE)
#'
#' #> Dropped rows: 1 in total
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
drop_empty_rows <- function(df, from = 1, to = NULL, cols = NULL, regex = "^$",
                            report = FALSE) {
    # Resolve which columns are scanned for emptiness (construct_cols() is a
    # helper defined elsewhere in this package).
    selected <- construct_cols(df, from = from, to = to, cols = cols)
    sub_df <- df[selected]
    # https://stackoverflow.com/a/15618761/5578429
    # trimws() MUST be kept in the anonymous function below. This is because of how,
    # when apply() is given a dataframe, it coerces it to a matrix with as.matrix(). The
    # coercion is done using format(), which pads numbers with spaces to the length
    # of the longest string in the column. This means that a df might be coerced as:
    #
    # "NA" "1" "1"
    # " 0" "0" " "     This row is wrongly kept because " " is not 'empty'.
    # " 1" "1" "2"
    # " 1" "1" "3"
    not_empty <- apply(sub_df,
                       MARGIN = 1, # Along each row...
                       function(x) {
                           # Remove whitespace from each cell
                           y <- trimws(x, which = "both");
                           # Negated so that non-empty rows are TRUE, and will be kept.
                           # NOTE(review): is.null(y) is always FALSE for elements of an
                           # atomic vector, so that test is a no-op; NAs are already
                           # caught by is.na(y). Left as-is to preserve behaviour.
                           !all(nchar(y) == 0 |
                                y == "" |
                                is.na(y) |
                                is.null(y) |
                                grepl(regex, y))
                       }
    )
    # This is dplyr::slice() and not a simpler subset (df[!is_empty,]) because of a strange
    # quirk where dataframes with labelled variables would lose their labels if they were
    # subset in base R. dplyr::slice() keeps them.
    out <- dplyr::slice(df, which(not_empty))
    if (report == TRUE) {
        # fold() (package helper) abbreviates the report; only the first 50
        # dropped row numbers are listed.
        dropped_row_nums <- paste0(": ", fold(which(!not_empty), n = 50))
        message("Dropped ", nrow(df) - nrow(out), " empty rows", dropped_row_nums)
    }
    return(out)
}
#' Collapse a dataframe into a vector
#'
#' Useful for taking every number in a table and plotting it in a histogram, for example.
#'
#' @param df (Dataframe) A dataframe.
#' @param from,to (Numeric or `NULL`) The start and end of a continuous range of columns
#' that will be considered for the empty/not-empty decision. For example, columns that
#' are always filled should be omitted (see examples). If `to` is `NULL`, it defaults
#' to the last column in `df` so that `from = 2, to = NULL` is the same as
#' `2:length(df)`.
#' @param cols (Numeric or `NULL`) A numeric vector of the columns to consider. This
#' allows you to select non-contiguous columns. If the `cols` argument is being used
#' (not-`NULL`), `from` and `to` will be ignored.
#'
#' @return A vector containing the cell contents from the selected columns of `df`.
#' If all of the cells are numeric, the vector is Numeric. If any of the cells contain
#' strings, the vector is Character. The columns are concatenated in order.
#' @export
#'
#' @examples
#' collapse_df(iris, cols = 1:4)
#'
#' #> [1] 5.1 4.9 4.7 4.6 5.0 5.4 4.6 5.0 4.4 4.9 5.4 4.8 ...
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
collapse_df <- function(df, from = 1, to = NULL, cols = NULL) {
    # Resolve which columns to gather (construct_cols() is a helper defined
    # elsewhere in this package).
    chosen <- construct_cols(df, from = from, to = to, cols = cols)
    # unlist() concatenates the selected columns in order and drops the
    # element names, yielding a plain vector. Mixed column types are coerced
    # upward (e.g. to character) by the usual unlist() rules.
    unlist(df[chosen], use.names = FALSE)
}
#' Sort columns in a dataframe
#'
#' Sorts the columns of a dataframe, and then allows you to pull columns to the start of
#' the dataframe by name.
#'
#' @param df (Dataframe) A dataframe.
#' @param ... (Column names) If you want to manually position columns _after_ they are
#' sorted, provide unquoted column names here. The columns in `...` will be placed
#' first in the dataframe, and then all other unlisted columns will be placed after.
#' @param decreasing (Logical) If `FALSE`, sort columns from A-Z and 0-9. If `TRUE`, sort
#' in reverse.
#'
#' @return A copy of `df` with reordered columns.
#' @export
#'
#' @examples
#' colnames(iris)
#'
#' #> [1] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
#'
#' sorted <- sort_cols(iris)
#' colnames(sorted)
#'
#' #> [1] "Petal.Length" "Petal.Width" "Sepal.Length" "Sepal.Width" "Species"
#'
#' reverse <- sort_cols(iris, decreasing = TRUE)
#' colnames(reverse)
#'
#' #> [1] "Species" "Sepal.Width" "Sepal.Length" "Petal.Width" "Petal.Length"
#'
#' manual <- sort_cols(iris, Species)
#' colnames(manual)
#'
#' #> [1] "Species" "Petal.Length" "Petal.Width" "Sepal.Length" "Sepal.Width"
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
sort_cols <- function(df, ..., decreasing = FALSE) {
    # Capture any columns the caller wants pulled to the front.
    pulled_first <- dplyr::quos(...)
    # Reorder every column alphabetically (or reverse-alphabetically).
    alphabetical <- df[, order(colnames(df), decreasing = decreasing)]
    # Columns named in ... go first; everything else keeps the sorted order.
    return(dplyr::select(alphabetical, !!!pulled_first, dplyr::everything()))
}
#' Drop invariant columns from a dataframe
#'
#' Deletes columns from a dataframe if they do not vary. For `character` and `factor`
#' columns, this means that every row of the column contains exactly the same string.
#' For `numeric` columns, the numbers are rounded to a nearest common value and then
#' checked to see if every rounded number is the same.
#'
#' @param df (Dataframe) A dataframe.
#' @param from,to (Numeric or `NULL`) The start and end of a continuous range of columns
#' that will be used. If `to` is `NULL`, it defaults to the last column in `df` so
#' that `from = 2, to = NULL` is the same as `2:length(df)`.
#' @param cols (Numeric or `NULL`) A numeric vector of the columns to consider. This
#' allows you to select non-contiguous columns. If the `cols` argument is being used
#' (not-`NULL`), `from` and `to` will be ignored.
#' @param nearest (Numeric or `NULL`) For numeric columns, this is the common value that
#' all numbers will be rounded to. The default `NULL` uses the `mean()` of each
#' column as the rounding target.
#' @param dir (Character or `NULL`) Controls the rounding function used. Leave as `NULL`
#' to round up and down. Use `"up"` to round up only. Use `"down"` to round down only.
#'
#' @return A copy of `df` with all invariant columns removed.
#' @export
#'
#' @examples
#' df <- data.frame(stringsAsFactors=FALSE,
#' char_invar = c("A", "A", "A", "A", "A"),
#' char_var = c("A", "A", "A", "B", "A"),
#' num_invar = c(1L, 1L, 1L, 1L, 1L),
#' num_mean_0 = c(0, -0.1, 0.1, 0.01, -0.01),
#' num_var = c(0, 0.2, 0.8, 0.03, 0.4)
#' )
#'
#' df
#'
#' #> char_invar char_var num_invar num_mean_0 num_var
#' #> 1 A A 1 0.00 0.00
#' #> 2 A A 1 -0.10 0.20
#' #> 3 A A 1 0.10 0.80
#' #> 4 A B 1 0.01 0.03
#' #> 5 A A 1 -0.01 0.40
#'
#'
#' drop_invar_cols(df)
#'
#' #> char_var num_var
#' #> 1 A 0.00
#' #> 2 A 0.20
#' #> 3 A 0.80
#' #> 4 B 0.03
#' #> 5 A 0.40
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
drop_invar_cols <- function(df, from = 1, to = NULL, cols = NULL,
                            nearest = NULL, dir = NULL) {
    # Resolve which columns to examine (construct_cols() is a helper defined
    # elsewhere in this package).
    chosen <- construct_cols(df, from = from, to = to, cols = cols)
    # TRUE when a column has more than one distinct value and should be kept.
    varies <- function(col) {
        if (is.numeric(col)) {
            # Numeric columns use fuzzy matching: round every value to a
            # common target (the column mean by default) before counting
            # distinct values. round_to_nearest() and howmany() are helpers
            # defined elsewhere in this package.
            target <- if (is.null(nearest)) mean(col, na.rm = TRUE) else nearest
            rounded <- round_to_nearest(col, to = target, dir = dir)
            howmany(rounded) > 1
        } else {
            # Non-numeric columns must vary by exact comparison.
            howmany(col) > 1
        }
    }
    base::Filter(varies, df[chosen])
}
#' First and last rows of a dataframe
#'
#' @param df (Dataframe) A dataframe.
#' @param top (Integer) The number of rows to get from the start of `df`.
#' @param tail (Integer) The number of rows to get from the end of `df`.
#'
#' @details `0` can be provided for the top and tail, in which case it will behave like
#' `head()` and `tail()` respectively.
#'
#' @return A dataframe.
#' @export
#'
#' @examples
#' # Returns 6 rows by default, just like head() does.
#' top_tail(iris)
#'
#' #> Sepal.Length Sepal.Width Petal.Length Petal.Width Species
#' #> 1 5.1 3.5 1.4 0.2 setosa
#' #> 2 4.9 3.0 1.4 0.2 setosa
#' #> 3 4.7 3.2 1.3 0.2 setosa
#' #> 148 6.5 3.0 5.2 2.0 virginica
#' #> 149 6.2 3.4 5.4 2.3 virginica
#' #> 150 5.9 3.0 5.1 1.8 virginica
#'
#' top_tail(iris, top = 1, tail = 2)
#'
#' #> Sepal.Length Sepal.Width Petal.Length Petal.Width Species
#' #> 1 5.1 3.5 1.4 0.2 setosa
#' #> 149 6.2 3.4 5.4 2.3 virginica
#' #> 150 5.9 3.0 5.1 1.8 virginica
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
top_tail <- function(df, top = 3, tail = 3) {
    rows <- nrow(df)
    # Clamp the requests into [0, rows]. The previous index arithmetic
    # ((rows - (tail - 1)):rows) produced zero/negative indices — an error
    # when mixed with positive ones — whenever tail > rows, and produced
    # NA rows whenever top > rows.
    top <- min(max(top, 0), rows)
    tail <- min(max(tail, 0), rows)
    top_range <- seq_len(top)  # empty when top == 0, like head(df, 0)
    tail_range <- if (tail > 0) (rows - tail + 1):rows else integer(0)
    # unique() stops a row being returned twice when the ranges overlap.
    # drop = FALSE keeps the result a dataframe even for single-column
    # input, as the documented return value promises.
    df[unique(c(top_range, tail_range)), , drop = FALSE]
}
#' Add a 'group size' column to a dataframe
#'
#' @param df (Dataframe) The dataframe.
#' @param ... (Names) Bare names of the columns of `df` that will form the groups for
#' `dplyr::group_by()`.
#' @param .id (Character) The name of the new column. If `NA` (default), the name
#' will be generated from the columns in `...`.
#' @param na.rm (Logical or Character) If `TRUE`, runs `tidyr::drop_na(df)` before
#' grouping. If a `Character` vector that has column names, runs
#' `tidyr::drop_na(df, ...)` where `...` is the column names that will be
#' considered for dropping.
#'
#' @return An ungrouped dataframe `df` with a new column containing the group size,
#' duplicated at each row. By default, the new column's name is generated from the
#' groups in `...` (see examples).
#' @export
#'
#' @examples
#'
#' sw_subset <- dplyr::select(dplyr::starwars, -(films:starships))
#'
#' test <- add_group_size(sw_subset, species, homeworld,
#' .id = "my_colname", na.rm = FALSE)
#' dplyr::glimpse(test)
#'
#' #> Observations: 87
#' #> Variables: 11
#' #> $ name <chr> "Luke Skywalker", "C-3PO", "R2-D2", "Darth Vader", "Le...
#' #> $ height <int> 172, 167, 96, 202, 150, 178, 165, 97, 183, 182, 188, 1...
#' #> $ mass <dbl> 77.0, 75.0, 32.0, 136.0, 49.0, 120.0, 75.0, 32.0, 84.0...
#' #> $ hair_color <chr> "blond", NA, NA, "none", "brown", "brown, grey", "brow...
#' #> $ skin_color <chr> "fair", "gold", "white, blue", "white", "light", "ligh...
#' #> $ eye_color <chr> "blue", "yellow", "red", "yellow", "brown", "blue", "b...
#' #> $ birth_year <dbl> 19.0, 112.0, 33.0, 41.9, 19.0, 52.0, 47.0, NA, 24.0, 5...
#' #> $ gender <chr> "male", NA, NA, "male", "female", "male", "female", NA...
#' #> $ homeworld <chr> "Tatooine", "Tatooine", "Naboo", "Tatooine", "Alderaan...
#' #> $ species <chr> "Human", "Droid", "Droid", "Human", "Human", "Human", ...
#' #> $ my_colname <int> 8, 2, 1, 8, 3, 8, 8, 2, 8, 1, 8, 1, 2, 2, 1, 1, 2, 1, ...
#'
#' test2 <- add_group_size(sw_subset, eye_color, homeworld,
#' na.rm = c("hair_color", "gender"))
#'
#' # Note the automatic column names and the dropped NA rows.
#' dplyr::glimpse(test2)
#'
#' #> Observations: 82
#' #> Variables: 11
#' #> $ name <chr> "Luke Skywalker", "Darth Vader", "Lei...
#' #> $ height <int> 172, 202, 150, 178, 165, 183, 182, 18...
#' #> $ mass <dbl> 77.0, 136.0, 49.0, 120.0, 75.0, 84.0,...
#' #> $ hair_color <chr> "blond", "none", "brown", "brown, gre...
#' #> $ skin_color <chr> "fair", "white", "light", "light", "l...
#' #> $ eye_color <chr> "blue", "yellow", "brown", "blue", "b...
#' #> $ birth_year <dbl> 19.0, 41.9, 19.0, 52.0, 47.0, 24.0, 5...
#' #> $ gender <chr> "male", "male", "female", "male", "fe...
#' #> $ homeworld <chr> "Tatooine", "Tatooine", "Alderaan", "...
#' #> $ species <chr> "Human", "Human", "Human", "Human", "...
#' #> $ grpsize_eye_color_homeworld <int> 5, 1, 3, 5, 5, 2, 1, 5, 1, 2, 1, 1, 1...
#'
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @importFrom rlang :=
#' @md
add_group_size <- function(df, ..., .id = NA, na.rm = FALSE) {
    # Name the new column: either the caller-supplied .id, or a name built
    # from the grouping columns (e.g. "grpsize_species_homeworld").
    # dots_char() is a helper defined elsewhere in this package.
    if (is.na(.id)) {
        colname <- paste0("grpsize_", dots_char(..., collapse = "_"))
    } else {
        colname <- as.character(.id)
    }
    if (is.character(na.rm)) {
        if (length(na.rm) == 0) {
            # Single-line message: the old multi-line string literal embedded
            # a newline plus a run of indentation spaces in the output.
            stop("Argument 'na.rm' must be TRUE, FALSE, or a character vector of column names.",
                 call. = FALSE)
        }
        # all_of() is the supported way to select columns from an external
        # character vector; passing the bare vector (as the old do.call()
        # form did) relies on deprecated tidyselect behaviour.
        df <- tidyr::drop_na(df, dplyr::all_of(na.rm))
    } else if (isTRUE(na.rm)) {
        df <- tidyr::drop_na(df)
    }
    # Group, stamp the group size onto every row, then drop the grouping so
    # callers get back a plain (ungrouped) dataframe.
    res <- dplyr::group_by(df, ...)
    res <- dplyr::mutate(res, !!colname := dplyr::n())
    return(dplyr::ungroup(res))
}
#' Only keep rows that contain NA
#'
#' It is sometimes useful to look at rows of a dataframe where a value is
#' missing. For example, you may want to see why a function returns NA in some
#' rows and not others.
#'
#' @param df (Dataframe) The dataframe
#'
#' @return A dataframe with the same number of columns as `df`, but the only
#' rows it has are rows that have at least 1 `NA` value.
#' @export
#'
#' @examples
#'
#' na_starwars <- rows_with_na(dplyr::starwars)
#' dplyr::glimpse(na_starwars)
#'
#' #> Observations: 58
#' #> Variables: 13
#' #> $ name <chr> "C-3PO", "R2-D2", "R5-D4", "Wilhuff Tarkin", "Greedo",...
#' #> $ height <int> 167, 96, 97, 180, 173, 175, 180, 66, 200, 150, NA, 160...
#' #> $ mass <dbl> 75, 32, 32, NA, 74, 1358, 110, 17, 140, NA, NA, 68, 89...
#' #> $ hair_color <chr> NA, NA, NA, "auburn, grey", NA, NA, "brown", "white", ...
#' #> $ skin_color <chr> "gold", "white, blue", "white, red", "fair", "green", ...
#' #> $ eye_color <chr> "yellow", "red", "red", "blue", "black", "orange", "bl...
#' #> $ birth_year <dbl> 112, 33, NA, 64, 44, 600, NA, 896, 15, 48, NA, NA, 92,...
#' #> $ gender <chr> NA, NA, NA, "male", "male", "hermaphrodite", "male", "...
#' #> $ homeworld <chr> "Tatooine", "Naboo", "Tatooine", "Eriadu", "Rodia", "N...
#' #> $ species <chr> "Droid", "Droid", "Droid", "Human", "Rodian", "Hutt", ...
#' #> $ films <list> [<"Attack of the Clones", "The Phantom Menace", "Reve...
#' #> $ vehicles <list> [<>, <>, <>, <>, <>, <>, <>, <>, <>, <>, <>, <>, "Tri...
#' #> $ starships <list> [<>, <>, <>, <>, <>, <>, "X-wing", <>, <>, <>, "A-win...
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#' @md
#' @importFrom magrittr %>%
rows_with_na <- function(df) {
    # Map every cell to 1 (NA) or 0 (not NA), then total per row.
    # if_na() is a helper defined elsewhere in this package.
    na_flags <- dplyr::mutate_all(df, ~ if_na(., 1, 0))
    per_row <- rowSums(na_flags)
    # Keep only rows with at least one NA; the scratch column is removed
    # before returning so the output has the same columns as `df`.
    flagged <- dplyr::mutate(df, na_count_per_row = per_row)
    flagged <- dplyr::filter(flagged, na_count_per_row > 0)
    dplyr::select(flagged, -na_count_per_row)
}
#' Given two dataframes, which columns appear in both of them?
#'
#' The order of `l` and `r` doesn't matter for `same_cols()`, but
#' it does for `diff_cols()`.
#'
#' @param l (Dataframe) A dataframe whose column names to compare.
#' @param r (Dataframe) A dataframe whose column names to compare.
#'
#' @return A Character vector with the names of the columns that appear
#' in both `l` and `r`.
#'
#' @export
#'
#' @examples
#' iris1 <- iris[, 1:3]
#' colnames(iris1)
#'
#' #> [1] "Sepal.Length" "Sepal.Width" "Petal.Length"
#'
#' iris2 <- iris[, 2:5]
#' colnames(iris2)
#' #> [1] "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
#'
#'
#' same_cols(iris1, iris2)
#' #> [1] "Sepal.Width" "Petal.Length"
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#' @md
same_cols <- function(l, r) {
    # Set intersection of the two column-name vectors. The membership result
    # is symmetric in l and r; the element order follows colnames(l).
    shared <- base::intersect(colnames(l), colnames(r))
    return(shared)
}
#' Given two dataframes, which columns are present in one but not in the other?
#'
#' Unlike `same_cols()`, the order of `l` and `r` *does* matter for `diff_cols()`.
#'
#' @param l (Dataframe) A dataframe whose column names to compare.
#' @param r (Dataframe) A dataframe whose column names to compare.
#' @param side (Character) `"both"` or `"b"` (default) finds columns that are missing from
#' both dataframes. `"left"` or `"l"` finds cols in `l` that are not in `r`.
#' `"right"` or `"r"` finds cols in `r` that are not in `l`.
#'
#' @return A Character vector with the names of missing columns.
#' @export
#'
#' @examples
#' iris1 <- iris[, 1:3]
#' colnames(iris1)
#' ## [1] "Sepal.Length" "Sepal.Width" "Petal.Length"
#'
#' iris2 <- iris[, 2:5]
#' colnames(iris2)
#' ## [1] "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
#'
#' diff_cols(iris1, iris2)
#' #> [1] "Sepal.Length" "Petal.Width" "Species"
#'
#' diff_cols(iris1, iris2, side = "l")
#' #> [1] "Sepal.Length"
#'
#' diff_cols(iris1, iris2, side = "r")
#' #> [1] "Petal.Width" "Species"
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#' @md
diff_cols <- function(l, r, side = "both") {
    # Both directions need to be compared: setdiff() is asymmetric.
    only_l <- base::setdiff(colnames(l), colnames(r))
    only_r <- base::setdiff(colnames(r), colnames(l))
    # Prefix matching ("b"/"both", "l"/"left", "r"/"right") is preserved
    # from the original implementation for backward compatibility.
    if (grepl("^b", side))
        return(unique(c(only_l, only_r)))
    if (grepl("^l", side))
        return(set1 <- only_l)
    if (grepl("^r", side))
        return(only_r)
    # Previously an unrecognised `side` fell off the end of the function and
    # silently returned NULL; fail loudly instead.
    stop("Argument 'side' must be 'both', 'left', or 'right' (or 'b'/'l'/'r').",
         call. = FALSE)
}
#' Count/proportion of `NA`s and not-`NA`s per dataframe row
#'
#' @param df (Dataframe) A dataframe.
#' @param ... (Tidy-select) `dplyr`-style column selection.
#' See [dplyr::dplyr_tidy_select()]. If empty, defaults to `dplyr::everything()`.
#'
#' @return The dataframe `df` with four new columns: `na_in_row_count`, `notna_in_row_count`,
#' `na_in_row_prop`, and `notna_in_row_prop`.
#'
#' @export
#'
#' @examples
#' df <- data.frame(a = c(1, 2, NA, 4, NA), b = 1:5, c = c(NA, 2, 3, NA, NA))
#'
#' df
#'
#' #> a b c
#' #> 1 1 1 NA
#' #> 2 2 2 2
#' #> 3 NA 3 3
#' #> 4 4 4 NA
#' #> 5 NA 5 NA
#'
#' # By default, looks for NAs in all columns
#' na_in_row(df)
#'
#' # a b c na_in_row_count notna_in_row_count na_in_row_prop notna_in_row_prop
#' # 1 1 1 NA 1 2 0.3333333 0.6666667
#' # 2 2 2 2 0 3 0.0000000 1.0000000
#' # 3 NA 3 3 1 2 0.3333333 0.6666667
#' # 4 4 4 NA 1 2 0.3333333 0.6666667
#' # 5 NA 5 NA 2 1 0.6666667 0.3333333
#'
#' # Or use tidyselectors to choose columns. Here, looking for
#' # NAs in all columns except `b`
#' na_in_row(df, -b)
#'
#' # a b c na_in_row_count notna_in_row_count na_in_row_prop notna_in_row_prop
#' # 1 1 1 NA 1 1 0.5 0.5
#' # 2 2 2 2 0 2 0.0 1.0
#' # 3 NA 3 3 1 1 0.5 0.5
#' # 4 4 4 NA 1 1 0.5 0.5
#' # 5 NA 5 NA 2 0 1.0 0.0
#'
#' @section Source:
#' - <https://stackoverflow.com/a/35444245/5578429>
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#' - maloneypatr (<https://stackoverflow.com/users/2124146/maloneypatr>)
#'
#' @md
na_in_row <- function(df, ...) {
    # Choose which columns to scan for NAs; default is every column.
    if (...length() == 0) {
        wip <- dplyr::select(df, dplyr::everything())
    } else {
        wip <- dplyr::select(df, ...)
    }
    # rowSums(is.na(...)) counts NAs for all rows in one vectorised pass,
    # replacing the original three row-wise apply() calls (each of which
    # also coerced the whole frame to a matrix).
    n_scanned <- ncol(wip)
    na_count <- as.integer(rowSums(is.na(wip)))  # as.integer matches the old sum() type
    notna_count <- n_scanned - na_count
    stats <- data.frame(na_in_row_count    = na_count,
                        notna_in_row_count = notna_count,
                        na_in_row_prop     = na_count / n_scanned,
                        notna_in_row_prop  = notna_count / n_scanned)
    # The four summary columns are appended to the *original* dataframe, so
    # columns excluded from the scan are still present in the result.
    return(dplyr::bind_cols(df, stats))
}
| /R/dataframe_tools.R | permissive | DesiQuintans/desiderata | R | false | false | 28,519 | r | # Extra functions that help me work inside dataframes.
#' Replace all matching values in a dataframe with something else
#'
#' Uses regex to match and replace cell values. This function is meant for formatting
#' printed data; it's often useful to blank out NAs or other values to minimise visual
#' clutter when you are reading a table. For actual data-tidying applications, it's
#' safer to use `dplyr::recode()` or `dplyr::recode_factor()`.
#'
#' By default, this function will replace cells consisting of NAs, whitespace, empty
#' strings, zeros ("0"), dashes, and underscores with an empty string.
#'
#' @param df (Dataframe) A dataframe.
#' @param find (Character) A regex search pattern.
#' @param replace (Character) The string used to overwrite the matching cells.
#' @param replace_na (Logical) If `TRUE`, also overwrite R's built-in `NA` values.
#'
#' @return A dataframe.
#' @export
#'
#' @examples
#' test_df <-
#' data.frame(stringsAsFactors = FALSE,
#' name = c("insect1", "insect2", "insect3", "insect4", "insect5",
#' "insect6", "insect7", "insect8", "insect9", "insect10"),
#' family = c("Belidae", "Belidae", " ", "Coccinelidae", NA, "Coccinelidae",
#' "Braconidae", "_", "-", "Curculionidae"),
#' is_cool = c("TRUE", "TRUE", NA, "TRUE", "", "TRUE", "TRUE", "-", "_",
#' "TRUE")
#' )
#'
#' test_df
#' overwrite_df(test_df)
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
overwrite_df <- function(df, find = "^(NA||\\s+|0|-+|_+)$", replace = "", replace_na = TRUE) {
    # Operate on a copy; the caller's dataframe is left untouched.
    cleaned <- df
    if (replace_na == TRUE) {
        # gsub() cannot see R's real NA values, so substitute them with the
        # replacement string before pattern matching.
        cleaned[is.na(cleaned)] <- replace
    }
    # Coerce each column to character, then rewrite the cells matching `find`.
    new_cols <- lapply(cleaned, function(v) gsub(find, replace, as.character(v)))
    # Rebuild as a dataframe while preserving the original column names.
    data.frame(new_cols,
               stringsAsFactors = FALSE,
               check.rows = FALSE,
               check.names = FALSE,
               fix.empty.names = FALSE)
}
#' Drop 'empty' columns in a dataframe
#'
#' Deletes columns from a dataframe if they are 'empty'. A column is empty when every
#' single row is `NA`, `NULL`, `""`, or matches a regular expression.
#'
#' @param df (Dataframe) A dataframe.
#' @param from,to (Numeric or `NULL`) The start and end of a continuous range of columns
#' that will be subsetted from `df`. If `to` is `NULL`, it defaults to the last
#' column in `df` so that `from = 2, to = NULL` is the same as `2:length(df)`.
#' @param cols (Numeric or `NULL`) A numeric vector of the columns to consider. This
#' allows you to select non-contiguous columns. If the `cols` argument is being used
#' (not-`NULL`), `from` and `to` will be ignored.
#' @param regex (Character) A regex pattern that matches a value that should be considered
#' 'empty'.
#' @param report (Logical) If `TRUE`, print a Message with the names of the empty columns
#' that were dropped.
#'
#' @return A subset of `df` with all empty columns removed.
#' @export
#'
#' @examples
#' data <- data.frame(a = c(1, 2, 3),
#' b = c(0, 0, 0),
#' c = c(1, 1, 0),
#' d = c("", "", ""),
#' e = c("moo", "baa", "woof"))
#'
#' #> a b c d e
#' #> 1 0 1 moo
#' #> 2 0 1 baa
#' #> 3 0 0 woof
#'
#' drop_empty_cols(data)
#'
#' #> a b c e
#' #> 1 0 1 moo
#' #> 2 0 1 baa
#' #> 3 0 0 woof
#'
#' drop_empty_cols(data, regex = "moo|baa|woof")
#'
#' #> a b c
#' #> 1 0 1
#' #> 2 0 1
#' #> 3 0 0
#'
#' drop_empty_cols(data, regex = "moo|baa|woof", report = TRUE)
#'
#' #> Empty cols dropped: d, e
#' #> a b c
#' #> 1 0 1
#' #> 2 0 1
#' #> 3 0 0
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
drop_empty_cols <- function(df, from = 1, to = NULL, cols = NULL, regex = "^$",
                            report = FALSE) {
    # Work out which columns to examine (construct_cols() is a helper
    # defined elsewhere in this package).
    scan_cols <- construct_cols(df, from = from, to = to, cols = cols)
    examined <- df[scan_cols]
    # Keep a column when at least one cell is not NA/NULL/"" and does not
    # match `regex`.
    keep_col <- function(col) {
        !all(is.na(col) | is.null(col) | col == "" | grepl(regex, col))
    }
    surviving <- base::Filter(keep_col, examined)
    if (report == TRUE) {
        # fold() (package helper) abbreviates the list of dropped names.
        message("Dropped empty cols: ", fold(diff_cols(df, surviving), n = Inf))
    }
    return(dplyr::select(df, any_of(colnames(surviving))))
}
#' Drop 'empty' rows in a dataframe
#'
#' Deletes rows from a dataframe if they are 'empty'. A row is empty when every single
#' cell is `NA`, `NULL`, `""`, or matches a regular expression.
#'
#' @param df (Dataframe) A dataframe.
#' @param from,to (Numeric or `NULL`) The start and end of a continuous range of columns
#' that will be subsetted from `df`. For example, columns that are always filled
#' should be omitted (see examples). If `to` is `NULL`, it defaults to the last
#' column in `df` so that `from = 2, to = NULL` is the same as `2:length(df)`.
#' @param cols (Numeric or `NULL`) A numeric vector of the columns to consider. This
#' allows you to select non-contiguous columns. If the `cols` argument is being used
#' (not-`NULL`), `from` and `to` will be ignored.
#' @param regex (Character) A regex pattern that matches a value that should be considered
#' 'empty'.
#' @param report (Logical) If `TRUE`, print a Message with the number of empty rows
#' that were dropped.
#'
#' @return A subset of `df` with all empty rows removed.
#' @export
#'
#' @examples
#' data <- data.frame(name = c("Jim", "Jane", "Janice", "Joe", "Jay"),
#' a = c(0, "", 1, NA, 0),
#' b = c(1, "", 1, NA, 0),
#' c = c(1, NA, 2, 0, 0),
#' d = c(0, NA, 4, 0, 0),
#' e = c(0, "", 5, 0, 0),
#' f = c(3, "", 0, 0, 0),
#' stringsAsFactors = FALSE)
#'
#' data
#'
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 2 Jane NA NA
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' drop_empty_rows(data)
#'
#' # Returns the whole dataframe because column 1 ('name') is never empty.
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 2 Jane NA NA
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' drop_empty_rows(data, from = 2)
#'
#' # We get the desired result when 'name' is omitted.
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' drop_empty_rows(data, from = 2, regex = "^0$")
#'
#' # Regex can be used to match cells that should be 'empty'.
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 3 Janice 1 1 2 4 5 0
#'
#' drop_empty_rows(data, cols = c(2, 6))
#'
#' # Non-contiguous columns can be selected with 'cols'.
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' drop_empty_rows(data, cols = c(2, 6), report = TRUE)
#'
#' #> Dropped rows: 1 in total
#' #> name a b c d e f
#' #> 1 Jim 0 1 1 0 0 3
#' #> 3 Janice 1 1 2 4 5 0
#' #> 4 Joe <NA> <NA> 0 0 0 0
#' #> 5 Jay 0 0 0 0 0 0
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
drop_empty_rows <- function(df, from = 1, to = NULL, cols = NULL, regex = "^$",
                            report = FALSE) {
    # Resolve which columns are scanned for emptiness (construct_cols() is a
    # helper defined elsewhere in this package).
    selected <- construct_cols(df, from = from, to = to, cols = cols)
    sub_df <- df[selected]
    # https://stackoverflow.com/a/15618761/5578429
    # trimws() MUST be kept in the anonymous function below. This is because of how,
    # when apply() is given a dataframe, it coerces it to a matrix with as.matrix(). The
    # coercion is done using format(), which pads numbers with spaces to the length
    # of the longest string in the column. This means that a df might be coerced as:
    #
    # "NA" "1" "1"
    # " 0" "0" " "     This row is wrongly kept because " " is not 'empty'.
    # " 1" "1" "2"
    # " 1" "1" "3"
    not_empty <- apply(sub_df,
                       MARGIN = 1, # Along each row...
                       function(x) {
                           # Remove whitespace from each cell
                           y <- trimws(x, which = "both");
                           # Negated so that non-empty rows are TRUE, and will be kept.
                           # NOTE(review): is.null(y) is always FALSE for elements of an
                           # atomic vector, so that test is a no-op; NAs are already
                           # caught by is.na(y). Left as-is to preserve behaviour.
                           !all(nchar(y) == 0 |
                                y == "" |
                                is.na(y) |
                                is.null(y) |
                                grepl(regex, y))
                       }
    )
    # This is dplyr::slice() and not a simpler subset (df[!is_empty,]) because of a strange
    # quirk where dataframes with labelled variables would lose their labels if they were
    # subset in base R. dplyr::slice() keeps them.
    out <- dplyr::slice(df, which(not_empty))
    if (report == TRUE) {
        # fold() (package helper) abbreviates the report; only the first 50
        # dropped row numbers are listed.
        dropped_row_nums <- paste0(": ", fold(which(!not_empty), n = 50))
        message("Dropped ", nrow(df) - nrow(out), " empty rows", dropped_row_nums)
    }
    return(out)
}
#' Collapse a dataframe into a vector
#'
#' Useful for taking every number in a table and plotting it in a histogram, for example.
#'
#' @param df (Dataframe) A dataframe.
#' @param from,to (Numeric or `NULL`) The start and end of a continuous range of columns
#' that will be considered for the empty/not-empty decision. For example, columns that
#' are always filled should be omitted (see examples). If `to` is `NULL`, it defaults
#' to the last column in `df` so that `from = 2, to = NULL` is the same as
#' `2:length(df)`.
#' @param cols (Numeric or `NULL`) A numeric vector of the columns to consider. This
#' allows you to select non-contiguous columns. If the `cols` argument is being used
#' (not-`NULL`), `from` and `to` will be ignored.
#'
#' @return A vector containing the cell contents from the selected columns of `df`.
#' If all of the cells are numeric, the vector is Numeric. If any of the cells contain
#' strings, the vector is Character. The columns are concatenated in order.
#' @export
#'
#' @examples
#' collapse_df(iris, cols = 1:4)
#'
#' #> [1] 5.1 4.9 4.7 4.6 5.0 5.4 4.6 5.0 4.4 4.9 5.4 4.8 ...
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
collapse_df <- function(df, from = 1, to = NULL, cols = NULL) {
    # Flatten the chosen columns of `df` into one unnamed vector.
    # construct_cols() (package-internal) resolves the from/to range or the
    # explicit `cols` selection; unlist() then concatenates the columns in
    # order, promoting to character if any cell holds a string.
    chosen <- construct_cols(df, from = from, to = to, cols = cols)
    unlist(df[chosen], use.names = FALSE)
}
#' Sort columns in a dataframe
#'
#' Sorts the columns of a dataframe, and then allows you to pull columns to the start of
#' the dataframe by name.
#'
#' @param df (Dataframe) A dataframe.
#' @param ... (Column names) If you want to manually position columns _after_ they are
#' sorted, provide unquoted column names here. The columns in `...` will be placed
#' first in the dataframe, and then all other unlisted columns will be placed after.
#' @param decreasing (Logical) If `FALSE`, sort columns from A-Z and 0-9. If `TRUE`, sort
#' in reverse.
#'
#' @return A copy of `df` with reordered columns.
#' @export
#'
#' @examples
#' colnames(iris)
#'
#' #> [1] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
#'
#' sorted <- sort_cols(iris)
#' colnames(sorted)
#'
#' #> [1] "Petal.Length" "Petal.Width" "Sepal.Length" "Sepal.Width" "Species"
#'
#' reverse <- sort_cols(iris, decreasing = TRUE)
#' colnames(reverse)
#'
#' #> [1] "Species" "Sepal.Width" "Sepal.Length" "Petal.Width" "Petal.Length"
#'
#' manual <- sort_cols(iris, Species)
#' colnames(manual)
#'
#' #> [1] "Species" " Petal.Length" "Petal.Width" "Sepal.Length" "Sepal.Width"
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
sort_cols <- function(df, ..., decreasing = FALSE) {
    # Capture any columns the caller wants pinned to the front of the result.
    pinned <- dplyr::quos(...)
    # Alphabetically order every column (reversed when decreasing = TRUE).
    alpha_ordered <- df[, order(colnames(df), decreasing = decreasing)]
    # Place the pinned columns first, then all remaining columns in
    # their sorted order.
    dplyr::select(alpha_ordered, !!! pinned, dplyr::everything())
}
#' Drop invariant columns from a dataframe
#'
#' Deletes columns from a dataframe if they do not vary. For `character` and `factor`
#' columns, this means that every row of the column contains exactly the same string.
#' For `numeric` columns, the numbers are rounded to a nearest common value and then
#' checked to see if every rounded number is the same.
#'
#' @param df (Dataframe) A dataframe.
#' @param from,to (Numeric or `NULL`) The start and end of a continuous range of columns
#' that will be used. If `to` is `NULL`, it defaults to the last column in `df` so
#' that `from = 2, to = NULL` is the same as `2:length(df)`.
#' @param cols (Numeric or `NULL`) A numeric vector of the columns to consider. This
#' allows you to select non-contiguous columns. If the `cols` argument is being used
#' (not-`NULL`), `from` and `to` will be ignored.
#' @param nearest (Numeric or `NULL`) For numeric columns, this is the common value that
#' all numbers will be rounded to. The default `NULL` uses the `mean()` of each
#' column as the rounding target.
#' @param dir (Character or `NULL`) Controls the rounding function used. Leave as `NULL`
#' to round up and down. Use `"up"` to round up only. Use `"down"` to round down only.
#'
#' @return A copy of `df` with all invariant columns removed.
#' @export
#'
#' @examples
#' df <- data.frame(stringsAsFactors=FALSE,
#' char_invar = c("A", "A", "A", "A", "A"),
#' char_var = c("A", "A", "A", "B", "A"),
#' num_invar = c(1L, 1L, 1L, 1L, 1L),
#' num_mean_0 = c(0, -0.1, 0.1, 0.01, -0.01),
#' num_var = c(0, 0.2, 0.8, 0.03, 0.4)
#' )
#'
#' df
#'
#' #> char_invar char_var num_invar num_mean_0 num_var
#' #> 1 A A 1 0.00 0.00
#' #> 2 A A 1 -0.10 0.20
#' #> 3 A A 1 0.10 0.80
#' #> 4 A B 1 0.01 0.03
#' #> 5 A A 1 -0.01 0.40
#'
#'
#' drop_invar_cols(df)
#'
#' #> char_var num_var
#' #> 1 A 0.00
#' #> 2 A 0.20
#' #> 3 A 0.80
#' #> 4 B 0.03
#' #> 5 A 0.40
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
drop_invar_cols <- function(df, from = 1, to = NULL, cols = NULL,
                            nearest = NULL, dir = NULL) {
    # Resolve the columns to examine; construct_cols() is a package helper.
    selected <- construct_cols(df, from = from, to = to, cols = cols)
    sub_df <- df[selected]
    # NOTE(review): the function returns the *selected* columns with the
    # invariant ones removed — columns outside the from/to/cols selection
    # are not carried through, which differs from the roxygen claim of
    # "a copy of df"; confirm which behaviour is intended.
    base::Filter(
        function(x) {
            if (is.numeric(x)) {
                # Use fuzzy (rounded) matching
                # The `=` assignment creates a closure-local `nearest`,
                # so each numeric column defaults to its own mean.
                if (is.null(nearest)) nearest = mean(x, na.rm = TRUE)
                # round_to_nearest() and howmany() are package helpers.
                rounded <- round_to_nearest(x, to = nearest, dir = dir)
                if (howmany(rounded) == 1) return(FALSE)
            } else {
                # Use exact matching
                if (howmany(x) == 1) return(FALSE)
            }
            # Keep the column: it has more than one (rounded) value.
            return(TRUE)
        }, sub_df)
}
#' First and last rows of a dataframe
#'
#' @param df (Dataframe) A dataframe.
#' @param top (Integer) The number of rows to get from the start of `df`.
#' @param tail (Integer) The number of rows to get from the end of `df`.
#'
#' @details `0` can be provided for the top and tail, in which case it will behave like
#' `head()` and `tail()` respectively.
#'
#' @return A dataframe.
#' @export
#'
#' @examples
#' # Returns 6 rows by default, just like head() does.
#' top_tail(iris)
#'
#' #> Sepal.Length Sepal.Width Petal.Length Petal.Width Species
#' #> 1 5.1 3.5 1.4 0.2 setosa
#' #> 2 4.9 3.0 1.4 0.2 setosa
#' #> 3 4.7 3.2 1.3 0.2 setosa
#' #> 148 6.5 3.0 5.2 2.0 virginica
#' #> 149 6.2 3.4 5.4 2.3 virginica
#' #> 150 5.9 3.0 5.1 1.8 virginica
#'
#' top_tail(iris, top = 1, tail = 2)
#'
#' #> Sepal.Length Sepal.Width Petal.Length Petal.Width Species
#' #> 1 5.1 3.5 1.4 0.2 setosa
#' #> 149 6.2 3.4 5.4 2.3 virginica
#' #> 150 5.9 3.0 5.1 1.8 virginica
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @md
top_tail <- function(df, top = 3, tail = 3) {
    # First and last rows of a dataframe in one result.
    #
    # df   - the dataframe to slice.
    # top  - number of rows taken from the start (0 = none).
    # tail - number of rows taken from the end (0 = none).
    #
    # Returns a dataframe (never a bare vector) containing the requested
    # rows, deduplicated when the two ranges overlap.
    rows <- nrow(df)
    # Clamp requests into [0, rows]. Previously, top > rows produced
    # NA-filled rows and tail > rows errored with mixed positive/negative
    # subscripts.
    top <- min(max(top, 0), rows)
    tail <- min(max(tail, 0), rows)
    # Build the two index ranges; an empty request contributes no rows.
    top_range <- if (top > 0) seq_len(top) else integer(0)
    tail_range <- if (tail > 0) seq.int(rows - tail + 1, rows) else integer(0)
    # unique() prevents duplicated rows when the ranges overlap;
    # drop = FALSE keeps a one-column dataframe a dataframe, matching the
    # documented return type.
    df[unique(c(top_range, tail_range)), , drop = FALSE]
}
#' Add a 'group size' column to a dataframe
#'
#' @param df (Dataframe) The dataframe.
#' @param ... (Names) Bare names of the columns of `df` that will form the groups for
#' `dplyr::group_by()`.
#' @param .id (Character) The name of the new column. If `NA` (default), the name
#' will be generated from the columns in `...`.
#' @param na.rm (Logical or Character) If `TRUE`, runs `tidyr::drop_na(df)` before
#' grouping. If a `Character` vector that has column names, runs
#' `tidyr::drop_na(df, ...)` where `...` is the column names that will be
#' considered for dropping.
#'
#' @return An ungrouped dataframe `df` with a new column containing the group size,
#' duplicated at each row. By default, the new column's name is generated from the
#' groups in `...` (see examples).
#' @export
#'
#' @examples
#'
#' sw_subset <- dplyr::select(dplyr::starwars, -(films:starships))
#'
#' test <- add_group_size(sw_subset, species, homeworld,
#' .id = "my_colname", na.rm = FALSE)
#' dplyr::glimpse(test)
#'
#' #> Observations: 87
#' #> Variables: 11
#' #> $ name <chr> "Luke Skywalker", "C-3PO", "R2-D2", "Darth Vader", "Le...
#' #> $ height <int> 172, 167, 96, 202, 150, 178, 165, 97, 183, 182, 188, 1...
#' #> $ mass <dbl> 77.0, 75.0, 32.0, 136.0, 49.0, 120.0, 75.0, 32.0, 84.0...
#' #> $ hair_color <chr> "blond", NA, NA, "none", "brown", "brown, grey", "brow...
#' #> $ skin_color <chr> "fair", "gold", "white, blue", "white", "light", "ligh...
#' #> $ eye_color <chr> "blue", "yellow", "red", "yellow", "brown", "blue", "b...
#' #> $ birth_year <dbl> 19.0, 112.0, 33.0, 41.9, 19.0, 52.0, 47.0, NA, 24.0, 5...
#' #> $ gender <chr> "male", NA, NA, "male", "female", "male", "female", NA...
#' #> $ homeworld <chr> "Tatooine", "Tatooine", "Naboo", "Tatooine", "Alderaan...
#' #> $ species <chr> "Human", "Droid", "Droid", "Human", "Human", "Human", ...
#' #> $ my_colname <int> 8, 2, 1, 8, 3, 8, 8, 2, 8, 1, 8, 1, 2, 2, 1, 1, 2, 1, ...
#'
#' test2 <- add_group_size(sw_subset, eye_color, homeworld,
#' na.rm = c("hair_color", "gender"))
#'
#' # Note the automatic column names and the dropped NA rows.
#' dplyr::glimpse(test2)
#'
#' #> Observations: 82
#' #> Variables: 11
#' #> $ name <chr> "Luke Skywalker", "Darth Vader", "Lei...
#' #> $ height <int> 172, 202, 150, 178, 165, 183, 182, 18...
#' #> $ mass <dbl> 77.0, 136.0, 49.0, 120.0, 75.0, 84.0,...
#' #> $ hair_color <chr> "blond", "none", "brown", "brown, gre...
#' #> $ skin_color <chr> "fair", "white", "light", "light", "l...
#' #> $ eye_color <chr> "blue", "yellow", "brown", "blue", "b...
#' #> $ birth_year <dbl> 19.0, 41.9, 19.0, 52.0, 47.0, 24.0, 5...
#' #> $ gender <chr> "male", "male", "female", "male", "fe...
#' #> $ homeworld <chr> "Tatooine", "Tatooine", "Alderaan", "...
#' #> $ species <chr> "Human", "Human", "Human", "Human", "...
#' #> $ grpsize_eye_color_homeworld <int> 5, 1, 3, 5, 5, 2, 1, 5, 1, 2, 1, 1, 1...
#'
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#'
#' @importFrom rlang :=
#' @md
add_group_size <- function(df, ..., .id = NA, na.rm = FALSE) {
    # Choose the name of the new group-size column: either the caller's
    # `.id`, or one generated from the grouping columns via the package
    # helper dots_char() (e.g. "grpsize_species_homeworld").
    if (is.na(.id)) {
        colname <- paste0("grpsize_", dots_char(..., collapse = "_"))
    } else {
        colname <- as.character(.id)
    }
    # na.rm may be a logical, or a character vector naming the columns
    # whose NAs should cause a row to be dropped before grouping.
    if (typeof(na.rm) == "character") {
        if (length(na.rm) == 0) {
            stop("Argument 'na.rm' must be TRUE, FALSE, or a character vector of
             column names.")
        }
        # Forward the column names to tidyr::drop_na() via do.call so the
        # character vector is spliced in as a selection argument.
        df <- do.call(tidyr::drop_na, list(data = df, na.rm))
    } else if (na.rm == TRUE) {
        # Drop rows with an NA in *any* column.
        df <- tidyr::drop_na(df)
    }
    # Group, attach the per-group row count under the chosen name
    # (`!!colname :=` injects the dynamic column name), then ungroup so the
    # caller receives an ungrouped dataframe.
    res <- dplyr::group_by(df, ...)
    res <- dplyr::mutate(res, !!colname := dplyr::n())
    res <- dplyr::ungroup(res)
    return(res)
}
#' Only keep rows that contain NA
#'
#' It is sometimes useful to look at rows of a dataframe where a value is
#' missing. For example, you may want to see why a function returns NA in some
#' rows and not others.
#'
#' @param df (Dataframe) The dataframe
#'
#' @return A dataframe with the same number of columns as `df`, but the only
#' rows it has are rows that have at least 1 `NA` value.
#' @export
#'
#' @examples
#'
#' na_starwars <- rows_with_na(dplyr::starwars)
#' dplyr::glimpse(na_starwars)
#'
#' #> Observations: 58
#' #> Variables: 13
#' #> $ name <chr> "C-3PO", "R2-D2", "R5-D4", "Wilhuff Tarkin", "Greedo",...
#' #> $ height <int> 167, 96, 97, 180, 173, 175, 180, 66, 200, 150, NA, 160...
#' #> $ mass <dbl> 75, 32, 32, NA, 74, 1358, 110, 17, 140, NA, NA, 68, 89...
#' #> $ hair_color <chr> NA, NA, NA, "auburn, grey", NA, NA, "brown", "white", ...
#' #> $ skin_color <chr> "gold", "white, blue", "white, red", "fair", "green", ...
#' #> $ eye_color <chr> "yellow", "red", "red", "blue", "black", "orange", "bl...
#' #> $ birth_year <dbl> 112, 33, NA, 64, 44, 600, NA, 896, 15, 48, NA, NA, 92,...
#' #> $ gender <chr> NA, NA, NA, "male", "male", "hermaphrodite", "male", "...
#' #> $ homeworld <chr> "Tatooine", "Naboo", "Tatooine", "Eriadu", "Rodia", "N...
#' #> $ species <chr> "Droid", "Droid", "Droid", "Human", "Rodian", "Hutt", ...
#' #> $ films <list> [<"Attack of the Clones", "The Phantom Menace", "Reve...
#' #> $ vehicles <list> [<>, <>, <>, <>, <>, <>, <>, <>, <>, <>, <>, <>, "Tri...
#' #> $ starships <list> [<>, <>, <>, <>, <>, <>, "X-wing", <>, <>, <>, "A-win...
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#' @md
#' @importFrom magrittr %>%
rows_with_na <- function(df) {
    # Flag every cell: 1 when NA, 0 otherwise (if_na is a package helper),
    # then total the flags across each row.
    na_flags <- dplyr::mutate_all(df, ~ if_na(., 1, 0))
    per_row_total <- rowSums(na_flags)
    # Keep only rows containing at least one NA, then drop the temporary
    # counter column before returning.
    tagged <- dplyr::mutate(df, na_count_per_row = per_row_total)
    kept <- dplyr::filter(tagged, na_count_per_row > 0)
    dplyr::select(kept, -na_count_per_row)
}
#' Given two dataframes, which columns appear in both of them?
#'
#' The order of `l` and `r` doesn't matter for `same_cols()`, but
#' it does for `diff_cols()`.
#'
#' @param l (Dataframe) A dataframe whose column names to compare.
#' @param r (Dataframe) A dataframe whose column names to compare.
#'
#' @return A Character vector with the names of the columns that appear
#' in both `l` and `r`.
#'
#' @export
#'
#' @examples
#' iris1 <- iris[, 1:3]
#' colnames(iris1)
#'
#' #> [1] "Sepal.Length" "Sepal.Width" "Petal.Length"
#'
#' iris2 <- iris[, 2:5]
#' colnames(iris2)
#' #> [1] "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
#'
#'
#' same_cols(iris1, iris2)
#' #> [1] "Sepal.Width" "Petal.Length"
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#' @md
same_cols <- function(l, r) {
    # Column names of `l` that also occur in `r`, deduplicated and in the
    # order they appear in `l` — the same contract as base::intersect().
    l_names <- colnames(l)
    unique(l_names[l_names %in% colnames(r)])
}
#' Given two dataframes, which columns are present in one but not in the other?
#'
#' Unlike `same_cols()`, the order of `l` and `r` *does* matter for `diff_cols()`.
#'
#' @param l (Dataframe) A dataframe whose column names to compare.
#' @param r (Dataframe) A dataframe whose column names to compare.
#' @param side (Character) `"both"` or `"b"` (default) finds columns that are missing from
#' both dataframes. `"left"` or `"l"` finds cols in `l` that are not in `r`.
#' `"right"` or `"r"` finds cols in `r` that are not in `l`.
#'
#' @return A Character vector with the names of missing columns.
#' @export
#'
#' @examples
#' iris1 <- iris[, 1:3]
#' colnames(iris1)
#' ## [1] "Sepal.Length" "Sepal.Width" "Petal.Length"
#'
#' iris2 <- iris[, 2:5]
#' colnames(iris2)
#' ## [1] "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
#'
#' diff_cols(iris1, iris2)
#' #> [1] "Sepal.Length" "Petal.Width" "Species"
#'
#' diff_cols(iris1, iris2, side = "l")
#' #> [1] "Sepal.Length"
#'
#' diff_cols(iris1, iris2, side = "r")
#' #> [1] "Petal.Width" "Species"
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#' @md
diff_cols <- function(l, r, side = "both") {
    # Column names present in one dataframe but not the other.
    #
    # side - "both" (default): names missing from either side;
    #        "left": in `l` but not `r`; "right": in `r` but not `l`.
    #        match.arg() accepts any unambiguous prefix ("b", "l", "r")
    #        and errors on anything else, instead of silently returning
    #        NULL as the old grepl() chain did for unrecognised values.
    side <- match.arg(side, c("both", "left", "right"))
    # Both directions need to be compared.
    only_in_l <- base::setdiff(colnames(l), colnames(r))
    only_in_r <- base::setdiff(colnames(r), colnames(l))
    switch(side,
           both  = unique(c(only_in_l, only_in_r)),
           left  = only_in_l,
           right = only_in_r)
}
#' Count/proportion of `NA`s and not-`NA`s per dataframe row
#'
#' @param df (Dataframe) A dataframe.
#' @param ... (Tidy-select) `dplyr`-style column selection.
#' See [dplyr::dplyr_tidy_select()]. If empty, defaults to `dplyr::everything()`.
#'
#' @return The dataframe `df` with four new columns: `na_in_row_count`, `notna_in_row_count`,
#' `na_in_row_prop`, and `notna_in_row_prop`.
#'
#' @export
#'
#' @examples
#' df <- data.frame(a = c(1, 2, NA, 4, NA), b = 1:5, c = c(NA, 2, 3, NA, NA))
#'
#' df
#'
#' #> a b c
#' #> 1 1 1 NA
#' #> 2 2 2 2
#' #> 3 NA 3 3
#' #> 4 4 4 NA
#' #> 5 NA 5 NA
#'
#' # By default, looks for NAs in all columns
#' na_in_row(df)
#'
#' # a b c na_in_row_count notna_in_row_count na_in_row_prop notna_in_row_prop
#' # 1 1 1 NA 1 2 0.3333333 0.6666667
#' # 2 2 2 2 0 3 0.0000000 1.0000000
#' # 3 NA 3 3 1 2 0.3333333 0.6666667
#' # 4 4 4 NA 1 2 0.3333333 0.6666667
#' # 5 NA 5 NA 2 1 0.6666667 0.3333333
#'
#' # Or use tidyselectors to choose columns. Here, looking for
#' # NAs in all columns except `b`
#' na_in_row(df, -b)
#'
#' # a b c na_in_row_count notna_in_row_count na_in_row_prop notna_in_row_prop
#' # 1 1 1 NA 1 1 0.5 0.5
#' # 2 2 2 2 0 2 0.0 1.0
#' # 3 NA 3 3 1 1 0.5 0.5
#' # 4 4 4 NA 1 1 0.5 0.5
#' # 5 NA 5 NA 2 0 1.0 0.0
#'
#' @section Source:
#' - <https://stackoverflow.com/a/35444245/5578429>
#'
#' @section Authors:
#' - Desi Quintans (<http://www.desiquintans.com>)
#' - maloneypatr (<https://stackoverflow.com/users/2124146/maloneypatr>)
#'
#' @md
na_in_row <- function(df, ...) {
    # Append per-row NA count/proportion columns to `df`.
    #
    # df  - the dataframe to summarise.
    # ... - optional dplyr tidy-selection of the columns to scan for NAs;
    #       defaults to every column.
    #
    # Returns `df` with four new columns: na_in_row_count,
    # notna_in_row_count, na_in_row_prop, notna_in_row_prop.
    #
    # `everything()` is namespaced so the function does not depend on dplyr
    # being attached (consistent with every other dplyr call in this file).
    if (...length() == 0) {
        wip <- dplyr::select(df, dplyr::everything())
    } else {
        wip <- dplyr::select(df, ...)
    }
    # Denominator for both proportions. (Replaces the previous per-row
    # apply(wip, 1, length), which recomputed this constant for every row.)
    n_scanned <- ncol(wip)
    # Count NAs per row in one vectorised pass over the selected columns.
    na_count <- as.integer(rowSums(is.na(wip)))
    summary_cols <- data.frame(
        na_in_row_count    = na_count,
        notna_in_row_count = n_scanned - na_count,
        na_in_row_prop     = na_count / n_scanned,
        notna_in_row_prop  = (n_scanned - na_count) / n_scanned)
    # Attach the summaries to the full (unsubset) dataframe.
    return(dplyr::bind_cols(df, summary_cols))
}
|
#This script is for Class 2, week 3, Assignment 2: Lexical Scoping. The assignment is called: "Caching the Inverse of a Matrix"
#This function is used to create a matrix for caching the inverse
makeCacheMatrix <- function(x = matrix()) {
    # Wrap a matrix together with a cache slot for its inverse.
    # The cache starts empty and is invalidated whenever the matrix
    # is replaced via set().
    cached_inv <- NULL
    set <- function(y) {
        x <<- y
        cached_inv <<- NULL
    }
    get <- function() x
    setinverse <- function(inv) cached_inv <<- inv
    getinverse <- function() cached_inv
    # Expose the four accessors as a named list.
    list(set = set,
         get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
#This is the function for computing the inverse of the matrixed returned by "makeCacheMatrix"
cachesolve <- function(x, ...) {
    # Return the inverse of the matrix held by `x` (a makeCacheMatrix
    # object), using the memoised value when one is available.
    inv <- x$getinverse()
    if (!is.null(inv)) {
        message("getting cached data")
        return(inv)
    }
    # Cache miss: compute the inverse with solve(), store it for next
    # time, and return it.
    mat <- x$get()
    inv <- solve(mat, ...)
    x$setinverse(inv)
    inv
}
| /cachematrix.R | no_license | mgordo34/ProgrammingAssignment2 | R | false | false | 786 | r | #This script is for Class 2, week 3, Assignment 2: Lexical Scoping. The assignment is called: "Caching the Inverse of a Matrix"
#This function is used to create a matrix for caching the inverse
makeCacheMatrix <- function(x = matrix()) {
    # `m` caches the inverse; NULL means "not computed yet".
    m <- NULL
    set <- function(y) {
        # Replace the stored matrix and invalidate the cached inverse.
        x <<- y
        m <<- NULL
    }
    get <- function() x
    # Stores a precomputed inverse in the enclosing environment.
    # (The parameter is unfortunately named `solve`, shadowing base::solve
    # inside this function only.)
    setinverse <- function(solve) m <<- solve
    getinverse <- function() m
    # Return the four accessors as a named list.
    list(set = set, get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
#This is the function for computing the inverse of the matrixed returned by "makeCacheMatrix"
cachesolve <- function(x, ...) {
    # Return the cached inverse when it exists.
    m <- x$getinverse()
    if(!is.null(m)) {
        message("getting cached data")
        return(m)
    }
    # Otherwise compute the inverse with solve(), cache it on `x`,
    # and return it. Extra arguments are forwarded to solve().
    data <- x$get()
    m <- solve(data, ...)
    x$setinverse(m)
    m
}
|
#' @title get_perc_risk_index
#'
#' @description Generates the mean of the values over a certain percentile
#' threshold for the pixel of the raster stack under the polygons of a shapefile
#'
#' @param r_stack is the raster or raster stack
#' @param p_shape is the shapefile on which to aggregate the values
#' @param perc_val is the percentile value used as a threshold
#' @param mod defines if the values considered for the mean are above (gt) or
#' below (lt) the threshold
#'
#' @export
#'
#' @examples
#' \dontrun{
#' r.index <- get_perc_risk_index(r_stack = r_stack, p_shape = poly,
#' perc_val = 50, mod = "lt")
#' }
#'
get_perc_risk_index <- function(r_stack, p_shape, perc_val = 75, mod = "gt"){
    # Pull the raster cell values falling under each polygon of `p_shape`;
    # raster::extract() returns one set of values per polygon.
    vals_per_polygon <- raster::extract(r_stack, p_shape)
    # mean_percs() (package helper) averages the values above ("gt") or
    # below ("lt") the `perc_val` percentile, per polygon.
    lapply(vals_per_polygon, FUN = mean_percs, perc_val = perc_val, mod = mod)
}
| /R/get_perc_risk_index.R | permissive | FannieNo/caliver | R | false | false | 890 | r | #' @title get_perc_risk_index
#'
#' @description Generates the mean of the values over a certain percentile
#' threshold for the pixel of the raster stack under the polygons of a shapefile
#'
#' @param r_stack is the raster or raster stack
#' @param p_shape is the shapefile on which to aggregate the values
#' @param perc_val is the percentile value used as a threshold
#' @param mod defines if the values considered for the mean are above (gt) or
#' below (lt) the threshold
#'
#' @export
#'
#' @examples
#' \dontrun{
#' r.index <- get_perc_risk_index(r_stack = r_stack, p_shape = poly,
#' perc_val = 50, mod = "lt")
#' }
#'
get_perc_risk_index <- function(r_stack, p_shape, perc_val = 75, mod = "gt"){
r_vals <- raster::extract(r_stack, p_shape)
index_val <- lapply(r_vals, FUN = mean_percs, perc_val = perc_val, mod = mod)
return(index_val)
}
|
#### Preamble ####
# Purpose: Use opendatatoronto to get ... data
#          NOTE(review): purpose line is an unfilled placeholder — complete it.
# Author: Renjing Liu (Max)
# Contact: renjing.liu@mail.utoronto.ca
# Date: 28th January 2021
# Pre-requisites: None
# TODOs:
#### Workspace set-up ####
# Uncomment to install the package on first run:
# install.packages("opendatatoronto")
library(opendatatoronto)
| /script/Paper_code.R | permissive | MaxLiu728/Paper | R | false | false | 285 | r | #### Preamble ####
# Purpose: Use opendatatoronto to get ... data
#          NOTE(review): purpose line is an unfilled placeholder — complete it.
# Author: Renjing Liu (Max)
# Contact: renjing.liu@mail.utoronto.ca
# Date: 28th January 2021
# Pre-requisites: None
# TODOs:
#### Workspace set-up ####
# Uncomment to install the package on first run:
# install.packages("opendatatoronto")
library(opendatatoronto)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outliers0.R
\name{outlierLineages}
\alias{outlierLineages}
\title{Detect lineages with unusually large evolutionary divergence under the fitted treedater model}
\usage{
outlierLineages(td, alpha = 0.05, type = c("tips", "internal", "all"))
}
\arguments{
\item{td}{A fitted treedater object}
\item{alpha}{The tail probability used for classifying lineages as outliers}
\item{type}{Should outliers be detected on tip lineages, internal lineages, or all lineages?}
}
\value{
A data frame summarizing for each lineage the p values, adjusted p values ('q'), likelihood, rates, and branch lengths.
}
\description{
Outliers are detected using the *stats::p.adjust* function with the 'fdr' adjustment method. The test requires that *dater* was used with temporalConstraints=TRUE.
}
\seealso{
dater
outlier.tips
}
| /man/outlierLineages.Rd | no_license | emvolz/treedater | R | false | true | 878 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outliers0.R
\name{outlierLineages}
\alias{outlierLineages}
\title{Detect lineages with unusually large evolutionary divergence under the fitted treedater model}
\usage{
outlierLineages(td, alpha = 0.05, type = c("tips", "internal", "all"))
}
\arguments{
\item{td}{A fitted treedater object}
\item{alpha}{The tail probability used for classifying lineages as outliers}
\item{type}{Should outliers be detected on tip lineages, internal lineages, or all lineages?}
}
\value{
A data frame summarizing for each lineage the p values, adjusted p values ('q'), likelihood, rates, and branch lengths.
}
\description{
Outliers are detected using the *stats::p.adjust* function with the 'fdr' adjustment method. The test requires that *dater* was used with temporalConstraints=TRUE.
}
\seealso{
dater
outlier.tips
}
|
# Lending Club loan default analysis — exploratory data analysis script.
library(ggplot2)
library(ggthemes)
library(dplyr)
library(tidyr)
# Clean workspace
# NOTE(review): rm(list = ls()) wipes every object in the interactive
# session; scripts conventionally should not do this — consider removing.
rm(list = ls())
# Read from 'raw' csv which has not been cleaned; empty strings are treated
# as missing values alongside the literal string "NA".
loan_raw <- read.csv("loan.csv", stringsAsFactors = FALSE, na.strings=c("","NA"))
# Analyse the data at hand
str(loan_raw)
# A function to check if a column has a single unique value
isSingleDataColumn <- function(col_data) {
    # TRUE when the column collapses to exactly one unique value — which
    # also covers all-NA and all-zero columns.
    length(unique(col_data)) == 1
}
# A good side effect is that columns containing only NAs or only 0s will
# also appear as columns with a single unique value.
# Get a logical vector of columns that hold a single unique value.
columns_with_single_data <- sapply(loan_raw, isSingleDataColumn)
# Remove columns with single unique values, all NAs and all 0s from our dataset
loan_data <- loan_raw[, !columns_with_single_data]
# Make the percentage columns numeric by stripping the trailing % character.
loan_data<-separate(loan_data,int_rate,c("int_rate"),sep="%",remove=TRUE)
loan_data$int_rate<-as.numeric(loan_data$int_rate)
loan_data<-separate(loan_data, revol_util, c("revol_util"), sep="%",remove=TRUE)
loan_data$revol_util<-as.numeric(loan_data$revol_util)
# Don't need the url of the application for analysis, remove it
loan_data <- subset(loan_data, select = -url)
# Show columns which has NA values
colWithNA <- function(loan_data) {
    # Count the NAs in each column, then keep only the (named) entries of
    # columns that contain at least one NA.
    na_counts <- colSums(is.na(loan_data))
    na_counts[na_counts > 0]
}
# Inspect which columns still contain NAs after the first cleanup pass.
colWithNA(loan_data)
# There are 2453 NA values in emp_title, convert those to 'Other'
loan_data$emp_title[is.na(loan_data$emp_title)] <- 'Other'
# Description (free text) can be removed
loan_data <- subset(loan_data, select = -desc)
# Set NA values in loan 'title' to 'Other'
loan_data$title[is.na(loan_data$title)] <- 'Other'
# The payment dates lie in the range of the 8th - 16th day of the month, i.e. an 8 day range.
unique(loan_data$last_pymnt_d)
# We are going to assume that 8 days of gap does not affect the payment, and remove the payment
# date information
loan_data <- subset(loan_data, select = -last_pymnt_d)
loan_data <- subset(loan_data, select = -next_pymnt_d)
# Also remove last_credit_pull_d
loan_data <- subset(loan_data, select = -last_credit_pull_d)
# Check the values of tax liens
unique(loan_data$tax_liens)
# Has only 0 and NAs
# We are going to assume that NA also means no tax liens, i.e. the person has paid their taxes properly.
# Since all values are either 0 or NAs, we can remove this column
loan_data <- subset(loan_data, select = -tax_liens)
# Collections within 12 months has only 0s and NAs and can be removed
unique(loan_data$collections_12_mths_ex_med)
loan_data <-subset(loan_data, select = -collections_12_mths_ex_med)
# Chargeoff within 12 months has only 0s and NAs and can be removed
unique(loan_data$chargeoff_within_12_mths)
loan_data <- subset(loan_data, select = -chargeoff_within_12_mths)
# Maintain NA values in 'Months since last delinq', 'Months since last record',
# 'Revolving line utilization rate', and 'public record bankruptcies' for analysis
# Check if loan status can be converted into factor
unique(loan_data$loan_status)
# Yes, it can be. Convert into factor.
loan_data$loan_status <- factor(loan_data$loan_status)
# Do the same thing for grade, sub_grade
unique(loan_data$grade)
unique(loan_data$sub_grade)
loan_data$grade <- factor(loan_data$grade)
loan_data$sub_grade <- factor(loan_data$sub_grade)
# Do the same thing for addr_state
unique(loan_data$addr_state)
loan_data$addr_state <- factor(loan_data$addr_state)
# Check and convert into factors for term, emp_length, verification_status, purpose,
# home ownership, pub_rec_bankruptcies, pub_rec, inq_last_6mths, delinq_2yrs
unique(loan_data$term)
unique(loan_data$emp_length)
unique(loan_data$verification_status)
unique(loan_data$purpose)
unique(loan_data$home_ownership)
unique(loan_data$pub_rec_bankruptcies)
unique(loan_data$pub_rec)
unique(loan_data$inq_last_6mths)
unique(loan_data$delinq_2yrs)
loan_data$term <- factor(loan_data$term)
loan_data$emp_length <- factor(loan_data$emp_length)
loan_data$verification_status <- factor(loan_data$verification_status)
loan_data$purpose <- factor(loan_data$purpose)
loan_data$home_ownership <- factor(loan_data$home_ownership)
loan_data$pub_rec_bankruptcies <- factor(loan_data$pub_rec_bankruptcies)
loan_data$pub_rec <- factor(loan_data$pub_rec)
loan_data$inq_last_6mths <- factor(loan_data$inq_last_6mths)
loan_data$delinq_2yrs <- factor(loan_data$delinq_2yrs)
# Determine the proportion of Charged Off loans
# Plot pie chart: compute per-status counts, then the cumulative midpoints
# used to position the percentage labels on the polar bar chart.
temp <- summarize(group_by(loan_data, loan_status), value = n())
str(temp)
temp <- mutate(temp, loan_status = factor(loan_status, levels = rev(loan_status)),
               cumulative = cumsum(value),
               midpoint = cumulative - value / 2,
               label = factor(paste0(round(value / sum(value) * 100, 1), "%")))
ggplot(temp, aes(x="", weight = value, fill = loan_status)) +
    geom_bar(width=1, position = "stack") +
    coord_polar(theta = "y") +
    labs(title = "Total Loan Status" , fill= "Loan status") +
    geom_text(aes(x = 1.3, y = midpoint, label = label)) +
    theme_void()
# Performing univariate and segmented univariate analysis with categorical variables.
# Each plot uses position = "fill" so bar heights show the *proportion* of
# each loan status within a category, not raw counts.
# 1 - Loan Status vs Loan Grade
ggplot(loan_data,aes(x=grade,fill=loan_status))+geom_bar(position="fill")+
    labs(x="Grade",title=" Grade")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# 2 - Loan Status vs Loan Sub-grade
ggplot(loan_data,aes(x=sub_grade,fill=loan_status))+geom_bar(position="fill")+
    labs(x="Sub Grade",title="Loan Sub Grade")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# 3 - Loan Status vs state (raw counts, labelled per bar)
ggplot(loan_data,aes(x=addr_state))+geom_bar(stat = "count") +
    geom_text(stat ='count', aes(label = ..count..), vjust = -1)
# Now plot the proportion of charged off loans
ggplot(loan_data,aes(x=addr_state,fill= loan_status))+geom_bar(position = "fill")+
    labs(x="State",title="State")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# State NE has very few loans to make a call on the high proportion of charged-off loans
# 4 Loan Status vs Term
ggplot(loan_data,aes(x=term,fill=loan_status))+geom_bar(position = "fill")+
    labs(x="Term",title="Loan Term")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# Loans with a term of 60 months have a higher proportion of defaulting
# 5 Loan Status vs Emp Length
ggplot(loan_data,aes(x=emp_length,fill=loan_status))+geom_bar(position = "fill")
# 6 Loan Status vs Verification Status
ggplot(loan_data,aes(x=verification_status,fill=loan_status))+
    geom_bar(position = "fill")
# 7 Loan Status vs Purpose
ggplot(loan_data,aes(x=purpose,fill=loan_status))+
    geom_bar(position = "fill")+
    labs(x="Purpose",title="Purpose")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# 8 Loan Status vs Home ownership
ggplot(loan_data,aes(x=home_ownership,fill=loan_status))+
    geom_bar(position = "fill")
# 9 Loan Status vs Public Record Bankruptcies
ggplot(loan_data,aes(x=pub_rec_bankruptcies,fill=loan_status))+
    geom_bar(position = "fill")+
    labs(x="Public Record Bankrupcies",title="Public Record Bankrupcies")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# 10 Loan Status vs Public Records
ggplot(loan_data,aes(x=pub_rec,fill=loan_status))+
    geom_bar(position = "fill")+
    labs(x="Public Records",title="Public Records")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# 11 Loan status vs inq_last_6mths
ggplot(loan_data,aes(x=inq_last_6mths,fill=loan_status))+
    geom_bar(position = "fill")+
    labs(x="Six Month Inquiries",title="Six Month Inquiries")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# 12 Loan status vs delinq_2yrs
ggplot(loan_data,aes(x=delinq_2yrs,fill=loan_status))+
    geom_bar(position = "fill")+
    labs(x="Delinquencies",title="Delinquencies In Last Two Year")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# Performing univariate analysis on quantitative variables.
# Histograms use position = "fill" so each bin shows the proportion of each
# loan status; bin widths span the variable's range in 50 equal steps.
# 13 Loan Status vs dti
ggplot(loan_data,aes(x=dti,fill=loan_status))+
    geom_histogram(colour = "black", binwidth = 0.5,position = "fill")+
    labs(x="DTI",title="DTI")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# 14 Loan Status vs Funded Amount
bin_size = (max(loan_data$funded_amnt) - min(loan_data$funded_amnt)) / 50
ggplot(loan_data,aes(x=funded_amnt,fill=loan_status))+
    geom_histogram(colour = "black", binwidth = bin_size, position = "fill")+
    labs(x="Funded Amount",title="Funded Amount")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# 15 Loan Status vs Annual Income
bin_size = (max(loan_data$annual_inc) - min(loan_data$annual_inc)) / 50
ggplot(loan_data,aes(x=annual_inc,fill=loan_status))+
    geom_histogram(colour = "black", binwidth = bin_size, position = "fill")+
    labs(x="Annual Income",title="Annual Income")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
# 16 Loan Status vs Installment
bin_size <- (max(loan_data$installment) - min(loan_data$installment)) / 50
# Bug fix: `bin_width` was previously passed inside aes(), where ggplot2
# ignores it as an unknown aesthetic — the bin width belongs in
# geom_histogram(binwidth = ...), matching plots 14 and 15 above.
ggplot(loan_data, aes(x = installment, fill = loan_status)) +
    geom_histogram(colour = "black", binwidth = bin_size, position = "fill") +
    labs(x="Installment",title="Installment")+
    labs(fill="Loan Status")+
    labs(y="Proportion")
#* int_rate :
# 17 Creating interest-rate slots: 2.5-point bins covering [5, 25).
# Replaces eight copy-pasted which()/assignment lines with a single
# vectorised findInterval() pass. Rates outside [5, 25) — and NA rates —
# keep the default label "1", matching the original fall-through behaviour.
slot_breaks <- seq(5, 25, by = 2.5)
# Labels reproduce the original strings exactly, e.g. "Slot1 5 - 7.5".
slot_labels <- paste0("Slot", seq_len(8), " ",
                      head(slot_breaks, -1), " - ", tail(slot_breaks, -1))
# findInterval(): 0 when below 5, 1..8 for the half-open slots, 9 when >= 25.
slot_idx <- findInterval(loan_data$int_rate, slot_breaks)
in_slot <- !is.na(slot_idx) & slot_idx >= 1 & slot_idx <= 8
loan_data$int_rate_Slots <- rep("1", nrow(loan_data))
loan_data$int_rate_Slots[in_slot] <- slot_labels[slot_idx[in_slot]]
# Distribution of all loans across the interest-rate slots, split by status.
ggplot( loan_data, aes(x = int_rate_Slots, fill = factor(loan_status))) + geom_bar(stat = "count" ) + labs(x ="Interest Rate SLots",title="Total Loans per Slot") + labs(y ="Count of Loans") + labs(fill="Loan Status")
# Here it shows that the majority of loans are given at a 10 - 15% rate of interest.
# Calculating only charged off loans per rate slot
charged_off <- subset(loan_data, loan_status == "Charged Off")
ggplot( charged_off, aes(x = int_rate_Slots)) + geom_bar(stat = "count" ) + labs(x ="Interest Rate SLots",title="Defaulted Loans per Slot") + labs(y ="Count of Defaulted Loans")
# This comparison shows the loans given in the 12.5-15% slot have seen the most defaulters.
# Performing segmented univariate analysis
# Identifying a derived metric as proportion of charged off in total population
proportions <- summarise(group_by(loan_data, loan_status), total = n())
proportions$percentage <- proportions$total / sum(proportions$total)
proportions
ggplot(proportions, aes(x = loan_status, y= percentage)) + geom_bar(stat="identity")
# Helper: for one segment of loans, tabulate the share of each loan status
# and draw it as a bar chart.  print() is required because auto-printing does
# not happen inside a function; the proportions table is returned invisibly
# so interactive callers can still capture it.
plot_status_proportions <- function(segment) {
  proportions <- summarise(group_by(segment, loan_status), total = n())
  proportions$percentage <- proportions$total / sum(proportions$total)
  print(proportions)
  print(ggplot(proportions, aes(x = loan_status, y = percentage)) +
          geom_bar(stat = "identity"))
  invisible(proportions)
}
# Evaluate if the proportion metric is higher in any segment
# 18 Mortgage and lower-grade (B-F) loans
homeOwner <- subset(loan_data, home_ownership == 'MORTGAGE' &
                      grade %in% c('B', 'C', 'D', 'E', 'F'))
plot_status_proportions(homeOwner)
# 19 High funded value and high DTI
plot_status_proportions(subset(loan_data, funded_amnt > 25000 & dti > 20))
# 20 High funded value and employment term less than 1 year
plot_status_proportions(subset(loan_data, funded_amnt > 25000 & emp_length == '< 1 year'))
# 21 Annual income greater than 350K and income source not verified
# (verification_status is already a factor after cleaning, so the original
# factor() wrapper around it was redundant and has been dropped)
plot_status_proportions(subset(loan_data,
                               annual_inc > 350000 & verification_status == 'Not Verified'))
# Write the cleaned data to file
write.csv(loan_data, "loan_clean.csv", row.names = FALSE)
| /code.R | no_license | rmukhia/Gramener-Case-Study | R | false | false | 12,924 | r | library(ggplot2)
library(ggthemes)
library(dplyr)
library(tidyr)
# Clean workspace
rm(list = ls())
# Read from 'raw' csv which has not been cleaned
loan_raw <- read.csv("loan.csv", stringsAsFactors = FALSE, na.strings=c("","NA"))
# Analyse the data at hand
str(loan_raw)
# A function to check if a column has a single unique value
# Returns TRUE when a column holds exactly one distinct value (such columns
# carry no information for the analysis and are dropped by the caller).
isSingleDataColumn <- function(col_data) {
  length(unique(col_data)) == 1
}
# The good side effect is that a columns which only has NA's or 0's will also appear as a column
# with a single unique value
# Get a list of columns which has a single unique value
columns_with_single_data <- sapply(loan_raw, isSingleDataColumn)
# Remove columns with single unique values, all NAs and all 0s from our dataset
loan_data <- loan_raw[, !columns_with_single_data]
# Make percentage as numeric by removing the % character
loan_data<-separate(loan_data,int_rate,c("int_rate"),sep="%",remove=TRUE)
loan_data$int_rate<-as.numeric(loan_data$int_rate)
loan_data<-separate(loan_data, revol_util, c("revol_util"), sep="%",remove=TRUE)
loan_data$revol_util<-as.numeric(loan_data$revol_util)
# Don't need the url of the application for analysis, remove it
loan_data <- subset(loan_data, select = -url)
# Show columns which has NA values
# Count the NA values in every column of a data frame and return a named
# vector of those counts, keeping only columns that contain at least one NA.
colWithNA <- function(loan_data) {
  na_counts <- colSums(is.na(loan_data))
  na_counts[na_counts > 0]
}
# There are 2453 NA values in emp_title, convert those to 'Other'
loan_data$emp_title[is.na(loan_data$emp_title)] <- 'Other'
# Description is can be removed
loan_data <- subset(loan_data, select = -desc)
# Set NA values in loan 'title' to 'Other'
loan_data$title[is.na(loan_data$title)] <- 'Other'
# The payment dates lies in the range of 8 - 16 day of the month, ie 8 day range.
unique(loan_data$last_pymnt_d)
# We are going to assume that 8 days of gap does not effect the payment, and remove the payment
# date information
loan_data <- subset(loan_data, select = -last_pymnt_d)
loan_data <- subset(loan_data, select = -next_pymnt_d)
# Also remove last_credit_pull_d
loan_data <- subset(loan_data, select = -last_credit_pull_d)
# Check the values of tax liens
unique(loan_data$tax_liens)
# Has only 0 and NAs
# We are going to assume that NA also means no tax liens, i.e the person has paid his taxes properly.
# Since all values are either 0 or NAs, we can remove this column
loan_data <- subset(loan_data, select = -tax_liens)
# Collection with 12 months just has 0 and NAs and can be remove
unique(loan_data$collections_12_mths_ex_med)
loan_data <-subset(loan_data, select = -collections_12_mths_ex_med)
# Chargeoff within 12 months just has 0 and Nas and can be removed
unique(loan_data$chargeoff_within_12_mths)
loan_data <- subset(loan_data, select = -chargeoff_within_12_mths)
# Maintain NA values in 'Months since last delinq', 'Months since last record',
# Revloving line utilization rate', and 'public record bankruptcies' for analysis
# Check if loan status can be converted into factor
unique(loan_data$loan_status)
# Yes, it can be. Convert into factor.
loan_data$loan_status <- factor(loan_data$loan_status)
# Do the same thing for grade, sub_grade
unique(loan_data$grade)
unique(loan_data$sub_grade)
loan_data$grade <- factor(loan_data$grade)
loan_data$sub_grade <- factor(loan_data$sub_grade)
# Do the same thing for addr_state
unique(loan_data$addr_state)
loan_data$addr_state <- factor(loan_data$addr_state)
# Check and convert into factors for term, emp_length, verification_status, purpose,
# home ownership, pub_rec_bankruptcies, pub_rec, inq_last_6mths, delinq_2yrs
unique(loan_data$term)
unique(loan_data$emp_length)
unique(loan_data$verification_status)
unique(loan_data$purpose)
unique(loan_data$home_ownership)
unique(loan_data$pub_rec_bankruptcies)
unique(loan_data$pub_rec)
unique(loan_data$inq_last_6mths)
unique(loan_data$delinq_2yrs)
loan_data$term <- factor(loan_data$term)
loan_data$emp_length <- factor(loan_data$emp_length)
loan_data$verification_status <- factor(loan_data$verification_status)
loan_data$purpose <- factor(loan_data$purpose)
loan_data$home_ownership <- factor(loan_data$home_ownership)
loan_data$pub_rec_bankruptcies <- factor(loan_data$pub_rec_bankruptcies)
loan_data$pub_rec <- factor(loan_data$pub_rec)
loan_data$inq_last_6mths <- factor(loan_data$inq_last_6mths)
loan_data$delinq_2yrs <- factor(loan_data$delinq_2yrs)
# Determine the proportion of Charged Off loans
# Plot pie chart
temp <- summarize(group_by(loan_data, loan_status), value = n())
str(temp)
temp <- mutate(temp, loan_status = factor(loan_status, levels = rev(loan_status)),
cumulative = cumsum(value),
midpoint = cumulative - value / 2,
label = factor(paste0(round(value / sum(value) * 100, 1), "%")))
ggplot(temp, aes(x="", weight = value, fill = loan_status)) +
geom_bar(width=1, position = "stack") +
coord_polar(theta = "y") +
labs(title = "Total Loan Status" , fill= "Loan status") +
geom_text(aes(x = 1.3, y = midpoint, label = label)) +
theme_void()
# Performing univariate and segmented univariate analysis with categorical variables
# 1 - Loan Status vs Loan Grade
ggplot(loan_data,aes(x=grade,fill=loan_status))+geom_bar(position="fill")+
labs(x="Grade",title=" Grade")+
labs(fill="Loan Status")+
labs(y="Proportion")
# 2 - Loan Status vs Loan Sub-grade
ggplot(loan_data,aes(x=sub_grade,fill=loan_status))+geom_bar(position="fill")+
labs(x="Sub Grade",title="Loan Sub Grade")+
labs(fill="Loan Status")+
labs(y="Proportion")
# 3 - Loan Status vs state
ggplot(loan_data,aes(x=addr_state))+geom_bar(stat = "count") +
geom_text(stat ='count', aes(label = ..count..), vjust = -1)
# Now plot the proportion of charged off loans
ggplot(loan_data,aes(x=addr_state,fill= loan_status))+geom_bar(position = "fill")+
labs(x="State",title="State")+
labs(fill="Loan Status")+
labs(y="Proportion")
# State NE has very few loans to make a call on the high proportion of charged-off loans
# 4 Loan Status vs Term
ggplot(loan_data,aes(x=term,fill=loan_status))+geom_bar(position = "fill")+
labs(x="Term",title="Loan Term")+
labs(fill="Loan Status")+
labs(y="Proportion")
# Loan with Term of 60 months have a higher proportion of defaulting
# 5 Loan Status vs Emp Length
ggplot(loan_data,aes(x=emp_length,fill=loan_status))+geom_bar(position = "fill")
# 6 Loan Status vs Verification Status
ggplot(loan_data,aes(x=verification_status,fill=loan_status))+
geom_bar(position = "fill")
# 7 Loan Status vs Purpose
ggplot(loan_data,aes(x=purpose,fill=loan_status))+
geom_bar(position = "fill")+
labs(x="Purpose",title="Purpose")+
labs(fill="Loan Status")+
labs(y="Proportion")
# 8 Loan Status vs Home ownership
ggplot(loan_data,aes(x=home_ownership,fill=loan_status))+
geom_bar(position = "fill")
# 9 Loan Status vs Public Record Bankrupcies
ggplot(loan_data,aes(x=pub_rec_bankruptcies,fill=loan_status))+
geom_bar(position = "fill")+
labs(x="Public Record Bankrupcies",title="Public Record Bankrupcies")+
labs(fill="Loan Status")+
labs(y="Proportion")
# 10 Loan Status vs Public Records
ggplot(loan_data,aes(x=pub_rec,fill=loan_status))+
geom_bar(position = "fill")+
labs(x="Public Records",title="Public Records")+
labs(fill="Loan Status")+
labs(y="Proportion")
# 11 Loan status vs inq_last_6mths
ggplot(loan_data,aes(x=inq_last_6mths,fill=loan_status))+
geom_bar(position = "fill")+
labs(x="Six Month Inquiries",title="Six Month Inquiries")+
labs(fill="Loan Status")+
labs(y="Proportion")
# 12 Loan status vs delinq_2yrs
ggplot(loan_data,aes(x=delinq_2yrs,fill=loan_status))+
geom_bar(position = "fill")+
labs(x="Delinquencies",title="Delinquencies In Last Two Year")+
labs(fill="Loan Status")+
labs(y="Proportion")
# Performing univariate quantitative variables
# 13 Loan Status vs dti
ggplot(loan_data,aes(x=dti,fill=loan_status))+
geom_histogram(colour = "black", binwidth = 0.5,position = "fill")+
labs(x="DTI",title="DTI")+
labs(fill="Loan Status")+
labs(y="Proportion")
# 14 Loan Status vs Funded Amount
bin_size = (max(loan_data$funded_amnt) - min(loan_data$funded_amnt)) / 50
ggplot(loan_data,aes(x=funded_amnt,fill=loan_status))+
geom_histogram(colour = "black", binwidth = bin_size, position = "fill")+
labs(x="Funded Amount",title="Funded Amount")+
labs(fill="Loan Status")+
labs(y="Proportion")
# 15 Loan Status vs Annual Income
bin_size = (max(loan_data$annual_inc) - min(loan_data$annual_inc)) / 50
ggplot(loan_data,aes(x=annual_inc,fill=loan_status))+
geom_histogram(colour = "black", binwidth = bin_size, position = "fill")+
labs(x="Annual Income",title="Annual Income")+
labs(fill="Loan Status")+
labs(y="Proportion")
# 16 Loan Status vs Installment
bin_size = (max(loan_data$installment ) - min(loan_data$installment)) / 50
ggplot(loan_data,aes(x=installment,bin_width = bin_size, fill=loan_status))+
geom_histogram(colour = "black", position = "fill")+
labs(x="Installment",title="Installment")+
labs(fill="Loan Status")+
labs(y="Proportion")
#* int_rate :
# 17 creating rate slots:
loan_data$int_rate_Slots = rep(1,nrow(loan_data))
loan_data$int_rate_Slots[ which( loan_data$int_rate >= 5 & loan_data$int_rate < 7.5 ) ] = "Slot1 5 - 7.5" ;
loan_data$int_rate_Slots[ which( loan_data$int_rate >= 7.5 & loan_data$int_rate < 10 ) ] = "Slot2 7.5 - 10" ;
loan_data$int_rate_Slots[ which( loan_data$int_rate >= 10 & loan_data$int_rate < 12.5 ) ] = "Slot3 10 - 12.5" ;
loan_data$int_rate_Slots[ which( loan_data$int_rate >= 12.5 & loan_data$int_rate < 15 ) ] = "Slot4 12.5 - 15" ;
loan_data$int_rate_Slots[ which( loan_data$int_rate >= 15 & loan_data$int_rate < 17.5 ) ] = "Slot5 15 - 17.5" ;
loan_data$int_rate_Slots[ which( loan_data$int_rate >= 17.5 & loan_data$int_rate < 20 ) ] = "Slot6 17.5 - 20" ;
loan_data$int_rate_Slots[ which( loan_data$int_rate >= 20 & loan_data$int_rate < 22.5 ) ] = "Slot7 20 - 22.5" ;
loan_data$int_rate_Slots[ which( loan_data$int_rate >= 22.5 & loan_data$int_rate < 25 ) ] = "Slot8 22.5 - 25" ;
ggplot( loan_data, aes(x = int_rate_Slots, fill = factor(loan_status))) + geom_bar(stat = "count" ) + labs(x ="Interest Rate SLots",title="Total Loans per Slot") + labs(y ="Count of Loans") + labs(fill="Loan Status")
#here it shows that majority loans are given in 10 - 15 rate of interest.
#calculating only charged off loans vs state
charged_off <- subset(loan_data, loan_status == "Charged Off")
ggplot( charged_off, aes(x = int_rate_Slots)) + geom_bar(stat = "count" ) + labs(x ="Interest Rate SLots",title="Defaulted Loans per Slot") + labs(y ="Count of Defaulted Loans")
#This comparision shows the loans given in 12.5-15% slot have seen highest defaulters.
# Performing segmented univariate analysis
# Identifying a derived metric as proportion of charged off in total population
proportions <- summarise(group_by(loan_data, loan_status), total = n())
proportions$percentage <- proportions$total / sum(proportions$total)
proportions
ggplot(proportions, aes(x = loan_status, y= percentage)) + geom_bar(stat="identity")
# Evaluate if the proportion metric is higher in any segment
# 18 Mortage and lower grade loan
homeOwner <- subset(loan_data, loan_data$home_ownership=='MORTGAGE' &
((loan_data$grade=='B')| (loan_data$grade=='C')|
(loan_data$grade=='D')| (loan_data$grade=='E')|
(loan_data$grade=='F')))
proportions <- summarise(group_by(homeOwner, loan_status), total = n())
proportions$percentage <- proportions$total / sum(proportions$total)
proportions
ggplot(proportions, aes(x = loan_status, y= percentage)) + geom_bar(stat="identity")
# 19 High Funded value and high DTI
temp <- subset(loan_data,funded_amnt>25000 & dti > 20)
proportions <- summarise(group_by(temp, loan_status), total = n())
proportions$percentage <- proportions$total / sum(proportions$total)
proportions
ggplot(proportions, aes(x = loan_status, y= percentage)) + geom_bar(stat="identity")
# 20 High Funded value and employment term less than 1 year
temp <- subset(loan_data,funded_amnt>25000 & loan_data$emp_length == '< 1 year')
proportions <- summarise(group_by(temp, loan_status), total = n())
proportions$percentage <- proportions$total / sum(proportions$total)
proportions
ggplot(proportions, aes(x = loan_status, y= percentage)) + geom_bar(stat="identity")
# 21 Annual Income greater than 350K and income source not verified
temp <- subset(loan_data,annual_inc>350000 & factor(loan_data$verification_status) == 'Not Verified')
proportions <- summarise(group_by(temp, loan_status), total = n())
proportions$percentage <- proportions$total / sum(proportions$total)
proportions
ggplot(proportions, aes(x = loan_status, y= percentage)) + geom_bar(stat="identity")
# Write the cleaned data to file
write.csv(loan_data, "loan_clean.csv",row.names = FALSE)
|
# Projectile launched upward at 15 m/s from 11 m height, integrated with the
# semi-implicit Euler method (velocity is updated before position).
sl = 4
dt = 0.1
ni = sl/dt
a = -9.81
v = vector(length=ni)
r = vector(length=ni)
v[1] = 15
r[1] = 11
# NOTE(review): ti is the raw step index (0..ni), not elapsed time, so the x
# axis below is in steps of dt; the "time(Hour)" label is misleading
# (a = -9.81 suggests seconds) -- confirm and use ti*dt if real time is meant.
ti=c(0:ni)
for (i in 1:ni){
  v[i+1] = v[i] + a*dt
  r[i+1] = r[i] + v[i+1]*dt
}
# NOTE(review): the blue line is position (r) and the red line is velocity
# (v); the legend on the following line labels them the other way round.
plot(ti,r,type = "l",lwd=2,col="blue",xlab = "time(Hour)",ylim = c(-15,25))
points(ti,v,type = "l",lwd=1.5,col="red")
abline(h=0)
legend(-1,-2,c("velocity","position"),col=c("blue","red"),text.col = c("blue","red"), lwd = c(2,1), bg = "gray90") | /tugas Pengantar Sains Komputasi/position_velocity.R | no_license | imanursar/sains-komputasi | R | false | false | 428 | r | sl = 4
dt = 0.1
ni = sl/dt
a = -9.81
v = vector(length=ni)
r = vector(length=ni)
v[1] = 15
r[1] = 11
ti=c(0:ni)
for (i in 1:ni){
v[i+1] = v[i] + a*dt
r[i+1] = r[i] + v[i+1]*dt
}
plot(ti,r,type = "l",lwd=2,col="blue",xlab = "time(Hour)",ylim = c(-15,25))
points(ti,v,type = "l",lwd=1.5,col="red")
abline(h=0)
legend(-1,-2,c("velocity","position"),col=c("blue","red"),text.col = c("blue","red"), lwd = c(2,1), bg = "gray90") |
##########################################################
# Deploy model #
##########################################################
# Install the mrsdeploy package if it is not already installed
#install.packages("mrsdeploy")
library(mrsdeploy)
# Model deployment with actual model
Tree1 <- rxBTrees(formula = label ~ age + Gender + Income + number_household + months_residence
+ T1count_30d + T2count_30d + T3count_30d
+ T1spend_30d + T2spend_30d + T3spend_30d
+ T1count_10d + T2count_10d + T3count_10d
+ T1spend_10d + T2spend_10d + T3spend_10d
+ r_60d + f_60d + T1_m_60d + T2_m_60d + T3_m_60d
, data = splitFiles[[2]], learningRate = 0.1, nTree=50, maxDepth = 5, seed = 1234, lossFunction = "multinomial")
summary(Tree1)
# Make predictions locally
rxPredict(Tree1, data = splitFiles[[1]], outData = 'predout.xdf', overwrite = TRUE)
rxGetInfo('predout.xdf', numRows = 5)
# Produce a prediction function that can use the above model
# Prediction wrapper published as the MRS web service.
#
# Assembles the 22 raw customer features into a one-row data.frame, scores it
# with the rxBTrees model `Tree1` (defined earlier in the script and captured
# by publishService at deploy time), and returns a one-row data.frame holding
# the per-class probability columns plus the predicted label, as produced by
# rxPredict(type = "prob").
actualmodel <- function(age, Gender, Income, number_household, months_residence,
                        T1count_30d, T2count_30d, T3count_30d,
                        T1spend_30d, T2spend_30d, T3spend_30d,
                        T1count_10d, T2count_10d, T3count_10d,
                        T1spend_10d, T2spend_10d, T3spend_10d,
                        r_60d, f_60d, T1_m_60d, T2_m_60d, T3_m_60d) {
  # RevoScaleR is attached inside the function so the deployed service loads
  # it in its own session.
  library(RevoScaleR)
  newdata <- data.frame(age = age, Gender = Gender, Income = Income,
                        number_household = number_household,
                        months_residence = months_residence,
                        T1count_30d = T1count_30d, T2count_30d = T2count_30d, T3count_30d = T3count_30d,
                        T1spend_30d = T1spend_30d, T2spend_30d = T2spend_30d, T3spend_30d = T3spend_30d,
                        T1count_10d = T1count_10d, T2count_10d = T2count_10d, T3count_10d = T3count_10d,
                        T1spend_10d = T1spend_10d, T2spend_10d = T2spend_10d, T3spend_10d = T3spend_10d,
                        r_60d = r_60d, f_60d = f_60d,
                        T1_m_60d = T1_m_60d, T2_m_60d = T2_m_60d, T3_m_60d = T3_m_60d)
  # type = "prob" yields one probability column per class followed by the
  # predicted-label column.
  outputs <- rxPredict(Tree1, newdata, type = "prob")
  answer <- as.data.frame(cbind(outputs[1], outputs[2], outputs[3],
                                outputs[4], outputs[5]))
  # Fix: the original ended on an assignment, which returns its value only
  # invisibly; return the result explicitly.
  answer
}
# Test function locally before deploying as a web service
print(actualmodel(29, 'M', 'Greater_than_100K', 1, 5, 6787, 178, 221, 0, 0, 0, 3234, 85, 88, 0, 0, 0, 1, 23, 0,0,0))
##########################################################
# Log into Microsoft R Server #
##########################################################
# Use `remoteLogin` to authenticate with R Server using
# the local admin account. Use session = false so no
# remote R session started
remoteLogin(deployr_endpoint = "http://localhost:12800",
username = 'admin',
password = 'HolaSenor123!',
session = FALSE,
commandline = TRUE)
##########################################################
# Publish Model as a Service #
##########################################################
# Publish as service using `publishService()` function from
# `mrsdeploy` package. Name service "mtService" and provide
# unique version number. Assign service to the variable `api`
# Switch between local and remote sessions
#pause()
#resume()
# If the web service has already been deployed, delete it before re-deploying
#deleteService("mrsservice", "v1.0.0")
api <- publishService(
"mrsservice",
code = actualmodel,
model = Tree1,
inputs = list(age= "numeric", Gender= "character", Income= "character", number_household = "numeric", months_residence = "numeric",
T1count_30d = "numeric", T2count_30d = "numeric", T3count_30d = "numeric",
T1spend_30d = "numeric", T2spend_30d = "numeric", T3spend_30d = "numeric",
T1count_10d = "numeric", T2count_10d = "numeric", T3count_10d = "numeric",
T1spend_10d = "numeric", T2spend_10d = "numeric", T3spend_10d = "numeric",
r_60d = "numeric", f_60d = "numeric", T1_m_60d = "numeric", T2_m_60d = "numeric", T3_m_60d = "numeric"),
outputs = list(answer = 'data.frame'),
v = "v1.0.0"
)
##########################################################
# Consume Service in R #
##########################################################
# Print capabilities that define the service holdings: service
# name, version, descriptions, inputs, outputs, and the
# name of the function to be consumed
print(api$capabilities())
api
# Consume service by calling function
# contained in this service
result <- api$actualmodel(29, 'M', 'Greater_than_100K', 1, 5, 6787, 178, 221, 0, 0, 0, 3234, 85, 88, 0, 0, 0, 1, 23, 0,0,0)
result$success
result$outputParameters
# Print response output variables
print(result)
##########################################################
# Get Swagger File for this Service in R Now #
##########################################################
# During this authenticated session, download the
# Swagger-based JSON file that defines this service
swagger <- api$swagger()
cat(swagger, file = "swagger.json", append = FALSE)
# Now you can share Swagger-based JSON so others can consume it
##########################################################
# Delete service version when finished #
##########################################################
# User who published service or user with owner role can
# remove the service when it is no longer needed
#status <- deleteService("mrsservice", "v1.0.0")
#status
##########################################################
# Log off of R Server #
##########################################################
# Log off of R Server
remoteLogout()
| /Technical Deployment Guide/src/R/data_deploy_blob.R | permissive | trPhan/cortana-intelligence-customer360 | R | false | false | 5,981 | r | ##########################################################
# Deploy model #
##########################################################
# Install the mrsdeploy package if it is not already installed
#install.packages("mrsdeploy")
library(mrsdeploy)
# Model deployment with actual model
Tree1 <- rxBTrees(formula = label ~ age + Gender + Income + number_household + months_residence
+ T1count_30d + T2count_30d + T3count_30d
+ T1spend_30d + T2spend_30d + T3spend_30d
+ T1count_10d + T2count_10d + T3count_10d
+ T1spend_10d + T2spend_10d + T3spend_10d
+ r_60d + f_60d + T1_m_60d + T2_m_60d + T3_m_60d
, data = splitFiles[[2]], learningRate = 0.1, nTree=50, maxDepth = 5, seed = 1234, lossFunction = "multinomial")
summary(Tree1)
# Make predictions locally
rxPredict(Tree1, data = splitFiles[[1]], outData = 'predout.xdf', overwrite = TRUE)
rxGetInfo('predout.xdf', numRows = 5)
# Produce a prediction function that can use the above model
# Prediction wrapper published as the MRS web service: assembles the 22 raw
# customer features into a one-row data.frame, scores it with the rxBTrees
# model `Tree1` (defined earlier in the script and captured by publishService
# at deploy time), and returns the probability columns plus predicted label.
actualmodel <- function(age, Gender, Income ,number_household, months_residence, T1count_30d, T2count_30d, T3count_30d, T1spend_30d, T2spend_30d, T3spend_30d, T1count_10d, T2count_10d,
                        T3count_10d, T1spend_10d, T2spend_10d, T3spend_10d, r_60d, f_60d, T1_m_60d, T2_m_60d, T3_m_60d) {
  # RevoScaleR is attached inside the function so the deployed service loads
  # it in its own session.
  library(RevoScaleR)
  newdata <- data.frame(age= age, Gender=Gender, Income=Income, number_household =number_household, months_residence = months_residence,
                        T1count_30d = T1count_30d, T2count_30d = T2count_30d, T3count_30d = T3count_30d,
                        T1spend_30d = T1spend_30d , T2spend_30d = T2spend_30d , T3spend_30d = T3spend_30d,
                        T1count_10d = T1count_10d, T2count_10d = T2count_10d, T3count_10d = T3count_10d,
                        T1spend_10d = T1spend_10d , T2spend_10d = T2spend_10d , T3spend_10d = T3spend_10d,
                        r_60d = r_60d , f_60d = f_60d , T1_m_60d = T1_m_60d , T2_m_60d = T2_m_60d , T3_m_60d = T3_m_60d)
  # type = "prob" yields one probability column per class followed by the
  # predicted-label column.
  outputs = rxPredict(Tree1, newdata, type = "prob")
  X0_prob = outputs[1]
  X1_prob = outputs[2]
  X2_prob = outputs[3]
  X3_prob = outputs[4]
  label_Pred = outputs[5]
  # NOTE(review): the assignment is the last expression, so `answer` is the
  # function value but is returned invisibly -- callers that rely on
  # auto-printing will see nothing.
  answer = as.data.frame(cbind(X0_prob, X1_prob, X2_prob, X3_prob, label_Pred))
}
# Test function locally before deploying as a web service
print(actualmodel(29, 'M', 'Greater_than_100K', 1, 5, 6787, 178, 221, 0, 0, 0, 3234, 85, 88, 0, 0, 0, 1, 23, 0,0,0))
##########################################################
# Log into Microsoft R Server #
##########################################################
# Use `remoteLogin` to authenticate with R Server using
# the local admin account. Use session = false so no
# remote R session started
remoteLogin(deployr_endpoint = "http://localhost:12800",
username = 'admin',
password = 'HolaSenor123!',
session = FALSE,
commandline = TRUE)
##########################################################
# Publish Model as a Service #
##########################################################
# Publish as service using `publishService()` function from
# `mrsdeploy` package. Name service "mtService" and provide
# unique version number. Assign service to the variable `api`
# Switch between local and remote sessions
#pause()
#resume()
# If the web service has already been deployed, delete it before re-deploying
#deleteService("mrsservice", "v1.0.0")
api <- publishService(
"mrsservice",
code = actualmodel,
model = Tree1,
inputs = list(age= "numeric", Gender= "character", Income= "character", number_household = "numeric", months_residence = "numeric",
T1count_30d = "numeric", T2count_30d = "numeric", T3count_30d = "numeric",
T1spend_30d = "numeric", T2spend_30d = "numeric", T3spend_30d = "numeric",
T1count_10d = "numeric", T2count_10d = "numeric", T3count_10d = "numeric",
T1spend_10d = "numeric", T2spend_10d = "numeric", T3spend_10d = "numeric",
r_60d = "numeric", f_60d = "numeric", T1_m_60d = "numeric", T2_m_60d = "numeric", T3_m_60d = "numeric"),
outputs = list(answer = 'data.frame'),
v = "v1.0.0"
)
##########################################################
# Consume Service in R #
##########################################################
# Print capabilities that define the service holdings: service
# name, version, descriptions, inputs, outputs, and the
# name of the function to be consumed
print(api$capabilities())
api
# Consume service by calling function
# contained in this service
result <- api$actualmodel(29, 'M', 'Greater_than_100K', 1, 5, 6787, 178, 221, 0, 0, 0, 3234, 85, 88, 0, 0, 0, 1, 23, 0,0,0)
result$success
result$outputParameters
# Print response output variables
print(result)
##########################################################
# Get Swagger File for this Service in R Now #
##########################################################
# During this authenticated session, download the
# Swagger-based JSON file that defines this service
swagger <- api$swagger()
cat(swagger, file = "swagger.json", append = FALSE)
# Now you can share Swagger-based JSON so others can consume it
##########################################################
# Delete service version when finished #
##########################################################
# User who published service or user with owner role can
# remove the service when it is no longer needed
#status <- deleteService("mrsservice", "v1.0.0")
#status
##########################################################
# Log off of R Server #
##########################################################
# Log off of R Server
remoteLogout()
|
# STROMAL CELLS - SCORE TUVESON CAF SIGNATURES
# SUNNY Z WU
#
#
# qrsh -pe smp 8 -l mem_requested=10G -P TumourProgression
# source activate r_seurat_dev
# R
#
# 01: SETUP -------------------------------------------------------------------
library(GSEABase)
library(AUCell)
library(reshape2)
library(NMF)
library(Matrix)
library(cowplot)
# directories
setwd("/share/ScratchGeneral/sunwu/projects/MINI_ATLAS_PROJECT/stromal_AUCEll/")
dir.create("02_AUCELL_SIGNATURES")
# 02: READ SIGNATURES -----------------------------------------------------
# # read genesets
# CSV of published CAF gene signatures; columns 2-6 each hold one signature
# (column name = signature name, rows = member genes, padded with NA).
temp_genesets <- "/share/ScratchGeneral/sunwu/projects/MINI_ATLAS_PROJECT/Sept2019/05_figures/05_STROMAL_FIGURES_v2/gene_signatures.csv"
temp_xcell_genesets <-
  read.csv(temp_genesets)
temp_xcell_genesets <- temp_xcell_genesets[,2:6]
temp_GeneSetCollection_list <- NULL
# Build one GSEABase::GeneSet per signature column and collect them.
# NOTE(review): the assign()/get() round-trip only re-creates the column name
# already available from colnames(); the temp_set_name* globals exist solely
# to be removed again below and could be dropped.
for(i in c(1:ncol(temp_xcell_genesets))) {
  n <- paste0("temp_set_name",
              i)
  assign(n,
         colnames(temp_xcell_genesets[i]))
  temp_set_name <- get(paste0("temp_set_name",
                              i))
  # Drop NA padding, then keep the unique gene symbols for this signature.
  temp_set <- na.omit(temp_xcell_genesets[i])
  colnames(temp_set) <- "gene_set"
  temp_set <- GeneSet(unique(as.vector(temp_set$gene_set)),
                      setName = temp_set_name)
  temp_GeneSetCollection_list <- append(temp_GeneSetCollection_list,
                                        temp_set)
}
temp_gene_set_collection <-
  GeneSetCollection(temp_GeneSetCollection_list)
# Clean up the throwaway temp_set_name* globals created by assign() above.
rm(list = ls(pattern = "temp_set_name"))
# 03: LOAD ANNOTATED OBJECTS ---------------------------------------------
# load all mesenchymal cells
seurat_10X_integrated <- readRDS("../05_STROMAL_FIGURES_v3/Rdata/Stromal_ANNOTATED_object.Rdata")
# 04: RUN AUCELL ------------------------------
temp_exprMatrix <- GetAssayData(object = seurat_10X_integrated,
assay = "RNA",
slot = "data")
temp_exprMatrix <-
Matrix(temp_exprMatrix,
sparse = T)
# AUCELL
dim(temp_exprMatrix)
print("Buildrankings")
temp_cells_rankings <-
AUCell_buildRankings(temp_exprMatrix,
nCores = 1,
plotStats = F)
# subset gene sets
temp_subsetgeneSets <-
subsetGeneSets(temp_gene_set_collection,
rownames(temp_exprMatrix))
# calculate area under the curve
temp_cells_AUC <-
AUCell_calcAUC(geneSets = temp_subsetgeneSets,
rankings = temp_cells_rankings,
aucMaxRank = ceiling(0.05 * nrow(temp_cells_rankings)),
nCores = 1,
verbose = T)
#transpose matrix for seurat metadata assignment
temp_cells_AUC_matrix <-
t(as.data.frame(getAUC(temp_cells_AUC)))
temp_cells_AUC_matrix_sorted <-
temp_cells_AUC_matrix[rownames(seurat_10X_integrated@meta.data),,drop=FALSE]
temp_cells_AUC_matrix_sorted <-
as.data.frame.matrix(temp_cells_AUC_matrix_sorted)
print(all.equal(rownames(temp_cells_AUC_matrix_sorted),
rownames(seurat_10X_integrated@meta.data)))
temp_first_geneset <-
(ncol(seurat_10X_integrated@meta.data) + 1)
seurat_10X_integrated_AUCell <- AddMetaData(seurat_10X_integrated,
metadata = temp_cells_AUC_matrix_sorted)
temp_last_geneset <-
(ncol(seurat_10X_integrated_AUCell@meta.data))
# 05: FEATUREPLOTS AND VLNPLOTS ------------------------------------------------------------
# Human-readable names for the five Elyada et al. 2019 CAF signatures scored
# by AUCell above.
temp_gene_set_names <-
  c("iCAF_human_PDAC_Elyada_et_al_2019",
    "myCAF_human_PDAC_Elyada_et_al_2019",
    "iCAF_mouse_PDAC_Elyada_et_al_2019",
    "myCAF_mouse_PDAC_Elyada_et_al_2019",
    "apCAF_mouse_PDAC_Elyada_et_al_2019")
# NOTE(review): columns 87:91 are hard-coded as the positions of the five
# AUCell score columns appended by AddMetaData; this silently renames the
# wrong columns if the object's metadata width ever changes -- confirm, or
# index by the original signature column names instead.
colnames(seurat_10X_integrated_AUCell@meta.data)[87:91] <-
  temp_gene_set_names
# PLOT
# Open a 16 x 12 inch, 300 dpi PNG graphics device writing to path x.
temp_png_function <- function(x) {
  png(file = x, width = 16, height = 12, res = 300, units = "in")
}
# Open a 16 x 12 inch PDF graphics device writing to path x; useDingbats is
# disabled so point symbols remain editable vector glyphs.
temp_pdf_function <- function(x) {
  pdf(file = x, width = 16, height = 12, useDingbats = FALSE)
}
# Feature plots of the five signature AUCell scores on the embedding.
# NOTE(review): temp_reduction is never defined anywhere in this script --
# FeaturePlot will error unless it exists in the calling environment; confirm
# the intended reduction (e.g. "umap").
temp_pdf_function(paste0("02_AUCELL_SIGNATURES/01_featureplot.pdf"))
temp_featureplot <- FeaturePlot(
  object = seurat_10X_integrated_AUCell,
  features = temp_gene_set_names,
  order = T,
  pt.size = 0.01,
  reduction = temp_reduction)
print(temp_featureplot)
dev.off()
# vlnplot
# Open a 6 x 18 inch, 300 dpi PNG graphics device writing to path x.
temp_png_function <- function(x) {
  png(file = x, width = 6, height = 18, res = 300, units = "in")
}
# Open a 6 x 18 inch PDF graphics device writing to path x (useDingbats
# disabled so point symbols remain editable vector glyphs).
temp_pdf_function <- function(x) {
  pdf(file = x, width = 6, height = 18, useDingbats = FALSE)
}
temp_pdf_function(paste0("02_AUCELL_SIGNATURES/02_vlnplot.pdf"))
temp_vlnplot <- VlnPlot(object = seurat_10X_integrated_AUCell,
features = temp_gene_set_names,
pt.size = 0.01,
group.by = "celltype_subset",
ncol = 1)
print(temp_vlnplot)
dev.off()
temp_pdf_function(paste0("02_AUCELL_SIGNATURES/03_vlnplot_nodots.pdf"))
temp_vlnplot <- VlnPlot(object = seurat_10X_integrated_AUCell,
features = temp_gene_set_names,
pt.size = 0,
group.by = "celltype_subset",
ncol = 1)
print(temp_vlnplot)
dev.off()
# 06: HEATMAPS ----------------------------------------------------------------
temp_df <- seurat_10X_integrated_AUCell@meta.data[,temp_gene_set_names]
temp_df$cluster <- seurat_10X_integrated_AUCell@meta.data$celltype_subset
temp_df_m <- melt(temp_df)
temp_df_m_agg <-
aggregate(.~cluster+variable,
temp_df_m,
mean)
temp_df_m_agg_dcast <-
dcast(data = temp_df_m_agg,
formula = variable~cluster,
fun.aggregate = sum,
value.var = "value")
rownames(temp_df_m_agg_dcast) <-
temp_df_m_agg_dcast$variable
temp_df_m_agg_dcast <-
temp_df_m_agg_dcast[, ! colnames(temp_df_m_agg_dcast) %in% "variable"]
temp_df_m_agg_dcast <- as.matrix(temp_df_m_agg_dcast)
library("viridis")
hmcol <- inferno(24)
rownames(temp_df_m_agg_dcast) <- c("iCAF_human_PDAC_Elyada_et_al_2019",
"myCAF_human_PDAC_Elyada_et_al_2019",
"iCAF_mouse_PDAC_Elyada_et_al_2019",
"myCAF_mouse_PDAC_Elyada_et_al_2019",
"apCAF_mouse_PDAC_Elyada_et_al_2019")
pheatmap(temp_df_m_agg_dcast, filename = paste0("02_AUCELL_SIGNATURES/04_heatmap.pdf"),
color = rev(hmcol),
cluster_cols = T,
cluster_rows = T,
scale = "row",
clustering_distance_cols = "correlation",
clustering_distance_rows = "correlation",
fontsize_row = 10,
show_rownames = T,
show_colnames = T,
fontsize_col = 10,
cellheight= 25,
cellwidth = 15,
gaps_col = NULL,
angle_col = 45,
treeheight_col = 50,
legend = T
)
# SAVE RDS ----------------------------------------------------------------
saveRDS(seurat_10X_integrated_AUCell,
"../05_STROMAL_FIGURES_v3/Rdata/Stromal_ANNOTATED_object_AUCell_Elyada_et_al.Rdata")
| /monocle_analysis_stromal_cells/scoring_tuveson_CAF_signatures.R | no_license | Swarbricklab-code/BrCa_cell_atlas | R | false | false | 7,202 | r | # STROMAL CELLS - SCORE TUVESON CAF SIGNATURES
# SUNNY Z WU
#
#
# qrsh -pe smp 8 -l mem_requested=10G -P TumourProgression
# source activate r_seurat_dev
# R
#
# 01: SETUP -------------------------------------------------------------------
library(GSEABase)
library(AUCell)
library(reshape2)
library(NMF)
library(Matrix)
library(cowplot)
# directories
setwd("/share/ScratchGeneral/sunwu/projects/MINI_ATLAS_PROJECT/stromal_AUCEll/")
dir.create("02_AUCELL_SIGNATURES")
# 02: READ SIGNATURES -----------------------------------------------------
# # read genesets
temp_genesets <- "/share/ScratchGeneral/sunwu/projects/MINI_ATLAS_PROJECT/Sept2019/05_figures/05_STROMAL_FIGURES_v2/gene_signatures.csv"
temp_xcell_genesets <-
read.csv(temp_genesets)
temp_xcell_genesets <- temp_xcell_genesets[,2:6]
temp_GeneSetCollection_list <- NULL
for(i in c(1:ncol(temp_xcell_genesets))) {
n <- paste0("temp_set_name",
i)
assign(n,
colnames(temp_xcell_genesets[i]))
temp_set_name <- get(paste0("temp_set_name",
i))
temp_set <- na.omit(temp_xcell_genesets[i])
colnames(temp_set) <- "gene_set"
temp_set <- GeneSet(unique(as.vector(temp_set$gene_set)),
setName = temp_set_name)
temp_GeneSetCollection_list <- append(temp_GeneSetCollection_list,
temp_set)
}
temp_gene_set_collection <-
GeneSetCollection(temp_GeneSetCollection_list)
rm(list = ls(pattern = "temp_set_name"))
# 03: LOAD ANNOTATED OBJECTS ---------------------------------------------
# load all mesenchymal cells (annotated Seurat object)
seurat_10X_integrated <- readRDS("../05_STROMAL_FIGURES_v3/Rdata/Stromal_ANNOTATED_object.Rdata")
# 04: RUN AUCELL ------------------------------
# Score every cell for every signature from the ranking of its normalised
# RNA expression values.
temp_exprMatrix <- GetAssayData(object = seurat_10X_integrated,
                                assay = "RNA",
                                slot = "data")
# sparse storage keeps the full expression matrix manageable in memory
# (TRUE/FALSE spelled out -- T/F are reassignable and unsafe)
temp_exprMatrix <-
  Matrix(temp_exprMatrix,
         sparse = TRUE)
# AUCELL
dim(temp_exprMatrix)
print("Buildrankings")
temp_cells_rankings <-
  AUCell_buildRankings(temp_exprMatrix,
                       nCores = 1,
                       plotStats = FALSE)
# restrict the gene sets to genes present in the expression matrix
temp_subsetgeneSets <-
  subsetGeneSets(temp_gene_set_collection,
                 rownames(temp_exprMatrix))
# area under the recovery curve, computed over the top 5% of each ranking
temp_cells_AUC <-
  AUCell_calcAUC(geneSets = temp_subsetgeneSets,
                 rankings = temp_cells_rankings,
                 aucMaxRank = ceiling(0.05 * nrow(temp_cells_rankings)),
                 nCores = 1,
                 verbose = TRUE)
# transpose to (cells x signatures) for Seurat metadata assignment
temp_cells_AUC_matrix <-
  t(as.data.frame(getAUC(temp_cells_AUC)))
temp_cells_AUC_matrix_sorted <-
  temp_cells_AUC_matrix[rownames(seurat_10X_integrated@meta.data), , drop = FALSE]
temp_cells_AUC_matrix_sorted <-
  as.data.frame.matrix(temp_cells_AUC_matrix_sorted)
# sanity check: AUC rows must align with the Seurat metadata rows
print(all.equal(rownames(temp_cells_AUC_matrix_sorted),
                rownames(seurat_10X_integrated@meta.data)))
# record where the new signature columns start/end in the metadata so the
# rename step below does not need hard-coded column positions
temp_first_geneset <-
  (ncol(seurat_10X_integrated@meta.data) + 1)
seurat_10X_integrated_AUCell <- AddMetaData(seurat_10X_integrated,
                                            metadata = temp_cells_AUC_matrix_sorted)
temp_last_geneset <-
  (ncol(seurat_10X_integrated_AUCell@meta.data))
# 05: FEATUREPLOTS AND VLNPLOTS -------------------------------------------
temp_gene_set_names <-
  c("iCAF_human_PDAC_Elyada_et_al_2019",
    "myCAF_human_PDAC_Elyada_et_al_2019",
    "iCAF_mouse_PDAC_Elyada_et_al_2019",
    "myCAF_mouse_PDAC_Elyada_et_al_2019",
    "apCAF_mouse_PDAC_Elyada_et_al_2019")
# Rename the freshly added AUCell columns. Uses the range recorded when the
# metadata was added instead of the original hard-coded 87:91, which only
# held for one particular metadata layout.
colnames(seurat_10X_integrated_AUCell@meta.data)[temp_first_geneset:temp_last_geneset] <-
  temp_gene_set_names
# PLOT device helpers (the PNG helper is an unused alternative to PDF)
temp_png_function <-
  function(x) {
    png(
      file = (x),
      width = 16,
      height = 12,
      res = 300,
      units = 'in'
    )
  }
temp_pdf_function <-
  function(x) {
    pdf(
      file = (x),
      width = 16,
      height = 12,
      useDingbats = FALSE
    )
  }
# BUGFIX: temp_reduction was referenced below but never defined anywhere in
# this script; default to "umap". TODO confirm the intended reduction.
temp_reduction <- "umap"
temp_pdf_function("02_AUCELL_SIGNATURES/01_featureplot.pdf")
temp_featureplot <- FeaturePlot(
  object = seurat_10X_integrated_AUCell,
  features = temp_gene_set_names,
  order = TRUE,
  pt.size = 0.01,
  reduction = temp_reduction)
print(temp_featureplot)
dev.off()
# vlnplot: tall single-column devices
temp_png_function <-
  function(x) {
    png(
      file = (x),
      width = 6,
      height = 18,
      res = 300,
      units = 'in'
    )
  }
temp_pdf_function <-
  function(x) {
    pdf(
      file = (x),
      width = 6,
      height = 18,
      useDingbats = FALSE
    )
  }
temp_pdf_function("02_AUCELL_SIGNATURES/02_vlnplot.pdf")
temp_vlnplot <- VlnPlot(object = seurat_10X_integrated_AUCell,
                        features = temp_gene_set_names,
                        pt.size = 0.01,
                        group.by = "celltype_subset",
                        ncol = 1)
print(temp_vlnplot)
dev.off()
# same violin plots without the jittered points
temp_pdf_function("02_AUCELL_SIGNATURES/03_vlnplot_nodots.pdf")
temp_vlnplot <- VlnPlot(object = seurat_10X_integrated_AUCell,
                        features = temp_gene_set_names,
                        pt.size = 0,
                        group.by = "celltype_subset",
                        ncol = 1)
print(temp_vlnplot)
dev.off()
# 06: HEATMAPS ----------------------------------------------------------------
# Mean AUCell score per (cluster x signature), shown as a row-scaled heatmap.
library(pheatmap) # BUGFIX: pheatmap() is called below but was never attached
library(viridis)
temp_df <- seurat_10X_integrated_AUCell@meta.data[, temp_gene_set_names]
temp_df$cluster <- seurat_10X_integrated_AUCell@meta.data$celltype_subset
temp_df_m <- melt(temp_df)
# mean signature score per cluster
temp_df_m_agg <-
  aggregate(. ~ cluster + variable,
            temp_df_m,
            mean)
# wide matrix: one row per signature, one column per cluster
temp_df_m_agg_dcast <-
  dcast(data = temp_df_m_agg,
        formula = variable ~ cluster,
        fun.aggregate = sum,
        value.var = "value")
rownames(temp_df_m_agg_dcast) <-
  temp_df_m_agg_dcast$variable
temp_df_m_agg_dcast <-
  temp_df_m_agg_dcast[, !colnames(temp_df_m_agg_dcast) %in% "variable"]
temp_df_m_agg_dcast <- as.matrix(temp_df_m_agg_dcast)
hmcol <- inferno(24)
# Row names already come from the `variable` factor (the signature names in
# their original order); the original's explicit reassignment of the same
# five names was a no-op -- and a mislabeling risk if the order ever
# differed -- so it has been dropped.
pheatmap(temp_df_m_agg_dcast, filename = "02_AUCELL_SIGNATURES/04_heatmap.pdf",
         color = rev(hmcol),
         cluster_cols = TRUE,
         cluster_rows = TRUE,
         scale = "row",
         clustering_distance_cols = "correlation",
         clustering_distance_rows = "correlation",
         fontsize_row = 10,
         show_rownames = TRUE,
         show_colnames = TRUE,
         fontsize_col = 10,
         cellheight = 25,
         cellwidth = 15,
         gaps_col = NULL,
         angle_col = 45,
         treeheight_col = 50,
         legend = TRUE
)
# SAVE RDS ----------------------------------------------------------------
# Persist the Seurat object with the renamed AUCell signature scores in its
# metadata, next to the annotated input object.
saveRDS(seurat_10X_integrated_AUCell,
        "../05_STROMAL_FIGURES_v3/Rdata/Stromal_ANNOTATED_object_AUCell_Elyada_et_al.Rdata")
|
# Code for plot4.png
# import the following libraries
library(lubridate)
# "blue" contains the whole database
blue <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
# green contains the data for dates = 1/2/2007 and 2/2/2007
green <- subset(blue, dmy(blue$Date) == dmy("2-2-2007") | dmy(blue$Date) == dmy("1-2-2007"))
green$Date <- as.Date(green$Date, "%d/%m/%Y")
# taking the complete cases
green <- green[complete.cases(green), ]
# combining the date and time into one POSIXct timestamp
new_date <- paste(green$Date, green$Time)
new_date <- as.POSIXct(new_date)
# laying out the tiles: a 2 x 2 grid of panels
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
# first plot: global active power
# BUGFIX: the y-axis label was missing its closing parenthesis
plot(new_date, green$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
# second plot: voltage
plot(new_date, green$Voltage, type = "l", ylab = "Voltage (volt)", xlab = "")
# third plot: the three sub-metering series
# BUGFIX: the y-axis label was a copy-paste of panel 1's label; this panel
# shows energy sub-metering, not global active power
plot(new_date, green$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(new_date, green$Sub_metering_2, col = "Red")
lines(new_date, green$Sub_metering_3, col = "Blue")
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2, bty = "n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# fourth plot: global reactive power
plot(new_date, green$Global_reactive_power, type = "l", xlab = "", ylab = "Global Reactive Power (kilowatts)")
# saving the file: copy the screen device into a 480x480 PNG
# NOTE(review): hard-coded absolute user path; a relative "plot4.png"
# would be more portable.
dev.copy(png, "/Users/vrsreeganesh/plot4.png", height = 480, width = 480)
dev.off()
| /plot4.R | no_license | kunjaps/ExData_Plotting1 | R | false | false | 1,562 | r | # Code for plot4.png
# import the following libraries
library(lubridate)
# "blue" contains the whole database
blue <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
# green contains the data for dates = 1/2/2007 and 2/2/2007
green <- subset(blue, dmy(blue$Date) == dmy("2-2-2007") | dmy(blue$Date) == dmy("1-2-2007"))
green$Date <- as.Date(green$Date, "%d/%m/%Y")
# taking the complete cases
green <- green[complete.cases(green), ]
# combining the date and time into one POSIXct timestamp
new_date <- paste(green$Date, green$Time)
new_date <- as.POSIXct(new_date)
# laying out the tiles: a 2 x 2 grid of panels
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
# first plot: global active power
# BUGFIX: the y-axis label was missing its closing parenthesis
plot(new_date, green$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
# second plot: voltage
plot(new_date, green$Voltage, type = "l", ylab = "Voltage (volt)", xlab = "")
# third plot: the three sub-metering series
# BUGFIX: the y-axis label was a copy-paste of panel 1's label; this panel
# shows energy sub-metering, not global active power
plot(new_date, green$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(new_date, green$Sub_metering_2, col = "Red")
lines(new_date, green$Sub_metering_3, col = "Blue")
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2, bty = "n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# fourth plot: global reactive power
plot(new_date, green$Global_reactive_power, type = "l", xlab = "", ylab = "Global Reactive Power (kilowatts)")
# saving the file: copy the screen device into a 480x480 PNG
# NOTE(review): hard-coded absolute user path; a relative "plot4.png"
# would be more portable.
dev.copy(png, "/Users/vrsreeganesh/plot4.png", height = 480, width = 480)
dev.off()
|
library(simecol)
### Name: approxTime
### Title: Linear Interpolation with Complete Matrices or Data Frames
### Aliases: approxTime approxTime1
### Keywords: arith
### ** Examples
# Example input: a 10-step series with two random response columns.
# NOTE(review): no set.seed(), so the rnorm() draws differ between runs.
inputs <- data.frame(time = 1:10, y1 = rnorm(10), y2 = rnorm(10, mean = 50))
# Interpolate both columns at times 2.5 and 3; rule = 2 returns the
# boundary value instead of NA outside the time range (as in ?approx).
input <- approxTime(inputs, c(2.5, 3), rule = 2)
| /data/genthat_extracted_code/simecol/examples/approxTime.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 314 | r | library(simecol)
### Name: approxTime
### Title: Linear Interpolation with Complete Matrices or Data Frames
### Aliases: approxTime approxTime1
### Keywords: arith
### ** Examples
# Example input: a 10-step series with two random response columns.
# NOTE(review): no set.seed(), so the rnorm() draws differ between runs.
inputs <- data.frame(time = 1:10, y1 = rnorm(10), y2 = rnorm(10, mean = 50))
# Interpolate both columns at times 2.5 and 3; rule = 2 returns the
# boundary value instead of NA outside the time range (as in ?approx).
input <- approxTime(inputs, c(2.5, 3), rule = 2)
|
# Figure: marker-gene dot plot (02_gene_dotplot).
# NOTE(review): load.pigz()/save.image.pigz() and plot_genes_by_group() are
# presumably attached by 00_packages_functions.R -- confirm.
source('/workspace/workspace_pipelines/mace_paper_2020/00_packages_functions.R', echo=TRUE)
# restore the cached analysis workspace (presumably provides cds_trimmed)
load.pigz("mace_paper_2020.RData")
# marker genes to display, one or more per expected cell population
demo_genes<-c("CD14", "IL7R", "CD8A", "KLRB1", "CD79A", "FCGR3A", "PF4", "IL3RA","PCNA")
#dot plot
# marker expression per assigned cluster, diagonalised for readability
gene_dot_plot<-plot_genes_by_group(cds = cds_trimmed,
                                   markers = demo_genes,
                                   group_cells_by = "cluster_assignment",
                                   ordering_type = "maximal_on_diag")
# strip the default axis titles
gene_dot_plot<-gene_dot_plot + labs(x = NULL, y = NULL)
gene_dot_plot
save_plot(gene_dot_plot, filename = "plots_out/gene_dot_plot.pdf", base_width = 6, base_height = 5)
# persist the updated workspace
# NOTE(review): n.cores = 39 is machine-specific -- confirm before reuse.
save.image.pigz("mace_paper_2020.RData",n.cores = 39)
| /02_gene_dotplot.R | permissive | blaserlab/mace_paper_2020 | R | false | false | 711 | r | source('/workspace/workspace_pipelines/mace_paper_2020/00_packages_functions.R', echo=TRUE)
# restore the cached analysis workspace (presumably provides cds_trimmed)
# NOTE(review): load.pigz()/save.image.pigz() and plot_genes_by_group() are
# presumably attached by the sourced 00_packages_functions.R -- confirm.
load.pigz("mace_paper_2020.RData")
# marker genes to display, one or more per expected cell population
demo_genes<-c("CD14", "IL7R", "CD8A", "KLRB1", "CD79A", "FCGR3A", "PF4", "IL3RA","PCNA")
#dot plot
# marker expression per assigned cluster, diagonalised for readability
gene_dot_plot<-plot_genes_by_group(cds = cds_trimmed,
                                   markers = demo_genes,
                                   group_cells_by = "cluster_assignment",
                                   ordering_type = "maximal_on_diag")
# strip the default axis titles
gene_dot_plot<-gene_dot_plot + labs(x = NULL, y = NULL)
gene_dot_plot
save_plot(gene_dot_plot, filename = "plots_out/gene_dot_plot.pdf", base_width = 6, base_height = 5)
# persist the updated workspace
# NOTE(review): n.cores = 39 is machine-specific -- confirm before reuse.
save.image.pigz("mace_paper_2020.RData",n.cores = 39)
|
\name{glyph_build}
\alias{glyph_build}
\title{Build a glyphs object for rendering}
\usage{
glyph_build(plot)
}
\arguments{
  \item{plot}{a glyph plot object (class glyphs)}
}
\description{
glyph_build takes a glyph plot object (class glyphs), and
performs all steps necessary to produce an object that
can be rendered. This function outputs two pieces: a list
of data frames (one for each layer), and a panel object,
which contain all information about axis limits, breaks,
etc.
}
\seealso{
print.glyphs and \code{\link{glayer_build}} for functions
that contain the complete set of steps for generating a
glyphs plot
}
\keyword{internal}
| /man/glyph_build.Rd | no_license | garrettgman/ggplyr | R | false | false | 651 | rd | \name{glyph_build}
\alias{glyph_build}
\title{Build a glyphs object for rendering}
\usage{
glyph_build(plot)
}
\arguments{
  \item{plot}{a glyph plot object (class glyphs)}
}
\description{
glyph_build takes a glyph plot object (class glyphs), and
performs all steps necessary to produce an object that
can be rendered. This function outputs two pieces: a list
of data frames (one for each layer), and a panel object,
which contain all information about axis limits, breaks,
etc.
}
\seealso{
print.glyphs and \code{\link{glayer_build}} for functions
that contain the complete set of steps for generating a
glyphs plot
}
\keyword{internal}
|
library(plyr)
library(reshape)
library(ggplot2)
source("SummarySE.R")  # summary-statistics helper
source("multiplot.R")  # multi-panel plotting helper
#########1. Read and reformat data ############
###############################################
# read and format ppidata (pre-pulse inhibition measurements, one row/animal)
data <- read.table(file = "ppiJul2014.csv", sep = ",", header = TRUE, na.strings = "NA")
data$BOX <- as.factor(data$BOX)
# read and format cpp and other covariate data
cpp <- read.table(file = "Jul2014.csv", sep = ",", header = TRUE, na.strings = "NA")
cpp$sire.age <- as.numeric(cpp$sire.age)
cpp$batch <- as.factor(cpp$batch)
# take id, sire.age, ppi.mo, batch and a few other traits
# NOTE(review): column positions are hard-coded against the Jul2014.csv
# layout -- re-check if that file ever changes.
covs <- cbind(cpp[1], cpp[4], cpp[10], cpp[12], cpp[36],
              cpp[58], cpp[72], cpp[93], cpp[102])
sens <- covs[7] - covs[6] # create sensitization variable
names(sens)[1] <- "sens"
covs <- cbind(covs, sens)
# read in and format the info data frame
info <- read.table(file = "ppi.info.csv", sep = ",", header = TRUE, na.strings = "NA")
info$ppi.age <- as.numeric(info$ppi.age)
info$gen <- as.factor(info$gen)
ppinfo <- info[complete.cases(info), ]     # removes five samples with NA entries
ppinfo <- cbind(ppinfo[1:2], ppinfo[7:10]) # keep only necessary columns
# order each table by its first column (id) so rows line up
data <- data[do.call(order, data), ]
covs <- covs[do.call(order, covs), ]
ppinfo <- ppinfo[do.call(order, ppinfo), ]
# match up variables by id
ppinfo0 <- ppinfo[ppinfo$id %in% covs$id, ]
covs0 <- covs[covs$id %in% ppinfo$id, ]
cov.ppi <- cbind(covs0, ppinfo0)
names(cov.ppi)[11] <- "id.2"
# remove samples with errors and the error column
cov.ppi <- cov.ppi[!cov.ppi$id == "54343", ]
cov.ppi <- cov.ppi[!cov.ppi$id == "54350", ]
cov.ppi <- cov.ppi[c(-16)]
# cleanup
rm(covs, covs0, ppinfo, ppinfo0, cpp)
# match up data and the full covariate set
cov <- cov.ppi[cov.ppi$id %in% data$ID, ]
d <- data[data$ID %in% cov.ppi$id, ]
ppi <- cbind(cov, d)
# cleanup
rm(d, cov, cov.ppi, data)
# for plotting: complete cases only
# (removed a redundant `data <- ppi` that was immediately overwritten)
data <- ppi[complete.cases(ppi), ]
data <- data[!data$BOX == "5", ]
# drops rows whose BOX level is the literal string "NA" (not missing values)
data <- data[!data$BOX == "NA", ]
data$BOX <- droplevels(data$BOX)
ppidata <- data
############# ppi by chamber #########
# Boxplots of each PPI trait split by test chamber (BOX), to inspect
# chamber effects. The theme was duplicated verbatim across all five
# plots; it is defined once here instead.
# NOTE(review): these plots are assigned but never printed or saved here.
box_theme <- theme(axis.title.x = element_text(colour = "black", size = 18),
                   axis.text = element_text(colour = "black", size = 12),
                   axis.title.y = element_text(colour = "black", size = 18),
                   plot.title = element_text(colour = "black", size = 20),
                   legend.position = "none")
ppi3 <- ggplot(data = data, aes(x = BOX, y = PPI3, color = BOX)) +
  geom_boxplot() +
  ggtitle("3dB PPI") +
  xlab("PPI chamber") +
  ylab("% PPI") +
  box_theme
ppi6 <- ggplot(data = data, aes(x = BOX, y = PPI6, color = BOX)) +
  geom_boxplot() +
  ggtitle("6dB PPI") +
  xlab("PPI chamber") +
  ylab("% PPI") +
  box_theme
ppi12 <- ggplot(data = data, aes(x = BOX, y = PPI12, color = BOX)) +
  geom_boxplot() +
  ggtitle("12dB PPI") +
  xlab("PPI chamber") +
  ylab("% PPI") +
  box_theme
start <- ggplot(data = data, aes(x = BOX, y = startle, color = BOX)) +
  geom_boxplot() +
  ggtitle("120 dB Startle") +
  xlab("PPI chamber") +
  ylab("Startle amplitude") +
  box_theme
habit <- ggplot(data = data, aes(x = BOX, y = habituation, color = BOX)) +
  geom_boxplot() +
  ggtitle("Habituation") +
  xlab("PPI chamber") +
  ylab("Startle amplitude") +
  box_theme
############# ppi by month #########
# Boxplots of the summary traits split by month of testing (ppi.mo).
# Shared theme defined once instead of repeated per plot.
box_theme <- theme(axis.title.x = element_text(colour = "black", size = 18),
                   axis.text = element_text(colour = "black", size = 12),
                   axis.title.y = element_text(colour = "black", size = 18),
                   plot.title = element_text(colour = "black", size = 20),
                   legend.position = "none")
ppi <- ggplot(data = data, aes(x = ppi.mo, y = avg_ppi, color = ppi.mo)) +
  geom_boxplot() +
  ggtitle("Average PPI") +
  xlab("Month") +
  ylab("% PPI") +
  box_theme
start <- ggplot(data = data, aes(x = ppi.mo, y = startle, color = ppi.mo)) +
  geom_boxplot() +
  ggtitle("120 dB Startle") +
  xlab("Month") +
  ylab("Startle amplitude") +
  box_theme
habit <- ggplot(data = data, aes(x = ppi.mo, y = habituation, color = ppi.mo)) +
  geom_boxplot() +
  ggtitle("Habituation") +
  xlab("Month") +
  ylab("Startle amplitude") +
  box_theme
############# ppi by batch #########
# Boxplots of the summary traits split by batch.
# Shared theme defined once instead of repeated per plot.
box_theme <- theme(axis.title.x = element_text(colour = "black", size = 18),
                   axis.text = element_text(colour = "black", size = 12),
                   axis.title.y = element_text(colour = "black", size = 18),
                   plot.title = element_text(colour = "black", size = 20),
                   legend.position = "none")
ppi <- ggplot(data = data, aes(x = batch, y = avg_ppi, color = batch)) +
  geom_boxplot() +
  ggtitle("Average PPI") +
  xlab("Batch") +
  ylab("% PPI") +
  box_theme
start <- ggplot(data = data, aes(x = batch, y = startle, color = batch)) +
  geom_boxplot() +
  ggtitle("120 dB Startle") +
  xlab("Batch") +
  ylab("Startle amplitude") +
  box_theme
habit <- ggplot(data = data, aes(x = batch, y = habituation, color = batch)) +
  geom_boxplot() +
  ggtitle("Habituation") +
  xlab("Batch") +
  ylab("Startle amplitude") +
  box_theme
############# ppi by gen #########
# Boxplots of the summary traits split by generation.
# Shared theme defined once instead of repeated per plot.
box_theme <- theme(axis.title.x = element_text(colour = "black", size = 18),
                   axis.text = element_text(colour = "black", size = 12),
                   axis.title.y = element_text(colour = "black", size = 18),
                   plot.title = element_text(colour = "black", size = 20),
                   legend.position = "none")
ppi <- ggplot(data = data, aes(x = gen, y = avg_ppi, color = gen)) +
  geom_boxplot() +
  ggtitle("Average PPI") +
  xlab("Generation") +
  ylab("% PPI") +
  box_theme
start <- ggplot(data = data, aes(x = gen, y = startle, color = gen)) +
  geom_boxplot() +
  ggtitle("120 dB Startle") +
  xlab("Generation") +
  ylab("Startle amplitude") +
  box_theme
habit=
ggplot(data=data, aes(x=gen,y=habituation, color=gen))+
geom_boxplot() +
ggtitle("Habituation")+
xlab("Generation") +
ylab("Startle amplitude") +
theme(axis.title.x = element_text(colour="black", size=18),
axis.text = element_text(colour="black", size=12),
axis.title.y = element_text(colour="black", size=18),
plot.title = element_text(colour="black", size=20),
legend.position = "none") | /scripts/oldScripts/PPIlabmtg.R | no_license | ngon/LgSm-DataProcessing | R | false | false | 8,789 | r | library(plyr);
library(reshape)
library(ggplot2)
source("SummarySE.R")  # summary-statistics helper
source("multiplot.R")  # multi-panel plotting helper
#########1. Read and reformat data ############
###############################################
# read and format ppidata (pre-pulse inhibition measurements, one row/animal)
data <- read.table(file = "ppiJul2014.csv", sep = ",", header = TRUE, na.strings = "NA")
data$BOX <- as.factor(data$BOX)
# read and format cpp and other covariate data
cpp <- read.table(file = "Jul2014.csv", sep = ",", header = TRUE, na.strings = "NA")
cpp$sire.age <- as.numeric(cpp$sire.age)
cpp$batch <- as.factor(cpp$batch)
# take id, sire.age, ppi.mo, batch and a few other traits
# NOTE(review): column positions are hard-coded against the Jul2014.csv
# layout -- re-check if that file ever changes.
covs <- cbind(cpp[1], cpp[4], cpp[10], cpp[12], cpp[36],
              cpp[58], cpp[72], cpp[93], cpp[102])
sens <- covs[7] - covs[6] # create sensitization variable
names(sens)[1] <- "sens"
covs <- cbind(covs, sens)
# read in and format the info data frame
info <- read.table(file = "ppi.info.csv", sep = ",", header = TRUE, na.strings = "NA")
info$ppi.age <- as.numeric(info$ppi.age)
info$gen <- as.factor(info$gen)
ppinfo <- info[complete.cases(info), ]     # removes five samples with NA entries
ppinfo <- cbind(ppinfo[1:2], ppinfo[7:10]) # keep only necessary columns
# order each table by its first column (id) so rows line up
data <- data[do.call(order, data), ]
covs <- covs[do.call(order, covs), ]
ppinfo <- ppinfo[do.call(order, ppinfo), ]
# match up variables by id
ppinfo0 <- ppinfo[ppinfo$id %in% covs$id, ]
covs0 <- covs[covs$id %in% ppinfo$id, ]
cov.ppi <- cbind(covs0, ppinfo0)
names(cov.ppi)[11] <- "id.2"
# remove samples with errors and the error column
cov.ppi <- cov.ppi[!cov.ppi$id == "54343", ]
cov.ppi <- cov.ppi[!cov.ppi$id == "54350", ]
cov.ppi <- cov.ppi[c(-16)]
# cleanup
rm(covs, covs0, ppinfo, ppinfo0, cpp)
# match up data and the full covariate set
cov <- cov.ppi[cov.ppi$id %in% data$ID, ]
d <- data[data$ID %in% cov.ppi$id, ]
ppi <- cbind(cov, d)
# cleanup
rm(d, cov, cov.ppi, data)
# for plotting: complete cases only
# (removed a redundant `data <- ppi` that was immediately overwritten)
data <- ppi[complete.cases(ppi), ]
data <- data[!data$BOX == "5", ]
# drops rows whose BOX level is the literal string "NA" (not missing values)
data <- data[!data$BOX == "NA", ]
data$BOX <- droplevels(data$BOX)
ppidata <- data
############# ppi by chamber #########
# Boxplots of each PPI trait split by test chamber (BOX), to inspect
# chamber effects. The theme was duplicated verbatim across all five
# plots; it is defined once here instead.
# NOTE(review): these plots are assigned but never printed or saved here.
box_theme <- theme(axis.title.x = element_text(colour = "black", size = 18),
                   axis.text = element_text(colour = "black", size = 12),
                   axis.title.y = element_text(colour = "black", size = 18),
                   plot.title = element_text(colour = "black", size = 20),
                   legend.position = "none")
ppi3 <- ggplot(data = data, aes(x = BOX, y = PPI3, color = BOX)) +
  geom_boxplot() +
  ggtitle("3dB PPI") +
  xlab("PPI chamber") +
  ylab("% PPI") +
  box_theme
ppi6 <- ggplot(data = data, aes(x = BOX, y = PPI6, color = BOX)) +
  geom_boxplot() +
  ggtitle("6dB PPI") +
  xlab("PPI chamber") +
  ylab("% PPI") +
  box_theme
ppi12 <- ggplot(data = data, aes(x = BOX, y = PPI12, color = BOX)) +
  geom_boxplot() +
  ggtitle("12dB PPI") +
  xlab("PPI chamber") +
  ylab("% PPI") +
  box_theme
start <- ggplot(data = data, aes(x = BOX, y = startle, color = BOX)) +
  geom_boxplot() +
  ggtitle("120 dB Startle") +
  xlab("PPI chamber") +
  ylab("Startle amplitude") +
  box_theme
habit <- ggplot(data = data, aes(x = BOX, y = habituation, color = BOX)) +
  geom_boxplot() +
  ggtitle("Habituation") +
  xlab("PPI chamber") +
  ylab("Startle amplitude") +
  box_theme
############# ppi by month #########
# Boxplots of the summary traits split by month of testing (ppi.mo).
# Shared theme defined once instead of repeated per plot.
box_theme <- theme(axis.title.x = element_text(colour = "black", size = 18),
                   axis.text = element_text(colour = "black", size = 12),
                   axis.title.y = element_text(colour = "black", size = 18),
                   plot.title = element_text(colour = "black", size = 20),
                   legend.position = "none")
ppi <- ggplot(data = data, aes(x = ppi.mo, y = avg_ppi, color = ppi.mo)) +
  geom_boxplot() +
  ggtitle("Average PPI") +
  xlab("Month") +
  ylab("% PPI") +
  box_theme
start <- ggplot(data = data, aes(x = ppi.mo, y = startle, color = ppi.mo)) +
  geom_boxplot() +
  ggtitle("120 dB Startle") +
  xlab("Month") +
  ylab("Startle amplitude") +
  box_theme
habit <- ggplot(data = data, aes(x = ppi.mo, y = habituation, color = ppi.mo)) +
  geom_boxplot() +
  ggtitle("Habituation") +
  xlab("Month") +
  ylab("Startle amplitude") +
  box_theme
############# ppi by batch #########
# Boxplots of the summary traits split by batch.
# Shared theme defined once instead of repeated per plot.
box_theme <- theme(axis.title.x = element_text(colour = "black", size = 18),
                   axis.text = element_text(colour = "black", size = 12),
                   axis.title.y = element_text(colour = "black", size = 18),
                   plot.title = element_text(colour = "black", size = 20),
                   legend.position = "none")
ppi <- ggplot(data = data, aes(x = batch, y = avg_ppi, color = batch)) +
  geom_boxplot() +
  ggtitle("Average PPI") +
  xlab("Batch") +
  ylab("% PPI") +
  box_theme
start <- ggplot(data = data, aes(x = batch, y = startle, color = batch)) +
  geom_boxplot() +
  ggtitle("120 dB Startle") +
  xlab("Batch") +
  ylab("Startle amplitude") +
  box_theme
habit <- ggplot(data = data, aes(x = batch, y = habituation, color = batch)) +
  geom_boxplot() +
  ggtitle("Habituation") +
  xlab("Batch") +
  ylab("Startle amplitude") +
  box_theme
############# ppi by gen #########
# Boxplots of the summary traits split by generation.
# Shared theme defined once instead of repeated per plot.
box_theme <- theme(axis.title.x = element_text(colour = "black", size = 18),
                   axis.text = element_text(colour = "black", size = 12),
                   axis.title.y = element_text(colour = "black", size = 18),
                   plot.title = element_text(colour = "black", size = 20),
                   legend.position = "none")
ppi <- ggplot(data = data, aes(x = gen, y = avg_ppi, color = gen)) +
  geom_boxplot() +
  ggtitle("Average PPI") +
  xlab("Generation") +
  ylab("% PPI") +
  box_theme
start <- ggplot(data = data, aes(x = gen, y = startle, color = gen)) +
  geom_boxplot() +
  ggtitle("120 dB Startle") +
  xlab("Generation") +
  ylab("Startle amplitude") +
  box_theme
habit=
ggplot(data=data, aes(x=gen,y=habituation, color=gen))+
geom_boxplot() +
ggtitle("Habituation")+
xlab("Generation") +
ylab("Startle amplitude") +
theme(axis.title.x = element_text(colour="black", size=18),
axis.text = element_text(colour="black", size=12),
axis.title.y = element_text(colour="black", size=18),
plot.title = element_text(colour="black", size=20),
legend.position = "none") |
%\renewcommand{\usage}{\pkg{#1}}
\name{BIOMOD_FormatingData}
\alias{BIOMOD_FormatingData}
\title{ Initialise the datasets for usage in \pkg{biomod2} }
\description{
This function rearranges the user's input data to make sure they can be used within \pkg{biomod2}.
The function allows to select pseudo-absences or background data in the case that true absences data are not available, or to add pseudo-asbence data to an existing set of absence (see details).
}
\usage{
BIOMOD_FormatingData(resp.var,
expl.var,
resp.xy = NULL,
resp.name = NULL,
eval.resp.var = NULL,
eval.expl.var = NULL,
eval.resp.xy = NULL,
PA.nb.rep = 0,
PA.nb.absences = 1000,
PA.strategy = 'random',
PA.dist.min = 0,
PA.dist.max = NULL,
PA.sre.quant = 0.025,
PA.table = NULL,
na.rm = TRUE)
}
\arguments{
\item{resp.var}{ a vector, \code{\link[sp]{SpatialPointsDataFrame}} (or \code{\link[sp]{SpatialPoints}} if you work with \sQuote{only presences} data) containing species data (a single species) in binary format (ones for presences, zeros for true absences and NA for indeterminated ) that will be \bold{used to build the species distribution models}. }
\item{expl.var}{ a \code{matrix}, \code{data.frame}, \code{\link[sp]{SpatialPointsDataFrame}} or \code{\link[raster:stack]{RasterStack}} containing your explanatory variables that will be \bold{used to build your models}.}
\item{resp.xy}{ optional 2 columns \code{matrix} containing the X and Y coordinates of resp.var (only consider if resp.var is a vector) that will be \bold{used to build your models}.}
\item{eval.resp.var}{ a vector, \code{\link[sp]{SpatialPointsDataFrame}} your species data (a single species) in binary format (ones for presences, zeros for true absences and NA for indeterminated ) that will be \bold{used to evaluate the models with independant data (or past data for instance)}.}
\item{eval.expl.var}{a \code{matrix}, \code{data.frame}, \code{\link[sp]{SpatialPointsDataFrame}} or \code{\link[raster:stack]{RasterStack}} containing your explanatory variables that will be \bold{used to evaluate the models with independant data (or past data for instance)}.}
\item{eval.resp.xy}{opional 2 columns \code{matrix} containing the X and Y coordinates of resp.var (only consider if resp.var is a vector) that will be \bold{used to evaluate the modelswith independant data (or past data for instance)}.}
\item{resp.name}{ response variable name (character). The species name. }
\item{PA.nb.rep}{ number of required Pseudo Absences selection (if needed). 0 by Default.}
\item{PA.nb.absences}{ number of pseudo-absence selected for each repetition (when PA.nb.rep > 0) of the selection (true absences included)}
\item{PA.strategy}{ strategy for selecting the Pseudo Absences (must be \sQuote{random}, \sQuote{sre}, \sQuote{disk} or \sQuote{user.defined})}
\item{PA.dist.min}{minimal distance to presences for \sQuote{disk} Pseudo Absences selection (in meters if the explanatory is a not projected raster (+proj=longlat) and in map units (typically also meters) when it is projected or when explanatory variables are stored within table )}
\item{PA.dist.max}{maximal distance to presences for \sQuote{disk} Pseudo Absences selection(in meters if the explanatory is a not projected raster (+proj=longlat) and in map units (typically also meters) when it is projected or when explanatory variables are stored within table ) }
\item{PA.sre.quant}{quantile used for \sQuote{sre} Pseudo Absences selection}
\item{PA.table}{a \code{matrix} (or a \code{data.frame}) having as many rows than \code{resp.var} values. Each column correspund to a Pseudo-absences selection. It contains \code{TRUE} or \code{FALSE} indicating which values of \code{resp.var} will be considered to build models. It must be used with \sQuote{user.defined} \code{PA.strategy}.}
\item{na.rm}{locical, if TRUE, all points having one or several missing value for environmental data will be removed from analyse }
}
\details{
This function homogenises the initial data to make sure the modelling exercise will be completed with all the required data. It supports different kinds of inputs.
IMPORTANT: When the explanatory data are given in \code{rasterLayer} or \code{rasterStack} objects, \pkg{biomod2} will extract the variables at the XY coordinates of the presence (and absence, if any) vector. Be sure to give the XY coordinates (\sQuote{resp.xy}) in the same projection system as the raster objects. The same applies to the evaluation data in the case some sort of independent (or past) data are available (\sQuote{eval.resp.xy}).
When the explanatory variables are given in \code{\link[sp]{SpatialPointsDataFrame}}, the same requirements are asked than for the raster objects. The XY coordinates must be given to make sure biomod2 can extract the explanatory variables onto the presence (absence) data
When the explanatory variables are stored in a data.frame, make sure there are in the same order than the response variable. \pkg{biomod2} will simply merge the datasets without considering the XY coordinates.
When both presence and absence data are available, and there are enough absences: set \sQuote{PA.nb.rep} to 0. No pseudo-absences will be extracted.
When no true absences are given, or when they are not numerous enough, it is advisable to make several pseudo-absence selections. That way the influence of the pseudo-absence selection can be estimated later on. If the user does not want to run several repetitions, make sure to select a relatively high number of pseudo-absences. Make sure the number of pseudo-absence data is not higher than the maximum number of potential pseudo-absences (e.g. do not select 10,000 pseudo-absences when the rasterStack or data.frame does not contain more than 2000 pixels or rows).
\enumerate{
\item{\bold{Response variable encoding}}{
\code{BIOMOD_FormatingData} concerns a single species at a time so \code{resp.var} must be a uni-dimentional object.
Response variable must be a \code{vector} or a one column \code{data.frame}/\code{matrix}/\code{\link[sp]{SpatialPointsDataFrame}} ( \code{\link[sp]{SpatialPoints}} are also allowed if you work with \sQuote{only presences} data) object.
As most of \pkg{biomod2} models need Presences AND Absences data, the response variable must contain some absences (if there are not, make sure to select pseudo-absence). In the input \code{resp.var} argument, the data should be coded in the following way :
\itemize{
\item{Presences : 1}
\item{True Absesnces : 0 (if any)}
\item{No Information : NA (if any, might latter be used for pseudo-absence)}
}
If \code{resp.var} is a non-spatial object (\code{vector}, \code{matrix}/\code{data.frame}) and that some models requiring spatial data are being used (e.g. MAXENT.Phillips) and/or pseudo absences spatialy dependent (i.e 'disk'), make sure to give the XY coordinates of the sites/rows (\sQuote{resp.xy}).
}
\item{\bold{Explanatory variables encoding}}{
Explanatory variables must be stored together in a multidimentional object. It may be a \code{matrix}, a \code{data.frame}, a \code{\link[sp]{SpatialPointsDataFrame}} or a \code{rasterStack} object. Factorial variables are allowed here even if that can lead to some models omissions.
}
\item{\bold{Evaluation Data}}{
If you have data enough, we strongly recommand to split your dataset into 2 part : one for training/calibrating and testing the models and another to evaluate it. If you do it, fill the \code{eval.resp.var}, \code{eval.expl.var} and optionally the \code{eval.resp.xy} arguments whith this data. The advantage of working with a specific dataset for evaluating your models is that you will be able to evaluate more properly your \sQuote{ensemble modeled} models. That being said, this argument is optional and you may prefer only to test (kind of evaluation) your models only with a \sQuote{cross-validation} procedure (see Models function). The best practice is to use one set of data for training/calibrating, one set of testing and one for evaluating. The calibration and testing of the data can be done automaticaly in \pkg{biomod2} in the Models function. The dataset for evaluation must be entered in \code{BIOMOD_FormatingData}.
}
\item{\bold{Pseudo Absences selection}}{
The \code{PA.xxx}'s arguments let you parametrise your pseudo absences selection if you want some. It's an optional step.
Pseudo absences will be selected within the \sQuote{background data} and might be constrained by a defined \sQuote{strategy}.
\enumerate{
\item{background data}{
\sQuote{Background data} represents data there is no information whether the species of interest occurs or not. It is defined by the \sQuote{No Information} data of your \code{resp.var} if you give some. If not, (i.e Only presences data or all cells with a define presence or absence state) the backgroud will be take into your \code{expl.var} object if it's a \code{RasterStack}.
}
\item{strategy}{
The strategy allows to constrain the choice of pseudo-absence within the \sQuote{background data}.
3 ways are currently implemented to select the pseudo-absences candidate cells (\code{PA.strategy} argument):
\itemize{
\item{\sQuote{random}: all cell of initial background are Pseudo absences candidates. The choice is made randomly given the number of pseudo-absence to select \code{PA.nb.absences}.}
\item{\sQuote{disk}: you may define a minimal (\code{PA.dist.min}), respectively a maximal (\code{PA.dist.max}) distance to presences points for selecting your pseudo absences candidates. That may be usefull if you don't want to select pseudo-absences too close to your presences (same niche and to avoid pseudo-replication), respectively too far from your presences (localised sampling startegy). }
\item{\sQuote{sre}: Pseudo absences candidates have to be selected in condition that differs from a defined proportion (\code{PA.sre.quant}) of presences data. It forces pseudo absences to be selected outside of the broadly defined environemental conditions for the species. It means that a surface range envelop model (sre, similar the BIOCLIM) is first carried out (using the specified quantile) on the species of interest, and then the pseudo-absence data are extracted outside of this envelop. This particular case may lead to over optimistic models evaluations.}
\item{\sQuote{user.defined}: In this case, pseudo absences selection should have been done in a previous step. This pseudo absences have to be reference into a well formated \code{data.frame} (e.g. \code{PA.table} argument)}
}
}
}
}
} % end enumerate
} %end detail section
\value{
A \code{'data.formated.Biomod.object'} for \code{\link[biomod2]{BIOMOD_Modeling}}.
It is strongly advised to check whether this formatted data corresponds to what was expected. A summary is easily printed by simply typing the name of the object. A generic plot function is also available to display the different datasets in the geographic space.
}
\author{ Wilfried Thuiller, Damien Georges }
\seealso{ \code{\link{BIOMOD_Modeling}}}
\examples{
# species occurrences
DataSpecies <- read.csv(system.file("external/species/mammals_table.csv",
package="biomod2"), row.names = 1)
head(DataSpecies)
# the name of studied species
myRespName <- 'GuloGulo'
# the presence/absences data for our species
myResp <- as.numeric(DataSpecies[,myRespName])
# the XY coordinates of species data
myRespXY <- DataSpecies[,c("X_WGS84","Y_WGS84")]
# Environmental variables extracted from BIOCLIM (bio_3, bio_4, bio_7, bio_11 & bio_12)
myExpl = stack( system.file( "external/bioclim/current/bio3.grd",
package="biomod2"),
system.file( "external/bioclim/current/bio4.grd",
package="biomod2"),
system.file( "external/bioclim/current/bio7.grd",
package="biomod2"),
system.file( "external/bioclim/current/bio11.grd",
package="biomod2"),
system.file( "external/bioclim/current/bio12.grd",
package="biomod2"))
# 1. Formatting Data
myBiomodData <- BIOMOD_FormatingData(resp.var = myResp,
expl.var = myExpl,
resp.xy = myRespXY,
resp.name = myRespName)
myBiomodData
plot(myBiomodData)
}
\keyword{ models }
\keyword{ datasets }
| /man/BIOMOD_FormatingData.Rd | no_license | sydnerecord/biomod2 | R | false | false | 13,303 | rd | %\renewcommand{\usage}{\pkg{#1}}
\name{BIOMOD_FormatingData}
\alias{BIOMOD_FormatingData}
\title{ Initialise the datasets for usage in \pkg{biomod2} }
\description{
This function rearranges the user's input data to make sure they can be used within \pkg{biomod2}.
The function allows selecting pseudo-absences or background data in the case that true absence data are not available, or adding pseudo-absence data to an existing set of absences (see details).
}
\usage{
BIOMOD_FormatingData(resp.var,
expl.var,
resp.xy = NULL,
resp.name = NULL,
eval.resp.var = NULL,
eval.expl.var = NULL,
eval.resp.xy = NULL,
PA.nb.rep = 0,
PA.nb.absences = 1000,
PA.strategy = 'random',
PA.dist.min = 0,
PA.dist.max = NULL,
PA.sre.quant = 0.025,
PA.table = NULL,
na.rm = TRUE)
}
\arguments{
\item{resp.var}{ a vector, \code{\link[sp]{SpatialPointsDataFrame}} (or \code{\link[sp]{SpatialPoints}} if you work with \sQuote{only presences} data) containing species data (a single species) in binary format (ones for presences, zeros for true absences and NA for indeterminated ) that will be \bold{used to build the species distribution models}. }
\item{expl.var}{ a \code{matrix}, \code{data.frame}, \code{\link[sp]{SpatialPointsDataFrame}} or \code{\link[raster:stack]{RasterStack}} containing your explanatory variables that will be \bold{used to build your models}.}
\item{resp.xy}{ optional 2 columns \code{matrix} containing the X and Y coordinates of resp.var (only consider if resp.var is a vector) that will be \bold{used to build your models}.}
\item{eval.resp.var}{ a vector, \code{\link[sp]{SpatialPointsDataFrame}} your species data (a single species) in binary format (ones for presences, zeros for true absences and NA for indeterminated ) that will be \bold{used to evaluate the models with independant data (or past data for instance)}.}
\item{eval.expl.var}{a \code{matrix}, \code{data.frame}, \code{\link[sp]{SpatialPointsDataFrame}} or \code{\link[raster:stack]{RasterStack}} containing your explanatory variables that will be \bold{used to evaluate the models with independant data (or past data for instance)}.}
\item{eval.resp.xy}{opional 2 columns \code{matrix} containing the X and Y coordinates of resp.var (only consider if resp.var is a vector) that will be \bold{used to evaluate the modelswith independant data (or past data for instance)}.}
\item{resp.name}{ response variable name (character). The species name. }
\item{PA.nb.rep}{ number of required Pseudo Absences selection (if needed). 0 by Default.}
\item{PA.nb.absences}{ number of pseudo-absence selected for each repetition (when PA.nb.rep > 0) of the selection (true absences included)}
\item{PA.strategy}{ strategy for selecting the Pseudo Absences (must be \sQuote{random}, \sQuote{sre}, \sQuote{disk} or \sQuote{user.defined})}
\item{PA.dist.min}{minimal distance to presences for \sQuote{disk} Pseudo Absences selection (in meters if the explanatory is a not projected raster (+proj=longlat) and in map units (typically also meters) when it is projected or when explanatory variables are stored within table )}
\item{PA.dist.max}{maximal distance to presences for \sQuote{disk} Pseudo Absences selection(in meters if the explanatory is a not projected raster (+proj=longlat) and in map units (typically also meters) when it is projected or when explanatory variables are stored within table ) }
\item{PA.sre.quant}{quantile used for \sQuote{sre} Pseudo Absences selection}
\item{PA.table}{a \code{matrix} (or a \code{data.frame}) having as many rows than \code{resp.var} values. Each column correspund to a Pseudo-absences selection. It contains \code{TRUE} or \code{FALSE} indicating which values of \code{resp.var} will be considered to build models. It must be used with \sQuote{user.defined} \code{PA.strategy}.}
\item{na.rm}{locical, if TRUE, all points having one or several missing value for environmental data will be removed from analyse }
}
\details{
This function homogenises the initial data for making sure the modelling exercie will be completed with all the required data. It supports different kind of inputs.
IMPORTANT: When the explanatory data are given in \code{rasterLayer} or \code{rasterStack} objects, \pkg{biomod2} will extract the variables onto the XY coordinates of the presence (and absence, if any) vector. Be sure to give the XY coordinates (\sQuote{resp.xy}) in the same projection system as the raster objects. The same applies to the evaluation data in case some sort of independent (or past) data are available (\sQuote{eval.resp.xy}).
When the explanatory variables are given in \code{\link[sp]{SpatialPointsDataFrame}}, the same requirements are asked than for the raster objects. The XY coordinates must be given to make sure biomod2 can extract the explanatory variables onto the presence (absence) data
When the explanatory variables are stored in a data.frame, make sure there are in the same order than the response variable. \pkg{biomod2} will simply merge the datasets without considering the XY coordinates.
When both presence and absence data are available, and there are enough absences: set \sQuote{PA.nb.rep} to 0. No pseudo-absences will be extracted.
When no true absences are given, or when they are not numerous enough, it is advised to make several pseudo-absence selections. That way, the influence of the pseudo-absence selection can be estimated later on. If the user does not want to run several repetitions, make sure to select a relatively high number of pseudo-absences. Make sure the number of pseudo-absence data is not higher than the maximum number of potential pseudo-absences (e.g. do not select 10,000 pseudo-absences when the rasterStack or data.frame does not contain more than 2000 pixels or rows).
\enumerate{
\item{\bold{Response variable encoding}}{
\code{BIOMOD_FormatingData} concerns a single species at a time so \code{resp.var} must be a uni-dimentional object.
Response variable must be a \code{vector} or a one column \code{data.frame}/\code{matrix}/\code{\link[sp]{SpatialPointsDataFrame}} ( \code{\link[sp]{SpatialPoints}} are also allowed if you work with \sQuote{only presences} data) object.
As most of \pkg{biomod2} models need Presences AND Absences data, the response variable must contain some absences (if there are not, make sure to select pseudo-absence). In the input \code{resp.var} argument, the data should be coded in the following way :
\itemize{
\item{Presences : 1}
 \item{True Absences : 0 (if any)}
 \item{No Information : NA (if any, might later be used for pseudo-absence)}
}
If \code{resp.var} is a non-spatial object (\code{vector}, \code{matrix}/\code{data.frame}) and that some models requiring spatial data are being used (e.g. MAXENT.Phillips) and/or pseudo absences spatialy dependent (i.e 'disk'), make sure to give the XY coordinates of the sites/rows (\sQuote{resp.xy}).
}
\item{\bold{Explanatory variables encoding}}{
Explanatory variables must be stored together in a multidimentional object. It may be a \code{matrix}, a \code{data.frame}, a \code{\link[sp]{SpatialPointsDataFrame}} or a \code{rasterStack} object. Factorial variables are allowed here even if that can lead to some models omissions.
}
\item{\bold{Evaluation Data}}{
If you have data enough, we strongly recommand to split your dataset into 2 part : one for training/calibrating and testing the models and another to evaluate it. If you do it, fill the \code{eval.resp.var}, \code{eval.expl.var} and optionally the \code{eval.resp.xy} arguments whith this data. The advantage of working with a specific dataset for evaluating your models is that you will be able to evaluate more properly your \sQuote{ensemble modeled} models. That being said, this argument is optional and you may prefer only to test (kind of evaluation) your models only with a \sQuote{cross-validation} procedure (see Models function). The best practice is to use one set of data for training/calibrating, one set of testing and one for evaluating. The calibration and testing of the data can be done automaticaly in \pkg{biomod2} in the Models function. The dataset for evaluation must be entered in \code{BIOMOD_FormatingData}.
}
\item{\bold{Pseudo Absences selection}}{
The \code{PA.xxx}'s arguments let you parametrise your pseudo absences selection if you want some. It's an optional step.
Pseudo absences will be selected within the \sQuote{background data} and might be constrained by a defined \sQuote{strategy}.
\enumerate{
\item{background data}{
\sQuote{Background data} represents data there is no information whether the species of interest occurs or not. It is defined by the \sQuote{No Information} data of your \code{resp.var} if you give some. If not, (i.e Only presences data or all cells with a define presence or absence state) the backgroud will be take into your \code{expl.var} object if it's a \code{RasterStack}.
}
\item{strategy}{
The strategy allows to constrain the choice of pseudo-absence within the \sQuote{background data}.
3 ways are currently implemented to select the pseudo-absences candidate cells (\code{PA.strategy} argument):
\itemize{
\item{\sQuote{random}: all cell of initial background are Pseudo absences candidates. The choice is made randomly given the number of pseudo-absence to select \code{PA.nb.absences}.}
\item{\sQuote{disk}: you may define a minimal (\code{PA.dist.min}), respectively a maximal (\code{PA.dist.max}) distance to presences points for selecting your pseudo absences candidates. That may be usefull if you don't want to select pseudo-absences too close to your presences (same niche and to avoid pseudo-replication), respectively too far from your presences (localised sampling startegy). }
\item{\sQuote{sre}: Pseudo absences candidates have to be selected in condition that differs from a defined proportion (\code{PA.sre.quant}) of presences data. It forces pseudo absences to be selected outside of the broadly defined environemental conditions for the species. It means that a surface range envelop model (sre, similar the BIOCLIM) is first carried out (using the specified quantile) on the species of interest, and then the pseudo-absence data are extracted outside of this envelop. This particular case may lead to over optimistic models evaluations.}
\item{\sQuote{user.defined}: In this case, pseudo absences selection should have been done in a previous step. This pseudo absences have to be reference into a well formated \code{data.frame} (e.g. \code{PA.table} argument)}
}
}
}
}
} % end enumerate
} %end detail section
\value{
A \code{'data.formated.Biomod.object'} for \code{\link[biomod2]{BIOMOD_Modeling}}.
It is strongly advised to check whether this formatted data corresponds to what was expected. A summary is easily printed by simply typing the name of the object. A generic plot function is also available to display the different datasets in the geographic space.
}
\author{ Wilfried Thuiller, Damien Georges }
\seealso{ \code{\link{BIOMOD_Modeling}}}
\examples{
# species occurrences
DataSpecies <- read.csv(system.file("external/species/mammals_table.csv",
package="biomod2"), row.names = 1)
head(DataSpecies)
# the name of studied species
myRespName <- 'GuloGulo'
# the presence/absences data for our species
myResp <- as.numeric(DataSpecies[,myRespName])
# the XY coordinates of species data
myRespXY <- DataSpecies[,c("X_WGS84","Y_WGS84")]
# Environmental variables extracted from BIOCLIM (bio_3, bio_4, bio_7, bio_11 & bio_12)
myExpl = stack( system.file( "external/bioclim/current/bio3.grd",
package="biomod2"),
system.file( "external/bioclim/current/bio4.grd",
package="biomod2"),
system.file( "external/bioclim/current/bio7.grd",
package="biomod2"),
system.file( "external/bioclim/current/bio11.grd",
package="biomod2"),
system.file( "external/bioclim/current/bio12.grd",
package="biomod2"))
# 1. Formatting Data
myBiomodData <- BIOMOD_FormatingData(resp.var = myResp,
expl.var = myExpl,
resp.xy = myRespXY,
resp.name = myRespName)
myBiomodData
plot(myBiomodData)
}
\keyword{ models }
\keyword{ datasets }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sbc_functions.R
\name{sbc_add_row_sum}
\alias{sbc_add_row_sum}
\title{Agrega un renglon con totales a un dataframe}
\usage{
sbc_add_row_sum(x, sumLabel = "total")
}
\description{
Las columnas no numericas contendran un caracter de longitud 0 o el texto del parametro sumLabel
}
| /man/sbc_add_row_sum.Rd | no_license | mygeorgyboy/SbcRepTables | R | false | true | 359 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sbc_functions.R
\name{sbc_add_row_sum}
\alias{sbc_add_row_sum}
\title{Agrega un renglon con totales a un dataframe}
\usage{
sbc_add_row_sum(x, sumLabel = "total")
}
\description{
Las columnas no numericas contendran un caracter de longitud 0 o el texto del parametro sumLabel
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/timmaModel1.R
\name{timmaModel1}
\alias{timmaModel1}
\title{Predicting drug sensitivity with binary drug-target interaction data using two.sided TIMMA model}
\usage{
timmaModel1(drug_target_profile, y_actual, loo = TRUE)
}
\arguments{
\item{drug_target_profile}{the drug-target interaction data. See \code{\link{timma}}.}
\item{y_actual}{a drug sensitivity vector.}
\item{loo}{a logical value indicating whether to use the leave-one-out cross-validation in the model
selection process. By default, loo = TRUE.}
}
\value{
A list containing the following components:
\item{dummy}{the predicted efficacy matrix}
\item{error}{the prediction errors}
\item{prediction}{predicted drug sensitivity}
The difference between \code{\link{timmaModel}} and \code{\link{timmaBinary}} is that \code{\link{timmaModel}}
returns the predicted efficacy matrix of all possible target combinations while \code{\link{timmaBinary}}
does not.
}
\description{
A function to predict the drug sensitivity with binary drug-target interaction data using the
two.sided TIMMA model
}
\examples{
data(tyner_interaction_binary)
data(tyner_sensitivity)
results<-timmaModel1(tyner_interaction_binary[, 1:6], tyner_sensitivity[,1])
}
\author{
Liye He \email{liye.he@helsinki.fi}
}
| /timma/man/timmaModel1.Rd | no_license | akhikolla/InformationHouse | R | false | false | 1,361 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/timmaModel1.R
\name{timmaModel1}
\alias{timmaModel1}
\title{Predicting drug sensitivity with binary drug-target interaction data using two.sided TIMMA model}
\usage{
timmaModel1(drug_target_profile, y_actual, loo = TRUE)
}
\arguments{
\item{drug_target_profile}{the drug-target interaction data. See \code{\link{timma}}.}
\item{y_actual}{a drug sensitivity vector.}
\item{loo}{a logical value indicating whether to use the leave-one-out cross-validation in the model
selection process. By default, loo = TRUE.}
}
\value{
A list containing the following components:
\item{dummy}{the predicted efficacy matrix}
\item{error}{the prediction errors}
\item{prediction}{predicted drug sensitivity}
The difference between \code{\link{timmaModel}} and \code{\link{timmaBinary}} is that \code{\link{timmaModel}}
returns the predicted efficacy matrix of all possible target combinations while \code{\link{timmaBinary}}
does not.
}
\description{
A function to predict the drug sensitivity with binary drug-target interaction data using the
two.sided TIMMA model
}
\examples{
data(tyner_interaction_binary)
data(tyner_sensitivity)
results<-timmaModel1(tyner_interaction_binary[, 1:6], tyner_sensitivity[,1])
}
\author{
Liye He \email{liye.he@helsinki.fi}
}
|
#' Create GRASS GIS location
#'
#' Calls GRASS GIS to create a new GRASS GIS location using either a
#' georeferenced file or EPSG code.
#'
#' @param grassExecutable GRASS GIS executable (full path or command)
#' @param readProjectionFrom A geospatial file with CRS to use
#' @param EPSG EPSG code of a desired CRS
#' @param database Path to GRASS GIS spatial database (directory)
#' @param location Name of newly created location
createGRASSlocation <- function (grassExecutable, readProjectionFrom, EPSG, database, location) {
  # Full path of the location to create inside the GRASS spatial database.
  locationPath <- file.path(database, location)
  # CRS source handed to `grass -c`: either a (quoted) georeferenced file
  # path, or an EPSG:<code> specifier when an EPSG code is supplied.
  # Building it once removes the duplicated command construction of the
  # two branches.
  if (missing(EPSG)) {
    crsSpec <- paste0("\"", readProjectionFrom, "\"")
  } else {
    crsSpec <- paste0("EPSG:", EPSG)
  }
  # -c creates the location, -e makes GRASS exit immediately afterwards.
  # Paths are wrapped in double quotes so spaces survive the shell.
  command <- paste0("\"", grassExecutable, "\"", " -c ", crsSpec,
                    " ", "\"", locationPath, "\"", " -e ")
  # Returns the shell exit status from system() (0 on success).
  system(command)
}
#' Get path to GRASS GIS installation
#'
#' Asks GRASS GIS where is its installation directory on the system.
#'
#' @param grassExecutable GRASS GIS executable (full path or command)
#' @return Path to the installation
getGRASSpath <- function (grassExecutable) {
  # Ask the GRASS executable itself where it is installed: `--config path`
  # prints the installation directory on stdout.
  rawOutput <- system(paste0("\"", grassExecutable, "\" --config path"), intern = TRUE)
  # Strip surrounding whitespace from the captured output before returning it.
  trimws(rawOutput)
}
| /createGRASSlocation.R | permissive | lukasgabor/R_grassgis | R | false | false | 1,306 | r | #' Create GRASS GIS location
#'
#' Calls GRASS GIS to create a new GRASS GIS location using either a
#' georeferenced file or EPSG code.
#'
#' @param grassExecutable GRASS GIS executable (full path or command)
#' @param readProjectionFrom A geospatial file with CRS to use
#' @param EPSG EPSG code of a desired CRS
#' @param database Path to GRASS GIS spatial database (directory)
#' @param location Name of newly created location
createGRASSlocation <- function (grassExecutable, readProjectionFrom, EPSG, database, location) {
  # Wrap a shell argument in double quotes so paths with spaces survive.
  quoteArg <- function(value) paste0("\"", value, "\"")
  executable <- quoteArg(grassExecutable)
  targetLocation <- quoteArg(file.path(database, location))
  if (missing(EPSG)) {
    # CRS taken from a georeferenced file; -c creates the location, -e exits.
    system(paste0(executable, " -c ", quoteArg(readProjectionFrom), " ", targetLocation, " -e "))
  } else {
    # CRS taken from an EPSG code.
    system(paste0(executable, " -c ", "EPSG:", EPSG, " ", targetLocation, " -e "))
  }
}
#' Get path to GRASS GIS installation
#'
#' Asks GRASS GIS where is its installation directory on the system.
#'
#' @param grassExecutable GRASS GIS executable (full path or command)
#' @return Path to the installation
getGRASSpath <- function (grassExecutable) {
  # `--config path` makes GRASS print its own installation directory.
  command <- sprintf("\"%s\" --config path", grassExecutable)
  installPath <- system(command, intern = TRUE)
  return(trimws(installPath))
}
|
\name{survivalROC}
\alias{survivalROC}
\title{Time-dependent ROC curve estimation from censored survival data}
\description{
This function creates time-dependent ROC curve from censored survival
data using the Kaplan-Meier (KM) or Nearest Neighbor Estimation (NNE)
method of Heagerty, Lumley and Pepe, 2000}
\usage{
survivalROC(Stime, status, marker, entry = NULL, predict.time, cut.values =
NULL, method = "NNE", lambda = NULL, span = NULL, window =
"symmetric")
}
\arguments{
\item{Stime}{Event time or censoring time for subjects}
\item{status}{Indicator of status, 1 if death or event, 0 otherwise }
\item{marker}{Predictor or marker value}
\item{entry}{Entry time for the subjects}
\item{predict.time}{Time point of the ROC curve}
\item{cut.values}{marker values to use as a cut-off for
calculation of sensitivity and specificity}
\item{method}{Method for fitting joint distribution of (marker,t), either
of KM or NNE, the default method is NNE}
\item{lambda}{smoothing parameter for NNE}
\item{span}{Span for the NNE, need either lambda or span for NNE}
\item{window}{window for NNE, either of symmetric or asymmetric}
}
\details{ Suppose we have censored survival data along with a baseline
marker value and we want to see how well the marker predicts the
survival time for the subjects in the dataset. In particular,
suppose we have survival times in days and we want to see how well the
marker predicts the one-year survival (predict.time=365 days). This
function, survivalROC(), returns the unique marker values, TP (True
Positive), FP (False Positive), Kaplan-Meier survival
estimate corresponding to the time point of interest (predict.time)
and AUC (Area Under (ROC) Curve) at the time point of interest.
}
\value{Returns a list of the following items:
\item{cut.values}{unique marker values for calculation of TP and FP}
\item{TP}{True Positive corresponding to the cut offs in marker}
\item{FP}{False Positive corresponding to the cut offs in marker}
\item{predict.time}{time point of interest}
\item{Survival}{Kaplan-Meier survival estimate at predict.time}
\item{AUC}{Area Under (ROC) Curve at time predict.time}
}
\references{Heagerty, P.J., Lumley, T., Pepe, M. S. (2000)
Time-dependent ROC Curves for Censored Survival Data and a Diagnostic
Marker \emph{Biometrics}, \bold{56}, 337 -- 344}
\author{Patrick J. Heagerty }
\examples{
data(mayo)
nobs <- NROW(mayo)
cutoff <- 365
## MAYOSCORE 4, METHOD = NNE
Mayo4.1= survivalROC(Stime=mayo$time,
status=mayo$censor,
marker = mayo$mayoscore4,
predict.time = cutoff,span = 0.25*nobs^(-0.20) )
plot(Mayo4.1$FP, Mayo4.1$TP, type="l", xlim=c(0,1), ylim=c(0,1),
xlab=paste( "FP", "\n", "AUC = ",round(Mayo4.1$AUC,3)),
ylab="TP",main="Mayoscore 4, Method = NNE \n Year = 1")
abline(0,1)
## MAYOSCORE 4, METHOD = KM
Mayo4.2= survivalROC(Stime=mayo$time,
status=mayo$censor,
marker = mayo$mayoscore4,
predict.time = cutoff, method="KM")
plot(Mayo4.2$FP, Mayo4.2$TP, type="l", xlim=c(0,1), ylim=c(0,1),
xlab=paste( "FP", "\n", "AUC = ",round(Mayo4.2$AUC,3)),
ylab="TP",main="Mayoscore 4, Method = KM \n Year = 1")
abline(0,1)
}
\keyword{survival}
| /man/survivalROC.rd | no_license | cran/survivalROC | R | false | false | 3,263 | rd | \name{survivalROC}
\alias{survivalROC}
\title{Time-dependent ROC curve estimation from censored survival data}
\description{
This function creates time-dependent ROC curve from censored survival
data using the Kaplan-Meier (KM) or Nearest Neighbor Estimation (NNE)
method of Heagerty, Lumley and Pepe, 2000}
\usage{
survivalROC(Stime, status, marker, entry = NULL, predict.time, cut.values =
NULL, method = "NNE", lambda = NULL, span = NULL, window =
"symmetric")
}
\arguments{
\item{Stime}{Event time or censoring time for subjects}
\item{status}{Indicator of status, 1 if death or event, 0 otherwise }
\item{marker}{Predictor or marker value}
\item{entry}{Entry time for the subjects}
\item{predict.time}{Time point of the ROC curve}
\item{cut.values}{marker values to use as a cut-off for
calculation of sensitivity and specificity}
\item{method}{Method for fitting joint distribution of (marker,t), either
of KM or NNE, the default method is NNE}
\item{lambda}{smoothing parameter for NNE}
\item{span}{Span for the NNE, need either lambda or span for NNE}
\item{window}{window for NNE, either of symmetric or asymmetric}
}
\details{ Suppose we have censored survival data along with a baseline
marker value and we want to see how well the marker predicts the
survival time for the subjects in the dataset. In particular,
suppose we have survival times in days and we want to see how well the
marker predicts the one-year survival (predict.time=365 days). This
function, survivalROC(), returns the unique marker values, TP (True
Positive), FP (False Positive), Kaplan-Meier survival
estimate corresponding to the time point of interest (predict.time)
and AUC (Area Under (ROC) Curve) at the time point of interest.
}
\value{Returns a list of the following items:
\item{cut.values}{unique marker values for calculation of TP and FP}
\item{TP}{True Positive corresponding to the cut offs in marker}
\item{FP}{False Positive corresponding to the cut offs in marker}
\item{predict.time}{time point of interest}
\item{Survival}{Kaplan-Meier survival estimate at predict.time}
\item{AUC}{Area Under (ROC) Curve at time predict.time}
}
\references{Heagerty, P.J., Lumley, T., Pepe, M. S. (2000)
Time-dependent ROC Curves for Censored Survival Data and a Diagnostic
Marker \emph{Biometrics}, \bold{56}, 337 -- 344}
\author{Patrick J. Heagerty }
\examples{
data(mayo)
nobs <- NROW(mayo)
cutoff <- 365
## MAYOSCORE 4, METHOD = NNE
Mayo4.1= survivalROC(Stime=mayo$time,
status=mayo$censor,
marker = mayo$mayoscore4,
predict.time = cutoff,span = 0.25*nobs^(-0.20) )
plot(Mayo4.1$FP, Mayo4.1$TP, type="l", xlim=c(0,1), ylim=c(0,1),
xlab=paste( "FP", "\n", "AUC = ",round(Mayo4.1$AUC,3)),
ylab="TP",main="Mayoscore 4, Method = NNE \n Year = 1")
abline(0,1)
## MAYOSCORE 4, METHOD = KM
Mayo4.2= survivalROC(Stime=mayo$time,
status=mayo$censor,
marker = mayo$mayoscore4,
predict.time = cutoff, method="KM")
plot(Mayo4.2$FP, Mayo4.2$TP, type="l", xlim=c(0,1), ylim=c(0,1),
xlab=paste( "FP", "\n", "AUC = ",round(Mayo4.2$AUC,3)),
ylab="TP",main="Mayoscore 4, Method = KM \n Year = 1")
abline(0,1)
}
\keyword{survival}
|
library(shiny)
## define UI for dataset viewer application
fluidPage(
##Application title
titlePanel("shiny text"),
# sidebar with controls to select a dataset and specify the
# number of observations to view
sidebarLayout(
sidebarPanel(
selectInput("dataset", "select a dataset:",
choice = c("rock", "pressure", "cars")),
numericInput("obs", "Number of observations to view", 10),
selectInput("test", "select the month",
choice = c("Jan", "Feb", "Mar"))
),
## show a summary of the dataset and an HTML table with the
## requested number of observations
mainPanel(
verbatimTextOutput("Summary"),
tableOutput("view"),
plotOutput("myplot")
)
)
) | /ui.R | no_license | codesofsamar/R-Project | R | false | false | 791 | r | library(shiny)
## define UI for dataset viewer application
fluidPage(
##Application title
titlePanel("shiny text"),
# sidebar with controls to selet a dataset and specify the
# number of observations to view
sidebarLayout(
sidebarPanel(
selectInput("dataset", "select a dataset:",
choice = c("rock", "pressure", "cars")),
numericInput("obs", "Number of observations to view", 10),
selectInput("test", "select the month",
choice = c("Jan", "Feb", "Mar"))
),
## show a summary of the dataset and an HTML table with the
## requested number of observations
mainPanel(
verbatimTextOutput("Summary"),
tableOutput("view"),
plotOutput("myplot")
)
)
) |
context("test-simulation_functions")

## Shared fixtures for all simulation tests below.
## NOTE: the runif() calls must keep their order and sizes so that the RNG
## stream seeded by set.seed(1234) is consumed exactly as before.
set.seed(1234)

n <- 7                               # number of samples to simulate
idx <- 2
d <- 4                               # model dimension
trend <- c(0.75, 0.00, 1.00, 0.75)

## Valid symmetric parameter matrix (zero diagonal) for the HR model.
G <- cbind(c(0, 1.5, 1.5, 2),
           c(1.5, 0, 2, 1.5),
           c(1.5, 2, 0, 1.5),
           c(2, 1.5, 1.5, 0))

## Tree parameter matrix with entries 2 * |i - j|.
G_tree <- 2 * abs(outer(seq_len(d), seq_len(d), `-`))

## Edge parameters supplied as a plain vector (one value per tree edge).
G_tree_vec <- runif(d - 1)

## Deliberately malformed parameter inputs.
G_wrong <- 0.2                       # not a matrix at all
G_wrong2 <- matrix(0, d, d)          # all-zero matrix
G_wrong3 <- G[, -1]                  # non-square matrix

## Valid and invalid scalar/vector parameters for the other models.
theta_1 <- 0.3
theta_2 <- 1.5
theta_3 <- c(0.3, 0.2, 0.7)
theta_wrong1 <- 10
theta_wrong2 <- -3
alpha <- runif(d)
alpha2 <- matrix(runif((d - 1) * 2), nrow = d - 1)
alpha_wrong1 <- 43
alpha_wrong2 <- c(1, 1, 1, -0.2)
alpha_wrong3 <- matrix(runif(d * 2), nrow = d)
alpha_wrong4 <- matrix(-runif((d - 1) * 2), nrow = d - 1)

## Covariance / Cholesky objects derived from G (component k = 3 left out).
cov_mat <- Gamma2Sigma(G, k = 3, full = FALSE)
chol_mat <- matrix(0, d, d)
chol_mat[-3, -3] <- chol(cov_mat)

## Graphs: a path (tree), its directed version, a connected non-tree,
## a disconnected graph, and an empty graph.
adj_path <- rbind(c(0, 1, 0, 0),
                  c(1, 0, 1, 0),
                  c(0, 1, 0, 1),
                  c(0, 0, 1, 0))
my_tree <- igraph::graph_from_adjacency_matrix(adj_path, mode = "undirected")
my_tree_dir <- igraph::graph_from_adjacency_matrix(adj_path)
graph_connected <- igraph::graph_from_adjacency_matrix(
  rbind(c(0, 1, 0, 0),
        c(1, 0, 1, 1),
        c(0, 1, 0, 1),
        c(0, 1, 1, 0)),
  mode = "undirected")
graph_disconnected <- igraph::graph_from_adjacency_matrix(
  rbind(c(0, 1, 0, 0),
        c(1, 0, 1, 0),
        c(0, 1, 0, 0),
        c(0, 0, 0, 0)),
  mode = "undirected")
empty_graph <- igraph::make_empty_graph(n = 0, directed = FALSE)
# Run tests
test_that("rmpareto works", {
  ## Invalid arguments must raise errors.
  expect_error(rmpareto(n = n, d = d, par = theta_1))
  expect_error(rmpareto(n, "HR", -1, par = G))
  expect_error(rmpareto(n, "HR", 1.2, par = G))
  expect_error(rmpareto(-1, "HR", d, par = G))
  expect_error(rmpareto(1.2, "HR", d, par = G))
  expect_error(rmpareto(n, "HRrrrrrrrr", d, par = G))
  expect_error(rmpareto(n, "HR", d, par = G_wrong))
  expect_error(rmpareto(n, "logistic", d, par = theta_wrong1))
  expect_error(rmpareto(n, "neglogistic", d, par = theta_wrong2))
  expect_error(rmpareto(n, "dirichlet", d, par = alpha_wrong1))
  expect_error(rmpareto(n, "dirichlet", d, par = alpha_wrong2))
  expect_error(rmpareto(n, "HR", d, par = G_wrong2))
  expect_error(rmpareto(n, "HR", d, par = G_wrong3))

  ## Every valid call returns an n x d numeric matrix.
  expect_sim_matrix <- function(res) {
    expect_true(is.matrix(res))
    expect_type(res, "double")
    expect_equal(dim(res), c(n, d))
  }
  expect_sim_matrix(rmpareto(n, d = d, par = G))
  expect_sim_matrix(rmpareto(n, "HR", d, par = G))
  expect_sim_matrix(rmpareto(n, "logistic", d, par = theta_1))
  expect_sim_matrix(rmpareto(n, "neglogistic", d, par = theta_2))
  expect_sim_matrix(rmpareto(n, "dirichlet", d, par = alpha))
})
test_that("rmpareto_tree works", {
  ## Graph-structure validation: directed trees warn, non-trees error.
  expect_warning(rmpareto_tree(n, tree = my_tree_dir, par = G_tree))
  expect_error(rmpareto_tree(n, tree = graph_connected, par = G_tree))
  expect_error(rmpareto_tree(n, tree = graph_disconnected, par = G_tree))
  expect_error(rmpareto_tree(n, tree = empty_graph, par = G_tree))
  expect_error(rmpareto_tree(n, tree = my_tree, par = alpha2))

  ## Argument validation for n, model, and parameter shapes/values.
  expect_error(rmpareto_tree(n = -1, model = "HR", tree = my_tree,
                             par = G_tree))
  expect_error(rmpareto_tree(n = 1.2, model = "HR", tree = my_tree,
                             par = G_tree))
  expect_error(rmpareto_tree(n = n, model = "foo", tree = my_tree,
                             par = G_tree))
  expect_error(rmpareto_tree(n = n, model = "HR", tree = my_tree,
                             par = c(0.3, 0.2)))
  expect_error(rmpareto_tree(n = n, model = "HR", tree = my_tree,
                             par = G_wrong3))
  expect_error(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                             par = theta_wrong1))
  expect_error(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(-theta_wrong1, .2, .3)))
  expect_error(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(-theta_wrong1, .2, .3, .2)))
  expect_error(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(.1, .2, .3, .2)))
  expect_warning(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                               par = theta_1))
  expect_error(rmpareto_tree(n = n, model = "dirichlet", tree = my_tree,
                             par = alpha_wrong3))
  expect_error(rmpareto_tree(n = n, model = "dirichlet", tree = my_tree,
                             par = alpha_wrong4))

  ## Every valid call returns an n x d numeric matrix.
  expect_sim_matrix <- function(res) {
    expect_true(is.matrix(res))
    expect_type(res, "double")
    expect_equal(dim(res), c(n, d))
  }
  ## The default-model call is exercised twice, as in the original test.
  expect_sim_matrix(rmpareto_tree(n, tree = my_tree, par = G_tree))
  expect_sim_matrix(rmpareto_tree(n, tree = my_tree, par = G_tree))
  expect_sim_matrix(rmpareto_tree(n, model = "HR", tree = my_tree,
                                  par = G_tree))
  expect_sim_matrix(rmpareto_tree(n, model = "HR", tree = my_tree,
                                  par = G_tree_vec))
  expect_sim_matrix(rmpareto_tree(n, model = "logistic", tree = my_tree,
                                  par = theta_3))
  expect_sim_matrix(rmpareto_tree(n, model = "dirichlet", tree = my_tree,
                                  par = alpha2))
})
test_that("rmstable works", {
  ## Invalid arguments must raise errors.
  expect_error(rmstable(n = n, d = d, par = theta_1))
  expect_error(rmstable(n, "HR", -1, par = G))
  expect_error(rmstable(n, "HR", 1.2, par = G))
  expect_error(rmstable(-1, "HR", d, par = G))
  expect_error(rmstable(1.2, "HR", d, par = G))
  expect_error(rmstable(n, "HRrrrrrrrr", d, par = G))
  expect_error(rmstable(n, "HR", d, par = G_wrong))
  expect_error(rmstable(n, "logistic", d, par = theta_wrong1))
  expect_error(rmstable(n, "neglogistic", d, par = theta_wrong2))
  expect_error(rmstable(n, "dirichlet", d, par = alpha_wrong1))
  expect_error(rmstable(n, "dirichlet", d, par = alpha_wrong2))
  expect_error(rmstable(n, "HR", d, par = G_wrong2))
  expect_error(rmstable(n, "HR", d, par = G_wrong3))

  ## Every valid call returns an n x d numeric matrix.
  expect_sim_matrix <- function(res) {
    expect_true(is.matrix(res))
    expect_type(res, "double")
    expect_equal(dim(res), c(n, d))
  }
  expect_sim_matrix(rmstable(n, d = d, par = G))
  expect_sim_matrix(rmstable(n, "HR", d, par = G))
  expect_sim_matrix(rmstable(n, "logistic", d, par = theta_1))
  expect_sim_matrix(rmstable(n, "neglogistic", d, par = theta_2))
  expect_sim_matrix(rmstable(n, "dirichlet", d, par = alpha))
})
test_that("rmstable_tree works", {
  ## Graph-structure validation: directed trees warn, non-trees error.
  expect_warning(rmstable_tree(n, tree = my_tree_dir, par = G_tree))
  expect_error(rmstable_tree(n, tree = graph_connected, par = G_tree))
  expect_error(rmstable_tree(n, tree = graph_disconnected, par = G_tree))
  expect_error(rmstable_tree(n, tree = empty_graph, par = G_tree))
  expect_error(rmstable_tree(n, tree = my_tree, par = alpha2))

  ## Argument validation for n, model, and parameter shapes/values.
  expect_error(rmstable_tree(n = -1, model = "HR", tree = my_tree,
                             par = G_tree))
  expect_error(rmstable_tree(n = 1.2, model = "HR", tree = my_tree,
                             par = G_tree))
  expect_error(rmstable_tree(n = n, model = "foo", tree = my_tree,
                             par = G_tree))
  expect_error(rmstable_tree(n = n, model = "HR", tree = my_tree,
                             par = c(0.3, 0.2)))
  expect_error(rmstable_tree(n = n, model = "HR", tree = my_tree,
                             par = G_wrong3))
  expect_error(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                             par = theta_wrong1))
  expect_error(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(-theta_wrong1, .2, .4)))
  expect_error(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(-theta_wrong1, .2, .4, 2)))
  expect_error(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(.9, .2, .4, .2)))
  expect_warning(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                               par = theta_1))
  expect_error(rmstable_tree(n = n, model = "dirichlet", tree = my_tree,
                             par = alpha_wrong3))
  expect_error(rmstable_tree(n = n, model = "dirichlet", tree = my_tree,
                             par = alpha_wrong4))

  ## Every valid call returns an n x d numeric matrix.
  expect_sim_matrix <- function(res) {
    expect_true(is.matrix(res))
    expect_type(res, "double")
    expect_equal(dim(res), c(n, d))
  }
  expect_sim_matrix(rmstable_tree(n, tree = my_tree, par = G_tree))
  expect_sim_matrix(rmstable_tree(n, tree = my_tree, par = G_tree_vec))
  expect_sim_matrix(rmstable_tree(n, model = "HR", tree = my_tree,
                                  par = G_tree))
  expect_sim_matrix(rmstable_tree(n, model = "HR", tree = my_tree,
                                  par = G_tree_vec))
  expect_sim_matrix(rmstable_tree(n, model = "logistic", tree = my_tree,
                                  par = theta_3))
  expect_sim_matrix(rmstable_tree(n, model = "dirichlet", tree = my_tree,
                                  par = alpha2))
})
| /tests/testthat/test-simulation_functions.R | no_license | ayotoasset/graphicalExtremes | R | false | false | 11,053 | r | context("test-simulation_functions")
## Shared fixtures for all simulation tests below.
## NOTE: the runif() calls must keep their order and sizes so that the RNG
## stream seeded by set.seed(1234) is consumed exactly as before.
set.seed(1234)

n <- 7                               # number of samples to simulate
idx <- 2
d <- 4                               # model dimension
trend <- c(0.75, 0.00, 1.00, 0.75)

## Valid symmetric parameter matrix (zero diagonal) for the HR model.
G <- cbind(c(0, 1.5, 1.5, 2),
           c(1.5, 0, 2, 1.5),
           c(1.5, 2, 0, 1.5),
           c(2, 1.5, 1.5, 0))

## Tree parameter matrix with entries 2 * |i - j|.
G_tree <- 2 * abs(outer(seq_len(d), seq_len(d), `-`))

## Edge parameters supplied as a plain vector (one value per tree edge).
G_tree_vec <- runif(d - 1)

## Deliberately malformed parameter inputs.
G_wrong <- 0.2                       # not a matrix at all
G_wrong2 <- matrix(0, d, d)          # all-zero matrix
G_wrong3 <- G[, -1]                  # non-square matrix

## Valid and invalid scalar/vector parameters for the other models.
theta_1 <- 0.3
theta_2 <- 1.5
theta_3 <- c(0.3, 0.2, 0.7)
theta_wrong1 <- 10
theta_wrong2 <- -3
alpha <- runif(d)
alpha2 <- matrix(runif((d - 1) * 2), nrow = d - 1)
alpha_wrong1 <- 43
alpha_wrong2 <- c(1, 1, 1, -0.2)
alpha_wrong3 <- matrix(runif(d * 2), nrow = d)
alpha_wrong4 <- matrix(-runif((d - 1) * 2), nrow = d - 1)

## Covariance / Cholesky objects derived from G (component k = 3 left out).
cov_mat <- Gamma2Sigma(G, k = 3, full = FALSE)
chol_mat <- matrix(0, d, d)
chol_mat[-3, -3] <- chol(cov_mat)

## Graphs: a path (tree), its directed version, a connected non-tree,
## a disconnected graph, and an empty graph.
adj_path <- rbind(c(0, 1, 0, 0),
                  c(1, 0, 1, 0),
                  c(0, 1, 0, 1),
                  c(0, 0, 1, 0))
my_tree <- igraph::graph_from_adjacency_matrix(adj_path, mode = "undirected")
my_tree_dir <- igraph::graph_from_adjacency_matrix(adj_path)
graph_connected <- igraph::graph_from_adjacency_matrix(
  rbind(c(0, 1, 0, 0),
        c(1, 0, 1, 1),
        c(0, 1, 0, 1),
        c(0, 1, 1, 0)),
  mode = "undirected")
graph_disconnected <- igraph::graph_from_adjacency_matrix(
  rbind(c(0, 1, 0, 0),
        c(1, 0, 1, 0),
        c(0, 1, 0, 0),
        c(0, 0, 0, 0)),
  mode = "undirected")
empty_graph <- igraph::make_empty_graph(n = 0, directed = FALSE)
# Run tests
test_that("rmpareto works", {
  ## Invalid arguments must raise errors.
  expect_error(rmpareto(n = n, d = d, par = theta_1))
  expect_error(rmpareto(n, "HR", -1, par = G))
  expect_error(rmpareto(n, "HR", 1.2, par = G))
  expect_error(rmpareto(-1, "HR", d, par = G))
  expect_error(rmpareto(1.2, "HR", d, par = G))
  expect_error(rmpareto(n, "HRrrrrrrrr", d, par = G))
  expect_error(rmpareto(n, "HR", d, par = G_wrong))
  expect_error(rmpareto(n, "logistic", d, par = theta_wrong1))
  expect_error(rmpareto(n, "neglogistic", d, par = theta_wrong2))
  expect_error(rmpareto(n, "dirichlet", d, par = alpha_wrong1))
  expect_error(rmpareto(n, "dirichlet", d, par = alpha_wrong2))
  expect_error(rmpareto(n, "HR", d, par = G_wrong2))
  expect_error(rmpareto(n, "HR", d, par = G_wrong3))

  ## Every valid call returns an n x d numeric matrix.
  expect_sim_matrix <- function(res) {
    expect_true(is.matrix(res))
    expect_type(res, "double")
    expect_equal(dim(res), c(n, d))
  }
  expect_sim_matrix(rmpareto(n, d = d, par = G))
  expect_sim_matrix(rmpareto(n, "HR", d, par = G))
  expect_sim_matrix(rmpareto(n, "logistic", d, par = theta_1))
  expect_sim_matrix(rmpareto(n, "neglogistic", d, par = theta_2))
  expect_sim_matrix(rmpareto(n, "dirichlet", d, par = alpha))
})
test_that("rmpareto_tree works", {
  ## Graph-structure validation: directed trees warn, non-trees error.
  expect_warning(rmpareto_tree(n, tree = my_tree_dir, par = G_tree))
  expect_error(rmpareto_tree(n, tree = graph_connected, par = G_tree))
  expect_error(rmpareto_tree(n, tree = graph_disconnected, par = G_tree))
  expect_error(rmpareto_tree(n, tree = empty_graph, par = G_tree))
  expect_error(rmpareto_tree(n, tree = my_tree, par = alpha2))

  ## Argument validation for n, model, and parameter shapes/values.
  expect_error(rmpareto_tree(n = -1, model = "HR", tree = my_tree,
                             par = G_tree))
  expect_error(rmpareto_tree(n = 1.2, model = "HR", tree = my_tree,
                             par = G_tree))
  expect_error(rmpareto_tree(n = n, model = "foo", tree = my_tree,
                             par = G_tree))
  expect_error(rmpareto_tree(n = n, model = "HR", tree = my_tree,
                             par = c(0.3, 0.2)))
  expect_error(rmpareto_tree(n = n, model = "HR", tree = my_tree,
                             par = G_wrong3))
  expect_error(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                             par = theta_wrong1))
  expect_error(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(-theta_wrong1, .2, .3)))
  expect_error(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(-theta_wrong1, .2, .3, .2)))
  expect_error(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(.1, .2, .3, .2)))
  expect_warning(rmpareto_tree(n = n, model = "logistic", tree = my_tree,
                               par = theta_1))
  expect_error(rmpareto_tree(n = n, model = "dirichlet", tree = my_tree,
                             par = alpha_wrong3))
  expect_error(rmpareto_tree(n = n, model = "dirichlet", tree = my_tree,
                             par = alpha_wrong4))

  ## Every valid call returns an n x d numeric matrix.
  expect_sim_matrix <- function(res) {
    expect_true(is.matrix(res))
    expect_type(res, "double")
    expect_equal(dim(res), c(n, d))
  }
  ## The default-model call is exercised twice, as in the original test.
  expect_sim_matrix(rmpareto_tree(n, tree = my_tree, par = G_tree))
  expect_sim_matrix(rmpareto_tree(n, tree = my_tree, par = G_tree))
  expect_sim_matrix(rmpareto_tree(n, model = "HR", tree = my_tree,
                                  par = G_tree))
  expect_sim_matrix(rmpareto_tree(n, model = "HR", tree = my_tree,
                                  par = G_tree_vec))
  expect_sim_matrix(rmpareto_tree(n, model = "logistic", tree = my_tree,
                                  par = theta_3))
  expect_sim_matrix(rmpareto_tree(n, model = "dirichlet", tree = my_tree,
                                  par = alpha2))
})
test_that("rmstable works", {
  ## Invalid arguments must raise errors.
  expect_error(rmstable(n = n, d = d, par = theta_1))
  expect_error(rmstable(n, "HR", -1, par = G))
  expect_error(rmstable(n, "HR", 1.2, par = G))
  expect_error(rmstable(-1, "HR", d, par = G))
  expect_error(rmstable(1.2, "HR", d, par = G))
  expect_error(rmstable(n, "HRrrrrrrrr", d, par = G))
  expect_error(rmstable(n, "HR", d, par = G_wrong))
  expect_error(rmstable(n, "logistic", d, par = theta_wrong1))
  expect_error(rmstable(n, "neglogistic", d, par = theta_wrong2))
  expect_error(rmstable(n, "dirichlet", d, par = alpha_wrong1))
  expect_error(rmstable(n, "dirichlet", d, par = alpha_wrong2))
  expect_error(rmstable(n, "HR", d, par = G_wrong2))
  expect_error(rmstable(n, "HR", d, par = G_wrong3))

  ## Every valid call returns an n x d numeric matrix.
  expect_sim_matrix <- function(res) {
    expect_true(is.matrix(res))
    expect_type(res, "double")
    expect_equal(dim(res), c(n, d))
  }
  expect_sim_matrix(rmstable(n, d = d, par = G))
  expect_sim_matrix(rmstable(n, "HR", d, par = G))
  expect_sim_matrix(rmstable(n, "logistic", d, par = theta_1))
  expect_sim_matrix(rmstable(n, "neglogistic", d, par = theta_2))
  expect_sim_matrix(rmstable(n, "dirichlet", d, par = alpha))
})
test_that("rmstable_tree works", {
  ## Graph-structure validation: directed trees warn, non-trees error.
  expect_warning(rmstable_tree(n, tree = my_tree_dir, par = G_tree))
  expect_error(rmstable_tree(n, tree = graph_connected, par = G_tree))
  expect_error(rmstable_tree(n, tree = graph_disconnected, par = G_tree))
  expect_error(rmstable_tree(n, tree = empty_graph, par = G_tree))
  expect_error(rmstable_tree(n, tree = my_tree, par = alpha2))

  ## Argument validation for n, model, and parameter shapes/values.
  expect_error(rmstable_tree(n = -1, model = "HR", tree = my_tree,
                             par = G_tree))
  expect_error(rmstable_tree(n = 1.2, model = "HR", tree = my_tree,
                             par = G_tree))
  expect_error(rmstable_tree(n = n, model = "foo", tree = my_tree,
                             par = G_tree))
  expect_error(rmstable_tree(n = n, model = "HR", tree = my_tree,
                             par = c(0.3, 0.2)))
  expect_error(rmstable_tree(n = n, model = "HR", tree = my_tree,
                             par = G_wrong3))
  expect_error(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                             par = theta_wrong1))
  expect_error(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(-theta_wrong1, .2, .4)))
  expect_error(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(-theta_wrong1, .2, .4, 2)))
  expect_error(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                             par = c(.9, .2, .4, .2)))
  expect_warning(rmstable_tree(n = n, model = "logistic", tree = my_tree,
                               par = theta_1))
  expect_error(rmstable_tree(n = n, model = "dirichlet", tree = my_tree,
                             par = alpha_wrong3))
  expect_error(rmstable_tree(n = n, model = "dirichlet", tree = my_tree,
                             par = alpha_wrong4))

  ## Every valid call returns an n x d numeric matrix.
  expect_sim_matrix <- function(res) {
    expect_true(is.matrix(res))
    expect_type(res, "double")
    expect_equal(dim(res), c(n, d))
  }
  expect_sim_matrix(rmstable_tree(n, tree = my_tree, par = G_tree))
  expect_sim_matrix(rmstable_tree(n, tree = my_tree, par = G_tree_vec))
  expect_sim_matrix(rmstable_tree(n, model = "HR", tree = my_tree,
                                  par = G_tree))
  expect_sim_matrix(rmstable_tree(n, model = "HR", tree = my_tree,
                                  par = G_tree_vec))
  expect_sim_matrix(rmstable_tree(n, model = "logistic", tree = my_tree,
                                  par = theta_3))
  expect_sim_matrix(rmstable_tree(n, model = "dirichlet", tree = my_tree,
                                  par = alpha2))
})
|
## These data come from gene expression microarray experiments on small, round blue-cell
## tumors (SRCTs), described in Khan et al (2001). Nature Medicine, 7: 673-679. There
## are four different SRBCT tumor types: neuroblastoma (NB), rhabdomyosarcoma (RMS), non-
## Hodgkin lymphoma (NHL), and the Ewing family of tumors (EWS). The column names
## indicate the tumor type, with 1 corresponding to NHL, 2 to EWS, 3 to NB, and 4 to RMS.
## Load data.
## NOTE(review): rm(list=ls()) wipes the caller's workspace and setwd() hard-codes
## a machine-specific path -- both are side effects on the session; consider
## removing them and using a project-relative path instead.
rm(list=ls())
setwd("~/MS/636/Code")
## Rows are genes, columns are samples (used as data[i, ] per gene below);
## column names are assumed to encode the tumor type in their second
## character -- TODO confirm against the actual khan.csv header.
data = as.matrix(read.csv("./data/khan.csv"))
id = colnames(data)
for(i in 1:ncol(data))
id[i] = substr(id[i], 2, 2)
m = nrow(data)
n = ncol(data)
n_k = as.numeric(table(id))
## Fix up the column names
c_names_1 = rep("1", n_k[1])
c_names_2 = rep("2", n_k[2])
c_names_3 = rep("3", n_k[3])
c_names_4 = rep("4", n_k[4])
for(i in 1:n_k[1])
c_names_1[i] = paste(c_names_1[i], "_", i, sep = "")
for(i in 1:n_k[2])
c_names_2[i] = paste(c_names_2[i], "_", i, sep = "")
for(i in 1:n_k[3])
c_names_3[i] = paste(c_names_3[i], "_", i, sep = "")
for(i in 1:n_k[4])
c_names_4[i] = paste(c_names_4[i], "_", i, sep = "")
c_names = c(c_names_1, c_names_2, c_names_3, c_names_4)
tmp = id
tmp[id == 1] = c_names_1
tmp[id == 2] = c_names_2
tmp[id == 3] = c_names_3
tmp[id == 4] = c_names_4
c_names = tmp
colnames(data) = c_names
## Basic summary statistics
data = as.matrix(data)
X = model.matrix(~ factor(id) - 1)
grp_means = t(t(data %*% X) / n_k)
grp_sds = sqrt(t(t(((data - grp_means %*% t(X)) ^ 2) %*% X) / (n_k - 1)))
####
#### EDA.
####
#### Boxplots and scatterplots
## Some example gene-specific boxplots.
par(mfrow = c(2, 2))
boxplot(data[1, id == 1], data[1, id == 2], data[1, id == 3], data[1, id == 4])
boxplot(data[2, id == 1], data[2, id == 2], data[2, id == 3], data[2, id == 4])
boxplot(data[3, id == 1], data[3, id == 2], data[3, id == 3], data[3, id == 4])
boxplot(data[4, id == 1], data[4, id == 2], data[4, id == 3], data[4, id == 4])
## Array-specific boxplots by groups, one panel per tumor type.
par(mfrow = c(2, 2))
boxplot(data[, id == 1], ylim = c(-6, 3), main = "Tumor type 1")
boxplot(data[, id == 2], ylim = c(-6, 3), main = "Tumor type 2")
## Fixed copy-paste bug: panels 3 and 4 were both titled "Tumor type 2".
boxplot(data[, id == 3], ylim = c(-6, 3), main = "Tumor type 3")
boxplot(data[, id == 4], ylim = c(-6, 3), main = "Tumor type 4")
## Overall comparisons of groups, in terms of both means and s.d.s.
par(mfrow = c(1, 2))
boxplot(grp_means[, 1], grp_means[, 2], grp_means[, 3], grp_means[, 4], names = 1:4,
main = "Means by tumor type")
boxplot(grp_sds[, 1], grp_sds[, 2], grp_sds[, 3], grp_sds[, 4], names = 1:4,
main = "S.D.s by tumor type")
## Pairwise scatterplots
par(mfrow = c(1, 2))
plot(data[, 1], data[, 2], pch = 20, col = "grey", xlab = "Array 1", ylab = "Array 2")
abline(0, 1, lty = 2)
lines(lowess(data[, 1], data[, 2]), lwd = 2, col = "blue")
plot((data[, 1] + data[, 2]) / 2, data[, 2] - data[, 1], pch = 20, col = "grey",
xlab = "A: (Array 1 + Array 2) / 2", ylab = "M: Array 2 - Array 1")
abline(0, 0, lty = 2)
lines(lowess((data[, 1] + data[, 2]) / 2, data[, 2] - data[, 1]), lwd = 2, col = "blue")
## Mean, variance relationship?
par(mfrow = c(2, 2))
plot(grp_means[, 1], grp_sds[, 1], xlab = "means_1", ylab = "sds_1", pch = 20,
col = "grey")
lines(lowess(grp_means[, 1], grp_sds[, 1]), lwd = 2, col = "blue")
plot(grp_means[, 2], grp_sds[, 2], xlab = "means_2", ylab = "sds_2", pch = 20,
col = "grey")
lines(lowess(grp_means[, 2], grp_sds[, 2]), lwd = 2, col = "blue")
plot(grp_means[, 3], grp_sds[, 3], xlab = "means_3", ylab = "sds_3", pch = 20,
col = "grey")
lines(lowess(grp_means[, 3], grp_sds[, 3]), lwd = 2, col = "blue")
plot(grp_means[, 4], grp_sds[, 4], xlab = "means_4", ylab = "sds_4", pch = 20,
col = "grey")
lines(lowess(grp_means[, 4], grp_sds[, 4]), lwd = 2, col = "blue")
#### Clustering.
## Clustering on samples.
KM = kmeans(t(data), 4)
table(id, KM$cluster)
par(mfrow = c(1, 1))
HC = hclust(dist(t(data)), method = "complete")
plot(HC)
plot(HC$height)
#### SVD.
data_c = t(scale(t(data), center = T, scale = F))
ss = svd(data_c)
dd = ss$d ^ 2 / sum(ss$d ^ 2)
round(dd, 2)
oo = order(id)
plot(1:83, ss$v[oo, 1], type = "l", col = "green", ylim = c(-0.4, 0.4), lwd = 2)
lines(1:83, ss$v[oo, 2], col = "lightblue")
lines(1:83, ss$v[oo, 3], col = "pink")
abline(0, 0, lty = 2, col = "black")
for(i in cumsum(n_k)[1:3])
lines(c(i, i), c(-1, 1))
####
#### Differential expression analysis, looking for differences between the four tumor
#### classes.
####
## Per-gene one-way ANOVA: overall F-test p-value for expression differences
## across the four tumor classes.
p_vals = rep(NA, m)
for(i in seq_len(m)) {
  d_i = as.numeric(data[i, ])
  ## "fstatistic" spelled out (the original relied on partial matching via
  ## "$fstat"); components are c(value, numdf, dendf).
  fit = summary(lm(d_i ~ 1 + factor(id)))$fstatistic
  ## lower.tail = FALSE avoids the precision loss of 1 - pf(...) for the
  ## very small p-values typical of strongly differential genes.
  p_vals[i] = pf(fit[1], fit[2], fit[3], lower.tail = FALSE)
}
## Histogram of p-values.
par(mfrow = c(1, 1))
hh = hist(p_vals, prob = T, main = "P-values", xlab = "")
abline(1, 0, lty = 2)
## Estimate FDR and q-values.
pi_0_hat = mean(hh$density[8:10])
library(qvalue)
qq = qvalue(p_vals)
qq$pi0
pi_0_hat
FDR_hat = rep(NA, m)
for(i in 1:m) {
FDR_hat[i] = (pi_0_hat * m * p_vals[i]) / sum(p_vals <= p_vals[i])
}
q_vals = rep(NA, m)
for(i in 1:m) {
q_vals[i] = min(FDR_hat[p_vals >= p_vals[i]])
}
## P-values vs. q-values.
plot(p_vals[order(p_vals)], q_vals[order(p_vals)], type = "s",
xlab = "P-values", ylab = "Q-values")
abline(0, 1, lty = 2)
## Number of genes selected at each q-value threshold.
S_hat = rep(NA, m)
for(i in 1:m) {
S_hat[i] = sum(q_vals <= q_vals[i])
}
oo = order(q_vals)
plot(q_vals[oo], S_hat[oo], type = "s", xlab = "q-value", ylab =
"number of features selected")
## Can obtain 1000 features with an FDR < 0.01.
which.min(abs(S_hat - 1000))
S_hat[2123]
q_vals[2123]
######
###### Classification.
######
## Set aside roughly 2 / 3 of the samples for training. Pick these randomly, within each
## tumor type.
set.seed(101)
tr_ii <- NULL
for(i in 1:4) {
jj <- (1:n)[as.numeric(id) == i]
n_i <- length(jj)
n_tr <- floor(n_i * 2 / 3)
tr_ii <- c(tr_ii, sample(jj, n_tr, replace = FALSE))
}
Y_df <- data.frame("GRP" = factor(id), t(data)) # col is response var
Y_train_df <- Y_df[tr_ii, ]
Y_test_df <- Y_df[-tr_ii, ]
Y_df[1:5, 1:5]
dim(Y_train_df)
dim(Y_test_df)
####
#### Naive Bayes is 86% accurate.
####
library(e1071)
# gen synrax for predictive
# fit model tilda
# pred
#build model qda,lda, naive
fit_NB <- naiveBayes(GRP ~ ., data = Y_train_df) # .dot means vs all pred variables
class(fit_NB)
#[1] "naiveBayes"
#fit that with data
#lot of predict functions
#? predict -- gen fn
# predict.class
#?predict.naiveBayes
# it calls appro predict based on class obj ( lda, qda, naive)
pred_NB <- predict(fit_NB, newdata = Y_test_df)
# confusion matrix
# cross tabulate pred with truth
# dioagnoal eles are correct pred
# 4 cases we pred in class 1 and match with truth
table(pred_NB, Y_test_df$GRP)
# overall accuracy
# how many off diagnoal non zero /total no of pred = overall correct pred
mean(pred_NB == Y_test_df$GRP)
####
#### KNN. Choice of K doesn't seem to matter much, with the accuracies apparently bobbing
#### randomly about 87% accuracy or so.
####
library(class)
pred_KNN <- rep(NA, 10)
for(k in 1:10) {
pred_KNN_k <- knn(Y_train_df[, -1], Y_test_df[, -1], Y_train_df$GRP, k = k)
pred_KNN[k] <- mean(pred_KNN_k == Y_test_df$GRP)
}
plot(1:10, pred_KNN, xlab = "K", ylab = "Accuracy", xaxt = "n", type = "l", lwd = 2)
axis(1, at = 1:10)
####
#### Lasso is nearly 100% accurate, using about 30 features.
####
library(glmnet)
set.seed(101)
cv_lasso <- cv.glmnet(as.matrix(Y_df[, -1]), Y_df$GRP, alpha = 1, family = "multinomial",
type.measure = "class")
plot(cv_lasso)
grid <- cv_lasso$lambda
fit_lasso <- glmnet(as.matrix(Y_df[, -1]), Y_df$GRP, alpha = 1, family = "multinomial",
type.multinomial = "grouped", lambda = grid)
par(mfrow = c(2, 2))
plot(fit_lasso, xvar = "lambda")
lambda_min <- cv_lasso$lambda.min
cf_lasso <- cbind(coef(fit_lasso)[[1]][-1, match(lambda_min, fit_lasso$lambda)],
coef(fit_lasso)[[2]][-1, match(lambda_min, fit_lasso$lambda)],
coef(fit_lasso)[[3]][-1, match(lambda_min, fit_lasso$lambda)],
coef(fit_lasso)[[4]][-1, match(lambda_min, fit_lasso$lambda)])
features_lasso <- (1:m)[rowSums(cf_lasso) != 0]
####
#### Feature selection, then KNN. As lasso suggests, high accuracy can be obtained with
#### a small fraction of the total number of features. Let's try carrying out formal
#### feature selection via feature-specific statistical tests. We'll just do K = 5 for
#### KNN.
####
## Let's do five-fold cross-validation. Our sample sizes in each group don't divide
## nicely into five groups, so we'll use these sample sizes for testing in each fold.
fold_1_n = c(2, 6, 4, 5)
fold_2_n = c(2, 6, 4, 5)
fold_3_n = c(2, 6, 4, 4)
fold_4_n = c(2, 6, 3, 5)
fold_5_n = c(3, 5, 3, 6)
fold_n = rbind(fold_1_n, fold_2_n, fold_3_n, fold_4_n, fold_5_n)
## Now randomly select the above numbers from each class.
class_1_samples = sample((1:n)[id == 1])
class_2_samples = sample((1:n)[id == 2])
class_3_samples = sample((1:n)[id == 3])
class_4_samples = sample((1:n)[id == 4])
fold_1_samples = c(class_1_samples[1:2], class_2_samples[1:6], class_3_samples[1:4],
class_4_samples[1:5])
fold_2_samples = c(class_1_samples[3:4], class_2_samples[7:12], class_3_samples[5:8],
class_4_samples[6:10])
fold_3_samples = c(class_1_samples[5:6], class_2_samples[13:18], class_3_samples[9:12],
class_4_samples[11:14])
fold_4_samples = c(class_1_samples[7:8], class_2_samples[19:24], class_3_samples[13:15],
class_4_samples[15:19])
fold_5_samples = c(class_1_samples[9:11], class_2_samples[25:29], class_3_samples[16:18],
class_4_samples[20:25])
fold_samples = list(fold_1_samples, fold_2_samples, fold_3_samples, fold_4_samples,
fold_5_samples)
## Since we have 4 classes, let's use an F-statistic, which is a measure of between-class
## variability to within-class variability. Here's a function to compute F-statistics
## for a given set of intensities.
## Per-feature one-way ANOVA F-statistics.
##
## Y:  numeric matrix of intensities, features in rows and samples in columns.
## ID: length-ncol(Y) vector of class labels.
##
## Returns a numeric vector with one F-statistic per row of Y:
## (between-class mean square) / (within-class mean square).
## Fix: the original returned the raw ratio of sums of squares, omitting the
## degrees-of-freedom scaling (k - 1 and n - k).  That scaling is the same
## positive constant for every feature, so the feature ranking used by the
## callers (which only order these values) is unchanged, but the values now
## agree with anova()/summary(lm()).
F_stat = function(Y, ID) {
  n_k = as.numeric(table(ID))
  n = sum(n_k)
  k = length(n_k)
  X = model.matrix(~ factor(ID) - 1)
  ## Per-class means (features x classes) and overall per-feature means.
  Y_bars = t(t(Y %*% X) / n_k)
  Y_bars_overall = drop(Y %*% rep(1 / n, n))
  ## Between-class and within-class sums of squares, per feature.
  SS_between = drop(t(t((Y_bars - Y_bars_overall) ^ 2) * n_k) %*% rep(1, k))
  SS_within = drop((Y - Y_bars %*% t(X)) ^ 2 %*% rep(1, n))
  ## Scale by degrees of freedom to obtain the usual F-statistic.
  (SS_between / (k - 1)) / (SS_within / (n - k))
}
## Now do CV. For each CV fold, we'll build and assess the accuracy of feature set sizes
## of 2, 3, ..., 30, 40, 50, ..., 2300. We need to do the feature selection within each
## CV fold to avoid model selection bias.
f_sizes = c(2:30, seq(40, 2300, by = 10))
acc_f_b = matrix(NA, nrow = length(f_sizes), ncol = 5)
for(b in 1:5) {
cat(".")
Y_test = data[, fold_samples[[b]]]
Y_train = data[, -fold_samples[[b]]]
id_test = id[fold_samples[[b]]]
id_train = id[-fold_samples[[b]]]
n_test = length(id_test)
n_train = length(id_train)
## Feature selection.
FF = F_stat(Y_train, id_train)
oo = order(FF, decreasing = T)
for(f in 1:length(f_sizes)) {
feature_set = oo[1:f_sizes[f]]
y_train = Y_train[feature_set, ]
y_test = Y_test[feature_set, ]
pred_KNN = knn(t(y_train), t(y_test), id_train, k = 5)
acc_f_b[f, b] = mean(pred_KNN == id_test)
}
}
acc_f = rowMeans(acc_f_b)
## Here's the big picture. We obtain basically 100% accuracy with about 30 features. Note
## that we are actually *worse* off when including more features, beyond a certain point.
## This is common, as eventually we're just adding noise to the classifier.
par(mfrow = c(1, 1))
plot(f_sizes, acc_f, xlab = "Feature set size", ylab = "CV-based accuracy", lwd = 2,
type = "s")
## Here's the same picture, but zoomed in on just the lower subset sizes (2, 3, ..., 30).
plot(f_sizes[1:29], acc_f[1:29], xlab = "Feature set size", ylab = "CV-based accuracy",
lwd = 2, type = "s")
## Boxplots for the top 4 features. Two features that characterize class 4 and 2 features
## that characterize class 2. An optimal subset might include just one of each, if the
## others provide redundant information.
FF = F_stat(data, id)
oo = order(FF, decreasing = T)
par(mfrow = c(2, 2))
boxplot(data[oo[1], id == 1], data[oo[1], id == 2], data[oo[1], id == 3],
data[oo[1], id == 4])
boxplot(data[oo[2], id == 1], data[oo[2], id == 2], data[oo[2], id == 3],
data[oo[2], id == 4])
boxplot(data[oo[3], id == 1], data[oo[3], id == 2], data[oo[3], id == 3],
data[oo[3], id == 4])
boxplot(data[oo[4], id == 1], data[oo[4], id == 2], data[oo[4], id == 3],
data[oo[4], id == 4])
features_KNN_fs <- (1:m)[oo[1:30]]
####
#### Classification tree.
####
library(tree)
## Because of the relatively low sample sizes in the different tumor types, the 'tree'
## function ends up only using 3 of the features, achieving a training accuracy of about
## 96%.
fit_tree <- tree(GRP ~ ., data = Y_train_df)
summary(fit_tree)
plot(fit_tree)
text(fit_tree)
## The test accuracy is only about 76%, making it inferior to all the methods considered
## above (in this example, at least). Note, however, the very simple interpretation of
## the classifier. In terms of defining a diagnostic tool, it would be straightforward to
## talk in terms like "if this gene has expression below this threshold, then if that
## gene has expression above that threshold, then you are tumor type X."
pred_tree <- predict(fit_tree, newdata = Y_test_df, type = "class")
table(pred_tree, Y_test_df$GRP)
## While the tree is already small, we could try pruning it further. According to CV, the
## estimated test error is 87% for the full tree, or for the tree with one fewer terminal
## node.
set.seed(101)
cv_tree <- cv.tree(fit_tree, FUN = prune.misclass)
cv_tree
plot(cv_tree$size, cv_tree$dev, type = "b")
## Here is the tree pruned to 4 leaves. Basically the same as the tree from above, just
## with the second split around X1 removed. The test accuracy is actually the same as
## we saw above, about 76%. Note that CV overestimated this.
pruned_tree <- prune.misclass(fit_tree, best = 4)
plot(pruned_tree)
text(pruned_tree)
pred_pruned_tree <- predict(pruned_tree, newdata = Y_test_df, type = "class")
table(pred_pruned_tree, Y_test_df$GRP)
features_tree <- c(1, 107, 1319)
## Overlap between features selected by tree, KNN + feature selection, lasso.
feature_overlap <- matrix(NA, nrow = 3, ncol = 3)
rownames(feature_overlap) <- colnames(feature_overlap) <- c("Tree", "KNN_fs", "Lasso")
feature_overlap[1, 2] <- mean(features_tree %in% features_KNN_fs)
feature_overlap[1, 3] <- mean(features_tree %in% features_lasso)
feature_overlap[2, 3] <- mean(features_KNN_fs %in% features_lasso)
####
#### Random forest.
####
library(randomForest)
set.seed(101)
## The bagged tree achieves an accuracy of nearly 100%.
fit_rf <- randomForest(GRP ~ ., data = Y_train_df, mtry = 2, importance = TRUE)
fit_rf
pred_rf <- predict(fit_rf, newdata = Y_test_df)
table(pred_rf, Y_test_df$GRP)
imp <- importance(fit_rf)
imp[order(imp[, "MeanDecreaseGini"], decreasing = TRUE), ][1:20, ]
varImpPlot(fit_rf)
####
#### SVM.
####
library(e1071)
set.seed(101)
## The 'tune' function takes a long time to run, if we want to evaluate cost together
## with polynomial degree or cost together with radial kernel gamma parameter. After
## running for a while on my own, it appears that a support vector (linear) classifier is
## the most accurate of the polynomial and radial kernel classifiers. Accuracy does not
## appear to change much with 'cost', so I'll just use 'cost' = 1.
fit_svm <- svm(GRP ~ ., data = Y_train_df, kernel = "linear", cost = 1)
pred_svm <- predict(fit_svm, newdata = Y_test_df)
table(pred_svm, Y_test_df$GRP)
####
#### Neural networks. I am not going to cover them. See 'nnet' package.
####
| /tamu/mva/R/CrossValidation/khan_script.r | no_license | sadepu1915/data-science | R | false | false | 15,714 | r | ## These data come from gene expression microarray experiments on small, round blue-cell
## tumors (SRCTs), described in Khan et al (2001). Nature Medicine, 7: 673-679. There
## are four different SRBCT tumor types: neuroblastoma (NB), rhabdomyosarcoma (RMS), non-
## Hodgkin lymphoma (NHL), and the Ewing family of tumors (EWS). The column names
## indicate the tumor type, with 1 corresponding to NHL, 2 to EWS, 3 to NB, and 4 to RMS.
## Load data.
rm(list=ls())
setwd("~/MS/636/Code")
data = as.matrix(read.csv("./data/khan.csv"))
id = colnames(data)
for(i in 1:ncol(data))
id[i] = substr(id[i], 2, 2)
m = nrow(data)
n = ncol(data)
n_k = as.numeric(table(id))
## Fix up the column names
c_names_1 = rep("1", n_k[1])
c_names_2 = rep("2", n_k[2])
c_names_3 = rep("3", n_k[3])
c_names_4 = rep("4", n_k[4])
for(i in 1:n_k[1])
c_names_1[i] = paste(c_names_1[i], "_", i, sep = "")
for(i in 1:n_k[2])
c_names_2[i] = paste(c_names_2[i], "_", i, sep = "")
for(i in 1:n_k[3])
c_names_3[i] = paste(c_names_3[i], "_", i, sep = "")
for(i in 1:n_k[4])
c_names_4[i] = paste(c_names_4[i], "_", i, sep = "")
c_names = c(c_names_1, c_names_2, c_names_3, c_names_4)
tmp = id
tmp[id == 1] = c_names_1
tmp[id == 2] = c_names_2
tmp[id == 3] = c_names_3
tmp[id == 4] = c_names_4
c_names = tmp
colnames(data) = c_names
## Basic summary statistics
data = as.matrix(data)
X = model.matrix(~ factor(id) - 1)
grp_means = t(t(data %*% X) / n_k)
grp_sds = sqrt(t(t(((data - grp_means %*% t(X)) ^ 2) %*% X) / (n_k - 1)))
####
#### EDA.
####
#### Boxplots and scatterplots
## Some example gene-specific boxplots.
par(mfrow = c(2, 2))
boxplot(data[1, id == 1], data[1, id == 2], data[1, id == 3], data[1, id == 4])
boxplot(data[2, id == 1], data[2, id == 2], data[2, id == 3], data[2, id == 4])
boxplot(data[3, id == 1], data[3, id == 2], data[3, id == 3], data[3, id == 4])
boxplot(data[4, id == 1], data[4, id == 2], data[4, id == 3], data[4, id == 4])
## Array-specific boxplots by groups.
## Fix: the original labeled panels 2, 3 and 4 all as "Tumor type 2"
## (copy-paste bug); each panel title now matches the subset plotted.
par(mfrow = c(2, 2))
boxplot(data[, id == 1], ylim = c(-6, 3), main = "Tumor type 1")
boxplot(data[, id == 2], ylim = c(-6, 3), main = "Tumor type 2")
boxplot(data[, id == 3], ylim = c(-6, 3), main = "Tumor type 3")
boxplot(data[, id == 4], ylim = c(-6, 3), main = "Tumor type 4")
## Overall comparisons of groups, in terms of both means and s.d.s.
par(mfrow = c(1, 2))
boxplot(grp_means[, 1], grp_means[, 2], grp_means[, 3], grp_means[, 4], names = 1:4,
main = "Means by tumor type")
boxplot(grp_sds[, 1], grp_sds[, 2], grp_sds[, 3], grp_sds[, 4], names = 1:4,
main = "S.D.s by tumor type")
## Pairwise scatterplots
par(mfrow = c(1, 2))
plot(data[, 1], data[, 2], pch = 20, col = "grey", xlab = "Array 1", ylab = "Array 2")
abline(0, 1, lty = 2)
lines(lowess(data[, 1], data[, 2]), lwd = 2, col = "blue")
plot((data[, 1] + data[, 2]) / 2, data[, 2] - data[, 1], pch = 20, col = "grey",
xlab = "A: (Array 1 + Array 2) / 2", ylab = "M: Array 2 - Array 1")
abline(0, 0, lty = 2)
lines(lowess((data[, 1] + data[, 2]) / 2, data[, 2] - data[, 1]), lwd = 2, col = "blue")
## Mean, variance relationship?
par(mfrow = c(2, 2))
plot(grp_means[, 1], grp_sds[, 1], xlab = "means_1", ylab = "sds_1", pch = 20,
col = "grey")
lines(lowess(grp_means[, 1], grp_sds[, 1]), lwd = 2, col = "blue")
plot(grp_means[, 2], grp_sds[, 2], xlab = "means_2", ylab = "sds_2", pch = 20,
col = "grey")
lines(lowess(grp_means[, 2], grp_sds[, 2]), lwd = 2, col = "blue")
plot(grp_means[, 3], grp_sds[, 3], xlab = "means_3", ylab = "sds_3", pch = 20,
col = "grey")
lines(lowess(grp_means[, 3], grp_sds[, 3]), lwd = 2, col = "blue")
plot(grp_means[, 4], grp_sds[, 4], xlab = "means_4", ylab = "sds_4", pch = 20,
col = "grey")
lines(lowess(grp_means[, 4], grp_sds[, 4]), lwd = 2, col = "blue")
#### Clustering.
## Clustering on samples.
KM = kmeans(t(data), 4)
table(id, KM$cluster)
par(mfrow = c(1, 1))
HC = hclust(dist(t(data)), method = "complete")
plot(HC)
plot(HC$height)
#### SVD.
data_c = t(scale(t(data), center = T, scale = F))
ss = svd(data_c)
dd = ss$d ^ 2 / sum(ss$d ^ 2)
round(dd, 2)
oo = order(id)
plot(1:83, ss$v[oo, 1], type = "l", col = "green", ylim = c(-0.4, 0.4), lwd = 2)
lines(1:83, ss$v[oo, 2], col = "lightblue")
lines(1:83, ss$v[oo, 3], col = "pink")
abline(0, 0, lty = 2, col = "black")
for(i in cumsum(n_k)[1:3])
lines(c(i, i), c(-1, 1))
####
#### Differential expression analysis, looking for differences between the four tumor
#### classes.
####
## Per-gene one-way ANOVA across the four tumor types; store each gene's
## overall F-test p-value.
p_vals <- rep(NA_real_, m)
for(i in seq_len(m)) {
  d_i <- as.numeric(data[i, ])
  ## summary(lm(...))$fstat is c(value, numdf, dendf).
  fit <- summary(lm(d_i ~ 1 + factor(id)))$fstat
  ## lower.tail = FALSE is numerically more accurate than 1 - pf(...)
  ## when p-values are very small (as expected for top genes here).
  p_vals[i] <- pf(fit[1], fit[2], fit[3], lower.tail = FALSE)
}
## Histogram of p-values.
par(mfrow = c(1, 1))
hh = hist(p_vals, prob = T, main = "P-values", xlab = "")
abline(1, 0, lty = 2)
## Estimate FDR and q-values.
pi_0_hat = mean(hh$density[8:10])
library(qvalue)
qq = qvalue(p_vals)
qq$pi0
pi_0_hat
FDR_hat = rep(NA, m)
for(i in 1:m) {
FDR_hat[i] = (pi_0_hat * m * p_vals[i]) / sum(p_vals <= p_vals[i])
}
q_vals = rep(NA, m)
for(i in 1:m) {
q_vals[i] = min(FDR_hat[p_vals >= p_vals[i]])
}
## P-values vs. q-values.
plot(p_vals[order(p_vals)], q_vals[order(p_vals)], type = "s",
xlab = "P-values", ylab = "Q-values")
abline(0, 1, lty = 2)
## Number of genes selected at each q-value threshold.
S_hat = rep(NA, m)
for(i in 1:m) {
S_hat[i] = sum(q_vals <= q_vals[i])
}
oo = order(q_vals)
plot(q_vals[oo], S_hat[oo], type = "s", xlab = "q-value", ylab =
"number of features selected")
## Can obtain 1000 features with an FDR < 0.01.
which.min(abs(S_hat - 1000))
S_hat[2123]
q_vals[2123]
######
###### Classification.
######
## Set aside roughly 2 / 3 of the samples for training. Pick these randomly, within each
## tumor type.
set.seed(101)
tr_ii <- NULL
for(i in 1:4) {
jj <- (1:n)[as.numeric(id) == i]
n_i <- length(jj)
n_tr <- floor(n_i * 2 / 3)
tr_ii <- c(tr_ii, sample(jj, n_tr, replace = FALSE))
}
Y_df <- data.frame("GRP" = factor(id), t(data)) # col is response var
Y_train_df <- Y_df[tr_ii, ]
Y_test_df <- Y_df[-tr_ii, ]
Y_df[1:5, 1:5]
dim(Y_train_df)
dim(Y_test_df)
####
#### Naive Bayes is 86% accurate.
####
library(e1071)
# gen synrax for predictive
# fit model tilda
# pred
#build model qda,lda, naive
fit_NB <- naiveBayes(GRP ~ ., data = Y_train_df) # .dot means vs all pred variables
class(fit_NB)
#[1] "naiveBayes"
#fit that with data
#lot of predict functions
#? predict -- gen fn
# predict.class
#?predict.naiveBayes
# it calls appro predict based on class obj ( lda, qda, naive)
pred_NB <- predict(fit_NB, newdata = Y_test_df)
# confusion matrix
# cross tabulate pred with truth
# dioagnoal eles are correct pred
# 4 cases we pred in class 1 and match with truth
table(pred_NB, Y_test_df$GRP)
# overall accuracy
# how many off diagnoal non zero /total no of pred = overall correct pred
mean(pred_NB == Y_test_df$GRP)
####
#### KNN. Choice of K doesn't seem to matter much, with the accuracies apparently bobbing
#### randomly about 87% accuracy or so.
####
library(class)
pred_KNN <- rep(NA, 10)
for(k in 1:10) {
pred_KNN_k <- knn(Y_train_df[, -1], Y_test_df[, -1], Y_train_df$GRP, k = k)
pred_KNN[k] <- mean(pred_KNN_k == Y_test_df$GRP)
}
plot(1:10, pred_KNN, xlab = "K", ylab = "Accuracy", xaxt = "n", type = "l", lwd = 2)
axis(1, at = 1:10)
####
#### Lasso is nearly 100% accurate, using about 30 features.
####
library(glmnet)
set.seed(101)
cv_lasso <- cv.glmnet(as.matrix(Y_df[, -1]), Y_df$GRP, alpha = 1, family = "multinomial",
type.measure = "class")
plot(cv_lasso)
grid <- cv_lasso$lambda
fit_lasso <- glmnet(as.matrix(Y_df[, -1]), Y_df$GRP, alpha = 1, family = "multinomial",
type.multinomial = "grouped", lambda = grid)
par(mfrow = c(2, 2))
plot(fit_lasso, xvar = "lambda")
lambda_min <- cv_lasso$lambda.min
cf_lasso <- cbind(coef(fit_lasso)[[1]][-1, match(lambda_min, fit_lasso$lambda)],
coef(fit_lasso)[[2]][-1, match(lambda_min, fit_lasso$lambda)],
coef(fit_lasso)[[3]][-1, match(lambda_min, fit_lasso$lambda)],
coef(fit_lasso)[[4]][-1, match(lambda_min, fit_lasso$lambda)])
features_lasso <- (1:m)[rowSums(cf_lasso) != 0]
####
#### Feature selection, then KNN. As lasso suggests, high accuracy can be obtained with
#### a small fraction of the total number of features. Let's try carrying out formal
#### feature selection via feature-specific statistical tests. We'll just do K = 5 for
#### KNN.
####
## Let's do five-fold cross-validation. Our sample sizes in each group don't divide
## nicely into five groups, so we'll use these sample sizes for testing in each fold.
fold_1_n = c(2, 6, 4, 5)
fold_2_n = c(2, 6, 4, 5)
fold_3_n = c(2, 6, 4, 4)
fold_4_n = c(2, 6, 3, 5)
fold_5_n = c(3, 5, 3, 6)
fold_n = rbind(fold_1_n, fold_2_n, fold_3_n, fold_4_n, fold_5_n)
## Now randomly select the above numbers from each class.
class_1_samples = sample((1:n)[id == 1])
class_2_samples = sample((1:n)[id == 2])
class_3_samples = sample((1:n)[id == 3])
class_4_samples = sample((1:n)[id == 4])
fold_1_samples = c(class_1_samples[1:2], class_2_samples[1:6], class_3_samples[1:4],
class_4_samples[1:5])
fold_2_samples = c(class_1_samples[3:4], class_2_samples[7:12], class_3_samples[5:8],
class_4_samples[6:10])
fold_3_samples = c(class_1_samples[5:6], class_2_samples[13:18], class_3_samples[9:12],
class_4_samples[11:14])
fold_4_samples = c(class_1_samples[7:8], class_2_samples[19:24], class_3_samples[13:15],
class_4_samples[15:19])
fold_5_samples = c(class_1_samples[9:11], class_2_samples[25:29], class_3_samples[16:18],
class_4_samples[20:25])
fold_samples = list(fold_1_samples, fold_2_samples, fold_3_samples, fold_4_samples,
fold_5_samples)
## Since we have 4 classes, let's use an F-statistic, which is a measure of between-class
## variability to within-class variability. Here's a function to compute F-statistics
## for a given set of intensities.
## Per-feature F-like statistic: ratio of between-class SS to within-class SS,
## computed for all rows of Y at once via matrix algebra.
## Y: numeric matrix (features x samples); ID: class label per sample (column).
## Returns one ratio per feature; no df scaling (used only for ranking).
F_stat = function(Y, ID) {
n_k = as.numeric(table(ID))  # per-class sample counts
n = sum(n_k)
k = length(n_k)
X = model.matrix(~ factor(ID) - 1)  # samples x classes indicator matrix
Y_bars = t(t(Y %*% X) / n_k)  # class means per feature (features x classes)
Y_bars_overall = drop(Y %*% rep(1 / n, n))  # overall mean per feature
F_num = drop(t(t((Y_bars - Y_bars_overall) ^ 2) * n_k) %*% rep(1, k))  # between-class SS
F_den = drop((Y - Y_bars %*% t(X)) ^ 2 %*% rep(1, n))  # within-class SS
return(F_num / F_den)
}
## Now do CV. For each CV fold, we'll build and assess the accuracy of feature set sizes
## of 2, 3, ..., 30, 40, 50, ..., 2300. We need to do the feature selection within each
## CV fold to avoid model selection bias.
f_sizes = c(2:30, seq(40, 2300, by = 10))
acc_f_b = matrix(NA, nrow = length(f_sizes), ncol = 5)
## Five-fold CV. For each fold, features are ranked on the TRAINING data
## only (re-selecting within each fold avoids model-selection bias), then
## 5-NN accuracy is recorded for every candidate feature-set size.
for(b in 1:5) {
  cat(".")
  ## Split columns (samples) into the held-out fold and the training remainder.
  Y_test <- data[, fold_samples[[b]]]
  Y_train <- data[, -fold_samples[[b]]]
  id_test <- id[fold_samples[[b]]]
  id_train <- id[-fold_samples[[b]]]
  n_test <- length(id_test)
  n_train <- length(id_train)
  ## Feature selection: rank features by F-statistic computed on training data only.
  FF <- F_stat(Y_train, id_train)
  oo <- order(FF, decreasing = TRUE)
  for(f in seq_along(f_sizes)) {
    feature_set <- oo[1:f_sizes[f]]
    y_train <- Y_train[feature_set, ]
    y_test <- Y_test[feature_set, ]
    ## knn() expects samples in rows, hence the transposes.
    pred_KNN <- knn(t(y_train), t(y_test), id_train, k = 5)
    acc_f_b[f, b] <- mean(pred_KNN == id_test)
  }
}
acc_f = rowMeans(acc_f_b)
## Here's the big picture. We obtain basically 100% accuracy with about 30 features. Note
## that we are actually *worse* off when including more features, beyond a certain point.
## This is common, as eventually we're just adding noise to the classifier.
par(mfrow = c(1, 1))
plot(f_sizes, acc_f, xlab = "Feature set size", ylab = "CV-based accuracy", lwd = 2,
type = "s")
## Here's the same picture, but zoomed in on just the lower subset sizes (2, 3, ..., 30).
plot(f_sizes[1:29], acc_f[1:29], xlab = "Feature set size", ylab = "CV-based accuracy",
lwd = 2, type = "s")
## Boxplots for the top 4 features. Two features that characterize class 4 and 2 features
## that characterize class 2. An optimal subset might include just one of each, if the
## others provide redundant information.
FF = F_stat(data, id)
oo = order(FF, decreasing = T)
par(mfrow = c(2, 2))
boxplot(data[oo[1], id == 1], data[oo[1], id == 2], data[oo[1], id == 3],
data[oo[1], id == 4])
boxplot(data[oo[2], id == 1], data[oo[2], id == 2], data[oo[2], id == 3],
data[oo[2], id == 4])
boxplot(data[oo[3], id == 1], data[oo[3], id == 2], data[oo[3], id == 3],
data[oo[3], id == 4])
boxplot(data[oo[4], id == 1], data[oo[4], id == 2], data[oo[4], id == 3],
data[oo[4], id == 4])
features_KNN_fs <- (1:m)[oo[1:30]]
####
#### Classification tree.
####
library(tree)
## Because of the relatively low sample sizes in the different tumor types, the 'tree'
## function ends up only using 3 of the features, achieving a training accuracy of about
## 96%.
fit_tree <- tree(GRP ~ ., data = Y_train_df)
summary(fit_tree)
plot(fit_tree)
text(fit_tree)
## The test accuracy is only about 76%, making it inferior to all the methods considered
## above (in this example, at least). Note, however, the very simple interpretation of
## the classifier. In terms of defining a diagnostic tool, it would be straightforward to
## talk in terms like "if this gene has expression below this threshold, then if that
## gene has expression above that threshold, then you are tumor type X."
pred_tree <- predict(fit_tree, newdata = Y_test_df, type = "class")
table(pred_tree, Y_test_df$GRP)
## While the tree is already small, we could try pruning it further. According to CV, the
## estimated test error is 87% for the full tree, or for the tree with one fewer terminal
## node.
set.seed(101)
cv_tree <- cv.tree(fit_tree, FUN = prune.misclass)
cv_tree
plot(cv_tree$size, cv_tree$dev, type = "b")
## Here is the tree pruned to 4 leaves. Basically the same as the tree from above, just
## with the second split around X1 removed. The test accuracy is actually the same as
## we saw above, about 76%. Note that CV overestimated this.
pruned_tree <- prune.misclass(fit_tree, best = 4)
plot(pruned_tree)
text(pruned_tree)
pred_pruned_tree <- predict(pruned_tree, newdata = Y_test_df, type = "class")
table(pred_pruned_tree, Y_test_df$GRP)
features_tree <- c(1, 107, 1319)
## Overlap between features selected by tree, KNN + feature selection, lasso.
feature_overlap <- matrix(NA, nrow = 3, ncol = 3)
rownames(feature_overlap) <- colnames(feature_overlap) <- c("Tree", "KNN_fs", "Lasso")
feature_overlap[1, 2] <- mean(features_tree %in% features_KNN_fs)
feature_overlap[1, 3] <- mean(features_tree %in% features_lasso)
feature_overlap[2, 3] <- mean(features_KNN_fs %in% features_lasso)
####
#### Random forest.
####
library(randomForest)
set.seed(101)
## The bagged tree achieves an accuracy of nearly 100%.
fit_rf <- randomForest(GRP ~ ., data = Y_train_df, mtry = 2, importance = TRUE)
fit_rf
pred_rf <- predict(fit_rf, newdata = Y_test_df)
table(pred_rf, Y_test_df$GRP)
imp <- importance(fit_rf)
imp[order(imp[, "MeanDecreaseGini"], decreasing = TRUE), ][1:20, ]
varImpPlot(fit_rf)
####
#### SVM.
####
library(e1071)
set.seed(101)
## The 'tune' function takes a long time to run, if we want to evaluate cost together
## with polynomial degree or cost together with radial kernel gamma parameter. After
## running for a while on my own, it appears that a support vector (linear) classifier is
## the most accurate of the polynomial and radial kernel classifiers. Accuracy does not
## appear to change much with 'cost', so I'll just use 'cost' = 1.
fit_svm <- svm(GRP ~ ., data = Y_train_df, kernel = "linear", cost = 1)
pred_svm <- predict(fit_svm, newdata = Y_test_df)
table(pred_svm, Y_test_df$GRP)
####
#### Neural networks. I am not going to cover them. See 'nnet' package.
####
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hyperdrive.R
\name{lognormal}
\alias{lognormal}
\title{Specify a normal distribution of the form \code{exp(normal(mu, sigma))}}
\usage{
lognormal(mu, sigma)
}
\arguments{
\item{mu}{A double of the mean of the normal distribution.}
\item{sigma}{A double of the standard deviation of the normal distribution.}
}
\value{
A list of the stochastic expression.
}
\description{
Specify a normal distribution of the form \code{exp(normal(mu, sigma))}.
The logarithm of the return value is normally distributed. When optimizing,
this variable is constrained to be positive.
}
\seealso{
\code{random_parameter_sampling()}, \code{grid_parameter_sampling()},
\code{bayesian_parameter_sampling()}
}
| /man/lognormal.Rd | permissive | Azure/azureml-sdk-for-r | R | false | true | 766 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hyperdrive.R
\name{lognormal}
\alias{lognormal}
\title{Specify a normal distribution of the form \code{exp(normal(mu, sigma))}}
\usage{
lognormal(mu, sigma)
}
\arguments{
\item{mu}{A double of the mean of the normal distribution.}
\item{sigma}{A double of the standard deviation of the normal distribution.}
}
\value{
A list of the stochastic expression.
}
\description{
Specify a normal distribution of the form \code{exp(normal(mu, sigma))}.
The logarithm of the return value is normally distributed. When optimizing,
this variable is constrained to be positive.
}
\seealso{
\code{random_parameter_sampling()}, \code{grid_parameter_sampling()},
\code{bayesian_parameter_sampling()}
}
|
## Quiz answers: each numbered section probes an R language subtlety.
## Code is kept exactly as written -- the "bugs" are the point of the quiz.
rm(list=ls())  # NOTE(review): workspace-clearing kept as in the quiz; avoid in production scripts
#2
## 'if' expects a scalar condition but x is a length-10 vector here.
## In R >= 4.2 this is an error ("the condition has length > 1"); older
## versions warned and used only the first element (1 > 5 is FALSE, so
## x would be left unchanged).
x<-1:10
if(x>5){
x<-0
}
#3
## Lexical scoping: the free variable z inside g() resolves in the
## environment where g was DEFINED (inside f, where z is 4), not the
## global z of 10. So f(3) = 3 + g(3) = 3 + (3 + 4) = 10.
f<-function(x){
g<-function(y){
y+z
}
z<-4
x+g(x)
}
z<-10
f(3)
#4
## 'if' is an expression in R, so its value can be assigned.
## x is 5, the condition x < 3 is FALSE, so y becomes 10.
x<-5
y<- if(x<3){
NA
} else {
10
}
y
#5
## Trap function: with the default y = NULL the else branch evaluates
## z + f, which adds the *function* f (from #3) to a matrix -- a runtime
## error. When y is supplied, note that return(g) fires only for
## d == 3L; otherwise 10 is added before g is returned.
h <- function(x, y = NULL, d = 3L) {
z <- cbind(x, d)
if(!is.null(y))
z <- z + y
else
z <- z + f
g <- x + y / z
if(d == 3L)
return(g)
g <- g + 10
g
}
#2
x<-1:10
if(x>5){
x<-0
}
#3
f<-function(x){
g<-function(y){
y+z
}
z<-4
x+g(x)
}
z<-10
f(3)
#4
x<-5
y<- if(x<3){
NA
} else {
10
}
y
#5
h <- function(x, y = NULL, d = 3L) {
z <- cbind(x, d)
if(!is.null(y))
z <- z + y
else
z <- z + f
g <- x + y / z
if(d == 3L)
return(g)
g <- g + 10
g
} |
# Download the compressed storm data set (skipped if already present).
# Fix: the destination must match the file read below ("storm.csv.bz2");
# the original saved to "storm.bz2", so read_csv("storm.csv.bz2") failed
# on a fresh run. mode = "wb" keeps the binary .bz2 intact on Windows.
if (!file.exists("storm.csv.bz2")) {
  download.file("https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2",
                "storm.csv.bz2", mode = "wb")
}
# loading packages
library(tidyverse)
library(lubridate)
library(stringr)
# reading in the data
data_raw <- read_csv("storm.csv.bz2")
# preprocessing
data <- data_raw %>%
select(State = STATE, Date = BGN_DATE, County = COUNTYNAME, Event = EVTYPE,
Fatalities = FATALITIES, Injuries = INJURIES, Property_Damage = PROPDMG, Crop_Damage = CROPDMG,
Property_Damage_Scale = PROPDMGEXP, Crop_Damage_Scale = CROPDMGEXP) %>%
mutate(Date = mdy(str_extract(Date, "\\d{1,2}/\\d{1,2}/\\d{4}")),
Year = year(Date))
# quick glance at the population and time span
data %>%
count(State) %>%
arrange(desc(n))
data %>%
count(Event) %>%
arrange(desc(n))
# recoding some variables
data <- data %>%
mutate(Event = ifelse(Event %in% c("TSTM WIND", "THUNDERSTORM WINDS"), "THUNDERSTORM WIND", Event))
data %>%
count(Event) %>%
arrange(desc(n))
# Plotting number of events
data %>%
count(Year) %>%
arrange(Year) %>%
ggplot() + geom_line(aes(Year, n), col = "darkred", lwd = 1) + ggtitle("Yearly number of events") +
ylab("")
# which were the most harmful to population
victims <- data %>%
group_by(Event) %>%
summarise(Fatalities = sum(Fatalities),
Injuries = sum(Injuries)) %>%
mutate(Victims = Fatalities + Injuries) %>%
arrange(desc(Victims)) %>%
slice(1:5)
ggplot(victims) + geom_col(aes(reorder(Event, -Victims), Victims), fill = "darkred") +
ggtitle("The most harmful events for the population") + xlab("Event")
# which were the most harmful economically
data %>%
filter(Property_Damage_Scale == "B") %>%
arrange(desc(Property_Damage)) %>%
slice(1:3) %>%
select(State, Date, Event, Property_Damage)
data %>%
filter(Crop_Damage_Scale == "B") %>%
arrange(desc(Crop_Damage)) %>%
slice(1:3) %>%
select(State, Date, Event, Crop_Damage)
| /Data Science/Reproducible Research/Projects/Project2.R | no_license | wizden5/Coursera | R | false | false | 1,957 | r | # downloading file
# Fix: the destination must match the file read below ("storm.csv.bz2");
# saving to "storm.bz2" made read_csv("storm.csv.bz2") fail on a fresh
# run. mode = "wb" keeps the binary .bz2 intact on Windows.
if (!file.exists("storm.csv.bz2")) {
  download.file("https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2",
                "storm.csv.bz2", mode = "wb")
}
# loading packages
library(tidyverse)
library(lubridate)
library(stringr)
# reading in the data
data_raw <- read_csv("storm.csv.bz2")
# preprocessing
data <- data_raw %>%
select(State = STATE, Date = BGN_DATE, County = COUNTYNAME, Event = EVTYPE,
Fatalities = FATALITIES, Injuries = INJURIES, Property_Damage = PROPDMG, Crop_Damage = CROPDMG,
Property_Damage_Scale = PROPDMGEXP, Crop_Damage_Scale = CROPDMGEXP) %>%
mutate(Date = mdy(str_extract(Date, "\\d{1,2}/\\d{1,2}/\\d{4}")),
Year = year(Date))
# quick glance at the population and time span
data %>%
count(State) %>%
arrange(desc(n))
data %>%
count(Event) %>%
arrange(desc(n))
# recoding some variables
data <- data %>%
mutate(Event = ifelse(Event %in% c("TSTM WIND", "THUNDERSTORM WINDS"), "THUNDERSTORM WIND", Event))
data %>%
count(Event) %>%
arrange(desc(n))
# Plotting number of events
data %>%
count(Year) %>%
arrange(Year) %>%
ggplot() + geom_line(aes(Year, n), col = "darkred", lwd = 1) + ggtitle("Yearly number of events") +
ylab("")
# which were the most harmful to population
victims <- data %>%
group_by(Event) %>%
summarise(Fatalities = sum(Fatalities),
Injuries = sum(Injuries)) %>%
mutate(Victims = Fatalities + Injuries) %>%
arrange(desc(Victims)) %>%
slice(1:5)
ggplot(victims) + geom_col(aes(reorder(Event, -Victims), Victims), fill = "darkred") +
ggtitle("The most harmful events for the population") + xlab("Event")
# which were the most harmful economically
data %>%
filter(Property_Damage_Scale == "B") %>%
arrange(desc(Property_Damage)) %>%
slice(1:3) %>%
select(State, Date, Event, Property_Damage)
data %>%
filter(Crop_Damage_Scale == "B") %>%
arrange(desc(Crop_Damage)) %>%
slice(1:3) %>%
select(State, Date, Event, Crop_Damage)
|
startTime <- Sys.time()
SSHFS <- F
setDir <- ifelse(SSHFS, "/media/electron", "")
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1)
settingF <- args[1]
stopifnot(file.exists(settingF))
pipScriptDir <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline")
script0_name <- "0_prepGeneData"
script1_name <- "1_runGeneDE"
script8_name <- "8c_runAllDown"
script_name <- "14i_cumulAllDown_limited_randomTADsShuffle"
stopifnot(file.exists(paste0(pipScriptDir, "/", script_name, ".R")))
cat(paste0("> START ", script_name, "\n"))
source("main_settings.R")
#source("run_settings.R")
source(settingF)
source(paste0(pipScriptDir, "/", "TAD_DE_utils.R"))
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(doMC, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(flux, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(RColorBrewer, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
registerDoMC(ifelse(SSHFS,2, nCpu)) # loaded from main_settings.R
# create the directories
curr_outFold <- paste0(pipOutFold, "/", script_name)
system(paste0("mkdir -p ", curr_outFold))
pipLogFile <- paste0(pipOutFold, "/", script_name, "_logFile.txt")
system(paste0("rm -f ", pipLogFile))
#****************************************************************************************************************************
# INPUT DATA
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = F)
gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
#### DRAW THE LEFT PLOT OF THE INITIAL 14E BUT THIS TIME PLOT THE DIFFERENT LINES AND AREA FOR
# -> FIX SIZE
# -> GAUSSIAN SIZE
# !!! WHAT IS DIFFERENT HERE: THE PERMUTATIONS DATA ARE NOT STORED IN A DATAFRAME BUT IN A LIST:
# WHEN SHUFFLING THE GENES, I ALWAYS KEEP THE SAME SET OF TADs BUT NOW THAT I SHUFFLE TAD
# THE NUMBER OF TADs MIGHT BE DIFFERENT (REGIONS OF THE GENOME WHERE NO GENES -> TAD DISCARDED)
# UPDATE SELECT THE GENES ACCORDING TO THE SETTINGS PREPARED IN 0_PREPGENEDATA
initList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/rna_geneList.Rdata"))))
geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/pipeline_geneList.Rdata"))))
txt <- paste0(toupper(script_name), "> Start with # genes: ", length(geneList), "/", length(initList), "\n")
printAndLog(txt, pipLogFile)
stopifnot(!any(duplicated(names(geneList))))
gene2tadDT <- gene2tadDT[gene2tadDT$entrezID %in% geneList,]
geneNbr <- setNames(as.numeric(table(gene2tadDT$region)), names(table(gene2tadDT$region)))
### SET OUTPUT
plotType <- "svg"
myHeight <- ifelse(plotType == "png", 480 , 7)
myWidth <- ifelse(plotType == "png", 600, 10)
# if plotSeparated == TRUE => 1 plot per ratio, otherwise all on the same figure (#x2 plots)
plotSeparated <- F
# "permThresh" the quantile of permutations to take is loaded from main_settings.R
# retrieved from main_settings.R
stopifnot(any(c(step14_for_randomTADsShuffle )))
step14_for_randomTADsShuffle <- TRUE
step14_for_randomTADsFix <- FALSE
step14_for_randomTADsGaussian <- FALSE
ratioSizeFix <- percentTADsizeFix
ratioSizeGaussian <- percentTADsizeGaussian
# for each of the ratioSize -> define the colors
# same colors for gaussian and fix
# but transparency for gaussian (because polygon overlap)
# polygonPermutCol = rgb(0/255,76/255,153/255, 0.3)
myPalette1 <- brewer.pal(9, "YlOrRd")
names(myPalette1) <- seq(0.1,0.9,by=0.1)
myPalette2 <- rev(brewer.pal(9, "YlGn"))
names(myPalette2) <- seq(1.1,1.9,by=0.1)
myPalette <- c(myPalette1, myPalette2)
fixColors_rgb <- lapply(myPalette, function(x) as.numeric(col2rgb(x)))
gaussianColors <- lapply(fixColors_rgb, function(x) {
x <- x/255
rgb(red = x[1], green=x[2], blue=x[3], alpha=0.3)
})
fixColors <- myPalette
shufflePolygonCol <- rgb(0/255,76/255,153/255, 0.3)
###################################################################################################
# get the direction of up/down
###################################################################################################
# retrieve the direction of up/down
DE_topTable <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_topTable.Rdata"))))
DE_geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_geneList.Rdata"))))
exprDT <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_rnaseqDT.Rdata"))))
samp1 <- eval(parse(text=load(paste0(setDir, "/", sample1_file))))
samp2 <- eval(parse(text=load(paste0(setDir, "/", sample2_file))))
stopifnot(all(DE_topTable$genes %in% names(DE_geneList)))
stopifnot(!any(duplicated(names(DE_geneList))))
stopifnot(all(colnames(exprDT) %in% c(samp1, samp2)))
stopifnot(all(samp1 %in% colnames(exprDT)))
stopifnot(all(samp2 %in% colnames(exprDT)))
maxDownGene <- DE_topTable$genes[which.min(DE_topTable$logFC)]
stopifnot(maxDownGene %in% rownames(exprDT))
mean_expr1 <- mean(unlist(c(exprDT[maxDownGene, samp1])), na.rm=T)
mean_expr2 <- mean(unlist(c(exprDT[maxDownGene, samp2])), na.rm=T)
if(mean_expr1 > mean_expr2) {
subtitDir <- paste0("down: ", toupper(cond1), " > ", toupper(cond2))
} else{
subtitDir <- paste0("down: ", toupper(cond2), " > ", toupper(cond1))
}
# allDown retrieved from main_settings.R
# allDown_limited <- c("ratioDown", "rescWeighted", "rescWeightedQQ", "prodSignedRatio")
if(! plotSeparated) {
nColPlot <- 1 # here I plot only with the area
# nRowPlot <- length(allDown_limited)*2/nColPlot # only 1 plot/row in this version
nRowPlot <- length(allDown_limited)*1/nColPlot
outFile <- paste0(curr_outFold, "/allRatios_cumsum_obs_permut.", plotType)
do.call(plotType, list(outFile, height=myHeight*nRowPlot, width=myWidth*nColPlot))
par(mfrow=c(nRowPlot, nColPlot))
}
for(curr_ratio_type in allDown_limited) {
cat(paste0("*** START ", curr_ratio_type, "\n"))
obs_curr_down <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/all_obs_", curr_ratio_type, ".Rdata"))))
sort_obs_curr_down <- sort(obs_curr_down, decreasing = TRUE)
# FOR ratioDown => plot ratioConcord, departure from 0.5
if(curr_ratio_type == "ratioDown") {
my_stat_curr_ratio <- "ratioDown_Concord"
departureFromValue <- 0.5
# => Concord, departure 0.5
# Transform ratioDown -> ratioConcord
# change so that the ratioDown ranges between 0.5 and 1 (-> e.g. treats 0.1 as 0.9)
# transf. ratioDown -> ratioConcord
sort_obs_curr_down <- abs(sort_obs_curr_down - 0.5) + 0.5
} else if(curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
my_stat_curr_ratio <- paste0(curr_ratio_type, "_Concord")
departureFromValue <- 0.5
# => Concord, departure 0.5
# Transform rescWeightedQQ -> rescWeightedQQConcord
# change so that the ratioDown ranges between 0.5 and 1 (-> e.g. treats 0.1 as 0.9)
# transf. ratioDown -> ratioConcord
sort_obs_curr_down <- abs(sort_obs_curr_down - 0.5) + 0.5
} else if(curr_ratio_type == "prodSignedRatio") {
my_stat_curr_ratio <- "prodSignedRatio"
departureFromValue <- 0
# => raw (departure 0)
# prodSignedRatio -> does not need to be transformed
}
observ_vect <- sort(sort_obs_curr_down, decreasing = TRUE)
observ_vect <- cumsum(abs(observ_vect - departureFromValue))
# load and store all the permut to have the limits for the plot limits
# sort when adding them to the list !
all_random_List <- list()
if(step14_for_randomTADsFix){
for(ratioSize in ratioSizeFix) {
tmpDT <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/", curr_ratio_type, "_randomFixSizeList_", gsub("\\.", "",ratioSize), ".Rdata"))))
tmpDT <- lapply(tmpDT, function(x) {
x <- sort(x, decreasing=TRUE)
if(curr_ratio_type == "ratioDown" | curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
x <- abs(x - 0.5) + 0.5
}
x <- sort(x, decreasing=TRUE)
cumsum(abs(x - departureFromValue))
})
all_random_List[["fix"]][[paste0(ratioSize)]] <- tmpDT
}
}
if(step14_for_randomTADsGaussian){
for(ratioSize in ratioSizeGaussian) {
tmpDT <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/", curr_ratio_type, "_randomGaussianList_", gsub("\\.", "",ratioSize), ".Rdata"))))
tmpDT <- lapply(tmpDT, function(x) {
x <- sort(x, decreasing=TRUE)
if(curr_ratio_type == "ratioDown" | curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
x <- abs(x - 0.5) + 0.5
}
x <- sort(x, decreasing=TRUE)
cumsum(abs(x - departureFromValue))
})
all_random_List[["gauss"]][[paste0(ratioSize)]] <- tmpDT
}
}
if(step14_for_randomTADsShuffle){
tmpDT <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/", curr_ratio_type, "_randomShuffleList.Rdata"))))
tmpDT <- lapply(tmpDT, function(x) {
x <- sort(x, decreasing=TRUE)
if(curr_ratio_type == "ratioDown" | curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
x <- abs(x - 0.5) + 0.5
}
x <- sort(x, decreasing=TRUE)
cumsum(abs(x - departureFromValue))
})
all_random_List[["shuffle"]] <- tmpDT
}
# range of the plots:
maxTADs <- max(c(unlist(lapply(all_random_List, function(x) lapply(x, length))), length(observ_vect) ))
x_val <- 1:maxTADs
y_range <- range(c(unlist(all_random_List), observ_vect))
stopifnot(!is.na(x_val))
stopifnot(!is.na(y_range))
my_main <- paste0(curr_ratio_type, ": cumsum departure from ", departureFromValue)
my_ylab <- paste0("cumsum(abs(", curr_ratio_type, " - ", departureFromValue,"))")
my_xlab <- paste0("regions ranked by decreasing ", curr_ratio_type)
if(plotSeparated){
outFile <- paste0(curr_outFold, "/", curr_ratio_type, "_departure", departureFromValue,"_cumsum_obs_permut_AUC.", plotType)
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
}
# START BY PLOTTING EMPTY PLOT
plot(NULL,
main = my_main,
xlab=my_xlab,
ylab=my_ylab,
xlim = range(x_val),
ylim = y_range,
bty="l")
# ADD THE TRUE DATA
points(x = 1:length(observ_vect),
y=observ_vect)
# NOW FOR FIX SIZE AND GAUSSIAN, AND FOR EACH OF THE SIZE RATIOS, ADD THE CURVES/AREAS FOR THE RANDOM
if(step14_for_randomTADsFix){
subList <- all_random_List[["fix"]]
foo <- lapply(1:length(subList), function(i) {
sizeList <- subList[[i]]
curr_ratioSize <- names(subList)[i]
polygonPermutCol <- fixColors[curr_ratioSize]
maxElements <- max(unlist(lapply(sizeList, length)))
stopifnot(!is.na(maxElements))
sizeList2 <- lapply(sizeList, function(x) x[1:maxElements] <- x[1:maxElements])
# minLine <- do.call(pmin, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
# maxLine <- do.call(pmax, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
sizeList2dt <- do.call(cbind,sizeList2)
stopifnot(nrow(sizeList2dt) == maxElements)
minLine <- apply(sizeList2dt,1,min,na.rm=TRUE)
maxLine <- apply(sizeList2dt,1,max,na.rm=TRUE)
stopifnot(length(minLine) == maxElements)
stopifnot(length(maxLine) == maxElements)
stopifnot(all(maxLine>=minLine))
polygon(x = c(1:maxElements, rev(1:maxElements)),
y = c( minLine, rev(maxLine)),
border=polygonPermutCol,
col = polygonPermutCol)
})
}
if(step14_for_randomTADsGaussian){
subList <- all_random_List[["gauss"]]
# for each of the sizeRatio
# - extend the vectors so that they have all the same number of elements (max #)
# - for each position find the min and max value
foo <- lapply(1:length(subList), function(i) {
sizeList <- subList[[i]]
curr_ratioSize <- names(subList)[i]
polygonPermutCol <- gaussianColors[[curr_ratioSize]]
maxElements <- max(unlist(lapply(sizeList, length)))
stopifnot(!is.na(maxElements))
sizeList2 <- lapply(sizeList, function(x) x[1:maxElements] <- x[1:maxElements])
# minLine <- do.call(pmin, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
# maxLine <- do.call(pmax, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
sizeList2dt <- do.call(cbind,sizeList2)
stopifnot(nrow(sizeList2dt) == maxElements)
minLine <- apply(sizeList2dt,1,min,na.rm=TRUE)
maxLine <- apply(sizeList2dt,1,max,na.rm=TRUE)
stopifnot(length(minLine) == maxElements)
stopifnot(length(maxLine) == maxElements)
stopifnot(all(maxLine>=minLine))
polygon(x = c(1:maxElements, rev(1:maxElements)),
y = c( minLine, rev(maxLine)),
border=polygonPermutCol,
col = polygonPermutCol)
})
}
if(step14_for_randomTADsShuffle){
sizeList <- all_random_List[["shuffle"]]
polygonPermutCol <- shufflePolygonCol
maxElements <- max(unlist(lapply(sizeList, length)))
stopifnot(!is.na(maxElements))
sizeList2 <- lapply(sizeList, function(x) x[1:maxElements] <- x[1:maxElements])
# minLine <- do.call(pmin, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
# maxLine <- do.call(pmax, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
sizeList2dt <- do.call(cbind,sizeList2)
stopifnot(nrow(sizeList2dt) == maxElements)
minLine <- apply(sizeList2dt,1,min,na.rm=TRUE)
maxLine <- apply(sizeList2dt,1,max,na.rm=TRUE)
stopifnot(length(minLine) == maxElements)
stopifnot(length(maxLine) == maxElements)
stopifnot(all(maxLine>=minLine))
polygon(x = c(1:maxElements, rev(1:maxElements)),
y = c( minLine, rev(maxLine)),
border=polygonPermutCol,
col = polygonPermutCol)
}
if(step14_for_randomTADsFix | step14_for_randomTADsGaussian) {
ratioLeg <- union(ratioSizeGaussian, ratioSizeFix)
legend("topleft", lty=1, legend=ratioLeg, col = fixColors[as.character(ratioLeg)], bty="n")
}
mtext(subtitDir, font=3)
if(plotSeparated){
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
}
}
if(!plotSeparated){
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
}
txt <- paste0(startTime, "\n", Sys.time(), "\n")
printAndLog(txt, pipLogFile)
cat(paste0("*** DONE: ", script_name, "\n"))
| /NOT_USED_SCRIPTS/14i_cumulAllDown_limited_randomTADsShuffle.R | no_license | marzuf/TAD_DE_pipeline_v2 | R | false | false | 14,980 | r | startTime <- Sys.time()
SSHFS <- F
setDir <- ifelse(SSHFS, "/media/electron", "")
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1)
settingF <- args[1]
stopifnot(file.exists(settingF))
pipScriptDir <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline")
script0_name <- "0_prepGeneData"
script1_name <- "1_runGeneDE"
script8_name <- "8c_runAllDown"
script_name <- "14i_cumulAllDown_limited_randomTADsShuffle"
stopifnot(file.exists(paste0(pipScriptDir, "/", script_name, ".R")))
cat(paste0("> START ", script_name, "\n"))
source("main_settings.R")
#source("run_settings.R")
source(settingF)
source(paste0(pipScriptDir, "/", "TAD_DE_utils.R"))
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(doMC, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(flux, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(RColorBrewer, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
registerDoMC(ifelse(SSHFS,2, nCpu)) # loaded from main_settings.R
# create the directories
curr_outFold <- paste0(pipOutFold, "/", script_name)
system(paste0("mkdir -p ", curr_outFold))
pipLogFile <- paste0(pipOutFold, "/", script_name, "_logFile.txt")
system(paste0("rm -f ", pipLogFile))
#****************************************************************************************************************************
# INPUT DATA
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = F)
gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
#### DRAW THE LEFT PLOT OF THE INITIAL 14E BUT THIS TIME PLOT THE DIFFERENT LINES AND AREA FOR
# -> FIX SIZE
# -> GAUSSIAN SIZE
# !!! WHAT IS DIFFERENT HERE: THE PERMUTATIONS DATA ARE NOT STORED IN A DATAFRAME BUT IN A LIST:
# WHEN SHUFFLING THE GENES, I ALWAYS KEEP THE SAME SET OF TADs BUT NOW THAT I SHUFFLE TAD
# THE NUMBER OF TADs MIGHT BE DIFFERENT (REGIONS OF THE GENOME WHERE NO GENES -> TAD DISCARDED)
# UPDATE SELECT THE GENES ACCORDING TO THE SETTINGS PREPARED IN 0_PREPGENEDATA
initList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/rna_geneList.Rdata"))))
geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/pipeline_geneList.Rdata"))))
txt <- paste0(toupper(script_name), "> Start with # genes: ", length(geneList), "/", length(initList), "\n")
printAndLog(txt, pipLogFile)
stopifnot(!any(duplicated(names(geneList))))
gene2tadDT <- gene2tadDT[gene2tadDT$entrezID %in% geneList,]
geneNbr <- setNames(as.numeric(table(gene2tadDT$region)), names(table(gene2tadDT$region)))
### SET OUTPUT
plotType <- "svg"
myHeight <- ifelse(plotType == "png", 480 , 7)
myWidth <- ifelse(plotType == "png", 600, 10)
# if plotSeparated == TRUE => 1 plot per ratio, otherwise all on the same figure (#x2 plots)
plotSeparated <- F
# "permThresh" the quantile of permutations to take is loaded from main_settings.R
# retrieved from main_settings.R
stopifnot(any(c(step14_for_randomTADsShuffle )))
step14_for_randomTADsShuffle <- TRUE
step14_for_randomTADsFix <- FALSE
step14_for_randomTADsGaussian <- FALSE
ratioSizeFix <- percentTADsizeFix
ratioSizeGaussian <- percentTADsizeGaussian
# for each of the ratioSize -> define the colors
# same colors for gaussian and fix
# but transparency for gaussian (because polygon overlap)
# polygonPermutCol = rgb(0/255,76/255,153/255, 0.3)
myPalette1 <- brewer.pal(9, "YlOrRd")
names(myPalette1) <- seq(0.1,0.9,by=0.1)
myPalette2 <- rev(brewer.pal(9, "YlGn"))
names(myPalette2) <- seq(1.1,1.9,by=0.1)
myPalette <- c(myPalette1, myPalette2)
fixColors_rgb <- lapply(myPalette, function(x) as.numeric(col2rgb(x)))
gaussianColors <- lapply(fixColors_rgb, function(x) {
x <- x/255
rgb(red = x[1], green=x[2], blue=x[3], alpha=0.3)
})
fixColors <- myPalette
shufflePolygonCol <- rgb(0/255,76/255,153/255, 0.3)
###################################################################################################
# get the direction of up/down
###################################################################################################
# retrieve the direction of up/down
DE_topTable <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_topTable.Rdata"))))
DE_geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_geneList.Rdata"))))
exprDT <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_rnaseqDT.Rdata"))))
samp1 <- eval(parse(text=load(paste0(setDir, "/", sample1_file))))
samp2 <- eval(parse(text=load(paste0(setDir, "/", sample2_file))))
stopifnot(all(DE_topTable$genes %in% names(DE_geneList)))
stopifnot(!any(duplicated(names(DE_geneList))))
stopifnot(all(colnames(exprDT) %in% c(samp1, samp2)))
stopifnot(all(samp1 %in% colnames(exprDT)))
stopifnot(all(samp2 %in% colnames(exprDT)))
maxDownGene <- DE_topTable$genes[which.min(DE_topTable$logFC)]
stopifnot(maxDownGene %in% rownames(exprDT))
mean_expr1 <- mean(unlist(c(exprDT[maxDownGene, samp1])), na.rm=T)
mean_expr2 <- mean(unlist(c(exprDT[maxDownGene, samp2])), na.rm=T)
if(mean_expr1 > mean_expr2) {
subtitDir <- paste0("down: ", toupper(cond1), " > ", toupper(cond2))
} else{
subtitDir <- paste0("down: ", toupper(cond2), " > ", toupper(cond1))
}
# allDown retrieved from main_settings.R
# allDown_limited <- c("ratioDown", "rescWeighted", "rescWeightedQQ", "prodSignedRatio")
if(! plotSeparated) {
nColPlot <- 1 # here I plot only with the area
# nRowPlot <- length(allDown_limited)*2/nColPlot # only 1 plot/row in this version
nRowPlot <- length(allDown_limited)*1/nColPlot
outFile <- paste0(curr_outFold, "/allRatios_cumsum_obs_permut.", plotType)
do.call(plotType, list(outFile, height=myHeight*nRowPlot, width=myWidth*nColPlot))
par(mfrow=c(nRowPlot, nColPlot))
}
for(curr_ratio_type in allDown_limited) {
cat(paste0("*** START ", curr_ratio_type, "\n"))
obs_curr_down <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/all_obs_", curr_ratio_type, ".Rdata"))))
sort_obs_curr_down <- sort(obs_curr_down, decreasing = TRUE)
# FOR ratioDown => plot ratioConcord, departure from 0.5
if(curr_ratio_type == "ratioDown") {
my_stat_curr_ratio <- "ratioDown_Concord"
departureFromValue <- 0.5
# => Concord, departure 0.5
# Transform ratioDown -> ratioConcord
# change so that the ratioDown ranges between 0.5 and 1 (-> e.g. treats 0.1 as 0.9)
# transf. ratioDown -> ratioConcord
sort_obs_curr_down <- abs(sort_obs_curr_down - 0.5) + 0.5
} else if(curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
my_stat_curr_ratio <- paste0(curr_ratio_type, "_Concord")
departureFromValue <- 0.5
# => Concord, departure 0.5
# Transform rescWeightedQQ -> rescWeightedQQConcord
# change so that the ratioDown ranges between 0.5 and 1 (-> e.g. treats 0.1 as 0.9)
# transf. ratioDown -> ratioConcord
sort_obs_curr_down <- abs(sort_obs_curr_down - 0.5) + 0.5
} else if(curr_ratio_type == "prodSignedRatio") {
my_stat_curr_ratio <- "prodSignedRatio"
departureFromValue <- 0
# => raw (departure 0)
# prodSignedRatio -> does not need to be transformed
}
observ_vect <- sort(sort_obs_curr_down, decreasing = TRUE)
observ_vect <- cumsum(abs(observ_vect - departureFromValue))
# load and store all the permut to have the limits for the plot limits
# sort when adding them to the list !
all_random_List <- list()
if(step14_for_randomTADsFix){
for(ratioSize in ratioSizeFix) {
tmpDT <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/", curr_ratio_type, "_randomFixSizeList_", gsub("\\.", "",ratioSize), ".Rdata"))))
tmpDT <- lapply(tmpDT, function(x) {
x <- sort(x, decreasing=TRUE)
if(curr_ratio_type == "ratioDown" | curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
x <- abs(x - 0.5) + 0.5
}
x <- sort(x, decreasing=TRUE)
cumsum(abs(x - departureFromValue))
})
all_random_List[["fix"]][[paste0(ratioSize)]] <- tmpDT
}
}
if(step14_for_randomTADsGaussian){
for(ratioSize in ratioSizeGaussian) {
tmpDT <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/", curr_ratio_type, "_randomGaussianList_", gsub("\\.", "",ratioSize), ".Rdata"))))
tmpDT <- lapply(tmpDT, function(x) {
x <- sort(x, decreasing=TRUE)
if(curr_ratio_type == "ratioDown" | curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
x <- abs(x - 0.5) + 0.5
}
x <- sort(x, decreasing=TRUE)
cumsum(abs(x - departureFromValue))
})
all_random_List[["gauss"]][[paste0(ratioSize)]] <- tmpDT
}
}
if(step14_for_randomTADsShuffle){
tmpDT <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/", curr_ratio_type, "_randomShuffleList.Rdata"))))
tmpDT <- lapply(tmpDT, function(x) {
x <- sort(x, decreasing=TRUE)
if(curr_ratio_type == "ratioDown" | curr_ratio_type == "rescWeightedQQ" | curr_ratio_type == "rescWeighted" ) {
x <- abs(x - 0.5) + 0.5
}
x <- sort(x, decreasing=TRUE)
cumsum(abs(x - departureFromValue))
})
all_random_List[["shuffle"]] <- tmpDT
}
# range of the plots:
maxTADs <- max(c(unlist(lapply(all_random_List, function(x) lapply(x, length))), length(observ_vect) ))
x_val <- 1:maxTADs
y_range <- range(c(unlist(all_random_List), observ_vect))
stopifnot(!is.na(x_val))
stopifnot(!is.na(y_range))
my_main <- paste0(curr_ratio_type, ": cumsum departure from ", departureFromValue)
my_ylab <- paste0("cumsum(abs(", curr_ratio_type, " - ", departureFromValue,"))")
my_xlab <- paste0("regions ranked by decreasing ", curr_ratio_type)
if(plotSeparated){
outFile <- paste0(curr_outFold, "/", curr_ratio_type, "_departure", departureFromValue,"_cumsum_obs_permut_AUC.", plotType)
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
}
# START BY PLOTTING EMPTY PLOT
plot(NULL,
main = my_main,
xlab=my_xlab,
ylab=my_ylab,
xlim = range(x_val),
ylim = y_range,
bty="l")
# ADD THE TRUE DATA
points(x = 1:length(observ_vect),
y=observ_vect)
# NOW FOR FIX SIZE AND GAUSSIAN, AND FOR EACH OF THE SIZE RATIOS, ADD THE CURVES/AREAS FOR THE RANDOM
if(step14_for_randomTADsFix){
subList <- all_random_List[["fix"]]
foo <- lapply(1:length(subList), function(i) {
sizeList <- subList[[i]]
curr_ratioSize <- names(subList)[i]
polygonPermutCol <- fixColors[curr_ratioSize]
maxElements <- max(unlist(lapply(sizeList, length)))
stopifnot(!is.na(maxElements))
sizeList2 <- lapply(sizeList, function(x) x[1:maxElements] <- x[1:maxElements])
# minLine <- do.call(pmin, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
# maxLine <- do.call(pmax, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
sizeList2dt <- do.call(cbind,sizeList2)
stopifnot(nrow(sizeList2dt) == maxElements)
minLine <- apply(sizeList2dt,1,min,na.rm=TRUE)
maxLine <- apply(sizeList2dt,1,max,na.rm=TRUE)
stopifnot(length(minLine) == maxElements)
stopifnot(length(maxLine) == maxElements)
stopifnot(all(maxLine>=minLine))
polygon(x = c(1:maxElements, rev(1:maxElements)),
y = c( minLine, rev(maxLine)),
border=polygonPermutCol,
col = polygonPermutCol)
})
}
if(step14_for_randomTADsGaussian){
subList <- all_random_List[["gauss"]]
# for each of the sizeRatio
# - extend the vectors so that they have all the same number of elements (max #)
# - for each position find the min and max value
foo <- lapply(1:length(subList), function(i) {
sizeList <- subList[[i]]
curr_ratioSize <- names(subList)[i]
polygonPermutCol <- gaussianColors[[curr_ratioSize]]
maxElements <- max(unlist(lapply(sizeList, length)))
stopifnot(!is.na(maxElements))
sizeList2 <- lapply(sizeList, function(x) x[1:maxElements] <- x[1:maxElements])
# minLine <- do.call(pmin, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
# maxLine <- do.call(pmax, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
sizeList2dt <- do.call(cbind,sizeList2)
stopifnot(nrow(sizeList2dt) == maxElements)
minLine <- apply(sizeList2dt,1,min,na.rm=TRUE)
maxLine <- apply(sizeList2dt,1,max,na.rm=TRUE)
stopifnot(length(minLine) == maxElements)
stopifnot(length(maxLine) == maxElements)
stopifnot(all(maxLine>=minLine))
polygon(x = c(1:maxElements, rev(1:maxElements)),
y = c( minLine, rev(maxLine)),
border=polygonPermutCol,
col = polygonPermutCol)
})
}
if(step14_for_randomTADsShuffle){
sizeList <- all_random_List[["shuffle"]]
polygonPermutCol <- shufflePolygonCol
maxElements <- max(unlist(lapply(sizeList, length)))
stopifnot(!is.na(maxElements))
sizeList2 <- lapply(sizeList, function(x) x[1:maxElements] <- x[1:maxElements])
# minLine <- do.call(pmin, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
# maxLine <- do.call(pmax, list(sizeList2, na.rm=TRUE)) # ??? why is this not working ???
sizeList2dt <- do.call(cbind,sizeList2)
stopifnot(nrow(sizeList2dt) == maxElements)
minLine <- apply(sizeList2dt,1,min,na.rm=TRUE)
maxLine <- apply(sizeList2dt,1,max,na.rm=TRUE)
stopifnot(length(minLine) == maxElements)
stopifnot(length(maxLine) == maxElements)
stopifnot(all(maxLine>=minLine))
polygon(x = c(1:maxElements, rev(1:maxElements)),
y = c( minLine, rev(maxLine)),
border=polygonPermutCol,
col = polygonPermutCol)
}
if(step14_for_randomTADsFix | step14_for_randomTADsGaussian) {
ratioLeg <- union(ratioSizeGaussian, ratioSizeFix)
legend("topleft", lty=1, legend=ratioLeg, col = fixColors[as.character(ratioLeg)], bty="n")
}
mtext(subtitDir, font=3)
if(plotSeparated){
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
}
}
if(!plotSeparated){
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
}
txt <- paste0(startTime, "\n", Sys.time(), "\n")
printAndLog(txt, pipLogFile)
cat(paste0("*** DONE: ", script_name, "\n"))
|
#note E_Xab denotes E(Xa - Xb)
na = 26
pa = 0.4
E_Xa = na*pa
Var_Xa = na*pa*(1-pa)
nb = 18
pb = 0.7
E_Xb = nb*pb
Var_Xb = nb*pb*(1-pb)
E_Xab = E_Xa - E_Xb
Var_Xab = Var_Xa + Var_Xb
ans = 1 - pnorm(1 - 1/2, E_Xab, sqrt(Var_Xab))
print(ans)
| /A_First_Course_In_Probability_by_Sheldon_Ross/CH6/EX3.c.b/Ex6_3cb.R | permissive | prashantsinalkar/R_TBC_Uploads | R | false | false | 243 | r | #note E_Xab denotes E(Xa - Xb)
na = 26
pa = 0.4
E_Xa = na*pa
Var_Xa = na*pa*(1-pa)
nb = 18
pb = 0.7
E_Xb = nb*pb
Var_Xb = nb*pb*(1-pb)
E_Xab = E_Xa - E_Xb
Var_Xab = Var_Xa + Var_Xb
ans = 1 - pnorm(1 - 1/2, E_Xab, sqrt(Var_Xab))
print(ans)
|
#' Relative Broken Stick Stopping Rule
#' @export
#'
#' @description
#' This function is a criterion for choosing the number of principal components.
#'
#' @param x numeric, a vector of the eigenvalues of the principal components.
#' @param plot boolean default is FALSE, toggle plot.
#'
#' @seealso \code{\link{brStick}}
#'
#' @examples
#' summary(pc.cr <- princomp(USArrests, cor = TRUE))
#' rbrStick(pc.cr$sdev ** 2)
#'
#' # Show plot
#' rbrStick(pc.cr$sdev ** 2, plot = TRUE)
rbrStick <- function (x, plot = FALSE) {
m <- 0
out <- rbrLoop(x)
colnames(out) <- c("Actual Value", "Rel. B-Stick Threshold")
if (plot == TRUE) {
rbr.df <- melt(out)
p <- xyplot(
value ~ Var1, group = Var2, data = rbr.df, type = c('g', 'l'),
xlab = "Principal Components", ylab = "Relative Percent of Variability",
auto.key = list(corner = c(0.1, 0.9), points = FALSE , lines = TRUE))
show(p)
}
return(list("Use PC(s):" = which(out[, 1] > out[, 2]), Table = out))
} | /R/rbrStick.R | no_license | alstat/rface | R | false | false | 1,000 | r | #' Relative Broken Stick Stopping Rule
#' @export
#'
#' @description
#' This function is a criterion for choosing the number of principal components.
#'
#' @param x numeric, a vector of the eigenvalues of the principal components.
#' @param plot boolean default is FALSE, toggle plot.
#'
#' @seealso \code{\link{brStick}}
#'
#' @examples
#' summary(pc.cr <- princomp(USArrests, cor = TRUE))
#' rbrStick(pc.cr$sdev ** 2)
#'
#' # Show plot
#' rbrStick(pc.cr$sdev ** 2, plot = TRUE)
rbrStick <- function (x, plot = FALSE) {
m <- 0
out <- rbrLoop(x)
colnames(out) <- c("Actual Value", "Rel. B-Stick Threshold")
if (plot == TRUE) {
rbr.df <- melt(out)
p <- xyplot(
value ~ Var1, group = Var2, data = rbr.df, type = c('g', 'l'),
xlab = "Principal Components", ylab = "Relative Percent of Variability",
auto.key = list(corner = c(0.1, 0.9), points = FALSE , lines = TRUE))
show(p)
}
return(list("Use PC(s):" = which(out[, 1] > out[, 2]), Table = out))
} |
## read the data
library(dplyr)
library(lubridate)
power_data <- read.table("./data/household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
power_data <- power_data %>%
mutate(Date = dmy(Date)) %>%
filter(Date >= ymd("2007-02-01") & Date <= ymd("2007-02-02")) %>%
mutate(datetime = as_datetime(paste(Date, Time))) %>%
select(datetime, everything(), -Date, -Time)
## create the .png file
png("plot4.png")
## create a 2x2 grid for placing plots, fill rows first
par(mfrow = c(2,2))
## (1,1) plot a line plot of global active power vs. datetime
plot(power_data$datetime, power_data$Global_active_power,
type = "l",
xlab = "",
ylab = "Global Active Power")
## (1,2) plot a line plot of Voltage vs. datetime
plot(power_data$datetime, power_data$Voltage,
type = "l",
xlab = "datetime",
ylab = "Voltage")
## (2,1) plot line plots of energy sub metering
with(power_data, plot(datetime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l"))
with(power_data, points(datetime, Sub_metering_2, col = "red", type = "l"))
with(power_data, points(datetime, Sub_metering_3, col = "blue", type = "l"))
legend("topright",
col = c("black", "red", "blue"),
legend = vars_select(names(power_data), starts_with("Sub")),
lty = "solid",
bty = "n")
## (2,2) plot a line plot of global reactive power vs. datetime
plot(power_data$datetime, power_data$Global_reactive_power,
type = "l",
xlab = "datetime",
ylab = "Global_reactive_power")
## close the file
dev.off() | /plot4.R | no_license | tauteo/ExData_Plotting1 | R | false | false | 1,584 | r | ## read the data
## plot4.R -- four-panel figure for the UCI household power consumption data
## restricted to 2007-02-01 .. 2007-02-02.
library(dplyr)
library(lubridate)
## Read the raw data; missing values are coded as "?" in the source file.
power_data <- read.table("./data/household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
## Keep only the two target days and build a single POSIXct datetime column.
power_data <- power_data %>%
mutate(Date = dmy(Date)) %>%
filter(Date >= ymd("2007-02-01") & Date <= ymd("2007-02-02")) %>%
mutate(datetime = as_datetime(paste(Date, Time))) %>%
select(datetime, everything(), -Date, -Time)
## create the .png file
png("plot4.png")
## create a 2x2 grid for placing plots, fill rows first
par(mfrow = c(2,2))
## (1,1) plot a line plot of global active power vs. datetime
plot(power_data$datetime, power_data$Global_active_power,
type = "l",
xlab = "",
ylab = "Global Active Power")
## (1,2) plot a line plot of Voltage vs. datetime
plot(power_data$datetime, power_data$Voltage,
type = "l",
xlab = "datetime",
ylab = "Voltage")
## (2,1) plot line plots of energy sub metering
with(power_data, plot(datetime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l"))
with(power_data, points(datetime, Sub_metering_2, col = "red", type = "l"))
with(power_data, points(datetime, Sub_metering_3, col = "blue", type = "l"))
## NOTE(review): vars_select() lives in tidyselect, which is not attached
## above -- confirm it is available (dplyr does not re-export it).
legend("topright",
col = c("black", "red", "blue"),
legend = vars_select(names(power_data), starts_with("Sub")),
lty = "solid",
bty = "n")
## (2,2) plot a line plot of global reactive power vs. datetime
plot(power_data$datetime, power_data$Global_reactive_power,
type = "l",
xlab = "datetime",
ylab = "Global_reactive_power")
## close the file
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/marinetraffic.R
\name{mt_vesselinfo}
\alias{mt_vesselinfo}
\title{Vessel info from marinetraffic.com}
\usage{
mt_vesselinfo(mtid = 294305)
}
\arguments{
\item{mtid}{Marine traffic vessel id}
}
\value{
A tibble
}
\description{
Vessel info from marinetraffic.com
}
| /man/mt_vesselinfo.Rd | no_license | einarhjorleifsson/fishvice | R | false | true | 341 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/marinetraffic.R
\name{mt_vesselinfo}
\alias{mt_vesselinfo}
\title{Vessel info from marinetraffic.com}
\usage{
mt_vesselinfo(mtid = 294305)
}
\arguments{
\item{mtid}{Marine traffic vessel id}
}
\value{
A tibble
}
\description{
Vessel info from marinetraffic.com
}
|
# ------------------------------------------------------------------------------
# vec_restore()
test_that("vec_restore() returns an rset subclass if `x` retains rset structure", {
for (x in rset_subclasses) {
expect_identical(vec_restore(x, x), x)
expect_s3_class_rset(vec_restore(x, x))
}
})
test_that("vec_restore() returns bare tibble if `x` loses rset structure", {
for (x in rset_subclasses) {
col <- x[1]
row <- x[0,]
expect_s3_class_bare_tibble(vec_restore(col, x))
expect_s3_class_bare_tibble(vec_restore(row, x))
}
})
test_that("vec_restore() retains extra attributes of `to` no matter what", {
for (x in rset_subclasses) {
to <- x
attr(to, "foo") <- "bar"
x_tbl <- x[1]
expect_identical(attr(vec_restore(x, to), "foo"), "bar")
expect_identical(attr(vec_restore(x_tbl, to), "foo"), "bar")
expect_s3_class_rset(vec_restore(x, to))
expect_s3_class_bare_tibble(vec_restore(x_tbl, to))
}
})
# ------------------------------------------------------------------------------
# vec_ptype2()
test_that("vec_ptype2() is working", {
for (x in rset_subclasses) {
tbl <- tibble::tibble(x = 1)
df <- data.frame(x = 1)
# rset-rset
expect_identical(vec_ptype2(x, x), vec_ptype2(rset_strip(x), rset_strip(x)))
# rset-tbl_df
expect_identical(vec_ptype2(x, tbl), vec_ptype2(rset_strip(x), tbl))
expect_identical(vec_ptype2(tbl, x), vec_ptype2(tbl, rset_strip(x)))
# rset-df
expect_identical(vec_ptype2(x, df), vec_ptype2(rset_strip(x), df))
expect_identical(vec_ptype2(df, x), vec_ptype2(df, rset_strip(x)))
}
})
# ------------------------------------------------------------------------------
# vec_cast()
test_that("vec_cast() is working", {
for (x in rset_subclasses) {
tbl <- rset_strip(x)
df <- as.data.frame(tbl)
# rset-rset
expect_error(vec_cast(x, x), class = "vctrs_error_incompatible_type")
# rset-tbl_df
expect_identical(vec_cast(x, tbl), tbl)
expect_error(vec_cast(tbl, x), class = "vctrs_error_incompatible_type")
# rset-df
expect_identical(vec_cast(x, df), df)
expect_error(vec_cast(df, x), class = "vctrs_error_incompatible_type")
}
})
# ------------------------------------------------------------------------------
# vctrs methods
test_that("vec_ptype() returns a bare tibble", {
for (x in rset_subclasses) {
expect_identical(vec_ptype(x), vec_ptype(rset_strip(x)))
expect_s3_class_bare_tibble(vec_ptype(x))
}
})
test_that("vec_slice() generally returns a bare tibble", {
for (x in rset_subclasses) {
expect_identical(vec_slice(x, 0), vec_slice(rset_strip(x), 0))
expect_s3_class_bare_tibble(vec_slice(x, 0))
}
})
test_that("vec_slice() can return an rset if all rows are selected", {
for (x in rset_subclasses) {
expect_identical(vec_slice(x, TRUE), x)
expect_s3_class_rset(vec_slice(x, TRUE))
}
})
test_that("vec_c() returns a bare tibble", {
for (x in rset_subclasses) {
tbl <- rset_strip(x)
expect_identical(vec_c(x), vec_c(tbl))
expect_identical(vec_c(x, x), vec_c(tbl, tbl))
expect_identical(vec_c(x, tbl), vec_c(tbl, tbl))
expect_s3_class_bare_tibble(vec_c(x))
expect_s3_class_bare_tibble(vec_c(x, x))
}
})
test_that("vec_rbind() returns a bare tibble", {
  for (x in rset_subclasses) {
    tbl <- rset_strip(x)
    expect_identical(vec_rbind(x), vec_rbind(tbl))
    expect_identical(vec_rbind(x, x), vec_rbind(tbl, tbl))
    expect_identical(vec_rbind(x, tbl), vec_rbind(tbl, tbl))
    expect_s3_class_bare_tibble(vec_rbind(x))
    ## BUGFIX: was `vec_cbind(x, x)` -- copy-paste from the cbind test,
    ## leaving vec_rbind()'s two-argument class untested.
    expect_s3_class_bare_tibble(vec_rbind(x, x))
  }
})
test_that("vec_cbind() returns a bare tibble", {
for (x in rset_subclasses) {
tbl <- rset_strip(x)
expect_identical(vec_cbind(x), vec_cbind(tbl))
expect_identical(vec_cbind(x, x), vec_cbind(tbl, tbl))
expect_identical(vec_cbind(x, tbl), vec_cbind(tbl, tbl))
expect_s3_class_bare_tibble(vec_cbind(x))
expect_s3_class_bare_tibble(vec_cbind(x, x))
}
})
| /tests/testthat/test-compat-vctrs.R | no_license | nikhil1689/rsample | R | false | false | 4,024 | r | # ------------------------------------------------------------------------------
# vec_restore()
test_that("vec_restore() returns an rset subclass if `x` retains rset structure", {
for (x in rset_subclasses) {
expect_identical(vec_restore(x, x), x)
expect_s3_class_rset(vec_restore(x, x))
}
})
test_that("vec_restore() returns bare tibble if `x` loses rset structure", {
for (x in rset_subclasses) {
col <- x[1]
row <- x[0,]
expect_s3_class_bare_tibble(vec_restore(col, x))
expect_s3_class_bare_tibble(vec_restore(row, x))
}
})
test_that("vec_restore() retains extra attributes of `to` no matter what", {
for (x in rset_subclasses) {
to <- x
attr(to, "foo") <- "bar"
x_tbl <- x[1]
expect_identical(attr(vec_restore(x, to), "foo"), "bar")
expect_identical(attr(vec_restore(x_tbl, to), "foo"), "bar")
expect_s3_class_rset(vec_restore(x, to))
expect_s3_class_bare_tibble(vec_restore(x_tbl, to))
}
})
# ------------------------------------------------------------------------------
# vec_ptype2()
test_that("vec_ptype2() is working", {
for (x in rset_subclasses) {
tbl <- tibble::tibble(x = 1)
df <- data.frame(x = 1)
# rset-rset
expect_identical(vec_ptype2(x, x), vec_ptype2(rset_strip(x), rset_strip(x)))
# rset-tbl_df
expect_identical(vec_ptype2(x, tbl), vec_ptype2(rset_strip(x), tbl))
expect_identical(vec_ptype2(tbl, x), vec_ptype2(tbl, rset_strip(x)))
# rset-df
expect_identical(vec_ptype2(x, df), vec_ptype2(rset_strip(x), df))
expect_identical(vec_ptype2(df, x), vec_ptype2(df, rset_strip(x)))
}
})
# ------------------------------------------------------------------------------
# vec_cast()
test_that("vec_cast() is working", {
for (x in rset_subclasses) {
tbl <- rset_strip(x)
df <- as.data.frame(tbl)
# rset-rset
expect_error(vec_cast(x, x), class = "vctrs_error_incompatible_type")
# rset-tbl_df
expect_identical(vec_cast(x, tbl), tbl)
expect_error(vec_cast(tbl, x), class = "vctrs_error_incompatible_type")
# rset-df
expect_identical(vec_cast(x, df), df)
expect_error(vec_cast(df, x), class = "vctrs_error_incompatible_type")
}
})
# ------------------------------------------------------------------------------
# vctrs methods
test_that("vec_ptype() returns a bare tibble", {
for (x in rset_subclasses) {
expect_identical(vec_ptype(x), vec_ptype(rset_strip(x)))
expect_s3_class_bare_tibble(vec_ptype(x))
}
})
test_that("vec_slice() generally returns a bare tibble", {
for (x in rset_subclasses) {
expect_identical(vec_slice(x, 0), vec_slice(rset_strip(x), 0))
expect_s3_class_bare_tibble(vec_slice(x, 0))
}
})
test_that("vec_slice() can return an rset if all rows are selected", {
for (x in rset_subclasses) {
expect_identical(vec_slice(x, TRUE), x)
expect_s3_class_rset(vec_slice(x, TRUE))
}
})
test_that("vec_c() returns a bare tibble", {
for (x in rset_subclasses) {
tbl <- rset_strip(x)
expect_identical(vec_c(x), vec_c(tbl))
expect_identical(vec_c(x, x), vec_c(tbl, tbl))
expect_identical(vec_c(x, tbl), vec_c(tbl, tbl))
expect_s3_class_bare_tibble(vec_c(x))
expect_s3_class_bare_tibble(vec_c(x, x))
}
})
test_that("vec_rbind() returns a bare tibble", {
  for (x in rset_subclasses) {
    tbl <- rset_strip(x)
    expect_identical(vec_rbind(x), vec_rbind(tbl))
    expect_identical(vec_rbind(x, x), vec_rbind(tbl, tbl))
    expect_identical(vec_rbind(x, tbl), vec_rbind(tbl, tbl))
    expect_s3_class_bare_tibble(vec_rbind(x))
    ## BUGFIX: was `vec_cbind(x, x)` -- copy-paste from the cbind test,
    ## leaving vec_rbind()'s two-argument class untested.
    expect_s3_class_bare_tibble(vec_rbind(x, x))
  }
})
test_that("vec_cbind() returns a bare tibble", {
for (x in rset_subclasses) {
tbl <- rset_strip(x)
expect_identical(vec_cbind(x), vec_cbind(tbl))
expect_identical(vec_cbind(x, x), vec_cbind(tbl, tbl))
expect_identical(vec_cbind(x, tbl), vec_cbind(tbl, tbl))
expect_s3_class_bare_tibble(vec_cbind(x))
expect_s3_class_bare_tibble(vec_cbind(x, x))
}
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collections.R
\name{get_collections}
\alias{get_collections}
\title{Get collections by user or status id.}
\usage{
get_collections(
user,
status_id = NULL,
n = 200,
cursor = NULL,
parse = TRUE,
token = NULL
)
}
\arguments{
\item{user}{Screen name or user id of target user. Requests must
provide a value for one of user or status_id.}
\item{status_id}{Optional, the identifier of the tweet for which to
return results. Requests must provide a value for one of user or
status_id.}
\item{n}{Maximum number of results to return. Defaults to 200.}
\item{cursor}{Page identifier of results to retrieve. If parse = TRUE,
the next cursor value for any given request--if available--is stored
as an attribute, accessible via \code{\link[=next_cursor]{next_cursor()}}}
\item{parse}{The default, \code{TRUE}, indicates that the result should
be parsed into a convenient R data structure like a list or data frame.
This protects you from the vagaries of the twitter API. Use \code{FALSE}
to return the "raw" list produced by the JSON returned from the twitter
API.}
\item{token}{An OAuth token object. The default, \code{NULL}, will retrieve
a default token with \code{\link[=get_token]{get_token()}}. You should only need to
use this argument if you are wrapping rtweet in a package.
See \code{vignette("auth")} for more details.}
}
\value{
Return object converted to nested list if parsed otherwise
an HTTP response object is returned.
}
\description{
Find collections (themed grouping of statuses) created by specific user
or status id. Results include user, status, and collection features.
}
\examples{
\dontrun{
## lookup a specific collection
cnnc <- get_collections("cnn")
## inspect data
str(cnnc)
## by status id
wwe <- get_collections(status_id = "925172982313570306")
## inspect data
str(wwe)
}
}
| /man/get_collections.Rd | no_license | oalbishri/rtweet | R | false | true | 1,900 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collections.R
\name{get_collections}
\alias{get_collections}
\title{Get collections by user or status id.}
\usage{
get_collections(
user,
status_id = NULL,
n = 200,
cursor = NULL,
parse = TRUE,
token = NULL
)
}
\arguments{
\item{user}{Screen name or user id of target user. Requests must
provide a value for one of user or status_id.}
\item{status_id}{Optional, the identifier of the tweet for which to
return results. Requests must provide a value for one of user or
status_id.}
\item{n}{Maximum number of results to return. Defaults to 200.}
\item{cursor}{Page identifier of results to retrieve. If parse = TRUE,
the next cursor value for any given request--if available--is stored
as an attribute, accessible via \code{\link[=next_cursor]{next_cursor()}}}
\item{parse}{The default, \code{TRUE}, indicates that the result should
be parsed into a convenient R data structure like a list or data frame.
This protects you from the vagaries of the twitter API. Use \code{FALSE}
to return the "raw" list produced by the JSON returned from the twitter
API.}
\item{token}{An OAuth token object. The default, \code{NULL}, will retrieve
a default token with \code{\link[=get_token]{get_token()}}. You should only need to
use this argument if you are wrapping rtweet in a package.
See \code{vignette("auth")} for more details.}
}
\value{
Return object converted to nested list if parsed otherwise
an HTTP response object is returned.
}
\description{
Find collections (themed grouping of statuses) created by specific user
or status id. Results include user, status, and collection features.
}
\examples{
\dontrun{
## lookup a specific collection
cnnc <- get_collections("cnn")
## inspect data
str(cnnc)
## by status id
wwe <- get_collections(status_id = "925172982313570306")
## inspect data
str(wwe)
}
}
|
download.file("https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip", destfile = "power.csv", mode = "wb")
columnnames <- c( "Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
file <- read.table(unzip("power.csv"), col.names = columnnames, sep = ";", skip = 66637, nrow = 2880, header = FALSE, colClasses = c("character", "character", rep("numeric",7)))
library(dplyr)
library(lubridate)
file2 <- mutate (file, DateTime = paste(Date, Time, sep = " "))
file2$DateTime <- dmy_hms(file2$DateTime)
png (filename = "plot3.png", width = 480, height = 480, pointsize = 12, bg = "white")
plot(file2$DateTime, file2$Sub_metering_1, type = "l", ylab ="Energy sub metering", xlab = " ")
points(file2$DateTime, file2$Sub_metering_2, col = "red", type = "l")
points(file2$DateTime, file2$Sub_metering_3, col = "blue", type = "l")
legend("topright", lty = 1, lwd = 3, col = c("black", "blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() | /plot3.R | no_license | autumen/ExData_Plotting1 | R | false | false | 1,104 | r | download.file("https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip", destfile = "power.csv", mode = "wb")
columnnames <- c( "Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
file <- read.table(unzip("power.csv"), col.names = columnnames, sep = ";", skip = 66637, nrow = 2880, header = FALSE, colClasses = c("character", "character", rep("numeric",7)))
library(dplyr)
library(lubridate)
file2 <- mutate (file, DateTime = paste(Date, Time, sep = " "))
file2$DateTime <- dmy_hms(file2$DateTime)
png (filename = "plot3.png", width = 480, height = 480, pointsize = 12, bg = "white")
plot(file2$DateTime, file2$Sub_metering_1, type = "l", ylab ="Energy sub metering", xlab = " ")
points(file2$DateTime, file2$Sub_metering_2, col = "red", type = "l")
points(file2$DateTime, file2$Sub_metering_3, col = "blue", type = "l")
legend("topright", lty = 1, lwd = 3, col = c("black", "blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() |
\name{clickpoly}
\alias{clickpoly}
\title{Interactively Define a Polygon}
\description{
Allows the user to create a polygon by
point-and-click in the display.
}
\usage{
clickpoly(add=FALSE, nv=NULL, np=1, ...)
}
\arguments{
\item{add}{
Logical value indicating whether to create a new plot
(\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}).
}
\item{nv}{
Number of vertices of the polygon (if this is predetermined).
}
\item{np}{
Number of polygons to create.
}
\item{\dots}{
Arguments passed to \code{\link[graphics]{locator}} to control the
interactive plot, and to \code{\link[graphics]{polygon}} to plot the
polygons.
}
}
\value{
A window (object of class \code{"owin"}) representing the polygon.
}
\details{
This function allows the user to create a polygonal window
by interactively clicking on the screen display.
The user is prompted to point the mouse at any desired locations
for the polygon vertices,
and click the left mouse button to add each point.
Interactive input stops after \code{nv} clicks (if \code{nv} was given)
or when the middle mouse button is pressed.
The return value is a window (object of class \code{"owin"})
representing the polygon.
This function uses the \R command \code{\link[graphics]{locator}} to
input the mouse clicks. It only works on screen devices such as
\sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be
passed to \code{\link[graphics]{locator}} through \code{\dots} include
\code{pch} (plotting character), \code{cex} (character expansion
factor) and \code{col} (colour). See \code{\link[graphics]{locator}}
and \code{\link{par}}.
Multiple polygons can also be drawn, by specifying
\code{np > 1}. The polygons must be disjoint. The result is
a single window object consisting of all the polygons.
}
\seealso{
\code{\link{identify.ppp}},
\code{\link{clickbox}},
\code{\link{clickppp}},
\code{\link{clickdist}},
\code{\link[graphics]{locator}}
}
\author{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}
and Rolf Turner \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{iplot}
| /man/clickpoly.Rd | no_license | chenjiaxun9/spatstat | R | false | false | 2,194 | rd | \name{clickpoly}
\alias{clickpoly}
\title{Interactively Define a Polygon}
\description{
Allows the user to create a polygon by
point-and-click in the display.
}
\usage{
clickpoly(add=FALSE, nv=NULL, np=1, ...)
}
\arguments{
\item{add}{
Logical value indicating whether to create a new plot
(\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}).
}
\item{nv}{
Number of vertices of the polygon (if this is predetermined).
}
\item{np}{
Number of polygons to create.
}
\item{\dots}{
Arguments passed to \code{\link[graphics]{locator}} to control the
interactive plot, and to \code{\link[graphics]{polygon}} to plot the
polygons.
}
}
\value{
A window (object of class \code{"owin"}) representing the polygon.
}
\details{
This function allows the user to create a polygonal window
by interactively clicking on the screen display.
The user is prompted to point the mouse at any desired locations
for the polygon vertices,
and click the left mouse button to add each point.
Interactive input stops after \code{nv} clicks (if \code{nv} was given)
or when the middle mouse button is pressed.
The return value is a window (object of class \code{"owin"})
representing the polygon.
This function uses the \R command \code{\link[graphics]{locator}} to
input the mouse clicks. It only works on screen devices such as
\sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be
passed to \code{\link[graphics]{locator}} through \code{\dots} include
\code{pch} (plotting character), \code{cex} (character expansion
factor) and \code{col} (colour). See \code{\link[graphics]{locator}}
and \code{\link{par}}.
Multiple polygons can also be drawn, by specifying
\code{np > 1}. The polygons must be disjoint. The result is
a single window object consisting of all the polygons.
}
\seealso{
\code{\link{identify.ppp}},
\code{\link{clickbox}},
\code{\link{clickppp}},
\code{\link{clickdist}},
\code{\link[graphics]{locator}}
}
\author{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}
and Rolf Turner \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{iplot}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HurdleGEE_c.R
\name{HurdleGEE_c}
\alias{HurdleGEE_c}
\title{Generalized Estimating Equations, Truncated Count component of Longitudinal Hurdle Model}
\usage{
HurdleGEE_c(y, subjectID, N, X_c)
}
\arguments{
\item{y}{The vector of response counts, ordered by subject, time within subject.}
\item{subjectID}{The vector of subject ID values for each response.}
\item{N}{The number of subjects.}
\item{X_c}{The design matrix for all covariates in the truncated count component of the model, including an intercept.}
}
\description{
This function calculates the Generalized Estimating Equations (GEE) parameter estimates and standard errors for the truncated count component of a hurdle model for longitudinal excess zero count responses with independent working correlation structure, based on Dobbie and Welsh (2001). Within the estimation method, responses are filtered to include only the positive counts from the original data. Data must be organized by subject, and an intercept term is assumed for both the logistic and truncated count components of the model. The function outputs a list with parameter estimates betaHat and parameter covariance estimate covEst, along with estimated rates muHat and residuals r_c.
}
\examples{
HurdleGEE_c()
}
\keyword{GEE}
| /man/HurdleGEE_c.Rd | no_license | lalondetl/GMM | R | false | true | 1,344 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HurdleGEE_c.R
\name{HurdleGEE_c}
\alias{HurdleGEE_c}
\title{Generalized Estimating Equations, Truncated Count component of Longitudinal Hurdle Model}
\usage{
HurdleGEE_c(y, subjectID, N, X_c)
}
\arguments{
\item{y}{The vector of response counts, ordered by subject, time within subject.}
\item{subjectID}{The vector of subject ID values for each response.}
\item{N}{The number of subjects.}
\item{X_c}{The design matrix for all covariates in the truncated count component of the model, including an intercept.}
}
\description{
This function calculates the Generalized Estimating Equations (GEE) parameter estimates and standard errors for the truncated count component of a hurdle model for longitudinal excess zero count responses with independent working correlation structure, based on Dobbie and Welsh (2001). Within the estimation method, responses are filtered to include only the positive counts from the original data. Data must be organized by subject, and an intercept term is assumed for both the logistic and truncated count components of the model. The function outputs a list with parameter estimates betaHat and parameter covariance estimate covEst, along with estimated rates muHat and residuals r_c.
}
\examples{
HurdleGEE_c()
}
\keyword{GEE}
|
##################################################
# Multiplot time series of global active power, #
# voltage, submetering and global reactive power #
#################################################
### Download the data from https://d396qusza40orc.cloudfront.net/
### exdata%2Fdata%2Fhousehold_power_consumption.zip
### and then unzipped
work_dir="C:/Users/KAUSHIK/Desktop/Assignment"
# set working directory to your working folder where data is available
setwd(work_dir)
# Read data, Its will take 15 seconds
data <- read.table('household_power_consumption.txt', sep=';', header=T,
colClasses = c('character', 'character', 'numeric',
'numeric', 'numeric', 'numeric',
'numeric', 'numeric', 'numeric'),
na.strings='?')
# convert dates and subset on two days in February 2007
data$DateTime <- strptime(paste(data$Date, data$Time),
"%d/%m/%Y %H:%M:%S")
data <- subset(data,
as.Date(DateTime) >= as.Date("2007-02-01") &
as.Date(DateTime) <= as.Date("2007-02-02"))
# call plot device plot3.png
png("plot4.png", height=480, width=480)
# setting plot device option
par(mfrow=c(2,2))
# Global Active Power plot
plot(data$DateTime,
data$Global_active_power,
pch=NA,
xlab="",
ylab="Global Active Power (kilowatts)")
lines(data$DateTime, data$Global_active_power)
# Voltage plot
plot(data$DateTime, data$Voltage, ylab="Voltage", xlab="datetime", pch=NA)
lines(data$DateTime, data$Voltage)
# Submetering plot
plot(data$DateTime,
data$Sub_metering_1,
pch=NA,
xlab="",
ylab="Energy sub metering")
lines(data$DateTime, data$Sub_metering_1)
lines(data$DateTime, data$Sub_metering_2, col='red')
lines(data$DateTime, data$Sub_metering_3, col='blue')
legend('topright',
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty = c(1,1,1),
col = c('black', 'red', 'blue'),
bty = 'n')
# Global reactive power plot
with(data, plot(DateTime, Global_reactive_power, xlab='datetime', pch=NA))
with(data, lines(DateTime, Global_reactive_power))
dev.off() # Close graphic device
| /plot4.R | no_license | Arun-Kaushik/ExData_Plotting1 | R | false | false | 2,232 | r |
##################################################
# Multiplot time series of global active power, #
# voltage, submetering and global reactive power #
#################################################
### Download the data from https://d396qusza40orc.cloudfront.net/
### exdata%2Fdata%2Fhousehold_power_consumption.zip
### and then unzipped
work_dir="C:/Users/KAUSHIK/Desktop/Assignment"
# set working directory to your working folder where data is available
setwd(work_dir)
# Read data, Its will take 15 seconds
data <- read.table('household_power_consumption.txt', sep=';', header=T,
colClasses = c('character', 'character', 'numeric',
'numeric', 'numeric', 'numeric',
'numeric', 'numeric', 'numeric'),
na.strings='?')
# convert dates and subset on two days in February 2007
data$DateTime <- strptime(paste(data$Date, data$Time),
"%d/%m/%Y %H:%M:%S")
data <- subset(data,
as.Date(DateTime) >= as.Date("2007-02-01") &
as.Date(DateTime) <= as.Date("2007-02-02"))
# call plot device plot3.png
png("plot4.png", height=480, width=480)
# setting plot device option
par(mfrow=c(2,2))
# Global Active Power plot
plot(data$DateTime,
data$Global_active_power,
pch=NA,
xlab="",
ylab="Global Active Power (kilowatts)")
lines(data$DateTime, data$Global_active_power)
# Voltage plot
plot(data$DateTime, data$Voltage, ylab="Voltage", xlab="datetime", pch=NA)
lines(data$DateTime, data$Voltage)
# Submetering plot
plot(data$DateTime,
data$Sub_metering_1,
pch=NA,
xlab="",
ylab="Energy sub metering")
lines(data$DateTime, data$Sub_metering_1)
lines(data$DateTime, data$Sub_metering_2, col='red')
lines(data$DateTime, data$Sub_metering_3, col='blue')
legend('topright',
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty = c(1,1,1),
col = c('black', 'red', 'blue'),
bty = 'n')
# Global reactive power plot
with(data, plot(DateTime, Global_reactive_power, xlab='datetime', pch=NA))
with(data, lines(DateTime, Global_reactive_power))
dev.off() # Close graphic device
|
## S4 generic for kernel quantile regression; dispatches on the class of `x`
## (formula, vector, matrix, list or kernelMatrix -- see the methods below).
setGeneric("kqr", function(x, ...) standardGeneric("kqr"))
## Formula interface: builds the model frame/matrix from `x` (a formula)
## and `data`, decides which columns may be scaled, and delegates the
## actual fit to the matrix method.
setMethod("kqr",signature(x="formula"),
function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){
## Rewrite this call into a model.frame() call (standard R modelling idiom).
cl <- match.call()
m <- match.call(expand.dots = FALSE)
if (is.matrix(eval(m$data, parent.frame())))
m$data <- as.data.frame(data)
m$... <- NULL
m$formula <- m$x
m$x <- NULL
m[[1]] <- as.name("model.frame")
m <- eval(m, parent.frame())
Terms <- attr(m, "terms")
## No intercept column: the offset is handled by the model's own b term.
attr(Terms, "intercept") <- 0
x <- model.matrix(Terms, m)
y <- model.extract(m, response)
## Never scale dummy-coded (contrast) columns or columns the caller
## excluded through `scaled`.
if (length(scaled) == 1)
scaled <- rep(scaled, ncol(x))
if (any(scaled)) {
remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))),
which(!scaled)
)
)
scaled <- !attr(x, "assign") %in% remove
}
ret <- kqr(x, y, scaled = scaled, ...)
## Store the original call and terms so predict() can rebuild model frames.
kcall(ret) <- cl
terms(ret) <- Terms
if (!is.null(attr(m, "na.action")))
n.action(ret) <- attr(m, "na.action")
return (ret)
})
## Vector interface: promote the vector to a one-column matrix (t(t(x))
## keeps any names as rownames) and delegate to the matrix method.
setMethod("kqr", signature(x = "vector"),
          function(x, ...) {
            kqr(t(t(x)), ...)
          })
## Kernel quantile regression on a numeric data matrix.
##
## Fits the quantile-regression QP (pinball loss at quantile `tau`,
## regularisation constant `C`) with the ipop() interior-point solver.
## Optionally scales the data, stores fitted values, and runs
## `cross`-fold cross-validation.  Returns an object of class "kqr";
## the external interface is unchanged.
setMethod("kqr", signature(x = "matrix"),
          function (x, y, scaled = TRUE, tau = 0.5, C = 0.1, kernel = "rbfdot",
                    kpar = "automatic", reduced = FALSE, rank = dim(x)[1]/6,
                    fit = TRUE, cross = 0, na.action = na.omit)
          {
            if ((tau > 1) || (tau < 0)) stop("tau has to be strictly between 0 and 1")

            ret <- new("kqr")
            param(ret) <- list(C = C, tau = tau)

            ## Drop incomplete cases (jointly over y and x when y is given).
            if (is.null(y))
              x <- na.action(x)
            else {
              df <- na.action(data.frame(y, x))
              y <- df[, 1]
              x <- as.matrix(df[, -1])
            }
            ncols <- ncol(x)
            m <- nrows <- nrow(x)

            x.scale <- y.scale <- NULL
            ## Scale the selected columns (and y) to zero mean / unit variance.
            if (length(scaled) == 1)
              scaled <- rep(scaled, ncol(x))
            if (any(scaled)) {
              co <- !apply(x[, scaled, drop = FALSE], 2, var)  # TRUE for constant columns
              if (any(co)) {
                ## BUGFIX: capture the offending names *before* resetting
                ## `scaled`; the original indexed the already-emptied
                ## selection and reported the wrong variable names.
                constnames <- colnames(x[, scaled, drop = FALSE])[co]
                scaled <- rep(FALSE, ncol(x))
                warning(paste("Variable(s)",
                              paste("`", constnames, "'", sep = "", collapse = " and "),
                              "constant. Cannot scale data.")
                        )
              } else {
                xtmp <- scale(x[, scaled])
                x[, scaled] <- xtmp
                x.scale <- attributes(xtmp)[c("scaled:center", "scaled:scale")]
                y <- scale(y)
                y.scale <- attributes(y)[c("scaled:center", "scaled:scale")]
                y <- as.vector(y)
                tmpsc <- list(scaled = scaled, x.scale = x.scale, y.scale = y.scale)
              }
            }

            ## Resolve `kernel` (name string, function or kernel object) into
            ## a kernel object; estimate sigma via sigest() when requested.
            if (is.character(kernel)) {
              kernel <- match.arg(kernel, c("rbfdot", "polydot", "tanhdot", "vanilladot",
                                            "laplacedot", "besseldot", "anovadot", "splinedot"))
              if (is.character(kpar))
                if ((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot" ||
                     kernel == "besseldot" || kernel == "anovadot" || kernel == "splinedot") &&
                    kpar == "automatic") {
                  cat(" Setting default kernel parameters ", "\n")
                  kpar <- list()
                }
            }
            if (!is.function(kernel))
              if (!is.list(kpar) && is.character(kpar) &&
                  (class(kernel) == "rbfkernel" || class(kernel) == "laplacedot" ||
                   kernel == "laplacedot" || kernel == "rbfdot")) {
                kp <- match.arg(kpar, "automatic")
                if (kp == "automatic")
                  kpar <- list(sigma = mean(sigest(x, scaled = FALSE, frac = 1)[c(1, 3)]))
                cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel", "\n")
              }
            if (!is(kernel, "kernel")) {
              if (is(kernel, "function")) kernel <- deparse(substitute(kernel))
              kernel <- do.call(kernel, kpar)
            }
            if (!is(kernel, "kernel")) stop("kernel must inherit from class `kernel'")

            ## QP set-up:  min 0.5 a'Ha + c'a  subject to  sum(a) = 0  and
            ## C(tau-1) <= a_i <= C*tau,  solved with ipop().
            if (!reduced)
              H <- kernelMatrix(kernel, x)
            else
              H <- csi(x, kernel = kernel, rank = rank)  # low-rank approximation
            c <- -y
            A <- rep(1, m)
            b <- 0
            r <- 0
            l <- matrix(C * (tau - 1), m, 1)
            u <- matrix(C * tau, m, 1)

            qpsol <- ipop(c, H, A, b, l, u, r)
            alpha(ret) <- coef(ret) <- primal(qpsol)
            b(ret) <- dual(qpsol)[1]

            xmatrix(ret) <- x
            ymatrix(ret) <- y
            kernelf(ret) <- kernel
            kpar(ret) <- kpar
            ## NOTE: the historical (misspelled) label is kept byte-for-byte
            ## for compatibility with code that matches on type().
            type(ret) <- ("Quantile Regresion")

            ## BUGFIX: store the scaling info *before* computing fitted
            ## values; the original assigned it afterwards, so the
            ## un-scaling branch below could never fire.
            if (any(scaled))
              scaling(ret) <- tmpsc

            ## Training fit and in-sample pinball / ramp losses.
            if (fit) {
              fitted(ret) <- predict(ret, x)
              ytrain <- y
              if (!is.null(scaling(ret)$y.scale)) {
                fitted(ret) <- fitted(ret) * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
                ## compare the losses on the original (un-scaled) response scale
                ytrain <- y * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
              }
              error(ret) <- c(pinloss(ytrain, fitted(ret), tau), ramploss(ytrain, fitted(ret), tau))
            }
            else fitted(ret) <- NULL

            ## `cross`-fold cross-validation of both losses.
            cross(ret) <- -1
            if (cross == 1)
              cat("\n", "cross should be >1 no cross-validation done!", "\n", "\n")
            else if (cross > 1) {
              crescs <- NULL
              suppressWarnings(vgr <- split(sample(1:m, m), 1:cross))
              for (i in 1:cross) {
                cind <- unsplit(vgr[-i], factor(rep((1:cross)[-i], unlist(lapply(vgr[-i], length)))))
                ## `scaled = FALSE` spelled out (the original relied on
                ## partial matching of `scale =`).
                cret <- kqr(x[cind, ], y[cind], tau = tau, C = C, scaled = FALSE,
                            kernel = kernel, cross = 0, fit = FALSE)
                cres <- predict(cret, x[vgr[[i]], ])
                crescs <- c(crescs, cres)
              }
              if (!is.null(scaling(ret)$y.scale)) {
                crescs <- crescs * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
                ysvgr <- y[unlist(vgr)] * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
              }
              else
                ysvgr <- y[unlist(vgr)]
              ## BUGFIX: the ramp-loss function is `ramploss`; the original
              ## called `ramloss`, a local numeric initialised to 0, and
              ## crashed here.  The results no longer shadow the loss
              ## functions either.
              cvpin <- drop(pinloss(ysvgr, crescs, tau))
              cvramp <- drop(ramploss(ysvgr, crescs, tau))
              cross(ret) <- c(cvpin, cvramp)
            }
            return(ret)
          })
## Kernel quantile regression for list input (e.g. a list of character
## strings), using a string kernel by default: the full kernel matrix is
## computed and handed to the kernelMatrix method.
##
## BUGFIX: the default kernel was misspelled "strigdot" (no such
## function) and the default kpar contained `C`, which stringdot() does
## not accept -- calling the method without an explicit kernel always
## failed.  The defaults now name stringdot() with arguments it takes.
setMethod("kqr", signature(x = "list"),
          function (x, y, tau = 0.5, C = 0.1, kernel = "stringdot",
                    kpar = list(length = 4, lambda = 0.5), fit = TRUE, cross = 0)
          {
            if ((tau > 1) || (tau < 0)) stop("tau has to be strictly between 0 and 1")

            ## Resolve a kernel name or function into a kernel object.
            if (!is(kernel, "kernel")) {
              if (is(kernel, "function")) kernel <- deparse(substitute(kernel))
              kernel <- do.call(kernel, kpar)
            }
            if (!is(kernel, "kernel")) stop("kernel must inherit from class `kernel'")

            K <- kernelMatrix(kernel, x)

            ret <- kqr(K, y = y, tau = tau, C = C, fit = fit, cross = cross)
            kernelf(ret) <- kernel
            kpar(ret) <- kpar
            return(ret)
          })
## Kernel quantile regression when the Gram (kernel) matrix is supplied
## directly.  No scaling is performed on this path.
setMethod("kqr", signature(x = "kernelMatrix"),
          function (x, y, tau = 0.5, C = 0.1, fit = TRUE, cross = 0)
          {
            if ((tau > 1) || (tau < 0)) stop("tau has to be strictly between 0 and 1")
            ret <- new("kqr")
            param(ret) <- list(C = C, tau = tau)
            ncols <- ncol(x)
            m <- nrows <- nrow(x)
            y <- as.vector(y)

            ## QP set-up, identical to the matrix method but with the
            ## user-supplied Gram matrix as H.
            H <- x
            c <- -y
            A <- rep(1, m)
            b <- 0
            r <- 0
            l <- matrix(C * (tau - 1), m, 1)
            u <- matrix(C * tau, m, 1)

            qpsol <- ipop(c, H, A, b, l, u, r)
            alpha(ret) <- coef(ret) <- primal(qpsol)
            b(ret) <- dual(qpsol)[1]

            ymatrix(ret) <- y
            kernelf(ret) <- "Kernel Matrix used."
            ## NOTE: the historical (misspelled) label is kept byte-for-byte
            ## for compatibility with code that matches on type().
            type(ret) <- ("Quantile Regresion")

            ## Training fit and in-sample pinball / ramp losses.
            if (fit) {
              fitted(ret) <- predict(ret, x)
              error(ret) <- c(pinloss(y, fitted(ret), tau), ramploss(y, fitted(ret), tau))
            }
            else fitted(ret) <- NULL

            ## Cross-validation on the pre-computed kernel matrix.
            cross(ret) <- -1
            if (cross == 1)
              cat("\n", "cross should be >1 no cross-validation done!", "\n", "\n")
            else if (cross > 1) {
              crescs <- NULL
              suppressWarnings(vgr <- split(sample(1:m, m), 1:cross))
              for (i in 1:cross) {
                cind <- unsplit(vgr[-i], factor(rep((1:cross)[-i], unlist(lapply(vgr[-i], length)))))
                ## BUGFIX: the original forwarded the undefined names
                ## `kernel` and `scale` (there is no kernel object on this
                ## path) and predicted with the test-by-test block, whose
                ## dimensions cannot match the training coefficients.
                ## Train on the train-by-train block and predict with the
                ## test-by-train cross-kernel block instead.
                cret <- kqr(as.kernelMatrix(x[cind, cind]), y[cind],
                            tau = tau, C = C, cross = 0, fit = FALSE)
                cres <- predict(cret, as.kernelMatrix(x[vgr[[i]], cind, drop = FALSE]))
                crescs <- c(crescs, cres)
              }
              ## No scaling is ever applied on this path, so the responses
              ## are compared on their original scale (the original
              ## referenced the undefined `tmpsc` here).
              ysvgr <- y[unlist(vgr)]
              ## BUGFIX: call `ramploss` (the original called the
              ## nonexistent `ramloss`), without shadowing the functions.
              cvpin <- drop(pinloss(ysvgr, crescs, tau))
              cvramp <- drop(ramploss(ysvgr, crescs, tau))
              cross(ret) <- c(cvpin, cvramp)
            }
            return(ret)
          })
## Mean pinball (quantile) loss at level tau between observations y and
## predictions f.  Matrix input is normalised by the number of rows,
## vector input by its length.
pinloss <- function(y, f, tau)
{
  n <- if (is.vector(y)) length(y) else dim(y)[1]
  resid <- y - f
  pos <- sum(resid[resid >= 0])  # over-predictions weighted by tau
  neg <- sum(resid[resid < 0])   # under-predictions weighted by tau - 1
  (tau * pos + (tau - 1) * neg) / n
}
## Fraction of observations lying at or below their prediction
## ("ramp loss" summary).  `tau` is accepted for interface
## compatibility but does not enter the computation.
ramploss <- function(y, f, tau)
{
  n <- if (is.vector(y)) length(y) else dim(y)[1]
  sum(y <= f) / n
}
setMethod("predict", signature(object = "kqr"),
          function (object, newdata)
          {
            ## Predict conditional quantiles for `newdata`.
            ## With `newdata` missing, the stored fitted values are returned.
            ## `newdata` may be a vector/matrix/data.frame of predictors, or a
            ## kernelMatrix of test-by-training kernel evaluations.
            sc <- 0  # scaling switch; always 0 here, so the scaling branch below always applies
            if (missing(newdata))
              if(!is.null(fitted(object)))
                return(fitted(object))
              else
                stop("newdata is missing and no fitted values found.")
            if(!is(newdata,"kernelMatrix")){
              ncols <- ncol(xmatrix(object))
              nrows <- nrow(xmatrix(object))
              oldco <- ncols
              if (!is.null(terms(object)))
                {
                  ## Model was fitted through the formula interface: rebuild
                  ## the design matrix for the new data.
                  ## NOTE(review): `na.action` is not an argument of this
                  ## method, so this resolves to stats::na.action -- confirm
                  ## the intended NA handling.
                  newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action)
                }
              else
                ## Plain-matrix interface: promote a vector to a one-column
                ## matrix, otherwise coerce to matrix.
                newdata <- if (is.vector (newdata)) t(t(newdata)) else as.matrix(newdata)
              newcols <- 0
              newnrows <- nrow(newdata)
              newncols <- ncol(newdata)
              newco <- newncols
              if (oldco != newco) stop ("test vector does not match model !")
              ## Apply the training-time centring/scaling to the new data.
              if (is.list(scaling(object)) && sc != 1)
                newdata[,scaling(object)$scaled] <-
                  scale(newdata[,scaling(object)$scaled, drop = FALSE],
                        center = scaling(object)$x.scale$"scaled:center",
                        scale = scaling(object)$x.scale$"scaled:scale"
                        )
              ## f(x) = K(newdata, X) %*% alpha - b
              predres <- kernelMult(kernelf(object),newdata,xmatrix(object),as.matrix(alpha(object))) - b(object)
              ## Undo the response scaling if the model was fitted on scaled y.
              if (!is.null(scaling(object)$y.scale))
                return(predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center")
              else
                return(predres)
            }
            else
              {
                ## Precomputed kernel matrix: rows are test points, columns
                ## must match the training points (one per alpha coefficient).
                return(newdata%*%alpha(object) - b(object))
              }
          })
setMethod("show","kqr",
          function(object){
            ## Pretty-printer for "kqr" objects: kernel, cost parameter,
            ## training-set size and (when available) train/CV losses.
            cat("Kernel Quantile Regression object of class \"kqr\"","\n")
            cat("\n")
            show(kernelf(object))
            cat("\n")
            cat("Regularization Cost Parameter C: ",round(param(object)[[1]],9))
            cat(paste("\nNumber of training instances learned :", dim(xmatrix(object))[1],"\n"))
            if(!is.null(fitted(object)))
              cat(paste("Train error :"," pinball loss : ", round(error(object)[1],9)," rambloss :", round(error(object)[2],9),"\n"))
            ##train error & loss
            ## cross(object) is the scalar -1 when no CV was run, but a
            ## length-2 vector c(pinball, ramp) after CV; test only the first
            ## element so the condition stays length-1 (length > 1 conditions
            ## are an error in R >= 4.2).
            if(cross(object)[1] != -1)
              cat("Cross validation error :", " pinballoss : ", round(cross(object)[1],9)," rambloss :", round(cross(object)[2],9),"\n")
          })
| /Programming_Projects/R Projects/kernlab/R/kqr.R | no_license | pmnyc/Data_Engineering_Collections | R | false | false | 10,782 | r | setGeneric("kqr", function(x, ...) standardGeneric("kqr"))
setMethod("kqr",signature(x="formula"),
          function (x, data=NULL, ..., subset, na.action = na.omit, scaled = TRUE){
            ## Formula interface: builds the model frame / design matrix and
            ## delegates the actual fitting to the matrix method.
            cl <- match.call()
            m <- match.call(expand.dots = FALSE)
            ## Coerce a matrix `data` so model.frame() accepts it.
            if (is.matrix(eval(m$data, parent.frame())))
              m$data <- as.data.frame(data)
            ## Rewrite this call into a model.frame() call (dots dropped, the
            ## `x` argument renamed to `formula`) and evaluate it in the
            ## caller's frame.
            m$... <- NULL
            m$formula <- m$x
            m$x <- NULL
            m[[1]] <- as.name("model.frame")
            m <- eval(m, parent.frame())
            Terms <- attr(m, "terms")
            attr(Terms, "intercept") <- 0  # no intercept column in the design matrix
            x <- model.matrix(Terms, m)
            y <- model.extract(m, response)
            ## Expand a scalar `scaled` flag to one entry per column, then
            ## switch scaling off for dummy-coded (contrast) columns.
            if (length(scaled) == 1)
              scaled <- rep(scaled, ncol(x))
            if (any(scaled)) {
              remove <- unique(c(which(labels(Terms) %in% names(attr(x, "contrasts"))),
                                 which(!scaled)
                                 )
                               )
              scaled <- !attr(x, "assign") %in% remove
            }
            ret <- kqr(x, y, scaled = scaled, ...)
            kcall(ret) <- cl
            terms(ret) <- Terms
            ## Record which rows na.action removed so predictions can be
            ## aligned with the original data.
            if (!is.null(attr(m, "na.action")))
              n.action(ret) <- attr(m, "na.action")
            return (ret)
          })
setMethod("kqr",signature(x="vector"),
          function(x,...)
          {
            ## A bare numeric vector is a single predictor: promote it to a
            ## one-column matrix and let the matrix method do the work.
            kqr(t(t(x)), ...)
          })
setMethod("kqr",signature(x="matrix"),
          function (x, y, scaled = TRUE, tau = 0.5, C = 0.1, kernel = "rbfdot", kpar = "automatic", reduced = FALSE, rank = dim(x)[1]/6, fit = TRUE, cross = 0, na.action = na.omit)
          {
            ## Kernel quantile regression on a numeric design matrix.
            ## tau is the quantile level, C the cost parameter; the model is
            ## fitted by solving a quadratic program with ipop().
            if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1")
            ret <- new("kqr")
            param(ret) <- list(C = C, tau = tau)
            ## Missing-value handling via na.action (default na.omit).
            if (is.null(y))
              x <- na.action(x)
            else {
              df <- na.action(data.frame(y, x))
              y <- df[,1]
              x <- as.matrix(df[,-1])
            }
            ncols <- ncol(x)
            m <- nrows <- nrow(x)
            x.scale <- y.scale <- NULL
            ## scaling: expand a scalar flag to one per column, then centre and
            ## scale the selected columns (and y), remembering the parameters
            ## so predict() can undo them.
            if (length(scaled) == 1)
              scaled <- rep(scaled, ncol(x))
            if (any(scaled)) {
              co <- !apply(x[,scaled, drop = FALSE], 2, var)
              if (any(co)) {
                ## Build the message *before* clearing `scaled`; the original
                ## cleared it first, which made the column subset empty and
                ## the reported variable names wrong.
                badnames <- paste("`",colnames(x[,scaled, drop = FALSE])[co],
                                  "'", sep="", collapse=" and ")
                scaled <- rep(FALSE, ncol(x))
                warning(paste("Variable(s)", badnames,
                              "constant. Cannot scale data.")
                        )
              } else {
                xtmp <- scale(x[,scaled])
                x[,scaled] <- xtmp
                x.scale <- attributes(xtmp)[c("scaled:center","scaled:scale")]
                y <- scale(y)
                y.scale <- attributes(y)[c("scaled:center","scaled:scale")]
                y <- as.vector(y)
                tmpsc <- list(scaled = scaled, x.scale = x.scale,y.scale = y.scale)
              }
            }
            ## Arrange all the kernel mambo jumbo: resolve a kernel name or
            ## function into a kernlab kernel object, filling in default
            ## parameters ("automatic" sigma for RBF/Laplace via sigest).
            if(is.character(kernel)){
              kernel <- match.arg(kernel,c("rbfdot","polydot","tanhdot","vanilladot","laplacedot","besseldot","anovadot","splinedot"))
              if(is.character(kpar))
                if((kernel == "tanhdot" || kernel == "vanilladot" || kernel == "polydot"|| kernel == "besseldot" || kernel== "anovadot"|| kernel=="splinedot") && kpar=="automatic" )
                  {
                    cat (" Setting default kernel parameters ","\n")
                    kpar <- list()
                  }
            }
            if (!is.function(kernel))
              if (!is.list(kpar)&&is.character(kpar)&&(class(kernel)=="rbfkernel" || class(kernel) =="laplacedot" || kernel == "laplacedot"|| kernel=="rbfdot")){
                kp <- match.arg(kpar,"automatic")
                if(kp=="automatic")
                  kpar <- list(sigma=mean(sigest(x,scaled=FALSE,frac=1)[c(1,3)]))
                cat("Using automatic sigma estimation (sigest) for RBF or laplace kernel","\n")
              }
            if(!is(kernel,"kernel"))
              {
                if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
                kernel <- do.call(kernel, kpar)
              }
            if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")
            ## Setup QP problem and call ipop: linear term -y, quadratic term
            ## the kernel matrix, equality constraint sum(alpha)=0 (A=1, b=0,
            ## r=0), box constraints C(tau-1) <= alpha_i <= C*tau.
            ## `reduced` uses an incomplete Cholesky (csi) approximation of K.
            if(!reduced)
              H = kernelMatrix(kernel,x)
            else
              H = csi(x, kernel = kernel, rank = rank)
            c = -y
            A = rep(1,m)
            b = 0
            r = 0
            l = matrix(C * (tau-1),m,1)
            u = matrix(C * tau ,m,1)
            qpsol = ipop(c, H, A, b, l, u, r)
            alpha(ret)= coef(ret) = primal(qpsol)
            b(ret) = dual(qpsol)[1]
            ## Store model pieces and compute training error/loss.
            xmatrix(ret) <- x
            ymatrix(ret) <- y
            kernelf(ret) <- kernel
            kpar(ret) <- kpar
            type(ret) <- ("Quantile Regresion")
            if (fit){
              fitted(ret) <- predict(ret, x)
              ## NOTE(review): scaling(ret) is only assigned further below, so
              ## this back-transform cannot fire at training time and the
              ## error is computed consistently on the scaled y; kept as-is.
              if (!is.null(scaling(ret)$y.scale))
                fitted(ret) <- fitted(ret) * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
              error(ret) <- c(pinloss(y, fitted(ret), tau), ramploss(y,fitted(ret),tau))
            }
            else fitted(ret) <- NULL
            if(any(scaled))
              scaling(ret) <- tmpsc
            ## Crossvalidation: k-fold CV reporting c(pinball, ramp) loss.
            cross(ret) <- -1
            if(cross == 1)
              cat("\n","cross should be >1 no cross-validation done!","\n","\n")
            else if (cross > 1)
              {
                pinloss <- 0
                ramloss <- 0
                crescs <- NULL
                suppressWarnings(vgr<-split(sample(1:m,m),1:cross))
                for(i in 1:cross)
                  {
                    cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length)))))
                    ## `scaled = FALSE` spelled out (the original relied on
                    ## partial matching of `scale`); data are already scaled.
                    cret <- kqr(x[cind,],y[cind], tau = tau, C = C, scaled = FALSE, kernel = kernel, cross = 0, fit = FALSE)
                    cres <- predict(cret, x[vgr[[i]],])
                    crescs <- c(crescs,cres)
                  }
                ## Report CV losses on the original response scale.
                if (!is.null(scaling(ret)$y.scale)){
                  crescs <- crescs * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
                  ysvgr <- y[unlist(vgr)] * tmpsc$y.scale$"scaled:scale" + tmpsc$y.scale$"scaled:center"
                }
                else
                  ysvgr <- y[unlist(vgr)]
                pinloss <- drop(pinloss(ysvgr, crescs, tau))
                ## fixed: called the nonexistent `ramloss()`; the function is
                ## ramploss(), so every cross-validated fit failed here.
                ramloss <- drop(ramploss(ysvgr, crescs, tau))
                cross(ret) <- c(pinloss, ramloss)
              }
            return(ret)
          })
setMethod("kqr",signature(x="list"),
          function (x, y, tau = 0.5, C = 0.1, kernel = "stringdot", kpar = list(length=4, lambda=0.5), fit = TRUE, cross = 0)
          {
            ## Kernel quantile regression for list input (e.g. a list of
            ## strings scored with a string kernel).
            ##
            ## Fixed defaults: the kernel name was misspelled ("strigdot"),
            ## and `C` is not a stringdot() argument -- `lambda` is.  With the
            ## old defaults do.call() below errored before any fitting.
            if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1")
            if(!is(kernel,"kernel"))
              {
                if(is(kernel,"function")) kernel <- deparse(substitute(kernel))
                kernel <- do.call(kernel, kpar)
              }
            if(!is(kernel,"kernel")) stop("kernel must inherit from class `kernel'")
            ## Precompute the kernel matrix and delegate to the
            ## kernelMatrix method, then record the kernel used.
            K <- kernelMatrix(kernel,x)
            ret <- kqr(K,y = y,tau = tau, C = C, fit = fit, cross = cross)
            kernelf(ret) <- kernel
            kpar(ret) <- kpar
            return(ret)
          })
setMethod("kqr",signature(x="kernelMatrix"),
          function (x, y, tau = 0.5, C = 0.1, fit = TRUE, cross = 0)
          {
            ## Kernel quantile regression on a precomputed n x n kernel matrix.
            ## y : numeric response; tau : quantile level; C : cost parameter.
            if((tau > 1)||(tau < 0 )) stop("tau has to be strictly between 0 and 1")
            ret <- new("kqr")
            param(ret) <- list(C = C, tau = tau)
            ncols <- ncol(x)
            m <- nrows <- nrow(x)
            y <- as.vector(y)
            ## Setup QP problem and call ipop: linear term -y, quadratic term
            ## the kernel matrix, equality constraint sum(alpha)=0 (A=1, b=0,
            ## r=0), box constraints C(tau-1) <= alpha_i <= C*tau.
            H = x
            c = -y
            A = rep(1,m)
            b = 0
            r = 0
            l = matrix(C * (tau-1),m,1)
            u = matrix(C * tau ,m,1)
            qpsol = ipop(c, H, A, b, l, u, r)
            alpha(ret)= coef(ret) = primal(qpsol)
            b(ret) = dual(qpsol)[1]
            ## Compute training error/loss
            ymatrix(ret) <- y
            kernelf(ret) <- "Kernel Matrix used."
            type(ret) <- ("Quantile Regresion")
            if (fit){
              fitted(ret) <- predict(ret, x)
              error(ret) <- c(pinloss(y, fitted(ret), tau), ramploss(y,fitted(ret),tau))
            }
            else fitted(ret) <- NULL  # was a bare `NA`; set the slot explicitly like the other methods
            ## Crossvalidation: k-fold CV reporting c(pinball, ramp) loss.
            cross(ret) <- -1
            if(cross == 1)
              cat("\n","cross should be >1 no cross-validation done!","\n","\n")
            else if (cross > 1)
              {
                pinloss <- 0
                ramloss <- 0
                crescs <- NULL
                suppressWarnings(vgr<-split(sample(1:m,m),1:cross))
                for(i in 1:cross)
                  {
                    cind <- unsplit(vgr[-i],factor(rep((1:cross)[-i],unlist(lapply(vgr[-i],length)))))
                    ## Refit on the training-fold submatrix.  The original
                    ## passed `scale` and `kernel` arguments that do not exist
                    ## for this method (`kernel` is not even defined here) and
                    ## let the subset degrade to a plain matrix; keep it a
                    ## kernelMatrix so dispatch stays in this method.
                    cret <- kqr(as.kernelMatrix(x[cind,cind]), y[cind], tau = tau, C = C, cross = 0, fit = FALSE)
                    ## Prediction needs the test-by-train block of the kernel
                    ## matrix (one column per alpha coefficient); the original
                    ## used the square test-by-test block.
                    cres <- predict(cret, as.kernelMatrix(x[vgr[[i]],cind,drop = FALSE]))
                    crescs <- c(crescs,cres)
                  }
                ## No scaling is performed in this method, so the responses are
                ## used as given (the original referenced an undefined `tmpsc`
                ## in a branch that could never be taken).
                ysvgr <- y[unlist(vgr)]
                pinloss <- drop(pinloss(ysvgr, crescs, tau))
                ## fixed: called the nonexistent `ramloss()`; the function is
                ## ramploss(), so every CV run failed here.
                ramloss <- drop(ramploss(ysvgr, crescs, tau))
                cross(ret) <- c(pinloss, ramloss)
              }
            return(ret)
          })
## Mean pinball (quantile) loss at level tau between observations y and
## predictions f.  Matrix input is normalised by the number of rows
## (dim(y)[1]); vector input by its length.
pinloss <- function(y,f,tau)
{
  if(is.vector(y)) m <- length(y)
  else m <- dim(y)[1]
  tmp <- y - f
  ## positive residuals weighted by tau, negative ones by (tau - 1)
  return((tau *sum(tmp*(tmp>=0)) + (tau-1) * sum(tmp * (tmp<0)))/m)
}
## Fraction of observations lying at or below their prediction
## (sum(y <= f)/m), used as the "ramp loss" summary.
## `tau` is accepted for interface compatibility but unused.
ramploss <- function(y,f,tau)
{
  if(is.vector(y)) m <- length(y)
  else m <- dim(y)[1]
  return(sum(y<=f)/m)
}
setMethod("predict", signature(object = "kqr"),
          function (object, newdata)
          {
            ## Predict conditional quantiles for `newdata`.
            ## With `newdata` missing, the stored fitted values are returned.
            ## `newdata` may be a vector/matrix/data.frame of predictors, or a
            ## kernelMatrix of test-by-training kernel evaluations.
            sc <- 0  # scaling switch; always 0 here, so the scaling branch below always applies
            if (missing(newdata))
              if(!is.null(fitted(object)))
                return(fitted(object))
              else
                stop("newdata is missing and no fitted values found.")
            if(!is(newdata,"kernelMatrix")){
              ncols <- ncol(xmatrix(object))
              nrows <- nrow(xmatrix(object))
              oldco <- ncols
              if (!is.null(terms(object)))
                {
                  ## Model was fitted through the formula interface: rebuild
                  ## the design matrix for the new data.
                  ## NOTE(review): `na.action` is not an argument of this
                  ## method, so this resolves to stats::na.action -- confirm
                  ## the intended NA handling.
                  newdata <- model.matrix(delete.response(terms(object)), as.data.frame(newdata), na.action = na.action)
                }
              else
                ## Plain-matrix interface: promote a vector to a one-column
                ## matrix, otherwise coerce to matrix.
                newdata <- if (is.vector (newdata)) t(t(newdata)) else as.matrix(newdata)
              newcols <- 0
              newnrows <- nrow(newdata)
              newncols <- ncol(newdata)
              newco <- newncols
              if (oldco != newco) stop ("test vector does not match model !")
              ## Apply the training-time centring/scaling to the new data.
              if (is.list(scaling(object)) && sc != 1)
                newdata[,scaling(object)$scaled] <-
                  scale(newdata[,scaling(object)$scaled, drop = FALSE],
                        center = scaling(object)$x.scale$"scaled:center",
                        scale = scaling(object)$x.scale$"scaled:scale"
                        )
              ## f(x) = K(newdata, X) %*% alpha - b
              predres <- kernelMult(kernelf(object),newdata,xmatrix(object),as.matrix(alpha(object))) - b(object)
              ## Undo the response scaling if the model was fitted on scaled y.
              if (!is.null(scaling(object)$y.scale))
                return(predres * scaling(object)$y.scale$"scaled:scale" + scaling(object)$y.scale$"scaled:center")
              else
                return(predres)
            }
            else
              {
                ## Precomputed kernel matrix: rows are test points, columns
                ## must match the training points (one per alpha coefficient).
                return(newdata%*%alpha(object) - b(object))
              }
          })
setMethod("show","kqr",
          function(object){
            ## Pretty-printer for "kqr" objects: kernel, cost parameter,
            ## training-set size and (when available) train/CV losses.
            cat("Kernel Quantile Regression object of class \"kqr\"","\n")
            cat("\n")
            show(kernelf(object))
            cat("\n")
            cat("Regularization Cost Parameter C: ",round(param(object)[[1]],9))
            cat(paste("\nNumber of training instances learned :", dim(xmatrix(object))[1],"\n"))
            if(!is.null(fitted(object)))
              cat(paste("Train error :"," pinball loss : ", round(error(object)[1],9)," rambloss :", round(error(object)[2],9),"\n"))
            ##train error & loss
            ## cross(object) is the scalar -1 when no CV was run, but a
            ## length-2 vector c(pinball, ramp) after CV; test only the first
            ## element so the condition stays length-1 (length > 1 conditions
            ## are an error in R >= 4.2).
            if(cross(object)[1] != -1)
              cat("Cross validation error :", " pinballoss : ", round(cross(object)[1],9)," rambloss :", round(cross(object)[2],9),"\n")
          })
|
##----------------------------------------------------------------------------##
## ***
##----------------------------------------------------------------------------##
# set.seed(122)
# histdata <- rnorm(500)
#
# output[["box_plot1"]] <- renderPlot({
# data <- histdata[seq_len(input[["box_slider1"]])]
# hist(data)
# })
#
# output[["box_plot2"]] <- renderPlot({
# data <- histdata[seq_len(input[["box_slider2"]])]
# hist(data)
# })
| /shiny/server_template.R | no_license | milktea-muffin/r-shiny-electron | R | false | false | 445 | r | ##----------------------------------------------------------------------------##
## ***
##----------------------------------------------------------------------------##
# set.seed(122)
# histdata <- rnorm(500)
#
# output[["box_plot1"]] <- renderPlot({
# data <- histdata[seq_len(input[["box_slider1"]])]
# hist(data)
# })
#
# output[["box_plot2"]] <- renderPlot({
# data <- histdata[seq_len(input[["box_slider2"]])]
# hist(data)
# })
|
# replicate Marchinko
# April 5, 2018
replicate_marchinko <- function(marchinko){
  # Replicate Marchinko's Table 1: size-correct each armour trait within
  # family, average by family x treatment, form standardized selection
  # differentials (pred mean - control mean) / control SD per family, and
  # return the median differential per trait.
  # NOTE: `marchinko` must be a data.table; the `:=` calls below modify it
  # by reference, so the caller's object is altered as a side effect.
  # columns are
  # population
  # family
  # treatment {no = -insect, pred = +insect}
  # individual
  # standlength
  # ant.dorspine
  # sec.dorspine
  # pelspine.len
  # pelgirdle.len
  # plate.number
  # eda.genotype {AA, Aa, aa}
  # pelgirdle.presence {1=yes, 0 = no}
  # pelspine.presence {1 = yes, 0 = no}
  # Because spine and girdle lengths grow with body size, I corrected these traits for size using residuals from an ordinary least squares regressions of each trait on standard length.
  # Standardized selection differentials (i) were calculated according to equation (6.1) in Endler (1986), i = \bar{X}_a - \bar{X}_b)/\sqrt{var_b} where \bar{X}_a and \bar{X}_b were the mean trait values of fish from a single family measured at the end of the predation and control treatments, respectively, and var_b is trait variance in the control treatment. Selection differentials were calculated for each family separately and may be found in the Supporting Table S1
  # standlength replicates but...
  # regression ignoring treatment level does not replicate
  # regression within treatment levels does not replicate
  # regression within family replicates. Lots of noise here, a multilevel model would be better
  # *** recode any length=0.0 to NA
  # replace 0.0 with NA to replicate
  marchinko[ant.dorspine==0,ant.dorspine:=NA]
  marchinko[sec.dorspine==0,sec.dorspine:=NA]
  marchinko[pelspine.len==0,pelspine.len:=NA]
  marchinko[pelgirdle.len==0,pelgirdle.len:=NA]
  # *** ant.dorspine family 6 does not replicate
  # size-correct each trait within family (residuals of trait ~ standlength)
  marchinko[, ant.dorspine.s:=size_corrected(ant.dorspine, standlength), by=family]
  marchinko[, sec.dorspine.s:=size_corrected(sec.dorspine, standlength), by=family]
  marchinko[, pelspine.len.s:=size_corrected(pelspine.len, standlength), by=family]
  marchinko[, pelgirdle.len.s:=size_corrected(pelgirdle.len, standlength), by=family]
  # family x treatment means and SDs of the size-corrected traits
  working_table <- marchinko[, .(standlength=mean(standlength, na.rm=TRUE),
                                 ant.dorspine.s=mean(ant.dorspine.s, na.rm=TRUE),
                                 sec.dorspine.s=mean(sec.dorspine.s, na.rm=TRUE),
                                 pelspine.len.s=mean(pelspine.len.s, na.rm=TRUE),
                                 pelgirdle.len.s=mean(pelgirdle.len.s, na.rm=TRUE),
                                 sd.standlength=sd(standlength, na.rm=TRUE),
                                 sd.ant.dorspine.s=sd(ant.dorspine.s, na.rm=TRUE),
                                 sd.sec.dorspine.s=sd(sec.dorspine.s, na.rm=TRUE),
                                 sd.pelspine.len.s=sd(pelspine.len.s, na.rm=TRUE),
                                 sd.pelgirdle.len.s=sd(pelgirdle.len.s, na.rm=TRUE)
                                 ), by=.(family, treatment)]
  # NOTE(review): the subtraction below assumes the 'no' and 'pred' subsets
  # are in the same family order -- verify with the data.
  working_table.no <- working_table[treatment=='no']
  working_table.pred <- working_table[treatment=='pred']
  mucols <- c('standlength', 'ant.dorspine.s', 'sec.dorspine.s', 'pelspine.len.s', 'pelgirdle.len.s')
  sdcols <- c('sd.standlength', 'sd.ant.dorspine.s', 'sd.sec.dorspine.s', 'sd.pelspine.len.s', 'sd.pelgirdle.len.s')
  # checks - ant.dorspine family 6 does not replicate
  # (this block only derives throwaway values for inspection; nothing here
  # feeds into the returned table)
  round(working_table.no[, .SD, .SDcols=mucols], 3)
  round(working_table.pred[, .SD, .SDcols=mucols], 3)
  y <- marchinko[family=='6', ant.dorspine]
  sl <- marchinko[family=='6', standlength]
  y[y==0] <- NA  # zeros were already recoded to NA above; kept for safety
  dt <- na.omit(data.table(y=y, sl=sl, treatment=marchinko[family=='6',treatment]))
  dt[, y.s := residuals(lm(y~sl))]
  dt[, .(mean=mean(y.s)), by=treatment]
  # Marchinko table 1
  # standardized differential per family, then the median across families
  diff_table <- (working_table.pred[, .SD, .SDcols=mucols] - working_table.no[, .SD, .SDcols=mucols])/working_table.no[, .SD, .SDcols=sdcols]
  table_1 <- apply(diff_table, 2, median)
  return(table_1)
}
size_corrected <- function(x, sl, centered=TRUE){
  # Size-correct a trait `x` by regressing it on standard length `sl` and
  # returning the residuals.  na.exclude keeps the output aligned with the
  # input (NAs preserved in place).
  # centered = TRUE returns the raw residuals (mean ~ 0);
  # centered = FALSE shifts them back to the original trait mean so values
  # stay on the trait's scale.
  # To correct within groups, simply use by=.(grouping) in the data.table
  # e.g.
  # marchinko[, ant.dorspine.s:=size_corrected(ant.dorspine, standlength), by=family]
  x.s <- residuals(lm(x ~ sl, na.action = na.exclude))
  if(centered==FALSE){
    # na.rm: mean(x) is NA whenever x contains NA (zeros are recoded to NA
    # upstream), which used to wipe out every residual here.
    x.s <- x.s + mean(x, na.rm = TRUE)
  }
  return(x.s)
}
# replicate Marchinko
# April 5, 2018
replicate_marchinko <- function(marchinko){
  # Replicate Marchinko's Table 1: size-correct each armour trait within
  # family, average by family x treatment, form standardized selection
  # differentials (pred mean - control mean) / control SD per family, and
  # return the median differential per trait.
  # NOTE: `marchinko` must be a data.table; the `:=` calls below modify it
  # by reference, so the caller's object is altered as a side effect.
  # columns are
  # population
  # family
  # treatment {no = -insect, pred = +insect}
  # individual
  # standlength
  # ant.dorspine
  # sec.dorspine
  # pelspine.len
  # pelgirdle.len
  # plate.number
  # eda.genotype {AA, Aa, aa}
  # pelgirdle.presence {1=yes, 0 = no}
  # pelspine.presence {1 = yes, 0 = no}
  # Because spine and girdle lengths grow with body size, I corrected these traits for size using residuals from an ordinary least squares regressions of each trait on standard length.
  # Standardized selection differentials (i) were calculated according to equation (6.1) in Endler (1986), i = \bar{X}_a - \bar{X}_b)/\sqrt{var_b} where \bar{X}_a and \bar{X}_b were the mean trait values of fish from a single family measured at the end of the predation and control treatments, respectively, and var_b is trait variance in the control treatment. Selection differentials were calculated for each family separately and may be found in the Supporting Table S1
  # standlength replicates but...
  # regression ignoring treatment level does not replicate
  # regression within treatment levels does not replicate
  # regression within family replicates. Lots of noise here, a multilevel model would be better
  # *** recode any length=0.0 to NA
  # replace 0.0 with NA to replicate
  marchinko[ant.dorspine==0,ant.dorspine:=NA]
  marchinko[sec.dorspine==0,sec.dorspine:=NA]
  marchinko[pelspine.len==0,pelspine.len:=NA]
  marchinko[pelgirdle.len==0,pelgirdle.len:=NA]
  # *** ant.dorspine family 6 does not replicate
  # size-correct each trait within family (residuals of trait ~ standlength)
  marchinko[, ant.dorspine.s:=size_corrected(ant.dorspine, standlength), by=family]
  marchinko[, sec.dorspine.s:=size_corrected(sec.dorspine, standlength), by=family]
  marchinko[, pelspine.len.s:=size_corrected(pelspine.len, standlength), by=family]
  marchinko[, pelgirdle.len.s:=size_corrected(pelgirdle.len, standlength), by=family]
  # family x treatment means and SDs of the size-corrected traits
  working_table <- marchinko[, .(standlength=mean(standlength, na.rm=TRUE),
                                 ant.dorspine.s=mean(ant.dorspine.s, na.rm=TRUE),
                                 sec.dorspine.s=mean(sec.dorspine.s, na.rm=TRUE),
                                 pelspine.len.s=mean(pelspine.len.s, na.rm=TRUE),
                                 pelgirdle.len.s=mean(pelgirdle.len.s, na.rm=TRUE),
                                 sd.standlength=sd(standlength, na.rm=TRUE),
                                 sd.ant.dorspine.s=sd(ant.dorspine.s, na.rm=TRUE),
                                 sd.sec.dorspine.s=sd(sec.dorspine.s, na.rm=TRUE),
                                 sd.pelspine.len.s=sd(pelspine.len.s, na.rm=TRUE),
                                 sd.pelgirdle.len.s=sd(pelgirdle.len.s, na.rm=TRUE)
                                 ), by=.(family, treatment)]
  # NOTE(review): the subtraction below assumes the 'no' and 'pred' subsets
  # are in the same family order -- verify with the data.
  working_table.no <- working_table[treatment=='no']
  working_table.pred <- working_table[treatment=='pred']
  mucols <- c('standlength', 'ant.dorspine.s', 'sec.dorspine.s', 'pelspine.len.s', 'pelgirdle.len.s')
  sdcols <- c('sd.standlength', 'sd.ant.dorspine.s', 'sd.sec.dorspine.s', 'sd.pelspine.len.s', 'sd.pelgirdle.len.s')
  # checks - ant.dorspine family 6 does not replicate
  # (this block only derives throwaway values for inspection; nothing here
  # feeds into the returned table)
  round(working_table.no[, .SD, .SDcols=mucols], 3)
  round(working_table.pred[, .SD, .SDcols=mucols], 3)
  y <- marchinko[family=='6', ant.dorspine]
  sl <- marchinko[family=='6', standlength]
  y[y==0] <- NA  # zeros were already recoded to NA above; kept for safety
  dt <- na.omit(data.table(y=y, sl=sl, treatment=marchinko[family=='6',treatment]))
  dt[, y.s := residuals(lm(y~sl))]
  dt[, .(mean=mean(y.s)), by=treatment]
  # Marchinko table 1
  # standardized differential per family, then the median across families
  diff_table <- (working_table.pred[, .SD, .SDcols=mucols] - working_table.no[, .SD, .SDcols=mucols])/working_table.no[, .SD, .SDcols=sdcols]
  table_1 <- apply(diff_table, 2, median)
  return(table_1)
}
size_corrected <- function(x, sl, centered=TRUE){
  # Size-correct a trait `x` by regressing it on standard length `sl` and
  # returning the residuals.  na.exclude keeps the output aligned with the
  # input (NAs preserved in place).
  # centered = TRUE returns the raw residuals (mean ~ 0);
  # centered = FALSE shifts them back to the original trait mean so values
  # stay on the trait's scale.
  # To correct within groups, simply use by=.(grouping) in the data.table
  # e.g.
  # marchinko[, ant.dorspine.s:=size_corrected(ant.dorspine, standlength), by=family]
  x.s <- residuals(lm(x ~ sl, na.action = na.exclude))
  if(centered==FALSE){
    # na.rm: mean(x) is NA whenever x contains NA (zeros are recoded to NA
    # upstream), which used to wipe out every residual here.
    x.s <- x.s + mean(x, na.rm = TRUE)
  }
  return(x.s)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.