content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
setwd('~/Desktop/R_plotting_workshop')
plot.each.gene.expr = function(input_filepath){
  # Plot each gene's expression trajectory over time as one semi-transparent line.
  #
  # 'input_filepath' is a whitespace/tab-delimited file with a header row, where
  # each column is a time-point, each row a gene, and each value that gene's
  # expression level at that time-point. The last column is a per-gene summary
  # statistic and is dropped before plotting. The plot is written to
  # '<input_filepath>_line_plots.pdf'.
  expr <- read.table(input_filepath, header=TRUE)
  # Drop the last column: it is a per-gene summary statistic, not a time-point.
  # drop=FALSE keeps a data frame even when only one time-point remains.
  expr <- expr[, -ncol(expr), drop=FALSE]
  # Uncomment if the file carries row identifiers in the 1st column (as in the
  # allele-frequency trajectory file):
  #expr <- expr[,-1]
  # Global maximum fixes the y-axis so every trajectory fits on the same plot.
  max_value <- max(expr)
  num_tpoints <- ncol(expr)
  pdf(paste(input_filepath, '_line_plots.pdf', sep=''))
  # Guarantee the PDF device is closed even if plotting fails part-way through.
  on.exit(dev.off())
  # Draw the first gene with plot() to set up axes; overlay the rest with
  # lines(). as.numeric() is required: a data-frame row is a list, which
  # xy.coords() cannot coerce, so the original call would error.
  plot(x=0:(num_tpoints-1), y=as.numeric(expr[1, ]),
       xlim=c(0, num_tpoints-1), ylim=c(0, max_value),
       main='Each V Gene Heavy Expression Trajectory',
       xlab='Time (days since vaccination)', ylab='Gene Expression Level',
       type='l', col=rgb(0, 0, 0, 0.1))
  num_genes <- nrow(expr)
  # seq_len() guards the single-gene case: the original 2:num_genes would
  # iterate c(2, 1) when num_genes == 1 and index past the data.
  for (i in seq_len(num_genes - 1) + 1){
    lines(x=0:(num_tpoints-1), y=as.numeric(expr[i, ]), col=rgb(0, 0, 0, 0.1))
  }
}
plot.stacked.barplot = function(input_filepath){
  # Draw a stacked bar plot of the expression data at 'input_filepath'.
  #
  # The total bar height at a time-point is the cumulative gene expression at
  # that time; each coloured segment within a bar is one gene's contribution.
  # The last input column (a per-gene summary) is dropped. Output goes to
  # '<input_filepath>_stacked_barplot.pdf'.
  expr <- read.table(input_filepath, header=TRUE)
  # data.matrix() actually coerces every column to numeric. The original
  # as.matrix(t, mode='numeric') silently ignored 'mode' (as.matrix.data.frame
  # has no such argument), which yields a character matrix -- and a barplot()
  # error -- whenever any column is non-numeric.
  m <- data.matrix(expr)
  # 10 colours, recycled by barplot() every 10 genes; enough to tell
  # neighbouring segments apart.
  colors <- rainbow(10)
  # Drop the last column (summary statistic). drop=FALSE keeps the matrix
  # shape even when only one time-point column remains.
  m <- m[, -ncol(m), drop=FALSE]
  num_tpoints <- ncol(m)
  pdf(paste(input_filepath, '_stacked_barplot.pdf', sep=''))
  # Close the device even if barplot() errors.
  on.exit(dev.off())
  barplot(m, col=colors, names.arg=0:(num_tpoints-1),
          main='Each V Gene Heavy Expression Level',
          xlab='Time (days since vaccination)',
          ylab='Cumulative Expression Level')
}
parse.data.for.ggplot = function(input_filepath){
  # Reshape the expression matrix at 'input_filepath' into the long-format
  # data frame ggplot2 expects: one row per (gene, time-point), with columns
  # expr_level, tpoints and gene_ids.
  #
  # The last input column (a per-gene summary) is dropped. The result is also
  # written to '<input_filepath>_ggplot_friendly_dataframe' and returned.
  raw <- read.table(input_filepath, header=TRUE)
  m <- data.matrix(raw[, -ncol(raw), drop=FALSE])
  num_genes <- nrow(m)
  num_tpoints <- ncol(m)
  # One random ID per gene. runif(n) consumes the RNG stream exactly as the
  # original per-gene runif(1) calls did, so seeded runs are unchanged.
  # NOTE(review): random IDs can collide in principle; row names or a row
  # index would be a safer identifier.
  gene_ids <- as.character(runif(num_genes, 0, 100))
  # Vectorised construction replaces the original O(n^2) append() loops:
  # expression values are laid out gene-by-gene (row-major via t(m)), each
  # gene paired with time-points 0..(num_tpoints-1) and its own ID.
  data <- data.frame(
    expr_level = as.vector(t(m)),
    tpoints = rep(seq_len(num_tpoints) - 1, times = num_genes),
    gene_ids = rep(gene_ids, each = num_tpoints)
  )
  # Persist alongside the input; see ?write.table for the format options used.
  data_filepath <- paste(input_filepath, '_ggplot_friendly_dataframe', sep='')
  write.table(data, data_filepath, row.names=FALSE, quote=FALSE)
  # Return the long-format data frame for direct use by the caller.
  return (data)
}
plot_stacked_area = function(input_filepath){
  # Make a stacked area chart (ggplot2 geom_area) of the expression data.
  #
  # Conveys the same information as the stacked bar plot but as continuous
  # areas: each colour is a gene and the vertical extent of that colour at a
  # time-point is its expression level. Output goes to
  # '<input_filepath>_stacked_area_plot.pdf'.
  # Reshape the matrix into the long format ggplot needs.
  data_frame <- parse.data.for.ggplot(input_filepath)
  # ggplot2 must be installed; run install.packages('ggplot2') once if not.
  library(ggplot2)
  # Map columns to aesthetics: x = time, y = expression, one fill per gene.
  p <- ggplot(data_frame, aes(x=tpoints, y=expr_level, fill=gene_ids)) +
    geom_area(position='stack') +
    # 'none' is the supported way to drop a legend; guides(fill=FALSE) has
    # been deprecated since ggplot2 3.3.4 and warns on every call.
    guides(fill='none') +
    ggtitle('Each V Gene Expression Trajectories') +
    xlab('Time (days since vaccination)') +
    ylab('Cumulative Expression Level')
  pdf(paste(input_filepath, '_stacked_area_plot.pdf', sep=''))
  # Close the device even if rendering fails.
  on.exit(dev.off())
  # print() (not plot()) is the canonical way to render a ggplot object from
  # inside a function.
  print(p)
}
| /plotting_functions.R | no_license | nbstrauli/R_plotting_workshop | R | false | false | 6,496 | r | setwd('~/Desktop/R_plotting_workshop')
plot.each.gene.expr = function(input_filepath){
#NOTE(review): byte-for-byte duplicate of the plot.each.gene.expr definition earlier in this dump; kept verbatim.
#This function plots each gene's expression trajectory over time as a semi-transparent line. 'input_filepath' is a tab-delimited file where each column represents a time-point, each row is a different gene, and each value in the matrix is that gene's expression level, at that time-point. The expression levels are normalized by time-point, so that each column sums to one.
#First read in the data as a data frame
t = read.table(input_filepath, header=TRUE)
#remove last column of data because this is a summary statistic for each of the genes
num_columns = length(t[1,])
t = t[,-num_columns]
#uncomment the line below if the data file has row identifiers as the 1st column (which is the case for the allele freq trajectory file)
#t = t[,-1]
#now find the maximum value in all the data to make appropriate limits for the plot's y-axis
max_value = max(t)
#also find number of time-points for x-axis limits
num_tpoints = length(t[1,])
#set filename and file-type (pdf in this case) for plot
pdf(paste(input_filepath, '_line_plots.pdf', sep=''))
#now use plot() function to plot first line of data, and set up the axes and such. The first two arguments give the x and y coordinants for each data-point, respectively.
#see: https://stat.ethz.ch/R-manual/R-devel/library/graphics/html/plot.html
#and https://stat.ethz.ch/R-manual/R-devel/library/graphics/html/par.html
#for list of parameters that can be passed to plot()
#NOTE(review): t[1,] is a data-frame row (a list); xy.coords() may fail to coerce it -- confirm this ran on the intended R version.
plot(x=0:(num_tpoints-1), y=t[1,], xlim=c(0,num_tpoints-1), ylim=c(0,max_value), main='Each V Gene Heavy Expression Trajectory', xlab='Time (days since vaccination)', ylab='Gene Expression Level', type='l', col=rgb(0,0,0,0.1))
#now loop through all remaining rows (genes) in the data
num_genes = length(t[,1])
#NOTE(review): 2:num_genes iterates c(2, 1) when there is only one gene; assumes >= 2 rows of input.
for (i in 2:num_genes){
lines(x=0:(num_tpoints-1), y=t[i,], col=rgb(0,0,0,0.1))
}
#this will instruct R to close the file that the plot is being written
dev.off()
}
plot.stacked.barplot = function(input_filepath){
#NOTE(review): byte-for-byte duplicate of the plot.stacked.barplot definition earlier in this dump; kept verbatim.
#This script uses the function barplot() to make a stacked bar plot of the data. Here, the overall length of a bar for a time-point corresponds to the cumulative gene expression at that time. Each color within a bar corresponds to a unique gene, so that the length of an individual colored segment corresponds to that gene's expression level
#read in data
t = read.table(input_filepath, header=TRUE)
#convert to matrix because 'barplot()' only takes vectors of matrices as input.
#NOTE(review): as.matrix() has no 'mode' argument for data frames -- it is silently ignored; data.matrix() would guarantee numeric coercion.
m = as.matrix(t, mode='numeric')
#make different colors for different genes using rainbow() function
#see: https://stat.ethz.ch/R-manual/R-devel/library/grDevices/html/palettes.html for more info
#note that this only generates 10 colors. So the color pattern repeats every 10 genes, but this should be sufficient to distinguish between genes in the plot.
colors = rainbow(10)
#remove last column because it is not a time-point
num_columns = length(m[1,])
m = m[,-num_columns]
num_tpoints = length(m[1,])
#now make barplot. See: https://stat.ethz.ch/R-manual/R-devel/library/graphics/html/barplot.html for more info
pdf(paste(input_filepath, '_stacked_barplot.pdf', sep=''))
barplot(m, col=colors, names.arg=0:(num_tpoints-1), main='Each V Gene Heavy Expression Level', xlab='Time (days since vaccination)', ylab='Cumulative Expression Level')
dev.off()
}
parse.data.for.ggplot = function(input_filepath){
#NOTE(review): byte-for-byte duplicate of the parse.data.for.ggplot definition earlier in this dump; kept verbatim.
#This script will parse the data with in input filepath (which is essentially a matrix) into a large data frame (which is the proper input for ggplot)
#read in data and then turn it into a matrix
t = read.table(input_filepath, header=TRUE)
num_columns = length(t[1,])
m = as.matrix(t[,-num_columns], mode='numeric')
#get dimensions of the matrix
num_genes = length(m[,1])
num_tpoints = length(m[1,])
#now we need to cycle through the elements of the matrix and append to our new data frame each time
#each of the empty variables below will become the columns to our data frame
expr_level = c()
tpoints = c()
gene_ids = c()
#NOTE(review): growing vectors with append() in nested loops is O(n^2); preallocation or rep()-based construction would scale better.
#1st cycle through each row (i.e. gene of the matrix)
for (i in 1:num_genes){
#assign a random ID for this gene (in this case a random number b/t 0 and 100)
#NOTE(review): random IDs can collide in principle; a row index would be a safer identifier.
gene_id = runif(1, 0, 100)
gene_id = as.character(gene_id)
tpoint = 0
#now cycle through each element of the given row (i.e. time-point)
for (j in m[i,]){
#append each of our variables (i.e. soon-to-be columns in our data frame)
expr_level = append(expr_level, j)
tpoints = append(tpoints, tpoint)
gene_ids = append(gene_ids, gene_id)
#update the timepoint each with each cycle
tpoint = tpoint + 1
}
}
#now combine each of the variables so that they are columns in a data frame
data = data.frame(expr_level, tpoints, gene_ids)
#write the data frame to file using write.table()
#see: https://stat.ethz.ch/R-manual/R-devel/library/utils/html/write.table.html for more info
data_filepath = paste(input_filepath, '_ggplot_friendly_dataframe', sep='')
write.table(data, data_filepath, row.names=FALSE, quote=FALSE)
#return the data frame as well
return (data)
}
plot_stacked_area = function(input_filepath){
#NOTE(review): byte-for-byte duplicate of the plot_stacked_area definition earlier in this dump; kept verbatim.
#This script makes a stacked area chart using ggplot2's geom_area() function. These plots essentially convey the same information as a stacked bar plot but are way prettier, and thus more interpretable. Here, each color corresponds to a gene, and the width of that color at a given time-point gives it expression level for that time.
#1st the data needs to be parsed in a way that is amenable for ggplot plotting
data_frame = parse.data.for.ggplot(input_filepath)
#must load ggplot may need to use install.packages('ggplot2') if ggplot has not been installed yet
library(ggplot2)
#This tells ggplot how to interperate the data, i.e. which columns in the data frame give the x coordinants, which give the y, etc...
p = ggplot(data_frame, aes(x=tpoints, y=expr_level, fill=gene_ids))
#this tells ggplot what type of plot you would like to make
p = p + geom_area(position='stack')
#this instructs ggplot to not include a legend
#NOTE(review): guides(fill=FALSE) is deprecated since ggplot2 3.3.4; guides(fill='none') is the supported form.
p = p + guides(fill=FALSE)
#this gives the title and axis labels for the plot
p = p + ggtitle('Each V Gene Expression Trajectories') + xlab('Time (days since vaccination)') + ylab('Cumulative Expression Level')
#now save the plot
pdf(paste(input_filepath, '_stacked_area_plot.pdf', sep=''))
plot(p)
dev.off()
}
|
# Dependencies: tidyverse helpers plus two project packages (bin2mi, m2imp)
# that supply dt_p2(), m2_mi() and (presumably) ci_sur() -- confirm exports.
library(tidyr, warn.conflicts = F, quietly = T)
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(bin2mi)
library(m2imp)
# Simulation design constants. The statistical roles below are inferred from
# conventional naming -- TODO confirm against the study protocol.
alpha <- 0.025        # presumably one-sided significance level
power <- 0.85         # presumably target power
cor_xl <- 0.4         # correlation between x and lambda in the opinion model (used in xcov below)
pc <- 0.8             # control-arm proportion, passed to bin2mi::dt_p2()
pt <- 0.825           # treatment-arm proportion, passed to bin2mi::dt_p2()
m1 <- 0.23            # NOTE(review): not referenced in this chunk -- presumably a margin used elsewhere
n_obs <- 250          # per-trial sample size, passed to bin2mi::dt_p2()
mu_x <- 20            # mean of x in the MD opinion model
mu_lambda <- 0.7      # mean of lambda in the MD opinion model
sd_x <- 7             # SD of x
sd_lambda <- 0.12     # SD of lambda
#parameters tbu in the clinical experts opinions model (to calculate probability to be non/observed)
# 2x2 covariance matrix for (x, lambda) with the correlation cor_xl baked in.
xcov <- matrix(c(sd_x^2, sd_x*sd_lambda*cor_xl, sd_x*sd_lambda*cor_xl, sd_lambda^2), 2, 2)
#number of imputations for the MDs survey
num_m_md <- 20
# Monte-Carlo study: 5000 replicates run in parallel on 24 cores. Each
# replicate simulates a population of physicians, a survey with missingness,
# several estimators of mean lambda (full / observed / multiply-imputed /
# single min / single max), a simulated trial, and the resulting decisions.
# Per-replicate set.seed() calls keep each iteration reproducible despite
# mclapply's process-level parallelism.
x1 <- parallel::mclapply(X = 1:5000,
mc.cores = 24,
FUN= function(i){
#population of physicians consists of 1000 doctors
#NOTE(review): the '100*1' factor looks like a hard-coded scenario index (scenario 1) -- confirm against sibling scripts.
set.seed(100*1 + i)
dt_pop0 <- MASS::mvrnorm(1000, mu = c(mu_x, mu_lambda), Sigma = xcov)
dt_pop <- tibble::tibble(x = dt_pop0[,1],
lambda = dt_pop0[,2],
ph_id = seq(1, length(dt_pop0[,1])))%>%
dplyr::mutate(lambda = ifelse(lambda<0.40, 0.4, ifelse(lambda>0.95, 0.95, lambda)), #cut-off lambda values to be between 0.40 and 0.95
x = ifelse(x < 0, 0, x)) #cut-off x values at 0
#representative sample of MDs - 30% of the population
dt_sample <- dt_pop%>%
dplyr::sample_frac(size = 0.3)%>%
dplyr::mutate(x_20 = ifelse(x > 20, 1, 0)) #introduce cut-off value of x at 20
#observe only k physicians
# Missingness model: r == 1 means non-response; its probability depends on
# the x > 20 indicator (0.95 vs 0.99), making the missingness non-ignorable.
dt_all <-
dt_sample%>%
dplyr::mutate(pmiss = ifelse(x_20 == 1, 0.95, 0.99))%>%
split(.$pmiss)%>%
purrr::map(.f = function(x){ x%>%
dplyr::mutate(r = stats::rbinom(dplyr::n(), 1, pmiss))})%>%
dplyr::bind_rows()%>%
dplyr::select(-pmiss)
#the below condition added in order to make sure that at least 4 responses are observed in the survey
# Redraw the whole missingness vector until at least 4 responses (r == 0)
# are observed; with pmiss >= 0.95 this can take several draws.
while(length(dt_all$r[dt_all$r==0])<4){
dt_all <-
dt_sample%>%
dplyr::mutate(pmiss = ifelse(x_20 == 1, 0.95, 0.99))%>%
split(.$pmiss)%>%
purrr::map(.f = function(x){ x%>%
dplyr::mutate(r = stats::rbinom(dplyr::n(), 1, pmiss))})%>%
dplyr::bind_rows()%>%
dplyr::select(-pmiss)
}
#mean/sd lambda for the whole representative sample of MDs
mdsur_all <- dt_all%>%
dplyr::summarise(mean_l = mean(lambda), sd_l = sd(lambda), n_l = n())%>%
dplyr::mutate(se_l = sd_l/sqrt(n_l))
#mean/sd lambda for the observed sample of MDs
mdsur_obs <- dt_all%>%
dplyr::filter(r==0)%>%
dplyr::summarise(mean_l = mean(lambda), sd_l = sd(lambda), n_l = n())%>%
dplyr::mutate(se_l = sd_l/sqrt(n_l))
# Diagnostic summary by the x > 20 indicator (kept in the output list).
ch <- dt_all%>%
group_by(x_20)%>%
summarise(ml = mean(lambda), rm = mean(r), xm = mean(x))
#mask unobserved values from the sample of MDs
dt_obs <- dt_all%>%
dplyr::mutate(lambda = ifelse(r==0, lambda, NA))%>%
dplyr::select(-x_20)
# Multiple imputation of the masked lambda values via m2imp::m2_mi;
# NOTE(review): assumes m2_mi returns columns 'qbar' (pooled mean) and 't'
# (total variance, Rubin's rules) -- confirm against the m2imp package.
mdsur_mi <- m2_mi(dt_obs, num_m = num_m_md, i = i, use_pckg = 'norm', n_iter = 100000)%>%
dplyr::rename(mean_l = qbar)%>%
dplyr::mutate(sd_l = sqrt(t), se_l = sd_l)
# Single-value benchmarks: minimum and maximum observed lambda.
mdsur_smin <- dt_all%>%
dplyr::summarise(mean_l = min(lambda, na.rm = T))%>%
dplyr::mutate(sd_l = 0, n_l = 1)
mdsur_smax <- dt_all%>%
dplyr::summarise(mean_l = max(lambda, na.rm = T))%>%
dplyr::mutate(sd_l = 0, n_l = 1)
#generate trial data:
set.seed(200*1 + i)
dt0 <- bin2mi::dt_p2(n = n_obs, pc = pc, pt = pt)
#calculate ci and derive decision based on the full/obs/mi/sing cohort of MDs
# NOTE(review): ci_sur is not defined in this chunk -- presumably exported by
# bin2mi or m2imp; confirm.
mdall_des <- ci_sur(mdsur_all, dt0, type = 'all')
mdobs_des <- ci_sur(mdsur_obs, dt0, type = 'obs')
mdmi_des <- ci_sur(mdsur_mi, dt0, type = 'mi')
mdmin_des <- ci_sur(mdsur_smin, dt0, type = 'sing min')
mdmax_des <- ci_sur(mdsur_smax, dt0, type = 'sing max')
ct_des <- bind_rows(mdall_des, mdobs_des, mdmi_des, mdmin_des, mdmax_des)%>%
dplyr::mutate(sim_id = i)
out <- list(ct_des, ch)%>%
purrr::set_names("ct_des", 'ch')
return(out)
})
# Persist all 5000 replicate results (decisions + diagnostics) for analysis.
saveRDS(x1, "results/mdsu_obs3_sc1_pnorm_niter100k.rds")
| /pgms_simrun/mdsur_obs3_sc1_pnorm_niter100k.R | no_license | yuliasidi/ch2sim | R | false | false | 4,154 | r | library(tidyr, warn.conflicts = F, quietly = T)
#NOTE(review): byte-for-byte duplicate of the simulation script earlier in this dump (its leading library(tidyr) line is fused into the preceding metadata row); kept verbatim.
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(bin2mi)
library(m2imp)
alpha <- 0.025
power <- 0.85
cor_xl <- 0.4
pc <- 0.8
pt <- 0.825
m1 <- 0.23
n_obs <- 250
mu_x <- 20
mu_lambda <- 0.7
sd_x <- 7
sd_lambda <- 0.12
#parameters tbu in the clinical experts opinions model (to calculate probability to be non/observed)
xcov <- matrix(c(sd_x^2, sd_x*sd_lambda*cor_xl, sd_x*sd_lambda*cor_xl, sd_lambda^2), 2, 2)
#number of imputations for the MDs survey
num_m_md <- 20
x1 <- parallel::mclapply(X = 1:5000,
mc.cores = 24,
FUN= function(i){
#population of physicians consists of 1000 doctors
set.seed(100*1 + i)
dt_pop0 <- MASS::mvrnorm(1000, mu = c(mu_x, mu_lambda), Sigma = xcov)
dt_pop <- tibble::tibble(x = dt_pop0[,1],
lambda = dt_pop0[,2],
ph_id = seq(1, length(dt_pop0[,1])))%>%
dplyr::mutate(lambda = ifelse(lambda<0.40, 0.4, ifelse(lambda>0.95, 0.95, lambda)), #cut-off lambda values to be between 0.40 and 0.95
x = ifelse(x < 0, 0, x)) #cut-off x values at 0
#representative sample of MDs - 30% of the population
dt_sample <- dt_pop%>%
dplyr::sample_frac(size = 0.3)%>%
dplyr::mutate(x_20 = ifelse(x > 20, 1, 0)) #introduce cut-off value of x at 20
#observe only k physicians
dt_all <-
dt_sample%>%
dplyr::mutate(pmiss = ifelse(x_20 == 1, 0.95, 0.99))%>%
split(.$pmiss)%>%
purrr::map(.f = function(x){ x%>%
dplyr::mutate(r = stats::rbinom(dplyr::n(), 1, pmiss))})%>%
dplyr::bind_rows()%>%
dplyr::select(-pmiss)
#the below condition added in order to make sure that at least 4 responses are observed in the survey
while(length(dt_all$r[dt_all$r==0])<4){
dt_all <-
dt_sample%>%
dplyr::mutate(pmiss = ifelse(x_20 == 1, 0.95, 0.99))%>%
split(.$pmiss)%>%
purrr::map(.f = function(x){ x%>%
dplyr::mutate(r = stats::rbinom(dplyr::n(), 1, pmiss))})%>%
dplyr::bind_rows()%>%
dplyr::select(-pmiss)
}
#mean/sd lambda for the whole representitive sample of MDs
mdsur_all <- dt_all%>%
dplyr::summarise(mean_l = mean(lambda), sd_l = sd(lambda), n_l = n())%>%
dplyr::mutate(se_l = sd_l/sqrt(n_l))
#mean/sd lambda for the observed sample of MDs
mdsur_obs <- dt_all%>%
dplyr::filter(r==0)%>%
dplyr::summarise(mean_l = mean(lambda), sd_l = sd(lambda), n_l = n())%>%
dplyr::mutate(se_l = sd_l/sqrt(n_l))
ch <- dt_all%>%
group_by(x_20)%>%
summarise(ml = mean(lambda), rm = mean(r), xm = mean(x))
#mask unobserved values from the sample of MDs
dt_obs <- dt_all%>%
dplyr::mutate(lambda = ifelse(r==0, lambda, NA))%>%
dplyr::select(-x_20)
mdsur_mi <- m2_mi(dt_obs, num_m = num_m_md, i = i, use_pckg = 'norm', n_iter = 100000)%>%
dplyr::rename(mean_l = qbar)%>%
dplyr::mutate(sd_l = sqrt(t), se_l = sd_l)
mdsur_smin <- dt_all%>%
dplyr::summarise(mean_l = min(lambda, na.rm = T))%>%
dplyr::mutate(sd_l = 0, n_l = 1)
mdsur_smax <- dt_all%>%
dplyr::summarise(mean_l = max(lambda, na.rm = T))%>%
dplyr::mutate(sd_l = 0, n_l = 1)
#generate trial data:
set.seed(200*1 + i)
dt0 <- bin2mi::dt_p2(n = n_obs, pc = pc, pt = pt)
#calculate ci and derive decision based on the full/obs/mi/sing cohort of MDs
mdall_des <- ci_sur(mdsur_all, dt0, type = 'all')
mdobs_des <- ci_sur(mdsur_obs, dt0, type = 'obs')
mdmi_des <- ci_sur(mdsur_mi, dt0, type = 'mi')
mdmin_des <- ci_sur(mdsur_smin, dt0, type = 'sing min')
mdmax_des <- ci_sur(mdsur_smax, dt0, type = 'sing max')
ct_des <- bind_rows(mdall_des, mdobs_des, mdmi_des, mdmin_des, mdmax_des)%>%
dplyr::mutate(sim_id = i)
out <- list(ct_des, ch)%>%
purrr::set_names("ct_des", 'ch')
return(out)
})
saveRDS(x1, "results/mdsu_obs3_sc1_pnorm_niter100k.rds")
|
getwd()
setwd("D:/limworkspace/R_Data_Analysis/Part3/data")
setwd("D:/limworkspace/R_Data_Analysis/Part3")
getwd()
#--------------------------------------------------------------#
#------------------ section 8 : ๋ค์ํ ํจ์ -------------------#
#--------------------------------------------------------------#
# dplyr ์ฐธ๊ณ ์๋ฃ : https://rfriend.tistory.com/235
# 8.5 ๋ฐ์ดํฐ ํธ๋ค๋ง - plyr packages ------------------------------------------------------------------------------------------------------------
# ์ถ๋ ฅํํ array data frame list nothing
# 입력형태
# array aaply adply alply a_ply
# data frame daply ddply* dlply* d_ply
# list laply ldply* llply l_ply
# n replicates raply rdply rlply r_ply
# function arguments maply mdply mlply m_ply
# * ์์ฃผ ์ฐ์ด๋ ํจ์
# ๋ค์ํ ์ถ๋ ฅํํ๋ฅผ ๋ํ๋
install.packages("plyr")
library(plyr)
rm(list = ls())
list.files()
fruits <- read.csv( "data/fruits_10.csv" ); fruits
# ddply(data, ๊ธฐ์ค์ปฌ๋ผ, ์ ์ฉํจ์ or ๊ฒฐ๊ณผ๋ฌผ)
ddply(fruits, 'name', summarise, sum_qty = sum(qty), # ๋ณ์ ์์ฑ
sum_price = sum(price) # summarise๋ ์๋ก์ด dfm์ ์์ฑ
) # ๊ธฐ์ค์ปฌ๋ผ ๋ณ๋ก ๋ฐ์ดํฐ ์์ฑ
ddply(fruits, 'name', summarise, max_qty = max(qty), # ๋ค์ํ ํจ์ ์ ์ฉ์ด ๊ฐ๋ฅ
min_price = min(price) # summarise = group_by์ ๊ฐ์ ๊ธฐ๋ฅ
)
ddply(fruits, c('year','name'), summarise, max_qty = max(qty), # ๋ค์ํ ํจ์ ์ ์ฉ์ด ๊ฐ๋ฅ
min_price = min(price) # summarise = group_by์ ๊ฐ์ ๊ธฐ๋ฅ
)
ddply(fruits, c('year','name'), transform, sum_qty = sum(qty),
pct_qty = (100*qty)/sum(qty) # transform์ ์ปฌ๋ผ์ ๊ธฐ์กด ๋ฐ์ดํฐ์ ํจ๊ป ๋ํ๋
)
# 8.6 ๋ฐ์ดํฐ ํธ๋ค๋ง - dplyr packages ********** ------------------------------------------------------------------------------------------------
install.packages('dplyr')
library(dplyr)
list.files()
data1 <- read.csv("data/2013๋
_ํ๋ก์ผ๊ตฌ์ ์_์ฑ์ .csv")
str(data1)
# 8.6.1 filter(๋ฐ์ดํฐ ,์กฐ๊ฑด) - ์กฐ๊ฑด์ ์ค์ ์ํ๋ ๋ฐ์ดํฐ๋ง ์ป๋ ๊ธฐ๋ฅ ---------------------------------------------------------------------------
data2 <- filter(data1, ๊ฒฝ๊ธฐ > 120 ); data2
data3 <- filter(data1, ๊ฒฝ๊ธฐ > 120 & ๋์ > 80); data3
data4 <- filter(data1, ํฌ์ง์
== '1๋ฃจ์' | ํฌ์ง์
== '3๋ฃจ์'); data4
data5 <- filter(data1, ํฌ์ง์
%in% c('1๋ฃจ์','2๋ฃจ์')); data5 # %in% ํฌํจํ๊ณ ์๋์ง ๋ฌป๋ ์ฐ์ฐ์, ์ ํํ ๊ฐ์ ์
๋ ฅํด์ผํจ
# 8.6.2 select(๋ฐ์ดํฐ, ์ปฌ๋ผ๋ช
) - ํน์ ์ปฌ๋ผ๋ง ์ ํํด์ ์ฌ์ฉํ๋ ๊ธฐ๋ฅ ----------------------------------------------------------------------------
select(data1, ์ ์๋ช
, ํฌ์ง์
, ํ) # ์ํ๋ ์ปฌ๋ผ ์ ํ
select(data1, ์์:ํ์) # :๋ก ๋ฒ์์ง์ ๊ฐ๋ฅ
select(data1, -ํ๋ฐ, -ํ์ , -๋๋ฃจ) # ํด๋น ์ปฌ๋ผ์ ์ ์ธ
select(data1, -ํ๋ฐ:-๋๋ฃจ)
data1 %>% # %>%(pipe) : ์ฌ๋ฌ๋ฌธ์ฅ์ ์กฐํฉํด์ ํ ๋ฌธ์ฅ์ฒ๋ผ ์ฌ์ฉ ๊ฐ๋ฅ
select(์ ์๋ช
, ํ, ๊ฒฝ๊ธฐ, ํ์) %>%
filter(ํ์ > 400)
# 8.6.3 arrange(์ ๋ ฌํ๊ณ ์ ํ๋ ๋ณ์) - ๋ฐ์ดํฐ๋ฅผ ์ค๋ฆ์ฐจ์ or ๋ด๋ฆผ์ฐจ์์ผ๋ก ์ ๋ ฌ (= sorting) ------------------------------------------------------
data1 %>%
select(์ ์๋ช
, ํ, ๊ฒฝ๊ธฐ, ํ์) %>%
filter(ํ์ > 400) %>%
arrange(desc(ํ์)) # desc(๋ณ์) : ๋ด๋ฆผ์ฐจ์ ์ ๋ ฌ
data1 %>%
select(์ ์๋ช
, ํ, ๊ฒฝ๊ธฐ, ํ์) %>%
filter(ํ์ > 400) %>%
arrange(desc(๊ฒฝ๊ธฐ), desc(ํ์)) # ์์๋๋ก ์ ๋ ฌ ๊ธฐ์ค์ด ์ ํด์ง
# 8.6.4 mutate(์๋ก์ด ๋ณ์ = ํจ์) - ๊ธฐ์กด์ ๋ณ์๋ฅผ ํ์ฉํ์ฌ ์๋ก์ด ๋ณ์๋ฅผ ์์ฑ (with %>%) -------------------------------------------------------
data2 <- data1 %>%
select(์ ์๋ช
, ํ, ์ถ๋ฃจ์จ, ์ฅํ์จ) %>%
mutate(OPS = ์ถ๋ฃจ์จ + ์ฅํ์จ) %>% # ์๋ก์ด ๋ณ์ ์์ฑ
arrange(desc(OPS))
# 8.6.5 summarise - ๋ค์ํ ํจ์๋ฅผ ํตํด ์ฃผ์ด์ง ๋ฐ์ดํฐ๋ฅผ ์ง๊ณํ๋ค (with group_by) -----------------------------------------------------------------
str(data1)
data1 %>%
group_by(ํ) %>%
summarise(average = mean(๊ฒฝ๊ธฐ, na.rm = T)) # ํ๊ท ๊ฒฝ๊ธฐ ์ถ์ฅ์, ๊ฒฐ์ธก์น ์ ๊ฑฐ
data1 %>%
group_by(ํ) %>%
summarise_each(list(mean), ๊ฒฝ๊ธฐ, ํ์)
data1 %>%
group_by(ํ) %>%
mutate(OPS = ์ถ๋ฃจ์จ + ์ฅํ์จ) %>% # summarise_each : ์ฌ๋ฌ๊ฐ์ ๋ณ์์ ํจ์๋ฅผ ์ ์ฉํ ๋ ์ฌ์ฉ
summarise_each(list(mean), ์ฅํ์จ, ์ถ๋ฃจ์จ, ํ์จ, OPS) %>% # summarise_each(funs(ํจ์), ๋ณ์๋ค)
arrange(desc(OPS)) # deprecated ์ค๋ฅ๋ ์์ผ๋ก ๊ธฐ๋ฅ์ ๋ณ๊ฒฝํ ๊ฒ์ ์์
data1 %>%
group_by(ํ) %>%
mutate(OPS = ์ถ๋ฃจ์จ + ์ฅํ์จ) %>%
summarise_each(list(mean), ์ฅํ์จ, ์ถ๋ฃจ์จ, ํ์จ, OPS) %>% # ์ํ๋ ๋ณ์๋ค์ ์์ฝ๊ฐ ์ถ๋ ฅ
arrange(desc(OPS))
data1 %>%
group_by(ํ) %>%
summarise_each(funs(mean, n()), ๊ฒฝ๊ธฐ, ํ์) # n() ํจ์๋ก ๊ฐ์๋ ํฌํจ // n ํจ์๋ funs๋ก๋ง ์ฌ์ฉ๊ฐ๋ฅ
library(reshape2)
library(ggplot2)
# dplyr ์ฐ์ต๋ฌธ์ 1
library(googleVis)
attach(Fruits)
Fruits_2 <- filter(Fruits, Expenses > 80); Fruits_2
Fruits_3 <- filter(Fruits, Expenses > 90 & Sales > 90); Fruits_3
Fruits_4 <- filter(Fruits, Expenses > 90 | Sales > 80); Fruits_4
Fruits_5 <- filter(Fruits, Expenses %in% c(79,91)); Fruits_5
Fruits_5 <- filter(Fruits, Expenses == 79 | Expenses == 91); Fruits_5
Fruits_6 <- select(Fruits[,1:4], -Location); Fruits_6
Fruits_7 <- Fruits %>%
group_by(Fruit) %>%
summarise(average = sum(Sales, na.rm = T)); Fruits_7
Fruits_8 <- Fruits %>%
group_by(Fruit) %>%
summarise(Sales = sum(Sales),
Profit = sum(Profit)); Fruits_8
Fruits_8 <- Fruits %>%
group_by(Fruit) %>%
summarise_each(list(sum),Sales,Profit); Fruits_8
rm(list=ls())
# 8.7 ๋ฐ์ดํฐ ํธ๋ค๋ง - reshape2 packages ***** ---------------------------------------------------------------------------------------------------
install.packages("reshape2")
library(reshape2)
fruits <- read.csv("data/fruits_10.csv" ); fruits
melt(fruits, id = 'year') # melt : ๋
น์ด๋ค // id ๊ฐ์ ๊ธฐ์ค์ผ๋ก long format ์ผ๋ก ๋ณํ
melt(fruits, id = c('year','name'))
melt(fruits, id = c('year','name'),
variable.name = '๋ณ์๋ช
',
value.name = '๋ณ์๊ฐ')
mtest <- melt(fruits, id = c('year','name'),
variable.name = '๋ณ์๋ช
', # variable์ ์์๋ก ๋ฐ๊ฟ ์ ์๋ค
value.name = '๋ณ์๊ฐ') # value๋ฅผ ์์๋ก ๋ฐ๊ฟ ์ ์๋ค
# dcast(melt๋ ๋ฐ์ดํฐ, ๊ธฐ์ค์ปฌ๋ผ ~ ๋์์ปฌ๋ผ, ์ ์ฉํจ์)
dcast(mtest, year+name ~ ๋ณ์๋ช
) # dcast(data, ๊ธฐ์ค์ปฌ๋ผ ~ ๋์์ปฌ๋ผ, ์ ์ฉํจ์)
dcast(mtest, name~๋ณ์๋ช
, mean, subset=.(name=='berry'))
dcast(mtest, name~๋ณ์๋ช
, sum, subset=.(name=='apple')) # subset ์ต์
์ผ๋ก ํน์ ๋ณ์๋ง ์ถ๋ ฅ ๊ฐ๋ฅ
# 8.8 ๋ฐ์ดํฐ ํธ๋ค๋ง - stringr packages ****** ---------------------------------------------------------------------------------------------------
install.packages("stringr")
library(stringr)
# 8.8.1 str_detect(data, '์ฐพ๊ณ ์ํ๋ ๋ฌธ์') - ํน์ ๋ฌธ์๋ฅผ ์ฐพ๋ ํจ์ ------------------------------------------------------------------------------
fruits_string <- c('apple','Apple','banana','pineapple')
str_detect(fruits_string, 'A') # ๋๋ฌธ์ A๊ฐ ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ์ผ๋ก ์ถ๋ ฅ)
str_detect(fruits_string, 'a') # ์๋ฌธ์ a๊ฐ ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ์ผ๋ก ์ถ๋ ฅ)
str_detect(fruits_string, '^a') # ์ฒซ ๊ธ์๊ฐ ์๋ฌธ์ a์ธ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ)
str_detect(fruits_string, 'e$') # ๋๋๋ ๊ธ์๊ฐ ์๋ฌธ์ e์ธ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ)
str_detect(fruits_string, '^[aA]') # ์์ํ๋ ๊ธ์๊ฐ ๋๋ฌธ์ A ๋๋ ์๋ฌธ์ a์ธ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ)
str_detect(fruits_string, '[aA]') # ๋จ์ด์ ์๋ฌธ์ a์ ๋๋ฌธ์ A๊ฐ ๋ค์ด ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ)
str_detect(fruits_string, regex('a', ignore_case = T)) # ๋์๋ฌธ์ ๊ตฌ๋ถ์ ์ํ๋๋ก ์ค์ ํ๋ ํจ์
# 8.8.2 str_count(data, '์ธ๊ณ ์ ํ๋ ๋ฌธ์) - ์ฃผ์ด์ง ๋จ์ด์์ ํด๋น ๊ธ์๊ฐ ๋ช ๋ฒ ๋์ค๋์ง ์ธ๋ ํจ์ ---------------------------------------------
str_count(fruits_string, 'a') # 'a'๊ฐ ๊ฐ๊ฐ์ ๋ฐ์ดํฐ์ ๋ช๋ฒ ํฌํจํ๋์ง ์ถ๋ ฅ
# 8.8.3 str_c('chr1', 'chr2'|data) - ๋ฌธ์์ด์ ํฉ์ณ์ ์ถ๋ ฅ -------------------------------------------------------------
# str_c == paste
# str_c(sep="") == paste0
str_c('apple','banana')
str_c('Fruits : ' ,fruits_string) # ๋ฌธ์์ ๋ฒกํฐ ๋ฐ์ดํฐ๋ ๋ถ์ด๊ธฐ ๊ฐ๋ฅ
str_c(fruits_string, " name is", fruits_string)
str_c(fruits_string, collapse = '') # collapse ์ต์
์ผ๋ก ๊ตฌ๋ถ์ ์ค์ ์ด ๊ฐ๋ฅ
str_c(fruits_string, collapse = ', ')
# 8.8.4 str_dup(data, ํ์) - data๋ฅผ ํ์๋งํผ ๋ฐ๋ณตํด์ ์ถ๋ ฅ -------------------------------------------------------------------------------------
str_dup(fruits_string, 3)
# 8.8.5 str_length('๋ฌธ์์ด') - ์ฃผ์ด์ง ๋ฌธ์์ด์ ๊ธธ์ด๋ฅผ ์ถ๋ ฅ --------------------------------------------------------------------------------------
# str_length == length
str_length(fruits_string)
str_length('๊ณผ์ผ')
# 8.8.6 str_locate('๋ฌธ์์ด, '๋ฌธ์') - ์ฃผ์ด์ง ๋ฌธ์์ด์์ ํน์ ๋ฌธ์์ ์ฒ์, ๋ง์ง๋ง ์์น๋ฅผ ์ถ๋ ฅ ----------------------------------------------------
str_locate(fruits_string, 'a') # ๋์๋ฌธ์ ๊ตฌ๋ณ์ด ํ์ // ์ฒ์ ๋์ค๋ 'a'์ ์์น๋ฅผ ์ถ๋ ฅ
str_locate('apple', 'app') # ํ ์ธ์ด์ ์ธ๋ฑ์ค์ ๊ตฌ๊ฐ๊ณผ ์ฐจ๋ณ์ด ์์
# 8.8.7 str_repalce('chr1,'old,'new) - ์ด์ ๋ฌธ์๋ฅผ ์๋ก์ด ๋ฌธ์๋ก ๋ณ๊ฒฝ // sub() ํจ์์ ๊ฐ์ ๊ธฐ๋ฅ
# str_replace == sub
# str_replace_all == gsub
str_replace('apple','p','*') # apple์์ ์ฒซ๋ฒ์งธ p๋ฅผ *๋ก ๋ณ๊ฒฝ
str_replace('apple','p','++') # apple์์ ์ฒซ๋ฒ์งธ p๋ฅผ ++๋ก ๋ณ๊ฒฝ
str_replace_all('apple','p','*') # apple์์ ๋ชจ๋ p๋ฅผ *๋ก ๋ณ๊ฒฝ
# 8.8.8 str_split(๋ฌธ์์ด, '๊ธฐ์ค์ผ๋ก ๋๋ ๋ฌธ์) - ํน์ ๋ฌธ์๋ฅผ ๊ธฐ์ค์ผ๋ก ๋ฌธ์์ด์ ๋๋ ์ค -----------------------------------------------------------
fruits_string2 <- str_c('apple','/','orange','/','banana')
str_split(fruits_string2, '/') # fruits_string2๋ฅผ /๋ฅผ ๊ธฐ์ค์ผ๋ก ๋ถ๋ฆฌ // ๊ฒฐ๊ณผ๊ฐ์ list๋ก ์ถ๋ ฅ
str_split_fixed(fruits_string2,'/',3)[,2] # '/'์ ๊ธฐ์ค์ผ๋ก ๋ฌธ์์ด์ ์ง์ ๋ ์๋ฆฌ์๋ก ๋ถ๋ฆฌ // ๋ค์ ์ธ๋ฑ์ฑ์ผ๋ก ๋ถ๋ฆฌ๋ ๋ฌธ์์ด ์ ํ ๊ฐ๋ฅ
str_split_fixed(fruits_string2,'/',3)[,1]
# 8.8.9 str_sub(๋ฌธ์์ด, start, end) - ๋ฌธ์์ด์์ ์ง์ ๋ ๊ธธ์ด ๋งํผ์ ๋ฌธ์๋ฅผ ์๋ผ๋ด์ค -------------------------------------------------------------
# str_sub == substr
fruits_string2
str_sub(fruits_string2, start=1, end=3)
str_sub(fruits_string2,1,3) # start, end ์์ด๋ ๊ฐ๋ฅ
str_sub(fruits_string2, -5) # ๋ค์์ ๋ค์ฏ๋ฒ ์งธ ๋ถํฐ ๋ฌธ์์ด์ ์๋ผ๋
# 8.8.10 str_trim() - ๋ฌธ์์ด์ ๊ฐ์ฅ ์, ๋ค์ ๊ณต๋ฐฑ์ ์ ๊ฑฐํด์ค ------------------------------------------------------------------------------------
str_trim(' apple banana berry ')
str_trim('\t apple banana berry')
str_trim(' apple bananan berry \n ')
# 8.8.11 str_match(data, pattern) - ๋ฌธ์์ ํจํด์ด ๋งค์นํ๋ ์๋ฆฌ๋ฅผ ์ถ๋ ฅํด์ค ----------------------------------------------------------------------
fruits_string <- c('apple','Apple','banana','pineapple')
str_detect(fruits_string, 'A') # ๋๋ฌธ์ A๊ฐ ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ์ผ๋ก ์ถ๋ ฅ)
str_match(fruits_string, 'A') # ๋๋ฌธ์ A๊ฐ ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋งค์นญ๋๋ ์์น์ ๊ฐ๋ง ์ถ๋ ฅ, ๋งค์น๊ฐ ์๋ ๊ฒฝ์ฐ NA๋ก ์ถ๋ ฅ )
# 8.8.12 ์ํ๋ ํ ์ถ์ถํ๊ธฐ *** -----------------------------------------------------------------------------------------------------------------
data2[nchar(data2$์๊ฐ)==3,2] <- paste0(0,data2[nchar(data2$์๊ฐ)==3,2]); data2 # 1๋ฒ ๋ฐฉ๋ฒ
data2$์๋ก์ด์๊ฐ <- paste(str_sub(data2[,2],1,2),
str_sub(data2[,2],3,4), sep=":"); data2$์๊ฐ <- NULL; data2 # 2๋ฒ ๋ฐฉ๋ฒ
library(googleVis)
Fruits
Fruits[str_detect(Fruits$Date, "10"),] # Fruits๋ฐ์ดํฐ์ Date์ด์์ "10"์ด ๋ค์ด๊ฐ ํ๋ง ์ถ์ถ
Fruits[grep("10",Fruits$Date),] # ๊ฐ์ ๊ธฐ๋ฅ
| /Part3_R๊ธฐ์ด/sec 8_3 (plyr dplyr stringr).R | no_license | limwonki0619/R_Data_Analysis | R | false | false | 14,391 | r | getwd()
setwd("D:/limworkspace/R_Data_Analysis/Part3/data")
setwd("D:/limworkspace/R_Data_Analysis/Part3")
getwd()
#--------------------------------------------------------------#
#------------------ section 8 : ๋ค์ํ ํจ์ -------------------#
#--------------------------------------------------------------#
# dplyr ์ฐธ๊ณ ์๋ฃ : https://rfriend.tistory.com/235
# 8.5 ๋ฐ์ดํฐ ํธ๋ค๋ง - plyr packages ------------------------------------------------------------------------------------------------------------
# ์ถ๋ ฅํํ array data frame list nothing
# ์
๋ ฅํํ
# array aaply adply alply a_ply
# data frame daply ddply* dlply* d_ply
# list laply ldply* llply l_ply
# n replicates raply rdply rlply r_ply
# function arguments maply mdply mlply m_ply
# * ์์ฃผ ์ฐ์ด๋ ํจ์
# ๋ค์ํ ์ถ๋ ฅํํ๋ฅผ ๋ํ๋
install.packages("plyr")
library(plyr)
rm(list = ls())
list.files()
fruits <- read.csv( "data/fruits_10.csv" ); fruits
# ddply(data, ๊ธฐ์ค์ปฌ๋ผ, ์ ์ฉํจ์ or ๊ฒฐ๊ณผ๋ฌผ)
ddply(fruits, 'name', summarise, sum_qty = sum(qty), # ๋ณ์ ์์ฑ
sum_price = sum(price) # summarise๋ ์๋ก์ด dfm์ ์์ฑ
) # ๊ธฐ์ค์ปฌ๋ผ ๋ณ๋ก ๋ฐ์ดํฐ ์์ฑ
ddply(fruits, 'name', summarise, max_qty = max(qty), # ๋ค์ํ ํจ์ ์ ์ฉ์ด ๊ฐ๋ฅ
min_price = min(price) # summarise = group_by์ ๊ฐ์ ๊ธฐ๋ฅ
)
ddply(fruits, c('year','name'), summarise, max_qty = max(qty), # ๋ค์ํ ํจ์ ์ ์ฉ์ด ๊ฐ๋ฅ
min_price = min(price) # summarise = group_by์ ๊ฐ์ ๊ธฐ๋ฅ
)
ddply(fruits, c('year','name'), transform, sum_qty = sum(qty),
pct_qty = (100*qty)/sum(qty) # transform์ ์ปฌ๋ผ์ ๊ธฐ์กด ๋ฐ์ดํฐ์ ํจ๊ป ๋ํ๋
)
# 8.6 ๋ฐ์ดํฐ ํธ๋ค๋ง - dplyr packages ********** ------------------------------------------------------------------------------------------------
install.packages('dplyr')
library(dplyr)
list.files()
data1 <- read.csv("data/2013๋
_ํ๋ก์ผ๊ตฌ์ ์_์ฑ์ .csv")
str(data1)
# 8.6.1 filter(๋ฐ์ดํฐ ,์กฐ๊ฑด) - ์กฐ๊ฑด์ ์ค์ ์ํ๋ ๋ฐ์ดํฐ๋ง ์ป๋ ๊ธฐ๋ฅ ---------------------------------------------------------------------------
data2 <- filter(data1, ๊ฒฝ๊ธฐ > 120 ); data2
data3 <- filter(data1, ๊ฒฝ๊ธฐ > 120 & ๋์ > 80); data3
data4 <- filter(data1, ํฌ์ง์
== '1๋ฃจ์' | ํฌ์ง์
== '3๋ฃจ์'); data4
data5 <- filter(data1, ํฌ์ง์
%in% c('1๋ฃจ์','2๋ฃจ์')); data5 # %in% ํฌํจํ๊ณ ์๋์ง ๋ฌป๋ ์ฐ์ฐ์, ์ ํํ ๊ฐ์ ์
๋ ฅํด์ผํจ
# 8.6.2 select(๋ฐ์ดํฐ, ์ปฌ๋ผ๋ช
) - ํน์ ์ปฌ๋ผ๋ง ์ ํํด์ ์ฌ์ฉํ๋ ๊ธฐ๋ฅ ----------------------------------------------------------------------------
select(data1, ์ ์๋ช
, ํฌ์ง์
, ํ) # ์ํ๋ ์ปฌ๋ผ ์ ํ
select(data1, ์์:ํ์) # :๋ก ๋ฒ์์ง์ ๊ฐ๋ฅ
select(data1, -ํ๋ฐ, -ํ์ , -๋๋ฃจ) # ํด๋น ์ปฌ๋ผ์ ์ ์ธ
select(data1, -ํ๋ฐ:-๋๋ฃจ)
data1 %>% # %>%(pipe) : ์ฌ๋ฌ๋ฌธ์ฅ์ ์กฐํฉํด์ ํ ๋ฌธ์ฅ์ฒ๋ผ ์ฌ์ฉ ๊ฐ๋ฅ
select(์ ์๋ช
, ํ, ๊ฒฝ๊ธฐ, ํ์) %>%
filter(ํ์ > 400)
# 8.6.3 arrange(์ ๋ ฌํ๊ณ ์ ํ๋ ๋ณ์) - ๋ฐ์ดํฐ๋ฅผ ์ค๋ฆ์ฐจ์ or ๋ด๋ฆผ์ฐจ์์ผ๋ก ์ ๋ ฌ (= sorting) ------------------------------------------------------
data1 %>%
select(์ ์๋ช
, ํ, ๊ฒฝ๊ธฐ, ํ์) %>%
filter(ํ์ > 400) %>%
arrange(desc(ํ์)) # desc(๋ณ์) : ๋ด๋ฆผ์ฐจ์ ์ ๋ ฌ
data1 %>%
select(์ ์๋ช
, ํ, ๊ฒฝ๊ธฐ, ํ์) %>%
filter(ํ์ > 400) %>%
arrange(desc(๊ฒฝ๊ธฐ), desc(ํ์)) # ์์๋๋ก ์ ๋ ฌ ๊ธฐ์ค์ด ์ ํด์ง
# 8.6.4 mutate(์๋ก์ด ๋ณ์ = ํจ์) - ๊ธฐ์กด์ ๋ณ์๋ฅผ ํ์ฉํ์ฌ ์๋ก์ด ๋ณ์๋ฅผ ์์ฑ (with %>%) -------------------------------------------------------
data2 <- data1 %>%
select(์ ์๋ช
, ํ, ์ถ๋ฃจ์จ, ์ฅํ์จ) %>%
mutate(OPS = ์ถ๋ฃจ์จ + ์ฅํ์จ) %>% # ์๋ก์ด ๋ณ์ ์์ฑ
arrange(desc(OPS))
# 8.6.5 summarise - ๋ค์ํ ํจ์๋ฅผ ํตํด ์ฃผ์ด์ง ๋ฐ์ดํฐ๋ฅผ ์ง๊ณํ๋ค (with group_by) -----------------------------------------------------------------
str(data1)
data1 %>%
group_by(ํ) %>%
summarise(average = mean(๊ฒฝ๊ธฐ, na.rm = T)) # ํ๊ท ๊ฒฝ๊ธฐ ์ถ์ฅ์, ๊ฒฐ์ธก์น ์ ๊ฑฐ
data1 %>%
group_by(ํ) %>%
summarise_each(list(mean), ๊ฒฝ๊ธฐ, ํ์)
data1 %>%
group_by(ํ) %>%
mutate(OPS = ์ถ๋ฃจ์จ + ์ฅํ์จ) %>% # summarise_each : ์ฌ๋ฌ๊ฐ์ ๋ณ์์ ํจ์๋ฅผ ์ ์ฉํ ๋ ์ฌ์ฉ
summarise_each(list(mean), ์ฅํ์จ, ์ถ๋ฃจ์จ, ํ์จ, OPS) %>% # summarise_each(funs(ํจ์), ๋ณ์๋ค)
arrange(desc(OPS)) # deprecated ์ค๋ฅ๋ ์์ผ๋ก ๊ธฐ๋ฅ์ ๋ณ๊ฒฝํ ๊ฒ์ ์์
data1 %>%
group_by(ํ) %>%
mutate(OPS = ์ถ๋ฃจ์จ + ์ฅํ์จ) %>%
summarise_each(list(mean), ์ฅํ์จ, ์ถ๋ฃจ์จ, ํ์จ, OPS) %>% # ์ํ๋ ๋ณ์๋ค์ ์์ฝ๊ฐ ์ถ๋ ฅ
arrange(desc(OPS))
data1 %>%
group_by(ํ) %>%
summarise_each(funs(mean, n()), ๊ฒฝ๊ธฐ, ํ์) # n() ํจ์๋ก ๊ฐ์๋ ํฌํจ // n ํจ์๋ funs๋ก๋ง ์ฌ์ฉ๊ฐ๋ฅ
library(reshape2)
library(ggplot2)
# dplyr ์ฐ์ต๋ฌธ์ 1
library(googleVis)
attach(Fruits)
Fruits_2 <- filter(Fruits, Expenses > 80); Fruits_2
Fruits_3 <- filter(Fruits, Expenses > 90 & Sales > 90); Fruits_3
Fruits_4 <- filter(Fruits, Expenses > 90 | Sales > 80); Fruits_4
Fruits_5 <- filter(Fruits, Expenses %in% c(79,91)); Fruits_5
Fruits_5 <- filter(Fruits, Expenses == 79 | Expenses == 91); Fruits_5
Fruits_6 <- select(Fruits[,1:4], -Location); Fruits_6
Fruits_7 <- Fruits %>%
group_by(Fruit) %>%
summarise(average = sum(Sales, na.rm = T)); Fruits_7
Fruits_8 <- Fruits %>%
group_by(Fruit) %>%
summarise(Sales = sum(Sales),
Profit = sum(Profit)); Fruits_8
Fruits_8 <- Fruits %>%
group_by(Fruit) %>%
summarise_each(list(sum),Sales,Profit); Fruits_8
rm(list=ls())
# 8.7 ๋ฐ์ดํฐ ํธ๋ค๋ง - reshape2 packages ***** ---------------------------------------------------------------------------------------------------
install.packages("reshape2")
library(reshape2)
fruits <- read.csv("data/fruits_10.csv" ); fruits
melt(fruits, id = 'year') # melt : ๋
น์ด๋ค // id ๊ฐ์ ๊ธฐ์ค์ผ๋ก long format ์ผ๋ก ๋ณํ
melt(fruits, id = c('year','name'))
melt(fruits, id = c('year','name'),
variable.name = '๋ณ์๋ช
',
value.name = '๋ณ์๊ฐ')
mtest <- melt(fruits, id = c('year','name'),
variable.name = '๋ณ์๋ช
', # variable์ ์์๋ก ๋ฐ๊ฟ ์ ์๋ค
value.name = '๋ณ์๊ฐ') # value๋ฅผ ์์๋ก ๋ฐ๊ฟ ์ ์๋ค
# dcast(melt๋ ๋ฐ์ดํฐ, ๊ธฐ์ค์ปฌ๋ผ ~ ๋์์ปฌ๋ผ, ์ ์ฉํจ์)
dcast(mtest, year+name ~ ๋ณ์๋ช
) # dcast(data, ๊ธฐ์ค์ปฌ๋ผ ~ ๋์์ปฌ๋ผ, ์ ์ฉํจ์)
dcast(mtest, name~๋ณ์๋ช
, mean, subset=.(name=='berry'))
dcast(mtest, name~๋ณ์๋ช
, sum, subset=.(name=='apple')) # subset ์ต์
์ผ๋ก ํน์ ๋ณ์๋ง ์ถ๋ ฅ ๊ฐ๋ฅ
# 8.8 ๋ฐ์ดํฐ ํธ๋ค๋ง - stringr packages ****** ---------------------------------------------------------------------------------------------------
install.packages("stringr")
library(stringr)
# 8.8.1 str_detect(data, '์ฐพ๊ณ ์ํ๋ ๋ฌธ์') - ํน์ ๋ฌธ์๋ฅผ ์ฐพ๋ ํจ์ ------------------------------------------------------------------------------
fruits_string <- c('apple','Apple','banana','pineapple')
str_detect(fruits_string, 'A') # ๋๋ฌธ์ A๊ฐ ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ์ผ๋ก ์ถ๋ ฅ)
str_detect(fruits_string, 'a') # ์๋ฌธ์ a๊ฐ ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ์ผ๋ก ์ถ๋ ฅ)
str_detect(fruits_string, '^a') # ์ฒซ ๊ธ์๊ฐ ์๋ฌธ์ a์ธ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ)
str_detect(fruits_string, 'e$') # ๋๋๋ ๊ธ์๊ฐ ์๋ฌธ์ e์ธ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ)
str_detect(fruits_string, '^[aA]') # ์์ํ๋ ๊ธ์๊ฐ ๋๋ฌธ์ A ๋๋ ์๋ฌธ์ a์ธ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ)
str_detect(fruits_string, '[aA]') # ๋จ์ด์ ์๋ฌธ์ a์ ๋๋ฌธ์ A๊ฐ ๋ค์ด ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ)
str_detect(fruits_string, regex('a', ignore_case = T)) # ๋์๋ฌธ์ ๊ตฌ๋ถ์ ์ํ๋๋ก ์ค์ ํ๋ ํจ์
# 8.8.2 str_count(data, '์ธ๊ณ ์ ํ๋ ๋ฌธ์) - ์ฃผ์ด์ง ๋จ์ด์์ ํด๋น ๊ธ์๊ฐ ๋ช ๋ฒ ๋์ค๋์ง ์ธ๋ ํจ์ ---------------------------------------------
str_count(fruits_string, 'a') # 'a'๊ฐ ๊ฐ๊ฐ์ ๋ฐ์ดํฐ์ ๋ช๋ฒ ํฌํจํ๋์ง ์ถ๋ ฅ
# 8.8.3 str_c('chr1', 'chr2'|data) - ๋ฌธ์์ด์ ํฉ์ณ์ ์ถ๋ ฅ -------------------------------------------------------------
# str_c == paste
# str_c(sep="") == paste0
str_c('apple','banana')
str_c('Fruits : ' ,fruits_string) # ๋ฌธ์์ ๋ฒกํฐ ๋ฐ์ดํฐ๋ ๋ถ์ด๊ธฐ ๊ฐ๋ฅ
str_c(fruits_string, " name is", fruits_string)
str_c(fruits_string, collapse = '') # collapse ์ต์
์ผ๋ก ๊ตฌ๋ถ์ ์ค์ ์ด ๊ฐ๋ฅ
str_c(fruits_string, collapse = ', ')
# 8.8.4 str_dup(data, ํ์) - data๋ฅผ ํ์๋งํผ ๋ฐ๋ณตํด์ ์ถ๋ ฅ -------------------------------------------------------------------------------------
str_dup(fruits_string, 3)
# 8.8.5 str_length('๋ฌธ์์ด') - ์ฃผ์ด์ง ๋ฌธ์์ด์ ๊ธธ์ด๋ฅผ ์ถ๋ ฅ --------------------------------------------------------------------------------------
# str_length == length
str_length(fruits_string)
str_length('๊ณผ์ผ')
# 8.8.6 str_locate('๋ฌธ์์ด, '๋ฌธ์') - ์ฃผ์ด์ง ๋ฌธ์์ด์์ ํน์ ๋ฌธ์์ ์ฒ์, ๋ง์ง๋ง ์์น๋ฅผ ์ถ๋ ฅ ----------------------------------------------------
str_locate(fruits_string, 'a') # ๋์๋ฌธ์ ๊ตฌ๋ณ์ด ํ์ // ์ฒ์ ๋์ค๋ 'a'์ ์์น๋ฅผ ์ถ๋ ฅ
str_locate('apple', 'app') # ํ ์ธ์ด์ ์ธ๋ฑ์ค์ ๊ตฌ๊ฐ๊ณผ ์ฐจ๋ณ์ด ์์
# 8.8.7 str_repalce('chr1,'old,'new) - ์ด์ ๋ฌธ์๋ฅผ ์๋ก์ด ๋ฌธ์๋ก ๋ณ๊ฒฝ // sub() ํจ์์ ๊ฐ์ ๊ธฐ๋ฅ
# str_replace == sub
# str_replace_all == gsub
str_replace('apple','p','*') # apple์์ ์ฒซ๋ฒ์งธ p๋ฅผ *๋ก ๋ณ๊ฒฝ
str_replace('apple','p','++') # apple์์ ์ฒซ๋ฒ์งธ p๋ฅผ ++๋ก ๋ณ๊ฒฝ
str_replace_all('apple','p','*') # apple์์ ๋ชจ๋ p๋ฅผ *๋ก ๋ณ๊ฒฝ
# 8.8.8 str_split(๋ฌธ์์ด, '๊ธฐ์ค์ผ๋ก ๋๋ ๋ฌธ์) - ํน์ ๋ฌธ์๋ฅผ ๊ธฐ์ค์ผ๋ก ๋ฌธ์์ด์ ๋๋ ์ค -----------------------------------------------------------
fruits_string2 <- str_c('apple','/','orange','/','banana')
str_split(fruits_string2, '/') # fruits_string2๋ฅผ /๋ฅผ ๊ธฐ์ค์ผ๋ก ๋ถ๋ฆฌ // ๊ฒฐ๊ณผ๊ฐ์ list๋ก ์ถ๋ ฅ
str_split_fixed(fruits_string2,'/',3)[,2] # '/'์ ๊ธฐ์ค์ผ๋ก ๋ฌธ์์ด์ ์ง์ ๋ ์๋ฆฌ์๋ก ๋ถ๋ฆฌ // ๋ค์ ์ธ๋ฑ์ฑ์ผ๋ก ๋ถ๋ฆฌ๋ ๋ฌธ์์ด ์ ํ ๊ฐ๋ฅ
str_split_fixed(fruits_string2,'/',3)[,1]
# 8.8.9 str_sub(๋ฌธ์์ด, start, end) - ๋ฌธ์์ด์์ ์ง์ ๋ ๊ธธ์ด ๋งํผ์ ๋ฌธ์๋ฅผ ์๋ผ๋ด์ค -------------------------------------------------------------
# str_sub == substr
fruits_string2
str_sub(fruits_string2, start=1, end=3)
str_sub(fruits_string2,1,3) # start, end ์์ด๋ ๊ฐ๋ฅ
str_sub(fruits_string2, -5) # ๋ค์์ ๋ค์ฏ๋ฒ ์งธ ๋ถํฐ ๋ฌธ์์ด์ ์๋ผ๋
# 8.8.10 str_trim() - ๋ฌธ์์ด์ ๊ฐ์ฅ ์, ๋ค์ ๊ณต๋ฐฑ์ ์ ๊ฑฐํด์ค ------------------------------------------------------------------------------------
str_trim(' apple banana berry ')
str_trim('\t apple banana berry')
str_trim(' apple bananan berry \n ')
# 8.8.11 str_match(data, pattern) - ๋ฌธ์์ ํจํด์ด ๋งค์นํ๋ ์๋ฆฌ๋ฅผ ์ถ๋ ฅํด์ค ----------------------------------------------------------------------
fruits_string <- c('apple','Apple','banana','pineapple')
str_detect(fruits_string, 'A') # ๋๋ฌธ์ A๊ฐ ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋
ผ๋ฆฌ๊ฐ์ผ๋ก ์ถ๋ ฅ)
str_match(fruits_string, 'A') # ๋๋ฌธ์ A๊ฐ ์๋ ๋จ์ด ์ฐพ๊ธฐ (๋งค์นญ๋๋ ์์น์ ๊ฐ๋ง ์ถ๋ ฅ, ๋งค์น๊ฐ ์๋ ๊ฒฝ์ฐ NA๋ก ์ถ๋ ฅ )
# 8.8.12 ์ํ๋ ํ ์ถ์ถํ๊ธฐ *** -----------------------------------------------------------------------------------------------------------------
data2[nchar(data2$์๊ฐ)==3,2] <- paste0(0,data2[nchar(data2$์๊ฐ)==3,2]); data2 # 1๋ฒ ๋ฐฉ๋ฒ
data2$์๋ก์ด์๊ฐ <- paste(str_sub(data2[,2],1,2),
str_sub(data2[,2],3,4), sep=":"); data2$์๊ฐ <- NULL; data2 # 2๋ฒ ๋ฐฉ๋ฒ
library(googleVis)
Fruits
Fruits[str_detect(Fruits$Date, "10"),] # Fruits๋ฐ์ดํฐ์ Date์ด์์ "10"์ด ๋ค์ด๊ฐ ํ๋ง ์ถ์ถ
Fruits[grep("10",Fruits$Date),] # ๊ฐ์ ๊ธฐ๋ฅ
|
#' Variance test or non-parametric test results visualization, using boxplot
#'
#' @param data a data.frame contain the input data
#' @param i col index wtich need to test
#' @param sig_show Distinctive display, "abc" or "line"
#' @param result output from aovMcomper or KwWlx. You can also import result calculated from other software (a data frame)
#' @param ns Logical value, whether to display insignificant marks
#' @examples
#' # data(data_wt)
#' result = KwWlx(data = data_wt, i= 4)
#' PlotresultBox = aovMuiBoxP(data = data_wt, i= 3,sig_show ="abc",result = result[[1]])
#' # output result
#' p = PlotresultBox[[1]]
#' p
#' @return data frame
#' @author Contact: Tao Wen \email{2018203048@@njau.edu.cn} Jun Yuan \email{junyuan@@njau.edu.cn}
#' @references
#'
#' Yuan J, Zhao J, Wen T, Zhao M, Li R, Goossens P, Huang Q, Bai Y, Vivanco JM, Kowalchuk GA, Berendsen RL, Shen Q
#' Root exudates drive the soil-borne legacy of aboveground pathogen infection
#' Microbiome 2018,DOI: \url{doi: 10.1186/s40168-018-0537-x}
#' @export
###----ไฝฟ็จๆนๅทฎๆฃ้ช็ปๆๅๅค้ๆฏ่พ็ปๆๅๅฑ็คบ๏ผ ็ฎฑ็บฟๅพๅฑ็คบ
aovMuiBoxP = function(data = data_wt, i= 3,sig_show ="line",result = result,ns = FALSE){
# Boxplot + jittered points of one measured variable per group, annotated
# with multiple-comparison results from aovMcomper()/KwWlx().
#
# data:     data frame; col 1 = sample ID, col 2 = group, col `i` = value.
# i:        index of the column to plot.
# sig_show: "abc" -> compact-letter display above each box;
#           "line" -> pairwise significance brackets (ggsignif).
# result:   comparison table carrying a `groups` letter column; assumed to
#           have group names both as rownames and in a `group` column
#           (true for aovMcomper/KwWlx output -- verify for other inputs).
# ns:       if TRUE, drop non-significant brackets ("line" mode only).
#
# Returns list(ggplot object, annotated plotting data frame).
aa = result
# Name of the plotted column, used as the y-axis label
name_i = colnames(data[i])
# Keep only ID, group and the measured value; rename for convenience
data_box = data[c(1,2,i)]
colnames(data_box) = c("ID" , "group","dd" )
# Letter code per sample, looked up by group name in the result's rownames
data_box$stat=aa[as.character(data_box$group),]$groups
# NOTE: `max`/`min` shadow the base functions within this scope (calls like
# max(...) below still resolve to the base function)
max=max(data_box[,c("dd")],na.rm = TRUE)
min=min(data_box[,c("dd")],na.rm = TRUE)
# Per-group maxima set the vertical position of the letter labels.
# NOTE(review): summarise_() is deprecated dplyr API; kept as-is here.
x = data_box[,c("group","dd")]
y = x %>% group_by(group) %>% summarise_(Max=paste('max(',"dd",",na.rm = TRUE",')',sep=""))
y=as.data.frame(y)
y
rownames(y)=y$group
# Place each label slightly (5% of the data range) above its group's max
data_box$y=y[as.character(data_box$group),]$Max + (max-min)*0.05
head(data_box)
# Base plot: transparent boxes + jittered raw points, legend suppressed
p = ggplot(data_box, aes(x=group, y=data_box[["dd"]], color=group)) +
geom_boxplot(alpha=1, outlier.size=0, size=0.7, width=0.5, fill="transparent") +
labs(
y=name_i)+
geom_jitter( position=position_jitter(0.17), size=1, alpha=0.7)+theme(legend.position="none")
p
if (sig_show == "abc") {
# Compact-letter display drawn above the boxes
p = p +
geom_text(data=data_box, aes(x=group, y=y, color=group, label= stat))
p
}
# All pairwise group combinations (computed unconditionally;
# `wtq` and `my_comparisons` are not used further below)
wtq = levels(data$group)
lis = combn(levels(as.factor(data$group)), 2)
x <-lis
my_comparisons <- tapply(x,rep(1:ncol(x),each=nrow(x)),function(i)i)
line = list()
if (sig_show == "line") {
# Group pairs as a list of length-2 character vectors
zuhe = combn(aa$group,2)
xxxx <- tapply(zuhe,rep(1:ncol(zuhe),each=nrow(zuhe)),function(i)i)
xxxx
sig_lis = rep("a",dim(zuhe)[2])
# Same letter code => not significant; different letters => "*".
# The loop index `i` shadows the column-index argument, which is safe
# here because `i` is no longer needed as a column index at this point.
for (i in 1:dim(zuhe)[2]) {
if (filter(aa, group == xxxx[[i]][1])$groups == filter(aa, group == xxxx[[i]][2])$groups) {
sig_lis[i] = "no_sig"
}
if (filter(aa, group == xxxx[[i]][1])$groups != filter(aa, group == xxxx[[i]][2])$groups) {
sig_lis[i] = "*"
}
}
if (ns == TRUE) {
#-remove the ns: drop non-significant pairs by their list names
# (tapply names the list elements "1","2",... so character indexing works)
xxxx[as.character((1:length(sig_lis))[sig_lis =="no_sig"])] = NULL
sig_lis = sig_lis[sig_lis != "no_sig"]
}
# Bracket y-positions are staggered above the overall data maximum
line = list(comparisons = xxxx,annotations=sig_lis,y_position = (seq(from=1, to=max(data_box$dd)/4,length.out=dim(zuhe)[2]) + max(data_box$dd)),tip_length = rep(0.03,dim(zuhe)[2]))
p = p +
ggsignif::geom_signif(comparisons = xxxx, annotations=sig_lis,
y_position = (seq(from=1, to=max(data_box$dd)/4,length.out=dim(zuhe)[2]) + max(data_box$dd)), tip_length = rep(0.03,dim(zuhe)[2]),color = "black")
p
}
# p=p+Mytheme
p
return(list(p,data_box))
}
| /R/BoxP.R | no_license | zhao-hx/EasyStat | R | false | false | 3,463 | r | #' Variance test or non-parametric test results visualization, using boxplot
#'
#' @param data a data.frame contain the input data
#' @param i col index wtich need to test
#' @param sig_show Distinctive display, "abc" or "line"
#' @param result output from aovMcomper or KwWlx. You can also import result calculated from other software (a data frame)
#' @param ns Logical value, whether to display insignificant marks
#' @examples
#' # data(data_wt)
#' result = KwWlx(data = data_wt, i= 4)
#' PlotresultBox = aovMuiBoxP(data = data_wt, i= 3,sig_show ="abc",result = result[[1]])
#' # output result
#' p = PlotresultBox[[1]]
#' p
#' @return data frame
#' @author Contact: Tao Wen \email{2018203048@@njau.edu.cn} Jun Yuan \email{junyuan@@njau.edu.cn}
#' @references
#'
#' Yuan J, Zhao J, Wen T, Zhao M, Li R, Goossens P, Huang Q, Bai Y, Vivanco JM, Kowalchuk GA, Berendsen RL, Shen Q
#' Root exudates drive the soil-borne legacy of aboveground pathogen infection
#' Microbiome 2018,DOI: \url{doi: 10.1186/s40168-018-0537-x}
#' @export
###----ไฝฟ็จๆนๅทฎๆฃ้ช็ปๆๅๅค้ๆฏ่พ็ปๆๅๅฑ็คบ๏ผ ็ฎฑ็บฟๅพๅฑ็คบ
aovMuiBoxP = function(data = data_wt, i= 3,sig_show ="line",result = result,ns = FALSE){
# Boxplot + jittered points of one measured variable per group, annotated
# with multiple-comparison results from aovMcomper()/KwWlx().
#
# data:     data frame; col 1 = sample ID, col 2 = group, col `i` = value.
# i:        index of the column to plot.
# sig_show: "abc" -> compact-letter display above each box;
#           "line" -> pairwise significance brackets (ggsignif).
# result:   comparison table carrying a `groups` letter column; assumed to
#           have group names both as rownames and in a `group` column
#           (true for aovMcomper/KwWlx output -- verify for other inputs).
# ns:       if TRUE, drop non-significant brackets ("line" mode only).
#
# Returns list(ggplot object, annotated plotting data frame).
aa = result
# Name of the plotted column, used as the y-axis label
name_i = colnames(data[i])
# Keep only ID, group and the measured value; rename for convenience
data_box = data[c(1,2,i)]
colnames(data_box) = c("ID" , "group","dd" )
# Letter code per sample, looked up by group name in the result's rownames
data_box$stat=aa[as.character(data_box$group),]$groups
# NOTE: `max`/`min` shadow the base functions within this scope (calls like
# max(...) below still resolve to the base function)
max=max(data_box[,c("dd")],na.rm = TRUE)
min=min(data_box[,c("dd")],na.rm = TRUE)
# Per-group maxima set the vertical position of the letter labels.
# NOTE(review): summarise_() is deprecated dplyr API; kept as-is here.
x = data_box[,c("group","dd")]
y = x %>% group_by(group) %>% summarise_(Max=paste('max(',"dd",",na.rm = TRUE",')',sep=""))
y=as.data.frame(y)
y
rownames(y)=y$group
# Place each label slightly (5% of the data range) above its group's max
data_box$y=y[as.character(data_box$group),]$Max + (max-min)*0.05
head(data_box)
# Base plot: transparent boxes + jittered raw points, legend suppressed
p = ggplot(data_box, aes(x=group, y=data_box[["dd"]], color=group)) +
geom_boxplot(alpha=1, outlier.size=0, size=0.7, width=0.5, fill="transparent") +
labs(
y=name_i)+
geom_jitter( position=position_jitter(0.17), size=1, alpha=0.7)+theme(legend.position="none")
p
if (sig_show == "abc") {
# Compact-letter display drawn above the boxes
p = p +
geom_text(data=data_box, aes(x=group, y=y, color=group, label= stat))
p
}
# All pairwise group combinations (computed unconditionally;
# `wtq` and `my_comparisons` are not used further below)
wtq = levels(data$group)
lis = combn(levels(as.factor(data$group)), 2)
x <-lis
my_comparisons <- tapply(x,rep(1:ncol(x),each=nrow(x)),function(i)i)
line = list()
if (sig_show == "line") {
# Group pairs as a list of length-2 character vectors
zuhe = combn(aa$group,2)
xxxx <- tapply(zuhe,rep(1:ncol(zuhe),each=nrow(zuhe)),function(i)i)
xxxx
sig_lis = rep("a",dim(zuhe)[2])
# Same letter code => not significant; different letters => "*".
# The loop index `i` shadows the column-index argument, which is safe
# here because `i` is no longer needed as a column index at this point.
for (i in 1:dim(zuhe)[2]) {
if (filter(aa, group == xxxx[[i]][1])$groups == filter(aa, group == xxxx[[i]][2])$groups) {
sig_lis[i] = "no_sig"
}
if (filter(aa, group == xxxx[[i]][1])$groups != filter(aa, group == xxxx[[i]][2])$groups) {
sig_lis[i] = "*"
}
}
if (ns == TRUE) {
#-remove the ns: drop non-significant pairs by their list names
# (tapply names the list elements "1","2",... so character indexing works)
xxxx[as.character((1:length(sig_lis))[sig_lis =="no_sig"])] = NULL
sig_lis = sig_lis[sig_lis != "no_sig"]
}
# Bracket y-positions are staggered above the overall data maximum
line = list(comparisons = xxxx,annotations=sig_lis,y_position = (seq(from=1, to=max(data_box$dd)/4,length.out=dim(zuhe)[2]) + max(data_box$dd)),tip_length = rep(0.03,dim(zuhe)[2]))
p = p +
ggsignif::geom_signif(comparisons = xxxx, annotations=sig_lis,
y_position = (seq(from=1, to=max(data_box$dd)/4,length.out=dim(zuhe)[2]) + max(data_box$dd)), tip_length = rep(0.03,dim(zuhe)[2]),color = "black")
p
}
# p=p+Mytheme
p
return(list(p,data_box))
}
|
#e4_prepdfFig4.R
#Prep dataframe for Figure 4
# NOTE(review): this script relies on a data frame `datas` created by an
# earlier script in the pipeline (not defined here) and on reshape2.
# NOTE(review): the repository path names this file Fig5 while the header
# says Fig4 -- confirm which figure it feeds.
### Remove unnecessary cols ###
#str(datas)
# Columns to drop: identifiers, dates and derived deltas not needed here
removecols<-c('s0Epid','s0Ppid','s0E','s0P','julydate','type','delta.sE','delta.sP')
indx<-colnames(datas) %in% removecols
datas.r<-datas[,!indx]
# Rebuild a data frame with explicit columns; `soilmeas` is taken from
# `scol` (renamed), everything else keeps its name
ru.datas <- data.frame(sFpid=datas.r$sFpid,
bk=datas.r$bk,
comptrt=datas.r$comptrt,
mvtrt=datas.r$mvtrt,
mivi=datas.r$mivi,
compabund=datas.r$compabund,
total=datas.r$total,
relmivi=datas.r$relmivi,
soilmeas=datas.r$scol,
sF=datas.r$sF)
data4.1 <- ru.datas
### Reshape ###
library(reshape2)
## Reshape so that plant biomass values are all in one column (biomval), with an identifier column to identify what type of biomass that value represents (biommeas)
data4.2 <- melt(data4.1, measure.vars=c('mivi','compabund','total', 'relmivi'), id.vars=c('sFpid','bk','comptrt','mvtrt','soilmeas','sF'))
# Rename melt's default 'variable'/'value' columns to meaningful names
whichvar<-which(colnames(data4.2)=='variable')
whichval<-which(colnames(data4.2)=='value')
colnames(data4.2)[whichvar]<-'biommeas'
colnames(data4.2)[whichval]<-'biomval'
### Check structure ###
#str(data4.2)
### Make it a nice name ###
data4 <- data4.2
| /code/e4_prepdfFig5.R | no_license | marissalee/E4-GHExpt | R | false | false | 1,318 | r | #e4_prepdfFig4.R
#Prep dataframe for Figure 4
### Remove unnecessary cols ###
#str(datas)
removecols<-c('s0Epid','s0Ppid','s0E','s0P','julydate','type','delta.sE','delta.sP')
indx<-colnames(datas) %in% removecols
datas.r<-datas[,!indx]
ru.datas <- data.frame(sFpid=datas.r$sFpid,
bk=datas.r$bk,
comptrt=datas.r$comptrt,
mvtrt=datas.r$mvtrt,
mivi=datas.r$mivi,
compabund=datas.r$compabund,
total=datas.r$total,
relmivi=datas.r$relmivi,
soilmeas=datas.r$scol,
sF=datas.r$sF)
data4.1 <- ru.datas
### Reshape ###
library(reshape2)
## Reshape so that plant biomass values are all in one column (biomval), with an identifier column to identify what type of biomass that value represents (biommeas)
data4.2 <- melt(data4.1, measure.vars=c('mivi','compabund','total', 'relmivi'), id.vars=c('sFpid','bk','comptrt','mvtrt','soilmeas','sF'))
whichvar<-which(colnames(data4.2)=='variable')
whichval<-which(colnames(data4.2)=='value')
colnames(data4.2)[whichvar]<-'biommeas'
colnames(data4.2)[whichval]<-'biomval'
### Check structure ###
#str(data4.2)
### Make it a nice name ###
data4 <- data4.2
|
# efficacy
# Fit simple and multiple linear regressions of 311 service-request
# "efficacy" measures on neighbourhood demographics.
# NOTE(review): the script uses attach(); the recodes below write into
# `efficacy$...` while the cutpoint tests read the attached (original,
# still-continuous) vectors. That ordering is what makes the sequential
# recodes work, but it is fragile -- confirm before modifying.
efficacy <- read.csv("/Users/jiayuan/Documents/MA675/311/regression - simple & multiple/csv/efficacy.csv",header=T)
attach(efficacy)
# category
# Recode continuous covariates into ordinal categories at fixed cutpoints
efficacy$age[age<=35] <- 0
efficacy$age[age>35] <- 1
efficacy$white[white<=0.6] <- 0
efficacy$white[white>0.6 & white<=0.8] <- 1
efficacy$white[white>0.8] <- 2
efficacy$black.african[black.african <= 0.1] <- 0
efficacy$black.african[black.african > 0.1 & black.african <= 0.2] <- 1
efficacy$black.african[black.african > 0.2] <- 2
efficacy$asian[asian <= 0.05] <- 0
efficacy$asian[asian > 0.05 & asian <= 0.1] <- 1
efficacy$asian[asian > 0.1] <- 2
efficacy$hispanic.latino[hispanic.latino <= 0.1] <- 0
efficacy$hispanic.latino[hispanic.latino > 0.1 & hispanic.latino <= 0.2] <- 1
efficacy$hispanic.latino[hispanic.latino > 0.2] <- 2
# lm - public works
# Simple regressions; the author recorded R-squared values inline
summary(lm(Public.Works.Department ~ female)) #Multiple R-squared: 0.3372
summary(lm(Public.Works.Department ~ male)) #Multiple R-squared: 0.3372
summary(lm(Public.Works.Department ~ factor(efficacy$black.african))) #Multiple R-squared: 0.487
# lm - Boston.Public.School
summary(lm(Boston.Public.School ~ income)) #Multiple R-squared: 0.3189
# lm - Boston.Water...Sewer.Commission, too many NAs so I pass it.
# lm - Inspectional.Services, no significance
# lm - Mayor.s.24.Hour.Hotline, no significance
# lm - Parks...Recreation.Department
summary(lm(Parks...Recreation.Department ~ income)) #Multiple R-squared: 0.2421
# lm - Property.Management
summary(lm(Property.Management ~ density)) #Multiple R-squared: 0.3291
# lm - Transportation...Traffic.Division, no significance
# multiple lm
summary(lm(Public.Works.Department ~ female + male + factor(efficacy$black.african)))
#### without category
# These use the attached, pre-recoding continuous vectors
summary(lm(Transportation...Traffic.Division ~ hispanic.latino))
summary(lm(Inspectional.Services ~ black.african))
| /Documents/MA675/City_of_Boston_311_Service_Request_Data_Analysis/regression - simple & multiple/R/regression - efficacy.R | no_license | jiayuans/Statistics-Practicum-1 | R | false | false | 1,833 | r | # efficacy
# efficacy
# Fit simple and multiple linear regressions of 311 service-request
# "efficacy" measures on neighbourhood demographics.
# NOTE(review): the script uses attach(); the recodes below write into
# `efficacy$...` while the cutpoint tests read the attached (original,
# still-continuous) vectors. That ordering is what makes the sequential
# recodes work, but it is fragile -- confirm before modifying.
efficacy <- read.csv("/Users/jiayuan/Documents/MA675/311/regression - simple & multiple/csv/efficacy.csv",header=T)
attach(efficacy)
# category
# Recode continuous covariates into ordinal categories at fixed cutpoints
efficacy$age[age<=35] <- 0
efficacy$age[age>35] <- 1
efficacy$white[white<=0.6] <- 0
efficacy$white[white>0.6 & white<=0.8] <- 1
efficacy$white[white>0.8] <- 2
efficacy$black.african[black.african <= 0.1] <- 0
efficacy$black.african[black.african > 0.1 & black.african <= 0.2] <- 1
efficacy$black.african[black.african > 0.2] <- 2
efficacy$asian[asian <= 0.05] <- 0
efficacy$asian[asian > 0.05 & asian <= 0.1] <- 1
efficacy$asian[asian > 0.1] <- 2
efficacy$hispanic.latino[hispanic.latino <= 0.1] <- 0
efficacy$hispanic.latino[hispanic.latino > 0.1 & hispanic.latino <= 0.2] <- 1
efficacy$hispanic.latino[hispanic.latino > 0.2] <- 2
# lm - public works
# Simple regressions; the author recorded R-squared values inline
summary(lm(Public.Works.Department ~ female)) #Multiple R-squared: 0.3372
summary(lm(Public.Works.Department ~ male)) #Multiple R-squared: 0.3372
summary(lm(Public.Works.Department ~ factor(efficacy$black.african))) #Multiple R-squared: 0.487
# lm - Boston.Public.School
summary(lm(Boston.Public.School ~ income)) #Multiple R-squared: 0.3189
# lm - Boston.Water...Sewer.Commission, too many NAs so I pass it.
# lm - Inspectional.Services, no significance
# lm - Mayor.s.24.Hour.Hotline, no significance
# lm - Parks...Recreation.Department
summary(lm(Parks...Recreation.Department ~ income)) #Multiple R-squared: 0.2421
# lm - Property.Management
summary(lm(Property.Management ~ density)) #Multiple R-squared: 0.3291
# lm - Transportation...Traffic.Division, no significance
# multiple lm
summary(lm(Public.Works.Department ~ female + male + factor(efficacy$black.african)))
#### without category
# These use the attached, pre-recoding continuous vectors
summary(lm(Transportation...Traffic.Division ~ hispanic.latino))
summary(lm(Inspectional.Services ~ black.african))
|
#' @export gsoap_layout
#' @importFrom tsne tsne
#' @importFrom ProjectionBasedClustering Isomap SammonsMapping tSNE CCA KruskalStress
#' @importFrom philentropy distance
#' @importFrom packcircles circleRepelLayout
#' @importFrom WeightedCluster wcKMedRange wcKMedoids
create_association_matrix = function(l){
# Build a binary instance-by-gene association matrix from a named list
# `l` mapping each instance (pathway / GO term) to its query gene
# members. Rows are instances, columns are the unique genes, and entry
# [i, j] is 1 when gene j is a member of instance i, 0 otherwise.
#
# Fix: the original dropped the first column twice; the id ('instances')
# column was already removed before naming, so the second drop silently
# discarded the first gene's column and rowSums() (member counts)
# downstream undercounted every instance containing that gene.
instances = rep(names(l), times = sapply(l, length))
members = unlist(l)
# Long-format adjacency table: one row per (instance, gene) pair
am = data.frame(instances, members, adjacency = 1)
# Wide format: one column per gene, rows keyed by instance
am = reshape(am, idvar = 'instances', timevar = 'members', direction = 'wide')
# Drop the 'instances' id column; only the gene columns remain
am = am[,-1]
# Column order after reshape follows first appearance of each member,
# which matches unique(members)
colnames(am) = unique(members)
rownames(am) = unique(instances)
# (instance, gene) pairs absent from the long table come back as NA
am[is.na(am)] = 0
am = as.matrix(am)
return(am)
}
calc_distance_matrix = function(m, distance.method = 'jaccard'){
  # Pairwise distances between the rows (instances) of `m`, using the
  # requested method from philentropy::distance (default Jaccard, which
  # suits the binary association matrix). Row and column names of the
  # result are carried over from the rownames of `m`.
  dist.mat = suppressMessages(philentropy::distance(m, distance.method))
  dimnames(dist.mat) = list(rownames(m), rownames(m))
  return(dist.mat)
}
non.diag = function(m){
  # Logical mask that is TRUE for every entry of `m` lying off the main
  # diagonal (i.e. where the row index differs from the column index).
  mask = row(m) != col(m)
  return(mask)
}
resolve.nondiag.zeros = function(M, k){
  # Replace zero dissimilarities between non-identical instances (which
  # would collapse their points in the 2-D projection) with a small
  # jittered value. M: distance matrix; k: number of gene columns of the
  # association matrix, used to scale the substitute distance (1/k).
  n = nrow(M)
  m = ncol(M)
  # Off-diagonal entries that are exactly zero
  nondiag.zeros = (M == 0 & non.diag(M))
  # Base substitute distance; shrinks towards 0 as k grows
  d = 1 - (k - 1)/k
  r = matrix(d, n, m)
  # Gaussian jitter around the base value (sd = d/2.5). NOTE: this makes
  # the result stochastic (no seed set here) and the matrix may lose
  # exact symmetry.
  e = rnorm(n * m, 0., d/2.5)
  e = matrix(e, n, m)
  M[nondiag.zeros] = r[nondiag.zeros] + e[nondiag.zeros]
  return(M)
}
isomap_transformation = function(d, isomap.k = 3){
  # Embed the distance matrix `d` into 2 dimensions via Isomap, using
  # `isomap.k` nearest neighbours; plotting is suppressed.
  projection = ProjectionBasedClustering::Isomap(d, k = isomap.k,
                                                 OutputDimension = 2,
                                                 PlotIt = FALSE)
  return(projection)
}
sammons_tranformation = function(d){
  # 2-D Sammon's mapping of the distance matrix `d`; this backs the
  # "mds" projection option. The function name keeps its historical
  # spelling ("tranformation") because callers depend on it.
  projection = ProjectionBasedClustering::SammonsMapping(d, OutputDimension = 2,
                                                         PlotIt = FALSE)
  return(projection)
}
tsne_transformation = function(d, tsne.perplexity = 30, tsne.iterations = 1e+3){
  # 2-D t-SNE embedding of `d`. Note `k` is the perplexity argument of
  # ProjectionBasedClustering::tSNE; whitening and plotting are disabled.
  projection = ProjectionBasedClustering::tSNE(d, k = tsne.perplexity,
                                               OutputDimension = 2,
                                               Whitening = FALSE,
                                               PlotIt = FALSE,
                                               Iterations = tsne.iterations)
  return(projection)
}
cca_transformation = function(d, cca.epochs = 10, cca.alpha0 = 0.5){
  # 2-D curvilinear component analysis of `d`; training length and
  # initial step size are supplied by the caller, plotting is suppressed.
  projection = ProjectionBasedClustering::CCA(d, Epochs = cca.epochs,
                                              OutputDimension = 2,
                                              alpha0 = cca.alpha0,
                                              PlotIt = FALSE)
  return(projection)
}
min_max_scale = function(x){
  # Linearly rescale `x` onto [0, 1]: the minimum maps to 0, the maximum
  # to 1 (a constant vector yields NaN, matching the original formula).
  rng = range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
create_layout = function(x, size, scale.factor = 1.0){
  # Build the initial circle layout: the two coordinate columns of `x`
  # are each rescaled to [0, 1], and a "radius" column is derived from
  # each instance's member count `size`.
  coords = apply(x, 2, min_max_scale)
  # Circle area is proportional to relative size, shrunk by the number
  # of instances and tuned by scale.factor (first element only).
  rel.area = (size / max(size)) / nrow(coords) * scale.factor[1]
  layout = cbind(coords, radius = sqrt(rel.area / pi))
  return(layout)
}
packing_simple = function(x, packing.maxiter = 1e+6){
  # Resolve circle overlaps by iterative repulsion (packcircles),
  # constrained to the bounding box of the incoming coordinates.
  # `x` has columns x, y, radius; returns the adjusted layout frame.
  bounds.x = range(x[, 1])
  bounds.y = range(x[, 2])
  packed = packcircles::circleRepelLayout(x,
                                          bounds.x,
                                          bounds.y,
                                          xysizecols = 1:3,
                                          sizetype = "radius",
                                          maxiter = packing.maxiter,
                                          wrap = FALSE)
  return(packed$layout)
}
calc_closeness = function(dm, w){
  # Closeness (centrality) of each instance: the inverse of its weighted
  # mean distance to all instances (self-distance 0 included).
  #
  # dm: distance matrix (coerced with as.matrix() if needed); w: weights.
  # Returns a numeric vector, or scalar 1 when there is a single instance
  # (closeness is undefined there).
  #
  # Fix: the original tested `class(dm) != 'matrix'`; on R >= 4.0 a
  # matrix has class c("matrix", "array"), so the condition has length 2
  # and `if` errors (R >= 4.2) or warns. Use is.matrix() instead.
  if (!is.matrix(dm)){
    dm = as.matrix(dm)
  }
  if (all(dim(dm) > 1)){
    closeness = 1 / apply(dm, 1, weighted.mean, w)
  } else {
    closeness = 1
  }
  return(closeness)
}
select_clustering = function(cls, dm, w, stat = 'meta'){
  # Pick the best partition from a list of candidate clusterings `cls`,
  # scored with weighted cluster-quality statistics on distance matrix
  # `dm` (WeightedCluster). stat = 'meta' combines all statistics via
  # the geometric mean of their per-statistic ranks; otherwise a single
  # named statistic column is used directly.
  cq = lapply(cls, function(cl)WeightedCluster::wcClusterQuality(dm, cl, weights = w)$stats)
  cq = data.frame(do.call(rbind, cq))
  if (stat == 'meta'){
    # Rank each statistic across candidates, then geometric mean of
    # ranks (the division by ncol(cq) rescales but does not change
    # which candidate maximises the score)
    cq = apply(cq, 2, rank)/ncol(cq)
    score = exp(rowMeans(log(cq)))
  } else {
    score = cq[,stat]
  }
  # Return the candidate with the highest score
  cl = cls[[which.max(score)]]
  return(cl)
}
hkclustering = function(dm, w, no.clusters = NULL, max.clusters = 5, hc.method = 'ward.D', cluster.stat = 'meta'){
  # Weighted hierarchical clustering of instances from distance matrix `dm`.
  # If no.clusters is NULL, the tree is cut at every k in 2..max.clusters
  # and the best partition is chosen via select_clustering(); otherwise
  # the tree is cut at the requested k. Both are capped at nrow(dm).
  #
  # NOTE(review): hclust is run on dist(dm), i.e. Euclidean distances
  # between rows of the distance matrix, not on dm itself as a
  # dissimilarity object -- looks intentional, but verify.
  hc = hclust(dist(dm), method = hc.method, members = w)
  if (is.null(no.clusters)){
    max.clusters = min(max.clusters, nrow(dm))
    clustering_list = lapply(2:max.clusters, function(k)cutree(hc, k))
    clustering = select_clustering(clustering_list, dm, w, stat = cluster.stat)
  } else {
    no.clusters = min(no.clusters, nrow(dm))
    clustering = cutree(hc, no.clusters)
  }
  return(clustering)
}
#' A function to create a layout for GSOAP plot
#'
#' A function evaluates the overlaps among instance
#' (e.g. pathways, or GO terms) query gene members
#' by binary distance measure and applies the projection
#' to map the instances into 2-dimensional space.
#' Obtained coordinates are then adjusted by circle packing.
#' Additional characteristics of the instances - closeness (centrality)
#' and clustering - are calculated.
#'
#'
#' @param x a data frame with the results of gene set over-representation analysis.
#' Must have rownames indicating names of the genes sets and at least two columns,
#' one of which contains query gene members; and another one that contains respective
#' p-values (raw or adjusted for multiple testing).
#'
#' @param genes a character or integer, indicating name or index of the column containing genes members.
#' @param pvalues a character or integer, indicating name or index of the column containing p-values.
#' @param splitter a character to be used as a delimiter to parse the genes column.
#' @param distance a character indicating method used to calculate the distance/dissimilarity between instances.
#' Options include (but are not limited to) \emph{jaccard} (default), \emph{manhattan}, \emph{dice},
#' \emph{pearson}. For more details see \code{\link[philentropy]{distance}}.
#' @param projection a character indicating method used to project instances into 2-dimensional space based on their distance/dissimilarity..
#' Options include \emph{iso} (isomap; default), \emph{mds} (multidimensional scaling), \emph{cca} (curvilinear component analysis), \emph{tsne} (t-distributed stochastic neighbor embedding),
#' @param scale.factor a positive real number to control dependence of the circle radius on the number of query gene members of the given gene set.
#' @param weighted a boolean indicating whether to use pathway \emph{significance}
#' (-log10(pvalue)) as a weight when closeness and clustering are calculated.
#' @param packing a boolean indicating whether to apply circle packing.
#' @param clustering a boolean indicating whether to apply clustering.
#' @param hc.method a character indicating method of hierarchical cluster to be used.
#' Options include: \emph{ward.D} (default), \emph{ward.D2}, \emph{single}, \emph{complete},
#' \emph{average}, \emph{mcquitty}, \emph{median} and \emph{centroid}.
#' @param no.clusters an integer indicating number of clusters, must be less than number of gene sets (rows).
#' @param max.clusters an integer indicating maximum number of clusters to consider, must be at least two.
#' @param cluster.stat an indicating statistic used to select optimal number of clusters.
#' Options are:
#' \itemize{
#' \item \emph{meta} (default) is a combination of the methods listed below
#' \item \emph{PBC} (point biserial correlation)
#' \item \emph{HG} (Hubert's gamma)
#' \item \emph{HGSD} (Hubert's gamma - Somer's D)
#' \item \emph{ASW} (average silhouette width)
#' \item \emph{ASWw} (Average weighted silhouette width)
#' \item \emph{CH} (Calinski-Harabasz index)
#' \item \emph{R2} (R-squared)
#' \item \emph{CHsq} (Calinski-Harabasz index using squared distances)
#' \item \emph{R2sq} (R-squared using squared distances)
#' \item \emph{HC} (Hubert's C coefficient)
#' }
#'
#' @param isomap.k an integer indicating number of k nearest neighbors of
#' the \emph{isomap} projection.
#' @param tsne.perplexity an integer indicating \emph{tSNE} perplexity.
#' @param tsne.iterations an integer indicating maximum number of \emph{tSNE} iterations to perform.
#' @param cca.epochs an integer indicating \emph{CCA} training length.
#' @param cca.alpha0 a positive real number indicating \emph{CCA} initial step size.
#'
#' @return \code{layout} a data frame with x and y coordinates of
#' the points representing the instances, their size (radius) derived from
#' the number of gene members; significance (-log10(p-value)), closeness and
#' cluster membership.
#'
#' @author Tomas Tokar <tomastokar@gmail.com>
#'
#' @examples
#' data(pxgenes)
#'
#' l = gsoap_layout(pxgenes, 'Members', 'p.value')
#'
#' head(l)
#'
gsoap_layout = function(x,
genes,
pvalues,
splitter = '/',
distance = 'jaccard',
projection = 'iso',
scale.factor = 1.0,
weighted = TRUE,
packing = TRUE,
clustering = TRUE,
hc.method = 'ward.D',
no.clusters = NULL,
max.clusters = 8,
cluster.stat = 'meta',
isomap.k = 3,
tsne.perplexity = 30,
tsne.iterations = 1e+3,
cca.epochs = 10,
cca.alpha0 = 0.5){
# Full parameter documentation is in the roxygen block above.
# Pipeline: parse gene members -> binary association matrix -> pairwise
# distances -> 2-D projection -> circle sizing/packing -> closeness and
# (optionally) clustering. Returns a layout data frame for gsoap_plot.
# -------------
# Check inputs
# -------------
if (missing(x)){
stop('Input is missing')
}
if (!is.data.frame(x)){
stop('Input is not data frame')
}
if (!is.character(rownames(x))){
stop('Input has missing or improper rownames')
}
if(!((genes %in% colnames(x))|(genes %in% 1:ncol(x)))){
stop('Wrong `genes` value')
}
if(!((pvalues %in% colnames(x))|(pvalues %in% 1:ncol(x)))){
stop('Wrong `pvalues` value')
}
# If the splitter never occurs, the genes column was probably wrong
if (!any(grepl(splitter, x[,genes]))){
warning('Either `genes`, or `splitter` seem to be not correct.')
}
if (any(is.na(x[c(genes, pvalues)]))){
stop('Input contains NA values.')
}
# --------------
# Create layout
# --------------
# Extract query genes -- instances memberships (one character vector of
# gene members per gene set, named by the gene set)
memberships.list = setNames(strsplit(x[,genes], splitter), rownames(x))
# Create binary instance-by-gene association matrix
asc.mat = create_association_matrix(memberships.list)
# Number of member genes per instance (circle sizes)
no.members = rowSums(asc.mat)
# Pairwise distance matrix between instances
dist.mat = calc_distance_matrix(asc.mat, distance.method = distance)
# --------------------------
# Do projection to 2d space
# --------------------------
# Check for zeros apart from the main diagonal; identical member sets
# yield zero distance and would collapse in the projection
if (any(rowSums(dist.mat == 0.) > 1)){
warning("Zero dissimilarity between non-identical entries.")
k = ncol(asc.mat)
# NOTE: substitute distances are drawn with rnorm(), so the layout is
# stochastic unless the caller sets a seed beforehand
dist.mat = resolve.nondiag.zeros(dist.mat, k)
}
if (projection == 'iso'){
proj = suppressMessages(isomap_transformation(dist.mat,
isomap.k = isomap.k))
}
if (projection == 'mds'){
#res = mds_transformation(d)
proj = suppressMessages(sammons_tranformation(dist.mat))
}
if (projection == 'cca'){
proj = suppressMessages(cca_transformation(dist.mat,
cca.epochs,
cca.alpha0))
}
if (projection == 'tsne'){
proj = suppressMessages(tsne_transformation(dist.mat,
tsne.perplexity,
tsne.iterations))
}
# 2-D coordinates of instances from the chosen projection
xy = proj$ProjectedPoints
# Normalize coordinates and derive circle radii from member counts
layout = create_layout(xy, no.members, scale.factor = scale.factor)
# Circle packing to remove overlaps (optional)
if (packing){
layout = packing_simple(layout)
} else {
layout = data.frame(layout)
}
# Set colnames
layout = setNames(layout, c('x', 'y', 'radius'))
# Set rownames
rownames(layout) = rownames(x)
# Number of query gene members per instance
layout$size = no.members
# Significance as -log10 of the supplied p-values
layout$significance = -log10(x[,pvalues])
# Weights for closeness/clustering: uniform, or significance if weighted
weights = rep(1, nrow(layout))
if (weighted){
weights = layout$significance
}
# Calculate closeness and add to layout
layout$closeness = calc_closeness(dist.mat, weights)
# ---------------------
# Calculate distortion
# ---------------------
# Quantify how faithfully the final 2-D layout (after projection and
# packing) preserves the original dissimilarities; reported via message()
# Euclidean distances between instances after projection/packing
dx = as.matrix(suppressMessages(philentropy::distance(layout[,1:2], method = 'euclidean')))
# Kruskal stress after projection and packing (lower is better)
stress = ProjectionBasedClustering::KruskalStress(dist.mat, dx)
message(paste('Kruskall stress :', sprintf('%1.3f', stress)))
# Spearman rank correlation between original and layout distances
spcorr = cor(c(dist.mat), c(dx), method = 'spearman')
message(paste('Rank correlation :', sprintf('%1.3f', spcorr)))
# -----------------------
# Extended functionality
# -----------------------
# Optional weighted hierarchical clustering of instances
if (clustering){
# Cluster labels ("Cluster 1", "Cluster 2", ...)
layout$cluster = hkclustering(dist.mat,
weights,
no.clusters = no.clusters,
max.clusters = max.clusters,
hc.method = hc.method,
cluster.stat = cluster.stat)
layout$cluster = paste('Cluster', layout$cluster)
}
# Return
return(layout)
}
| /R/gsoap_layout.R | no_license | tomastokar/gsoap | R | false | false | 13,921 | r | #' @export gsoap_layout
#' @importFrom tsne tsne
#' @importFrom ProjectionBasedClustering Isomap SammonsMapping tSNE CCA KruskalStress
#' @importFrom philentropy distance
#' @importFrom packcircles circleRepelLayout
#' @importFrom WeightedCluster wcKMedRange wcKMedoids
create_association_matrix = function(l){
  # Build a binary instance-by-gene membership matrix from a named list `l`:
  # names(l) are instance (e.g. pathway) ids and elements are character
  # vectors of member gene ids. Returns a numeric matrix with one row per
  # instance, one column per unique gene, entries 1 (member) / 0 (non-member).
  # Extract instances and members as parallel long-format vectors
  instances = rep(names(l), times = lengths(l))
  members = unlist(l)
  # Create long format adjacency mat.
  am = data.frame(instances, members, adjacency = 1)
  # Reshape from long to wide format
  am = reshape(am, idvar = 'instances', timevar = 'members', direction = 'wide')
  # Drop the idvar column, leaving exactly one column per unique member
  am = am[,-1]
  # Set colnames and row names (reshape keeps first-appearance order)
  colnames(am) = unique(members)
  rownames(am) = unique(instances)
  # Replace NAs (non-membership) by zero
  am[is.na(am)] = 0
  # BUGFIX(review): the original dropped the first column a SECOND time here,
  # silently discarding all memberships of the first gene encountered; that
  # spurious removal has been deleted.
  # Convert to matrix
  am = as.matrix(am)
  # Return adjacency matrix
  return(am)
}
calc_distance_matrix = function(m, distance.method = 'jaccard'){
  # Pairwise distance matrix between the rows of `m` using the requested
  # method, with the row names of `m` carried over as both dimnames.
  d = suppressMessages(philentropy::distance(m, distance.method))
  dimnames(d) = list(rownames(m), rownames(m))
  return(d)
}
non.diag = function(m){
  # Logical mask of the same shape as `m` selecting every cell that does not
  # lie on the main diagonal.
  row(m) != col(m)
}
resolve.nondiag.zeros = function(M, k){
  # Replace zero entries lying OFF the main diagonal of `M` with a small
  # positive dissimilarity d = 1/k, jittered by Gaussian noise (sd = d/2.5),
  # so downstream projections never see distinct instances at distance zero.
  # Note: the rnorm() draw happens unconditionally, preserving the RNG
  # stream of the original implementation even when nothing is replaced.
  nr = nrow(M)
  nc = ncol(M)
  zero.mask = (M == 0 & non.diag(M))
  d = 1 - (k - 1)/k
  jitter = matrix(rnorm(nr * nc, 0., d/2.5), nr, nc)
  M[zero.mask] = d + jitter[zero.mask]
  return(M)
}
isomap_transformation = function(d, isomap.k = 3){
  # 2-d Isomap embedding of the distance matrix `d` using k nearest neighbours.
  ProjectionBasedClustering::Isomap(d,
                                    k = isomap.k,
                                    OutputDimension = 2,
                                    PlotIt = FALSE)
}
sammons_tranformation = function(d){
  # 2-d Sammon mapping of the distance matrix `d`.
  # NOTE(review): the historical 'tranformation' spelling is kept because
  # callers elsewhere reference this name.
  ProjectionBasedClustering::SammonsMapping(d,
                                            OutputDimension = 2,
                                            PlotIt = FALSE)
}
tsne_transformation = function(d, tsne.perplexity = 30, tsne.iterations = 1e+3){
  # 2-d t-SNE embedding of the distance matrix `d`.
  # NOTE(review): the perplexity is forwarded via tSNE()'s `k` argument,
  # mirroring the original call -- confirm against the
  # ProjectionBasedClustering::tSNE signature.
  ProjectionBasedClustering::tSNE(d,
                                  k = tsne.perplexity,
                                  OutputDimension = 2,
                                  Whitening = FALSE,
                                  PlotIt = FALSE,
                                  Iterations = tsne.iterations)
}
cca_transformation = function(d, cca.epochs = 10, cca.alpha0 = 0.5){
  # 2-d curvilinear component analysis projection of the distance matrix `d`.
  ProjectionBasedClustering::CCA(d,
                                 Epochs = cca.epochs,
                                 OutputDimension = 2,
                                 alpha0 = cca.alpha0,
                                 PlotIt = FALSE)
}
min_max_scale = function(x){
  # Linearly rescale a numeric vector onto [0, 1].
  # NOTE(review): yields NaN when all values of `x` are equal -- confirm
  # callers never pass a constant column.
  lo = min(x)
  (x - lo) / (max(x) - lo)
}
create_layout = function(x, size, scale.factor = 1.0){
  # Assemble the base layout: min-max normalised xy coordinates plus, for
  # each instance, the radius of a circle whose area is proportional to
  # `size` (number of member genes) scaled by `scale.factor`.
  # Normalise each coordinate column onto [0, 1]
  xy = apply(x, 2, min_max_scale)
  # Express sizes as a fraction of the largest, spread over all instances
  size.normed = (size / max(size)) / nrow(xy) * scale.factor[1]
  # Radius of a circle whose area equals the normalised size
  radius = sqrt(size.normed / pi)
  cbind(xy, radius)
}
packing_simple = function(x, packing.maxiter = 1e+6){
  # Repel overlapping circles until they no longer intersect, keeping them
  # within the bounding box of the original coordinates.
  # `x` columns: x coordinate, y coordinate, radius.
  xlim = range(x[, 1])
  ylim = range(x[, 2])
  packed = packcircles::circleRepelLayout(x,
                                          xlim,
                                          ylim,
                                          xysizecols = 1:3,
                                          sizetype = "radius",
                                          maxiter = packing.maxiter,
                                          wrap = FALSE)
  # Only the adjusted layout component is of interest
  packed$layout
}
calc_closeness = function(dm, w){
  # Weighted closeness (centrality) of each instance: the reciprocal of the
  # weighted mean distance from that instance to every instance.
  #
  # dm - distance matrix, or an object coercible to one (e.g. 'dist')
  # w  - per-instance weights used in the weighted mean
  # Returns a numeric vector (scalar 1 for degenerate 1x1 input).
  closeness = 1.
  # BUGFIX(review): in R >= 4.0 matrices have class c("matrix", "array"),
  # so the original `class(dm) != 'matrix'` produced a length-2 condition
  # (an error under R >= 4.2); is.matrix() is the robust idiom.
  if (!is.matrix(dm)){
    dm = as.matrix(dm)
  }
  if (all(dim(dm) > 1)){
    closeness = 1. / apply(dm , 1, weighted.mean, w)
  }
  return(closeness)
}
select_clustering = function(cls, dm, w, stat = 'meta'){
  # From a list of candidate cluster assignments `cls`, pick the one with the
  # best quality on distance matrix `dm` (weights `w`), judged by `stat`.
  # stat = 'meta' combines all quality statistics via the geometric mean of
  # their normalised ranks; otherwise a single named statistic column is used.
  quality = lapply(cls, function(cl) {
    WeightedCluster::wcClusterQuality(dm, cl, weights = w)$stats
  })
  quality = data.frame(do.call(rbind, quality))
  if (stat == 'meta'){
    ranked = apply(quality, 2, rank) / ncol(quality)
    score = exp(rowMeans(log(ranked)))
  } else {
    score = quality[, stat]
  }
  cls[[which.max(score)]]
}
hkclustering = function(dm, w, no.clusters = NULL, max.clusters = 5, hc.method = 'ward.D', cluster.stat = 'meta'){
  # Hierarchically cluster the instances described by distance matrix `dm`
  # (weights `w` are passed through as hclust `members`). If `no.clusters`
  # is supplied the tree is cut at that count (capped at nrow(dm));
  # otherwise candidate cuts 2..max.clusters are scored by
  # select_clustering() and the best assignment is returned.
  # NOTE(review): dist(dm) treats the distance matrix itself as a data
  # matrix, i.e. clusters on distances-between-distance-profiles -- kept
  # as-is to preserve behaviour.
  tree = hclust(dist(dm), method = hc.method, members = w)
  if (!is.null(no.clusters)){
    return(cutree(tree, min(no.clusters, nrow(dm))))
  }
  upper = min(max.clusters, nrow(dm))
  candidates = lapply(2:upper, function(k) cutree(tree, k))
  select_clustering(candidates, dm, w, stat = cluster.stat)
}
#' A function to create a layout for GSOAP plot
#'
#' A function evaluates the overlaps among instance
#' (e.g. pathways, or GO terms) query gene members
#' by binary distance measure and applies the projection
#' to map the instances into 2-dimensional space.
#' Obtained coordinates are then adjusted by circle packing.
#' Additional characteristics of the instances - closeness (centrality)
#' and clustering - are calculated.
#'
#'
#' @param x a data frame with the results of gene set over-representation analysis.
#' Must have rownames indicating names of the genes sets and at least two columns,
#' one of which contains query gene members; and another one that contains respective
#' p-values (raw or adjusted for multiple testing).
#'
#' @param genes a character or integer, indicating name or index of the column containing genes members.
#' @param pvalues a character or integer, indicating name or index of the column containing p-values.
#' @param splitter a character to be used as a delimiter to parse the genes column.
#' @param distance a character indicating method used to calculate the distance/dissimilarity between instances.
#' Options include (but are not limited to) \emph{jaccard} (default), \emph{manhattan}, \emph{dice},
#' \emph{pearson}. For more details see \code{\link[philentropy]{distance}}.
#' @param projection a character indicating the method used to project instances into 2-dimensional space based on their distance/dissimilarity.
#' Options include \emph{iso} (isomap; default), \emph{mds} (multidimensional scaling), \emph{cca} (curvilinear component analysis), \emph{tsne} (t-distributed stochastic neighbor embedding).
#' @param scale.factor a positive real number to control dependence of the circle radius on the number of query gene members of the given gene set.
#' @param weighted a boolean indicating whether to use pathway \emph{significance}
#' (-log10(pvalue)) as a weight when closeness and clustering are calculated.
#' @param packing a boolean indicating whether to apply circle packing.
#' @param clustering a boolean indicating whether to apply clustering.
#' @param hc.method a character indicating method of hierarchical cluster to be used.
#' Options include: \emph{ward.D} (default), \emph{ward.D2}, \emph{single}, \emph{complete},
#' \emph{average}, \emph{mcquitty}, \emph{median} and \emph{centroid}.
#' @param no.clusters an integer indicating number of clusters, must be less than number of gene sets (rows).
#' @param max.clusters an integer indicating maximum number of clusters to consider, must be at least two.
#' @param cluster.stat a character indicating the statistic used to select the optimal number of clusters.
#' Options are:
#' \itemize{
#' \item \emph{meta} (default) is a combination of the methods listed below
#' \item \emph{PBC} (point biserial correlation)
#' \item \emph{HG} (Hubert's gamma)
#' \item \emph{HGSD} (Hubert's gamma - Somers' D)
#' \item \emph{ASW} (average silhouette width)
#' \item \emph{ASWw} (Average weighted silhouette width)
#' \item \emph{CH} (Calinski-Harabasz index)
#' \item \emph{R2} (R-squared)
#' \item \emph{CHsq} (Calinski-Harabasz index using squared distances)
#' \item \emph{R2sq} (R-squared using squared distances)
#' \item \emph{HC} (Hubert's C coefficient)
#' }
#'
#' @param isomap.k an integer indicating number of k nearest neighbors of
#' the \emph{isomap} projection.
#' @param tsne.perplexity an integer indicating \emph{tSNE} perplexity.
#' @param tsne.iterations an integer indicating maximum number of \emph{tSNE} iterations to perform.
#' @param cca.epochs an integer indicating \emph{CCA} training length.
#' @param cca.alpha0 a positive real number indicating \emph{CCA} initial step size.
#'
#' @return \code{layout} a data frame with x and y coordinates of
#' the points representing the instances, their size (radius) derived from
#' the number of gene members; significance (-log10(p-value)), closeness and
#' cluster membership.
#'
#' @author Tomas Tokar <tomastokar@gmail.com>
#'
#' @examples
#' data(pxgenes)
#'
#' l = gsoap_layout(pxgenes, 'Members', 'p.value')
#'
#' head(l)
#'
gsoap_layout = function(x,
genes,
pvalues,
splitter = '/',
distance = 'jaccard',
projection = 'iso',
scale.factor = 1.0,
weighted = TRUE,
packing = TRUE,
clustering = TRUE,
hc.method = 'ward.D',
no.clusters = NULL,
max.clusters = 8,
cluster.stat = 'meta',
isomap.k = 3,
tsne.perplexity = 30,
tsne.iterations = 1e+3,
cca.epochs = 10,
cca.alpha0 = 0.5){
# -------------
# Check inputs
# -------------
if (missing(x)){
stop('Input is missing')
}
if (!is.data.frame(x)){
stop('Input is not data frame')
}
if (!is.character(rownames(x))){
stop('Input has missing or improper rownames')
}
if(!((genes %in% colnames(x))|(genes %in% 1:ncol(x)))){
stop('Wrong `genes` value')
}
if(!((pvalues %in% colnames(x))|(pvalues %in% 1:ncol(x)))){
stop('Wrong `pvalues` value')
}
if (!any(grepl(splitter, x[,genes]))){
warning('Either `genes`, or `splitter` seem to be not correct.')
}
if (any(is.na(x[c(genes, pvalues)]))){
stop('Input contains NA values.')
}
# --------------
# Create layout
# --------------
# Extract query genes -- instances memberships
memberships.list = setNames(strsplit(x[,genes], splitter), rownames(x))
# Create association matrix
asc.mat = create_association_matrix(memberships.list)
# Get number of member genes
no.members = rowSums(asc.mat)
# Calculate distance matrix
dist.mat = calc_distance_matrix(asc.mat, distance.method = distance)
# --------------------------
# Do projection to 2d space
# --------------------------
# Check for zeros appart of the main diagonal
if (any(rowSums(dist.mat == 0.) > 1)){
warning("Zero dissimilarity between non-identical entries.")
k = ncol(asc.mat)
dist.mat = resolve.nondiag.zeros(dist.mat, k)
}
if (projection == 'iso'){
proj = suppressMessages(isomap_transformation(dist.mat,
isomap.k = isomap.k))
}
if (projection == 'mds'){
#res = mds_transformation(d)
proj = suppressMessages(sammons_tranformation(dist.mat))
}
if (projection == 'cca'){
proj = suppressMessages(cca_transformation(dist.mat,
cca.epochs,
cca.alpha0))
}
if (projection == 'tsne'){
proj = suppressMessages(tsne_transformation(dist.mat,
tsne.perplexity,
tsne.iterations))
}
# Do 2d projection
xy = proj$ProjectedPoints
# Calculate circle radius
layout = create_layout(xy, no.members, scale.factor = scale.factor)
# Circle packing
if (packing){
layout = packing_simple(layout)
} else {
layout = data.frame(layout)
}
# Set colnames
layout = setNames(layout, c('x', 'y', 'radius'))
# Set rownames
rownames(layout) = rownames(x)
# Calculate number of members
layout$size = no.members
# Calculate significance
layout$significance = -log10(x[,pvalues])
# Set weights
weights = rep(1, nrow(layout))
if (weighted){
weights = layout$significance
}
# Calculate closeness and add to layout
layout$closeness = calc_closeness(dist.mat, weights)
# ---------------------
# Calculate distortion
# ---------------------
# Calculate euclidean distance between instances after projection
dx = as.matrix(suppressMessages(philentropy::distance(layout[,1:2], method = 'euclidean')))
# Calculate Kruskal stress after projection and packing
stress = ProjectionBasedClustering::KruskalStress(dist.mat, dx)
message(paste('Kruskall stress :', sprintf('%1.3f', stress)))
# Calculate spearman correlation
spcorr = cor(c(dist.mat), c(dx), method = 'spearman')
message(paste('Rank correlation :', sprintf('%1.3f', spcorr)))
# -----------------------
# Extended functionality
# -----------------------
# Do clustering
if (clustering){
# Clustering
layout$cluster = hkclustering(dist.mat,
weights,
no.clusters = no.clusters,
max.clusters = max.clusters,
hc.method = hc.method,
cluster.stat = cluster.stat)
layout$cluster = paste('Cluster', layout$cluster)
}
# Return
return(layout)
}
|
## buoyancy-driven (stack-effect) tunnel ventilation: solve the pressure
## balance Ps(u) = P.loss(u) for the steady-state air speed u.
# key parameters!!
H <- 3 # height difference, m
L <- 400 # tunnel length, m
Q <- 50 # heat generation rate, kW
a <- 2 # size of duct, m
b <- 2.4 # size of duct, m
T.out <- 273+15 # outdoor temp, K
xi <- 100 # total minor pressure loss coefficient
U.out <- 2 # wind velocity, m/s
#
# properties
rho <- 1.205 # density at 20C, kg/m3
g <- 9.8 # gravitational acceleration, m/s2
mu <- 1.88e-5 # dynamic (absolute) viscosity, kg/(m s)
k <- 0.1 # roughness, m
cp <- 1.005 # specific heat, kJ/(kg K) (consistent with Q in kW)
#
# Pressure-balance residual at air speed u (m/s): buoyancy (stack) pressure
# minus total (major + minor) pressure losses. The root of f is the
# steady-state ventilation velocity.
f <- function(u){
  # calculated parameters
  T.in <- T.out + Q/(cp * u * rho * a * b) # indoor temp from heat balance, K
  dT <- abs(T.in - T.out) # temperature difference, K
  d.e <- 1.30 * (a * b)^0.625 / (a + b)^0.25 # effective diameter, m
  d.h <- 2 * a * b / (a + b) # hydraulic diameter, m
  Re <- rho * u * d.h / mu # Reynolds number, turbulent: Re > 4000. Turbulent: TRUE
  #
  # Solve the Colebrook-White equation for the friction factor lambda.
  # BUGFIX(review): the Colebrook correlation uses the base-10 logarithm;
  # the original used R's natural log(), giving a wrong friction factor.
  f1 <- function(lambda){1 / lambda^0.5 + 2 * log10(2.51 / (Re * lambda^0.5) + (k / d.h) / 3.72)}
  # BUGFIX(review): lower bracket must be > 0 -- f1(0) is infinite and
  # uniroot() rejects non-finite endpoint values.
  lambda <- uniroot(f1, c(1e-6, 10))$root
  #
  P.major.loss <- lambda * L / d.h * (rho * u^2)/2 # major (friction) pressure loss, Pa
  P.minor.loss <- xi * (rho * u^2)/2 # minor (fittings) pressure loss, Pa
  P.loss <- P.major.loss + P.minor.loss
  Ps <- rho * g * H * dT / T.in # buoyancy-driven pressure, Pa
  # BUGFIX(review): dynamic pressure is 0.5*rho*U^2; the square was missing.
  # (Pw is informational only -- it does not enter the balance below.)
  Pw <- 0.5 * rho * U.out^2 # wind dynamic pressure, Pa
  Ps - P.loss
}
u <- uniroot(f, c(1e-5, 100))$root # steady-state air velocity, m/s
T.in <- T.out + Q/(cp * u * rho * a * b)
dT <- abs(T.in - T.out) # temperature difference, K
Ps <- rho * g * H * abs(T.in - T.out) / T.in # buoyancy-driven pressure, Pa
ACH <- u/L*3600 # air change rate per hour
Q.air <- a * b * u * 3600 # air volume rate per hour, m3/h
print(data.frame(u, ACH, Q.air, Ps, dT))
| /tunnelVentCal.R | no_license | alpertSnow/tunnelVent | R | false | false | 1,798 | r | ## boyancy-driven ventilation
# key parameters!!
H <- 3 # height difference, m
L <- 400 # tunnel length, m
Q <- 50 # heat generation rate, kW
a <- 2 # size of duct, m
b <- 2.4 # size of duct, m
T.out <- 273+15 # outdoor temp, K
xi <- 100 # total minor pressure loss coefficient
U.out <- 2 # wind velocity, m/s
#
# properties
rho <- 1.205 # density at 20C, kg/m3
g <- 9.8
mu <- 1.88e-5 # dynamic (absolute) viscosity, kg/(m s)
k <- 0.1 # roughness, m
cp <- 1.005 # kJ/(kg K)
#
f <- function(u){
# calculated parameters
T.in <- T.out + Q/(cp * u * rho * a * b)
dT <- abs(T.in - T.out) # temperature difference, K
d.e <- 1.30 * (a * b)^0.625 / (a + b)^0.25 # effective diameter, m
d.h <- 2 * a * b / (a + b) # hydraulic diameter, m
Re <- rho * u * d.h / mu # Reynolds number, turbulent: Re > 4000. Turbulent: TRUE
#
# solve minor pressure loss coefficient, lambda
f1 <- function(lambda){1 / lambda^0.5 + 2 * log(2.51 / (Re * lambda^0.5) + (k / d.h) / 3.72)}
lambda <- uniroot(f1, c(0,10))$root
#
P.major.loss <- lambda * L / d.h * (rho * u^2)/2 # major pressure loss, Pa
P.minor.loss <- xi * (rho * u^2)/2
P.loss <- P.major.loss + P.minor.loss
Ps <- rho * g * H * dT / T.in # boyancy-driven pressure, Pa
Pw <- 0.5 * rho * U.out # dynamic pressure, Pa
Ps - P.loss
}
u <- uniroot(f, c(1e-5, 100))$root
T.in <- T.out + Q/(cp * u * rho * a * b)
dT <- abs(T.in - T.out) # temperature difference, K
Ps <- rho * g * H * abs(T.in - T.out) / T.in # boyancy-driven pressure, Pa
ACH <- u/L*3600 # air change rate per hour
Q.air <- a * b * u * 3600 # air volume rate per hour, m3/h
print(data.frame(u, ACH, Q.air, Ps, dT))
|
# Exploratory Data Analysis -- plot4: four-panel household power-consumption
# figure for 2007-02-01 and 2007-02-02 (UCI household power dataset).
library(lubridate)
library(dplyr)
fileUrl<-("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip")
# BUGFIX(review): download.file() fails when ./data does not exist yet.
if (!dir.exists("./data")) dir.create("./data", recursive = TRUE)
download.file(fileUrl, destfile = "./data/power.zip")
unzip("./data/power.zip", exdir="./data")
# na.strings = "?": the raw file encodes missing readings as "?"; mapping them
# to NA keeps the measurement columns numeric instead of character/factor.
power<-read.table("./data/household_power_consumption.txt", sep=";", header = TRUE,
                  na.strings = "?")
# Keep only the two target days (single %in% filter replaces the original
# two filters + rbind; file order preserves day 1 before day 2)
plotpower<-power %>% filter(Date %in% c("1/2/2007", "2/2/2007"))
plotpower$newdate<-strptime(plotpower$Date, "%d/%m/%Y")
plotpower$day<-weekdays(plotpower$newdate)
plotpower$datetime<-as.POSIXct(paste(plotpower$newdate, plotpower$Time))
plotpower$Global_active_power<-as.numeric(as.character(plotpower$Global_active_power))
png(file="plot4.png", width=480, height=480, unit="px")
#plot4: 2x2 panel grid, filled column-wise
par(mfcol=c(2,2))
plot(plotpower$datetime, plotpower$Global_active_power, type="l", xlab="",ylab="Global Active Power (kilowatts)")
plot(plotpower$datetime, plotpower$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(plotpower$datetime, plotpower$Sub_metering_2, col="red")
lines(plotpower$datetime, plotpower$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty=1)
plot(plotpower$datetime, plotpower$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(plotpower$datetime, plotpower$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
library(dplyr)
fileUrl<-("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip")
download.file(fileUrl, destfile = "./data/power.zip")
unzip("./data/power.zip", exdir="./data")
power<-read.table("./data/household_power_consumption.txt", sep=";", header = TRUE)
plotpower1<-power%>% filter(Date==c("1/2/2007"))
plotpower2<-power%>% filter(Date==c("2/2/2007"))
plotpower<-rbind(plotpower1, plotpower2)
plotpower$newdate<-strptime(plotpower$Date, "%d/%m/%Y")
plotpower$day<-weekdays(plotpower$newdate)
plotpower$datetime<-as.POSIXct(paste(plotpower$newdate, plotpower$Time))
plotpower$Global_active_power<-as.numeric(as.character(plotpower$Global_active_power))
png(file="plot4.png", width=480, height=480, unit="px")
#plot4
par(mfcol=c(2,2))
plot(plotpower$datetime, plotpower$Global_active_power, type="l", xlab="",ylab="Global Active Power (kilowatts)")
plot(plotpower$datetime, plotpower$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(plotpower$datetime, plotpower$Sub_metering_2, col="red")
lines(plotpower$datetime, plotpower$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty=1)
plot(plotpower$datetime, plotpower$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(plotpower$datetime, plotpower$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oauth2_objects.R
\name{Tokeninfo}
\alias{Tokeninfo}
\title{Tokeninfo Object}
\usage{
Tokeninfo(access_type = NULL, audience = NULL, email = NULL,
expires_in = NULL, issued_to = NULL, scope = NULL,
token_handle = NULL, user_id = NULL, verified_email = NULL)
}
\arguments{
\item{access_type}{The access type granted with this token}
\item{audience}{Who is the intended audience for this token}
\item{email}{The email address of the user}
\item{expires_in}{The expiry time of the token, as number of seconds left until expiry}
\item{issued_to}{To whom was the token issued to}
\item{scope}{The space separated list of scopes granted to this token}
\item{token_handle}{The token handle associated with this token}
\item{user_id}{The obfuscated user id}
\item{verified_email}{Boolean flag which is true if the email address is verified}
}
\value{
Tokeninfo object
}
\description{
Tokeninfo Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
| /googleoauth2v2.auto/man/Tokeninfo.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 1,079 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oauth2_objects.R
\name{Tokeninfo}
\alias{Tokeninfo}
\title{Tokeninfo Object}
\usage{
Tokeninfo(access_type = NULL, audience = NULL, email = NULL,
expires_in = NULL, issued_to = NULL, scope = NULL,
token_handle = NULL, user_id = NULL, verified_email = NULL)
}
\arguments{
\item{access_type}{The access type granted with this token}
\item{audience}{Who is the intended audience for this token}
\item{email}{The email address of the user}
\item{expires_in}{The expiry time of the token, as number of seconds left until expiry}
\item{issued_to}{To whom was the token issued to}
\item{scope}{The space separated list of scopes granted to this token}
\item{token_handle}{The token handle associated with this token}
\item{user_id}{The obfuscated user id}
\item{verified_email}{Boolean flag which is true if the email address is verified}
}
\value{
Tokeninfo object
}
\description{
Tokeninfo Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RerunWorkflow.R
\name{RerunWorkflow}
\alias{RerunWorkflow}
\title{Rerun a workflow object.}
\usage{
RerunWorkflow(workflow, from = NULL)
}
\arguments{
\item{workflow}{A zoonWorkflow object from a previous zoon analysis}
\item{from}{Which modules should be run. If NULL (default), run from the
first NULL output (i.e. where the workflow broke). Otherwise takes an
integer and runs from that module.}
}
\value{
A list with the results of each module and a copy of the
call used to execute the workflow.
}
\description{
Takes a workflow object and reruns it.
}
\examples{
\dontrun{
w <- workflow(UKAnophelesPlumbeus,
UKAir,
Background(n = 70),
LogisticRegression,
PrintMap)
RerunWorkflow(w)
}
}
| /man/RerunWorkflow.Rd | no_license | cran/zoon | R | false | true | 829 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RerunWorkflow.R
\name{RerunWorkflow}
\alias{RerunWorkflow}
\title{Rerun a workflow object.}
\usage{
RerunWorkflow(workflow, from = NULL)
}
\arguments{
\item{workflow}{A zoonWorkflow object from a previous zoon analysis}
\item{from}{Which modules should be run. If NULL (default), run from the
first NULL output (i.e. where the workflow broke). Otherwise takes an
integer and runs from that module.}
}
\value{
A list with the results of each module and a copy of the
call used to execute the workflow.
}
\description{
Takes a workflow object and reruns it.
}
\examples{
\dontrun{
w <- workflow(UKAnophelesPlumbeus,
UKAir,
Background(n = 70),
LogisticRegression,
PrintMap)
RerunWorkflow(w)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monty-hall-script.R
\name{open_goat_door}
\alias{open_goat_door}
\title{Open Goat Door}
\usage{
open_goat_door(game, a.pick)
}
\arguments{
\item{None}{}
}
\value{
Host will open a door with a goat behind it, even if the
contestant chose the door with the car behind it.
}
\description{
Host will open a door that will have a goat behind it.
}
\details{
If contestant selected car, randomly select one of two goats
}
\examples{
open_goat_door()
}
| /man/open_goat_door.Rd | no_license | voznyuky/montyhall | R | false | true | 525 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monty-hall-script.R
\name{open_goat_door}
\alias{open_goat_door}
\title{Open Goat Door}
\usage{
open_goat_door(game, a.pick)
}
\arguments{
\item{None}{}
}
\value{
Host will open a door with a goat behind it, even if the
contestant chose the door with the car behind it.
}
\description{
Host will open a door that will have a goat behind it.
}
\details{
If contestant selected car, randomly select one of two goats
}
\examples{
open_goat_door()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correctionsataglance-data.R
\name{calcStartSeq}
\alias{calcStartSeq}
\title{Get a list of start dates}
\usage{
calcStartSeq(startD, endD)
}
\arguments{
\item{startD}{the start date of the report}
\item{endD}{the end date of the report}
}
\value{
a list of start dates for the corr report sections
}
\description{
Given a start date and end date and whether
or not the start date is the first of the month, provides a list of
YYYY-MM-DD used in other functions
}
| /man/calcStartSeq.Rd | permissive | USGS-R/repgen | R | false | true | 543 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correctionsataglance-data.R
\name{calcStartSeq}
\alias{calcStartSeq}
\title{Get a list of start dates}
\usage{
calcStartSeq(startD, endD)
}
\arguments{
\item{startD}{the start date of the report}
\item{endD}{the end date of the report}
}
\value{
a list of start dates for the corr report sections
}
\description{
Given a start date and end date and whether
or not the start date is the first of the month, provides a list of
YYYY-MM-DD used in other functions
}
|
# Manually curated per-gene "weirdness" flags, keyed by gene symbol.
# The vector is initialised to NA over all rows of `features` (defined
# elsewhere in the analysis), so any gene not listed below remains NA.
# Values are 0/1 flags; a single entry (BIRC5) carries the value 2.
# NOTE(review): several genes are assigned more than once (TMEM132A, TONSL,
# FKBP4, FERMT1, SLC2A1, E2F7, ESRP1); the LAST assignment wins. E2F7 and
# ESRP1 receive conflicting values (0 then 1) -- confirm intended values.
weirdness = c(rep(NA,nrow(features)))
names(weirdness)<- rownames(features)
weirdness["TMEM132A"]<-1
weirdness["TONSL"]<-1
weirdness["TOP2A"]<-1
weirdness["TOPBP1"]<-1
weirdness["TRIP13"]<-1
weirdness["TTK"]<-1
weirdness["UBE2C"]<-1
weirdness["UBE2S"]<-1
weirdness["UBE2T"]<-1
weirdness["UHRF1"]<-1
weirdness["WDHD1"]<-1
weirdness["WDR62"]<-1
weirdness["WDR72"]<-1
weirdness["XRCC2"]<-1
weirdness["ZWILCH"]<-0
weirdness["ZWINT"]<-1
weirdness["ACTL6A"]<-1
weirdness["ANLN"]<-1
weirdness["ARHGAP11A"]<-1
weirdness["ARHGEF39"]<-1
weirdness["ASPM"]<-1
weirdness["ATAD2"]<-0
weirdness["AUNIP"]<-0
weirdness["AURKA"]<-0
weirdness["AURKB"]<-1
weirdness["B4GALNT4"]<-1
weirdness["BIRC5"]<-2 # NOTE(review): only entry with value 2 -- confirm not a typo for 1
weirdness["BRIP1"]<-1
weirdness["BUB1"]<-1
weirdness["BUB1B"]<-1
weirdness["C17orf53"]<-1
weirdness["C19orf48"]<-0
weirdness["CBLC"]<-1
weirdness["CCNA2"]<-1
weirdness["CCNB1"]<-1
weirdness["CCNB2"]<-1
weirdness["CCNE1"]<-1
weirdness["CDC6"]<-1
weirdness["CDC20"]<-1
weirdness["CDC45"]<-1
weirdness["CDCA3"]<-1
weirdness["CDCA4"]<-1
weirdness["CDCA5"]<-1
weirdness["CDCA8"]<-1
weirdness["CDK1"]<-1
weirdness["CDT1"]<-1
weirdness["CENPA"]<-1
weirdness["CENPE"]<-1
weirdness["CENPF"]<-1
weirdness["CENPH"]<-1
weirdness["CENPI"]<-0
weirdness["CENPL"]<-1
weirdness["CENPW"]<-1
weirdness["CEP55"]<-1
weirdness["CHEK1"]<-1
weirdness["CHEK2"]<-1
weirdness["CHTF18"]<-1
weirdness["CIP2A"]<-1
weirdness["CKAP2L"]<-1
weirdness["CKS1B"]<-1
weirdness["CSE1L"]<-1
weirdness["DEPDC1B"]<-1
weirdness["DLGAP5"]<-1
weirdness["DNA2"]<-1
weirdness["DSP"]<-1
weirdness["DUSP9"]<-0
weirdness["E2F7"]<-0
weirdness["E2F7"]<-1 # NOTE(review): overrides the 0 assigned on the previous line -- confirm
weirdness["ECE2"]<-1
weirdness["ECT2"]<-0
weirdness["EFNA3"]<-1
weirdness["EME1"]<-1
weirdness["ERCC6L"]<-1
weirdness["FKBP4"]<-1
weirdness["ESRP1"]<- 0
weirdness["EXO1"]<- 1
weirdness["EZH2"]<- 1
weirdness["FAM83B"]<- 1
weirdness["FAM83F"]<- 1
weirdness["FAM131C"]<- 1
weirdness["FANCI"]<- 1
weirdness["FBXO45"]<- 1
weirdness["FEN1"]<- 1
weirdness["FERMT1"]<- 1
weirdness["FKBP4"]<- 1
weirdness["FLAD1"]<- 0
weirdness["FOXM1"]<- 1
weirdness["GINS1"]<- 1
weirdness["GINS2"]<- 0
weirdness["GINS4"]<- 1
weirdness["GMPS"]<- 1
weirdness["GPR87"]<- 1
weirdness["GTSE1"]<- 1
weirdness["HELLS"]<- 1
weirdness["HJURP"]<- 1
weirdness["HMGA1"]<- 1
weirdness["IQANK1"]<- 1
weirdness["IQGAP3"]<- 1
weirdness["KIF2C"]<- 1
weirdness["KIF4A"]<- 1
weirdness["KIF11"]<- 1
weirdness["KIF15"]<- 1
weirdness["KIF18B"]<- 1
weirdness["KIF20A"]<- 1
weirdness["KIF23"]<- 1
weirdness["KIFC1"]<- 1
weirdness["KNL1"]<- 1
weirdness["KNTC1"]<- 1
weirdness["KPNA2"]<- 0
weirdness["LARGE2"]<- 1
weirdness["LMNB2"]<- 1
weirdness["MAD2L1"]<- 1
weirdness["MCM2"]<- 1
weirdness["MCM4"]<- 0
weirdness["MCM8"]<- 1
weirdness["MCM10"]<- 1
weirdness["MELK"]<- 1
weirdness["MTBP"]<- 1
weirdness["MYBL2"]<- 1
weirdness["NCAPG"]<- 1
weirdness["NCAPG2"]<- 1
weirdness["NCAPH"]<- 1
weirdness["NDC80"]<- 1
weirdness["NECTIN1"]<- 1
weirdness["NEK2"]<- 1
weirdness["NUF2"]<- 1
weirdness["NUP155"]<- 1
weirdness["NUSAP1"]<- 1
weirdness["NXPH4"]<- 1
weirdness["OIP5"]<- 1
weirdness["ORC1"]<- 1
weirdness["ORC6"]<- 1
weirdness["OTX1"]<- 1
weirdness["PAFAH1B3"]<- 1
weirdness["PARPBP"]<- 1
weirdness["PCNA"]<- 0
weirdness["PERP"]<- 1
weirdness["PITX1"]<- 1
weirdness["PKMYT1"]<- 1
weirdness["PLK1"]<- 1
weirdness["PLK4"]<- 1
weirdness["POLE2"]<- 1
weirdness["POLQ"]<- 1
weirdness["POLR2H"]<- 1
weirdness["PRC1"]<- 1
weirdness["PRR11"]<- 1
weirdness["PSAT1"]<- 1
weirdness["PTTG1"]<- 1
weirdness["RACGAP1"]<- 0
weirdness["RAD51"]<- 1
weirdness["RAD51AP1"]<- 1
weirdness["RAD54B"]<- 1
weirdness["RAD54L"]<- 1
weirdness["RANBP1"]<- 0
weirdness["RCC2"]<- 1
weirdness["RECQL4"]<- 1
weirdness["RFC4"]<- 1
weirdness["RHOV"]<- 1
weirdness["RNASEH2A"]<- 1
weirdness["SAPCD2"]<- 1
weirdness["SELENOI"]<- 1
weirdness["SERPINB5"]<- 1
weirdness["SHMT2"]<- 1
weirdness["SKP2"]<- 1
weirdness["SLC2A1"]<- 1
weirdness["SLC6A8"]<- 0
weirdness["SPAG5"]<- 1
weirdness["SPC24"]<- 1
weirdness["SPC25"]<-1
weirdness["STIL"]<- 1
weirdness["TEDC2"]<- 1
weirdness["TFAP2A"]<- 0
weirdness["SLC2A1"]<- 1 # duplicate of the earlier SLC2A1 <- 1 (same value)
weirdness["TICRR"]<- 1
weirdness["TIMELESS"]<- 1
weirdness["TK1"]<- 1
weirdness["TMEM132A"]<- 1
weirdness["TONSL"]<- 1
weirdness["ESRP1"]<- 1 # NOTE(review): overrides ESRP1 <- 0 assigned earlier -- confirm
weirdness["FERMT1"]<-1
weirdness["TPX2"]<-1
| /results/tables/scripts/weirdness_script.R | no_license | magrichard/supergenes | R | false | false | 4,265 | r |
weirdness = c(rep(NA,nrow(features)))
names(weirdness)<- rownames(features)
weirdness["TMEM132A"]<-1
weirdness["TONSL"]<-1
weirdness["TOP2A"]<-1
weirdness["TOPBP1"]<-1
weirdness["TRIP13"]<-1
weirdness["TTK"]<-1
weirdness["UBE2C"]<-1
weirdness["UBE2S"]<-1
weirdness["UBE2T"]<-1
weirdness["UHRF1"]<-1
weirdness["WDHD1"]<-1
weirdness["WDR62"]<-1
weirdness["WDR72"]<-1
weirdness["XRCC2"]<-1
weirdness["ZWILCH"]<-0
weirdness["ZWINT"]<-1
weirdness["ACTL6A"]<-1
weirdness["ANLN"]<-1
weirdness["ARHGAP11A"]<-1
weirdness["ARHGEF39"]<-1
weirdness["ASPM"]<-1
weirdness["ATAD2"]<-0
weirdness["AUNIP"]<-0
weirdness["AURKA"]<-0
weirdness["AURKB"]<-1
weirdness["B4GALNT4"]<-1
weirdness["BIRC5"]<-2
weirdness["BRIP1"]<-1
weirdness["BUB1"]<-1
weirdness["BUB1B"]<-1
weirdness["C17orf53"]<-1
weirdness["C19orf48"]<-0
weirdness["CBLC"]<-1
weirdness["CCNA2"]<-1
weirdness["CCNB1"]<-1
weirdness["CCNB2"]<-1
weirdness["CCNE1"]<-1
weirdness["CDC6"]<-1
weirdness["CDC20"]<-1
weirdness["CDC45"]<-1
weirdness["CDCA3"]<-1
weirdness["CDCA4"]<-1
weirdness["CDCA5"]<-1
weirdness["CDCA8"]<-1
weirdness["CDK1"]<-1
weirdness["CDT1"]<-1
weirdness["CENPA"]<-1
weirdness["CENPE"]<-1
weirdness["CENPF"]<-1
weirdness["CENPH"]<-1
weirdness["CENPI"]<-0
weirdness["CENPL"]<-1
weirdness["CENPW"]<-1
weirdness["CEP55"]<-1
weirdness["CHEK1"]<-1
weirdness["CHEK2"]<-1
weirdness["CHTF18"]<-1
weirdness["CIP2A"]<-1
weirdness["CKAP2L"]<-1
weirdness["CKS1B"]<-1
weirdness["CSE1L"]<-1
weirdness["DEPDC1B"]<-1
weirdness["DLGAP5"]<-1
weirdness["DNA2"]<-1
weirdness["DSP"]<-1
weirdness["DUSP9"]<-0
weirdness["E2F7"]<-0
weirdness["E2F7"]<-1
weirdness["ECE2"]<-1
weirdness["ECT2"]<-0
weirdness["EFNA3"]<-1
weirdness["EME1"]<-1
weirdness["ERCC6L"]<-1
weirdness["FKBP4"]<-1
weirdness["ESRP1"]<- 0
weirdness["EXO1"]<- 1
weirdness["EZH2"]<- 1
weirdness["FAM83B"]<- 1
weirdness["FAM83F"]<- 1
weirdness["FAM131C"]<- 1
weirdness["FANCI"]<- 1
weirdness["FBXO45"]<- 1
weirdness["FEN1"]<- 1
weirdness["FERMT1"]<- 1
weirdness["FKBP4"]<- 1
weirdness["FLAD1"]<- 0
weirdness["FOXM1"]<- 1
weirdness["GINS1"]<- 1
weirdness["GINS2"]<- 0
weirdness["GINS4"]<- 1
weirdness["GMPS"]<- 1
weirdness["GPR87"]<- 1
weirdness["GTSE1"]<- 1
weirdness["HELLS"]<- 1
weirdness["HJURP"]<- 1
weirdness["HMGA1"]<- 1
weirdness["IQANK1"]<- 1
weirdness["IQGAP3"]<- 1
weirdness["KIF2C"]<- 1
weirdness["KIF4A"]<- 1
weirdness["KIF11"]<- 1
weirdness["KIF15"]<- 1
weirdness["KIF18B"]<- 1
weirdness["KIF20A"]<- 1
weirdness["KIF23"]<- 1
weirdness["KIFC1"]<- 1
weirdness["KNL1"]<- 1
weirdness["KNTC1"]<- 1
weirdness["KPNA2"]<- 0
weirdness["LARGE2"]<- 1
weirdness["LMNB2"]<- 1
weirdness["MAD2L1"]<- 1
weirdness["MCM2"]<- 1
weirdness["MCM4"]<- 0
weirdness["MCM8"]<- 1
weirdness["MCM10"]<- 1
weirdness["MELK"]<- 1
weirdness["MTBP"]<- 1
weirdness["MYBL2"]<- 1
weirdness["NCAPG"]<- 1
weirdness["NCAPG2"]<- 1
weirdness["NCAPH"]<- 1
weirdness["NDC80"]<- 1
weirdness["NECTIN1"]<- 1
weirdness["NEK2"]<- 1
weirdness["NUF2"]<- 1
weirdness["NUP155"]<- 1
weirdness["NUSAP1"]<- 1
weirdness["NXPH4"]<- 1
weirdness["OIP5"]<- 1
weirdness["ORC1"]<- 1
weirdness["ORC6"]<- 1
weirdness["OTX1"]<- 1
weirdness["PAFAH1B3"]<- 1
weirdness["PARPBP"]<- 1
weirdness["PCNA"]<- 0
weirdness["PERP"]<- 1
weirdness["PITX1"]<- 1
weirdness["PKMYT1"]<- 1
weirdness["PLK1"]<- 1
weirdness["PLK4"]<- 1
weirdness["POLE2"]<- 1
weirdness["POLQ"]<- 1
weirdness["POLR2H"]<- 1
weirdness["PRC1"]<- 1
weirdness["PRR11"]<- 1
weirdness["PSAT1"]<- 1
weirdness["PTTG1"]<- 1
weirdness["RACGAP1"]<- 0
weirdness["RAD51"]<- 1
weirdness["RAD51AP1"]<- 1
weirdness["RAD54B"]<- 1
weirdness["RAD54L"]<- 1
weirdness["RANBP1"]<- 0
weirdness["RCC2"]<- 1
weirdness["RECQL4"]<- 1
weirdness["RFC4"]<- 1
weirdness["RHOV"]<- 1
weirdness["RNASEH2A"]<- 1
weirdness["SAPCD2"]<- 1
weirdness["SELENOI"]<- 1
weirdness["SERPINB5"]<- 1
weirdness["SHMT2"]<- 1
weirdness["SKP2"]<- 1
weirdness["SLC2A1"]<- 1
weirdness["SLC6A8"]<- 0
weirdness["SPAG5"]<- 1
weirdness["SPC24"]<- 1
weirdness["SPC25"]<-1
weirdness["STIL"]<- 1
weirdness["TEDC2"]<- 1
weirdness["TFAP2A"]<- 0
weirdness["SLC2A1"]<- 1
weirdness["TICRR"]<- 1
weirdness["TIMELESS"]<- 1
weirdness["TK1"]<- 1
weirdness["TMEM132A"]<- 1
weirdness["TONSL"]<- 1
weirdness["ESRP1"]<- 1
weirdness["FERMT1"]<-1
weirdness["TPX2"]<-1
|
### TERN LANDSCAPES
# Soil pH model model fitting
# Author: Brendan Malone
# Email: brendan.malone@csiro.au
# created: 2.9.22
# modified: 6.9.22
# CODE PURPOSE
# # Depth 4 [30-60cm]
# Use optimally determined hyperparameter values to fit ranger models
# fit 100 models
# fixed parameters
vart<- "pH4b"
depth<- "d4"
colsel<- 12
paramsf<- 4
its<- 100
# root directory
g.root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/"
data.root<- paste0(g.root, "data/curated_all/")
dists.root<- paste0(g.root, "data/field_2_4B_dists/")
params.out<- paste0(g.root, "data/ranger_model_hyperparams/")
model.out<- paste0(g.root, "models/ph_4b/")
funcs.out<- paste0(g.root, "rcode/miscell/")
slurm.root<- paste0(g.root, "rcode/slurm/ph_4b/digital_soil_mapping/model_fitting/")
r.code<- paste0(g.root, "rcode/digital_soil_mapping/model_fitting/ph_4b/")
# libraries
library(caret);library(ranger);library(raster);library(rgdal);library(sp);library(MASS);library(automap);library(gstat)
source(paste0(funcs.out,"goof.R"))
#data
# site data (calibration/validation pool)
site.dat<- readRDS(paste0(data.root,"tern_soilpH4B_siteDat_covariates_20223008_CALVALDAT_ARD.rds"))
# external validation data (held-out sites, never used for fitting)
ext.dat<- readRDS(paste0(data.root,"tern_soilpH4B_siteDat_covariates_20223008_EXTERNAL_ARD.rds"))
# hyperparameter values (one candidate per row; row `paramsf` is used below)
tuning.params<- read.csv(file = paste0(model.out, "optimal_tuning_params_pH_4b_90m.csv"))
## distributions
# Empirical field-to-lab transfer distributions, one per 0.5 pH unit from
# 3.5 ("35") to 10.5 ("105"); used below to replace field pH readings with
# lab-equivalent random draws.
dist_35<- readRDS(file = paste0(dists.root,"dists_35_field_2_4b.rds"))
dist_40<- readRDS(file = paste0(dists.root,"dists_40_field_2_4b.rds"))
dist_45<- readRDS(file = paste0(dists.root,"dists_45_field_2_4b.rds"))
dist_50<- readRDS(file = paste0(dists.root,"dists_50_field_2_4b.rds"))
dist_55<- readRDS(file = paste0(dists.root,"dists_55_field_2_4b.rds"))
dist_60<- readRDS(file = paste0(dists.root,"dists_60_field_2_4b.rds"))
dist_65<- readRDS(file = paste0(dists.root,"dists_65_field_2_4b.rds"))
dist_70<- readRDS(file = paste0(dists.root,"dists_70_field_2_4b.rds"))
dist_75<- readRDS(file = paste0(dists.root,"dists_75_field_2_4b.rds"))
dist_80<- readRDS(file = paste0(dists.root,"dists_80_field_2_4b.rds"))
dist_85<- readRDS(file = paste0(dists.root,"dists_85_field_2_4b.rds"))
dist_90<- readRDS(file = paste0(dists.root,"dists_90_field_2_4b.rds"))
dist_95<- readRDS(file = paste0(dists.root,"dists_95_field_2_4b.rds"))
dist_100<- readRDS(file = paste0(dists.root,"dists_100_field_2_4b.rds"))
dist_105<- readRDS(file = paste0(dists.root,"dists_105_field_2_4b.rds"))
# site data (what sort of data frame are we working with)
names(site.dat)
# keep columns 1:8 (site/id info -- TODO confirm), the target column (colsel),
# column 16 (presumably the field/lab `type` flag; verify) and 17:55 (covariates);
# after the selection the target sits at column 9 and covariates at 11:49
site.dat<- site.dat[,c(1:8,colsel,16,17:55)] # change column selection for target variable
site.dat<- site.dat[complete.cases(site.dat[,c(9,11:49)]),]
# working copy of the target; becomes column 50 and is overwritten for field
# observations by the conversion loop below
site.dat$target<- site.dat[,9]
##########################
# Convert field pH readings to lab-equivalent pH.
# Field pH is recorded in 0.5-unit increments; each increment has an
# empirical field-to-lab transfer distribution (dist_35 .. dist_105, loaded
# above) from which replacement values are drawn.
names(site.dat)
vect<- c(3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5)
cnt<- 9 # column index of the raw target variable
# Dispatch table mapping each field pH level to its transfer distribution.
# Replaces the original 15-branch if-chain; exactly one sample() call is
# made per populated level, so the RNG stream (and hence the result under a
# fixed seed) is unchanged.
dist.lookup<- list(
  "3.5" = dist_35, "4" = dist_40, "4.5" = dist_45, "5" = dist_50,
  "5.5" = dist_55, "6" = dist_60, "6.5" = dist_65, "7" = dist_70,
  "7.5" = dist_75, "8" = dist_80, "8.5" = dist_85, "9" = dist_90,
  "9.5" = dist_95, "10" = dist_100, "10.5" = dist_105)
for (j in seq_along(vect)){
  # rows holding a field ("F") measurement at exactly this pH level
  chg<- which(site.dat$type == "F" & site.dat[,cnt] == vect[j])
  len<- length(chg)
  if(len != 0){
    # draw one lab-equivalent value per affected record
    sel.samp<- sample(dist.lookup[[as.character(vect[j])]], size = len, replace = TRUE)
    # column 50 is the appended 'target' column; address it by name for clarity
    site.dat$target[chg]<- sel.samp}}
######################
# clean up and re-appoint data: move the (possibly lab-converted) target
# column to sit beside the site/id columns
names(site.dat)
site.dat<- site.dat[,c(1:9,50,10:49)]
# drop records with a negative target (impossible pH; guards against bad
# draws from the transfer distributions). which() is computed once, unlike
# the original which recomputed it inside the guard.
neg.idx<- which(site.dat$target < 0)
length(neg.idx)
if (length(neg.idx) != 0){site.dat<- site.dat[-neg.idx,]}
hist(site.dat$target)
summary(site.dat$target)
site.dat$Relief_geomorphons<- as.factor(site.dat$Relief_geomorphons)
# EXTERNAL DATA
# lab dat
names(ext.dat)
# same column selection as for the cal/val set: ids (1:8), target (colsel),
# type flag (16 -- presumably; verify) and covariates (17:55)
ext.dat<- ext.dat[,c(1:8,colsel,16,17:55)] # change column selection for target variable
ext.dat<- ext.dat[complete.cases(ext.dat[,c(9,11:49)]),]
# working copy of the target; becomes column 50, overwritten for field obs below
ext.dat$target<- ext.dat[,9]
##########################
# Convert field pH readings to lab-equivalent pH (same scheme as for the
# cal/val set): replace each field ("F") observation with a draw from the
# empirical field-to-lab transfer distribution for its 0.5-unit pH level.
names(ext.dat)
vect<- c(3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5)
cnt<- 9 # column index of the raw target variable
# Dispatch table replacing the original 15-branch if-chain; one sample()
# call per populated level keeps the RNG stream identical.
dist.lookup<- list(
  "3.5" = dist_35, "4" = dist_40, "4.5" = dist_45, "5" = dist_50,
  "5.5" = dist_55, "6" = dist_60, "6.5" = dist_65, "7" = dist_70,
  "7.5" = dist_75, "8" = dist_80, "8.5" = dist_85, "9" = dist_90,
  "9.5" = dist_95, "10" = dist_100, "10.5" = dist_105)
for (j in seq_along(vect)){
  chg<- which(ext.dat$type == "F" & ext.dat[,cnt] == vect[j])
  len<- length(chg)
  if(len != 0){
    sel.samp<- sample(dist.lookup[[as.character(vect[j])]], size = len, replace = TRUE)
    # column 50 is the appended 'target' column; address it by name
    ext.dat$target[chg]<- sel.samp}}
######################
# clean up and re-appoint data: move target beside the id columns
names(ext.dat)
ext.dat<- ext.dat[,c(1:9,50,10:49)]
# drop impossible negative targets; which() computed once and reused
neg.idx<- which(ext.dat$target < 0)
length(neg.idx)
if (length(neg.idx) != 0){ext.dat<- ext.dat[-neg.idx,]}
hist(ext.dat$target)
# persist the prepared external set (with covariates) for later use
saveRDS(object = ext.dat, file = paste0(model.out, depth,"/data_obs_preds/ext/ranger_EXT_covariates_pH4b_depth_",depth,".rds"))
ext.dat$Relief_geomorphons<- as.factor(ext.dat$Relief_geomorphons)
# putting model diagnostics into tables
# one row per bootstrap iteration; columns filled later as
# [col, iter, R2, CCC, MSE, RMSE, bias]
cal_diog<- matrix(NA, ncol= 7, nrow=its)
val_diog<- matrix(NA, ncol= 7, nrow=its)
ext_diog<- matrix(NA, ncol= 7, nrow=its)
# set up some matrices to put model outputs for each bootstrap iteration
# rows = sites (sized from the data prepared above), cols = iterations
target.mat<- matrix(NA, ncol = its, nrow = nrow(site.dat))
target.mat_ext<- matrix(NA, ncol = its, nrow = nrow(ext.dat))
pred.mat_c<- matrix(NA, ncol = its, nrow = nrow(site.dat))
residual.mat_c<- matrix(NA, ncol = its, nrow = nrow(site.dat))
pred.mat_v<- matrix(NA, ncol = its, nrow = nrow(site.dat))
# cycle through `its` number of bootstrap realisations; the data are
# reloaded and re-prepared on every pass because the field-to-lab
# conversion is stochastic
for (zz in 1:its){
# site data
site.dat<- readRDS(paste0(data.root,"tern_soilpH4B_siteDat_covariates_20223008_CALVALDAT_ARD.rds"))
# strip the trailing 4 characters from the covariate column names
# (presumably a file-extension remnant -- TODO confirm against the raw RDS)
new.col.names<- substr(x = names(site.dat)[17:54],start = 1, stop = nchar(names(site.dat)[17:54])-4)
names(site.dat)[17:54]<- new.col.names
# external validation data
ext.dat<- readRDS(paste0(data.root,"tern_soilpH4B_siteDat_covariates_20223008_EXTERNAL_ARD.rds"))
names(ext.dat)[17:54]<- new.col.names
###############################################
### get both datasets model ready
# CAL VAL DATA
# lab dat
names(site.dat)
# same selection as the pre-loop prep: ids (1:8), target (colsel), type
# flag (16) and covariates (17:55); target ends up at column 9
site.dat<- site.dat[,c(1:8,colsel,16,17:55)] # change column selection for target variable
site.dat<- site.dat[complete.cases(site.dat[,c(9,11:49)]),]
# working copy of the target; becomes column 50, overwritten for field obs below
site.dat$target<- site.dat[,9]
##########################
# Convert field pH readings to lab-equivalent pH for this realisation:
# each field ("F") observation is replaced with a fresh draw from the
# empirical transfer distribution for its 0.5-unit pH level.
names(site.dat)
vect<- c(3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5)
cnt<- 9 # column index of the raw target variable
# Dispatch table replacing the original 15-branch if-chain; one sample()
# call per populated level keeps the RNG stream identical.
dist.lookup<- list(
  "3.5" = dist_35, "4" = dist_40, "4.5" = dist_45, "5" = dist_50,
  "5.5" = dist_55, "6" = dist_60, "6.5" = dist_65, "7" = dist_70,
  "7.5" = dist_75, "8" = dist_80, "8.5" = dist_85, "9" = dist_90,
  "9.5" = dist_95, "10" = dist_100, "10.5" = dist_105)
for (j in seq_along(vect)){
  chg<- which(site.dat$type == "F" & site.dat[,cnt] == vect[j])
  len<- length(chg)
  if(len != 0){
    sel.samp<- sample(dist.lookup[[as.character(vect[j])]], size = len, replace = TRUE)
    # column 50 is the appended 'target' column; address it by name
    site.dat$target[chg]<- sel.samp}}
######################
# clean up and re-appoint data: move target beside the id columns
names(site.dat)
site.dat<- site.dat[,c(1:9,50,10:49)]
# drop impossible negative targets; which() computed once and reused
neg.idx<- which(site.dat$target < 0)
length(neg.idx)
if (length(neg.idx) != 0){site.dat<- site.dat[-neg.idx,]}
hist(site.dat$target)
summary(site.dat$target)
site.dat$Relief_geomorphons<- as.factor(site.dat$Relief_geomorphons)
# target variable data into matrix
# NOTE(review): assumes the row count matches the pre-loop site.dat used to
# size target.mat; a differing number of negative-target drops in any
# iteration would break this assignment -- verify
target.mat[,zz]<- site.dat$target
# EXTERNAL DATA
# lab dat
names(ext.dat)
# same selection as the pre-loop prep: ids (1:8), target (colsel), type
# flag (16) and covariates (17:55); target ends up at column 9
ext.dat<- ext.dat[,c(1:8,colsel,16,17:55)] # change column selection for target variable
ext.dat<- ext.dat[complete.cases(ext.dat[,c(9,11:49)]),]
# working copy of the target; becomes column 50, overwritten for field obs below
ext.dat$target<- ext.dat[,9]
##########################
# Convert field pH readings to lab-equivalent pH for this realisation
# (external set): field ("F") observations are replaced with draws from the
# empirical transfer distribution for their 0.5-unit pH level.
names(ext.dat)
vect<- c(3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5)
cnt<- 9 # column index of the raw target variable
# Dispatch table replacing the original 15-branch if-chain; one sample()
# call per populated level keeps the RNG stream identical.
dist.lookup<- list(
  "3.5" = dist_35, "4" = dist_40, "4.5" = dist_45, "5" = dist_50,
  "5.5" = dist_55, "6" = dist_60, "6.5" = dist_65, "7" = dist_70,
  "7.5" = dist_75, "8" = dist_80, "8.5" = dist_85, "9" = dist_90,
  "9.5" = dist_95, "10" = dist_100, "10.5" = dist_105)
for (j in seq_along(vect)){
  chg<- which(ext.dat$type == "F" & ext.dat[,cnt] == vect[j])
  len<- length(chg)
  if(len != 0){
    sel.samp<- sample(dist.lookup[[as.character(vect[j])]], size = len, replace = TRUE)
    # column 50 is the appended 'target' column; address it by name
    ext.dat$target[chg]<- sel.samp}}
######################
# clean up and re-appoint data: move target beside the id columns
names(ext.dat)
ext.dat<- ext.dat[,c(1:9,50,10:49)]
# drop impossible negative targets; which() computed once and reused
neg.idx<- which(ext.dat$target < 0)
length(neg.idx)
if (length(neg.idx) != 0){ext.dat<- ext.dat[-neg.idx,]}
hist(ext.dat$target)
ext.dat$Relief_geomorphons<- as.factor(ext.dat$Relief_geomorphons)
# target variable data for external data
# NOTE(review): assumes the row count matches the pre-loop ext.dat used to
# size target.mat_ext -- verify
target.mat_ext[,zz]<- ext.dat$target
##############################################################################
## calibration and validation (bootstrap)
# Draw a bootstrap sample of row indices: the unique in-bag rows form the
# calibration set, the out-of-bag remainder forms the validation set.
boots<- sample.int(nrow(site.dat), replace = TRUE)
inbag.uboots<- sort(unique(boots))                       # was uboots[order(uboots)]
outbag.uboots<- setdiff(seq_len(nrow(site.dat)), inbag.uboots)
# calibration data
DSM_data_c<- site.dat[inbag.uboots,]
# validation
DSM_data_v<- site.dat[outbag.uboots,]
# model tuning parameters
# single-row grid: caret fits one ranger model with the pre-optimised
# hyperparameters read from row `paramsf` of the tuning table
tgrid <- expand.grid(
.mtry = tuning.params$mtry[paramsf],
.splitrule= as.character(tuning.params$splitrule[paramsf]),
.min.node.size = tuning.params$nodesize[paramsf])
names(DSM_data_c)
str(DSM_data_c)
# fit a ranger random forest via caret; OOB resampling is used because the
# bootstrap split is already handled manually above. Columns 12:50 are the
# covariates (presumably -- verify against names(DSM_data_c) printed above).
ranger.model<-train(x= DSM_data_c[,12:50], y= DSM_data_c$target,
tuneGrid = tgrid,
method = "ranger",
trControl =trainControl(method = "oob"),
num.trees = 500,
importance = 'impurity')
summary(ranger.model)
ranger.model
## capture output
# Append the printed console representation of `obj` to the file `path`.
# Factors out the capture.output/paste0/cat stanza repeated three times in
# the original; output format is unchanged.
dump_capture<- function(obj, path){
  txt<- paste0(capture.output(obj), collapse = "\r\n")
  cat(txt, file = path, sep=",", append = TRUE)
}
var_nm1<- paste0(model.out,"/",depth,"/", "aus_",vart,"_model_",depth, "_iteration_", zz, ".txt")
dump_capture(summary(ranger.model), var_nm1)
dump_capture(ranger.model, var_nm1)
#capture variable importance
varOutput<- varImp(ranger.model, scale=FALSE)
varOutput
dump_capture(varOutput, var_nm1)
# save fitted model object to file
mod_name<- paste0(model.out,"/",depth,"/","aus_",vart,"_fittedmodel_",depth,"_iteration_", zz, ".rds")
saveRDS(ranger.model, file = mod_name)
## VALIDATION OF MODEL
# Fill one diagnostics row per split: [col, iter, R2, CCC, MSE, RMSE, bias]
# (goof() returns the five fit statistics). T -> TRUE spelled out.
# predict on calibration data
ranger.pred_c<-predict(ranger.model, DSM_data_c)
cal_diog[zz, 1]<- 1
cal_diog[zz, 2]<- zz
cal_diog[zz, 3:7]<- as.matrix(goof(observed = DSM_data_c$target, predicted = ranger.pred_c, plot.it = TRUE))
# predict on validation data
ranger.pred_v<-predict(ranger.model, DSM_data_v)
val_diog[zz, 1]<- 1
val_diog[zz, 2]<- zz
val_diog[zz, 3:7]<- as.matrix(goof(observed = DSM_data_v$target, predicted = ranger.pred_v, plot.it = TRUE))
# predict on external data
ranger.pred_ex<-predict(ranger.model, ext.dat)
ext_diog[zz, 1]<- 1
ext_diog[zz, 2]<- zz
ext_diog[zz, 3:7]<- as.matrix(goof(observed = ext.dat$target, predicted = ranger.pred_ex, plot.it = TRUE))
## Residual modeling using variogram
# (the unused duplicate assignment to `residual` was removed)
pred_residual_c<- DSM_data_c$target - ranger.pred_c
# put predictions into frames: in-bag rows get calibration values,
# out-of-bag rows get validation values; all other cells stay NA
pred.mat_c[inbag.uboots,zz]<- ranger.pred_c
residual.mat_c[inbag.uboots,zz]<- pred_residual_c
pred.mat_v[outbag.uboots,zz]<- ranger.pred_v
# write outputs to file
# Assemble a diagnostics matrix into a labelled data frame (complete rows
# only) and write it out; factors out the stanza repeated three times in
# the original. Files are rewritten each iteration, as before.
write_diag<- function(diag.mat, out.name){
  frame<- as.data.frame(diag.mat)
  frame<- frame[complete.cases(frame),]
  names(frame)<- c("col", "iter", "R2", "CCC","MSE", "RMSE", "bias")
  write.table(frame, file = out.name, sep = ",", row.names = FALSE, col.names = TRUE)
  frame
}
cal_frame<- write_diag(cal_diog, paste0(model.out,"ranger_CAL_diogs_",vart,"_",depth, ".txt"))
val_frame<- write_diag(val_diog, paste0(model.out,"ranger_VAL_diogs_",vart,"_",depth,".txt"))
ext_frame<- write_diag(ext_diog, paste0(model.out,"ranger_EXT_diogs_",vart,"_",depth,".txt"))
#output observation and prediction for calibration sites
names(DSM_data_c)
calOut_dat<- DSM_data_c[,c(1:11)]   # site/id columns plus the target
calOut_dat$prediction<- ranger.pred_c
cal_name1<- paste0(model.out,"/",depth,"/data_obs_preds/cal/","ranger_CAL_preds_", vart, "_depth_", depth,"_iteration_", zz, ".txt")
write.table(calOut_dat,file = cal_name1, sep = ",",row.names = F, col.names = T)
#output observation and prediction for validation sites
valOut_dat<- DSM_data_v[,c(1:11)]
valOut_dat$prediction<- ranger.pred_v
val_name1<- paste0(model.out,"/",depth,"/data_obs_preds/val/","ranger_VAL_preds_", vart, "_depth_", depth,"_iteration_", zz, ".txt")
write.table(valOut_dat,file = val_name1, sep = ",",row.names = F, col.names = T)
#output observation and prediction for external sites
names(ext.dat)
extOut_dat<- ext.dat[,c(1:11)]
extOut_dat$prediction<- ranger.pred_ex
ext_name1<- paste0(model.out,"/",depth,"/data_obs_preds/ext/","ranger_EXT_preds_", vart, "_depth_", depth,"_iteration_", zz, ".txt")
write.table(extOut_dat,file = ext_name1, sep = ",",row.names = F, col.names = T)
}
### save the predictions frames
# calibration: per-site averages over the iterations in which the site was
# in-bag (cells are NA otherwise, hence na.rm = TRUE)
pred.mat_c<- as.data.frame(pred.mat_c)
pred.mat_c_rm<- rowMeans(pred.mat_c, na.rm = TRUE)
residual.mat_c<- as.data.frame(residual.mat_c)
residual.mat_c_rm<- rowMeans(residual.mat_c, na.rm = TRUE)
# NOTE(review): site.dat here is whatever the final loop iteration left
# behind; row alignment with the pre-loop matrices is assumed -- verify
calOut_dat<- site.dat[,c(1:11)]
calOut_dat$prediction_avg<- pred.mat_c_rm
calOut_dat$residual_avg<- residual.mat_c_rm
cal_name1<- paste0(model.out,"/",depth,"/data_obs_preds/cal/","ranger_CAL_preds_average_summaries_", vart, "_depth_", depth, ".txt")
write.table(calOut_dat,file = cal_name1, sep = ",", row.names = FALSE, col.names = TRUE)
# validation / out-of-bag: per-site averages over the iterations in which
# the site was out-of-bag; target averages come from the per-iteration
# target matrix (targets vary per iteration due to the stochastic
# field-to-lab conversion)
pred.mat_v<- as.data.frame(pred.mat_v)
pred.mat_v_rm<- rowMeans(pred.mat_v, na.rm = TRUE)
pred.mat_v_tv<- as.data.frame(target.mat)
pred.mat_v_tv_rm<- rowMeans(pred.mat_v_tv, na.rm = TRUE)
valOut_dat<- site.dat[,c(1:11)]
valOut_dat$prediction_avg<- pred.mat_v_rm
valOut_dat$target_avg<- pred.mat_v_tv_rm
valOut_dat$residual_avg<- pred.mat_v_tv_rm - pred.mat_v_rm
val_name1<- paste0(model.out,"/",depth,"/data_obs_preds/val/","ranger_VAL_preds_average_summaries_", vart, "_depth_", depth, ".txt")
write.table(valOut_dat,file = val_name1, sep = ",", row.names = FALSE, col.names = TRUE)
# END
| /Production/DSM/pH/digital_soil_mapping/model_fitting/ph_4b/rangerModelling_ph_4b_d4.R | permissive | AusSoilsDSM/SLGA | R | false | false | 19,229 | r | ### TERN LANDSCAPES
# Soil pH model model fitting
# Author: Brendan Malone
# Email: brendan.malone@csiro.au
# created: 2.9.22
# modified: 6.9.22
# CODE PURPOSE
# # Depth 4 [30-60cm]
# Use optimally determined hyperparameter values to fit ranger models
# fit 100 models
# fixed parameters
vart<- "pH4b"
depth<- "d4"
colsel<- 12
paramsf<- 4
its<- 100
# root directory
g.root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/"
data.root<- paste0(g.root, "data/curated_all/")
dists.root<- paste0(g.root, "data/field_2_4B_dists/")
params.out<- paste0(g.root, "data/ranger_model_hyperparams/")
model.out<- paste0(g.root, "models/ph_4b/")
funcs.out<- paste0(g.root, "rcode/miscell/")
slurm.root<- paste0(g.root, "rcode/slurm/ph_4b/digital_soil_mapping/model_fitting/")
r.code<- paste0(g.root, "rcode/digital_soil_mapping/model_fitting/ph_4b/")
# libraries -- one library() call per line so a load failure is easy to attribute
library(caret)
library(ranger)
library(raster)
library(rgdal)   # NOTE(review): rgdal was retired in 2023; consider sf/terra
library(sp)
library(MASS)
library(automap)
library(gstat)
# goof(): goodness-of-fit statistics helper used for model diagnostics below
source(paste0(funcs.out,"goof.R"))
#data
# site data
site.dat<- readRDS(paste0(data.root,"tern_soilpH4B_siteDat_covariates_20223008_CALVALDAT_ARD.rds"))
# external validation data
ext.dat<- readRDS(paste0(data.root,"tern_soilpH4B_siteDat_covariates_20223008_EXTERNAL_ARD.rds"))
# hyperparameter values
tuning.params<- read.csv(file = paste0(model.out, "optimal_tuning_params_pH_4b_90m.csv"))
## distribtions
dist_35<- readRDS(file = paste0(dists.root,"dists_35_field_2_4b.rds"))
dist_40<- readRDS(file = paste0(dists.root,"dists_40_field_2_4b.rds"))
dist_45<- readRDS(file = paste0(dists.root,"dists_45_field_2_4b.rds"))
dist_50<- readRDS(file = paste0(dists.root,"dists_50_field_2_4b.rds"))
dist_55<- readRDS(file = paste0(dists.root,"dists_55_field_2_4b.rds"))
dist_60<- readRDS(file = paste0(dists.root,"dists_60_field_2_4b.rds"))
dist_65<- readRDS(file = paste0(dists.root,"dists_65_field_2_4b.rds"))
dist_70<- readRDS(file = paste0(dists.root,"dists_70_field_2_4b.rds"))
dist_75<- readRDS(file = paste0(dists.root,"dists_75_field_2_4b.rds"))
dist_80<- readRDS(file = paste0(dists.root,"dists_80_field_2_4b.rds"))
dist_85<- readRDS(file = paste0(dists.root,"dists_85_field_2_4b.rds"))
dist_90<- readRDS(file = paste0(dists.root,"dists_90_field_2_4b.rds"))
dist_95<- readRDS(file = paste0(dists.root,"dists_95_field_2_4b.rds"))
dist_100<- readRDS(file = paste0(dists.root,"dists_100_field_2_4b.rds"))
dist_105<- readRDS(file = paste0(dists.root,"dists_105_field_2_4b.rds"))
# site data (what sort of data frame are we working with)
names(site.dat)
site.dat<- site.dat[,c(1:8,colsel,16,17:55)] # change column selection for target variable
site.dat<- site.dat[complete.cases(site.dat[,c(9,11:49)]),]
site.dat$target<- site.dat[,9]
##########################
# Convert field pH readings to lab-equivalent pH.
# Field pH is recorded in 0.5-unit increments; each increment has an
# empirical field-to-lab transfer distribution (dist_35 .. dist_105, loaded
# above) from which replacement values are drawn.
names(site.dat)
vect<- c(3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5)
cnt<- 9 # column index of the raw target variable
# Dispatch table mapping each field pH level to its transfer distribution.
# Replaces the original 15-branch if-chain; exactly one sample() call is
# made per populated level, so the RNG stream (and hence the result under a
# fixed seed) is unchanged.
dist.lookup<- list(
  "3.5" = dist_35, "4" = dist_40, "4.5" = dist_45, "5" = dist_50,
  "5.5" = dist_55, "6" = dist_60, "6.5" = dist_65, "7" = dist_70,
  "7.5" = dist_75, "8" = dist_80, "8.5" = dist_85, "9" = dist_90,
  "9.5" = dist_95, "10" = dist_100, "10.5" = dist_105)
for (j in seq_along(vect)){
  # rows holding a field ("F") measurement at exactly this pH level
  chg<- which(site.dat$type == "F" & site.dat[,cnt] == vect[j])
  len<- length(chg)
  if(len != 0){
    # draw one lab-equivalent value per affected record
    sel.samp<- sample(dist.lookup[[as.character(vect[j])]], size = len, replace = TRUE)
    # column 50 is the appended 'target' column; address it by name for clarity
    site.dat$target[chg]<- sel.samp}}
######################
# clean up and re-appoint data: move the (possibly lab-converted) target
# column to sit beside the site/id columns
names(site.dat)
site.dat<- site.dat[,c(1:9,50,10:49)]
# drop records with a negative target (impossible pH); which() is computed
# once, unlike the original which recomputed it inside the guard
neg.idx<- which(site.dat$target < 0)
length(neg.idx)
if (length(neg.idx) != 0){site.dat<- site.dat[-neg.idx,]}
hist(site.dat$target)
summary(site.dat$target)
site.dat$Relief_geomorphons<- as.factor(site.dat$Relief_geomorphons)
# EXTERNAL DATA
# lab dat
names(ext.dat)
ext.dat<- ext.dat[,c(1:8,colsel,16,17:55)] # change column selection for target variable
ext.dat<- ext.dat[complete.cases(ext.dat[,c(9,11:49)]),]
ext.dat$target<- ext.dat[,9]
##########################
# Convert field pH readings to lab-equivalent pH (same scheme as for the
# cal/val set): replace each field ("F") observation with a draw from the
# empirical field-to-lab transfer distribution for its 0.5-unit pH level.
names(ext.dat)
vect<- c(3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5)
cnt<- 9 # column index of the raw target variable
# Dispatch table replacing the original 15-branch if-chain; one sample()
# call per populated level keeps the RNG stream identical.
dist.lookup<- list(
  "3.5" = dist_35, "4" = dist_40, "4.5" = dist_45, "5" = dist_50,
  "5.5" = dist_55, "6" = dist_60, "6.5" = dist_65, "7" = dist_70,
  "7.5" = dist_75, "8" = dist_80, "8.5" = dist_85, "9" = dist_90,
  "9.5" = dist_95, "10" = dist_100, "10.5" = dist_105)
for (j in seq_along(vect)){
  chg<- which(ext.dat$type == "F" & ext.dat[,cnt] == vect[j])
  len<- length(chg)
  if(len != 0){
    sel.samp<- sample(dist.lookup[[as.character(vect[j])]], size = len, replace = TRUE)
    # column 50 is the appended 'target' column; address it by name
    ext.dat$target[chg]<- sel.samp}}
######################
# clean up and re-appoint data: move target beside the id columns
names(ext.dat)
ext.dat<- ext.dat[,c(1:9,50,10:49)]
# drop impossible negative targets; which() computed once and reused
neg.idx<- which(ext.dat$target < 0)
length(neg.idx)
if (length(neg.idx) != 0){ext.dat<- ext.dat[-neg.idx,]}
hist(ext.dat$target)
# persist the prepared external set (with covariates) for later use
saveRDS(object = ext.dat, file = paste0(model.out, depth,"/data_obs_preds/ext/ranger_EXT_covariates_pH4b_depth_",depth,".rds"))
ext.dat$Relief_geomorphons<- as.factor(ext.dat$Relief_geomorphons)
# putting model diagnostics into tables
cal_diog<- matrix(NA, ncol= 7, nrow=its)
val_diog<- matrix(NA, ncol= 7, nrow=its)
ext_diog<- matrix(NA, ncol= 7, nrow=its)
# set up some matrices to put model outputs for each bootstrap iteration
target.mat<- matrix(NA, ncol = its, nrow = nrow(site.dat))
target.mat_ext<- matrix(NA, ncol = its, nrow = nrow(ext.dat))
pred.mat_c<- matrix(NA, ncol = its, nrow = nrow(site.dat))
residual.mat_c<- matrix(NA, ncol = its, nrow = nrow(site.dat))
pred.mat_v<- matrix(NA, ncol = its, nrow = nrow(site.dat))
# cycle throoug its number of realisations
for (zz in 1:its){
# site data
site.dat<- readRDS(paste0(data.root,"tern_soilpH4B_siteDat_covariates_20223008_CALVALDAT_ARD.rds"))
new.col.names<- substr(x = names(site.dat)[17:54],start = 1, stop = nchar(names(site.dat)[17:54])-4)
names(site.dat)[17:54]<- new.col.names
# external validation data
ext.dat<- readRDS(paste0(data.root,"tern_soilpH4B_siteDat_covariates_20223008_EXTERNAL_ARD.rds"))
names(ext.dat)[17:54]<- new.col.names
###############################################
### get both datasets model ready
# CAL VAL DATA
# lab dat
names(site.dat)
site.dat<- site.dat[,c(1:8,colsel,16,17:55)] # change column selection for target variable
site.dat<- site.dat[complete.cases(site.dat[,c(9,11:49)]),]
site.dat$target<- site.dat[,9]
##########################
# Convert field pH readings to lab-equivalent pH for this realisation:
# each field ("F") observation is replaced with a fresh draw from the
# empirical transfer distribution for its 0.5-unit pH level.
names(site.dat)
vect<- c(3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5)
cnt<- 9 # column index of the raw target variable
# Dispatch table replacing the original 15-branch if-chain; one sample()
# call per populated level keeps the RNG stream identical.
dist.lookup<- list(
  "3.5" = dist_35, "4" = dist_40, "4.5" = dist_45, "5" = dist_50,
  "5.5" = dist_55, "6" = dist_60, "6.5" = dist_65, "7" = dist_70,
  "7.5" = dist_75, "8" = dist_80, "8.5" = dist_85, "9" = dist_90,
  "9.5" = dist_95, "10" = dist_100, "10.5" = dist_105)
for (j in seq_along(vect)){
  chg<- which(site.dat$type == "F" & site.dat[,cnt] == vect[j])
  len<- length(chg)
  if(len != 0){
    sel.samp<- sample(dist.lookup[[as.character(vect[j])]], size = len, replace = TRUE)
    # column 50 is the appended 'target' column; address it by name
    site.dat$target[chg]<- sel.samp}}
######################
# clean up and re-appoint data: move target beside the id columns
names(site.dat)
site.dat<- site.dat[,c(1:9,50,10:49)]
# drop impossible negative targets; which() computed once and reused
neg.idx<- which(site.dat$target < 0)
length(neg.idx)
if (length(neg.idx) != 0){site.dat<- site.dat[-neg.idx,]}
hist(site.dat$target)
summary(site.dat$target)
site.dat$Relief_geomorphons<- as.factor(site.dat$Relief_geomorphons)
# target variable data into matrix
# NOTE(review): assumes the row count matches the pre-loop site.dat used to
# size target.mat -- verify
target.mat[,zz]<- site.dat$target
# EXTERNAL DATA
# lab dat
names(ext.dat)
ext.dat<- ext.dat[,c(1:8,colsel,16,17:55)] # change column selection for target variable
ext.dat<- ext.dat[complete.cases(ext.dat[,c(9,11:49)]),]
ext.dat$target<- ext.dat[,9]
##########################
# Convert field pH readings to lab-equivalent pH for this realisation
# (external set): field ("F") observations are replaced with draws from the
# empirical transfer distribution for their 0.5-unit pH level.
names(ext.dat)
vect<- c(3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5)
cnt<- 9 # column index of the raw target variable
# Dispatch table replacing the original 15-branch if-chain; one sample()
# call per populated level keeps the RNG stream identical.
dist.lookup<- list(
  "3.5" = dist_35, "4" = dist_40, "4.5" = dist_45, "5" = dist_50,
  "5.5" = dist_55, "6" = dist_60, "6.5" = dist_65, "7" = dist_70,
  "7.5" = dist_75, "8" = dist_80, "8.5" = dist_85, "9" = dist_90,
  "9.5" = dist_95, "10" = dist_100, "10.5" = dist_105)
for (j in seq_along(vect)){
  chg<- which(ext.dat$type == "F" & ext.dat[,cnt] == vect[j])
  len<- length(chg)
  if(len != 0){
    sel.samp<- sample(dist.lookup[[as.character(vect[j])]], size = len, replace = TRUE)
    # column 50 is the appended 'target' column; address it by name
    ext.dat$target[chg]<- sel.samp}}
######################
# clean up and re-appoint data: move target beside the id columns
names(ext.dat)
ext.dat<- ext.dat[,c(1:9,50,10:49)]
# drop impossible negative targets; which() computed once and reused
neg.idx<- which(ext.dat$target < 0)
length(neg.idx)
if (length(neg.idx) != 0){ext.dat<- ext.dat[-neg.idx,]}
hist(ext.dat$target)
ext.dat$Relief_geomorphons<- as.factor(ext.dat$Relief_geomorphons)
# target variable data for external data
# NOTE(review): assumes the row count matches the pre-loop ext.dat used to
# size target.mat_ext -- verify
target.mat_ext[,zz]<- ext.dat$target
##############################################################################
## calibration and validation (bootstrap)
# Draw a bootstrap sample of row indices: the unique in-bag rows form the
# calibration set, the out-of-bag remainder forms the validation set.
boots<- sample.int(nrow(site.dat), replace = TRUE)
inbag.uboots<- sort(unique(boots))                       # was uboots[order(uboots)]
outbag.uboots<- setdiff(seq_len(nrow(site.dat)), inbag.uboots)
# calibration data
DSM_data_c<- site.dat[inbag.uboots,]
# validation
DSM_data_v<- site.dat[outbag.uboots,]
# model tuning parameters
tgrid <- expand.grid(
.mtry = tuning.params$mtry[paramsf],
.splitrule= as.character(tuning.params$splitrule[paramsf]),
.min.node.size = tuning.params$nodesize[paramsf])
names(DSM_data_c)
str(DSM_data_c)
ranger.model<-train(x= DSM_data_c[,12:50], y= DSM_data_c$target,
tuneGrid = tgrid,
method = "ranger",
trControl =trainControl(method = "oob"),
num.trees = 500,
importance = 'impurity')
summary(ranger.model)
ranger.model
## capture output
# Append the printed console representation of `obj` to the file `path`.
# Factors out the capture.output/paste0/cat stanza repeated three times in
# the original; output format is unchanged.
dump_capture<- function(obj, path){
  txt<- paste0(capture.output(obj), collapse = "\r\n")
  cat(txt, file = path, sep=",", append = TRUE)
}
var_nm1<- paste0(model.out,"/",depth,"/", "aus_",vart,"_model_",depth, "_iteration_", zz, ".txt")
dump_capture(summary(ranger.model), var_nm1)
dump_capture(ranger.model, var_nm1)
#capture variable importance
varOutput<- varImp(ranger.model, scale=FALSE)
varOutput
dump_capture(varOutput, var_nm1)
# save fitted model object to file
mod_name<- paste0(model.out,"/",depth,"/","aus_",vart,"_fittedmodel_",depth,"_iteration_", zz, ".rds")
saveRDS(ranger.model, file = mod_name)
## VALIDATION OF MODEL
# Fill one diagnostics row per split: [col, iter, R2, CCC, MSE, RMSE, bias]
# (goof() returns the five fit statistics). T -> TRUE spelled out.
# predict on calibration data
ranger.pred_c<-predict(ranger.model, DSM_data_c)
cal_diog[zz, 1]<- 1
cal_diog[zz, 2]<- zz
cal_diog[zz, 3:7]<- as.matrix(goof(observed = DSM_data_c$target, predicted = ranger.pred_c, plot.it = TRUE))
# predict on validation data
ranger.pred_v<-predict(ranger.model, DSM_data_v)
val_diog[zz, 1]<- 1
val_diog[zz, 2]<- zz
val_diog[zz, 3:7]<- as.matrix(goof(observed = DSM_data_v$target, predicted = ranger.pred_v, plot.it = TRUE))
# predict on external data
ranger.pred_ex<-predict(ranger.model, ext.dat)
ext_diog[zz, 1]<- 1
ext_diog[zz, 2]<- zz
ext_diog[zz, 3:7]<- as.matrix(goof(observed = ext.dat$target, predicted = ranger.pred_ex, plot.it = TRUE))
## Residual modeling using variogram
# (the unused duplicate assignment to `residual` was removed)
pred_residual_c<- DSM_data_c$target - ranger.pred_c
# put predictions into frames: in-bag rows get calibration values,
# out-of-bag rows get validation values; all other cells stay NA
pred.mat_c[inbag.uboots,zz]<- ranger.pred_c
residual.mat_c[inbag.uboots,zz]<- pred_residual_c
pred.mat_v[outbag.uboots,zz]<- ranger.pred_v
# write outputs to file
# Assemble a diagnostics matrix into a labelled data frame (complete rows
# only) and write it out; factors out the stanza repeated three times in
# the original. Files are rewritten each iteration, as before.
write_diag<- function(diag.mat, out.name){
  frame<- as.data.frame(diag.mat)
  frame<- frame[complete.cases(frame),]
  names(frame)<- c("col", "iter", "R2", "CCC","MSE", "RMSE", "bias")
  write.table(frame, file = out.name, sep = ",", row.names = FALSE, col.names = TRUE)
  frame
}
cal_frame<- write_diag(cal_diog, paste0(model.out,"ranger_CAL_diogs_",vart,"_",depth, ".txt"))
val_frame<- write_diag(val_diog, paste0(model.out,"ranger_VAL_diogs_",vart,"_",depth,".txt"))
ext_frame<- write_diag(ext_diog, paste0(model.out,"ranger_EXT_diogs_",vart,"_",depth,".txt"))
#output observation and prediction for calibration sites
names(DSM_data_c)
calOut_dat<- DSM_data_c[,c(1:11)]
calOut_dat$prediction<- ranger.pred_c
cal_name1<- paste0(model.out,"/",depth,"/data_obs_preds/cal/","ranger_CAL_preds_", vart, "_depth_", depth,"_iteration_", zz, ".txt")
write.table(calOut_dat,file = cal_name1, sep = ",",row.names = F, col.names = T)
#output observation and prediction for validation sites
valOut_dat<- DSM_data_v[,c(1:11)]
valOut_dat$prediction<- ranger.pred_v
val_name1<- paste0(model.out,"/",depth,"/data_obs_preds/val/","ranger_VAL_preds_", vart, "_depth_", depth,"_iteration_", zz, ".txt")
write.table(valOut_dat,file = val_name1, sep = ",",row.names = F, col.names = T)
#output observation and prediction for external
names(ext.dat)
extOut_dat<- ext.dat[,c(1:11)]
extOut_dat$prediction<- ranger.pred_ex
ext_name1<- paste0(model.out,"/",depth,"/data_obs_preds/ext/","ranger_EXT_preds_", vart, "_depth_", depth,"_iteration_", zz, ".txt")
write.table(extOut_dat,file = ext_name1, sep = ",",row.names = F, col.names = T)
}
### save the predictions frames
### Aggregate per-iteration bootstrap predictions into one summary row per site.
# calibration: average prediction/residual per site over the iterations in which
# the site was in-bag; out-of-bag slots were left NA, hence na.rm = T
pred.mat_c<- as.data.frame(pred.mat_c)
pred.mat_c_rm<- rowMeans(pred.mat_c,na.rm = T)
residual.mat_c<- as.data.frame(residual.mat_c)
residual.mat_c_rm<- rowMeans(residual.mat_c,na.rm = T)
# assumes cols 1:11 of site.dat are site identifiers/covariates shared by all
# outputs of this script - TODO confirm against the data-prep section upstream
calOut_dat<- site.dat[,c(1:11)]
calOut_dat$prediction_avg<- pred.mat_c_rm
calOut_dat$residual_avg<- residual.mat_c_rm
cal_name1<- paste0(model.out,"/",depth,"/data_obs_preds/cal/","ranger_CAL_preds_average_summaries_", vart, "_depth_", depth, ".txt")
# NOTE: comma-separated content written with a .txt extension (kept for
# consistency with the per-iteration outputs above)
write.table(calOut_dat,file = cal_name1, sep = ",",row.names = F, col.names = T)
# validation/ out
# same aggregation for the out-of-bag (validation) predictions; target.mat
# presumably holds the observed target per site/iteration - confirm upstream
pred.mat_v<- as.data.frame(pred.mat_v)
pred.mat_v_rm<- rowMeans(pred.mat_v,na.rm = T)
pred.mat_v_tv<- as.data.frame(target.mat)
pred.mat_v_tv_rm<- rowMeans(pred.mat_v_tv,na.rm = T)
valOut_dat<- site.dat[,c(1:11)]
valOut_dat$prediction_avg<- pred.mat_v_rm
valOut_dat$target_avg<- pred.mat_v_tv_rm
# residual = observed average - predicted average
valOut_dat$residual_avg<- pred.mat_v_tv_rm - pred.mat_v_rm
val_name1<- paste0(model.out,"/",depth,"/data_obs_preds/val/","ranger_VAL_preds_average_summaries_", vart, "_depth_", depth, ".txt")
write.table(valOut_dat,file = val_name1, sep = ",",row.names = F, col.names = T)
# END
|
################################ Mulilevel Analysis ################################
################################# General Setup ###################################
# Load all required packages in a single call via easypackages.
library(easypackages)
libraries("Hmisc", "psych", "lme4", "texreg", "sjPlot", "sjmisc", "sjstats",
"haven", "ggplot2", "effects", "tidyverse", "ggExtra", "ggeffects", "reghelper")
# NOTE(review): setwd() plus an absolute read.csv() path make this script
# machine-specific; consider project-relative paths.
setwd("~/Documents/Uni Konstanz/Master/Master Thesis/Daten/Multilevel/New")
#################################### Importing my Data ################################
dat <- read.csv("/Users/sarahalt/Desktop/Daten_Masterarbeit_Multilevel_GMC2_Sarah.csv")
# NOTE(review): attach() copies the columns as they are NOW. The factor
# recoding of sex_m/startup_m further below modifies dat only, so the
# attached copies stay numeric; later bare-name uses such as
# plot(PSS_mean, ...) therefore see the un-factored values.
attach(dat)
## Recode the categorical indicators into labelled factors.
## Coding in the raw data: 1/2 -> Female/Male and Startup/No-Startup.
dat$sex_m <- factor(dat$sex_m, levels = c(1, 2), labels = c("Female", "Male"))
dat$startup_m <- factor(dat$startup_m, levels = c(1, 2), labels = c("Startup", "No-Startup"))
################################### Data Screening ####################################
## checking for accuracy & missings ##
summary(dat)
###########################################################################################################
###################################### Basic Models #######################################################
###########################################################################################################
## Null Model - Intercept only
h_null <- lmer(PSS_mean ~ 1 + (1 | teamcode), data = dat)
## Model 1: Model with control variables
h_1 <- lmer(PSS_mean ~ 1 + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
###########################################################################################################
###################################### Individual PsyCap #######################################################
###########################################################################################################
## Model 2: Model with predictor iPsyCap - grand-mean-centered - deleted from analysis
h2_c_i <- lmer(PSS_mean ~ PCI_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 3: Model with predictor WLoad - grand-mean-centered
h3_c_i <- lmer(PSS_mean ~ WLoad_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 4: Model with predictors WLoad & iPsyCap - grand-mean-centered
h4_c_i <- lmer(PSS_mean ~ WLoad_mean_gmc + PCI_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 5: Model with interaction - random intercept, fixed slope - grand-mean-centered
h5_c_i <- lmer(PSS_mean ~ WLoad_mean_gmc * PCI_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 6: Model with interaction - random intercept, random slope - grand-mean-centered
h6_c_i <- lmer(PSS_mean ~ WLoad_mean_gmc * PCI_mean_gmc + sex_m + Age_gmc + startup_m + (WLoad_mean_gmc | teamcode), data = dat)
## T-Values
summary(h_null)
summary(h_1)
summary(h2_c_i)
summary(h3_c_i)
summary(h4_c_i)
summary(h5_c_i)
summary(h6_c_i)
###### Comparison of models ####
## Direct Relationhip ##
screenreg(list(h_null, h_1, h2_c_i))
anova(h_null, h_1, h2_c_i)
## Moderation
screenreg(list(h_null, h_1, h3_c_i, h4_c_i, h5_c_i, h6_c_i))
anova(h_null, h_1, h3_c_i, h4_c_i, h5_c_i, h6_c_i)
##html overview for Word
htmlreg(list(h_null, h_1, h2_c_i, h3_c_i, h4_c_i, h5_c_i, h6_c_i),
file = "iPsyCap-Models.html",
single.row = T,
caption = "The role of individual psychological capital in regard to perceived stress if controlled for gender, startup & age",
custom.note = "
%stars.
Null Model = Intercept only model.
Model 1 = Model with control variables.
Model 2 = Model with predictor iPsyCap.
Model 3 = Model with predictor workload.
Model 4 = Model with both predictors.
Model 5 = Random Intercept & Fixed Slope, with interaction.
Model 6 = Random Intercept & Random Slope, with interaction;
All predictors are grand-mean-centered; Controlled for Age, Startup & Gender",
custom.model.names = c("Null Model", "Model 1", "Model 2", "Model 3", "Model 4", "Model 5", "Model 6"))
###########################################################################################################
###################################### Team PsyCap #######################################################
###########################################################################################################
## Model 2: Model with predictor tPsyCap - grand-mean-centered - deleted
h2_c_t <- lmer(PSS_mean ~ PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 3: Model with predictor WLoad - grand-mean-centered
h3_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 4: Model with predictors WLoad & tPsyCap - grand-mean-centered
h4_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc + PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 5: Model with interaction - random intercept, fixed slope - grand-mean-centered
h5_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc * PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 6: Model with interaction - random intercept, random slope - grand-mean-centered
h6_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc * PCT_mean_gmc + sex_m + Age_gmc + startup_m + (WLoad_mean_gmc | teamcode), data = dat)
## T-Values
summary(h_null)
summary(h_1)
summary(h2_c_t)
summary(h3_c_t)
summary(h4_c_t)
summary(h5_c_t)
summary(h6_c_t)
###### Comparison of models ####
## Direct Relationhip ##
screenreg(list(h_null, h_1, h2_c_t))
anova(h_null, h_1, h2_c_t)
## Moderation
screenreg(list(h_null, h_1, h3_c_t, h4_c_t, h5_c_t, h6_c_t))
anova(h_null, h_1, h3_c_t, h4_c_t, h5_c_t, h6_c_t)
#probing the interaction with reghelper::simple_slopes()
# (guarded on lme4 being available, mirroring the reghelper help page)
if (require(lme4, quietly=TRUE)) {
# NOTE(review): removed the unused iris demo model that had been copied
# verbatim from the reghelper example; it was fitted but never referenced.
interaction <- lmer(PSS_mean ~ WLoad_mean_gmc * PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
summary(interaction)
# simple slopes at the default moderator levels (+/- 1 SD etc.)
simple_slopes(interaction)
# simple slopes at hand-picked moderator values plus the significance-test
# level "sstest"
# assumes 4/5/6 and 2/3 are meaningful points on the centered scales - TODO confirm
simple_slopes(interaction,
levels=list(WLoad_mean_gmc=c(4, 5, 6, 'sstest'),
PCT_mean_gmc=c(2, 3, 'sstest'))) # test at specific levels
}
##html overview for Word
htmlreg(list(h_null, h_1, h2_c_t, h3_c_t, h4_c_t, h5_c_t, h6_c_t),
file = "tPsyCap-Models.html",
single.row = T,
caption = "The role of team psychological capital in regard to perceived stress if controlled for gender, startup & age",
custom.note = "
%stars.
Null Model = Intercept only model.
Model 1 = Model with control variables.
Model 2 = Model with predictor tPsyCap.
Model 3 = Model with predictor workload.
Model 4 = Model with both predictors.
Model 5 = Random Intercept & Fixed Slope, with interaction.
Model 6 = Random Intercept & Random Slope, with interaction;
All predictors are grand-mean-centered; Controlled for Age, Startup & Gender",
custom.model.names = c("Null Model", "Model 1", "Model 2", "Model 3", "Model 4", "Model 5", "Model 6"))
###### Plotting ####
## Interaktion
# BUG FIX: the bare symbol `Interaktion` previously stood alone on the line
# above; evaluating it raises "object 'Interaktion' not found" and aborts a
# sourced script. It is now a section comment.
# Exploratory scatter plots (bare names resolve via attach(dat) in the setup).
plot(PSS_mean,WLoad_mean_gmc)
plot(PSS_mean,PCT_mean_gmc)
plot(WLoad_mean_gmc,PCT_mean_gmc)
plot(PSS_mean,PCT_mean_gmc*WLoad_mean_gmc)
# Marginal effects of the workload x team-PsyCap interaction (ggeffects).
x <- ggpredict(h5_c_t, c("WLoad_mean_gmc", "PCT_mean_gmc"))
x
plot(x)
# Refit of model 5 (identical to the earlier fit; kept so this section also
# runs stand-alone).
h5_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc * PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
eff.h5_c_t <- effect("WLoad_mean_gmc*PCT_mean_gmc", h5_c_t, KR=T)
eff.h5_c_t <- as.data.frame(eff.h5_c_t)
# Interaction plot: solid fitted lines per workload level with dotted
# lower/upper confidence bands.
ggplot(eff.h5_c_t, aes(PCT_mean_gmc, linetype=factor(WLoad_mean_gmc),
color = factor(WLoad_mean_gmc))) +
geom_line(aes(y = fit, group=factor(WLoad_mean_gmc)), size=1.2) +
geom_line(aes(y = lower,
group=factor(WLoad_mean_gmc)), linetype =3) +
geom_line(aes(y = upper,
group=factor(WLoad_mean_gmc)), linetype =3) +
xlab("team PsyCap") +
ylab("Effects on Perceived Stress") +
scale_colour_discrete("") +
scale_linetype_discrete("") +
labs(color='WLoad_mean_gmc') + theme_minimal()
# BUG FIX: sjPlot's `terms` argument expects a character vector of term
# names; the bare symbols previously resolved (via attach) to the numeric
# columns themselves.
plot_model(h5_c_t, type = "int", terms = c("PCT_mean_gmc", "WLoad_mean_gmc"), ci.lvl = 0.95)
## Further Test ##
# Exploratory model entering individual AND team PsyCap together with workload.
h_test <- lmer(PSS_mean ~ WLoad_mean_gmc + PCI_mean_gmc + PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
summary(h_test)
# Side-by-side comparison with the null, control-only and team-PsyCap models.
screenreg(list(h_null, h_1, h2_c_t, h_test))
#################################### Finish ################################
##detach
# Undo the attach() from the setup section so the search path is clean.
detach(dat)
################################# General Setup ###################################
library(easypackages)
libraries("Hmisc", "psych", "lme4", "texreg", "sjPlot", "sjmisc", "sjstats",
"haven", "ggplot2", "effects", "tidyverse", "ggExtra", "ggeffects", "reghelper")
setwd("~/Documents/Uni Konstanz/Master/Master Thesis/Daten/Multilevel/New")
#################################### Importing my Data ################################
dat <- read.csv("/Users/sarahalt/Desktop/Daten_Masterarbeit_Multilevel_GMC2_Sarah.csv")
attach(dat)
##factoring the categorial variables
dat$sex_m = factor(dat$sex_m,
levels = c(1,2),
labels = c("Female", "Male"))
dat$startup_m = factor(dat$startup_m,
levels = c(1,2),
labels = c("Startup", "No-Startup"))
################################### Data Screening ####################################
## checking for accuracy & missings ##
summary(dat)
###########################################################################################################
###################################### Basic Models #######################################################
###########################################################################################################
## Null Model - Intercept only
h_null <- lmer(PSS_mean ~ 1 + (1 | teamcode), data = dat)
## Model 1: Model with control variables
h_1 <- lmer(PSS_mean ~ 1 + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
###########################################################################################################
###################################### Individual PsyCap #######################################################
###########################################################################################################
## Model 2: Model with predictor iPsyCap - grand-mean-centered - deleted from analysis
h2_c_i <- lmer(PSS_mean ~ PCI_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 3: Model with predictor WLoad - grand-mean-centered
h3_c_i <- lmer(PSS_mean ~ WLoad_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 4: Model with predictors WLoad & iPsyCap - grand-mean-centered
h4_c_i <- lmer(PSS_mean ~ WLoad_mean_gmc + PCI_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 5: Model with interaction - random intercept, fixed slope - grand-mean-centered
h5_c_i <- lmer(PSS_mean ~ WLoad_mean_gmc * PCI_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 6: Model with interaction - random intercept, random slope - grand-mean-centered
h6_c_i <- lmer(PSS_mean ~ WLoad_mean_gmc * PCI_mean_gmc + sex_m + Age_gmc + startup_m + (WLoad_mean_gmc | teamcode), data = dat)
## T-Values
summary(h_null)
summary(h_1)
summary(h2_c_i)
summary(h3_c_i)
summary(h4_c_i)
summary(h5_c_i)
summary(h6_c_i)
###### Comparison of models ####
## Direct Relationhip ##
screenreg(list(h_null, h_1, h2_c_i))
anova(h_null, h_1, h2_c_i)
## Moderation
screenreg(list(h_null, h_1, h3_c_i, h4_c_i, h5_c_i, h6_c_i))
anova(h_null, h_1, h3_c_i, h4_c_i, h5_c_i, h6_c_i)
##html overview for Word
htmlreg(list(h_null, h_1, h2_c_i, h3_c_i, h4_c_i, h5_c_i, h6_c_i),
file = "iPsyCap-Models.html",
single.row = T,
caption = "The role of individual psychological capital in regard to perceived stress if controlled for gender, startup & age",
custom.note = "
%stars.
Null Model = Intercept only model.
Model 1 = Model with control variables.
Model 2 = Model with predictor iPsyCap.
Model 3 = Model with predictor workload.
Model 4 = Model with both predictors.
Model 5 = Random Intercept & Fixed Slope, with interaction.
Model 6 = Random Intercept & Random Slope, with interaction;
All predictors are grand-mean-centered; Controlled for Age, Startup & Gender",
custom.model.names = c("Null Model", "Model 1", "Model 2", "Model 3", "Model 4", "Model 5", "Model 6"))
###########################################################################################################
###################################### Team PsyCap #######################################################
###########################################################################################################
## Model 2: Model with predictor tPsyCap - grand-mean-centered - deleted
h2_c_t <- lmer(PSS_mean ~ PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 3: Model with predictor WLoad - grand-mean-centered
h3_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 4: Model with predictors WLoad & tPsyCap - grand-mean-centered
h4_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc + PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 5: Model with interaction - random intercept, fixed slope - grand-mean-centered
h5_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc * PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
## Model 6: Model with interaction - random intercept, random slope - grand-mean-centered
h6_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc * PCT_mean_gmc + sex_m + Age_gmc + startup_m + (WLoad_mean_gmc | teamcode), data = dat)
## T-Values
summary(h_null)
summary(h_1)
summary(h2_c_t)
summary(h3_c_t)
summary(h4_c_t)
summary(h5_c_t)
summary(h6_c_t)
###### Comparison of models ####
## Direct Relationhip ##
screenreg(list(h_null, h_1, h2_c_t))
anova(h_null, h_1, h2_c_t)
## Moderation
screenreg(list(h_null, h_1, h3_c_t, h4_c_t, h5_c_t, h6_c_t))
anova(h_null, h_1, h3_c_t, h4_c_t, h5_c_t, h6_c_t)
#probing the interaction
if (require(lme4, quietly=TRUE)) {
model <- lmer(Sepal.Width ~ Sepal.Length * Petal.Length + (1|Species), data=iris)
interaction <- lmer(PSS_mean ~ WLoad_mean_gmc * PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
summary(interaction)
simple_slopes(interaction)
simple_slopes(interaction,
levels=list(WLoad_mean_gmc=c(4, 5, 6, 'sstest'),
PCT_mean_gmc=c(2, 3, 'sstest'))) # test at specific levels
}
##html overview for Word
htmlreg(list(h_null, h_1, h2_c_t, h3_c_t, h4_c_t, h5_c_t, h6_c_t),
file = "tPsyCap-Models.html",
single.row = T,
caption = "The role of team psychological capital in regard to perceived stress if controlled for gender, startup & age",
custom.note = "
%stars.
Null Model = Intercept only model.
Model 1 = Model with control variables.
Model 2 = Model with predictor tPsyCap.
Model 3 = Model with predictor workload.
Model 4 = Model with both predictors.
Model 5 = Random Intercept & Fixed Slope, with interaction.
Model 6 = Random Intercept & Random Slope, with interaction;
All predictors are grand-mean-centered; Controlled for Age, Startup & Gender",
custom.model.names = c("Null Model", "Model 1", "Model 2", "Model 3", "Model 4", "Model 5", "Model 6"))
###### Plotting ####
Interaktion
plot(PSS_mean,WLoad_mean_gmc)
plot(PSS_mean,PCT_mean_gmc)
plot(WLoad_mean_gmc,PCT_mean_gmc)
plot(PSS_mean,PCT_mean_gmc*WLoad_mean_gmc)
x <- ggpredict(h5_c_t, c("WLoad_mean_gmc", "PCT_mean_gmc"))
x
plot(x)
h5_c_t <- lmer(PSS_mean ~ WLoad_mean_gmc * PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
eff.h5_c_t <- effect("WLoad_mean_gmc*PCT_mean_gmc", h5_c_t, KR=T)
eff.h5_c_t <- as.data.frame(eff.h5_c_t)
ggplot(eff.h5_c_t, aes(PCT_mean_gmc, linetype=factor(WLoad_mean_gmc),
color = factor(WLoad_mean_gmc))) +
geom_line(aes(y = fit, group=factor(WLoad_mean_gmc)), size=1.2) +
geom_line(aes(y = lower,
group=factor(WLoad_mean_gmc)), linetype =3) +
geom_line(aes(y = upper,
group=factor(WLoad_mean_gmc)), linetype =3) +
xlab("team PsyCap") +
ylab("Effects on Perceived Stress") +
scale_colour_discrete("") +
scale_linetype_discrete("") +
labs(color='WLoad_mean_gmc') + theme_minimal()
plot_model(h5_c_t, type = "int", terms = c(PCT_mean_gmc,WLoad_mean_gmc), ci.lvl = 0.95)
## Further Test ##
h_test <- lmer(PSS_mean ~ WLoad_mean_gmc + PCI_mean_gmc + PCT_mean_gmc + sex_m + Age_gmc + startup_m + (1 | teamcode), data = dat)
summary(h_test)
screenreg(list(h_null, h_1, h2_c_t, h_test))
#################################### Finish ################################
##detach
detach(dat) |
context("test the k-medoids algorithm")
data <- matrix(c(1,1.1,1,1,2,2,2,2.1), ncol=4)
data2 <- NULL
medoids <- c(2,3)
test_that("k-medoids finds the correct clusters for k = 2", {
expect_equal(6, sum(attributes(k_medoids(data, 2))[[2]]))
})
test_that("k-medoids finds the correct clusters for k = 1", {
expect_equal(4, sum(attributes(k_medoids(data, 1))[[2]]))
})
test_that("k-medoids finds the correct clusters for k = 4", {
expect_equal(10, sum(attributes(k_medoids(data, 4))[[2]]))
})
test_that("wrong input results in error for k-medoids", {
expect_error(k_medoids(data2, 2))
})
test_that("dissim-function finds correct dissimilarities", {
expect_equal(dissim(data, 3), c(1.9, 2, 0, 0.1))
})
test_that("set_closest finds sets correct clusters", {
expect_equal(attributes(set_closest(data, medoids))[[2]], c(1,1,2,2))
})
| /tests/testthat/test-k_medoids.R | permissive | Ahmad-fadl/R-kurs | R | false | false | 845 | r | context("test the k-medoids algorithm")
data <- matrix(c(1,1.1,1,1,2,2,2,2.1), ncol=4)
data2 <- NULL
medoids <- c(2,3)
test_that("k-medoids finds the correct clusters for k = 2", {
expect_equal(6, sum(attributes(k_medoids(data, 2))[[2]]))
})
test_that("k-medoids finds the correct clusters for k = 1", {
expect_equal(4, sum(attributes(k_medoids(data, 1))[[2]]))
})
test_that("k-medoids finds the correct clusters for k = 4", {
expect_equal(10, sum(attributes(k_medoids(data, 4))[[2]]))
})
test_that("wrong input results in error for k-medoids", {
expect_error(k_medoids(data2, 2))
})
test_that("dissim-function finds correct dissimilarities", {
expect_equal(dissim(data, 3), c(1.9, 2, 0, 0.1))
})
test_that("set_closest finds sets correct clusters", {
expect_equal(attributes(set_closest(data, medoids))[[2]], c(1,1,2,2))
})
|
# Place all the common options here
func.filter.datadir <- function(dir.path) {
  # Return the full paths of entries in `dir.path` whose base name starts
  # with "dataset_" followed by digits and/or dots (e.g. "dataset_1.02_x").
  all.entries <- list.files(dir.path, full.names = TRUE)
  keep <- grepl("^dataset_[.0-9]+", basename(all.entries), perl = TRUE)
  all.entries[keep]
}
func.plink.filename <- function(plink.home) {
  # Return the basename (extension stripped) of the first PLINK .bed file in
  # `plink.home`, or NA_character_ when the directory holds none.
  # BUG FIX: the original ignored its argument and listed the undefined
  # global `plink.files`; it also used unanchored ".bed" patterns, where the
  # regex "." matches any character.
  bed.files <- list.files(plink.home, pattern = "\\.bed$")
  sub("\\.bed$", "", bed.files[1])
}
# all input/output files root
# NOTE(review): the paths below are absolute Windows paths; this config is
# machine-specific and must be edited per deployment.
root.dir <- "D:\\work\\bio\\workdir"
# common scripts root (normally it's a parent of this file)
lib.dir <- "D:\\work\\bio\\rlib\\common"
# libs aliases
lib.reader <- file.path(lib.dir, "readers.R")
lib.summary <- file.path(lib.dir, "summarizers.R")
lib.frn <- file.path(lib.dir, "frn.R")
# where all raw input files are stored
base <- file.path(root.dir, "raw")
# absolute paths to raw input
# resolved when this file is sourced; an absent directory simply yields
# character(0) rather than an error
raw.files <- func.filter.datadir(base)
# reader options
phe.as.fam <- FALSE
remove.dups <- TRUE
# minor-allele-frequency threshold for filtering
maf.thresh <- 0.01
ignore_cols <- NULL
use_cols <- NULL
# output folders homes (one per downstream analysis method)
home.lasso <- file.path(root.dir, "lasso_out")
home.gemma <- file.path(root.dir, "gemma_out")
home.moss <- file.path(root.dir, "moss_out")
home.corr <- file.path(root.dir, "correlation")
home.rf <- file.path(root.dir, "rf_out")
home.rbm <- file.path(root.dir, "rbm_out")
home.frn <- file.path(root.dir, "frn_out")
home.tmp <- "D:\\work\\bio\\tmp"
# set output precision (disable scientific notation, 4 significant digits)
options("scipen" = 100, "digits" = 4)
# extra options for script (can be ignored)
extra <- ""
| /_old/common/config.R | permissive | ikavalio/MDRTB-pipe | R | false | false | 1,395 | r | # Place all the common options here
func.filter.datadir <- function(dir.path) {
files <- list.files(dir.path, full.names = TRUE)
subset(files, grepl("^dataset_[.0-9]+", basename(files), perl = TRUE))
}
func.plink.filename <- function(plink.home) {
sub(".bed", "", list.files(plink.files, pattern = "*.bed")[1])
}
# all input/output files root
root.dir <- "D:\\work\\bio\\workdir"
# common scripts root (normally it's a parent of this file)
lib.dir <- "D:\\work\\bio\\rlib\\common"
# libs aliases
lib.reader <- file.path(lib.dir, "readers.R")
lib.summary <- file.path(lib.dir, "summarizers.R")
lib.frn <- file.path(lib.dir, "frn.R")
# where all raw input files are stored
base <- file.path(root.dir, "raw")
# absolute paths to raw input
raw.files <- func.filter.datadir(base)
# reader options
phe.as.fam <- FALSE
remove.dups <- TRUE
maf.thresh <- 0.01
ignore_cols <- NULL
use_cols <- NULL
# output folders homes
home.lasso <- file.path(root.dir, "lasso_out")
home.gemma <- file.path(root.dir, "gemma_out")
home.moss <- file.path(root.dir, "moss_out")
home.corr <- file.path(root.dir, "correlation")
home.rf <- file.path(root.dir, "rf_out")
home.rbm <- file.path(root.dir, "rbm_out")
home.frn <- file.path(root.dir, "frn_out")
home.tmp <- "D:\\work\\bio\\tmp"
# set output precission
options("scipen" = 100, "digits" = 4)
# extra options for script (can be ignored)
extra <- ""
|
###############################################
# Tests for simca and simcares class methods #
###############################################
# Redirect all graphics and console output produced by these plot-heavy tests
# into throw-away temp files so the test run stays quiet.
setup({
pdf(file = tempfile("mdatools-test-simcaplots-", fileext = ".pdf"))
sink(tempfile("mdatools-test-simcaplots-", fileext = ".txt"), append = FALSE, split = FALSE)
})
# Close the pdf device and restore console output once the file's tests finish.
teardown({
dev.off()
sink()
})
## prepare datasets
# Split iris 50/50: even rows form the test set, odd rows the calibration
# set, which is then partitioned into the three species (25 rows each).
data(iris)
ind.test <- seq(2, nrow(iris), by = 2)
x.cal <- iris[-ind.test, 1:4]
x.cal1 <- x.cal[1:25, ]
# mark two rows per class as excluded (mdatools outlier-exclusion mechanism)
x.cal1 <- mda.exclrows(x.cal1, c(1, 10))
x.cal2 <- x.cal[26:50, ]
x.cal2 <- mda.exclrows(x.cal2, c(11, 21))
x.cal3 <- x.cal[51:75, ]
x.cal3 <- mda.exclrows(x.cal3, c(6, 16))
classnames <- levels(iris[, 5])
x.test <- iris[ind.test, 1:4]
c.test <- iris[ind.test, 5]
## create models
# Three one-class SIMCA models covering different cross-validation schemes,
# scaling choices and critical-limit types, so the plot tests below exercise
# each code path.
m1 <- simca(x.cal1, classnames[1], 4, cv = 1, scale = F)
m1 <- selectCompNum(m1, 2)
m2 <- simca(x.cal2, classnames[2], 4, cv = 5, scale = T, lim.type = "chisq", alpha = 0.01)
m2 <- selectCompNum(m2, 3)
m3 <- simca(x.cal3, classnames[3], 4, cv = list("rand", 5, 10), x.test = x.test,
c.test = c.test, scale = T, lim.type = "ddrobust", alpha = 0.10)
m3 <- selectCompNum(m3, 3)
# tighten the acceptance limits after fitting
m3 <- setDistanceLimits(m3, lim.type = "ddrobust", alpha = 0.05)
models <- list("se" = m1, "ve" = m2, "vi" = m3)
# Run the full battery of plot/summary checks on each fitted model. Most
# checks only assert that plotting is silent (no errors/warnings).
for (i in seq_along(models)) {
m <- models[[i]]
name <- names(models)[i]
calres <- list("cal" = m$res[["cal"]])
context(sprintf("simca: test pcamodel related part (model %s)", name))
par(mfrow = c(1, 1))
plot.new()
text(0, 0, paste0("SIMCA - pca plots - ", name), pos = 4)
test_that("loadings plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotLoadings(m))
expect_silent(plotLoadings(m, comp = c(1, 2), type = "h", show.labels = T))
expect_silent(plotLoadings(m, type = "l", show.labels = T, labels = "values"))
expect_silent(plotLoadings(m, type = "b"))
})
test_that("variance plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotVariance(m))
expect_silent(plotVariance(m, type = "h", show.labels = T))
expect_silent(plotCumVariance(m))
expect_silent(plotCumVariance(m, type = "h", show.labels = T))
})
# degrees-of-freedom plots only exist for data-driven limit types
if (m$lim.type != "jm") {
test_that("DoF plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotQDoF(m))
expect_silent(plotT2DoF(m, type = "l", show.labels = T))
expect_silent(plotDistDoF(m))
expect_silent(plotDistDoF(m, type = "l", show.labels = T))
})
}
test_that("scores plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotScores(m))
expect_silent(plotScores(m, comp = c(1, 3), show.labels = T))
expect_silent(plotScores(m, type = "h", show.labels = T, labels = "values", res = calres))
expect_silent(plotScores(m, type = "b", res = calres))
})
test_that("extreme plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotExtreme(m))
expect_silent(plotExtreme(m, comp = 2, show.labels = T))
expect_silent(plotExtreme(m, comp = 1:3))
expect_silent(plotExtreme(m, comp = 1:2, col = c("red", "green")))
})
test_that("residuals plot works well", {
res <- list("cal" = m$res[["cal"]])
par(mfrow = c(2, 2))
expect_silent(plotResiduals(m))
expect_silent(plotResiduals(m, ncomp = 3))
# log-scale residual plot is only exercised for non-chisq limits
if (m$lim.type != "chisq") {
expect_silent(plotResiduals(m, ncomp = 3, cgroup = "categories", res = calres, log = T))
} else {
expect_silent(plotResiduals(m, ncomp = 3, cgroup = "categories", res = calres))
}
expect_silent(plotResiduals(m, ncomp = 2, show.labels = T))
})
context(sprintf("simca: test classmodel related part (model %s)", name))
par(mfrow = c(1, 1))
plot.new()
text(0, 0, paste0("SIMCA - classification plots - ", name), pos = 4)
par(mfrow = c(2, 2))
test_that("performance plot works correctly", {
expect_silent(plotPerformance(m))
expect_silent(plotPerformance(m, type = "h"))
# specificity is expected to ERROR on the model object here (not silent)
expect_error(plotSpecificity(m))
expect_silent(plotSensitivity(m))
})
# classification results
par(mfrow = c(2, 2))
test_that("predictions and probabilities plot works correctly", {
expect_silent(plotPredictions(m))
expect_silent(plotPredictions(m, ncomp = 1, pch = 1))
expect_silent(plotProbabilities(m$calres))
expect_silent(plotProbabilities(m$calres, ncomp = 1, pch = 1))
})
# just output to check in txt file (goes to the sink() set up above)
fprintf("\nSummary and print methods for model: %s\n", name)
cat("-------------------------------\n")
print(m)
summary(m)
# NOTE(review): "prdictions" typo in the context label below - runtime
# string, left unchanged here
context(sprintf("simca: new prdictions (model %s)", name))
par(mfrow = c(1, 1))
plot.new()
text(0, 0, paste0("SIMCA - results for predictions - ", name), pos = 4)
# same battery of checks, now on a fresh prediction result object
res <- predict(m, x.test, c.test)
test_that("variance plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotVariance(res))
expect_silent(plotVariance(res, type = "h", show.labels = T))
expect_silent(plotCumVariance(res))
expect_silent(plotCumVariance(res, type = "h", show.labels = T))
})
test_that("scores plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotScores(res))
expect_silent(plotScores(res, comp = c(1, 3), show.labels = T))
expect_silent(plotScores(res, type = "h", show.labels = T, labels = "values"))
expect_silent(plotScores(res, type = "b", res = calres))
})
test_that("residuals plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotResiduals(res))
expect_silent(plotResiduals(res, ncomp = 3))
expect_silent(plotResiduals(m, cgroup = "categories", res = list("new" = res)))
expect_silent(plotResiduals(m, cgroup = c.test, res = list("new" = res)))
})
par(mfrow = c(2, 2))
test_that("performance plot works correctly", {
expect_silent(plotPerformance(res))
expect_silent(plotPerformance(res, type = "h"))
expect_silent(plotSpecificity(res))
expect_silent(plotSensitivity(res))
})
# classification results
par(mfrow = c(2, 2))
test_that("prediction plot works correctly", {
expect_silent(plotPredictions(res))
expect_silent(plotPredictions(res, ncomp = 1, pch = 1))
expect_silent(plotProbabilities(res))
expect_silent(plotProbabilities(res, ncomp = 1, pch = 1))
})
# just output to check in txt file
fprintf("\nSummary and print methods for result: %s\n", name)
cat("-------------------------------\n")
print(res)
summary(res)
showPredictions(res)
}
# code for manual comparison with DDSIMCA GUI - not uses as real tests
# Intentionally disabled via `if (FALSE)`: run interactively by hand when
# comparing mdatools output against the DDSIMCA GUI.
if (FALSE) {
## prepare datasets
# NOTE(review): rm(list = ls()) wipes the whole workspace - acceptable only
# because this block is interactive-only
rm(list = ls())
data(iris)
ind.test <- seq(2, nrow(iris), by = 2)
x.cal <- iris[-ind.test, 1:4]
x.cal1 <- x.cal[1:25, ]
x.cal2 <- x.cal[26:50, ]
x.cal3 <- x.cal[51:75, ]
classnames <- levels(iris[, 5])
x.test <- iris[ind.test, 1:4]
c.test <- iris[ind.test, 5]
## test for ddmoments
m <- simca(x.cal1, classnames[1], 3, scale = F, lim.type = "ddmoments")
plotResiduals(m, show.labels = T, labels = "indices", cgroup = "categories", log = T)
plotExtreme(m)
summary.pca(m)
summary(m)
r <- predict(m, x.test, c.test)
plotResiduals(m, res = list(new = r), show.labels = T, labels = "indices", cgroup = c.test, log = T)
plotExtreme(m, res = r)
summary(r)
## test for ddrobust
m <- simca(x.cal1, classnames[1], 3, scale = F, lim.type = "ddrobust")
plotResiduals(m, show.labels = T, labels = "indices", cgroup = "categories", log = T)
plotExtreme(m)
summary.pca(m)
summary(m)
r <- predict(m, x.test, c.test)
plotResiduals(m, res = list(new = r), show.labels = T, labels = "indices", cgroup = c.test, log = T)
plotExtreme(m, res = r)
summary(r)
## test for chisq
m <- simca(x.cal1, classnames[1], 3, scale = F, lim.type = "chisq")
plotResiduals(m, show.labels = T, labels = "indices", cgroup = "categories", log = T)
plotExtreme(m)
summary.pca(m)
summary(m)
r <- predict(m, x.test, c.test)
plotResiduals(m, res = list(new = r), show.labels = T, labels = "indices", cgroup = c.test, log = T)
plotExtreme(m, res = r)
summary(r)
}
| /tests/testthat/test-simcaplots.R | permissive | svkucheryavski/mdatools | R | false | false | 8,237 | r | ###############################################
# Tests for simca and simcares class methods #
###############################################
setup({
pdf(file = tempfile("mdatools-test-simcaplots-", fileext = ".pdf"))
sink(tempfile("mdatools-test-simcaplots-", fileext = ".txt"), append = FALSE, split = FALSE)
})
teardown({
dev.off()
sink()
})
## prepare datasets
data(iris)
ind.test <- seq(2, nrow(iris), by = 2)
x.cal <- iris[-ind.test, 1:4]
x.cal1 <- x.cal[1:25, ]
x.cal1 <- mda.exclrows(x.cal1, c(1, 10))
x.cal2 <- x.cal[26:50, ]
x.cal2 <- mda.exclrows(x.cal2, c(11, 21))
x.cal3 <- x.cal[51:75, ]
x.cal3 <- mda.exclrows(x.cal3, c(6, 16))
classnames <- levels(iris[, 5])
x.test <- iris[ind.test, 1:4]
c.test <- iris[ind.test, 5]
## create models
m1 <- simca(x.cal1, classnames[1], 4, cv = 1, scale = F)
m1 <- selectCompNum(m1, 2)
m2 <- simca(x.cal2, classnames[2], 4, cv = 5, scale = T, lim.type = "chisq", alpha = 0.01)
m2 <- selectCompNum(m2, 3)
m3 <- simca(x.cal3, classnames[3], 4, cv = list("rand", 5, 10), x.test = x.test,
c.test = c.test, scale = T, lim.type = "ddrobust", alpha = 0.10)
m3 <- selectCompNum(m3, 3)
m3 <- setDistanceLimits(m3, lim.type = "ddrobust", alpha = 0.05)
models <- list("se" = m1, "ve" = m2, "vi" = m3)
# Smoke-test every plotting and summary method for each of the three SIMCA
# models: the PCA-related part, the classification part, and predictions for
# new data. Each section draws a title page so the PDF produced in setup()
# can be inspected manually. Fix: the third context label read "prdictions".
for (i in seq_along(models)) {
m <- models[[i]]
name <- names(models)[i]
calres <- list("cal" = m$res[["cal"]])
context(sprintf("simca: test pcamodel related part (model %s)", name))
# title page for this model's PCA plots
par(mfrow = c(1, 1))
plot.new()
text(0, 0, paste0("SIMCA - pca plots - ", name), pos = 4)
test_that("loadings plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotLoadings(m))
expect_silent(plotLoadings(m, comp = c(1, 2), type = "h", show.labels = T))
expect_silent(plotLoadings(m, type = "l", show.labels = T, labels = "values"))
expect_silent(plotLoadings(m, type = "b"))
})
test_that("variance plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotVariance(m))
expect_silent(plotVariance(m, type = "h", show.labels = T))
expect_silent(plotCumVariance(m))
expect_silent(plotCumVariance(m, type = "h", show.labels = T))
})
# degrees-of-freedom plots are not defined for the "jm" limit type
if (m$lim.type != "jm") {
test_that("DoF plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotQDoF(m))
expect_silent(plotT2DoF(m, type = "l", show.labels = T))
expect_silent(plotDistDoF(m))
expect_silent(plotDistDoF(m, type = "l", show.labels = T))
})
}
test_that("scores plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotScores(m))
expect_silent(plotScores(m, comp = c(1, 3), show.labels = T))
expect_silent(plotScores(m, type = "h", show.labels = T, labels = "values", res = calres))
expect_silent(plotScores(m, type = "b", res = calres))
})
test_that("extreme plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotExtreme(m))
expect_silent(plotExtreme(m, comp = 2, show.labels = T))
expect_silent(plotExtreme(m, comp = 1:3))
expect_silent(plotExtreme(m, comp = 1:2, col = c("red", "green")))
})
test_that("residuals plot works well", {
res <- list("cal" = m$res[["cal"]])
par(mfrow = c(2, 2))
expect_silent(plotResiduals(m))
expect_silent(plotResiduals(m, ncomp = 3))
# log-scale residuals are only supported for non-chisq limit types
if (m$lim.type != "chisq") {
expect_silent(plotResiduals(m, ncomp = 3, cgroup = "categories", res = calres, log = T))
} else {
expect_silent(plotResiduals(m, ncomp = 3, cgroup = "categories", res = calres))
}
expect_silent(plotResiduals(m, ncomp = 2, show.labels = T))
})
context(sprintf("simca: test classmodel related part (model %s)", name))
# title page for the classification plots
par(mfrow = c(1, 1))
plot.new()
text(0, 0, paste0("SIMCA - classification plots - ", name), pos = 4)
par(mfrow = c(2, 2))
test_that("performance plot works correctly", {
expect_silent(plotPerformance(m))
expect_silent(plotPerformance(m, type = "h"))
# specificity is undefined for a one-class model without test data
expect_error(plotSpecificity(m))
expect_silent(plotSensitivity(m))
})
# classification results
par(mfrow = c(2, 2))
test_that("predictions and probabilities plot works correctly", {
expect_silent(plotPredictions(m))
expect_silent(plotPredictions(m, ncomp = 1, pch = 1))
expect_silent(plotProbabilities(m$calres))
expect_silent(plotProbabilities(m$calres, ncomp = 1, pch = 1))
})
# just output to check in txt file
fprintf("\nSummary and print methods for model: %s\n", name)
cat("-------------------------------\n")
print(m)
summary(m)
context(sprintf("simca: new predictions (model %s)", name))
# title page for prediction results
par(mfrow = c(1, 1))
plot.new()
text(0, 0, paste0("SIMCA - results for predictions - ", name), pos = 4)
res <- predict(m, x.test, c.test)
test_that("variance plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotVariance(res))
expect_silent(plotVariance(res, type = "h", show.labels = T))
expect_silent(plotCumVariance(res))
expect_silent(plotCumVariance(res, type = "h", show.labels = T))
})
test_that("scores plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotScores(res))
expect_silent(plotScores(res, comp = c(1, 3), show.labels = T))
expect_silent(plotScores(res, type = "h", show.labels = T, labels = "values"))
expect_silent(plotScores(res, type = "b", res = calres))
})
test_that("residuals plot works well", {
par(mfrow = c(2, 2))
expect_silent(plotResiduals(res))
expect_silent(plotResiduals(res, ncomp = 3))
expect_silent(plotResiduals(m, cgroup = "categories", res = list("new" = res)))
expect_silent(plotResiduals(m, cgroup = c.test, res = list("new" = res)))
})
par(mfrow = c(2, 2))
test_that("performance plot works correctly", {
expect_silent(plotPerformance(res))
expect_silent(plotPerformance(res, type = "h"))
expect_silent(plotSpecificity(res))
expect_silent(plotSensitivity(res))
})
# classification results
par(mfrow = c(2, 2))
test_that("prediction plot works correctly", {
expect_silent(plotPredictions(res))
expect_silent(plotPredictions(res, ncomp = 1, pch = 1))
expect_silent(plotProbabilities(res))
expect_silent(plotProbabilities(res, ncomp = 1, pch = 1))
})
# just output to check in txt file
fprintf("\nSummary and print methods for result: %s\n", name)
cat("-------------------------------\n")
print(res)
summary(res)
showPredictions(res)
}
# code for manual comparison with DDSIMCA GUI - not used as real tests.
# Kept inside `if (FALSE)` so it never executes during the suite; run the
# statements interactively to compare limits/plots against the GUI output.
if (FALSE) {
## prepare datasets
rm(list = ls())
data(iris)
ind.test <- seq(2, nrow(iris), by = 2)
x.cal <- iris[-ind.test, 1:4]
x.cal1 <- x.cal[1:25, ]
x.cal2 <- x.cal[26:50, ]
x.cal3 <- x.cal[51:75, ]
classnames <- levels(iris[, 5])
x.test <- iris[ind.test, 1:4]
c.test <- iris[ind.test, 5]
## test for ddmoments
m <- simca(x.cal1, classnames[1], 3, scale = F, lim.type = "ddmoments")
plotResiduals(m, show.labels = T, labels = "indices", cgroup = "categories", log = T)
plotExtreme(m)
summary.pca(m)
summary(m)
r <- predict(m, x.test, c.test)
plotResiduals(m, res = list(new = r), show.labels = T, labels = "indices", cgroup = c.test, log = T)
plotExtreme(m, res = r)
summary(r)
## test for ddrobust
m <- simca(x.cal1, classnames[1], 3, scale = F, lim.type = "ddrobust")
plotResiduals(m, show.labels = T, labels = "indices", cgroup = "categories", log = T)
plotExtreme(m)
summary.pca(m)
summary(m)
r <- predict(m, x.test, c.test)
plotResiduals(m, res = list(new = r), show.labels = T, labels = "indices", cgroup = c.test, log = T)
plotExtreme(m, res = r)
summary(r)
## test for chisq
m <- simca(x.cal1, classnames[1], 3, scale = F, lim.type = "chisq")
plotResiduals(m, show.labels = T, labels = "indices", cgroup = "categories", log = T)
plotExtreme(m)
summary.pca(m)
summary(m)
r <- predict(m, x.test, c.test)
plotResiduals(m, res = list(new = r), show.labels = T, labels = "indices", cgroup = c.test, log = T)
plotExtreme(m, res = r)
summary(r)
}
|
library(shiny)
library(reshape2); library(ggplot2)
# Load the three datasets: city metadata, downscaled CRU data, raw weather
# station data, and station data with NAs left in place.
cities.dat <- read.csv("dat1.csv",header=T,stringsAsFactors=F)
cru.dat <- read.csv("dat2.csv",header=T,stringsAsFactors=F)
sta.dat <- read.csv("dat3.csv",header=T,stringsAsFactors=F)
staNA.dat <- read.csv("dat3b.csv",header=T,stringsAsFactors=F)
# Turn underscored column names into display names. For station columns the
# first characters encode a region code; it is stripped into reg.names and
# the remainder becomes the station (city) name. Columns 1:3 are the
# Year/Month/Variable id columns and are left untouched.
cru.names <- gsub("_"," ",names(cru.dat)[-c(1:3)])
names(cru.dat)[-c(1:3)] <- cru.names
sta.names <- gsub("_"," ",substr(names(sta.dat)[-c(1:3)],3,nchar(names(sta.dat)[-c(1:3)])))
reg.names <- gsub("_"," ",substr(names(sta.dat)[-c(1:3)],1,1))
names(sta.dat)[-c(1:3)] <- sta.names
names(staNA.dat)[-c(1:3)] <- sta.names
reg.sta.mat <- cbind(reg.names,sta.names)
# Order months as a factor so plots/tables sort Jan..Dec, not alphabetically.
mos <- c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
cru.dat$Month <- factor(cru.dat$Month,levels=mos)
# Variable codes used in the data files: "T" temperature, "P" precipitation.
var <- c("T","P")
reshape.fun <- function(x){
x <- melt(x,id=c("Year","Month","Variable"))
x <- dcast(x,Year+Variable+variable~Month)
rownames(x) <- NULL
x
}
# Load the multi-city density plotting helper; source() returns a list whose
# $value element is the last evaluated expression (the function itself).
plot.multiDens <- source("plot.multiDens.txt")$value
# Palette of 32 colours (7 base colours recycled 4 times plus 4 extras) so up
# to 32 cities can be drawn; an alpha suffix is appended at plot time via
# paste(clrs[i], alpha, sep = ""). The original wrapped this vector in
# paste(), which is a no-op on a single character vector and was removed.
clrs <- c("#000080","#CD3700","#ADFF2F","#8B4513","#006400","#2F4F4F","#CD9B1D",
          "#000080","#CD3700","#ADFF2F","#8B4513","#006400","#2F4F4F","#CD9B1D",
          "#000080","#CD3700","#ADFF2F","#8B4513","#006400","#2F4F4F","#CD9B1D",
          "#000080","#CD3700","#ADFF2F","#8B4513","#006400","#2F4F4F","#CD9B1D",
          "#000080","#CD3700","#ADFF2F","#8B4513")
shinyServer(function(input, output, session){
# Available city names depend on the selected dataset: both station datasets
# share sta.names, the CRU dataset uses cru.names.
city.names <- reactive({
if(input$dataset=="Weather stations (CRU-substituted NAs)" | input$dataset=="Weather stations (w/ missing data)") sta.names else if(input$dataset=="2-km downscaled CRU 3.1") cru.names
})
# City selector populated from the reactive list above; allows multi-select.
output$cityNames <- renderUI({
selectInput("city","Choose a city:",choices=city.names(),selected=city.names()[1],multiple=T)
})
# The active data frame corresponding to the chosen dataset.
DATASET <- reactive({
if(input$dataset=="Weather stations (CRU-substituted NAs)"){
d <- sta.dat
} else if(input$dataset=="Weather stations (w/ missing data)"){
d <- staNA.dat
} else if(input$dataset=="2-km downscaled CRU 3.1"){
d <- cru.dat
}
d
})
# Year-range slider limited to years where every selected city has data.
output$yearSlider <- renderUI({
if(length(input$city)){
if(length(input$city)==1){
r <- range(DATASET()$Year[!is.na(DATASET()[input$city])])
} else {
# intersect the covered ranges across cities: latest start, earliest end
r <- c()
for(i in 1:length(input$city)) r <- rbind( r, range(DATASET()$Year[!is.na(DATASET()[input$city[i]])]) )
r <- c(max(r[,1]),min(r[,2]))
}
mn <- r[1]
mx <- r[2]
# default selection starts at 1950 when the data reach back that far
sliderInput("yrs", "Year range:" ,mn, mx, c(max(mn,1950),mx), step=1, sep="")
}
})
# Variable and month selectors appear once at least one city is chosen.
output$Var <- renderUI({
if(length(input$city)) selectInput("var","Choose a variable:",choices=c("Precipitation","Temperature"))
})
output$Mo <- renderUI({
if(length(input$city)) selectInput("mo","Choose a month:",choices=c(mos,"Choose multiple"))
})
# Currently selected month(s): the single month from input$mo, the values of
# the secondary multi-select (input$mo2) when "Choose multiple" is active,
# or NULL while nothing usable is selected. Rewritten with guard clauses and
# short-circuit && (the original used elementwise & on scalar conditions).
curMo <- reactive({
if (length(input$mo) == 0) return(NULL)
if (input$mo != "Choose multiple") return(input$mo)
# "Choose multiple" is active: use the multi-select once it has values
if (!is.null(input$mo2) && length(input$mo2) > 0) input$mo2 else NULL
})
# Column indices in DATASET(): the three id columns plus selected cities.
cols <- reactive({
if(length(input$city)) cols <- c(1:3,match(input$city,names(DATASET()))) else cols <- NULL
})
# Row/column subset of the active dataset for the chosen years, months,
# variable (matched by its first letter, "P"/"T") and cities; NULL until all
# inputs are available.
dat <- reactive({
if(length(curMo()) & length(input$city) & !is.null(cols()) & length(input$yrs)){
mo <- DATASET()$Month %in% curMo()
d <- subset(DATASET(),Year>=input$yrs[1] & Year<=input$yrs[2] & mo & Variable==substr(input$var,1,1), select=cols())
rownames(d) <- NULL
} else d <- NULL
d
})
# Values reshaped for plotting: one row per Year/Variable/City. When several
# months are selected, an optional seasonal statistic (mean/total/sd/min/max)
# is computed row-wise across the chosen months; "None" keeps raw values.
dat2 <- reactive({
if(!is.null(dat())){
x <- as.numeric(t(as.matrix(dat()[-c(1:3)])))
v <- reshape.fun(dat())
if(!is.null(input$stat) & length(input$mo2)>1){
x <- switch(input$stat,
None=as.numeric(t(as.matrix(dat()[-c(1:3)]))),
Mean=apply(v[-c(1:3)],1,mean),
Total=apply(v[-c(1:3)],1,sum),
'Std. Dev.'=apply(v[-c(1:3)],1,sd),
Minimum=apply(v[-c(1:3)],1,min),
Maximum=apply(v[-c(1:3)],1,max)
)
}
x <- cbind(v[1:3],x)
names(x) <- c("Year","Variable","City","Values")
} else { x <- NULL }
x
})
# Histogram-option widgets below only make sense when histograms are shown:
# either a single city, or multiple cities with "Separate histograms" view.
# Checkbox to enable a custom number of histogram bins.
output$histBin <- renderUI({
if(length(input$city)){
if(length(input$city)==1){
checkboxInput("hb","Vary number of histogram bins",FALSE)
} else if(length(input$multiplot)){
if(input$multiplot=="Separate histograms"){
checkboxInput("hb","Vary number of histogram bins",FALSE)
}
}
}
})
# Bin-count dropdown, shown only when the checkbox above is ticked.
output$histBinNum <- renderUI({
if(length(input$hb)){
if(input$hb){
if(!length(input$multiplot)){
selectInput("hbn","Number of histogram bins (approximate):",choices=c("5","10","20","40"),selected="5")
} else if(input$multiplot=="Separate histograms"){
selectInput("hbn","Number of histogram bins (approximate):",choices=c("5","10","20","40"),selected="5")
}
}
}
})
# Checkbox for overlaying a kernel density curve on the histogram(s).
# Passing input$hdc as the initial value preserves the user's choice when
# the widget is re-rendered.
output$histDensCurve <- renderUI({
if(length(input$city)){
if(!length(input$multiplot)){
checkboxInput("hdc","Overlay density curve",FALSE)
} else if(input$multiplot=="Separate histograms"|length(input$city)==1){
checkboxInput("hdc","Overlay density curve",input$hdc)
}
}
})
# Bandwidth-adjust slider for the density curve, shown when it is enabled.
output$histDensCurveBW <- renderUI({
if(length(input$hdc)){
if(!length(input$multiplot)){
if(input$hdc) sliderInput("hdcBW","bandwidth:",0.2,2,1,0.2)
} else {
if(input$hdc & input$multiplot=="Separate histograms") sliderInput("hdcBW","bandwidth:",0.2,2,1,0.2)
}
}
})
# Checkbox for drawing a rug of individual observations under the histogram.
output$histIndObs <- renderUI({
if(length(input$city)){
if(!length(input$multiplot)){
checkboxInput("hio","Show individual observations",FALSE)
} else if(input$multiplot=="Separate histograms"|length(input$city)==1){
checkboxInput("hio","Show individual observations",input$hio)
}
}
})
# Secondary month multi-select, shown when "Choose multiple" is picked.
output$multMo <- renderUI({
if(length(input$mo)){
if(input$mo=="Choose multiple"){
selectInput("mo2","Select CONSECUTIVE months:", mos, multiple=TRUE)
}
}
})
# Seasonal-statistic selector for multi-month selections; "Total" is offered
# for precipitation, "Mean" for temperature.
output$multMo2 <- renderUI({
if(length(input$mo)){
if(length(input$mo2)>1 & input$mo=="Choose multiple" & input$var=="Precipitation"){
selectInput("stat","Choose seasonal statistic:",choices=c("None","Total","Std. Dev.","Minimum","Maximum"),selected="None")
}
else if(length(input$mo2)>1 & input$mo=="Choose multiple" & input$var=="Temperature"){
selectInput("stat","Choose seasonal statistic:",choices=c("None","Mean","Std. Dev.","Minimum","Maximum"),selected="None")
}
}
})
# Layout choice when several cities are selected.
output$multCity <- renderUI({
if(length(input$city)>1){
radioButtons("multiplot","Plot view for multiple cities:",c("Separate histograms","Common-axis density estimation plots"),"Separate histograms")
}
})
# CSV download of the currently subset data.
output$dldat <- downloadHandler(
filename = function() { "data.csv" },
content = function(file) {
write.csv(dat(), file)
}
)
# Plot-panel height in pixels: a single panel (or the common-axis density
# view) is 600px tall; the separate-histogram grid gets 300px per row of two
# panels, rounding the panel count up to fill the two-column layout.
htfun <- function() {
  n.panels <- length(input$city)
  if (n.panels > 1) n.panels <- n.panels + n.panels %% 2
  full.ht <- 600
  if (length(input$multiplot)) {
    if (n.panels == 1 | input$multiplot == "Common-axis density estimation plots") {
      ht <- full.ht
    } else if (input$multiplot == "Separate histograms") {
      ht <- (n.panels / 2) * (full.ht / 2)
    }
  } else {
    ht <- full.ht
  }
  ht
}
# Main plot: histogram(s) of the selected values, one per city (or a single
# common-axis density plot). Titles are built with paste(), which inserts a
# double space whenever `units` is empty; the original cleaned them with
# gsub(" "," ",...) — a no-op. Fixed to gsub(" +"," ",...) so runs of spaces
# collapse to one, as intended.
output$plot <- renderPlot({
if(length(input$city) & !is.null(dat2())){
if(input$mo!="Choose multiple") mo <- input$mo else mo <- input$mo2
## Print as if selection implies consecutive months. Still need to ensure that months must actually be consecutive. Users can still leave gaps in selection.
if(length(mo)>1) mo <- paste(mo[1],"-",mo[length(mo)])
# colour and x-axis label per variable
if(input$var=="Precipitation"){
clr <- "#1E90FF" # "dodgerblue"
xlabel <- expression("Observations"~(mm)~"")
} else if(input$var=="Temperature") {
clr <- "#FFA500" # "orange"
xlabel <- expression("Observations"~(degree~C)~"")
}
units <- ""
if(length(input$stat)>0){ if(input$stat!="None") units <- input$stat }
n <- length(input$city)
# default base-R bin rule unless the user chose an explicit bin count
h.brks <- "Sturges"
if(length(input$hb) & length(input$hbn)) if(input$hb) h.brks <- as.numeric(input$hbn)
if(n==1){
# single city: one histogram with optional rug and density overlay
dat2.cities <- dat2()$City
x <- as.numeric(as.matrix(subset(dat2(),dat2.cities==input$city,select=4)))
if(!all(is.na(x))){
hist(x,breaks=h.brks,main=gsub(" +"," ",paste(input$yrs[1],"-",input$yrs[2],input$city,mo,units,input$var)),
xlab=xlabel,col=clr,cex.main=1.3,cex.axis=1.3,cex.lab=1.3,prob=T)
if(length(input$hio)) if(input$hio) rug(x)
if(length(input$hdc)) if(input$hdc & length(input$hdcBW)) lines(density(na.omit(x),adjust=input$hdcBW),lwd=2)
}
} else if(!length(input$multiplot)){
# multiple cities before the layout radio has rendered: default grid
if(n>1) layout(matrix(1:(n+n%%2),ceiling(n/2),2,byrow=T))
dat2.cities <- dat2()$City
for(i in 1:n){
x <- as.numeric(as.matrix(subset(dat2(),dat2.cities==input$city[i],select=4)))
if(!all(is.na(x))){
hist(x,breaks=h.brks,main=gsub(" +"," ",paste(input$yrs[1],"-",input$yrs[2],input$city[i],mo,units,input$var)),
xlab=xlabel,col=clr,cex.main=1.3,cex.axis=1.3,cex.lab=1.3,prob=T)
if(length(input$hio)) if(input$hio) rug(x)
if(length(input$hdc)) if(input$hdc & length(input$hdcBW)) lines(density(na.omit(x),adjust=input$hdcBW),lwd=2)
}
}
} else if(length(input$multiplot)){
if(n>1 & input$multiplot=="Separate histograms") layout(matrix(1:(n+n%%2),ceiling(n/2),2,byrow=T))
if(input$multiplot=="Separate histograms"){
dat2.cities <- dat2()$City
for(i in 1:n){
x <- as.numeric(as.matrix(subset(dat2(),dat2.cities==input$city[i],select=4)))
if(!all(is.na(x))){
hist(x,breaks=h.brks,main=gsub(" +"," ",paste(input$yrs[1],"-",input$yrs[2],input$city[i],mo,units,input$var)),
xlab=xlabel,col=clr,cex.main=1.3,cex.axis=1.3,cex.lab=1.3,prob=T)
if(length(input$hio)) if(input$hio) rug(x)
if(length(input$hdc)) if(input$hdc & length(input$hdcBW)) lines(density(na.omit(x),adjust=input$hdcBW),lwd=2)
}
}
} else if(n>1 & input$multiplot=="Common-axis density estimation plots"){
# density view: use seasonal-statistic values when one is selected
data <- dat()
if(length(input$mo2)>1) if(length(input$stat)) if(input$stat!="None") data <- dat2()
plot.multiDens(data,input$city,stat=input$stat,n.mo=length(input$mo2),main=gsub(" +"," ",paste(input$yrs[1],"-",input$yrs[2],mo,units,input$var)),
xlab=xlabel,cex.main=1.3,cex.axis=1.3,cex.lab=1.3)
}
}
}
},
height=htfun, width="auto"
)
# Text summary of the subset data; when a seasonal statistic is selected a
# second summary of the per-season statistic values is appended.
output$summary <- renderPrint({
if(!is.null(dat())){
x <- list(summary(dat()[-c(1:3)]))
if(input$mo!="Choose multiple") mo <- input$mo else mo <- input$mo2
if(length(mo)>1) mo <- paste(mo[1],"-",mo[length(mo)]) ## Still need to ensure that months must be consecutive.
names(x) <- names.x <- paste(input$yrs[1],"-",input$yrs[2],mo,"City",input$var,"Data")
if(length(input$stat)>0 & length(input$mo2)>1){
if(input$stat!="None"){
dat2.cities <- dat2()$City
x2 <- c()
for(i in 1:ncol(x[[1]])) x2 <- cbind(x2, as.numeric(as.matrix(subset(dat2(),dat2.cities==input$city[i],select=4))))
x2 <- summary(x2)
colnames(x2) <- colnames(x[[1]])
x <- list(x[[1]],x2)
names(x) <- c(names.x,paste("Distribution Summary of Seasonal",input$stat,input$var))
}
}
x
}
})
# Raw data table of the current subset.
output$table <- renderTable({
if(!is.null(dat())){
dat()
}
})
# Regression-tab inputs: explanatory and response variable pickers plus an
# optional month filter for the regression data.
output$regInputX <- renderUI({
if(length(input$city)){
selectInput("regX","Explanatory variable(s)",c("Year","Precipitation","Temperature"),selected="Year")
}
})
output$regInputY <- renderUI({
if(length(input$city)){
selectInput("regY","Response variable",c("Year","Precipitation","Temperature"),selected="Precipitation")
}
})
output$regCondMo <- renderUI({
if(length(input$city)){
selectInput("regbymo","Select consecutive months:",c("All",mos),selected="All")
}
})
# Per-city regression data: cast each city's column into a data frame with
# Year, Month, Precipitation and Temperature columns, optionally filtered to
# a single month; returns a list with one data frame per selected city.
reg.dat <- reactive({
if(length(input$city) & length(input$yrs) & length(input$regbymo)){
d <- list()
for(i in 1:length(input$city)){
d.tmp <- dcast(subset(DATASET(),Year>=input$yrs[1] & Year<=input$yrs[2],select=c(1:3,which(names(DATASET())==input$city[i]))),Year+Month ~Variable,value=input$city[i])
names(d.tmp)[3:4] <- c("Precipitation","Temperature")
if(length(input$regbymo)) if(input$regbymo!="All") d.tmp <- subset(d.tmp,d.tmp$Month==input$regbymo)
d[[i]] <- d.tmp
}
d
}
})
# Model formula string "Y ~ X", replicated once per city.
form <- reactive({
if(length(input$city) & length(input$yrs) & length(input$regX) & length(input$regY) & length(input$regbymo)){
form <- c()
for(i in 1:length(input$city)) form <- c(form,paste(input$regY,"~",paste(input$regX,collapse="+")))
} else form <- NULL
form
})
# Fitted linear model for each city, named "<city> <formula>".
lm1 <- reactive({
if(length(input$city) & length(input$regY) & length(input$regX) & length(input$yrs) & length(input$regbymo) & !is.null(form())){
lm.list <- list()
for(i in 1:length(input$city)) lm.list[[i]] <- lm(formula=as.formula(form()[i]),data=reg.dat()[[i]])
names(lm.list) <- paste(input$city,form())
lm.list
}
})
# Regression-plot display toggles: time-series lines, points, fitted lines,
# the ggplot rendering mode, and an optional confidence band.
output$reglines <- renderUI({
if(length(input$city) & length(input$regY) & length(input$regX)){
checkboxInput("reglns","Show time series line(s)",FALSE)
}
})
output$regpoints <- renderUI({
if(length(input$city) & length(input$regY) & length(input$regX)){
checkboxInput("regpts","Show points",TRUE)
}
})
output$regablines <- renderUI({
if(length(input$city) & length(input$regY) & length(input$regX)){
checkboxInput("regablns","Show regression line(s)",FALSE)
}
})
output$regGGPLOT <- renderUI({
if(length(input$city) & length(input$regY) & length(input$regX)){
checkboxInput("reg.ggplot","Switch to ggplot version",FALSE)
}
})
# Confidence band is only offered when regression lines are being drawn.
output$regGGPLOTse <- renderUI({
if(length(input$city) & length(input$regY) & length(input$regX) & length(input$regablns)){
if(input$regablns) checkboxInput("reg.ggplot.se","Show shaded confidence band for mean response",FALSE)
}
})
# Regression plot in either base graphics or ggplot mode. Fix: geom_line()
# has no `shape` parameter (shape belongs to geom_point); ggplot2 ignored it
# with an "Ignoring unknown parameters" warning, so it is removed. The
# misleading "Don't add shaded confidence region" comment is corrected too.
output$regplot <- renderPlot({
if(length(input$city) & length(input$regY) & length(input$regX) & length(input$reglns) & length(input$yrs) & length(reg.dat())){
# common y-limits across all cities
ylm <- do.call(range,lapply(reg.dat(),function(x) range(x[[input$regY]],na.rm=T)))
alpha <- 70
x <- reg.dat()[[1]][[input$regX]]
n <- length(x)
xr <- range(x)
# when Year is an axis, spread the repeated year values into an even grid
if(input$regX=="Year") { x <- seq(xr[1],xr[2]+1,len=n+1); x <- x[-c(n+1)] }
y <- reg.dat()[[1]][[input$regY]]
yr <- range(y)
if(input$regY=="Year") { y <- seq(yr[1],yr[2]+1,len=n+1); y <- y[-c(n+1)] }
if(!input$reg.ggplot){
# base-graphics mode: empty frame, then one layer set per city
plot(0,0,type="n",xlim=range(x),ylim=ylm,xlab=input$regX,ylab=input$regY,main=form()[1],cex.main=1.3,cex.axis=1.3,cex.lab=1.3)
for(i in 1:length(input$city)){
if(input$regX!="Year") x <- reg.dat()[[i]][[input$regX]]
if(input$regY!="Year") y <- reg.dat()[[i]][[input$regY]]
if(length(input$reg.ggplot.se)) {
if(input$reg.ggplot.se){
# shaded confidence band for the mean response, drawn as a polygon
d1 <- data.frame(x,y)
names(d1) <- c(input$regX,input$regY)
pred <- predict(lm1()[[i]],d1,interval="confidence")
CIL <- pred[,'lwr']
CIU <- pred[,'upr']
ord <- order(d1[,1])
xvec <- c(d1[ord,1],tail(d1[ord,1],1),rev(d1[ord,1]),d1[ord,1][1])
yvec <- c(CIL[ord],tail(CIU[ord],1),rev(CIU[ord]),CIL[ord][1])
polygon(xvec,yvec,col="#00000070",border=NA)
}
}
if(input$reglns) lines(x[order(x)],y[order(x)], lty=1, lwd=1, col=paste(clrs[i],alpha,sep=""))
if(input$regpts) points(x,y, pch=21, col=1, bg=paste(clrs[i],alpha,sep=""), cex=1)
if(input$regablns) abline(lm1()[[i]],col=clrs[i],lwd=2)
}
}
if(input$reg.ggplot){
# ggplot mode: stack all cities into one long data frame
if(input$regX=="Year") x <- rep(x,length(input$city)) else x <- c()
if(input$regY=="Year") y <- rep(y,length(input$city)) else y <- c()
for(i in 1:length(input$city)){
if(input$regX!="Year") x <- c(x, reg.dat()[[i]][[input$regX]])
if(input$regY!="Year") y <- c(y, reg.dat()[[i]][[input$regY]])
}
cond <- rep(input$city,each=n)
d2 <- data.frame(cond,x,y)
names(d2) <- c("City",input$regX,input$regY)
p <- ggplot(d2, aes_string(x=input$regX, y=input$regY, color="City")) +
scale_colour_hue(l=50) # Use a darker palette
if(input$regablns){
if(length(input$reg.ggplot.se)) SE <- input$reg.ggplot.se else SE <- F
p <- p +
geom_smooth(method=lm, # Add linear regression lines
se=SE, # draw the shaded confidence band only when requested
fullrange=T)
}
if(input$reglns) p <- p + geom_line()
if(input$regpts) p <- p + geom_point(shape=1)
print(p)
}
}
}, height=function(){ w <- session$clientData$output_regplot_width; if(length(w)) return(round(0.5*w)) else return("auto") }, width="auto"
)
# Printed summary() of every per-city linear model.
output$regsum <- renderPrint({
if(length(input$city) & length(input$regY) & length(input$regX) & !is.null(form())){
lapply(lm1(),summary)
}
})
# Page header describing the active dataset, with the SNAP logo appended as
# raw HTML.
output$header <- renderUI({
if(input$dataset=="Weather stations (CRU-substituted NAs)" | input$dataset=="Weather stations (w/ missing data)"){
txt <- paste("Weather station historical time series climate data for",length(city.names()),"AK cities")
} else if(input$dataset=="2-km downscaled CRU 3.1"){
txt <- paste("2-km downscaled CRU 3.1 historical time series climate data for",length(city.names()),"AK cities")
}
txt <- HTML(paste(txt,'<a href="http://snap.uaf.edu" target="_blank"><img id="stats_logo" align="right" alt="SNAP Logo" src="./img/SNAP_acronym_100px.png" /></a>',sep="",collapse=""))
})
})
| /ak_station_cru_eda/server.R | no_license | xtmgah/shiny-apps | R | false | false | 17,423 | r | library(shiny)
library(reshape2); library(ggplot2)
cities.dat <- read.csv("dat1.csv",header=T,stringsAsFactors=F)
cru.dat <- read.csv("dat2.csv",header=T,stringsAsFactors=F)
sta.dat <- read.csv("dat3.csv",header=T,stringsAsFactors=F)
staNA.dat <- read.csv("dat3b.csv",header=T,stringsAsFactors=F)
cru.names <- gsub("_"," ",names(cru.dat)[-c(1:3)])
names(cru.dat)[-c(1:3)] <- cru.names
sta.names <- gsub("_"," ",substr(names(sta.dat)[-c(1:3)],3,nchar(names(sta.dat)[-c(1:3)])))
reg.names <- gsub("_"," ",substr(names(sta.dat)[-c(1:3)],1,1))
names(sta.dat)[-c(1:3)] <- sta.names
names(staNA.dat)[-c(1:3)] <- sta.names
reg.sta.mat <- cbind(reg.names,sta.names)
mos <- c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
cru.dat$Month <- factor(cru.dat$Month,levels=mos)
var <- c("T","P")
reshape.fun <- function(x){
x <- melt(x,id=c("Year","Month","Variable"))
x <- dcast(x,Year+Variable+variable~Month)
rownames(x) <- NULL
x
}
plot.multiDens <- source("plot.multiDens.txt")$value
clrs <- paste(c("#000080","#CD3700","#ADFF2F","#8B4513","#006400","#2F4F4F","#CD9B1D",
"#000080","#CD3700","#ADFF2F","#8B4513","#006400","#2F4F4F","#CD9B1D",
"#000080","#CD3700","#ADFF2F","#8B4513","#006400","#2F4F4F","#CD9B1D",
"#000080","#CD3700","#ADFF2F","#8B4513","#006400","#2F4F4F","#CD9B1D",
"#000080","#CD3700","#ADFF2F","#8B4513"))
shinyServer(function(input, output, session){
city.names <- reactive({
if(input$dataset=="Weather stations (CRU-substituted NAs)" | input$dataset=="Weather stations (w/ missing data)") sta.names else if(input$dataset=="2-km downscaled CRU 3.1") cru.names
})
output$cityNames <- renderUI({
selectInput("city","Choose a city:",choices=city.names(),selected=city.names()[1],multiple=T)
})
DATASET <- reactive({
if(input$dataset=="Weather stations (CRU-substituted NAs)"){
d <- sta.dat
} else if(input$dataset=="Weather stations (w/ missing data)"){
d <- staNA.dat
} else if(input$dataset=="2-km downscaled CRU 3.1"){
d <- cru.dat
}
d
})
output$yearSlider <- renderUI({
if(length(input$city)){
if(length(input$city)==1){
r <- range(DATASET()$Year[!is.na(DATASET()[input$city])])
} else {
r <- c()
for(i in 1:length(input$city)) r <- rbind( r, range(DATASET()$Year[!is.na(DATASET()[input$city[i]])]) )
r <- c(max(r[,1]),min(r[,2]))
}
mn <- r[1]
mx <- r[2]
sliderInput("yrs", "Year range:" ,mn, mx, c(max(mn,1950),mx), step=1, sep="")
}
})
output$Var <- renderUI({
if(length(input$city)) selectInput("var","Choose a variable:",choices=c("Precipitation","Temperature"))
})
output$Mo <- renderUI({
if(length(input$city)) selectInput("mo","Choose a month:",choices=c(mos,"Choose multiple"))
})
curMo <- reactive({
if(length(input$mo)){
if(length(input$mo2)>0 & !is.null(input$mo2) & input$mo=="Choose multiple") { curMo <- input$mo2
} else if(input$mo!="Choose multiple") { curMo <- input$mo
} else if(input$mo=="Choose multiple") { curMo <- NULL }
} else curMo <- NULL
curMo
})
cols <- reactive({
if(length(input$city)) cols <- c(1:3,match(input$city,names(DATASET()))) else cols <- NULL
})
dat <- reactive({
if(length(curMo()) & length(input$city) & !is.null(cols()) & length(input$yrs)){
mo <- DATASET()$Month %in% curMo()
d <- subset(DATASET(),Year>=input$yrs[1] & Year<=input$yrs[2] & mo & Variable==substr(input$var,1,1), select=cols())
rownames(d) <- NULL
} else d <- NULL
d
})
dat2 <- reactive({
if(!is.null(dat())){
x <- as.numeric(t(as.matrix(dat()[-c(1:3)])))
v <- reshape.fun(dat())
if(!is.null(input$stat) & length(input$mo2)>1){
x <- switch(input$stat,
None=as.numeric(t(as.matrix(dat()[-c(1:3)]))),
Mean=apply(v[-c(1:3)],1,mean),
Total=apply(v[-c(1:3)],1,sum),
'Std. Dev.'=apply(v[-c(1:3)],1,sd),
Minimum=apply(v[-c(1:3)],1,min),
Maximum=apply(v[-c(1:3)],1,max)
)
}
x <- cbind(v[1:3],x)
names(x) <- c("Year","Variable","City","Values")
} else { x <- NULL }
x
})
output$histBin <- renderUI({
if(length(input$city)){
if(length(input$city)==1){
checkboxInput("hb","Vary number of histogram bins",FALSE)
} else if(length(input$multiplot)){
if(input$multiplot=="Separate histograms"){
checkboxInput("hb","Vary number of histogram bins",FALSE)
}
}
}
})
output$histBinNum <- renderUI({
if(length(input$hb)){
if(input$hb){
if(!length(input$multiplot)){
selectInput("hbn","Number of histogram bins (approximate):",choices=c("5","10","20","40"),selected="5")
} else if(input$multiplot=="Separate histograms"){
selectInput("hbn","Number of histogram bins (approximate):",choices=c("5","10","20","40"),selected="5")
}
}
}
})
output$histDensCurve <- renderUI({
if(length(input$city)){
if(!length(input$multiplot)){
checkboxInput("hdc","Overlay density curve",FALSE)
} else if(input$multiplot=="Separate histograms"|length(input$city)==1){
checkboxInput("hdc","Overlay density curve",input$hdc)
}
}
})
output$histDensCurveBW <- renderUI({
if(length(input$hdc)){
if(!length(input$multiplot)){
if(input$hdc) sliderInput("hdcBW","bandwidth:",0.2,2,1,0.2)
} else {
if(input$hdc & input$multiplot=="Separate histograms") sliderInput("hdcBW","bandwidth:",0.2,2,1,0.2)
}
}
})
output$histIndObs <- renderUI({
if(length(input$city)){
if(!length(input$multiplot)){
checkboxInput("hio","Show individual observations",FALSE)
} else if(input$multiplot=="Separate histograms"|length(input$city)==1){
checkboxInput("hio","Show individual observations",input$hio)
}
}
})
output$multMo <- renderUI({
if(length(input$mo)){
if(input$mo=="Choose multiple"){
selectInput("mo2","Select CONSECUTIVE months:", mos, multiple=TRUE)
}
}
})
output$multMo2 <- renderUI({
if(length(input$mo)){
if(length(input$mo2)>1 & input$mo=="Choose multiple" & input$var=="Precipitation"){
selectInput("stat","Choose seasonal statistic:",choices=c("None","Total","Std. Dev.","Minimum","Maximum"),selected="None")
}
else if(length(input$mo2)>1 & input$mo=="Choose multiple" & input$var=="Temperature"){
selectInput("stat","Choose seasonal statistic:",choices=c("None","Mean","Std. Dev.","Minimum","Maximum"),selected="None")
}
}
})
output$multCity <- renderUI({
if(length(input$city)>1){
radioButtons("multiplot","Plot view for multiple cities:",c("Separate histograms","Common-axis density estimation plots"),"Separate histograms")
}
})
output$dldat <- downloadHandler(
filename = function() { "data.csv" },
content = function(file) {
write.csv(dat(), file)
}
)
htfun <- function(){
n <- length(input$city)
if(n>1) n <- n + n%%2
ht1 <- 600
if(length(input$multiplot)){
if(n==1 | input$multiplot=="Common-axis density estimation plots") ht <- ht1 else if(input$multiplot=="Separate histograms") ht <- (n/2)*(ht1/2)
} else { ht <- ht1 }
ht
}
output$plot <- renderPlot({
if(length(input$city) & !is.null(dat2())){
if(input$mo!="Choose multiple") mo <- input$mo else mo <- input$mo2
## Print as if selection implies consecutive months. Still need to ensure that months must actually be consecutive. Users can still leave gaps in selection.
if(length(mo)>1) mo <- paste(mo[1],"-",mo[length(mo)])
if(input$var=="Precipitation"){
clr <- "#1E90FF" # "dodgerblue"
xlabel <- expression("Observations"~(mm)~"")
} else if(input$var=="Temperature") {
clr <- "#FFA500" # "orange"
xlabel <- expression("Observations"~(degree~C)~"")
}
units <- ""
if(length(input$stat)>0){ if(input$stat!="None") units <- input$stat }
n <- length(input$city)
h.brks <- "Sturges"
if(length(input$hb) & length(input$hbn)) if(input$hb) h.brks <- as.numeric(input$hbn)
if(n==1){
dat2.cities <- dat2()$City
x <- as.numeric(as.matrix(subset(dat2(),dat2.cities==input$city,select=4)))
if(!all(is.na(x))){
hist(x,breaks=h.brks,main=gsub(" "," ",paste(input$yrs[1],"-",input$yrs[2],input$city,mo,units,input$var)),
xlab=xlabel,col=clr,cex.main=1.3,cex.axis=1.3,cex.lab=1.3,prob=T)
if(length(input$hio)) if(input$hio) rug(x)
if(length(input$hdc)) if(input$hdc & length(input$hdcBW)) lines(density(na.omit(x),adjust=input$hdcBW),lwd=2)
}
} else if(!length(input$multiplot)){
if(n>1) layout(matrix(1:(n+n%%2),ceiling(n/2),2,byrow=T))
dat2.cities <- dat2()$City
for(i in 1:n){
x <- as.numeric(as.matrix(subset(dat2(),dat2.cities==input$city[i],select=4)))
if(!all(is.na(x))){
hist(x,breaks=h.brks,main=gsub(" "," ",paste(input$yrs[1],"-",input$yrs[2],input$city[i],mo,units,input$var)),
xlab=xlabel,col=clr,cex.main=1.3,cex.axis=1.3,cex.lab=1.3,prob=T)
if(length(input$hio)) if(input$hio) rug(x)
if(length(input$hdc)) if(input$hdc & length(input$hdcBW)) lines(density(na.omit(x),adjust=input$hdcBW),lwd=2)
}
}
} else if(length(input$multiplot)){
if(n>1 & input$multiplot=="Separate histograms") layout(matrix(1:(n+n%%2),ceiling(n/2),2,byrow=T))
if(input$multiplot=="Separate histograms"){
dat2.cities <- dat2()$City
for(i in 1:n){
x <- as.numeric(as.matrix(subset(dat2(),dat2.cities==input$city[i],select=4)))
if(!all(is.na(x))){
hist(x,breaks=h.brks,main=gsub(" "," ",paste(input$yrs[1],"-",input$yrs[2],input$city[i],mo,units,input$var)),
xlab=xlabel,col=clr,cex.main=1.3,cex.axis=1.3,cex.lab=1.3,prob=T)
if(length(input$hio)) if(input$hio) rug(x)
if(length(input$hdc)) if(input$hdc & length(input$hdcBW)) lines(density(na.omit(x),adjust=input$hdcBW),lwd=2)
}
}
} else if(n>1 & input$multiplot=="Common-axis density estimation plots"){
data <- dat()
if(length(input$mo2)>1) if(length(input$stat)) if(input$stat!="None") data <- dat2()
plot.multiDens(data,input$city,stat=input$stat,n.mo=length(input$mo2),main=gsub(" "," ",paste(input$yrs[1],"-",input$yrs[2],mo,units,input$var)),
xlab=xlabel,cex.main=1.3,cex.axis=1.3,cex.lab=1.3)
}
}
}
},
height=htfun, width="auto"
)
# Text summary of the currently selected data.
# Prints summary() of the city columns of dat() (the first three columns are
# identifiers and are dropped). When multiple months and a seasonal statistic
# are selected, a second summary of the per-city seasonal values from dat2()
# is appended to the printed list.
output$summary <- renderPrint({
if(!is.null(dat())){
# first list element: summary of the raw monthly values
x <- list(summary(dat()[-c(1:3)]))
if(input$mo!="Choose multiple") mo <- input$mo else mo <- input$mo2
if(length(mo)>1) mo <- paste(mo[1],"-",mo[length(mo)]) ## Still need to ensure that months must be consecutive.
names(x) <- names.x <- paste(input$yrs[1],"-",input$yrs[2],mo,"City",input$var,"Data")
if(length(input$stat)>0 & length(input$mo2)>1){
if(input$stat!="None"){
# build a matrix of seasonal-statistic values, one column per selected city,
# and summarise it as the second list element
dat2.cities <- dat2()$City
x2 <- c()
for(i in 1:ncol(x[[1]])) x2 <- cbind(x2, as.numeric(as.matrix(subset(dat2(),dat2.cities==input$city[i],select=4))))
x2 <- summary(x2)
colnames(x2) <- colnames(x[[1]])
x <- list(x[[1]],x2)
names(x) <- c(names.x,paste("Distribution Summary of Seasonal",input$stat,input$var))
}
}
x
}
})
# Raw data table for the current selection (empty until dat() is available).
output$table <- renderTable({
  if (!is.null(dat())) dat()
})

# Explanatory-variable picker for the regression tab; only rendered once at
# least one city has been chosen.
output$regInputX <- renderUI({
  if (length(input$city) > 0) {
    selectInput("regX", "Explanatory variable(s)",
                c("Year", "Precipitation", "Temperature"), selected = "Year")
  }
})

# Response-variable picker for the regression tab.
output$regInputY <- renderUI({
  if (length(input$city) > 0) {
    selectInput("regY", "Response variable",
                c("Year", "Precipitation", "Temperature"), selected = "Precipitation")
  }
})

# Month-conditioning picker ("All" or a single month).
output$regCondMo <- renderUI({
  if (length(input$city) > 0) {
    selectInput("regbymo", "Select consecutive months:", c("All", mos), selected = "All")
  }
})
# Reactive: one data frame per selected city for the regression tab.
# For each city, the master data set is restricted to the chosen year range
# and reshaped (reshape2::dcast) to one row per Year/Month with
# Precipitation and Temperature columns; optionally filtered to one month.
reg.dat <- reactive({
if(length(input$city) & length(input$yrs) & length(input$regbymo)){
d <- list()
for(i in 1:length(input$city)){
# spread the Variable column into one column per climate variable
d.tmp <- dcast(subset(DATASET(),Year>=input$yrs[1] & Year<=input$yrs[2],select=c(1:3,which(names(DATASET())==input$city[i]))),Year+Month ~Variable,value=input$city[i])
names(d.tmp)[3:4] <- c("Precipitation","Temperature")
if(length(input$regbymo)) if(input$regbymo!="All") d.tmp <- subset(d.tmp,d.tmp$Month==input$regbymo)
d[[i]] <- d.tmp
}
d
}
})
# Reactive: one model-formula string per selected city, e.g. "Y ~ X1+X2".
# NULL until all required inputs exist. The formula is identical for every
# city, so it is built once and replicated.
form <- reactive({
  ready <- length(input$city) && length(input$yrs) && length(input$regX) &&
    length(input$regY) && length(input$regbymo)
  if (!ready) return(NULL)
  rep(paste(input$regY, "~", paste(input$regX, collapse = "+")), length(input$city))
})
# Reactive: list of per-city linear models, fitting the formula from form()
# against the matching data frame from reg.dat(). Elements are named
# "<city> <formula>".
lm1 <- reactive({
if(length(input$city) & length(input$regY) & length(input$regX) & length(input$yrs) & length(input$regbymo) & !is.null(form())){
lm.list <- list()
for(i in 1:length(input$city)) lm.list[[i]] <- lm(formula=as.formula(form()[i]),data=reg.dat()[[i]])
names(lm.list) <- paste(input$city,form())
lm.list
}
})
# Plot-option checkboxes for the regression tab. Each appears only after a
# city and both regression variables have been selected.
output$reglines <- renderUI({
  if (length(input$city) > 0 && length(input$regY) > 0 && length(input$regX) > 0)
    checkboxInput("reglns", "Show time series line(s)", FALSE)
})
output$regpoints <- renderUI({
  if (length(input$city) > 0 && length(input$regY) > 0 && length(input$regX) > 0)
    checkboxInput("regpts", "Show points", TRUE)
})
output$regablines <- renderUI({
  if (length(input$city) > 0 && length(input$regY) > 0 && length(input$regX) > 0)
    checkboxInput("regablns", "Show regression line(s)", FALSE)
})
output$regGGPLOT <- renderUI({
  if (length(input$city) > 0 && length(input$regY) > 0 && length(input$regX) > 0)
    checkboxInput("reg.ggplot", "Switch to ggplot version", FALSE)
})
# Confidence-band option: only offered once regression lines are enabled.
output$regGGPLOTse <- renderUI({
  if (length(input$city) > 0 && length(input$regY) > 0 && length(input$regX) > 0 &&
      length(input$regablns) > 0) {
    if (input$regablns)
      checkboxInput("reg.ggplot.se", "Show shaded confidence band for mean response", FALSE)
  }
})
# Main regression plot. Two rendering paths -- base graphics and ggplot2 --
# toggled by input$reg.ggplot. "Year" values are respread into an evenly
# spaced sequence so that monthly observations within one year do not stack
# at the same coordinate.
output$regplot <- renderPlot({
if(length(input$city) & length(input$regY) & length(input$regX) & length(input$reglns) & length(input$yrs) & length(reg.dat())){
# common y-limits across all selected cities
ylm <- do.call(range,lapply(reg.dat(),function(x) range(x[[input$regY]],na.rm=T)))
alpha <- 70  # two-digit hex alpha suffix appended to the city colours
x <- reg.dat()[[1]][[input$regX]]
n <- length(x)
xr <- range(x)
if(input$regX=="Year") { x <- seq(xr[1],xr[2]+1,len=n+1); x <- x[-c(n+1)] }
y <- reg.dat()[[1]][[input$regY]]
yr <- range(y)
if(input$regY=="Year") { y <- seq(yr[1],yr[2]+1,len=n+1); y <- y[-c(n+1)] }
if(!input$reg.ggplot){
# ---- base-graphics version: empty frame, then one overlay per city ----
plot(0,0,type="n",xlim=range(x),ylim=ylm,xlab=input$regX,ylab=input$regY,main=form()[1],cex.main=1.3,cex.axis=1.3,cex.lab=1.3)
for(i in 1:length(input$city)){
if(input$regX!="Year") x <- reg.dat()[[i]][[input$regX]]
if(input$regY!="Year") y <- reg.dat()[[i]][[input$regY]]
if(length(input$reg.ggplot.se)) {
if(input$reg.ggplot.se){
# shaded confidence band for the mean response, drawn as a polygon
d1 <- data.frame(x,y)
names(d1) <- c(input$regX,input$regY)
pred <- predict(lm1()[[i]],d1,interval="confidence")
CIL <- pred[,'lwr']
CIU <- pred[,'upr']
ord <- order(d1[,1])
xvec <- c(d1[ord,1],tail(d1[ord,1],1),rev(d1[ord,1]),d1[ord,1][1])
yvec <- c(CIL[ord],tail(CIU[ord],1),rev(CIU[ord]),CIL[ord][1])
polygon(xvec,yvec,col="#00000070",border=NA)
}
}
if(input$reglns) lines(x[order(x)],y[order(x)], lty=1, lwd=1, col=paste(clrs[i],alpha,sep=""))
if(input$regpts) points(x,y, pch=21, col=1, bg=paste(clrs[i],alpha,sep=""), cex=1)
if(input$regablns) abline(lm1()[[i]],col=clrs[i],lwd=2)
}
}
if(input$reg.ggplot){
# ---- ggplot2 version: stack all cities into one long data frame ----
if(input$regX=="Year") x <- rep(x,length(input$city)) else x <- c()
if(input$regY=="Year") y <- rep(y,length(input$city)) else y <- c()
for(i in 1:length(input$city)){
if(input$regX!="Year") x <- c(x, reg.dat()[[i]][[input$regX]])
if(input$regY!="Year") y <- c(y, reg.dat()[[i]][[input$regY]])
}
cond <- rep(input$city,each=n)
d2 <- data.frame(cond,x,y)
names(d2) <- c("City",input$regX,input$regY)
p <- ggplot(d2, aes_string(x=input$regX, y=input$regY, color="City")) +
scale_colour_hue(l=50) # Use a darker palette
if(input$regablns){
if(length(input$reg.ggplot.se)) SE <- input$reg.ggplot.se else SE <- F
p <- p +
geom_smooth(method=lm, # Add linear regression lines
se=SE, # shaded confidence region on/off
fullrange=T)
}
# BUG FIX: geom_line() has no 'shape' parameter (shape applies to points
# only; ggplot2 warns "Ignoring unknown parameters"), so it was dropped.
if(input$reglns) p <- p + geom_line()
if(input$regpts) p <- p + geom_point(shape=1)
print(p)
}
}
}, height=function(){ w <- session$clientData$output_regplot_width; if(length(w)) return(round(0.5*w)) else return("auto") }, width="auto"
)
# Printed summaries of every fitted per-city regression model.
output$regsum <- renderPrint({
  ok <- length(input$city) > 0 && length(input$regY) > 0 &&
    length(input$regX) > 0 && !is.null(form())
  if (ok) lapply(lm1(), summary)
})
# Page header: one-line description of the active data set plus the SNAP
# logo linked to snap.uaf.edu.
output$header <- renderUI({
  if (input$dataset == "Weather stations (CRU-substituted NAs)" ||
      input$dataset == "Weather stations (w/ missing data)") {
    txt <- paste("Weather station historical time series climate data for",
                 length(city.names()), "AK cities")
  } else if (input$dataset == "2-km downscaled CRU 3.1") {
    txt <- paste("2-km downscaled CRU 3.1 historical time series climate data for",
                 length(city.names()), "AK cities")
  } else {
    # ROBUSTNESS FIX: previously 'txt' was left undefined (an error) for any
    # other data set choice; fall back to a generic description instead.
    txt <- paste("Historical time series climate data for",
                 length(city.names()), "AK cities")
  }
  txt <- HTML(paste(txt, '<a href="http://snap.uaf.edu" target="_blank"><img id="stats_logo" align="right" alt="SNAP Logo" src="./img/SNAP_acronym_100px.png" /></a>', sep = "", collapse = ""))
})
})
|
# Random Forest regression on the Position_Salaries data set.
# Fits regressors with an increasing number of trees and plots the resulting
# step-shaped prediction curve against the observations.
dataset <- read.csv('Position_Salaries.csv')
dataset <- dataset[2:3]  # keep only Level and Salary

library(randomForest)
library(ggplot2)  # BUG FIX: ggplot() was used below without loading ggplot2

# Fit a random forest with `n_trees` trees, print the prediction at
# Level = 6.5, and plot the prediction curve on a grid of spacing `step`.
# Explicit print() calls preserve the console/plot output the original
# top-level statements produced.
fit_and_plot <- function(n_trees, step) {
  set.seed(123)  # reproducible bootstrap samples
  regressor <- randomForest(x = dataset[1], y = dataset$Salary, ntree = n_trees)
  print(predict(regressor, newdata = data.frame(Level = 6.5)))
  x_seq <- seq(min(dataset$Level), max(dataset$Level), step)
  print(
    ggplot() +
      geom_point(aes(x = dataset$Level, y = dataset$Salary)) +
      geom_line(aes(x = x_seq, y = predict(regressor, newdata = data.frame(Level = x_seq))))
  )
  invisible(regressor)
}

fit_and_plot(10, 0.01)
# this is the higher resolution Decision Tree
fit_and_plot(100, 0.001)
fit_and_plot(300, 0.001)
| /Part 2 - Regression/Section 9 - Random Forest Regression/RFinR.R | no_license | ytnvj2/Machine_Learning_AZ | R | false | false | 1,133 | r | dataset = read.csv('Position_Salaries.csv')
# Duplicate copy of the Random Forest regression script above (dataset
# residue). NOTE(review): assumes `dataset` was just read from
# Position_Salaries.csv and that ggplot2 is already attached --
# library(ggplot2) is never called in this file.
dataset=dataset[2:3]
library(randomForest)
# ntree = 10: small forest, coarse prediction grid (step 0.01)
set.seed(123)
regressor=randomForest(x=dataset[1],y=dataset$Salary,ntree = 10)
predict(regressor,newdata = data.frame(Level=6.5))
x_seq=seq(min(dataset$Level),max(dataset$Level),0.01)
ggplot()+
geom_point(aes(x=dataset$Level,y=dataset$Salary))+
geom_line(aes(x=x_seq,y=predict(regressor,newdata = data.frame(Level=x_seq))))
# this is the higher resolution Decision Tree
# ntree = 100, finer prediction grid (step 0.001)
set.seed(123)
regressor=randomForest(x=dataset[1],y=dataset$Salary,ntree = 100)
predict(regressor,newdata = data.frame(Level=6.5))
x_seq=seq(min(dataset$Level),max(dataset$Level),0.001)
ggplot()+
geom_point(aes(x=dataset$Level,y=dataset$Salary))+
geom_line(aes(x=x_seq,y=predict(regressor,newdata = data.frame(Level=x_seq))))
# ntree = 300
set.seed(123)
regressor=randomForest(x=dataset[1],y=dataset$Salary,ntree = 300)
predict(regressor,newdata = data.frame(Level=6.5))
x_seq=seq(min(dataset$Level),max(dataset$Level),0.001)
ggplot()+
geom_point(aes(x=dataset$Level,y=dataset$Salary))+
geom_line(aes(x=x_seq,y=predict(regressor,newdata = data.frame(Level=x_seq))))
|
# This is package fishMod
# Fit a delta-lognormal ("hurdle") model: a binomial GLM for whether the
# response is positive, combined with a lognormal linear model for the
# strictly positive responses.
#   ln.form     -- formula for the lognormal component (response on the
#                  natural scale; it is logged internally; offset honoured).
#   binary.form -- one-sided formula for the presence/absence component.
#   data        -- data frame (or coercible) containing all variables.
#   residuals   -- if TRUE, compute quantile and Pearson residuals.
# Returns an object of class "DeltaLNmod": coefficients, log-likelihood,
# AIC/BIC, fitted means and variances on the response scale, residuals,
# and the two component fits (lnMod, binMod).
"deltaLN" <-
function ( ln.form, binary.form, data, residuals=TRUE)
{
temp <- model.frame( ln.form, data=as.data.frame( data))
X <- model.matrix( ln.form, temp)
offy <- model.offset( temp)
if( is.null( offy))
offy <- rep( 0, nrow( X))
# indicator of positive observations drives the binary component
nonzeros <- model.response( temp)>0
temp.ln <- model.frame( ln.form, data=as.data.frame( data[nonzeros,]))
nz.y <- log( model.response( temp.ln))
nz.X <- model.matrix( ln.form, temp.ln)
nz.offset <- model.offset( temp.ln)
if (is.null(nz.offset))
nz.offset <- rep(0, length(nz.y))
temp.bin <- model.frame( binary.form, data=as.data.frame( data))
temp.bin <- model.matrix( binary.form, temp.bin)
bin.X <- cbind( as.numeric( nonzeros), temp.bin)
colnames( bin.X) <- c("positive", colnames( bin.X)[-1])
# if( length( colnames( temp.bin))==1)
# binary.form <- as.formula( paste( colnames( temp.bin)[1],"~1"))
# else
# binary.form <- as.formula( paste( colnames( temp.bin)[1],"~1+",paste( colnames( temp.bin)[-1], collapse="+")))
# positive data: lognormal component fitted via lm() on log(response)
lnMod <- lm( nz.y~-1+nz.X, offset=nz.offset)#ln.form, temp.ln)
# binary glm for presence/absence
binMod <- glm( bin.X[,"positive"]~-1+bin.X[,-1], family=binomial())
stdev <- summary( lnMod)$sigma
coefs <- list( binary=binMod$coef, ln=lnMod$coef, ln.sigma2=stdev^2)
# joint log-likelihood: lognormal part (positives only) + Bernoulli part (all)
logl <- sum( dlnorm( exp(nz.y), lnMod$fitted, sdlog=stdev, log=TRUE)) + sum( dbinom( nonzeros, size=1, prob=binMod$fitted, log=TRUE))
n <- nrow( temp)
ncovars <- length( lnMod$coef) + 1 + length( binMod$coef)
nnonzero <- sum( nonzeros)
nzero <- n-nnonzero
AIC <- -2*logl + 2*ncovars
BIC <- -2*logl + log( n)*ncovars
fitted <- var <- rep( -999, n)
# colnames( temp)[ncol( temp)] <- "nz.offset"
# lpv <- predict( lnMod, temp)
lpv <- X %*% lnMod$coef + offy
# lognormal mean on the response scale: exp(mu + sigma^2/2)
pv <- exp( lpv + 0.5*(stdev^2))
fitted <- binMod$fitted * pv
var <- binMod$fitted*exp( 2*lpv+stdev^2)*(exp( stdev^2)-binMod$fitted)
if( residuals){
resids <- matrix( rep( -999, 2*n), ncol=2, dimnames=list(NULL,c("quantile","Pearson")))
# quantile residuals: exact CDF for positives, randomised uniformly within
# the point mass at zero; then mapped through qnorm
resids[nonzeros,"quantile"] <- (1-binMod$fitted[nonzeros]) + binMod$fitted[nonzeros] * plnorm( temp.ln[,1], lnMod$fitted, stdev, log.p=FALSE)
resids[!nonzeros,"quantile"] <- runif( n=sum( !nonzeros), min=rep( 0, nzero), max=1-binMod$fitted[!nonzeros])
resids[,"quantile"] <- qnorm( resids[,"quantile"])
resids[,"Pearson"] <- ( model.response(temp)-fitted) / sqrt(var)
}
else
resids <- NULL
res <- list( coefs=coefs, logl=logl, AIC=AIC, BIC=BIC, fitted=fitted, fittedVar=var, residuals=resids, n=n, ncovars=ncovars, nzero=nzero, lnMod=lnMod, binMod=binMod)
class( res) <- "DeltaLNmod"
return( res)
}
# Density of the Poisson-Gamma (compound-Poisson / Tweedie) distribution,
# evaluated by compiled code (.Call "dTweedie").
#   y      -- values of the random variable; may be a vector.
#   lambda -- Poisson mean(s); length 1 or length(y).
#   mu.Z   -- Gamma mean(s); length 1 or length(y).
#   alpha  -- Gamma shape parameter(s), so that var(Z) = mu.Z^2/alpha;
#             length 1 or length(y).
#   LOG    -- if TRUE (default), return the density on the log scale.
# Scalar parameters are recycled to length(y); incompatible lengths print a
# message and return NULL.
"dPoisGam" <-
function ( y, lambda, mu.Z, alpha, LOG=TRUE)
{
#function to calculate Random sum (Tweedie) densities.
#y is the value of the r.v. Can be a vector
#mu.N is the mean of the Poisson summing r.v. Can be a vector of length(y)
#mu.Z is the mean of the Gamma rv Can be a vector of length(y)
#alpha is the `other' parameter of the gamma distribution s.t. var = ( mu.Z^2)/alpha Can be a vector of length(y)
#If mu.N, mu.Z or alpha are scalars but y isn't then they will be used for all y. If lengths mis-match then error
#LOG=TRUE gives the density on the log scale
# if( any( is.null( c( y, mu.N, mu.Z, alpha)))){
# print( "Error: null input values -- please check. Null values are:")
# tmp <- double( is.null( c( y, mu.N, mu.Z, alpha)))
# names( tmp) <- c( "y", "mu.N","mu.Z","alpha")
# print( tmp)
# print( "Exitting")
# return()
# }
mu.N <- lambda
if( !all( is.element( c( length( mu.N), length( mu.Z), length( alpha)), c( length( y), 1)))){
print( "Error: length of parameter vectors does not match length of random variable vector")
print( "Exitting")
return()
}
# recycle scalar parameters to match y
if( length( mu.N) != length( y))
mu.N <- rep( mu.N, length( y))
if( length( mu.Z) != length( y))
mu.Z <- rep( mu.Z, length( y))
if( length( alpha) != length( y))
alpha <- rep( alpha, length( y))
res <- .Call( "dTweedie", as.numeric( y), as.numeric( mu.N), as.numeric( mu.Z), as.numeric( alpha), as.integer( LOG), PACKAGE="fishMod")
return( res)
}
# Derivatives of the Poisson-Gamma (Tweedie) log-density with respect to the
# parameters (lambda, mu.Z, alpha), evaluated by compiled code.
#   y         -- observation vector.
#   lambda    -- Poisson mean(s); length 1 or length(y).
#   mu.Z      -- Gamma mean(s); length 1 or length(y).
#   alpha     -- Gamma shape(s), so var(Z) = mu.Z^2/alpha; length 1 or length(y).
#   do.checks -- validate/recycle arguments before calling the C routine;
#                FALSE skips validation for speed (caller must get it right).
# Returns a length(y) x 3 matrix with columns "lambda", "mu.Z", "alpha",
# or NULL (after a printed message) when validation fails.
"dPoisGamDerivs" <-
function ( y=NULL, lambda=NULL, mu.Z=NULL, alpha=NULL, do.checks=TRUE)
{
mu.N <- lambda
if( do.checks){
# BUG FIX: the old NULL check did is.null(c(y, mu.N, mu.Z, alpha)) -- c()
# drops NULLs so individual NULLs went undetected, and the subsequent
# names<- on a length-1 vector errored. Test each argument separately.
nulls <- c( y=is.null( y), mu.N=is.null( mu.N), mu.Z=is.null( mu.Z), alpha=is.null( alpha))
if( any( nulls)){
print( "Error: null input values -- please check. Null values are:")
print( nulls)
print( "Exitting")
return()
}
if( !all( is.element( c( length( mu.N), length( mu.Z), length( alpha)), c( length( y), 1)))){
print( "Error: length of parameter vectors does not match length of random variable vector")
print( "Exitting")
# BUG FIX: previously fell through to the .Call below with incompatible
# argument lengths; bail out here, as dPoisGam() does.
return()
}
# recycle scalar parameters to match y
if( length( mu.N) != length( y))
mu.N <- rep( mu.N, length( y))
if( length( mu.Z) != length( y))
mu.Z <- rep( mu.Z, length( y))
if( length( alpha) != length( y))
alpha <- rep( alpha, length( y))
}
res <- .Call( "dTweedieDeriv", as.numeric( y), as.numeric( mu.N), as.numeric( mu.Z), as.numeric( alpha), PACKAGE="fishMod")
colnames( res) <- c("lambda","mu.Z","alpha")
return( res)
}
# Tweedie density for power 1 < p < 2: re-parameterise the mean/dispersion/
# power form (mu, phi, p) into the Poisson-Gamma form (lambda, mu.Z, alpha)
# and delegate to dPoisGam().
"dTweedie" <-
function ( y, mu, phi, p, LOG=TRUE)
{
  pois.mean <- ( mu^( 2-p)) / ( phi*(2-p))          # lambda
  gamma.shape <- ( 2-p) / ( p-1)                    # alpha
  gamma.mean <- gamma.shape * ( phi*(p-1)*mu^(p-1)) # mu.Z = alpha * tau
  dPoisGam( y, pois.mean, gamma.mean, gamma.shape, LOG)
}
# Negative weighted log-likelihood of the Poisson-Gamma regression as a
# function of the stacked parameter vector (Poisson coefficients, Gamma
# coefficients, then log(alpha) when alpha is not fixed). Both means use a
# log link; the offset applies to the Poisson linear predictor.
"ldPoisGam.lp" <-
function ( parms, y, X.p, X.g, offsetty, alpha, wts=rep( 1, length( y)))
{
  n.p <- ncol( X.p)
  lam <- exp( X.p %*% parms[seq_len( n.p)] + offsetty)
  muz <- exp( X.g %*% parms[n.p + seq_len( ncol( X.g))])
  shape <- if( is.null( alpha)) exp( tail( parms, 1)) else alpha
  -sum( wts * dPoisGam( y, lambda=lam, mu.Z=muz, alpha=shape, LOG=TRUE))
}
# Gradient of ldPoisGam.lp (negative weighted log-likelihood) with respect
# to the stacked parameter vector, using dPoisGamDerivs() and the chain
# rule through the log links. When alpha is fixed, the trailing
# d/dlog(alpha) entry is dropped from the result.
"ldPoisGam.lp.deriv" <-
function ( parms, y, X.p, X.g, offsetty, alpha, wts=rep( 1, length( y)))
{
mu.p <- exp( X.p %*% parms[1:ncol( X.p)] + offsetty)
mu.g <- exp( X.g %*% parms[ncol( X.p)+1:ncol( X.g)])
if( is.null( alpha))
alpha1 <- exp( tail( parms,1))
else
alpha1 <- alpha
dTweedparms <- - wts * dPoisGamDerivs( y, lambda=mu.p, mu.Z=mu.g, alpha=alpha1) #wts should replicate appropriately
# chain rule through the log links: d/dbeta = dl/dlambda * lambda * x, etc.
deri.lambda <- ( as.numeric( dTweedparms[,"lambda"] * mu.p)) * X.p
deri.mu <- ( as.numeric( dTweedparms[,"mu.Z"] * mu.g)) * X.g
deri.alpha <- as.numeric( dTweedparms[,"alpha"]) * alpha1
deri.all <- c( colSums( deri.lambda), colSums( deri.mu), sum( deri.alpha))
if( is.null( alpha))
return( deri.all)
else
return( deri.all[-length( deri.all)])
}
# Negative weighted log-likelihood of the Tweedie GLM as a function of the
# stacked parameter vector: regression coefficients first, then -- when not
# fixed by the caller -- phi and/or p appended in that order.
"ldTweedie.lp" <-
function ( parms, y, X.p, offsetty, phi, p, wts=rep( 1, length( y)))
{
  mu <- exp( X.p %*% parms[seq_len( ncol( X.p))] + offsetty)
  idx <- ncol( X.p)
  if( is.null( phi)) {
    idx <- idx + 1
    phi <- parms[idx]
  }
  if( is.null( p)) {
    idx <- idx + 1
    p <- parms[idx]
  }
  # map (mu, phi, p) into the Poisson-Gamma parameterisation
  lambda <- ( mu^( 2-p)) / ( phi*(2-p))
  alpha <- ( 2-p) / ( p-1)
  mu.Z <- alpha * ( phi*(p-1)*(mu^(p-1)))
  -sum( wts * dPoisGam( y, lambda=lambda, mu.Z=mu.Z, alpha=alpha, LOG=TRUE))
}
# Gradient of ldTweedie.lp with respect to the stacked parameter vector
# (regression coefficients[, phi][, p]). Derivatives in the (lambda, mu.Z,
# alpha) parameterisation come from dPoisGamDerivs(); they are mapped back
# to (beta, phi, p) with the chain rule.
"ldTweedie.lp.deriv" <-
function ( parms, y, X.p, offsetty, phi, p, wts=rep( 1, length( y)))
{
mu <- exp( X.p %*% parms[1:ncol( X.p)] + offsetty)
# flags record which of phi / p are being estimated (appended to parms)
p.flag <- phi.flag <- FALSE
if( is.null( phi) & is.null( p)){
p.flag <- phi.flag <- TRUE
phi <- parms[ncol( X.p) + 1]
p <- parms[ncol( X.p) + 2]
}
if( is.null( phi) & !is.null( p)){
phi <- parms[ncol( X.p)+1]
phi.flag <- TRUE
}
if( !is.null( phi) & is.null( p)){
p <- parms[ncol( X.p)+1]
p.flag <- TRUE
}
# Poisson-Gamma parameterisation of the Tweedie model
lambda <- ( mu^( 2-p)) / ( phi*(2-p))
alpha <- ( 2-p) / ( p-1)
tau <- phi*(p-1)*mu^(p-1)
mu.Z <- alpha * tau
dTweedparms <- -wts * dPoisGamDerivs( y, lambda=lambda, mu.Z=mu.Z, alpha=alpha)
# chain rule: partials of (lambda, mu.Z, alpha) w.r.t. mu, then through the
# log link and the design matrix
DTweedparmsDmu <- matrix( c( ( mu^(1-p)) / phi, alpha*phi*( ( p-1)^2)*( mu^(p-2)), rep( 0, length( mu))), nrow=3, byrow=T)
tmp <- rowSums( dTweedparms * t( DTweedparmsDmu))
tmp <- tmp * mu
tmp <- apply( X.p, 2, function( x) x*tmp)
derivs <- colSums( tmp)
if( phi.flag){
# partials of (lambda, mu.Z, alpha) w.r.t. phi
DTweedparmsDphi <- matrix( c( -( ( mu^(2-p)) / ( ( phi^2)*(2-p))), alpha*( p-1)*( mu^( p-1)), rep( 0, length( mu))), nrow=3, byrow=T)
tmpPhi <- rowSums( dTweedparms * t( DTweedparmsDphi)) #vectorised way of doing odd calculation
derivs <- c( derivs, sum( tmpPhi))
names( derivs)[length( derivs)] <- "phi"
}
if( p.flag){
# partials of (lambda, mu.Z, alpha) w.r.t. the power p
dalphadp <- -( 1+alpha) / ( p-1)
DTweedparmsDp <- matrix( c( lambda*( 1/(2-p) - log( mu)), mu.Z*( dalphadp/alpha + 1/( p-1) + log( mu)), rep( dalphadp, length( y))), nrow=3, byrow=T)
tmpP <- rowSums( dTweedparms * t( DTweedparmsDp))
derivs <- c( derivs, sum( tmpP))
names( derivs)[length( derivs)] <- "p"
}
return( derivs)
}
# Numerical Jacobian of f: R^n -> R^m by central finite differences
# (Fornberg & Sloan, Acta Numerica 1994, Table 1, p. 213).
#   x0      -- point at which to differentiate (length n).
#   f       -- function to differentiate; called as f(x, ...).
#   m       -- output dimension of f; if NULL it is found by evaluating f(x0).
#   D.accur -- order of accuracy: 2 (two-point) or 4 (four-point, default).
#   eps     -- optional fixed step size; if NULL, the per-coordinate step is
#              .Machine$double.eps^(1/3) * abs(x0).
# Returns an m x n matrix of partial derivatives.
"nd2" <-
function(x0, f, m=NULL, D.accur=4, eps=NULL, ...) {
D.n <- length(x0)
if (is.null(m)) {
D.f0 <- f(x0, ...)
m <- length(D.f0)
}
# central-difference weights (replicated over the m outputs) and offsets
if (D.accur == 2) {
D.w <- tcrossprod(rep(1, m), c(-1/2, 1/2))
D.co <- c(-1, 1)
} else {
D.w <- tcrossprod(rep(1, m), c(1/12, -2/3, 2/3, -1/12))
D.co <- c(-2, -1, 1, 2)
}
D.n.c <- length(D.co)
if (is.null(eps)) {
macheps <- .Machine$double.eps
D.h <- macheps^(1/3) * abs(x0)
} else {
# BUG FIX: the step vector must have one entry per coordinate of x0; it
# was previously rep(eps, D.accur), which gave NA steps (and NA output
# columns) whenever length(x0) > D.accur.
D.h <- rep(eps, D.n)
}
D.deriv <- matrix(NA, nrow=m, ncol=D.n)
for (ii in seq_len(D.n)) {
D.temp.f <- matrix(0, m, D.n.c)
for (jj in seq_len(D.n.c)) {
# perturb only coordinate ii
D.xd <- x0 + D.h[ii] * D.co[jj] * (seq_len(D.n) == ii)
D.temp.f[, jj] <- f(D.xd, ...)
}
D.deriv[, ii] <- rowSums(D.w * D.temp.f) / D.h[ii]
}
return(D.deriv)
}
# Namespace hook: load the package's compiled shared libraries when the
# namespace is loaded. Handles sub-architecture directories (e.g. libs/x64)
# and loads every shared object found under libs/.
".onLoad" <-
function( libname, pkgname){
# Generic DLL loader
dll.path <- file.path( libname, pkgname, 'libs')
if( nzchar( subarch <- .Platform$r_arch))
dll.path <- file.path( dll.path, subarch)
# regex matching the platform's dynamic-library extension at end of name
this.ext <- paste( sub( '.', '[.]', .Platform$dynlib.ext, fixed=TRUE), '$', sep='')
dlls <- dir( dll.path, pattern=this.ext, full.names=FALSE)
names( dlls) <- dlls
if( length( dlls))
lapply( dlls, function( x) library.dynam( sub( this.ext, '', x), package=pkgname, lib.loc=libname))
}
# Fit a Poisson-Gamma (Tweedie) regression with separate log-linear models
# for the Poisson mean (p.form, which also carries the response and any
# offset) and the Gamma mean (g.form, one-sided).
#   wts       -- optional observation weights (default all 1).
#   alpha     -- optional fixed Gamma shape; if NULL, log(alpha) is estimated.
#   inits     -- optional starting values (Poisson coefs, Gamma coefs[, log alpha]).
#   vcov      -- if TRUE, compute the variance matrix of the estimates by
#                numerically differentiating the analytic gradient (nd2()).
#   residuals -- if TRUE, compute randomised quantile residuals.
#   trace     -- verbosity, passed through to nlminb.
# Returns an object of class "pgm".
"pgm" <-
function ( p.form, g.form, data, wts=NULL, alpha=NULL, inits=NULL, vcov=TRUE, residuals=TRUE, trace=1)
{
if( is.null( wts))
wts <- rep( 1, nrow( data))
# temp.p <- model.frame( p.form, data=as.data.frame( data))#, weights=wts)
# place the weights in the formula's environment so model.frame() finds them
e<-new.env()
e$wts <- wts
environment(p.form) <- e
temp.p <- model.frame( p.form, data=as.data.frame( data), weights=wts)
y <- model.response( temp.p)
names( y) <- NULL
X.p <- model.matrix( p.form, data)
offset.p <- model.offset( temp.p)
if ( is.null( offset.p))
offset.p <- rep( 0, nrow( temp.p))
wts1 <- as.vector( model.weights( temp.p))
# if( is.null( wts))
# wts <- rep( 1, length( y))
# graft the response from p.form onto the one-sided g.form so the Gamma
# design matrix can be built from the same data
tmp.form <- g.form
tmp.form[[2]] <- p.form[[2]]
tmp.form[[3]] <- g.form[[2]]
temp.g <- model.frame( tmp.form, as.data.frame( data))
names( y) <- NULL
X.g <- model.matrix( tmp.form, data)
# if( is.null( weights)){
# weights <- rep( 1, length( y))
# if( trace!=0)
# print( "all weights are unity")
# }
# if( length( weights) != length( y)){
# print( "number of weights does not match number of observations")
# return( NULL)
# }
# default starting values: all zero (plus log(alpha)=0 when estimated)
if( is.null( inits) & is.null( alpha))
inits <- rep( 0, ncol( X.p)+ncol( X.g)+1)
if( is.null( inits) & !is.null( alpha))
inits <- rep( 0, ncol( X.p)+ncol( X.g))
if( !is.element( length( inits),ncol( X.p)+ncol( X.g)+0:1)) {
print( "Initial values supplied are of the wrong length -- please check")
tmp <- c( length( inits), ncol( X.p)+ncol( X.g)+1)
names( tmp) <- c( "inits", "ncolDesign")
return( tmp)
}
if( trace!=0){
print( "Estimating parameters")
if( is.null( alpha))
cat("iter:", "-logl", paste( colnames(X.p),"Poisson",sep='.'), paste( colnames(X.g),"Gamma",sep='.'), "log( alpha)", "\n", sep = "\t")
else
cat("iter:", "-logl", paste( colnames(X.p),"Poisson",sep='.'), paste( colnames(X.g),"Gamma",sep='.'), "\n", sep = "\t")
}
# maximise the likelihood (minimise the negative log-likelihood) with the
# analytic gradient
fm <- nlminb( start=inits, objective=ldPoisGam.lp, gradient=ldPoisGam.lp.deriv, hessian = NULL, y=y, X.p=X.p, X.g=X.g, offsetty=offset.p, alpha=alpha, control=list(trace=trace), wts=wts1)
parms <- fm$par
if( is.null( alpha))
names( parms) <- c( paste( colnames(X.p),"Poisson",sep='.'), paste( colnames(X.g),"Gamma",sep='.'), "logalpha")
else
names( parms) <- c( paste( colnames(X.p),"Poisson",sep='.'), paste( colnames(X.g),"Gamma",sep='.'))
if( vcov){
if( trace!=0)
print( "Calculating variance matrix of estimates")
# numerically differentiate the score, symmetrise, and invert
vcovar <- nd2(x0=parms, f=ldPoisGam.lp.deriv, y=y, X.p=X.p, X.g=X.g, offsetty=offset.p, alpha=alpha, wts=wts1)
vcovar <- 0.5 * ( vcovar + t( vcovar))
vcovar <- solve( vcovar)
rownames( vcovar) <- colnames( vcovar) <- names( parms)
}
else{
if( trace!=0)
print( "Not calculating variance matrix of estimates")
vcovar <- NULL
}
scores <- -ldPoisGam.lp.deriv( parms=parms, y=y, X.p=X.p, X.g=X.g, offsetty=offset.p, alpha=alpha, wts=wts1)
if( trace !=0)
print( "Calculating means")
# fitted means: component means and their product (overall mean)
muLamb <- exp( X.p %*% parms[1:ncol( X.p)] + offset.p)
muMuZ <- exp( X.g %*% parms[ncol( X.p)+1:ncol( X.g)])
mu <- muLamb * muMuZ
fitMu <- cbind( mu, muLamb, muMuZ)
colnames( fitMu) <- c("total","Lambda","muZ")
if( residuals){
if( trace!=0)
print( "Calculating quantile residuals")
# quantile residuals, randomised within the point mass at zero
if( is.null( alpha)){
resids <- matrix( rep( pPoisGam( y, muLamb, muMuZ, exp( tail( parms,1))), 2), ncol=2)
resids[y==0,1] <- 0.5 * dPoisGam( y[y==0], muLamb[y==0], muMuZ[y==0], exp( tail( parms,1)), LOG=FALSE)
}
else{
resids <- matrix( rep( pPoisGam( y, muLamb, muMuZ, alpha),2), ncol=2)
resids[y==0,1] <- 0.5 * dPoisGam( y[y==0], muLamb[y==0], muMuZ[y==0], alpha, LOG=FALSE)
}
nzero <- sum( y==0)
resids[y==0,2] <- runif( nzero, min=rep( 0, nzero), max=2*resids[y==0,1])
resids <- qnorm( resids)
colnames( resids) <- c("expect","random")
}
else{
if( trace!=0)
print( "Not calculating quantile residuals")
resids <- NULL
}
if( trace!=0)
print( "Done")
# information criteria: do not count fixed parameters
ICadj <- 0
if( !is.null( alpha))
ICadj <- ICadj + 1
AIC <- -2*(-fm$objective) + 2*(length( parms)-ICadj)
BIC <- -2*(-fm$objective) + log( nrow( X.p))*(length( parms)-ICadj)
res <- list( coef=parms, logl=-fm$objective, scores=scores, vcov=vcovar, conv=fm$convergence, message=fm$message, niter=fm$iterations, evals=fm$evaluations, call=match.call(), fitted=fitMu, residuals=resids, AIC=AIC, BIC=BIC)
class( res) <- "pgm"
return( res)
}
# CDF of the Poisson-Gamma (Tweedie) distribution at q, evaluated as a
# Poisson-weighted series of incomplete-gamma terms (adapted from Dunn's
# ptweedie.series). Parameters follow dPoisGam(): lengths must be 1 or
# length(q). Returns a length(q) x 1 matrix of probabilities, including the
# point mass exp(-lambda) at zero.
"pPoisGam" <-
function ( q, lambda, mu.Z, alpha)
{
tmp <- c( length( q), length( lambda), length(mu.Z), length( alpha))
names( tmp) <- c( "n","lambda","mu.Z","alpha")
if( !all( is.element( tmp[-1], c( 1, tmp[1])))) {
print( "pPoisGam: error -- length of arguments are not compatible")
return( tmp)
}
# From here on adapted from Dunn's ptweedie.series: the CDF is
# sum_N dpois(N, lambda) * P(Gamma(N*alpha, scale) <= q); the index range
# [lo.N, hi.N] is chosen so that the (Stirling-approximated) log Poisson
# weight has fallen `drop` below its peak outside the range.
y <- q
drop <- 39
# walk N downward from max(lambda) to find the lower truncation point
lambdaMax <- N <- max(lambda)
logfmax <- -log(lambdaMax)/2
estlogf <- logfmax
while ((estlogf > (logfmax - drop)) & (N > 1)) {
N <- max(1, N - 2)
estlogf <- -lambdaMax + N * (log(lambdaMax) - log(N) + 1) - log(N)/2
}
lo.N <- max(1, floor(N))
# walk N upward from min(lambda) to find the upper truncation point
lambdaMin <- N <- min(lambda)
logfmax <- -log(lambdaMin)/2
estlogf <- logfmax
while (estlogf > (logfmax - drop)) {
N <- N + 1
estlogf <- -lambdaMin + N * (log(lambdaMin) - log(N) + 1) - log(N)/2
}
hi.N <- max(ceiling(N))
cdf <- matrix( 0, nrow=length( y), ncol=1) #array(dim = length(y), 0)
for (N in (lo.N:hi.N)) {
pois.den <- dpois(N, lambda)
# P(sum of N Gammas <= y) via the chi-square representation of the
# incomplete gamma function
incgamma.den <- pchisq(2 * as.numeric( y) * alpha / as.numeric( mu.Z), 2 * alpha * N)
cdf <- cdf + pois.den * incgamma.den
}
# add the discrete probability mass at zero (N = 0 term)
cdf <- cdf + exp(-lambda)
its <- hi.N - lo.N + 1  # number of series terms used (not returned)
return( cdf)
}
# Tweedie CDF for power 1 < p < 2: re-parameterise (mu, phi, p) into the
# Poisson-Gamma form and delegate to pPoisGam().
"pTweedie" <-
function ( q, mu, phi, p)
{
  pois.mean <- ( mu^( 2-p)) / ( phi*(2-p))          # lambda
  gamma.shape <- ( 2-p) / ( p-1)                    # alpha
  gamma.mean <- gamma.shape * ( phi*(p-1)*mu^(p-1)) # mu.Z = alpha * tau
  # require( tweedie)
  # ps <- ptweedie( as.numeric( q), mu=as.numeric( mu), phi=as.numeric( phi), power=as.numeric( p))
  pPoisGam( q, pois.mean, gamma.mean, gamma.shape)
}
# Draw n variates from the compound Poisson-Gamma (Tweedie) distribution:
# a Poisson(lambda) count N of Gamma summands with mean mu.Z and shape
# alpha. The sum of N iid Gamma(alpha, scale) variables is
# Gamma(N * alpha, scale), which is drawn directly (N = 0 yields exactly 0).
# Parameters may be scalars or length-n vectors; incompatible lengths print
# a message and return the diagnostic length vector instead of draws.
"rPoisGam" <-
function ( n, lambda, mu.Z, alpha)
{
  mu.N <- lambda
  lens <- c( n, length( mu.N), length( mu.Z), length( alpha))
  names( lens) <- c( "n","mu.N","mu.Z","alpha")
  if( !all( is.element( lens[-1], c( 1, lens[1])))) {
    print( "rPoisGam: error -- length of arguments are not compatible")
    return( lens)
  }
  # recycle scalar parameters to length n
  if( lens["mu.N"]==1)
    mu.N <- rep( mu.N, lens["n"])
  if( lens["mu.Z"]==1)
    mu.Z <- rep( mu.Z, lens["n"])
  if( lens["alpha"]==1)
    alpha <- rep( alpha, lens["n"])
  counts <- rpois( n, mu.N)   # Poisson number of Gamma summands
  scale.Z <- mu.Z / alpha     # Gamma scale implied by mean and shape
  vapply( seq_len( n),
          function( i) rgamma( n=1, scale=scale.Z[i], shape=counts[i]*alpha[i]),
          numeric( 1))
}
# Draw n Tweedie (1 < p < 2) variates by mapping (mu, phi, p) into the
# Poisson-Gamma parameterisation and delegating to rPoisGam().
"rTweedie" <-
function ( n, mu, phi, p)
{
  pois.mean <- ( mu^( 2-p)) / ( phi*(2-p))          # lambda
  gamma.shape <- ( 2-p) / ( p-1)                    # alpha
  gamma.mean <- gamma.shape * ( phi*(p-1)*mu^(p-1)) # mu.Z = alpha * tau
  rPoisGam( n, pois.mean, gamma.mean, gamma.shape)
}
# Simulate a Poisson-Gamma regression data set.
#   n          -- number of observations.
#   lambda.tau -- coefficients of the log-linear Poisson mean.
#   mu.Z.tau   -- coefficients of the log-linear Gamma mean.
#   alpha      -- Gamma shape (only alpha[1] is used).
#   offset1    -- optional offset added to the Poisson linear predictor.
#   X          -- optional design matrix; if NULL (or of the wrong width)
#                 an intercept plus standard-normal covariates is generated.
# Returns a data frame with columns y, (offset,) const, x1, ...; the true
# coefficients are attached as attribute "coefs".
"simReg" <-
function (n, lambda.tau, mu.Z.tau, alpha, offset1=NULL, X=NULL)
{
  if (length(lambda.tau) != length(mu.Z.tau) && length(alpha) != 1) {
    print("coefficient vectors are inconsistent -- try again")
    return()
  }
  if (is.null(X)) {
    X <- cbind(1, matrix(rnorm(n * (length(lambda.tau) - 1)), nrow = n))
  } else {
    X <- as.matrix(X)
  }
  offset2 <- if (is.null(offset1)) rep(0, nrow(X)) else offset1
  if (ncol(X) != length(lambda.tau)) {
    print("Supplied X does not match coefficients")
    X <- cbind(1, matrix(rnorm(n * (length(lambda.tau) - 1)), nrow = n))
  }
  lambda <- exp(X %*% lambda.tau + offset2)
  mu <- exp(X %*% mu.Z.tau)
  y <- rPoisGam(n = n, lambda = lambda, mu.Z = mu, alpha = alpha[1])
  if (is.null(offset1)) {
    res <- as.data.frame(cbind(y, X))
    colnames(res) <- c("y", "const", paste("x", 1:(length(lambda.tau) - 1), sep = ""))
  } else {
    res <- as.data.frame(cbind(y, offset2, X))
    colnames(res) <- c("y", "offset", "const", paste("x", 1:(length(lambda.tau) - 1), sep = ""))
  }
  attr(res, "coefs") <- list(lambda.tau = lambda.tau, mu.Z.tau = mu.Z.tau,
                             alpha = alpha)
  res
}
# Fit a Tweedie GLM (log link) for the mean, with dispersion phi and power p
# either fixed by the caller or estimated. Builds the design matrix,
# response, offset and weights from mean.form, chooses starting values when
# none are supplied, then delegates the optimisation to tglm.fit().
#   wts      -- optional observation weights (default all 1).
#   phi, p   -- optional fixed dispersion / power; NULL means "estimate".
#   inits    -- optional starting values (coefs[, phi][, p]).
#   vcov, residuals, trace, iter.max -- passed through to tglm.fit().
"tglm" <-
function ( mean.form, data, wts=NULL, phi=NULL, p=NULL, inits=NULL, vcov=TRUE, residuals=TRUE, trace=1, iter.max=150)
{
if( is.null( wts))
wts <- rep( 1, nrow( data))
# place the weights in the formula's environment so model.frame() finds them
e<-new.env()
e$wts <- wts
environment(mean.form) <- e
temp.p <- model.frame( mean.form, data=as.data.frame( data), weights=wts)
y <- model.response( temp.p)
names( y) <- NULL
X.p <- model.matrix( mean.form, data)
offset.p <- model.offset( temp.p)
if ( is.null( offset.p))
offset.p <- rep( 0, nrow( temp.p))
wts1 <- as.vector( model.weights( temp.p))
fm1 <- NULL
if( is.null( inits)){
inits <- rep( 0, ncol( X.p))
if( trace!=0)
print( "Obtaining initial mean values from log-linear Poisson model -- this might be stupid")
abit <- 1
# round the response so a Poisson glm can be used for starting values
ystar <- round( y, digits=0)
fm1 <- glm( ystar~-1+X.p, family=poisson( link="log"), weights=wts1)
inits <- fm1$coef
}
if( is.null( phi) & length( inits)==ncol( X.p)){
if( is.null( fm1))
inits <- c( inits, 1)
else{
if( trace!=0)
print( "Obtaining initial dispersion from the smaller of the Pearson or Deviance estimator (or 25)")
# a provisional power is needed for the Pearson estimator when p is
# also being estimated
if( is.null( p) & length( inits)==ncol( X.p))
ptemp <- 1.9
else
ptemp <- p
disDev <- fm1$deviance/( length( y) - length( fm1$coef))
disPear <- sum( ( wts1 * ( y-fm1$fitted)^2) / ( fm1$fitted^ptemp)) / ( length( y) - length( fm1$coef))
dis <- min( disDev, disPear, 25)
inits <- c( inits, dis)
}
}
# starting value for the power parameter, if estimated
if( is.null( p) & length( inits)==ncol( X.p) + is.null( phi))
inits <- c( inits, 1.6)
if( length( inits) != ncol( X.p) + is.null( phi) + is.null( p)) {
print( "Initial values supplied are of the wrong length -- please check")
tmp <- c( length( inits), ncol( X.p) + is.null( phi) + is.null( p))
names( tmp) <- c( "inits", "nParams")
return( tmp)
}
fmTGLM <- tglm.fit( x=X.p, y=y, wts=wts1, offset=offset.p, inits=inits, phi=phi, p=p, vcov=vcov, residuals=residuals, trace=trace, iter.max=iter.max)
return( fmTGLM)
}
# Workhorse for tglm(): maximum-likelihood fit of the Tweedie GLM given a
# ready-made design matrix x, response y, weights, offset and starting
# values. phi and/or p are estimated (within bounds) when NULL, otherwise
# held fixed. Returns an object of class "tglm" with coefficients, scores,
# optional variance matrix, fitted means, optional quantile residuals and
# AIC/BIC.
"tglm.fit" <-
function ( x, y, wts=NULL, offset=rep( 0, length( y)), inits=rnorm( ncol( x)), phi=NULL, p=NULL, vcov=TRUE, residuals=TRUE, trace=1, iter.max=150)
{
if( trace!=0){
print( "Estimating parameters")
if( is.null( phi) & is.null( p))
cat("iter:", "-logl", colnames(x), "phi", "p", "\n", sep = "\t")
if( is.null( phi) & !is.null( p))
cat("iter:", "-logl", colnames(x), "phi", "\n", sep = "\t")
if( !is.null( phi) & is.null( p))
cat("iter:", "-logl", colnames(x), "p", "\n", sep = "\t")
if( !is.null( phi) & !is.null( p))
cat("iter:", "-logl", colnames(x), "\n", sep = "\t")
}
# box constraints: phi > 0, and 1 < p < 2 (the compound Poisson-Gamma range)
eps <- 1e-5
my.lower <- rep( -Inf, ncol( x))
my.upper <- rep( Inf, ncol( x))
if( is.null( phi))
{my.lower <- c( my.lower, eps); my.upper <- c( my.upper, Inf)}
if( is.null( p))
{my.lower <- c( my.lower, 1+eps); my.upper <- c( my.upper, 2-eps)}
fm <- nlminb( start=inits, objective=ldTweedie.lp, gradient=ldTweedie.lp.deriv, hessian=NULL, lower=my.lower, upper=my.upper, y=y, X.p=x, offsetty=offset, phi=phi, p=p, control=list(trace=trace, iter.max=iter.max), wts=wts)
parms <- fm$par
# name the parameter vector: coefficients, then any estimated phi / p
tmp <- colnames( x)
if( !is.null( phi) & !is.null( p))
tmp <- tmp
if( !is.null( phi) & is.null( p))
tmp <- c( tmp, "p")
if( is.null( phi) & !is.null( p))
tmp <- c( tmp, "phi")
if( is.null( phi) & is.null( p))
tmp <- c( tmp, "phi", "p")
names( parms) <- tmp
if( vcov){
if( trace!=0)
print( "Calculating variance matrix of estimates")
# numerically differentiate the score, symmetrise, and invert
vcovar <- nd2(x0=parms, f=ldTweedie.lp.deriv, y=y, X.p=x, offsetty=offset, phi=phi, p=p, wts=wts)
vcovar <- 0.5 * ( vcovar + t( vcovar))
vcovar <- solve( vcovar)
rownames( vcovar) <- colnames( vcovar) <- names( parms)
}
else{
if( trace!=0)
print( "Not calculating variance matrix of estimates")
vcovar <- NULL
}
scores <- -ldTweedie.lp.deriv( parms=parms, y=y, X.p=x, offsetty=offset, phi=phi, p=p, wts=wts)
if( trace !=0)
print( "Calculating means")
mu <- exp( x %*% parms[1:ncol( x)] + offset)
if( residuals){
if( trace!=0)
print( "Calculating quantile residuals")
# recover the (fixed or estimated) phi and p used by the fit
if( is.null( phi)){
phi1 <- parms[ncol( x)+1]
if( is.null( p))
p1 <- parms[ncol(x)+2]
else
p1 <- p
}
else{
phi1 <- phi
if( is.null( p))
p1 <- parms[ncol( x)+1]
else
p1 <- p
}
# quantile residuals, randomised within the point mass at zero
resids <- matrix( rep( pTweedie( y, mu, phi1, p1), 2), ncol=2)
resids[y==0,1] <- 0.5 * dTweedie( y[y==0], mu[y==0], phi1, p1, LOG=FALSE)
nzero <- sum( y==0)
resids[y==0,2] <- runif( nzero, min=rep( 0, nzero), max=2*resids[y==0,1])
resids <- qnorm( resids)
colnames( resids) <- c("expect","random")
}
else{
if( trace!=0)
print( "Not calculating quantile residuals")
resids <- NULL
}
if( trace!=0)
print( "Done")
# information criteria: do not count fixed parameters
ICadj <- 0
if( !is.null( phi))
ICadj <- ICadj+1
if( !is.null( p))
ICadj <- ICadj+1
AIC <- -2*(-fm$objective) + 2*(length( parms)-ICadj)
BIC <- -2*(-fm$objective) + log( nrow( x))*(length( parms)-ICadj)
res <- list( coef=parms, logl=-fm$objective, scores=scores, vcov=vcovar, conv=fm$convergence, message=fm$message, niter=fm$iterations, evals=fm$evaluations, call=match.call(), fitted=mu, residuals=resids, AIC=AIC, BIC=BIC)
class( res) <- "tglm"
return( res)
}
| /fishMod/R/fishMod.R | no_license | ingted/R-Examples | R | false | false | 25,888 | r | # This is package fishMod
"deltaLN" <-
function ( ln.form, binary.form, data, residuals=TRUE)
{
temp <- model.frame( ln.form, data=as.data.frame( data))
X <- model.matrix( ln.form, temp)
offy <- model.offset( temp)
if( is.null( offy))
offy <- rep( 0, nrow( X))
nonzeros <- model.response( temp)>0
temp.ln <- model.frame( ln.form, data=as.data.frame( data[nonzeros,]))
nz.y <- log( model.response( temp.ln))
nz.X <- model.matrix( ln.form, temp.ln)
nz.offset <- model.offset( temp.ln)
if (is.null(nz.offset))
nz.offset <- rep(0, length(nz.y))
temp.bin <- model.frame( binary.form, data=as.data.frame( data))
temp.bin <- model.matrix( binary.form, temp.bin)
bin.X <- cbind( as.numeric( nonzeros), temp.bin)
colnames( bin.X) <- c("positive", colnames( bin.X)[-1])
# if( length( colnames( temp.bin))==1)
# binary.form <- as.formula( paste( colnames( temp.bin)[1],"~1"))
# else
# binary.form <- as.formula( paste( colnames( temp.bin)[1],"~1+",paste( colnames( temp.bin)[-1], collapse="+")))
#positive data log-normal
lnMod <- lm( nz.y~-1+nz.X, offset=nz.offset)#ln.form, temp.ln)
#bianry glm
binMod <- glm( bin.X[,"positive"]~-1+bin.X[,-1], family=binomial())
stdev <- summary( lnMod)$sigma
coefs <- list( binary=binMod$coef, ln=lnMod$coef, ln.sigma2=stdev^2)
logl <- sum( dlnorm( exp(nz.y), lnMod$fitted, sdlog=stdev, log=TRUE)) + sum( dbinom( nonzeros, size=1, prob=binMod$fitted, log=TRUE))
n <- nrow( temp)
ncovars <- length( lnMod$coef) + 1 + length( binMod$coef)
nnonzero <- sum( nonzeros)
nzero <- n-nnonzero
AIC <- -2*logl + 2*ncovars
BIC <- -2*logl + log( n)*ncovars
fitted <- var <- rep( -999, n)
# colnames( temp)[ncol( temp)] <- "nz.offset"
# lpv <- predict( lnMod, temp)
lpv <- X %*% lnMod$coef + offy
pv <- exp( lpv + 0.5*(stdev^2))
fitted <- binMod$fitted * pv
var <- binMod$fitted*exp( 2*lpv+stdev^2)*(exp( stdev^2)-binMod$fitted)
if( residuals){
resids <- matrix( rep( -999, 2*n), ncol=2, dimnames=list(NULL,c("quantile","Pearson")))
resids[nonzeros,"quantile"] <- (1-binMod$fitted[nonzeros]) + binMod$fitted[nonzeros] * plnorm( temp.ln[,1], lnMod$fitted, stdev, log.p=FALSE)
resids[!nonzeros,"quantile"] <- runif( n=sum( !nonzeros), min=rep( 0, nzero), max=1-binMod$fitted[!nonzeros])
resids[,"quantile"] <- qnorm( resids[,"quantile"])
resids[,"Pearson"] <- ( model.response(temp)-fitted) / sqrt(var)
}
else
resids <- NULL
res <- list( coefs=coefs, logl=logl, AIC=AIC, BIC=BIC, fitted=fitted, fittedVar=var, residuals=resids, n=n, ncovars=ncovars, nzero=nzero, lnMod=lnMod, binMod=binMod)
class( res) <- "DeltaLNmod"
return( res)
}
"dPoisGam" <-
function ( y, lambda, mu.Z, alpha, LOG=TRUE)
{
#function to calculate Random sum (Tweedie) densities.
#y is the value of the r.v. Can be a vector
#mu.N is the mean of the Poisson summing r.v. Can be a vector of length(y)
#mu.Z is the mean of the Gamma rv Can be a vector of length(y)
#alpha is the `other' parameter of the gamma distribution s.t. var = ( mu.Z^2)/alpha Can be a vector of length(y)
#If mu.N, mu.Z or alpha are scalare but y isn't then they will be used for all y. If lengths mis-match then error
#LOG=TRUE gives the density on the log scale
#do.checks=TRUE checks the input vectors for compatability and gives errors / changes them as appropriate.
#do.checks=FALSE doesn't check and relies on the user to have things right. If not right then catastrophic failure may occur.
# if( any( is.null( c( y, mu.N, mu.Z, alpha)))){
# print( "Error: null input values -- please check. Null values are:")
# tmp <- double( is.null( c( y, mu.N, mu.Z, alpha)))
# names( tmp) <- c( "y", "mu.N","mu.Z","alpha")
# print( tmp)
# print( "Exitting")
# return()
# }
mu.N <- lambda
if( !all( is.element( c( length( mu.N), length( mu.Z), length( alpha)), c( length( y), 1)))){
print( "Error: length of parameter vectors does not match length of random variable vector")
print( "Exitting")
return()
}
if( length( mu.N) != length( y))
mu.N <- rep( mu.N, length( y))
if( length( mu.Z) != length( y))
mu.Z <- rep( mu.Z, length( y))
if( length( alpha) != length( y))
alpha <- rep( alpha, length( y))
res <- .Call( "dTweedie", as.numeric( y), as.numeric( mu.N), as.numeric( mu.Z), as.numeric( alpha), as.integer( LOG), PACKAGE="fishMod")
return( res)
}
"dPoisGamDerivs" <-
function ( y=NULL, lambda=NULL, mu.Z=NULL, alpha=NULL, do.checks=TRUE)
{
#function to calculate Random sum (Tweedie) densities.
#y is the value of the r.v. Can be a vector
#mu.N is the mean of the Poisson summing r.v. Can be a vector of length(y)
#mu.Z is the mean of the Gamma rv Can be a vector of length(y)
#alpha is the `other' parameter of the gamma distribution s.t. var = ( mu.Z^2)/alpha Can be a vector of length(y)
#If mu.N, mu.Z or alpha are scalare but y isn't then they will be used for all y. If lengths mis-match then error
#LOG=TRUE gives the density on the log scale
#do.checks=TRUE checks the input vectors for compatability and gives errors / changes them as appropriate.
#do.checks=FALSE doesn't check and relies on the user to have things right. If not right then catastrophic failure may occur.
mu.N <- lambda
if( do.checks){
if( any( is.null( c( y, mu.N, mu.Z, alpha)))){
print( "Error: null input values -- please check. Null values are:")
tmp <- double( is.null( c( y, mu.N, mu.Z, alpha)))
names( tmp) <- c( "y", "mu.N","mu.Z","alpha")
print( tmp)
print( "Exitting")
return()
}
if( !all( is.element( c( length( mu.N), length( mu.Z), length( alpha)), c( length( y), 1)))){
print( "Error: length of parameter vectors does not match length of random variable vector")
print( "Exitting")
}
if( length( mu.N) != length( y))
mu.N <- rep( mu.N, length( y))
if( length( mu.Z) != length( y))
mu.Z <- rep( mu.Z, length( y))
if( length( alpha) != length( y))
alpha <- rep( alpha, length( y))
}
res <- .Call( "dTweedieDeriv", as.numeric( y), as.numeric( mu.N), as.numeric( mu.Z), as.numeric( alpha), PACKAGE="fishMod")
colnames( res) <- c("lambda","mu.Z","alpha")
return( res)
}
"dTweedie" <-
function ( y, mu, phi, p, LOG=TRUE)
{
lambda <- ( mu^( 2-p)) / ( phi*(2-p))
alpha <- ( 2-p) / ( p-1)
tau <- phi*(p-1)*mu^(p-1)
mu.Z <- alpha * tau
dens <- dPoisGam( y, lambda, mu.Z, alpha, LOG)
return( dens)
}
"ldPoisGam.lp" <-
function ( parms, y, X.p, X.g, offsetty, alpha, wts=rep( 1, length( y)))
{
mu.p <- exp( X.p %*% parms[1:ncol( X.p)] + offsetty)
mu.g <- exp( X.g %*% parms[ncol( X.p)+1:ncol( X.g)])
if( is.null( alpha))
return( - sum( wts * dPoisGam( y, lambda=mu.p, mu.Z=mu.g, alpha=exp(tail( parms, 1)), LOG=TRUE)))
else
return( - sum( wts * dPoisGam( y, lambda=mu.p, mu.Z=mu.g, alpha=alpha, LOG=TRUE)))
}
"ldPoisGam.lp.deriv" <-
function ( parms, y, X.p, X.g, offsetty, alpha, wts=rep( 1, length( y)))
{
mu.p <- exp( X.p %*% parms[1:ncol( X.p)] + offsetty)
mu.g <- exp( X.g %*% parms[ncol( X.p)+1:ncol( X.g)])
if( is.null( alpha))
alpha1 <- exp( tail( parms,1))
else
alpha1 <- alpha
dTweedparms <- - wts * dPoisGamDerivs( y, lambda=mu.p, mu.Z=mu.g, alpha=alpha1) #wts should replicate appropriately
deri.lambda <- ( as.numeric( dTweedparms[,"lambda"] * mu.p)) * X.p
deri.mu <- ( as.numeric( dTweedparms[,"mu.Z"] * mu.g)) * X.g
deri.alpha <- as.numeric( dTweedparms[,"alpha"]) * alpha1
deri.all <- c( colSums( deri.lambda), colSums( deri.mu), sum( deri.alpha))
if( is.null( alpha))
return( deri.all)
else
return( deri.all[-length( deri.all)])
}
"ldTweedie.lp" <-
function ( parms, y, X.p, offsetty, phi, p, wts=rep( 1, length( y)))
{
mu <- exp( X.p %*% parms[1:ncol( X.p)] + offsetty)
if( is.null( phi) & is.null( p)){
phi <- parms[ncol( X.p) + 1]
p <- parms[ncol( X.p) + 2]
}
if( is.null( phi) & !is.null( p))
phi <- parms[ncol( X.p)+1]
if( !is.null( phi) & is.null( p))
p <- parms[ncol( X.p)+1]
lambda <- ( mu^( 2-p)) / ( phi*(2-p))
alpha <- ( 2-p) / ( p-1)
tau <- phi*(p-1)*(mu^(p-1))
mu.Z <- alpha * tau
return( -sum( wts * dPoisGam( y, lambda=lambda, mu.Z=mu.Z, alpha=alpha, LOG=TRUE)))
}
"ldTweedie.lp.deriv" <-
function ( parms, y, X.p, offsetty, phi, p, wts=rep( 1, length( y)))
{
mu <- exp( X.p %*% parms[1:ncol( X.p)] + offsetty)
p.flag <- phi.flag <- FALSE
if( is.null( phi) & is.null( p)){
p.flag <- phi.flag <- TRUE
phi <- parms[ncol( X.p) + 1]
p <- parms[ncol( X.p) + 2]
}
if( is.null( phi) & !is.null( p)){
phi <- parms[ncol( X.p)+1]
phi.flag <- TRUE
}
if( !is.null( phi) & is.null( p)){
p <- parms[ncol( X.p)+1]
p.flag <- TRUE
}
lambda <- ( mu^( 2-p)) / ( phi*(2-p))
alpha <- ( 2-p) / ( p-1)
tau <- phi*(p-1)*mu^(p-1)
mu.Z <- alpha * tau
dTweedparms <- -wts * dPoisGamDerivs( y, lambda=lambda, mu.Z=mu.Z, alpha=alpha)
DTweedparmsDmu <- matrix( c( ( mu^(1-p)) / phi, alpha*phi*( ( p-1)^2)*( mu^(p-2)), rep( 0, length( mu))), nrow=3, byrow=T)
tmp <- rowSums( dTweedparms * t( DTweedparmsDmu))
tmp <- tmp * mu
tmp <- apply( X.p, 2, function( x) x*tmp)
derivs <- colSums( tmp)
if( phi.flag){
DTweedparmsDphi <- matrix( c( -( ( mu^(2-p)) / ( ( phi^2)*(2-p))), alpha*( p-1)*( mu^( p-1)), rep( 0, length( mu))), nrow=3, byrow=T)
tmpPhi <- rowSums( dTweedparms * t( DTweedparmsDphi)) #vectorised way of doing odd calculation
derivs <- c( derivs, sum( tmpPhi))
names( derivs)[length( derivs)] <- "phi"
}
if( p.flag){
dalphadp <- -( 1+alpha) / ( p-1)
DTweedparmsDp <- matrix( c( lambda*( 1/(2-p) - log( mu)), mu.Z*( dalphadp/alpha + 1/( p-1) + log( mu)), rep( dalphadp, length( y))), nrow=3, byrow=T)
tmpP <- rowSums( dTweedparms * t( DTweedparmsDp))
derivs <- c( derivs, sum( tmpP))
names( derivs)[length( derivs)] <- "p"
}
return( derivs)
}
"nd2" <-
function(x0, f, m=NULL, D.accur=4, eps=NULL, ...) {
# A function to compute highly accurate first-order derivatives
# Stolen (mostly) from the net and adapted / modified by Scott
# From Fornberg and Sloan (Acta Numerica, 1994, p. 203-267; Table 1, page 213)
# x0 is the point where the derivative is to be evaluated,
# f is the function that requires differentiating
# m is output dimension of f, that is f:R^n -> R^m
#D.accur is the required accuracy of the resulting derivative. Options are 2 and 4. The 2 choice does a two point finite difference approximation and the 4 choice does a four point finite difference approximation.
#eps is the
# Report any bugs to Scott as he uses this extensively!
D.n<-length(x0)
if (is.null(m)) {
D.f0<-f(x0, ...)
m<-length(D.f0) }
if (D.accur==2) {
D.w<-tcrossprod(rep(1,m),c(-1/2,1/2))
D.co<-c(-1,1) }
else {
D.w<-tcrossprod(rep(1,m),c(1/12,-2/3,2/3,-1/12))
D.co<-c(-2,-1,1,2) }
D.n.c<-length(D.co)
if( is.null( eps)) {
macheps<-.Machine$double.eps
D.h<-macheps^(1/3)*abs(x0)
}
else
D.h <- rep( eps, D.accur)
D.deriv<-matrix(NA,nrow=m,ncol=D.n)
for (ii in 1:D.n) {
D.temp.f<-matrix(0,m,D.n.c)
for (jj in 1:D.n.c) {
D.xd<-x0+D.h[ii]*D.co[jj]*(1:D.n==ii)
D.temp.f[,jj]<-f(D.xd, ...) }
D.deriv[,ii]<-rowSums(D.w*D.temp.f)/D.h[ii] }
return( D.deriv)
}
".onLoad" <-
function( libname, pkgname){
# Generic DLL loader
dll.path <- file.path( libname, pkgname, 'libs')
if( nzchar( subarch <- .Platform$r_arch))
dll.path <- file.path( dll.path, subarch)
this.ext <- paste( sub( '.', '[.]', .Platform$dynlib.ext, fixed=TRUE), '$', sep='')
dlls <- dir( dll.path, pattern=this.ext, full.names=FALSE)
names( dlls) <- dlls
if( length( dlls))
lapply( dlls, function( x) library.dynam( sub( this.ext, '', x), package=pkgname, lib.loc=libname))
}
"pgm" <-
function ( p.form, g.form, data, wts=NULL, alpha=NULL, inits=NULL, vcov=TRUE, residuals=TRUE, trace=1)
{
if( is.null( wts))
wts <- rep( 1, nrow( data))
# temp.p <- model.frame( p.form, data=as.data.frame( data))#, weights=wts)
e<-new.env()
e$wts <- wts
environment(p.form) <- e
temp.p <- model.frame( p.form, data=as.data.frame( data), weights=wts)
y <- model.response( temp.p)
names( y) <- NULL
X.p <- model.matrix( p.form, data)
offset.p <- model.offset( temp.p)
if ( is.null( offset.p))
offset.p <- rep( 0, nrow( temp.p))
wts1 <- as.vector( model.weights( temp.p))
# if( is.null( wts))
# wts <- rep( 1, length( y))
tmp.form <- g.form
tmp.form[[2]] <- p.form[[2]]
tmp.form[[3]] <- g.form[[2]]
temp.g <- model.frame( tmp.form, as.data.frame( data))
names( y) <- NULL
X.g <- model.matrix( tmp.form, data)
# if( is.null( weights)){
# weights <- rep( 1, length( y))
# if( trace!=0)
# print( "all weights are unity")
# }
# if( length( weights) != length( y)){
# print( "number of weights does not match number of observations")
# return( NULL)
# }
if( is.null( inits) & is.null( alpha))
inits <- rep( 0, ncol( X.p)+ncol( X.g)+1)
if( is.null( inits) & !is.null( alpha))
inits <- rep( 0, ncol( X.p)+ncol( X.g))
if( !is.element( length( inits),ncol( X.p)+ncol( X.g)+0:1)) {
print( "Initial values supplied are of the wrong length -- please check")
tmp <- c( length( inits), ncol( X.p)+ncol( X.g)+1)
names( tmp) <- c( "inits", "ncolDesign")
return( tmp)
}
if( trace!=0){
print( "Estimating parameters")
if( is.null( alpha))
cat("iter:", "-logl", paste( colnames(X.p),"Poisson",sep='.'), paste( colnames(X.g),"Gamma",sep='.'), "log( alpha)", "\n", sep = "\t")
else
cat("iter:", "-logl", paste( colnames(X.p),"Poisson",sep='.'), paste( colnames(X.g),"Gamma",sep='.'), "\n", sep = "\t")
}
fm <- nlminb( start=inits, objective=ldPoisGam.lp, gradient=ldPoisGam.lp.deriv, hessian = NULL, y=y, X.p=X.p, X.g=X.g, offsetty=offset.p, alpha=alpha, control=list(trace=trace), wts=wts1)
parms <- fm$par
if( is.null( alpha))
names( parms) <- c( paste( colnames(X.p),"Poisson",sep='.'), paste( colnames(X.g),"Gamma",sep='.'), "logalpha")
else
names( parms) <- c( paste( colnames(X.p),"Poisson",sep='.'), paste( colnames(X.g),"Gamma",sep='.'))
if( vcov){
if( trace!=0)
print( "Calculating variance matrix of estimates")
vcovar <- nd2(x0=parms, f=ldPoisGam.lp.deriv, y=y, X.p=X.p, X.g=X.g, offsetty=offset.p, alpha=alpha, wts=wts1)
vcovar <- 0.5 * ( vcovar + t( vcovar))
vcovar <- solve( vcovar)
rownames( vcovar) <- colnames( vcovar) <- names( parms)
}
else{
if( trace!=0)
print( "Not calculating variance matrix of estimates")
vcovar <- NULL
}
scores <- -ldPoisGam.lp.deriv( parms=parms, y=y, X.p=X.p, X.g=X.g, offsetty=offset.p, alpha=alpha, wts=wts1)
if( trace !=0)
print( "Calculating means")
muLamb <- exp( X.p %*% parms[1:ncol( X.p)] + offset.p)
muMuZ <- exp( X.g %*% parms[ncol( X.p)+1:ncol( X.g)])
mu <- muLamb * muMuZ
fitMu <- cbind( mu, muLamb, muMuZ)
colnames( fitMu) <- c("total","Lambda","muZ")
if( residuals){
if( trace!=0)
print( "Calculating quantile residuals")
if( is.null( alpha)){
resids <- matrix( rep( pPoisGam( y, muLamb, muMuZ, exp( tail( parms,1))), 2), ncol=2)
resids[y==0,1] <- 0.5 * dPoisGam( y[y==0], muLamb[y==0], muMuZ[y==0], exp( tail( parms,1)), LOG=FALSE)
}
else{
resids <- matrix( rep( pPoisGam( y, muLamb, muMuZ, alpha),2), ncol=2)
resids[y==0,1] <- 0.5 * dPoisGam( y[y==0], muLamb[y==0], muMuZ[y==0], alpha, LOG=FALSE)
}
nzero <- sum( y==0)
resids[y==0,2] <- runif( nzero, min=rep( 0, nzero), max=2*resids[y==0,1])
resids <- qnorm( resids)
colnames( resids) <- c("expect","random")
}
else{
if( trace!=0)
print( "Not calculating quantile residuals")
resids <- NULL
}
if( trace!=0)
print( "Done")
ICadj <- 0
if( !is.null( alpha))
ICadj <- ICadj + 1
AIC <- -2*(-fm$objective) + 2*(length( parms)-ICadj)
BIC <- -2*(-fm$objective) + log( nrow( X.p))*(length( parms)-ICadj)
res <- list( coef=parms, logl=-fm$objective, scores=scores, vcov=vcovar, conv=fm$convergence, message=fm$message, niter=fm$iterations, evals=fm$evaluations, call=match.call(), fitted=fitMu, residuals=resids, AIC=AIC, BIC=BIC)
class( res) <- "pgm"
return( res)
}
"pPoisGam" <-
function ( q, lambda, mu.Z, alpha)
{
tmp <- c( length( q), length( lambda), length(mu.Z), length( alpha))
names( tmp) <- c( "n","lambda","mu.Z","alpha")
if( !all( is.element( tmp[-1], c( 1, tmp[1])))) {
print( "pPoisGam: error -- length of arguments are not compatible")
return( tmp)
}
#from here on taken from Dunn's function ptweedie.series
#I have to admit that I don't quite get it. I think that it is some sort of quadrature (this is a guess)
y <- q
drop <- 39
lambdaMax <- N <- max(lambda)
logfmax <- -log(lambdaMax)/2
estlogf <- logfmax
while ((estlogf > (logfmax - drop)) & (N > 1)) {
N <- max(1, N - 2)
estlogf <- -lambdaMax + N * (log(lambdaMax) - log(N) + 1) - log(N)/2
}
lo.N <- max(1, floor(N))
lambdaMin <- N <- min(lambda)
logfmax <- -log(lambdaMin)/2
estlogf <- logfmax
while (estlogf > (logfmax - drop)) {
N <- N + 1
estlogf <- -lambdaMin + N * (log(lambdaMin) - log(N) + 1) - log(N)/2
}
hi.N <- max(ceiling(N))
cdf <- matrix( 0, nrow=length( y), ncol=1) #array(dim = length(y), 0)
for (N in (lo.N:hi.N)) {
pois.den <- dpois(N, lambda)
incgamma.den <- pchisq(2 * as.numeric( y) * alpha / as.numeric( mu.Z), 2 * alpha * N)
cdf <- cdf + pois.den * incgamma.den
}
cdf <- cdf + exp(-lambda)
its <- hi.N - lo.N + 1
return( cdf)
}
"pTweedie" <-
function ( q, mu, phi, p)
{
lambda <- ( mu^( 2-p)) / ( phi*(2-p))
alpha <- ( 2-p) / ( p-1)
tau <- phi*(p-1)*mu^(p-1)
mu.Z <- alpha * tau
ps <- pPoisGam( q, lambda, mu.Z, alpha)
# require( tweedie)
# ps <- ptweedie( as.numeric( q), mu=as.numeric( mu), phi=as.numeric( phi), power=as.numeric( p))
return( ps)
}
"rPoisGam" <-
function ( n, lambda, mu.Z, alpha)
{
mu.N <- lambda
#simulate n random variables from the same compound poisson distribution
my.fun <- function (parms)
return( rgamma( n=1, scale=parms[3], shape=parms[1]*parms[2]))
# return( sum( rgamma( n=parms[1], scale=parms[3], shape=parms[2])))
tmp <- c( n, length( mu.N), length(mu.Z), length( alpha))
names( tmp) <- c( "n","mu.N","mu.Z","alpha")
if( !all( is.element( tmp[-1], c( 1, tmp[1])))) {
print( "rPoisGam: error -- length of arguments are not compatible")
return( tmp)
}
if( tmp["mu.N"]==1)
mu.N <- rep( mu.N, tmp["n"])
if( tmp["mu.Z"]==1)
mu.Z <- rep( mu.Z, tmp["n"])
if( tmp["alpha"]==1)
alpha <- rep( alpha, tmp["n"])
np <- matrix( rpois( n, mu.N), ncol=1)
beta <- mu.Z / alpha
y <- apply( cbind( np, alpha, beta), 1, my.fun)
return( y)
}
"rTweedie" <-
function ( n, mu, phi, p)
{
lambda <- ( mu^( 2-p)) / ( phi*(2-p))
alpha <- ( 2-p) / ( p-1)
tau <- phi*(p-1)*mu^(p-1)
mu.Z <- alpha * tau
rans <- rPoisGam( n, lambda, mu.Z, alpha)
return( rans)
}
"simReg" <-
function (n, lambda.tau, mu.Z.tau, alpha, offset1=NULL, X=NULL)
{
if (length(lambda.tau) != length(mu.Z.tau) && length(alpha) !=
1) {
print("coefficient vectors are inconsistent -- try again")
return()
}
if( is.null( X))
X <- cbind(1, matrix(rnorm(n * (length(lambda.tau) - 1)), nrow = n))
else
X <- as.matrix( X)
offset2 <- offset1
if( is.null( offset1))
offset2 <- rep( 0, nrow( X))
if( ncol( X) != length( lambda.tau)) {
print( "Supplied X does not match coefficients")
X <- cbind(1, matrix(rnorm(n * (length(lambda.tau) - 1)), nrow = n)) }
lambda <- exp(X %*% lambda.tau + offset2)
mu <- exp(X %*% mu.Z.tau)
y <- rPoisGam(n = n, lambda = lambda, mu.Z = mu, alpha = alpha[1])
if( is.null( offset1)){
res <- as.data.frame(cbind(y, X))
colnames(res) <- c("y", "const", paste("x", 1:(length(lambda.tau) - 1), sep = ""))
}
else{
res <- as.data.frame( cbind( y, offset2, X))
colnames(res) <- c("y", "offset", "const", paste("x", 1:(length(lambda.tau) - 1), sep = ""))
}
attr(res, "coefs") <- list(lambda.tau = lambda.tau, mu.Z.tau = mu.Z.tau,
alpha = alpha)
return(res)
}
"tglm" <-
function ( mean.form, data, wts=NULL, phi=NULL, p=NULL, inits=NULL, vcov=TRUE, residuals=TRUE, trace=1, iter.max=150)
{
if( is.null( wts))
wts <- rep( 1, nrow( data))
e<-new.env()
e$wts <- wts
environment(mean.form) <- e
temp.p <- model.frame( mean.form, data=as.data.frame( data), weights=wts)
y <- model.response( temp.p)
names( y) <- NULL
X.p <- model.matrix( mean.form, data)
offset.p <- model.offset( temp.p)
if ( is.null( offset.p))
offset.p <- rep( 0, nrow( temp.p))
wts1 <- as.vector( model.weights( temp.p))
fm1 <- NULL
if( is.null( inits)){
inits <- rep( 0, ncol( X.p))
if( trace!=0)
print( "Obtaining initial mean values from log-linear Poisson model -- this might be stupid")
abit <- 1
ystar <- round( y, digits=0)
fm1 <- glm( ystar~-1+X.p, family=poisson( link="log"), weights=wts1)
inits <- fm1$coef
}
if( is.null( phi) & length( inits)==ncol( X.p)){
if( is.null( fm1))
inits <- c( inits, 1)
else{
if( trace!=0)
print( "Obtaining initial dispersion from the smaller of the Pearson or Deviance estimator (or 25)")
if( is.null( p) & length( inits)==ncol( X.p))
ptemp <- 1.9
else
ptemp <- p
disDev <- fm1$deviance/( length( y) - length( fm1$coef))
disPear <- sum( ( wts1 * ( y-fm1$fitted)^2) / ( fm1$fitted^ptemp)) / ( length( y) - length( fm1$coef))
dis <- min( disDev, disPear, 25)
inits <- c( inits, dis)
}
}
if( is.null( p) & length( inits)==ncol( X.p) + is.null( phi))
inits <- c( inits, 1.6)
if( length( inits) != ncol( X.p) + is.null( phi) + is.null( p)) {
print( "Initial values supplied are of the wrong length -- please check")
tmp <- c( length( inits), ncol( X.p) + is.null( phi) + is.null( p))
names( tmp) <- c( "inits", "nParams")
return( tmp)
}
fmTGLM <- tglm.fit( x=X.p, y=y, wts=wts1, offset=offset.p, inits=inits, phi=phi, p=p, vcov=vcov, residuals=residuals, trace=trace, iter.max=iter.max)
return( fmTGLM)
}
"tglm.fit" <-
function ( x, y, wts=NULL, offset=rep( 0, length( y)), inits=rnorm( ncol( x)), phi=NULL, p=NULL, vcov=TRUE, residuals=TRUE, trace=1, iter.max=150)
{
if( trace!=0){
print( "Estimating parameters")
if( is.null( phi) & is.null( p))
cat("iter:", "-logl", colnames(x), "phi", "p", "\n", sep = "\t")
if( is.null( phi) & !is.null( p))
cat("iter:", "-logl", colnames(x), "phi", "\n", sep = "\t")
if( !is.null( phi) & is.null( p))
cat("iter:", "-logl", colnames(x), "p", "\n", sep = "\t")
if( !is.null( phi) & !is.null( p))
cat("iter:", "-logl", colnames(x), "\n", sep = "\t")
}
eps <- 1e-5
my.lower <- rep( -Inf, ncol( x))
my.upper <- rep( Inf, ncol( x))
if( is.null( phi))
{my.lower <- c( my.lower, eps); my.upper <- c( my.upper, Inf)}
if( is.null( p))
{my.lower <- c( my.lower, 1+eps); my.upper <- c( my.upper, 2-eps)}
fm <- nlminb( start=inits, objective=ldTweedie.lp, gradient=ldTweedie.lp.deriv, hessian=NULL, lower=my.lower, upper=my.upper, y=y, X.p=x, offsetty=offset, phi=phi, p=p, control=list(trace=trace, iter.max=iter.max), wts=wts)
parms <- fm$par
tmp <- colnames( x)
if( !is.null( phi) & !is.null( p))
tmp <- tmp
if( !is.null( phi) & is.null( p))
tmp <- c( tmp, "p")
if( is.null( phi) & !is.null( p))
tmp <- c( tmp, "phi")
if( is.null( phi) & is.null( p))
tmp <- c( tmp, "phi", "p")
names( parms) <- tmp
if( vcov){
if( trace!=0)
print( "Calculating variance matrix of estimates")
vcovar <- nd2(x0=parms, f=ldTweedie.lp.deriv, y=y, X.p=x, offsetty=offset, phi=phi, p=p, wts=wts)
vcovar <- 0.5 * ( vcovar + t( vcovar))
vcovar <- solve( vcovar)
rownames( vcovar) <- colnames( vcovar) <- names( parms)
}
else{
if( trace!=0)
print( "Not calculating variance matrix of estimates")
vcovar <- NULL
}
scores <- -ldTweedie.lp.deriv( parms=parms, y=y, X.p=x, offsetty=offset, phi=phi, p=p, wts=wts)
if( trace !=0)
print( "Calculating means")
mu <- exp( x %*% parms[1:ncol( x)] + offset)
if( residuals){
if( trace!=0)
print( "Calculating quantile residuals")
if( is.null( phi)){
phi1 <- parms[ncol( x)+1]
if( is.null( p))
p1 <- parms[ncol(x)+2]
else
p1 <- p
}
else{
phi1 <- phi
if( is.null( p))
p1 <- parms[ncol( x)+1]
else
p1 <- p
}
resids <- matrix( rep( pTweedie( y, mu, phi1, p1), 2), ncol=2)
resids[y==0,1] <- 0.5 * dTweedie( y[y==0], mu[y==0], phi1, p1, LOG=FALSE)
nzero <- sum( y==0)
resids[y==0,2] <- runif( nzero, min=rep( 0, nzero), max=2*resids[y==0,1])
resids <- qnorm( resids)
colnames( resids) <- c("expect","random")
}
else{
if( trace!=0)
print( "Not calculating quantile residuals")
resids <- NULL
}
if( trace!=0)
print( "Done")
ICadj <- 0
if( !is.null( phi))
ICadj <- ICadj+1
if( !is.null( p))
ICadj <- ICadj+1
AIC <- -2*(-fm$objective) + 2*(length( parms)-ICadj)
BIC <- -2*(-fm$objective) + log( nrow( x))*(length( parms)-ICadj)
res <- list( coef=parms, logl=-fm$objective, scores=scores, vcov=vcovar, conv=fm$convergence, message=fm$message, niter=fm$iterations, evals=fm$evaluations, call=match.call(), fitted=mu, residuals=resids, AIC=AIC, BIC=BIC)
class( res) <- "tglm"
return( res)
}
|
library(ggplot2)
library(plyr)
#============
# Data Setup
#============
# NOTE(review): machine-specific working directory; the CSVs below are read
# relative to this path.
setwd("C:/Users/cwale/OneDrive/Desktop/SMU/Winter18/Doing_Data_Science/Case_Study_1/data")
beers <- read.csv("Beers.csv")
colnames(beers)[1] <- c("Beer_Name")
breweries <- read.csv("Breweries.csv")
colnames(breweries)[2] <- c("Brewery_Name")
# strip stray whitespace so state abbreviations join cleanly below
breweries$State <- trimws(breweries$State)
state_size <- read.csv("statesize.csv")
#============
# Question 1: breweries per state, scaled by state area
#============
brew_count <- count(breweries$State)
colnames(brew_count) <- c("State", "Brewery_Count")
brew_count <- merge(brew_count, state_size[, c("Abbrev", "SqMiles")], by.x=c("State"), by.y=c("Abbrev"))
# breweries per thousand square miles, sorted densest first
brew_count$brewery_density <- brew_count$Brewery_Count / (brew_count$SqMiles/1000)
x <- with(brew_count, order(-brewery_density))
brew_count <- brew_count[x, ]
# barplot(brew_count)
#============
# Question 2: join beers to their breweries
#============
final <- merge(beers, breweries, by.x=c("Brewery_id"), by.y=c("Brew_ID"))
head(final, 6)
#============
# Question 3
#============
# NA's Count per column
colSums(is.na(final))
#============
# Question 4: median bitterness (IBU) by state
#============
# fix: spell out TRUE rather than the reassignable shorthand T
state_ibu_medians <- tapply(final$IBU, final$State, median, na.rm = TRUE)
barplot(state_ibu_medians)
#============
# Question 5
#============
#State with max ABV
final[which.max(final$ABV), "State"]
#State with max IBU
final[which.max(final$IBU), "State"]
#============
# Question 6
#============
summary(final$ABV)
#============
# Question 7: relationship between alcohol content and bitterness
#============
# Basic scatter plot
# ggplot(final, aes(x=ABV, y=IBU)) + geom_point()
# Scatter plot with a fitted linear trend
ggplot(final, aes(x=ABV, y=IBU)) + geom_point() + geom_smooth(method=lm) #size=2, shape=23
| /cs_code_cjw.R | no_license | Ujustwaite/dds_case_study_1 | R | false | false | 1,635 | r | library(ggplot2)
library(plyr)
#============
# Data Setup
#============
setwd("C:/Users/cwale/OneDrive/Desktop/SMU/Winter18/Doing_Data_Science/Case_Study_1/data")
beers <- read.csv("Beers.csv")
colnames(beers)[1] <- c("Beer_Name")
breweries <- read.csv("Breweries.csv")
colnames(breweries)[2] <- c("Brewery_Name")
breweries$State <- trimws(breweries$State)
state_size <- read.csv("statesize.csv")
#============
# Question 1
#============
brew_count <- count(breweries$State)
colnames(brew_count) <- c("State", "Brewery_Count")
brew_count <- merge(brew_count, state_size[, c("Abbrev", "SqMiles")], by.x=c("State"), by.y=c("Abbrev"))
brew_count$brewery_density <- brew_count$Brewery_Count / (brew_count$SqMiles/1000)
x <- with(brew_count, order(-brewery_density))
brew_count <- brew_count[x, ]
# barplot(brew_count)
#============
# Question 2
#============
final <- merge(beers, breweries, by.x=c("Brewery_id"), by.y=c("Brew_ID"))
head(final, 6)
#============
# Question 3
#============
# NA's Count
colSums(is.na(final))
#============
# Question 4
#============
state_ibu_medians <- tapply(final$IBU, final$State, median, na.rm = T)
barplot(state_ibu_medians)
#============
# Question 5
#============
#State with max ABV
final[which.max(final$ABV), "State"]
#State with max IBU
final[which.max(final$IBU), "State"]
#============
# Question 6
#============
summary(final$ABV)
#============
# Question 7
#============
# Basic scatter plot
# ggplot(mtcars, aes(x=ABV, y=IBU)) + geom_point()
# Change the point size, and shape
ggplot(final, aes(x=ABV, y=IBU)) + geom_point() + geom_smooth(method=lm) #size=2, shape=23
|
# Run the cartography test suite with tinytest, but only when the
# (suggested) tinytest package is installed: requireNamespace() returns
# FALSE instead of erroring, and quietly=TRUE suppresses the startup
# message, so installations without tinytest skip the tests cleanly.
if ( requireNamespace("tinytest", quietly=TRUE) ){
  tinytest::test_package("cartography")
}
| /tests/tinytest.R | no_license | riatelab/cartography | R | false | false | 95 | r |
# Conditionally run the cartography tinytest suite: requireNamespace()
# makes tinytest an optional (suggested) dependency rather than a hard one.
if ( requireNamespace("tinytest", quietly=TRUE) ){
  tinytest::test_package("cartography")
}
|
\name{pSet-class}
\Rdversion{1.1}
\docType{class}
\alias{pSet-class}
\alias{pSet}
\alias{class:pSet}
\alias{[,pSet-method}
\alias{[,pSet,ANY,ANY-method}
\alias{[,pSet,ANY,ANY,ANY-method}
\alias{[[,pSet-method}
\alias{[[,pSet,ANY,ANY-method}
\alias{abstract,pSet-method}
\alias{acquisitionNum,pSet-method}
\alias{scanIndex,pSet-method}
\alias{assayData,pSet-method}
\alias{collisionEnergy,pSet-method}
\alias{dim,pSet-method}
\alias{dim}
\alias{experimentData,pSet-method}
\alias{fData,pSet-method}
\alias{fData<-,pSet,data.frame-method}
\alias{featureData,pSet-method}
\alias{featureNames,pSet-method}
\alias{fileNames,pSet-method}
\alias{fileNames}
\alias{fromFile,pSet-method}
\alias{centroided,pSet-method}
\alias{centroided<-,pSet,ANY-method}
\alias{centroided<-,pSet,logical-method}
\alias{fvarLabels,pSet-method}
\alias{fvarMetadata,pSet-method}
\alias{header,pSet,missing-method}
\alias{header,pSet,numeric-method}
\alias{header}
\alias{intensity,pSet-method}
\alias{length,pSet-method}
\alias{length}
\alias{msInfo,pSet-method}
\alias{msLevel,pSet-method}
\alias{mz,pSet-method}
\alias{notes,pSet-method}
\alias{pData,pSet-method}
\alias{peaksCount,pSet,missing-method}
\alias{peaksCount,pSet,numeric-method}
\alias{phenoData,pSet-method}
\alias{polarity,pSet-method}
\alias{precursorCharge,pSet-method}
\alias{precursorIntensity,pSet-method}
\alias{precursorMz,pSet-method}
\alias{precScanNum,pSet-method}
\alias{precAcquisitionNum,pSet-method}
\alias{processingData,pSet-method}
\alias{processingData}
\alias{protocolData,pSet-method}
\alias{pubMedIds,pSet-method}
\alias{rtime,pSet-method}
\alias{sampleNames,pSet-method}
\alias{spectra,pSet-method}
\alias{spectra}
\alias{tic,pSet-method}
\alias{ionCount,pSet-method}
\alias{varLabels,pSet-method}
\alias{varMetadata,pSet-method}
\alias{exptitle,pSet-method}
\alias{expemail,pSet-method}
\alias{ionSource,pSet-method}
\alias{ionSourceDetails,pSet-method}
\alias{analyser,pSet-method}
\alias{analyzer,pSet-method}
\alias{analyserDetails,pSet-method}
\alias{analyzerDetails,pSet-method}
\alias{instrumentModel,pSet-method}
\alias{instrumentManufacturer,pSet-method}
\alias{instrumentCustomisations,pSet-method}
\alias{detectorType,pSet-method}
\alias{description,pSet-method}
\title{
Class to Contain Raw Mass-Spectrometry Assays and Experimental
Metadata
}
\description{
Container for high-throughput mass-spectrometry assays and
experimental metadata. This class is based on Biobase's
  \code{"\linkS4class{eSet}"} virtual class, with the notable exception
  that the 'assayData' slot is an environment containing objects of class
  \code{"\linkS4class{Spectrum}"}.
}
\section{Objects from the Class}{
A virtual Class: No objects may be created from it.
See \code{"\linkS4class{MSnExp}"} for instantiatable sub-classes.
}
\section{Slots}{
\describe{
\item{\code{assayData}:}{Object of class \code{"environment"}
containing the MS spectra (see \code{"\linkS4class{Spectrum1}"}
and \code{"\linkS4class{Spectrum2}"}).
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{phenoData}:}{Object of class
\code{"\linkS4class{AnnotatedDataFrame}"} containing
experimenter-supplied variables describing sample (i.e the
individual tags for an labelled MS experiment)
See \code{\link{phenoData}} for more details.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{featureData}:}{Object of class
\code{"\linkS4class{AnnotatedDataFrame}"} containing variables
      describing features (spectra in our case), e.g. identification data,
peptide sequence, identification score,... (inherited from
\code{"\linkS4class{eSet}"}). See \code{\link{featureData}} for
more details.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{experimentData}:}{Object of class
\code{"\linkS4class{MIAPE}"}, containing details of experimental
methods. See \code{\link{experimentData}} for more details.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{protocolData}:}{Object of class
\code{"\linkS4class{AnnotatedDataFrame}"} containing
equipment-generated variables (inherited from
\code{"\linkS4class{eSet}"}). See \code{\link{protocolData}} for
more details.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{processingData}:}{Object of class
\code{"\linkS4class{MSnProcess}"} that records all processing.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{.cache}:}{Object of class \code{environment} used to
cache data. Under development. }
\item{\code{.__classVersion__}:}{Object of class
\code{"\linkS4class{Versions}"} describing the versions of the class.
}
}
}
\section{Extends}{
Class \code{"\linkS4class{VersionedBiobase}"}, directly.
Class \code{"\linkS4class{Versioned}"}, by class "VersionedBiobase", distance 2.
}
\section{Methods}{
Methods defined in derived classes may override the methods described
here.
\describe{
\item{[}{\code{signature(x = "pSet")}: Subset current object and
return object of same class. }
\item{[[}{\code{signature(x = "pSet")}: Direct access to individual
spectra. }
\item{abstract}{Access abstract in \code{experimentData}. }
\item{assayData}{\code{signature(object = "pSet")}: Access the
\code{assayData} slot. Returns an \code{environment}. }
    \item{description}{\code{signature(x = "pSet")}: Synonymous with
experimentData. }
\item{dim}{\code{signature(x = "pSet")}: Returns the dimensions of
the \code{phenoData} slot. }
\item{experimentData}{\code{signature(x = "pSet")}: Access details
of experimental methods. }
\item{featureData}{\code{signature(x = "pSet")}: Access the
\code{featureData} slot. }
\item{fData}{\code{signature(x = "pSet")}: Access feature data
information. }
\item{featureNames}{\code{signature(x = "pSet")}: Coordinate access
of feature names (e.g spectra, peptides or proteins) in
\code{assayData} slot. }
\item{fileNames}{\code{signature(object = "pSet")}: Access file
names in the \code{processingData} slot. }
\item{fromFile}{\code{signature(object = "pSet")}: Access raw data
      file indexes (to be found in the \code{processingData} slot) from
      which the individual object's spectra were read. }
\item{centroided}{\code{signature(object = "pSet")}: Indicates
      whether individual spectra are centroided ('TRUE') or uncentroided
('FALSE'). Use \code{centroided(object) <- value} to update a
whole experiment, ensuring that \code{object} and \code{value}
have the same length. }
\item{fvarMetadata}{\code{signature(x = "pSet")}: Access metadata
describing features reported in \code{fData}. }
\item{fvarLabels}{\code{signature(x = "pSet")}: Access variable
labels in \code{featureData}. }
\item{length}{\code{signature(x = "pSet")}: Returns the number of
features in the \code{assayData} slot. }
    \item{notes}{\code{signature(x = "pSet")}: Retrieve any
unstructured notes associated with \code{pSet} in the
\code{experimentData} slot. }
\item{pData}{\code{signature(x = "pSet")}: Access sample data
information. }
\item{phenoData}{\code{signature(x = "pSet")}: Access the
\code{phenoData} slot. }
\item{processingData}{\code{signature(object = "pSet")}: Access the
\code{processingData} slot. }
\item{protocolData}{\code{signature(x = "pSet")}: Access the
\code{protocolData} slot. }
\item{pubMedIds}{\code{signature(x = "pSet")}: Access PMIDs in
\code{experimentData}. }
\item{sampleNames}{\code{signature(x = "pSet")}: Access sample names
in \code{phenoData}. }
\item{spectra}{\code{signature(x = "pSet", ...)}: Access the
\code{assayData} slot, returning the features as a \code{list}.
Additional arguments are currently ignored. }
\item{varMetadata}{\code{signature(x = "pSet")}: Access metadata
describing variables reported in \code{pData}. }
\item{varLabels}{\code{signature(x = "pSet")}: Access variable
labels in \code{phenoData}. }
\item{acquisitionNum}{\code{signature(object = "pSet")}: Accessor
for spectra acquisition numbers. }
\item{scanIndex}{\code{signature(object = "pSet")}: Accessor
for spectra scan indices. }
\item{collisionEnergy}{\code{signature(object = "pSet")}: Accessor
for MS2 spectra collision energies. }
\item{intensity}{\code{signature(object = "pSet", ...)}: Accessor
      for spectra intensities, returned as a named list. Additional
arguments are currently ignored. }
\item{msInfo}{\code{signature(object = "pSet")}: Prints the MIAPE-MS
meta-data stored in the \code{experimentData} slot. }
\item{msLevel}{\code{signature(object = "pSet")}: Accessor for
spectra MS levels. }
\item{mz}{\code{signature(object = "pSet", ...)}: Accessor for spectra
M/Z values, returned as a named list. Additional arguments are
currently ignored. }
\item{peaksCount}{\code{signature(object = "pSet")}: Accessor for
      spectra peak counts. }
\item{peaksCount}{\code{signature(object = "pSet", scans =
      "numeric")}: Accessor to \code{scans} spectra peak counts. }
\item{polarity}{\code{signature(object = "pSet")}: Accessor for MS1
spectra polarities. }
\item{precursorCharge}{\code{signature(object = "pSet")}: Accessor
for MS2 precursor charges. }
\item{precursorIntensity}{\code{signature(object = "pSet")}: Accessor
for MS2 precursor intensity. }
\item{precursorMz}{\code{signature(object = "pSet")}: Accessor
for MS2 precursor M/Z values. }
\item{precAcquisitionNum}{\code{signature(object = "pSet")}: Accessor
for MS2 precursor scan numbers. }
\item{precScanNum}{see \code{precAcquisitionNum}. }
\item{rtime}{\code{signature(object = "pSet", ...)}: Accessor for spectra
retention times. Additional arguments are currently ignored. }
\item{tic}{\code{signature(object = "pSet", ...)}: Accessor for spectra
total ion counts. Additional arguments are currently ignored. }
\item{ionCount}{\code{signature(object = "pSet")}: Accessor for spectra
total ion current. }
\item{header}{\code{signature(object = "pSet")}: Returns a data
frame containing all available spectra parameters (MSn only). }
\item{header}{\code{signature(object = "pSet", scans = "numeric")}:
Returns a data frame containing \code{scans} spectra parameters
(MSn only). }
}
Additional accessors for the experimental metadata
(\code{experimentData} slot) are defined. See
\code{"\linkS4class{MIAPE}"} for details.
}
\references{
The \code{"\linkS4class{eSet}"} class, on which \code{pSet} is based.
}
\author{
Laurent Gatto <lg390@cam.ac.uk>
}
\seealso{
\code{"\linkS4class{MSnExp}"} for an instantiatable application of
\code{pSet}.
}
\examples{
showClass("pSet")
}
\keyword{classes}
| /man/pSet-class.Rd | no_license | martifis/MSnbase | R | false | false | 11,124 | rd | \name{pSet-class}
\Rdversion{1.1}
\docType{class}
\alias{pSet-class}
\alias{pSet}
\alias{class:pSet}
\alias{[,pSet-method}
\alias{[,pSet,ANY,ANY-method}
\alias{[,pSet,ANY,ANY,ANY-method}
\alias{[[,pSet-method}
\alias{[[,pSet,ANY,ANY-method}
\alias{abstract,pSet-method}
\alias{acquisitionNum,pSet-method}
\alias{scanIndex,pSet-method}
\alias{assayData,pSet-method}
\alias{collisionEnergy,pSet-method}
\alias{dim,pSet-method}
\alias{dim}
\alias{experimentData,pSet-method}
\alias{fData,pSet-method}
\alias{fData<-,pSet,data.frame-method}
\alias{featureData,pSet-method}
\alias{featureNames,pSet-method}
\alias{fileNames,pSet-method}
\alias{fileNames}
\alias{fromFile,pSet-method}
\alias{centroided,pSet-method}
\alias{centroided<-,pSet,ANY-method}
\alias{centroided<-,pSet,logical-method}
\alias{fvarLabels,pSet-method}
\alias{fvarMetadata,pSet-method}
\alias{header,pSet,missing-method}
\alias{header,pSet,numeric-method}
\alias{header}
\alias{intensity,pSet-method}
\alias{length,pSet-method}
\alias{length}
\alias{msInfo,pSet-method}
\alias{msLevel,pSet-method}
\alias{mz,pSet-method}
\alias{notes,pSet-method}
\alias{pData,pSet-method}
\alias{peaksCount,pSet,missing-method}
\alias{peaksCount,pSet,numeric-method}
\alias{phenoData,pSet-method}
\alias{polarity,pSet-method}
\alias{precursorCharge,pSet-method}
\alias{precursorIntensity,pSet-method}
\alias{precursorMz,pSet-method}
\alias{precScanNum,pSet-method}
\alias{precAcquisitionNum,pSet-method}
\alias{processingData,pSet-method}
\alias{processingData}
\alias{protocolData,pSet-method}
\alias{pubMedIds,pSet-method}
\alias{rtime,pSet-method}
\alias{sampleNames,pSet-method}
\alias{spectra,pSet-method}
\alias{spectra}
\alias{tic,pSet-method}
\alias{ionCount,pSet-method}
\alias{varLabels,pSet-method}
\alias{varMetadata,pSet-method}
\alias{exptitle,pSet-method}
\alias{expemail,pSet-method}
\alias{ionSource,pSet-method}
\alias{ionSourceDetails,pSet-method}
\alias{analyser,pSet-method}
\alias{analyzer,pSet-method}
\alias{analyserDetails,pSet-method}
\alias{analyzerDetails,pSet-method}
\alias{instrumentModel,pSet-method}
\alias{instrumentManufacturer,pSet-method}
\alias{instrumentCustomisations,pSet-method}
\alias{detectorType,pSet-method}
\alias{description,pSet-method}
\title{
Class to Contain Raw Mass-Spectrometry Assays and Experimental
Metadata
}
\description{
Container for high-throughput mass-spectrometry assays and
experimental metadata. This class is based on Biobase's
  \code{"\linkS4class{eSet}"} virtual class, with the notable exception
  that the 'assayData' slot is an environment containing objects of class
  \code{"\linkS4class{Spectrum}"}.
}
\section{Objects from the Class}{
A virtual Class: No objects may be created from it.
See \code{"\linkS4class{MSnExp}"} for instantiatable sub-classes.
}
\section{Slots}{
\describe{
\item{\code{assayData}:}{Object of class \code{"environment"}
containing the MS spectra (see \code{"\linkS4class{Spectrum1}"}
and \code{"\linkS4class{Spectrum2}"}).
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{phenoData}:}{Object of class
\code{"\linkS4class{AnnotatedDataFrame}"} containing
experimenter-supplied variables describing sample (i.e the
individual tags for an labelled MS experiment)
See \code{\link{phenoData}} for more details.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{featureData}:}{Object of class
\code{"\linkS4class{AnnotatedDataFrame}"} containing variables
      describing features (spectra in our case), e.g. identification data,
peptide sequence, identification score,... (inherited from
\code{"\linkS4class{eSet}"}). See \code{\link{featureData}} for
more details.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{experimentData}:}{Object of class
\code{"\linkS4class{MIAPE}"}, containing details of experimental
methods. See \code{\link{experimentData}} for more details.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{protocolData}:}{Object of class
\code{"\linkS4class{AnnotatedDataFrame}"} containing
equipment-generated variables (inherited from
\code{"\linkS4class{eSet}"}). See \code{\link{protocolData}} for
more details.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{processingData}:}{Object of class
\code{"\linkS4class{MSnProcess}"} that records all processing.
Slot is inherited from \code{"\linkS4class{pSet}"}. }
\item{\code{.cache}:}{Object of class \code{environment} used to
cache data. Under development. }
\item{\code{.__classVersion__}:}{Object of class
\code{"\linkS4class{Versions}"} describing the versions of the class.
}
}
}
\section{Extends}{
Class \code{"\linkS4class{VersionedBiobase}"}, directly.
Class \code{"\linkS4class{Versioned}"}, by class "VersionedBiobase", distance 2.
}
\section{Methods}{
Methods defined in derived classes may override the methods described
here.
\describe{
\item{[}{\code{signature(x = "pSet")}: Subset current object and
return object of same class. }
\item{[[}{\code{signature(x = "pSet")}: Direct access to individual
spectra. }
\item{abstract}{Access abstract in \code{experimentData}. }
\item{assayData}{\code{signature(object = "pSet")}: Access the
\code{assayData} slot. Returns an \code{environment}. }
    \item{description}{\code{signature(x = "pSet")}: Synonymous with
experimentData. }
\item{dim}{\code{signature(x = "pSet")}: Returns the dimensions of
the \code{phenoData} slot. }
\item{experimentData}{\code{signature(x = "pSet")}: Access details
of experimental methods. }
\item{featureData}{\code{signature(x = "pSet")}: Access the
\code{featureData} slot. }
\item{fData}{\code{signature(x = "pSet")}: Access feature data
information. }
\item{featureNames}{\code{signature(x = "pSet")}: Coordinate access
of feature names (e.g spectra, peptides or proteins) in
\code{assayData} slot. }
\item{fileNames}{\code{signature(object = "pSet")}: Access file
names in the \code{processingData} slot. }
\item{fromFile}{\code{signature(object = "pSet")}: Access raw data
      file indexes (to be found in the \code{processingData} slot) from
      which the individual object's spectra were read. }
\item{centroided}{\code{signature(object = "pSet")}: Indicates
      whether individual spectra are centroided ('TRUE') or uncentroided
('FALSE'). Use \code{centroided(object) <- value} to update a
whole experiment, ensuring that \code{object} and \code{value}
have the same length. }
\item{fvarMetadata}{\code{signature(x = "pSet")}: Access metadata
describing features reported in \code{fData}. }
\item{fvarLabels}{\code{signature(x = "pSet")}: Access variable
labels in \code{featureData}. }
\item{length}{\code{signature(x = "pSet")}: Returns the number of
features in the \code{assayData} slot. }
    \item{notes}{\code{signature(x = "pSet")}: Retrieve any
unstructured notes associated with \code{pSet} in the
\code{experimentData} slot. }
\item{pData}{\code{signature(x = "pSet")}: Access sample data
information. }
\item{phenoData}{\code{signature(x = "pSet")}: Access the
\code{phenoData} slot. }
\item{processingData}{\code{signature(object = "pSet")}: Access the
\code{processingData} slot. }
\item{protocolData}{\code{signature(x = "pSet")}: Access the
\code{protocolData} slot. }
\item{pubMedIds}{\code{signature(x = "pSet")}: Access PMIDs in
\code{experimentData}. }
\item{sampleNames}{\code{signature(x = "pSet")}: Access sample names
in \code{phenoData}. }
\item{spectra}{\code{signature(x = "pSet", ...)}: Access the
\code{assayData} slot, returning the features as a \code{list}.
Additional arguments are currently ignored. }
\item{varMetadata}{\code{signature(x = "pSet")}: Access metadata
describing variables reported in \code{pData}. }
\item{varLabels}{\code{signature(x = "pSet")}: Access variable
labels in \code{phenoData}. }
\item{acquisitionNum}{\code{signature(object = "pSet")}: Accessor
for spectra acquisition numbers. }
\item{scanIndex}{\code{signature(object = "pSet")}: Accessor
for spectra scan indices. }
\item{collisionEnergy}{\code{signature(object = "pSet")}: Accessor
for MS2 spectra collision energies. }
\item{intensity}{\code{signature(object = "pSet", ...)}: Accessor
      for spectra intensities, returned as a named list. Additional
arguments are currently ignored. }
\item{msInfo}{\code{signature(object = "pSet")}: Prints the MIAPE-MS
meta-data stored in the \code{experimentData} slot. }
\item{msLevel}{\code{signature(object = "pSet")}: Accessor for
spectra MS levels. }
\item{mz}{\code{signature(object = "pSet", ...)}: Accessor for spectra
M/Z values, returned as a named list. Additional arguments are
currently ignored. }
\item{peaksCount}{\code{signature(object = "pSet")}: Accessor for
      spectra peak counts. }
\item{peaksCount}{\code{signature(object = "pSet", scans =
      "numeric")}: Accessor to \code{scans} spectra peak counts. }
\item{polarity}{\code{signature(object = "pSet")}: Accessor for MS1
spectra polarities. }
\item{precursorCharge}{\code{signature(object = "pSet")}: Accessor
for MS2 precursor charges. }
\item{precursorIntensity}{\code{signature(object = "pSet")}: Accessor
for MS2 precursor intensity. }
\item{precursorMz}{\code{signature(object = "pSet")}: Accessor
for MS2 precursor M/Z values. }
\item{precAcquisitionNum}{\code{signature(object = "pSet")}: Accessor
for MS2 precursor scan numbers. }
\item{precScanNum}{see \code{precAcquisitionNum}. }
\item{rtime}{\code{signature(object = "pSet", ...)}: Accessor for spectra
retention times. Additional arguments are currently ignored. }
\item{tic}{\code{signature(object = "pSet", ...)}: Accessor for spectra
total ion counts. Additional arguments are currently ignored. }
\item{ionCount}{\code{signature(object = "pSet")}: Accessor for spectra
total ion current. }
\item{header}{\code{signature(object = "pSet")}: Returns a data
frame containing all available spectra parameters (MSn only). }
\item{header}{\code{signature(object = "pSet", scans = "numeric")}:
Returns a data frame containing \code{scans} spectra parameters
(MSn only). }
}
Additional accessors for the experimental metadata
(\code{experimentData} slot) are defined. See
\code{"\linkS4class{MIAPE}"} for details.
}
\references{
The \code{"\linkS4class{eSet}"} class, on which \code{pSet} is based.
}
\author{
Laurent Gatto <lg390@cam.ac.uk>
}
\seealso{
\code{"\linkS4class{MSnExp}"} for an instantiatable application of
\code{pSet}.
}
\examples{
showClass("pSet")
}
\keyword{classes}
|
#============================== CONFIG ============================
# Project folder, input file and response variable for the regression study.
ABS_PATH <- "C:/Users/marco/Documents/UNISA/SDA/progetto/SDAgruppo2"
DATASET_FILENAME <- "RegressionData_final.csv"
Y_LABEL <- "Y_PressingCapability"
PREDICTORS_NUMBER <- 10
#================================ START =================================
setwd(ABS_PATH)
source("./utils.R")
ds <- ds.init(DATASET_FILENAME, Y_LABEL, PREDICTORS_NUMBER)
#==================== REGRESSION WITHOUT INTERACTIONS ====================
# Fit the full additive model (all predictors, no interactions) and
# print its summary together with a cross-validated test MSE.
baseModel <- lm.byIndices(ds, -1)
lm.inspect(baseModel, 10, 10)
# Recorded lm.inspect() output for the base model, kept for reference.
# BUG FIX: this transcript was originally stored as a bare single-quoted
# string literal, but the transcript itself contains single quotes
# ("Signif. codes: 0 '***' ..."), which terminates the literal early and
# breaks the script when sourced. It is kept as comments instead.
# [1] "================= SUMMARY ================="
# Call:
# lm(formula = f, data = data, x = T, y = T)
# Residuals:
# Min 1Q Median 3Q Max
# -3.6467 -1.0333 -0.0055 1.2385 4.4370
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -0.03174 0.17001 -0.187 0.852
# X_Temperature -0.98848 0.16675 -5.928 5.72e-08 ***
# X_Humidity -0.02227 0.19662 -0.113 0.910
# X_Altitude -0.17045 0.16865 -1.011 0.315
# X_ClimaticConditions -1.06987 0.15826 -6.760 1.39e-09 ***
# X_RestTimeFromLastMatch 4.82436 0.17740 27.195 < 2e-16 ***
# X_AvgPlayerValue 6.13091 0.17089 35.876 < 2e-16 ***
# X_MatchRelevance 8.03031 0.15943 50.367 < 2e-16 ***
# X_AvgGoalConcededLastMatches 0.92545 0.18139 5.102 1.88e-06 ***
# X_SupportersImpact 2.05277 0.16156 12.706 < 2e-16 ***
# X_OpposingSupportersImpact -0.83955 0.17224 -4.874 4.73e-06 ***
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Residual standard error: 1.635 on 89 degrees of freedom
# Multiple R-squared: 0.9867, Adjusted R-squared: 0.9852
# F-statistic: 662 on 10 and 89 DF, p-value: < 2.2e-16
# [1] "================== MSE =================="
# [1] 3.002713
#====================== INSPECT RELATIONSHIPS ============================
# Scatter plot of each predictor (columns 2..PREDICTORS_NUMBER+1) against
# the response, to eyeball candidate non-linear relationships.
showPlotsAgainstOutput(ds, 2:(PREDICTORS_NUMBER+1))
#======================== TEST RELATIONSHIPS =============================
# Candidate 1: quadratic terms for rest time and opposing supporters.
possibleDependencies <- list(
  "X_Temperature",
  "X_ClimaticConditions",
  "X_RestTimeFromLastMatch",
  "I(X_RestTimeFromLastMatch^2)",
  "X_AvgPlayerValue",
  "X_MatchRelevance",
  "X_AvgGoalConcededLastMatches",
  "X_SupportersImpact",
  "X_OpposingSupportersImpact",
  "I(X_OpposingSupportersImpact^2)"
)
dependencyModel <- lm.byFormulaChunks(ds, possibleDependencies)
lm.inspect(dependencyModel, 5)
# Candidate 2: drop the quadratic rest-time term.
possibleDependencies <- list(
  "X_Temperature",
  "X_ClimaticConditions",
  "X_RestTimeFromLastMatch",
  "X_AvgPlayerValue",
  "X_MatchRelevance",
  "X_AvgGoalConcededLastMatches",
  "X_SupportersImpact",
  "X_OpposingSupportersImpact",
  "I(X_OpposingSupportersImpact^2)"
)
dependencyModel <- lm.byFormulaChunks(ds, possibleDependencies)
lm.inspect(dependencyModel, 5)
# RSquared: 0.9872, MSE: 2.881993
# Candidate 3: add a quadratic average-player-value term.
possibleDependencies <- list(
  "X_Temperature",
  "X_ClimaticConditions",
  "X_RestTimeFromLastMatch",
  "I(X_AvgPlayerValue^2)",
  "X_AvgPlayerValue",
  "X_MatchRelevance",
  "X_AvgGoalConcededLastMatches",
  "X_SupportersImpact",
  "X_OpposingSupportersImpact",
  "I(X_OpposingSupportersImpact^2)"
)
dependencyModel <- lm.byFormulaChunks(ds, possibleDependencies)
lm.inspect(dependencyModel, 5)
# RSquared: 0.9879, MSE: 2.838846
#======================== INSPECT INTERACTIONS =============================
# For every pair of distinct predictors, refit the base model plus that
# single interaction and record the resulting R-squared. Cells default to
# the base model's R-squared so "no improvement" reads as the baseline.
baseRSquared <- summary(lm.byIndices(ds, -1))$r.squared
# showHeatmap = TRUE spelled out: T is a reassignable alias, unsafe in scripts.
interactionMatrix <- inspectInteractionMatrix(ds, default = baseRSquared, showHeatmap = TRUE)
#======================== TEST INTERACTIONS =============================
# Dependencies retained from the relationship tests above.
possibleDependencies <- list(
  "X_Temperature",
  "X_ClimaticConditions",
  "X_RestTimeFromLastMatch",
  "I(X_AvgPlayerValue^2)",
  "X_AvgPlayerValue",
  "X_MatchRelevance",
  "X_AvgGoalConcededLastMatches",
  "X_SupportersImpact",
  "X_OpposingSupportersImpact",
  "I(X_OpposingSupportersImpact^2)"
)
# Add candidate interaction terms one at a time and refit.
possibleInteractions <- list("X_RestTimeFromLastMatch*X_OpposingSupportersImpact")
dependencyModelWithPossibleInteractions <- lm.byFormulaChunks(ds, append(possibleDependencies, possibleInteractions))
lm.inspect(dependencyModelWithPossibleInteractions, 5)
possibleInteractions <- list(
  "X_RestTimeFromLastMatch*X_OpposingSupportersImpact",
  "X_AvgGoalConcededLastMatches*X_Humidity"
)
dependencyModelWithPossibleInteractions <- lm.byFormulaChunks(ds, append(possibleDependencies, possibleInteractions))
lm.inspect(dependencyModelWithPossibleInteractions, 5)
possibleInteractions <- list(
  "X_RestTimeFromLastMatch*X_OpposingSupportersImpact",
  "X_AvgGoalConcededLastMatches*X_Humidity",
  "X_Altitude*X_RestTimeFromLastMatch"
)
dependencyModelWithPossibleInteractions <- lm.byFormulaChunks(ds, append(possibleDependencies, possibleInteractions))
lm.inspect(dependencyModelWithPossibleInteractions, 5)
#==================== BEST SUBSET SELECTION WITH INTERACTIONS ===============
# Candidate non-linear terms and interactions offered to the exhaustive
# best-subset search.
possibleRelationships <- list(
  "I(X_AvgPlayerValue^2)",
  "I(X_OpposingSupportersImpact^2)",
  "I(X_Temperature^2)",
  "X_RestTimeFromLastMatch*X_OpposingSupportersImpact",
  "X_AvgGoalConcededLastMatches*X_Humidity",
  "X_Altitude*X_RestTimeFromLastMatch"
)
# verbose = TRUE spelled out (T is a reassignable alias, unsafe in scripts)
bestSubsets <- bestSubsetSelection(ds, relationships = possibleRelationships, nMSE = 10, folds = 5, verbose = TRUE, method = "exhaustive")
ds.prettyPlot(bestSubsets$MSE, xlab = "Number of predictors", ylab = "CV test MSE", title = "5-fold cross-validation Test MSE")
# Inspect the 10-predictor subset chosen from the MSE curve above.
bestSubset <- bestSubsets$model[[10]]
lm.inspect(bestSubset, 10, 10)
# [1] "================= SUMMARY ================="
#
# Call:
# lm(formula = f, data = data, x = TRUE, y = TRUE)
#
# Residuals:
# Min 1Q Median 3Q Max
# -3.1699 -0.9574 -0.0742 0.8877 4.2548
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -0.4759 0.2377 -2.002 0.04836 *
# X_Temperature -0.9226 0.1554 -5.937 5.50e-08 ***
# X_ClimaticConditions -1.0942 0.1445 -7.575 3.21e-11 ***
# X_AvgPlayerValue 6.1266 0.1607 38.127 < 2e-16 ***
# X_MatchRelevance 7.9999 0.1506 53.124 < 2e-16 ***
# X_AvgGoalConcededLastMatches 0.9607 0.1661 5.786 1.06e-07 ***
# X_SupportersImpact 2.0948 0.1525 13.734 < 2e-16 ***
# I(X_AvgPlayerValue^2) 0.3824 0.1702 2.246 0.02718 *
# X_RestTimeFromLastMatch 4.7379 0.1677 28.249 < 2e-16 ***
# X_OpposingSupportersImpact -0.6892 0.1691 -4.076 9.93e-05 ***
# X_RestTimeFromLastMatch:X_OpposingSupportersImpact 0.5148 0.1852 2.779 0.00665 **
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#
# Residual standard error: 1.541 on 89 degrees of freedom
# Multiple R-squared: 0.9882, Adjusted R-squared: 0.9869
# F-statistic: 746.1 on 10 and 89 DF, p-value: < 2.2e-16
#
# [1] "================== MSE =================="
# [1] 2.713025
#============= BEST SUBSETS FOR SELECTED NUMBER OF PREDICTORS ===============
# Same candidate non-linearities and interactions as above.
possibleRelationships <- list(
  "I(X_AvgPlayerValue^2)",
  "I(X_OpposingSupportersImpact^2)",
  "I(X_Temperature^2)",
  "X_RestTimeFromLastMatch*X_OpposingSupportersImpact",
  "X_AvgGoalConcededLastMatches*X_Humidity",
  "X_Altitude*X_RestTimeFromLastMatch"
)
N_PREDICTORS_TO_INSPECT <- 7
# Rank the 10 best subsets with exactly 7 predictors by CV test MSE;
# verbose = TRUE spelled out (T is a reassignable alias).
bestSubsets <- bestSubsetsByPredictorsNumber(ds, relationships = possibleRelationships, nMSE = 10, folds = 5, nPredictors = N_PREDICTORS_TO_INSPECT, nSubsets = 10, verbose = TRUE)
ds.prettyPlot(bestSubsets$MSE, xlab = "Rank", ylab = "CV test MSE", title = "5-fold cross-validation Test MSE")
# Keep the lowest-MSE subset for later comparison.
bestSubset <- bestSubsets$model[[which.min(bestSubsets$MSE)]]
lm.inspect(bestSubset, 10, 10)
#=================== FORWARD SELECTION WITH INTERACTIONS =======================
# BUG FIX: the original list() call ended with a trailing comma after
# 'I(X_Temperature^2)', which R rejects at run time ("argument 4 is empty").
possibleRelationships <- list(
  "I(X_AvgPlayerValue^2)",
  "I(X_OpposingSupportersImpact^2)",
  "I(X_Temperature^2)"
)
# verbose = TRUE spelled out (T is a reassignable alias, unsafe in scripts)
bestSubsets <- bestSubsetSelection(ds, relationships = possibleRelationships, nMSE = 10, folds = 5, method = "forward", nvmax = 8, verbose = TRUE)
bestSubset <- bestSubsets$model[[which.min(bestSubsets$MSE)]]
# x-axis: training R-squared of each model along the forward path.
ds.prettyPlot(bestSubsets$MSE, xdata = unlist(map(bestSubsets$model, function(model) summary(model)$r.squared)), xlab = "Number of predictors", ylab = "CV test MSE", title = "5-fold cross-validation Test MSE")
#============================ RIDGE E LASSO =================================
# Non-linear terms and interactions must be added before scaling so the
# shrinkage methods see the expanded design matrix.
bestInteractions <- list(
  "I(X_AvgPlayerValue^2)",
  "I(X_OpposingSupportersImpact^2)",
  "I(X_Temperature^2)",
  "X_RestTimeFromLastMatch*X_OpposingSupportersImpact",
  "X_AvgGoalConcededLastMatches*X_Humidity",
  "X_Altitude*X_RestTimeFromLastMatch"
)
ds <- ds.init(DATASET_FILENAME, Y_LABEL, PREDICTORS_NUMBER)
ds_scaled <- addNonLinearities(ds, bestInteractions)
# Wide log-spaced lambda grid; TRUE spelled out everywhere (T is a
# reassignable alias, unsafe in scripts).
lambda_grid <- 10^seq(4, -6, length = 10000)
models <- lm.shrinkage(ds_scaled, lambda_grid, nMSE = 10, folds = 10, showPlot = TRUE)
min(models$ridge$cvm)
models$ridge$bestlambda
coef(models$lasso$model, s = models$lasso$bestlambda)
coef(models$ridge$model, s = models$ridge$bestlambda)
#============================= ELASTIC NET ===============================
bestInteractions <- list(
  "I(X_AvgPlayerValue^2)",
  "I(X_OpposingSupportersImpact^2)",
  "I(X_Temperature^2)",
  "X_RestTimeFromLastMatch*X_OpposingSupportersImpact",
  "X_AvgGoalConcededLastMatches*X_Humidity",
  "X_Altitude*X_RestTimeFromLastMatch"
)
ds <- ds.init(DATASET_FILENAME, Y_LABEL, PREDICTORS_NUMBER)
ds_scaled <- addNonLinearities(ds, bestInteractions)
lambda_grid <- 10^seq(4, -6, length = 2000)
alpha_grid <- seq(0, 1, length = 100)
# Reference MSE from the best subset model, shown for comparison.
best_mse <- mean_cvMSE(bestSubset, 10, 10)
MSEs <- lm.elasticNet(ds_scaled, alpha_grid, lambda_grid, nMSE = 5, folds = 5, best_mse = best_mse, showPlot = TRUE, verbose = TRUE)
lm.plotElasticNet(alpha_grid, MSEs, best_mse)
#======================= CONCLUSION =======================
# Export the ridge coefficients at the best lambda.
exportCOEF(coef(models$ridge$model, s = models$ridge$bestlambda), TRUE)
| /consegna/PressingCapability.R | no_license | UniversityProjectsAtUnisa/exam-project_sda | R | false | false | 11,760 | r | #============================== CONFIG ============================
# Project folder, input file and response variable for the regression study.
ABS_PATH <- "C:/Users/marco/Documents/UNISA/SDA/progetto/SDAgruppo2"
DATASET_FILENAME <- "RegressionData_final.csv"
Y_LABEL <- "Y_PressingCapability"
PREDICTORS_NUMBER <- 10
#================================ START =================================
setwd(ABS_PATH)
source("./utils.R")
ds <- ds.init(DATASET_FILENAME, Y_LABEL, PREDICTORS_NUMBER)
#==================== REGRESSION WITHOUT INTERACTIONS ====================
# Fit the full additive model (all predictors, no interactions) and
# print its summary together with a cross-validated test MSE.
baseModel <- lm.byIndices(ds, -1)
lm.inspect(baseModel, 10, 10)
# Recorded lm.inspect() output for the base model, kept for reference.
# BUG FIX: this transcript was originally stored as a bare single-quoted
# string literal, but the transcript itself contains single quotes
# ("Signif. codes: 0 '***' ..."), which terminates the literal early and
# breaks the script when sourced. It is kept as comments instead.
# [1] "================= SUMMARY ================="
# Call:
# lm(formula = f, data = data, x = T, y = T)
# Residuals:
# Min 1Q Median 3Q Max
# -3.6467 -1.0333 -0.0055 1.2385 4.4370
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -0.03174 0.17001 -0.187 0.852
# X_Temperature -0.98848 0.16675 -5.928 5.72e-08 ***
# X_Humidity -0.02227 0.19662 -0.113 0.910
# X_Altitude -0.17045 0.16865 -1.011 0.315
# X_ClimaticConditions -1.06987 0.15826 -6.760 1.39e-09 ***
# X_RestTimeFromLastMatch 4.82436 0.17740 27.195 < 2e-16 ***
# X_AvgPlayerValue 6.13091 0.17089 35.876 < 2e-16 ***
# X_MatchRelevance 8.03031 0.15943 50.367 < 2e-16 ***
# X_AvgGoalConcededLastMatches 0.92545 0.18139 5.102 1.88e-06 ***
# X_SupportersImpact 2.05277 0.16156 12.706 < 2e-16 ***
# X_OpposingSupportersImpact -0.83955 0.17224 -4.874 4.73e-06 ***
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Residual standard error: 1.635 on 89 degrees of freedom
# Multiple R-squared: 0.9867, Adjusted R-squared: 0.9852
# F-statistic: 662 on 10 and 89 DF, p-value: < 2.2e-16
# [1] "================== MSE =================="
# [1] 3.002713
#====================== INSPECT RELATIONSHIPS ============================
# Scatter plot of each predictor (columns 2..PREDICTORS_NUMBER+1) against
# the response, to eyeball candidate non-linear relationships.
showPlotsAgainstOutput(ds, 2:(PREDICTORS_NUMBER+1))
#======================== TEST RELATIONSHIPS =============================
possibleDependencies = list(
'X_Temperature',
'X_ClimaticConditions',
'X_RestTimeFromLastMatch',
'I(X_RestTimeFromLastMatch^2)',
'X_AvgPlayerValue',
'X_MatchRelevance',
'X_AvgGoalConcededLastMatches',
'X_SupportersImpact',
'X_OpposingSupportersImpact',
'I(X_OpposingSupportersImpact^2)'
)
dependencyModel = lm.byFormulaChunks(ds, possibleDependencies)
lm.inspect(dependencyModel, 5)
possibleDependencies = list(
'X_Temperature',
'X_ClimaticConditions',
'X_RestTimeFromLastMatch',
'X_AvgPlayerValue',
'X_MatchRelevance',
'X_AvgGoalConcededLastMatches',
'X_SupportersImpact',
'X_OpposingSupportersImpact',
'I(X_OpposingSupportersImpact^2)'
)
dependencyModel = lm.byFormulaChunks(ds, possibleDependencies)
lm.inspect(dependencyModel, 5)
# RSquared: 0.9872, MSE: 2.881993
possibleDependencies = list(
'X_Temperature',
'X_ClimaticConditions',
'X_RestTimeFromLastMatch',
'I(X_AvgPlayerValue^2)',
'X_AvgPlayerValue',
'X_MatchRelevance',
'X_AvgGoalConcededLastMatches',
'X_SupportersImpact',
'X_OpposingSupportersImpact',
'I(X_OpposingSupportersImpact^2)'
)
dependencyModel = lm.byFormulaChunks(ds, possibleDependencies)
lm.inspect(dependencyModel, 5)
# RSquared: 0.9879, MSE: 2.838846
#======================== INSPECT INTERACTIONS =============================
# Collect rsquared for every linear model obtained by adding every possible
# interaction between two distinct predictors to the base model.
# Set base rsquared as default value
baseRSquared = summary( lm.byIndices(ds, -1) )$r.squared
interactionMatrix = inspectInteractionMatrix(ds, default=baseRSquared, showHeatmap = T)
#======================== TEST INTERACTIONS =============================
possibleDependencies = list(
'X_Temperature',
'X_ClimaticConditions',
'X_RestTimeFromLastMatch',
'I(X_AvgPlayerValue^2)',
'X_AvgPlayerValue',
'X_MatchRelevance',
'X_AvgGoalConcededLastMatches',
'X_SupportersImpact',
'X_OpposingSupportersImpact',
'I(X_OpposingSupportersImpact^2)'
)
possibleInteractions = list('X_RestTimeFromLastMatch*X_OpposingSupportersImpact')
dependencyModelWithPossibleInteractions = lm.byFormulaChunks(ds, append(possibleDependencies, possibleInteractions))
lm.inspect(dependencyModelWithPossibleInteractions, 5)
possibleInteractions = list('X_RestTimeFromLastMatch*X_OpposingSupportersImpact',
'X_AvgGoalConcededLastMatches*X_Humidity'
)
dependencyModelWithPossibleInteractions = lm.byFormulaChunks(ds, append(possibleDependencies, possibleInteractions))
lm.inspect(dependencyModelWithPossibleInteractions, 5)
possibleInteractions = list('X_RestTimeFromLastMatch*X_OpposingSupportersImpact',
'X_AvgGoalConcededLastMatches*X_Humidity',
'X_Altitude*X_RestTimeFromLastMatch'
)
dependencyModelWithPossibleInteractions = lm.byFormulaChunks(ds, append(possibleDependencies, possibleInteractions))
lm.inspect(dependencyModelWithPossibleInteractions, 5)
#==================== BEST SUBSET SELECTION WITH INTERACTIONS ===============
# add non linearities for best subset selection
possibleRelationships = list(
'I(X_AvgPlayerValue^2)',
'I(X_OpposingSupportersImpact^2)',
'I(X_Temperature^2)',
'X_RestTimeFromLastMatch*X_OpposingSupportersImpact',
'X_AvgGoalConcededLastMatches*X_Humidity',
'X_Altitude*X_RestTimeFromLastMatch'
)
bestSubsets = bestSubsetSelection(ds, relationships=possibleRelationships, nMSE=10, folds=5, verbose=T, method="exhaustive")
ds.prettyPlot(bestSubsets$MSE, xlab="Number of predictors", ylab="CV test MSE", title="5-fold cross-validation Test MSE")
bestSubset = bestSubsets$model[[10]]
lm.inspect(bestSubset, 10, 10)
# [1] "================= SUMMARY ================="
#
# Call:
# lm(formula = f, data = data, x = TRUE, y = TRUE)
#
# Residuals:
# Min 1Q Median 3Q Max
# -3.1699 -0.9574 -0.0742 0.8877 4.2548
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) -0.4759 0.2377 -2.002 0.04836 *
# X_Temperature -0.9226 0.1554 -5.937 5.50e-08 ***
# X_ClimaticConditions -1.0942 0.1445 -7.575 3.21e-11 ***
# X_AvgPlayerValue 6.1266 0.1607 38.127 < 2e-16 ***
# X_MatchRelevance 7.9999 0.1506 53.124 < 2e-16 ***
# X_AvgGoalConcededLastMatches 0.9607 0.1661 5.786 1.06e-07 ***
# X_SupportersImpact 2.0948 0.1525 13.734 < 2e-16 ***
# I(X_AvgPlayerValue^2) 0.3824 0.1702 2.246 0.02718 *
# X_RestTimeFromLastMatch 4.7379 0.1677 28.249 < 2e-16 ***
# X_OpposingSupportersImpact -0.6892 0.1691 -4.076 9.93e-05 ***
# X_RestTimeFromLastMatch:X_OpposingSupportersImpact 0.5148 0.1852 2.779 0.00665 **
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#
# Residual standard error: 1.541 on 89 degrees of freedom
# Multiple R-squared: 0.9882, Adjusted R-squared: 0.9869
# F-statistic: 746.1 on 10 and 89 DF, p-value: < 2.2e-16
#
# [1] "================== MSE =================="
# [1] 2.713025
#============= BEST SUBSETS FOR SELECTED NUMBER OF PREDICTORS ===============
# add non linearities for best subset selection
possibleRelationships = list(
'I(X_AvgPlayerValue^2)',
'I(X_OpposingSupportersImpact^2)',
'I(X_Temperature^2)',
'X_RestTimeFromLastMatch*X_OpposingSupportersImpact',
'X_AvgGoalConcededLastMatches*X_Humidity',
'X_Altitude*X_RestTimeFromLastMatch'
)
N_PREDICTORS_TO_INSPECT = 7
bestSubsets = bestSubsetsByPredictorsNumber(ds, relationships=possibleRelationships, nMSE=10, folds=5, nPredictors=N_PREDICTORS_TO_INSPECT, nSubsets=10, verbose=T)
ds.prettyPlot(bestSubsets$MSE, xlab="Rank", ylab="CV test MSE", title="5-fold cross-validation Test MSE")
bestSubset = bestSubsets$model[[which.min(bestSubsets$MSE)]]
lm.inspect(bestSubset, 10, 10)
#=================== FORWARD SELECTION WITH INTERACTIONS =======================
# Candidate non-linear terms for forward stepwise selection.
# FIX: the original list() had a trailing comma after the last element,
# which makes R raise 'argument 4 is empty' when the call is evaluated.
possibleRelationships = list(
'I(X_AvgPlayerValue^2)',
'I(X_OpposingSupportersImpact^2)',
'I(X_Temperature^2)'
)
# Forward selection up to 8 predictors, scored by repeated 5-fold CV MSE.
bestSubsets = bestSubsetSelection(ds, relationships=possibleRelationships, nMSE=10, folds=5, method="forward", nvmax=8, verbose=T)
bestSubset = bestSubsets$model[[which.min(bestSubsets$MSE)]]
# Plot CV test MSE against each candidate model's R-squared.
ds.prettyPlot(bestSubsets$MSE, xdata=unlist(map(bestSubsets$model, function(model) summary(model)$r.squared)), xlab="Number of predictors", ylab="CV test MSE", title="5-fold cross-validation Test MSE")
#============================ RIDGE E LASSO =================================
# Add non linearities before scaling
bestInteractions = list(
'I(X_AvgPlayerValue^2)',
'I(X_OpposingSupportersImpact^2)',
'I(X_Temperature^2)',
'X_RestTimeFromLastMatch*X_OpposingSupportersImpact',
'X_AvgGoalConcededLastMatches*X_Humidity',
'X_Altitude*X_RestTimeFromLastMatch'
)
# Reload the dataset and append the non-linear terms (helpers from utils.R).
ds = ds.init(DATASET_FILENAME, Y_LABEL, PREDICTORS_NUMBER)
ds_scaled = addNonLinearities(ds, bestInteractions)
# Log-spaced regularization grid; lm.shrinkage fits both ridge and lasso
# with repeated (10x) 10-fold cross-validation.
lambda_grid = 10^seq(4, -6, length = 10000)
models = lm.shrinkage(ds_scaled, lambda_grid, nMSE=10, folds=10, showPlot=T)
# Best CV error and corresponding lambda for the ridge fit.
min(models$ridge$cvm)
models$ridge$bestlambda
# Coefficients at each model's best lambda (lasso may zero some out).
coef(models$lasso$model, s = models$lasso$bestlambda)
coef(models$ridge$model, s = models$ridge$bestlambda)
#============================= ELASTIC NET ===============================
# Same best non-linearities as the ridge/lasso section above; re-declared so
# this section can be re-run independently.
bestInteractions = list(
'I(X_AvgPlayerValue^2)',
'I(X_OpposingSupportersImpact^2)',
'I(X_Temperature^2)',
'X_RestTimeFromLastMatch*X_OpposingSupportersImpact',
'X_AvgGoalConcededLastMatches*X_Humidity',
'X_Altitude*X_RestTimeFromLastMatch'
)
ds = ds.init(DATASET_FILENAME, Y_LABEL, PREDICTORS_NUMBER)
ds_scaled = addNonLinearities(ds, bestInteractions)
# Coarser lambda grid than ridge/lasso (2000 points) since the alpha grid
# multiplies the number of fits (100 x 2000 combinations).
lambda_grid = 10^seq(4, -6, length = 2000)
alpha_grid = seq(0,1,length = 100)
# Reference line: repeated-CV MSE of the best-subset model selected earlier.
best_mse = mean_cvMSE(bestSubset, 10, 10)
MSEs = lm.elasticNet(ds_scaled, alpha_grid, lambda_grid, nMSE=5, folds=5, best_mse = best_mse, showPlot = T, verbose = T)
lm.plotElasticNet(alpha_grid, MSEs, best_mse)
#======================= CONCLUSION =======================
# Export the ridge coefficients at the best lambda (from the section above).
exportCOEF(coef(models$ridge$model, s = models$ridge$bestlambda), T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_lm.R
\name{my_lm}
\alias{my_lm}
\title{Function: my_lm}
\usage{
my_lm(formula, data)
}
\description{
Function: my_lm returns a table of regression information including "Estimate", "Std. Error", "t value", and "Pr(>|t|)".
It is a brief combination of lm() and summary().
Input: a regression formula and a data frame.
Output: a table containing the related coefficient statistics.
}
\examples{
my_lm(mpg ~ hp + wt, data = mtcars)
my_lm(mpg ~ hp + qsec + wt, data = mtcars)
}
| /man/my_lm.Rd | no_license | Qiaozf/Stat302.Proj3 | R | false | true | 532 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_lm.R
\name{my_lm}
\alias{my_lm}
\title{Function: my_lm}
\usage{
my_lm(formula, data)
}
\description{
Function: my_lm returns a table of regression information including "Estimate", "Std. Error", "t value", and "Pr(>|t|)".
It is a brief combination of lm() and summary().
Input: a regression formula and a data frame.
Output: a table containing the related coefficient statistics.
}
\examples{
my_lm(mpg ~ hp + wt, data = mtcars)
my_lm(mpg ~ hp + qsec + wt, data = mtcars)
}
|
#############################################################################
# Data science - R - Part 03: advanced data manipulation with R
# cgb@datanalytics.com, 2016-05-21
#
# The goal of this session is to learn to manipulate data using two
# important R packages: reshape2 and plyr
#############################################################################
#############################################################################
# reshape2
#############################################################################
# Installation:
install.packages("reshape2")
install.packages("plyr")
# Note: you can also use the RStudio menus to install packages (Tools...)
# Loading:
library(reshape2)
library(plyr)
#----------------------------------------------------------------------------
# long format (melted)
#----------------------------------------------------------------------------
pob.aragon.2014 <- read.table("data/pob_aragon_2014.csv", header = T, sep = "\t")
pob.aragon.2014
melt(pob.aragon.2014) # same data in another shape... long format!
# Exercise: take as much time as you need to understand thoroughly:
# - how pob.aragon.2014 has been transformed
# - that the information contained in both data sets is the same
pob.aragon <- read.table("data/pob_aragon.csv", header = T, sep = "\t")
pob.aragon
melt(pob.aragon) # horrible!
melt(pob.aragon, id.vars = c("Provincia", "Periodo")) # now that's better
# Exercise: what happens if you swap the order of province and period?
pob.aragon.long <- melt(pob.aragon, id.vars = c("Provincia", "Periodo"))
# A small digression:
arrange(pob.aragon.long, Periodo, Provincia) # do you prefer sorting this way?
# Note: the arrange function lives in the plyr package...
# Exercise: look up in ?arrange how to sort in descending order
# Exercise: take the airquality data set and put it in long format
# Exercise: compute the median value of the variables of long.airquality
#----------------------------------------------------------------------------
# Wide format (cast)
#----------------------------------------------------------------------------
pob.aragon.2014.largo <- melt(pob.aragon.2014)
pob.aragon.2014.largo
# from the long format you can produce several kinds of wide formats:
dcast(pob.aragon.2014.largo, Provincia ~ variable)
dcast(pob.aragon.2014.largo, variable ~ Provincia)
# Aggregations
iris.long <- melt(iris)
head(iris.long)
dcast(iris.long, Species ~ variable)
# Exercise: what happened there?
dcast(iris.long, Species ~ variable, fun.aggregate = mean)
dcast(iris.long, Species ~ variable, value.var = "value", fun.aggregate = mean)
# Note: usually you do not need to specify "value.var": dcast guesses it. But
# sometimes it gets it wrong, hence...
paro <- read.table("data/paro.csv", header = T, sep = "\t")
# let's tidy the data a little (details later)
paro$Periodo <- gsub("IV", "4", paro$Periodo)
paro$Periodo <- gsub("III", "3", paro$Periodo)
paro$Periodo <- gsub("II", "2", paro$Periodo)
paro$Periodo <- gsub("I", "1", paro$Periodo)
paro$Situation <- as.character(paro$Situation)
paro$Situation[paro$Situation == "Active population"] <- "active"
paro$Situation[paro$Situation == "Inactive persons"] <- "inactive"
paro$Situation[paro$Situation == "Unemployed persons"] <- "unemployed"
paro$Situation[paro$Situation == "Employed persons"] <- "employed"
paro$Situation[paro$Situation == "Parados que buscan primer empleo"] <- "never_employed"
paro$Situation <- factor(paro$Situation)
# paro is in long format, but...
paro.alt <- dcast(paro, Gender + Provinces + Periodo ~ Situation)
# Exercise: add to paro.alt an extra column with the unemployment rate
# (unemployed over active population)
# Note: this exercise shows that it is sometimes useful to build a particular
# kind of long format to create new variables easily.
# Exercise: aggregate the unemployment data for all of Spain using dcast and fun.aggregate = sum.
# Hint: if you ignore the province in dcast, "duplicates" appear
# Exercise: identify the provinces and periods in which the male unemployment
# rate is higher than the female one (note: the rate is "unemployed" divided by "active")
#----------------------------------------------------------------------------
# plyr: split-apply-combine processing of tables
#----------------------------------------------------------------------------
# the fundamental expression:
res <- ddply(paro, .(Gender, Periodo, Situation), summarize, total = sum(value))
# pieces of the expression above:
# ddply: transforms one table into another table
# paro: a data frame
# .(...): variables of the input table used for splitting
# summarize: any function that operates on tables
# total = ...: arguments of that function
# Exercise: put airquality in long format and get the mean and median of each variable by month
# other functions usable inside ddply:
foo <- function(x) lm(Temp ~ Solar.R, data = x)$coefficients
ddply(airquality, .(Month), foo)
# In general, I repeat, the function can be any function that accepts a table as argument
# The remaining arguments of the (arbitrary) function are passed through ddply
# (after the call to the function)
# variants of the formula above: dlply
res <- dlply(airquality, .(Month), function(x) lm(Temp ~ Solar.R, data = x)) # a list!
lapply(res, coefficients)
ldply(res, coefficients)
# there are also llply, laply, alply... and even d_ply
# exercise: complete the function below and use it to save a plot of the
# relationship between temperature and solar radiation for each month
foo <- function(x){
nombre.fichero <- paste0(unique(x$Month), ".png")
png(nombre.fichero)
plot(x$Solar.R, x$Temp, main = "...", xlab = "...", ylab = "...")
abline(lm(Temp ~ Solar.R, data = x), col = "red")
dev.off()
}
# piecewise transformations
tasa.paro <- dcast(paro, Gender + Provinces + Periodo ~ Situation)
tasa.paro <- transform(tasa.paro, tasa.paro = unemployed / active)
tasa.paro <- tasa.paro[, c("Gender", "Provinces", "Periodo", "tasa.paro")]
# To select the period with the highest unemployment rate in each province and sex, with plyr,
tmp <- ddply(tasa.paro, .(Gender, Provinces), transform, rank = rank(-tasa.paro, ties = "random"))
res <- tmp[tmp$rank == 1,]
# Exercise: for each province, select the period in which the total number
# (men + women) of unemployed was highest
# An example of piecewise regressions, assigning the predicted value.
# We use data from http://www.unt.edu/rss/class/Jon/R_SC/Module9/lmm.data.txt
dat <- read.table("data/lmm_data.txt", header = T, sep = ",")
dat.preds <- ddply(dat, .(school), transform,
pred = predict(lm(extro ~ open + agree + social + class)))
# Alternatively:
foo <- function(x){
modelo <- lm(extro ~ open + agree + social + class, data = x)
res <- x
res$preds <- predict(modelo, new.data = x)
res
}
dat.preds <- ddply(dat, .(school), function(x) foo(x))
dat.preds <- ddply(dat, .(school), foo) | /sesion_03_manipulacion_datos(1).R | no_license | arrpak/material | R | false | false | 7,237 | r | #############################################################################
# Ciencia de datos - R - Parte 03: manipulaciรณn avanzada de datos con R
# cgb@datanalytics.com, 2016-05-21
#
# El objetivo de esta sesiรณn es recorrer aprender a manipular datos usando dos
# paquetes importantes de R: reshape2 y plyr
#############################################################################
#############################################################################
# reshape2
#############################################################################
# Instalaciรณn:
install.packages("reshape2")
install.packages("plyr")
# Nota: tambiรฉn puedes usar los menรบs de RStudio para instalar paquetes (Tools...)
# Carga:
library(reshape2)
library(plyr)
#----------------------------------------------------------------------------
# formato largo (melted)
#----------------------------------------------------------------------------
pob.aragon.2014 <- read.table("data/pob_aragon_2014.csv", header = T, sep = "\t")
pob.aragon.2014
melt(pob.aragon.2014) # mismos datos en otro formato... ยกformato largo!
# Ejercicio: pasa el tiempo que consideres necesario para entender muy bien:
# - cรณmo se ha transformado pob.aragon.2014
# - que la informaciรณn contenida en ambos conjuntos de datos es la misma
pob.aragon <- read.table("data/pob_aragon.csv", header = T, sep = "\t")
pob.aragon
melt(pob.aragon) # ยกhorrible!
melt(pob.aragon, id.vars = c("Provincia", "Periodo")) # Ahora sรญ
# Ejercicio: ยฟquรฉ pasa si alteras el orden de provincia y periodo?
pob.aragon.long <- melt(pob.aragon, id.vars = c("Provincia", "Periodo"))
# Una pequeรฑa digresiรณn:
arrange(pob.aragon.long, Periodo, Provincia) # ยฟte gusta mรกs ordenar asรญ?
# Nota: la funciรณn arrange estรก en el paquete plyr...
# Ejercicio: busca en ?arrange cรณmo ordenar descendentemente
# Ejercicio: toma el conjunto de datos airquality y disponlo en formato largo
# Ejercicio: calcula el valor mediano (median) de las variables de long.airquality
#----------------------------------------------------------------------------
# Formato ancho (cast)
#----------------------------------------------------------------------------
pob.aragon.2014.largo <- melt(pob.aragon.2014)
pob.aragon.2014.largo
# a partir del formato largo se puede pasar a distintos tipos de formatos anchos:
dcast(pob.aragon.2014.largo, Provincia ~ variable)
dcast(pob.aragon.2014.largo, variable ~ Provincia)
# Agregaciones
iris.long <- melt(iris)
head(iris.long)
dcast(iris.long, Species ~ variable)
# Ejercicio: ยฟquรฉ ha pasado?
dcast(iris.long, Species ~ variable, fun.aggregate = mean)
dcast(iris.long, Species ~ variable, value.var = "value", fun.aggregate = mean)
# Nota: generalmente, no hay que especificar "value.var": dcast la adivina. Pero a veces se
# equivoca, por lo que...
paro <- read.table("data/paro.csv", header = T, sep = "\t")
# vamos a arreglar un poco los datos (los detalles, mรกs adelante)
paro$Periodo <- gsub("IV", "4", paro$Periodo)
paro$Periodo <- gsub("III", "3", paro$Periodo)
paro$Periodo <- gsub("II", "2", paro$Periodo)
paro$Periodo <- gsub("I", "1", paro$Periodo)
paro$Situation <- as.character(paro$Situation)
paro$Situation[paro$Situation == "Active population"] <- "active"
paro$Situation[paro$Situation == "Inactive persons"] <- "inactive"
paro$Situation[paro$Situation == "Unemployed persons"] <- "unemployed"
paro$Situation[paro$Situation == "Employed persons"] <- "employed"
paro$Situation[paro$Situation == "Parados que buscan primer empleo"] <- "never_employed"
paro$Situation <- factor(paro$Situation)
# paro estรก en formato largo, pero...
paro.alt <- dcast(paro, Gender + Provinces + Periodo ~ Situation)
# Ejercicio: aรฑade a paro.alt una columna adicional con la tasa de paro (desempleados entre
# poblaciรณn activa)
# Nota: este ejercicio demuestra que en ocasiones es bueno crear un determinado tipo de formato
# largo para crear nuevas variables fรกcilmente.
# Ejercicio: agrega los datos del paro para toda Espaรฑa usando dcast y fun.aggregate = sum.
# Pista: si ignoras la provincia en dcast se producen "duplicados"
# Ejercicio: identifica las provincias y periodos en los que la tasa de paro masculina es
# mayor que la femenina (nota: la tasa de paro es "unemployed" dividido por "active")
#----------------------------------------------------------------------------
# plyr: procesamiento de tablas por trozos
#----------------------------------------------------------------------------
# la expresiรณn fundamental:
res <- ddply(paro, .(Gender, Periodo, Situation), summarize, total = sum(value))
# elementos de la expresiรณn anterior:
# ddply: transforma una tabla en otra tabla
# paro: un dataframe
# .(...): variables de la tabla de entrada por las que se parte
# summarize: cualquier funciรณn que opera sobre tablas
# total = ...: argumentos de la funciรณn
# Ejercicio: pon airquality en formato largo y saca la media y la mediana de cada variable por mes
# otras funciones que se pueden usar en ddply:
foo <- function(x) lm(Temp ~ Solar.R, data = x)$coefficients
ddply(airquality, .(Month), foo)
# En general, insisto, la funciรณn puede ser cualquiera que admita como argumento una tabla
# Los demรกs argumentos de la funciรณn (arbitraria) se pasan a travรฉs de ddply (detrรกs de la llamada a
# la funciรณn)
# variantes de la fรณrmula anterior: dlply
res <- dlply(airquality, .(Month), function(x) lm(Temp ~ Solar.R, data = x)) # una lista!
lapply(res, coefficients)
ldply(res, coefficients)
# existen tambiรฉn llply, laply, alply... e incluso d_ply
# ejercicio: completa la funciรณn siguiente y รบsala para guardar un grรกfico de la relaciรณn entre la temperatura
# y la irradiaciรณn solar en cada mes
foo <- function(x){
nombre.fichero <- paste0(unique(x$Month), ".png")
png(nombre.fichero)
plot(x$Solar.R, x$Temp, main = "...", xlab = "...", ylab = "...")
abline(lm(Temp ~ Solar.R, data = x), col = "red")
dev.off()
}
# transformaciones por trozos
tasa.paro <- dcast(paro, Gender + Provinces + Periodo ~ Situation)
tasa.paro <- transform(tasa.paro, tasa.paro = unemployed / active)
tasa.paro <- tasa.paro[, c("Gender", "Provinces", "Periodo", "tasa.paro")]
# Para seleccionar el perido de mayor tasa de paro en cada provincia y sexo, con plyr,
tmp <- ddply(tasa.paro, .(Gender, Provinces), transform, rank = rank(-tasa.paro, ties = "random"))
res <- tmp[tmp$rank == 1,]
# Ejercicio: selecciona en cada provincia el periodo en el que fue mรกximo el nรบmero total (hombres + mujeres) de parados
# Un ejemplo de regresiones por trozos y asignar el valor predicho.
# Usamos data de http://www.unt.edu/rss/class/Jon/R_SC/Module9/lmm.data.txt
dat <- read.table("data/lmm_data.txt", header = T, sep = ",")
dat.preds <- ddply(dat, .(school), transform,
pred = predict(lm(extro ~ open + agree + social + class)))
# Alternativamente:
foo <- function(x){
modelo <- lm(extro ~ open + agree + social + class, data = x)
res <- x
res$preds <- predict(modelo, new.data = x)
res
}
dat.preds <- ddply(dat, .(school), function(x) foo(x))
dat.preds <- ddply(dat, .(school), foo) |
# Guard the suite when the suggested 'psych' package is unavailable, and use
# requireNamespace() instead of require(): require() only returns FALSE on
# failure (it does not error), so the original test could "pass" without
# psych ever loading. The expectation also keeps testthat from reporting
# this as an empty test.
test_that("create a PCA using the psych package", {
  skip_if_not_installed("psych")
  expect_true(requireNamespace("psych", quietly = TRUE))
})
| /tests/testthat/test-reportPCA.R | permissive | statisticsforsocialscience/reportPCA | R | false | false | 74 | r | test_that("create a PCA using the psych package", {
require(psych)
})
|
## Wraps a matrix with a small cache: stores the matrix together with a slot
## ("result") for its inverse so the inverse can be computed once and reused.
## Returns a list of accessor functions (set/get/setresult/getresult)
## operating on the shared state.
## FIX: the original formal argument was written `x <- matrix()`, which is a
## parse error in R; default arguments must use `=`.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse starts out empty
  result <- NULL
  set <- function(y) {
    # `<<-` assigns into the enclosing environment, so the state is shared
    # by all accessor functions returned below.
    x <<- y
    result <<- NULL  # invalidate any previously cached inverse
  }
  # Accessor functions
  get <- function() x
  setresult <- function(inverse) result <<- inverse
  getresult <- function() result
  list(set = set, get = get, setresult = setresult, getresult = getresult)
}
## Returns the inverse of the matrix wrapped by makeCacheMatrix().
## Three steps: 1. look up the cache; 2. if a cached inverse exists, return
## it; 3. otherwise compute the inverse with solve(), store it, return it.
## FIX: the original line `result <- x$getresult() result` had a stray
## trailing token and did not parse.
cacheSolve <- function(x, ...) {
  # Fetch any previously cached inverse
  result <- x$getresult()
  # Cache hit: return it without recomputing
  if (!is.null(result)) {
    message("......readaing cache......getting data from cache.......")
    return(result)
  }
  # Cache miss: pull the matrix and invert it
  matxdata <- x$get()
  result <- solve(matxdata, ...)
  # Store the freshly computed inverse in the cache
  x$setresult(result)
  return(result)
}
| /cachematrix.R | no_license | kvcha/ProgrammingAssignment2 | R | false | false | 1,391 | r | ## This functions expand classic matrix function, adding possibility to lookup cache for return of matrix.
## Wraps a matrix with a small cache: stores the matrix together with a slot
## ("result") for its inverse so the inverse can be computed once and reused.
## Returns a list of accessor functions (set/get/setresult/getresult)
## operating on the shared state.
## FIX: the original formal argument was written `x <- matrix()`, which is a
## parse error in R; default arguments must use `=`.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse starts out empty
  result <- NULL
  set <- function(y) {
    # `<<-` assigns into the enclosing environment, so the state is shared
    # by all accessor functions returned below.
    x <<- y
    result <<- NULL  # invalidate any previously cached inverse
  }
  # Accessor functions
  get <- function() x
  setresult <- function(inverse) result <<- inverse
  getresult <- function() result
  list(set = set, get = get, setresult = setresult, getresult = getresult)
}
## Returns the inverse of the matrix wrapped by makeCacheMatrix().
## Three steps: 1. look up the cache; 2. if a cached inverse exists, return
## it; 3. otherwise compute the inverse with solve(), store it, return it.
## FIX: the original line `result <- x$getresult() result` had a stray
## trailing token and did not parse.
cacheSolve <- function(x, ...) {
  # Fetch any previously cached inverse
  result <- x$getresult()
  # Cache hit: return it without recomputing
  if (!is.null(result)) {
    message("......readaing cache......getting data from cache.......")
    return(result)
  }
  # Cache miss: pull the matrix and invert it
  matxdata <- x$get()
  result <- solve(matxdata, ...)
  # Store the freshly computed inverse in the cache
  x$setresult(result)
  return(result)
}
|
##########################################################
# FISH 604
# Homework 2 script file
# Chatham sablefish growth
# Luke Henslee- Sep 16, 2021
##########################################################
library(tidyverse)
############ Prob. 1: Data import
# as.is=F reads character columns (e.g. Sex) as factors.
sable <- read.csv("sablefish.csv", as.is=F)
head(sable) # Look at first few rows
hist(sable$Age) # Age composition
table(sable$Age) # Same in table format
summary(sable) # Basic summary of each variable
# Confirm Sex was imported as a factor
is.factor(sable$Sex)
### 1. Plot Length-at-age, grouped by sex:
plot(Length ~ Age, data=sable, subset = Sex == "Female", col=2,
xlab = "Age", ylab = "Length (mm)")
points(Length ~ Age, data = sable, subset = Sex == "Male", col=4)
# ggplot version
p1 <- ggplot(data = sable, aes(x= Age, y= Length))
p1 + geom_point(aes(color = Sex))
## Jittering coordinates to show points that overlap
# base graphics:
plot(jitter(Length) ~ jitter(Age), data=sable, subset = Sex == "Female",
col=2, xlab = "Age", ylab = "Length (mm)")
points(jitter(Length) ~ jitter(Age+0.5), data = sable,
subset = Sex == "Male", col=4)
# ggplot version:
p1 + geom_jitter(aes(color = Sex))
### 2. Plot Length-at-age by year, grouped by sex:
sable$Year <- factor(sable$Year)
# From 'lattice' package:
library(lattice)
xyplot(jitter(Length) ~ jitter(Age) | factor(Year), data=sable, groups = Sex)
# Females only, base graphics, with scatterplot smoother (spline):
sub <- sable$Sex == "Female"
scatter.smooth(jitter(sable$Age[sub]), jitter(sable$Length[sub]), col=2,
xlab = "Age", ylab = "Length (mm)")
# ggplot, females only
ggplot(data = subset(sable, Sex == "Female"), aes(x = Age, y = Length)) +
geom_jitter() +
geom_smooth(method = "loess")
# lattice graphics, scatterplots and scatterplot smoothers, by year
xyplot(jitter(Length) ~ jitter(Age) | factor(Year), data=sable, groups=Sex, auto.key=T)
xyplot(jitter(Length) ~ jitter(Age) | factor(Year), data=sable, groups=Sex,
xlim = c(0,50), ylim = c(450, 1000), auto.key=T)
xyplot(Length ~ Age | factor(Year), data=sable, groups=Sex, panel = "panel.superpose",
panel.groups = "panel.loess", auto.key=T)
# ggplot version, with loess smoother and 95% confidence bands:
p1 + geom_smooth(method = "loess", aes (color = Sex)) +
facet_wrap(~Year)
# scatterplot + loess smoother:
p1 + geom_jitter(aes(color = Sex), pch=1) +
geom_smooth(aes(color=Sex), method="loess", level=0.95) +
facet_wrap(~Year)
########### Prob. 3: Fit Ludwig van Bertalanffy (LVB)
########### growth model to sablefish data
# von Bertalanffy growth curve: expected length at age `a` given the growth
# coefficient `k`, asymptotic length `L.inf`, and the theoretical age at
# length zero `a0`. Vectorized over `a`.
LvB <- function(a, k, L.inf, a0) {
  fraction_of_asymptote <- 1 - exp(-k * (a - a0))
  L.inf * fraction_of_asymptote
}
# Try out some starting values and superimpose on scatterplot:
ST <- c(k = 0.05, L.inf = 800 , a0 = -15)
# Make sure to pick sensible values for L.inf and a0, then add line:
plot(jitter(Length) ~ jitter(Age), data=sable, col=2)
lines(1:80, LvB(1:80, k = ST[1], L.inf = ST[2], a0 = ST[3]), lwd=3)
# Fit the model using 'nls()' with the regression equation
# 'nls' finds parameters that minimize the sum of squared
# differences between the left-hand side (observed lengths)
# and the right-hand side (predicted lengths) of the formula
fit <- nls(Length ~ LvB(Age, k, L.inf, a0), data = sable, start = ST)
summary(fit)
coef(fit)
# Visualize the overall model fit
plot(jitter(Length) ~ jitter(Age), data=sable, col=2)
cf <- coef(fit)
# Add the fitted model to the scatterplot...
lines(1:80, LvB(1:80, k = cf[1], L.inf = cf[2], a0 = cf[3]), lwd=3)
##### Prob. 4: Diagnostics
r <- resid(fit) # Extract residuals (make sure 'fit' is the combined model fit)
cf <- coef(fit)
r2 <- sable$Length - LvB(sable$Age,k=cf[1],L.inf=cf[2], a0=cf[3])
# Example plot:
boxplot(r ~ Year, data = sable); abline(h=0, col=2)
##### Prob. 5: Analyses by sex
| /604/HW/HW2/Hwk2_Luke Henslee.R | no_license | lukehenslee/Coursework | R | false | false | 3,887 | r | ##########################################################
# FISH 604
# Homework 2 script file
# Chatham sablefish growth
# Luke Henslee- Sep 16, 2021
##########################################################
library(tidyverse)
############ Prob. 1: Data import
sable <- read.csv("sablefish.csv", as.is=F)
head(sable) # Look at first few rows
hist(sable$Age) # Age composition
table(sable$Age) # Same in table format
summary(sable) # Basic summary of each variable
is.factor(sable$Sex)
############ Prob. 2: Data exploration:
### 1. Plot Length-at-age, grouped by sex:
plot(Length ~ Age, data=sable, subset = Sex == "Female", col=2,
xlab = "Age", ylab = "Length (mm)")
points(Length ~ Age, data = sable, subset = Sex == "Male", col=4)
# ggplot version
p1 <- ggplot(data = sable, aes(x= Age, y= Length))
p1 + geom_point(aes(color = Sex))
## Jittering coordinates to show points that overlap
# base graphics:
plot(jitter(Length) ~ jitter(Age), data=sable, subset = Sex == "Female",
col=2, xlab = "Age", ylab = "Length (mm)")
points(jitter(Length) ~ jitter(Age+0.5), data = sable,
subset = Sex == "Male", col=4)
# ggplot version:
p1 + geom_jitter(aes(color = Sex))
### 2. Plot Length-at-age by year, grouped by sex:
sable$Year <- factor(sable$Year)
# From 'lattice' package:
library(lattice)
xyplot(jitter(Length) ~ jitter(Age) | factor(Year), data=sable, groups = Sex)
# Females only, base graphics, with scatterplot smoother (spline):
sub <- sable$Sex == "Female"
scatter.smooth(jitter(sable$Age[sub]), jitter(sable$Length[sub]), col=2,
xlab = "Age", ylab = "Length (mm)")
# ggplot, females only
ggplot(data = subset(sable, Sex == "Female"), aes(x = Age, y = Length)) +
geom_jitter() +
geom_smooth(method = "loess")
# lattice graphics, scatterplots and scatterplot smoothers, by year
xyplot(jitter(Length) ~ jitter(Age) | factor(Year), data=sable, groups=Sex, auto.key=T)
xyplot(jitter(Length) ~ jitter(Age) | factor(Year), data=sable, groups=Sex,
xlim = c(0,50), ylim = c(450, 1000), auto.key=T)
xyplot(Length ~ Age | factor(Year), data=sable, groups=Sex, panel = "panel.superpose",
panel.groups = "panel.loess", auto.key=T)
# ggplot version, with loess smoother and 95% confidence bands:
p1 + geom_smooth(method = "loess", aes (color = Sex)) +
facet_wrap(~Year)
# scatterplot + loess smoother:
p1 + geom_jitter(aes(color = Sex), pch=1) +
geom_smooth(aes(color=Sex), method="loess", level=0.95) +
facet_wrap(~Year)
########### Prob. 3: Fit Ludwig van Bertalanffy (LVB)
########### growth model to sablefish data
# von Bertalanffy growth curve: expected length at age `a` given the growth
# coefficient `k`, asymptotic length `L.inf`, and the theoretical age at
# length zero `a0`. Vectorized over `a`.
LvB <- function(a, k, L.inf, a0) {
  fraction_of_asymptote <- 1 - exp(-k * (a - a0))
  L.inf * fraction_of_asymptote
}
# Try out some starting values and superimpose on scatterplot:
ST <- c(k = 0.05, L.inf = 800 , a0 = -15)
# Make sure to pick sensible values for L.inf and a0, then add line:
plot(jitter(Length) ~ jitter(Age), data=sable, col=2)
lines(1:80, LvB(1:80, k = ST[1], L.inf = ST[2], a0 = ST[3]), lwd=3)
# Fit the model using 'nls()' with the regression equation
# 'nls' finds parameters that minimize the sum of squared
# differences between the left-hand side (observed lengths)
# and the right-hand side (predicted lengths) of the formula
fit <- nls(Length ~ LvB(Age, k, L.inf, a0), data = sable, start = ST)
summary(fit)
coef(fit)
# Visualize the overall model fit
plot(jitter(Length) ~ jitter(Age), data=sable, col=2)
cf <- coef(fit)
# Add the fitted model to the scatterplot...
lines(1:80, LvB(1:80, k = cf[1], L.inf = cf[2], a0 = cf[3]), lwd=3)
##### Prob. 4: Diagnostics
r <- resid(fit) # Extract residuals (make sure 'fit' is the combined model fit)
cf <- coef(fit)
r2 <- sable$Length - LvB(sable$Age,k=cf[1],L.inf=cf[2], a0=cf[3])
# Example plot:
boxplot(r ~ Year, data = sable); abline(h=0, col=2)
##### Prob. 5: Analyses by sex
|
#GOOGLE_MAPS_KEY = #
# NOTE(review): CRAN package names are case-sensitive -- "rcurl" and
# "colorramps" would fail to install; corrected to "RCurl" and "colorRamps".
# Also added "rworldmap" and "sqldf", which are loaded/used below but were
# never installed, and load sqldf for the sqldf() call in the mapping section.
install.packages(c("rtweet","dplyr","jsonlite","syuzhet","RCurl","httr","sentimentr","ggmap","ggplot2","colorRamps","rworldmap","sqldf"))
library(rtweet)
library(dplyr)
library(jsonlite)
library(syuzhet)
library(RCurl)
library(httr)
library(sentimentr)
library(ggmap)
library(ggplot2)
library(colorRamps)
library(rworldmap)
library(sqldf)
#rm(archie_royal_us,citizens_united_us,dianne_feinstein_us,kraft_heinz_us,met_gala_us,nipsey_us,precious_harris_us,school_shooting_us,sri_lanka_us,unemployment_rate_us)
#rm(clean_loc,i,lat,lng,loc,map,str,temp2)
################
# Gather tweets
################
#get tweets
# NOTE(review): the search result is stored in `tweets_all`, but everything
# below operates on `dianne_feinstein_us` -- presumably loaded elsewhere;
# confirm which dataset this run is meant to process.
tweets_all <- search_tweets(type = 'recent',
q = '(archie royal) OR (royal baby) OR (archie baby)',
include_rts = FALSE,
n = 18000,
langs = 'EN',
geocode = lookup_coords('country:us',key = GOOGLE_MAPS_KEY),
since = '2019-05-08',
until = '2019-05-09')
#get columns we're interested in
tweets_wanted <- dianne_feinstein_us %>%
select(user_id,status_id,created_at,lang,account_lang, account_created_at, followers_count,
location, country,country_code,geo_coords,coords_coords,is_retweet, bbox_coords,text)
#add new columns
# placeholder columns filled in by the geocoding loop below
tweets_wanted$c_lat = 0
tweets_wanted$c_long = 0
tweets_wanted$new_loc = ""
#gets coords from loc from each tweet
# `rm_accent` and `funct` are user-defined helpers not shown in this file;
# `funct` apparently returns c(location, lat, lng) -- TODO confirm.
for (i in 1:nrow(tweets_wanted)) {
get_loc <- tweets_wanted$location[i]
clean_loc <- rm_accent(get_loc)
clean_loc <- gsub("[^A-z,-]", "", clean_loc)
clean_loc <- gsub("[,-]", " ", clean_loc)
str <- funct(clean_loc)
if(length(str) == 0){
# fallback when geocoding fails; NOTE(review): "singapore" is an odd
# default for a US-filtered dataset -- confirm intent
str <- funct("singapore")
}
loc <- str[1]
lat <- str[2]
lng <- str[3]
tweets_wanted$new_loc[i] <- loc
tweets_wanted$c_lat[i] <- as.numeric(lat)
tweets_wanted$c_long[i] <- as.numeric(lng)
tweets_wanted$coords_coords[i] <- paste(lat,",",lng)
}
################
# Sentiment
################
#clean the tweet text before sentiment scoring
tweets_wanted$text = gsub("&", "", tweets_wanted$text) # ampersands (this line was duplicated; one pass suffices)
tweets_wanted$text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", tweets_wanted$text) # retweet markers
tweets_wanted$text = gsub("@\\w+", "", tweets_wanted$text) # @mentions
tweets_wanted$text = gsub("[[:punct:]]", "", tweets_wanted$text) # punctuation
tweets_wanted$text = gsub("[[:digit:]]", "", tweets_wanted$text) # digits
tweets_wanted$text = gsub("http\\w+", "", tweets_wanted$text) # URLs
tweets_wanted$text = gsub("[ \t]{2,}", "", tweets_wanted$text) # runs of whitespace
tweets_wanted$text = gsub("^\\s+|\\s+$", "", tweets_wanted$text) # leading/trailing whitespace
tweets_wanted$text <- iconv(tweets_wanted$text, "UTF-8", "ASCII", sub="") # drop non-ASCII
#new column: one sentiment score per tweet
tweets_wanted$sentiment = 0
for (i in 1:nrow(tweets_wanted)) {
# sentiment() returns one score per sentence; average them so the assignment
# is always length 1 (the original kept only the first sentence's score and
# produced a warning on multi-sentence tweets)
tweets_wanted$sentiment[i] = mean(sentiment(get_sentences(tweets_wanted$text[i]))$sentiment)
}
################
# Lat Long corr
# Final matrix
################
#dianne_feinstein_us <- dianne_feinstein_us[!(substr(dianne_feinstein_us$created_at,1,10) == '2019-02-22'),]
#kraft_heinz_us <- kraft_heinz_us[!(substr(kraft_heinz_us$created_at,1,10) == '2019-02-23'),]
#precious_harris_us <- precious_harris_us[!(substr(precious_harris_us$created_at,1,10) == '2019-02-23'),]
tweets_wanted <- dianne_feinstein_us
#remove non-existing lat long (from errors before) and that whole row
# NOTE(review): -125..-70 is a US *longitude* range and 25..50 a *latitude*
# range, yet they are applied to c_lat and c_long respectively -- it looks
# like c_lat/c_long hold longitude/latitude (swapped names); confirm upstream.
tweets_wanted <- tweets_wanted[!(as.numeric(tweets_wanted$c_lat) < -125),]
tweets_wanted <- tweets_wanted[!(as.numeric(tweets_wanted$c_lat) > -70),]
tweets_wanted <- tweets_wanted[!(abs(as.numeric(tweets_wanted$c_long)) > 50),]
tweets_wanted <- tweets_wanted[!(abs(as.numeric(tweets_wanted$c_long)) < 25),]
# drop rows that are entirely NA
tweets_wanted <- tweets_wanted[rowSums(is.na(tweets_wanted)) != ncol(tweets_wanted),]
# final matrix form
tweets_wanted <- tweets_wanted %>%
select(status_id,created_at,c_lat,c_long,sentiment,text)
################
# Mapping
################
register_google(GOOGLE_MAPS_KEY)
# Count tweets posted in the last half hour of the day.
cnt <- 0
for (i in 1:nrow(tweets_wanted)) {
#tweets_wanted$created_at[i] <- substr(tweets_wanted$created_at[i],12,19)
# FIX: the original indexed row 1 on every iteration ([1] instead of [i]),
# so cnt was always either 0 or nrow(tweets_wanted).
if (substr(tweets_wanted$created_at[i],11,19) >= "23:30:01") {
cnt <- cnt +1
}
}
# Earliest timestamp (and a sentiment value) per unique coordinate.
# NOTE(review): `unique_pos` is only created further down in this script --
# it must exist before this query runs; confirm execution order.
tmp <- sqldf("
select distinct a.c_lat,a.c_long,min(b.created_at) as tm,b.sentiment from unique_pos as a
join tweets_wanted as b on a.c_lat = b.c_lat and a.c_long = b.c_long
group by a.c_lat,a.c_long
")
# Base-map polygons for the continental US and state borders.
us <- map_data("usa")
states <- map_data("state")
gg1 <- ggplot() + geom_polygon(data = us, aes(x=long, y = lat, group = group)) +
geom_polygon(data = states, aes(x = long, y = lat, color = 'white', group = group), color = "white") +
coord_fixed(1.3)
# Green-to-red gradient, one colour per row of tmp.
cc <- scales::seq_gradient_pal("green", "red", "Lab")(seq(0,1,length.out=nrow(tmp)))
# NOTE(review): mapping c_lat to x and c_long to y reverses the usual lon=x /
# lat=y convention -- confirm the columns are not swapped upstream. Also,
# `tmp$tm` inside aes() bypasses the data argument; bare `tm` is the idiomatic form.
gg1 + geom_point(data = tmp,aes(x = c_lat,y = c_long,color = tmp$tm),show.legend = FALSE) +
scale_color_manual(values = cc)
unique_pos <- unique(tweets_wanted[,c('c_lat','c_long')])
####### GOOGLE MAP HERE ########
map <- get_map(location="united states", zoom=4, maptype = "toner", source = "google",scale = 2)
ggmap(map)
tweets_wanted2 <- tweets_wanted
# Bin timestamps into 30-minute intervals for the colour scale.
tweets_wanted2$created_at <- cut(tweets_wanted2$created_at,breaks = '00:30:00')
ggmap(map) +
geom_point(data = tweets_wanted2,show.legend = FALSE,
aes(x = c_lat,y = c_long, color = tweets_wanted2$created_at,size = 1.5)) +
scale_color_discrete()
# NOTE(review): the line below is not chained with `+`, so this scale is
# computed and discarded -- confirm whether it should be part of the plot.
scale_fill_gradient(low='green',high = 'red')
####### GOOGLE MAP HERE ########
saveRDS(tweets_wanted,file =
"C:\\Users//Diar//Desktop//Ny mapp//KEX19//KEX19//News//us//P2//Map//Sentiment//Fin/citizens_united_us.rds")
| /kex.R | no_license | diarsabri/BSC-KTH-2019 | R | false | false | 5,598 | r | #GOOGLE_MAPS_KEY = #
# NOTE(review): CRAN package names are case-sensitive -- "rcurl" and
# "colorramps" would fail to install; corrected to "RCurl" and "colorRamps".
# Also added "rworldmap" and "sqldf", which are loaded/used below but were
# never installed, and load sqldf for the sqldf() call in the mapping section.
install.packages(c("rtweet","dplyr","jsonlite","syuzhet","RCurl","httr","sentimentr","ggmap","ggplot2","colorRamps","rworldmap","sqldf"))
library(rtweet)
library(dplyr)
library(jsonlite)
library(syuzhet)
library(RCurl)
library(httr)
library(sentimentr)
library(ggmap)
library(ggplot2)
library(colorRamps)
library(rworldmap)
library(sqldf)
#rm(archie_royal_us,citizens_united_us,dianne_feinstein_us,kraft_heinz_us,met_gala_us,nipsey_us,precious_harris_us,school_shooting_us,sri_lanka_us,unemployment_rate_us)
#rm(clean_loc,i,lat,lng,loc,map,str,temp2)
################
# Gather tweets
################
#get tweets
# NOTE(review): the search result is stored in `tweets_all`, but everything
# below operates on `dianne_feinstein_us` -- presumably loaded elsewhere;
# confirm which dataset this run is meant to process.
tweets_all <- search_tweets(type = 'recent',
q = '(archie royal) OR (royal baby) OR (archie baby)',
include_rts = FALSE,
n = 18000,
langs = 'EN',
geocode = lookup_coords('country:us',key = GOOGLE_MAPS_KEY),
since = '2019-05-08',
until = '2019-05-09')
#get columns we're interested in
tweets_wanted <- dianne_feinstein_us %>%
select(user_id,status_id,created_at,lang,account_lang, account_created_at, followers_count,
location, country,country_code,geo_coords,coords_coords,is_retweet, bbox_coords,text)
#add new columns
# placeholder columns filled in by the geocoding loop below
tweets_wanted$c_lat = 0
tweets_wanted$c_long = 0
tweets_wanted$new_loc = ""
#gets coords from loc from each tweet
# `rm_accent` and `funct` are user-defined helpers not shown in this file;
# `funct` apparently returns c(location, lat, lng) -- TODO confirm.
for (i in 1:nrow(tweets_wanted)) {
get_loc <- tweets_wanted$location[i]
clean_loc <- rm_accent(get_loc)
clean_loc <- gsub("[^A-z,-]", "", clean_loc)
clean_loc <- gsub("[,-]", " ", clean_loc)
str <- funct(clean_loc)
if(length(str) == 0){
# fallback when geocoding fails; NOTE(review): "singapore" is an odd
# default for a US-filtered dataset -- confirm intent
str <- funct("singapore")
}
loc <- str[1]
lat <- str[2]
lng <- str[3]
tweets_wanted$new_loc[i] <- loc
tweets_wanted$c_lat[i] <- as.numeric(lat)
tweets_wanted$c_long[i] <- as.numeric(lng)
tweets_wanted$coords_coords[i] <- paste(lat,",",lng)
}
################
# Sentiment
################
#clean the tweet text before sentiment scoring
tweets_wanted$text = gsub("&", "", tweets_wanted$text) # ampersands (this line was duplicated; one pass suffices)
tweets_wanted$text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", tweets_wanted$text) # retweet markers
tweets_wanted$text = gsub("@\\w+", "", tweets_wanted$text) # @mentions
tweets_wanted$text = gsub("[[:punct:]]", "", tweets_wanted$text) # punctuation
tweets_wanted$text = gsub("[[:digit:]]", "", tweets_wanted$text) # digits
tweets_wanted$text = gsub("http\\w+", "", tweets_wanted$text) # URLs
tweets_wanted$text = gsub("[ \t]{2,}", "", tweets_wanted$text) # runs of whitespace
tweets_wanted$text = gsub("^\\s+|\\s+$", "", tweets_wanted$text) # leading/trailing whitespace
tweets_wanted$text <- iconv(tweets_wanted$text, "UTF-8", "ASCII", sub="") # drop non-ASCII
#new column: one sentiment score per tweet
tweets_wanted$sentiment = 0
for (i in 1:nrow(tweets_wanted)) {
# sentiment() returns one score per sentence; average them so the assignment
# is always length 1 (the original kept only the first sentence's score and
# produced a warning on multi-sentence tweets)
tweets_wanted$sentiment[i] = mean(sentiment(get_sentences(tweets_wanted$text[i]))$sentiment)
}
################
# Lat Long corr
# Final matrix
################
#dianne_feinstein_us <- dianne_feinstein_us[!(substr(dianne_feinstein_us$created_at,1,10) == '2019-02-22'),]
#kraft_heinz_us <- kraft_heinz_us[!(substr(kraft_heinz_us$created_at,1,10) == '2019-02-23'),]
#precious_harris_us <- precious_harris_us[!(substr(precious_harris_us$created_at,1,10) == '2019-02-23'),]
tweets_wanted <- dianne_feinstein_us
#remove non-existing lat long (from errors before) and that whole row
# NOTE(review): -125..-70 is a US *longitude* range and 25..50 a *latitude*
# range, yet they are applied to c_lat and c_long respectively -- it looks
# like c_lat/c_long hold longitude/latitude (swapped names); confirm upstream.
tweets_wanted <- tweets_wanted[!(as.numeric(tweets_wanted$c_lat) < -125),]
tweets_wanted <- tweets_wanted[!(as.numeric(tweets_wanted$c_lat) > -70),]
tweets_wanted <- tweets_wanted[!(abs(as.numeric(tweets_wanted$c_long)) > 50),]
tweets_wanted <- tweets_wanted[!(abs(as.numeric(tweets_wanted$c_long)) < 25),]
# drop rows that are entirely NA
tweets_wanted <- tweets_wanted[rowSums(is.na(tweets_wanted)) != ncol(tweets_wanted),]
# final matrix form
tweets_wanted <- tweets_wanted %>%
select(status_id,created_at,c_lat,c_long,sentiment,text)
################
# Mapping
################
register_google(GOOGLE_MAPS_KEY)
# Count tweets posted in the last half hour of the day.
cnt <- 0
for (i in 1:nrow(tweets_wanted)) {
#tweets_wanted$created_at[i] <- substr(tweets_wanted$created_at[i],12,19)
# FIX: the original indexed row 1 on every iteration ([1] instead of [i]),
# so cnt was always either 0 or nrow(tweets_wanted).
if (substr(tweets_wanted$created_at[i],11,19) >= "23:30:01") {
cnt <- cnt +1
}
}
# Earliest timestamp (and a sentiment value) per unique coordinate.
# NOTE(review): `unique_pos` is only created further down in this script --
# it must exist before this query runs; confirm execution order.
tmp <- sqldf("
select distinct a.c_lat,a.c_long,min(b.created_at) as tm,b.sentiment from unique_pos as a
join tweets_wanted as b on a.c_lat = b.c_lat and a.c_long = b.c_long
group by a.c_lat,a.c_long
")
# Base-map polygons for the continental US and state borders.
us <- map_data("usa")
states <- map_data("state")
gg1 <- ggplot() + geom_polygon(data = us, aes(x=long, y = lat, group = group)) +
geom_polygon(data = states, aes(x = long, y = lat, color = 'white', group = group), color = "white") +
coord_fixed(1.3)
# Green-to-red gradient, one colour per row of tmp.
cc <- scales::seq_gradient_pal("green", "red", "Lab")(seq(0,1,length.out=nrow(tmp)))
# NOTE(review): mapping c_lat to x and c_long to y reverses the usual lon=x /
# lat=y convention -- confirm the columns are not swapped upstream. Also,
# `tmp$tm` inside aes() bypasses the data argument; bare `tm` is the idiomatic form.
gg1 + geom_point(data = tmp,aes(x = c_lat,y = c_long,color = tmp$tm),show.legend = FALSE) +
scale_color_manual(values = cc)
unique_pos <- unique(tweets_wanted[,c('c_lat','c_long')])
####### GOOGLE MAP HERE ########
map <- get_map(location="united states", zoom=4, maptype = "toner", source = "google",scale = 2)
ggmap(map)
tweets_wanted2 <- tweets_wanted
# Bin timestamps into 30-minute intervals for the colour scale.
tweets_wanted2$created_at <- cut(tweets_wanted2$created_at,breaks = '00:30:00')
ggmap(map) +
geom_point(data = tweets_wanted2,show.legend = FALSE,
aes(x = c_lat,y = c_long, color = tweets_wanted2$created_at,size = 1.5)) +
scale_color_discrete()
# NOTE(review): the line below is not chained with `+`, so this scale is
# computed and discarded -- confirm whether it should be part of the plot.
scale_fill_gradient(low='green',high = 'red')
####### GOOGLE MAP HERE ########
saveRDS(tweets_wanted,file =
"C:\\Users//Diar//Desktop//Ny mapp//KEX19//KEX19//News//us//P2//Map//Sentiment//Fin/citizens_united_us.rds")
|
### Assign 1.3: Demographics & Employment Data Analysis
# Exploratory look at the CPS data (assumes `CPS` was loaded earlier -- confirm).
str(CPS)
# Summarising Data set with sort(table())
sort(table(CPS$Industry))
sort(table(CPS$State))
sort(table(CPS$Citizenship))
# Proportion of citizens; the counts presumably come from the Citizenship
# table above -- confirm against its printed output.
(116639+7073) /131302
table(CPS$Hispanic,CPS$Race)
## Missing Values - use summary
summary(CPS)
## Patterns in missing variables> Is the reported variable missing - table variable with NA against one with no NA
# TRUE means missing  (FIX: this line was bare text, a syntax error in R; now a comment)
table(CPS$Region, is.na(CPS$Married))
table(CPS$Sex, is.na(CPS$Married))
table(CPS$Age, is.na(CPS$Married))
table(CPS$Citizenship, is.na(CPS$Married))
table(CPS$State, is.na(CPS$MetroAreaCode))
table(CPS$Region,is.na(CPS$MetroAreaCode))
# Share of rows with missing MetroAreaCode -- presumably one ratio per region,
# taken from the table above; confirm against its printed output.
10674/(20010 +10674)
5609/(20330 + 5609)
9871/(31631 + 9871)
8084/(25093 + 8084)
## Trick: Calculating TRUE FALSE Proportions automatically using means.
# Proportion of True (i.e. in this case live non-metro)
# Use sort to pareto the data
sort(tapply(is.na(CPS$MetroAreaCode), CPS$State,mean))
##Integrating Metro Area Data
str(MetroAreaMap)
str(CountryMap)
# Left join: keep every CPS row, attach metro-area names by code.
CPS<-merge(CPS,MetroAreaMap,by.x="MetroAreaCode",by.y = "Code",all.x = TRUE)
summary(CPS$MetroArea)
table(CPS$MetroArea)
#3.4 Highest proportion of Hispanic in metro area
sort(tapply(CPS$Hispanic, CPS$MetroArea,mean))
#3.5 creating TRUE FALSE numeric to generate proportions
sort(tapply(CPS$Race=="Asian", CPS$MetroArea,mean))
hist(tapply(CPS$Race=="Asian", CPS$MetroArea,mean))
#3.6
sort(tapply(CPS$Education=="No high school diploma", CPS$MetroArea,mean, na.rm=TRUE))
# 4 Merge (integrate) data - note link of original name in CPS "CountryOfBirthCode"
CPS<-merge(CPS,CountryMap,by.x="CountryOfBirthCode",by.y = "Code",all.x = TRUE)
str(CPS)
summary(CPS$Country)
sort(table(CPS$Country))
#4.3 Proportion Here use summary from tapply and table to calc total n for NY-NJ-PA
tapply(CPS$MetroArea=="New York-Northern New Jersey-Long Island, NY-NJ-PA",CPS$Country,summary,na.rm=TRUE)
table(CPS$MetroArea=="New York-Northern New Jersey-Long Island, NY-NJ-PA")
1-(3736/5409)
#4.4 Count - Derive from results using summary
tapply(CPS$Country=="Somalia",CPS$MetroArea,summary,na.rm=TRUE)
| /Analytics_Edge/Unit 1/Assign 1.3.R | no_license | Andrewcraig1/LearningR | R | false | false | 2,128 | r | ### Assign 1.3: Demographics & Employment Data Analysis
# Exploratory look at the CPS data (assumes `CPS` was loaded earlier -- confirm).
str(CPS)
# Summarising Data set with sort(table())
sort(table(CPS$Industry))
sort(table(CPS$State))
sort(table(CPS$Citizenship))
# Proportion of citizens; the counts presumably come from the Citizenship
# table above -- confirm against its printed output.
(116639+7073) /131302
table(CPS$Hispanic,CPS$Race)
## Missing Values - use summary
summary(CPS)
## Patterns in missing variables> Is the reported variable missing - table variable with NA against one with no NA
# TRUE means missing  (FIX: this line was bare text, a syntax error in R; now a comment)
table(CPS$Region, is.na(CPS$Married))
table(CPS$Sex, is.na(CPS$Married))
table(CPS$Age, is.na(CPS$Married))
table(CPS$Citizenship, is.na(CPS$Married))
table(CPS$State, is.na(CPS$MetroAreaCode))
table(CPS$Region,is.na(CPS$MetroAreaCode))
# Share of rows with missing MetroAreaCode -- presumably one ratio per region,
# taken from the table above; confirm against its printed output.
10674/(20010 +10674)
5609/(20330 + 5609)
9871/(31631 + 9871)
8084/(25093 + 8084)
## Trick: Calculating TRUE FALSE Proportions automatically using means.
# Proportion of True (i.e. in this case live non-metro)
# Use sort to pareto the data
sort(tapply(is.na(CPS$MetroAreaCode), CPS$State,mean))
##Integrating Metro Area Data
str(MetroAreaMap)
str(CountryMap)
# Left join: keep every CPS row, attach metro-area names by code.
CPS<-merge(CPS,MetroAreaMap,by.x="MetroAreaCode",by.y = "Code",all.x = TRUE)
summary(CPS$MetroArea)
table(CPS$MetroArea)
#3.4 Highest proportion of Hispanic in metro area
sort(tapply(CPS$Hispanic, CPS$MetroArea,mean))
#3.5 creating TRUE FALSE numeric to generate proportions
sort(tapply(CPS$Race=="Asian", CPS$MetroArea,mean))
hist(tapply(CPS$Race=="Asian", CPS$MetroArea,mean))
#3.6
sort(tapply(CPS$Education=="No high school diploma", CPS$MetroArea,mean, na.rm=TRUE))
# 4 Merge (integrate) data - note link of original name in CPS "CountryOfBirthCode"
CPS<-merge(CPS,CountryMap,by.x="CountryOfBirthCode",by.y = "Code",all.x = TRUE)
str(CPS)
summary(CPS$Country)
sort(table(CPS$Country))
#4.3 Proportion Here use summary from tapply and table to calc total n for NY-NJ-PA
tapply(CPS$MetroArea=="New York-Northern New Jersey-Long Island, NY-NJ-PA",CPS$Country,summary,na.rm=TRUE)
table(CPS$MetroArea=="New York-Northern New Jersey-Long Island, NY-NJ-PA")
1-(3736/5409)
#4.4 Count - Derive from results using summary
tapply(CPS$Country=="Somalia",CPS$MetroArea,summary,na.rm=TRUE)
|
# PyPPL/bioprocs template script: the {{...}} placeholders are rendered to R
# literals by the pipeline before execution; the file is not valid R until then.
{{rimport}}('__init__.r', 'plot.r', 'sampleinfo.r')
library(methods)
options(stringsAsFactors = FALSE)
# Process inputs/outputs and arguments injected by the pipeline.
infile  = {{i.infile | quote}}
gfile   = {{i.gfile | quote}}
prefix  = {{i.infile | fn2 | quote}}
outdir  = {{o.outdir | quote}}
inopts  = {{args.inopts | R}}
tsform  = {{args.tsform or 'NULL'}}
filter  = {{args.filter or 'NULL'}}
plots   = {{args.plot | R}}
ggs     = {{args.ggs | R}}
params  = {{args.params | R}}
devpars = {{args.devpars | R}}
expr = read.table.inopts(infile, inopts)
# Optional user-supplied row filter; the filtered matrix is also written back.
if (is.function(filter)) {
expr = filter(expr)
outfile = file.path(outdir, basename(infile))
write.table(expr, outfile, row.names = TRUE, col.names = TRUE, sep = "\t", quote = FALSE)
}
# Optional transformation (e.g. log) applied after filtering.
if (is.function(tsform)) {
expr = tsform(expr)
}
# Sample grouping information, if a group file was provided.
saminfo = NULL
if (gfile != "") {
saminfo = SampleInfo2$new(gfile)
groups  = saminfo$all.groups()
}
# For each requested plot type: one plot of the whole matrix, plus one per
# sample group when group information is available.
if (plots$boxplot) {
bpfile = file.path(outdir, paste0(prefix, '.boxplot.png'))
plot.boxplot(expr, bpfile, stacked = F, devpars = devpars, params = params$boxplot, ggs = ggs$boxplot)
if (!is.null(saminfo)) {
for (group in groups) {
bpfile = file.path(outdir, paste0(prefix, '.', group, '.boxplot.png'))
plot.boxplot(
expr[, saminfo$get.samples('Group', group), drop = FALSE],
bpfile, stacked = F, devpars = devpars, params = params$boxplot, ggs = ggs$boxplot)
}
}
}
if (plots$histogram) {
histfile = file.path(outdir, paste0(prefix, ".histo.png"))
plot.histo(stack(as.data.frame(expr)), histfile, devpars = devpars, params = params$histogram, ggs = ggs$histogram)
if (!is.null(saminfo)) {
for (group in groups) {
histfile = file.path(outdir, paste0(prefix, '.', group, '.histo.png'))
plot.histo(
stack(expr[, saminfo$get.samples('Group', group), drop = FALSE]),
histfile, devpars = devpars, params = params$histogram, ggs = ggs$histogram)
}
}
}
if (plots$qqplot) {
qqfile = file.path(outdir, paste0(prefix, ".qq.png"))
plot.qq(expr, qqfile, stacked = FALSE, devpars = devpars, params = params$qqplot, ggs = ggs$qqplot)
# NOTE(review): the group branch stacks the subset before plot.qq while still
# passing stacked = FALSE, unlike the whole-matrix call above -- confirm which
# form plot.qq expects.
if (!is.null(saminfo)) {
for (group in groups) {
qqfile = file.path(outdir, paste0(prefix, '.', group, '.qq.png'))
plot.qq(
stack(expr[, saminfo$get.samples('Group', group), drop = FALSE]),
qqfile, stacked = FALSE, devpars = devpars, params = params$qqplot, ggs = ggs$qqplot)
}
}
} | /bioprocs/scripts/rnaseq/pExprStats.r | permissive | LeaveYeah/bioprocs | R | false | false | 2,306 | r |
# PyPPL/bioprocs template script: the {{...}} placeholders are rendered to R
# literals by the pipeline before execution; the file is not valid R until then.
{{rimport}}('__init__.r', 'plot.r', 'sampleinfo.r')
library(methods)
options(stringsAsFactors = FALSE)
# Process inputs/outputs and arguments injected by the pipeline.
infile  = {{i.infile | quote}}
gfile   = {{i.gfile | quote}}
prefix  = {{i.infile | fn2 | quote}}
outdir  = {{o.outdir | quote}}
inopts  = {{args.inopts | R}}
tsform  = {{args.tsform or 'NULL'}}
filter  = {{args.filter or 'NULL'}}
plots   = {{args.plot | R}}
ggs     = {{args.ggs | R}}
params  = {{args.params | R}}
devpars = {{args.devpars | R}}
expr = read.table.inopts(infile, inopts)
# Optional user-supplied row filter; the filtered matrix is also written back.
if (is.function(filter)) {
expr = filter(expr)
outfile = file.path(outdir, basename(infile))
write.table(expr, outfile, row.names = TRUE, col.names = TRUE, sep = "\t", quote = FALSE)
}
# Optional transformation (e.g. log) applied after filtering.
if (is.function(tsform)) {
expr = tsform(expr)
}
# Sample grouping information, if a group file was provided.
saminfo = NULL
if (gfile != "") {
saminfo = SampleInfo2$new(gfile)
groups  = saminfo$all.groups()
}
# For each requested plot type: one plot of the whole matrix, plus one per
# sample group when group information is available.
if (plots$boxplot) {
bpfile = file.path(outdir, paste0(prefix, '.boxplot.png'))
plot.boxplot(expr, bpfile, stacked = F, devpars = devpars, params = params$boxplot, ggs = ggs$boxplot)
if (!is.null(saminfo)) {
for (group in groups) {
bpfile = file.path(outdir, paste0(prefix, '.', group, '.boxplot.png'))
plot.boxplot(
expr[, saminfo$get.samples('Group', group), drop = FALSE],
bpfile, stacked = F, devpars = devpars, params = params$boxplot, ggs = ggs$boxplot)
}
}
}
if (plots$histogram) {
histfile = file.path(outdir, paste0(prefix, ".histo.png"))
plot.histo(stack(as.data.frame(expr)), histfile, devpars = devpars, params = params$histogram, ggs = ggs$histogram)
if (!is.null(saminfo)) {
for (group in groups) {
histfile = file.path(outdir, paste0(prefix, '.', group, '.histo.png'))
plot.histo(
stack(expr[, saminfo$get.samples('Group', group), drop = FALSE]),
histfile, devpars = devpars, params = params$histogram, ggs = ggs$histogram)
}
}
}
if (plots$qqplot) {
qqfile = file.path(outdir, paste0(prefix, ".qq.png"))
plot.qq(expr, qqfile, stacked = FALSE, devpars = devpars, params = params$qqplot, ggs = ggs$qqplot)
# NOTE(review): the group branch stacks the subset before plot.qq while still
# passing stacked = FALSE, unlike the whole-matrix call above -- confirm which
# form plot.qq expects.
if (!is.null(saminfo)) {
for (group in groups) {
qqfile = file.path(outdir, paste0(prefix, '.', group, '.qq.png'))
plot.qq(
stack(expr[, saminfo$get.samples('Group', group), drop = FALSE]),
qqfile, stacked = FALSE, devpars = devpars, params = params$qqplot, ggs = ggs$qqplot)
}
}
} |
#install packages
#We highly recommend keeping all install.packages in its own separate file.
#Pros: running scripts easier, debugging easier, and keeps track of installed packages
#and guarantees they are installed against the checkpoint snapshot date
library(checkpoint) #Load checkpoint for reproducibility
checkpoint("2019-01-21") #Set snapshot of CRAN date.
#Add package here for relative file paths
install.packages("here") #makes sharing files much easier (in combination with Rprojects)
#Add custom packages
install.packages("tidyverse") #All things
install.packages("stargazer")
install.packages("httr") #Tools for Working with URLs and HTTP
install.packages("rjstat") #Read and Write 'JSON-stat' Data Sets (FIX: the CRAN package is "rjstat", not "rjstats")
install.packages("shiny") #For nice interactive figures
install.packages("readxl") #For reading Excel files
install.packages("skimr") #For nice summary statistics
#If installing directly in a Markdown document with checkpoint make sure to use:
#install.packages("httr", repos = "https://mran.microsoft.com/")
#If you face any issues at all, above code will fix it.
#Shiny and its dependencies
install.packages("shiny")
install.packages("plotly")
install.packages("shinythemes")
install.packages("DT")
install.packages("shinyWidgets")
| /scripts/install_packages.R | no_license | andreasolden/ssb-api-og-shiny | R | false | false | 1,238 | r | #install packages
#We highly recommend keeping all install.packages in its own separate file.
#Pros: running scripts easier, debugging easier, and keeps track of installed packages
#and guarantees they are installed against the checkpoint snapshot date
library(checkpoint) #Load checkpoint for reproducibility
checkpoint("2019-01-21") #Set snapshot of CRAN date.
#Add package here for relative file paths
install.packages("here") #makes sharing files much easier (in combination with Rprojects)
#Add custom packages
install.packages("tidyverse") #All things
install.packages("stargazer")
install.packages("httr") #Tools for Working with URLs and HTTP
install.packages("rjstat") #Read and Write 'JSON-stat' Data Sets (FIX: the CRAN package is "rjstat", not "rjstats")
install.packages("shiny") #For nice interactive figures
install.packages("readxl") #For reading Excel files
install.packages("skimr") #For nice summary statistics
#If installing directly in a Markdown document with checkpoint make sure to use:
#install.packages("httr", repos = "https://mran.microsoft.com/")
#If you face any issues at all, above code will fix it.
#Shiny and its dependencies
install.packages("shiny")
install.packages("plotly")
install.packages("shinythemes")
install.packages("DT")
install.packages("shinyWidgets")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transition-manual.R
\name{transition_manual}
\alias{transition_manual}
\title{Create an animation by specifying the frame membership directly}
\usage{
transition_manual(frames)
}
\arguments{
\item{frames}{The unquoted name of the column holding the frame membership.}
}
\description{
This transition allows you to map a variable in your data to a specific frame
in the animation. No tweening of data will be made and the number of frames
in the animation will be decided by the number of levels in the frame
variable.
}
\section{Label variables}{
\code{transition_manual} makes the following variables available for string
literal interpretation:
\itemize{
\item \strong{previous_frame} The name of the last frame the animation was at
\item \strong{current_frame} The name of the current frame
\item \strong{next_frame} The name of the next frame in the animation
}
}
\seealso{
Other transitions: \code{\link{transition_components}},
\code{\link{transition_events}},
\code{\link{transition_layers}},
\code{\link{transition_null}},
\code{\link{transition_states}},
\code{\link{transition_time}}
}
\concept{transitions}
| /man/transition_manual.Rd | no_license | kanishkamisra/gganimate | R | false | true | 1,208 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transition-manual.R
\name{transition_manual}
\alias{transition_manual}
\title{Create an animation by specifying the frame membership directly}
\usage{
transition_manual(frames)
}
\arguments{
\item{frames}{The unquoted name of the column holding the frame membership.}
}
\description{
This transition allows you to map a variable in your data to a specific frame
in the animation. No tweening of data will be made and the number of frames
in the animation will be decided by the number of levels in the frame
variable.
}
\section{Label variables}{
\code{transition_manual} makes the following variables available for string
literal interpretation:
\itemize{
\item \strong{previous_frame} The name of the last frame the animation was at
\item \strong{current_frame} The name of the current frame
\item \strong{next_frame} The name of the next frame in the animation
}
}
\seealso{
Other transitions: \code{\link{transition_components}},
\code{\link{transition_events}},
\code{\link{transition_layers}},
\code{\link{transition_null}},
\code{\link{transition_states}},
\code{\link{transition_time}}
}
\concept{transitions}
|
##
## Solutions to the application exercises.
##
library(ggplot2)
# Combine two numeric vectors into long format: a 'value' column and a
# 'variable' factor identifying the source group. Optional 'levels' renames
# the two factor levels (e.g. c("A", "B")); otherwise they are "1" and "2".
# Rewritten in base R so the reshape2 dependency is no longer needed; the
# output matches melt(list(x1, x2)) followed by the original renaming steps.
twogr <- function(x1, x2, levels = NULL) {
  d <- data.frame(value = c(x1, x2),
                  variable = factor(rep(1:2, c(length(x1), length(x2)))))
  if (!is.null(levels)) levels(d$variable) <- levels
  return(d)
}
################
## Exercice 1 ##
################
x1 <- c(11.1, 12.2, 15.5, 17.6, 13.0, 7.5, 9.1, 6.6, 9.5, 19.0, 12.6)
x2 <- c(18.2, 14.1, 13.8, 12.1, 34.1, 12.0, 14.1, 14.5, 12.6, 12.5, 19.8, 13.4, 16.8, 14.1, 12.9)
d <- twogr(x1, x2, c("x1", "x2"))
# Boxplots with raw data points overlaid.
ggplot(data = d, aes(x = variable, y = value)) +
geom_boxplot(outlier.color = "transparent") +
geom_jitter(width = .1, size = 1)
## Determine whether the groups differ based on Welch's test. Use alpha = 0.05.
t.test(value ~ variable, data = d, var.equal = FALSE) ## p = 0.073 > 0.05, not significant
## Compare the observed p-value with that obtained from a permutation test.
library(coin)
oneway_test(value ~ variable, data = d) ## same conclusion
## What are the results of applying a Yuen's test (using 10% trimming) on this dataset?
source("R/yuen.r")
yuen(x1, x2, tr = 0.1) ## still not significant with 10% trimmed means
################
## Exercice 2 ##
################
x1 <- c(1.96, 2.24, 1.71, 2.41, 1.62, 1.93)
x2 <- c(2.11, 2.43, 2.07, 2.71, 2.50, 2.84, 2.88)
d <- twogr(x1, x2, c("A", "B"))
## Compute average ranks in the two groups.
aggregate(value ~ variable, data = d, function(x) mean(rank(x)))
## Verify that the Wilcoxon-Mann-Whitney test rejects with alpha = 0.05.
wilcox.test(value ~ variable, data = d) ## p = 0.014 < 0.05, reject H0
## Estimate the probability that a randomly sampled participant receiving drug A will have
## a smaller reduction in reaction time than a randomly sampled participant receiving drug B.
## Verify that the estimate is 0.9.
oneway_test(value ~ variable, data = d, distribution = "exact")
################
## Exercice 3 ##
################
x1 <- c(10, 14, 15, 18, 20, 29, 30, 40)
x2 <- c(40, 8, 15, 20, 10, 8, 2, 3)
## Compare the two groups with the sign test and the Wilcoxon signed rank test with alpha = 0.05.
wilcox.test(x1, x2, paired = TRUE, correct = FALSE)
## Verify that according to the sign test, $\hat p = 0.29$ and that the 0.95 confidence interval
## for p is (0.04, 0.71), and that the Wilcoxon signed rank test has an approximate p-value of 0.08.
# Sign test: drop zero differences, count the negative ones, exact binomial test.
di <- x1 - x2
di[di == 0] <- NA
ni <- sum(!is.na(di))
vi <- sum(di < 0, na.rm = TRUE)
binom.test(vi, ni)
################
## Exercice 4 ##
################
t.test(extra ~ group, data = sleep, paired = TRUE)
## What is the observed (post-hoc) power of such a comparison (10 subjects)?
## Compute the difference scores between the two hypnotics and run a
## one-sample t-test of the null hypothesis that the mean difference is zero.
d <- cbind(sleep$extra[sleep$group == 1], sleep$extra[sleep$group == 2])
di <- d[,1] - d[,2]
t.test(di)
power.t.test(n = 10, delta = mean(di), sd = sd(di), type = "paired") ## power ~ 0.95
## Estimate a 95% bootstrap confidence interval for this mean difference.
library(boot)
f <- function(data, i) mean(data[i])
bs <- boot(di, statistic = f, R = 500)
boot.ci(bs, conf = 0.95, type = "perc")
################
## Exercice 5 ##
################
d <- data.frame(value = c(10, 11, 12, 9, 8, 7,
10, 66, 15, 32, 22, 51,
1, 12, 42, 31, 55, 19),
variable = gl(3, 6, labels = paste0("g", 1:3)))
# Classical one-way ANOVA (assumes equal variances):
summary(aov(value ~ variable, data = d))
oneway.test(value ~ variable, data = d) ## Welch's ANOVA; does not assume equal variances
################
## Exercice 6 ##
################
tmp <- read.table("../data/weight.dat")
# 2x2 layout: 20 observations per diet type, 10 per amount within type.
d <- data.frame(weight = as.numeric(unlist(tmp)),
type = gl(2, 20, labels = c("Beef", "Cereal")),
level = gl(2, 10, labels = c("Low", "High")))
fm <- weight ~ type * level
## Which effects are significant?
m <- aov(fm, data = d)
summary(m) ## level
## Perform all pairwise comparisons of means with Student t-tests, assuming
## a common variance or not, and compare the results with protected
## LSD- or Tukey-type tests.
with(d, pairwise.t.test(weight,
interaction(type, level),
p.adjust.method = "none"))
with(d, pairwise.t.test(weight,
interaction(type, level),
p.adjust.method = "none",
pool.sd = FALSE))
library(multcomp)
summary(glht(m, linfct = mcp(type = "Tukey")))
## To test for all pairs of means, see how to build the corresponding design matrix
## and apply a Tukey test in the following vignette (page 9):
## https://cran.r-project.org/web/packages/multcomp/vignettes/multcomp-examples.pdf
################
## Exercice 7 ##
################
load("../data/rats.rda") ## "rat" data frame
rat <- within(rat, {
Diet.Amount <- factor(Diet.Amount, levels = 1:2, labels = c("High", "Low"))
Diet.Type <- factor(Diet.Type, levels = 1:3, labels = c("Beef", "Pork", "Cereal"))
})
## Show average responses for the 60 rats in an interaction plot.
ratm <- aggregate(Weight.Gain ~ Diet.Amount + Diet.Type, data = rat, mean)
ggplot(data = rat, aes(x = Diet.Type, y = Weight.Gain, color = Diet.Amount)) +
geom_jitter() +
geom_line(data = ratm, aes(x = Diet.Type, y = Weight.Gain, color = Diet.Amount, group = Diet.Amount))
## Consider the following 6 treatments: Beef/High, Beef/Low, Pork/High, Pork/Low,
## Cereal/High, et Cereal/Low. What is the result of the F-test for a one-way ANOVA?
tx <- with(rat, interaction(Diet.Amount, Diet.Type, sep="/"))
head(tx)
unique(table(tx)) ## obs / treatment
m <- aov(Weight.Gain ~ tx, data=rat)
summary(m)
## Use `pairwise.t.test()` with Bonferroni correction to identify pairs of treatments
## that differ significantly one from the other.
pairwise.t.test(rat$Weight.Gain, tx, p.adjust="bonf")
## Based on these 6 treatments, build a matrix of 5 contrasts allowing to test the following
## conditions: beef vs. cereal and beef vs. porc (2 DF); high vs. low dose (1 DF);
## and interaction type/amount (2 DF). Partition the SS associated to treatment computed
## in (2) according to these contrasts, and test each contrast at a 5% level (you can
## consider that there's no need to correct the multiple tests if contrasts were defined a priori).
ctr <- cbind(c(-1,-1,-1,-1,2,2)/6,
c(-1,-1,1,1,0,0)/4,
c(-1,1,-1,1,-1,1)/6,
c(1,-1,1,-1,-2,2)/6, # C1 x C3
c(1,-1,-1,1,0,0)/4) # C2 x C3
crossprod(ctr)
contrasts(tx) <- ctr
m <- aov(rat$Weight.Gain ~ tx)
summary(m, split=list(tx=1:5))
## Compare those results with a two-way ANOVA including the interaction between type and amount.
summary(aov(Weight.Gain ~ Diet.Type * Diet.Amount, data=rat))
## Test the following post-hoc contrast: beef and pork at high dose (i.e., Beef/High
## and Pork/High) vs. the average of all other treatments. Is the result significant
## at the 5% level? What does this result suggest?
tx <- with(rat, interaction(Diet.Amount, Diet.Type, sep="/"))
C6 <- c(2,-1,2,-1,-1,-1)/6
summary(glht(aov(rat$Weight.Gain ~ tx), linfct=mcp(tx=C6)))
################
## Exercice 8 ##
################
## Construire un graphique d'interaction (`dose` en abscisses) et commenter.
tooth.mean = aggregate(len ~ dose + supp, data = ToothGrowth, mean)
ggplot(data = ToothGrowth, aes(x = dose, y = len, color = supp)) +
geom_point(position = position_jitterdodge(jitter.width = .1, dodge.width = 0.25)) +
geom_line(data = tooth.mean, aes(x = dose, y = len, color = supp)) +
scale_color_manual(values = c("dodgerblue", "coral")) +
labs(x = "Dose (mg/day)", y = "Length (oc. unit)")
## Rรฉaliser un test de Student pour comparer les groupes `OJ` et `VC` pour `dose == 0.5`. Les deux groupes peuvent-ils รชtre considรฉrรฉs comme diffรฉrent au seuil $\alpha = 0.05$ ?
t.test(len ~ supp, data = subset(ToothGrowth, dose == 0.5))
## Rรฉaliser une ANOVA ร deux facteurs avec interaction. Peut-on conclure ร l'existence d'un effet dose dรฉpendant du mode d'administration de la vitamine C ?
ToothGrowth$dose <- factor(ToothGrowth$dose)
m <- aov(len ~ supp * dose, data = ToothGrowth)
summary(m)
## Tester la linรฉaritรฉ de l'effet dose ร l'aide d'un modรจle de rรฉgression linรฉaire.
ToothGrowth$dose <- as.numeric(as.character(ToothGrowth$dose))
m <- lm(len ~ supp * dose, data = ToothGrowth)
summary(m)
################
## Exercice 9 ##
################
data(anorexia, package = "MASS")
## Construire un diagramme de dispersion (`Prewt` en abscisses, `Postwt` en ordonnรฉes)
## avec des droites de rรฉgression conditionnelles pour chacun des trois groupes.
ggplot(data = anorexia, aes(x = Prewt, y = Postwt, color = Treat)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE)
## Comparer ร l'aide de tests de Student les 3 groupes dans la pรฉriode prรฉ-traitement.
with(anorexia, pairwise.t.test(Prewt, Treat, p.adjust.method = "none", pool.sd = FALSE))
## Rรฉaliser un modรจle d'ANCOVA en testant l'interaction `Treat:Prewt`, en incluant
## le groupe contrรดle ou non.
m0 <- lm(Postwt ~ Prewt + Treat, data = anorexia)
m1 <- lm(Postwt ~ Prewt * Treat, data = anorexia)
anova(m0, m1)
m2 <- lm(Postwt ~ Prewt * Treat, data = subset(anorexia, Treat != "Cont"))
summary(m2)
################
## Exercice 10 ##
################
babies <- read.table("../data/babies.txt", header = TRUE, na.string = ".")
babies$baby <- factor(babies$baby)
babies$loginsp <- log(babies$inspirat)
m1 <- aov(loginsp ~ maxp + Error(baby), data = babies[complete.cases(babies),])
summary(m1)
library(nlme)
m2 <- lme(loginsp ~ maxp, data=babies, random= ~ 1 | baby, na.action=na.omit)
anova(m2)
| /handouts/solution.r | no_license | even4void/rstats-ssample | R | false | false | 9,943 | r | ##
## Solutions des exercices d'applications.
##
library(ggplot2)
## Reshape two numeric vectors into a long-format data frame with columns
## `value` and `variable` (a factor identifying the group), suitable for
## ggplot2 and formula-based tests. `levels` optionally relabels the groups.
twogr <- function(x1, x2, levels = NULL) {
## NOTE(review): require() returns FALSE instead of erroring when reshape2 is
## missing; library() would fail loudly -- confirm which behavior is wanted.
require(reshape2)
d <- melt(list(x1, x2))
## melt() names the list-index column "L1"; rename it for formula interfaces.
names(d)[2] <- "variable"
d$variable <- factor(d$variable)
if (!is.null(levels)) levels(d$variable) <- levels
return(d)
}
################
## Exercise 1 ##
################
x1 <- c(11.1, 12.2, 15.5, 17.6, 13.0, 7.5, 9.1, 6.6, 9.5, 19.0, 12.6)
x2 <- c(18.2, 14.1, 13.8, 12.1, 34.1, 12.0, 14.1, 14.5, 12.6, 12.5, 19.8, 13.4, 16.8, 14.1, 12.9)
d <- twogr(x1, x2, c("x1", "x2"))
ggplot(data = d, aes(x = variable, y = value)) +
geom_boxplot(outlier.color = "transparent") +
geom_jitter(width = .1, size = 1)
## Determine whether the groups differ based on Welch's test. Use alpha = 0.05.
t.test(value ~ variable, data = d, var.equal = FALSE) ## p = 0.073 > 0.05, not significant
## Compare the observed p-value with that obtained from a permutation test.
library(coin)
oneway_test(value ~ variable, data = d) ## same conclusion
## What are the results of applying a Yuen's test (using 10% trimming) on this dataset?
source("R/yuen.r")
yuen(x1, x2, tr = 0.1) ## still not significant with 10% trimmed means
################
## Exercise 2 ##
################
x1 <- c(1.96, 2.24, 1.71, 2.41, 1.62, 1.93)
x2 <- c(2.11, 2.43, 2.07, 2.71, 2.50, 2.84, 2.88)
d <- twogr(x1, x2, c("A", "B"))
## Compute average ranks in the two groups.
## NOTE(review): aggregate() applies rank() within each group, which always
## yields (n + 1) / 2; average ranks in the pooled sample would be
## aggregate(rank(value) ~ variable, data = d, mean) -- confirm intent.
aggregate(value ~ variable, data = d, function(x) mean(rank(x)))
## Verify that the Wilcoxon-Mann-Whitney test rejects with alpha = 0.05.
wilcox.test(value ~ variable, data = d) ## p = 0.014 < 0.05, reject H0
## Estimate the probability that a randomly sampled participant receiving drug A will have
## a smaller reduction in reaction time than a randomly sampled participant receiving drug B.
## Verify that the estimate is 0.9.
oneway_test(value ~ variable, data = d, distribution = "exact")
################
## Exercise 3 ##
################
x1 <- c(10, 14, 15, 18, 20, 29, 30, 40)
x2 <- c(40, 8, 15, 20, 10, 8, 2, 3)
## Compare the two groups with the sign test and the Wilcoxon signed rank test with alpha = 0.05.
wilcox.test(x1, x2, paired = TRUE, correct = FALSE)
## Verify that according to the sign test, $\hat p = 0.29$ and that the 0.95 confidence interval
## for p is (0.04, 0.71), and that the Wilcoxon signed rank test has an approximate p-value of 0.08.
di <- x1 - x2
## Zero differences carry no sign information; drop them from the sign test.
di[di == 0] <- NA
ni <- sum(!is.na(di))
vi <- sum(di < 0, na.rm = TRUE)
## The sign test is an exact binomial test on the number of negative differences.
binom.test(vi, ni)
################
## Exercise 4 ##
################
t.test(extra ~ group, data = sleep, paired = TRUE)
## What is the post-hoc (a posteriori) power of such a comparison (10 subjects)?
## Compute the difference scores between the two hypnotics and run a one-sample
## t-test of the hypothesis that the mean difference is zero.
d <- cbind(sleep$extra[sleep$group == 1], sleep$extra[sleep$group == 2])
di <- d[,1] - d[,2]
t.test(di)
power.t.test(n = 10, delta = mean(di), sd = sd(di), type = "paired") ## power approx. 0.95
## Estimate a 95% confidence interval for this mean difference by bootstrap.
library(boot)
f <- function(data, i) mean(data[i])
bs <- boot(di, statistic = f, R = 500)
boot.ci(bs, conf = 0.95, type = "perc")
################
## Exercise 5 ##
################
d <- data.frame(value = c(10, 11, 12, 9, 8, 7,
                          10, 66, 15, 32, 22, 51,
                          1, 12, 42, 31, 55, 19),
                variable = gl(3, 6, labels = paste0("g", 1:3)))
summary(aov(value ~ variable, data = d))
oneway.test(value ~ variable, data = d) ## Welch's ANOVA (no equal-variance assumption)
################
## Exercise 6 ##
################
tmp <- read.table("../data/weight.dat")
d <- data.frame(weight = as.numeric(unlist(tmp)),
                type = gl(2, 20, labels = c("Beef", "Cereal")),
                level = gl(2, 10, labels = c("Low", "High")))
fm <- weight ~ type * level
## Which effects are significant?
m <- aov(fm, data = d)
summary(m) ## level
## Perform all pairwise comparisons of means with Student t-tests, assuming a
## common variance or not, and compare the results with protected tests such as
## LSD or Tukey.
with(d, pairwise.t.test(weight,
                        interaction(type, level),
                        p.adjust.method = "none"))
with(d, pairwise.t.test(weight,
                        interaction(type, level),
                        p.adjust.method = "none",
                        pool.sd = FALSE))
library(multcomp)
summary(glht(m, linfct = mcp(type = "Tukey")))
## To test for all pairs of means, see how to build the corresponding design matrix
## and apply a Tukey test in the following vignette (page 9):
## https://cran.r-project.org/web/packages/multcomp/vignettes/multcomp-examples.pdf
################
## Exercise 7 ##
################
load("../data/rats.rda") ## provides the "rat" data frame
rat <- within(rat, {
  Diet.Amount <- factor(Diet.Amount, levels = 1:2, labels = c("High", "Low"))
  Diet.Type <- factor(Diet.Type, levels = 1:3, labels = c("Beef", "Pork", "Cereal"))
})
## Show average responses for the 60 rats in an interaction plot.
ratm <- aggregate(Weight.Gain ~ Diet.Amount + Diet.Type, data = rat, mean)
ggplot(data = rat, aes(x = Diet.Type, y = Weight.Gain, color = Diet.Amount)) +
  geom_jitter() +
  geom_line(data = ratm, aes(x = Diet.Type, y = Weight.Gain, color = Diet.Amount, group = Diet.Amount))
## Consider the following 6 treatments: Beef/High, Beef/Low, Pork/High, Pork/Low,
## Cereal/High, and Cereal/Low. What is the result of the F-test for a one-way ANOVA?
tx <- with(rat, interaction(Diet.Amount, Diet.Type, sep="/"))
head(tx)
unique(table(tx)) ## observations per treatment
m <- aov(Weight.Gain ~ tx, data=rat)
summary(m)
## Use `pairwise.t.test()` with Bonferroni correction to identify pairs of treatments
## that differ significantly one from the other.
pairwise.t.test(rat$Weight.Gain, tx, p.adjust="bonf")
## Based on these 6 treatments, build a matrix of 5 contrasts allowing to test the following
## conditions: beef vs. cereal and beef vs. pork (2 DF); high vs. low dose (1 DF);
## and interaction type/amount (2 DF). Partition the SS associated to treatment computed
## in (2) according to these contrasts, and test each contrast at a 5% level (you can
## consider that there's no need to correct the multiple tests if contrasts were defined a priori).
ctr <- cbind(c(-1,-1,-1,-1,2,2)/6,
             c(-1,-1,1,1,0,0)/4,
             c(-1,1,-1,1,-1,1)/6,
             c(1,-1,1,-1,-2,2)/6, # C1 x C3
             c(1,-1,-1,1,0,0)/4) # C2 x C3
crossprod(ctr) ## zero off-diagonal entries confirm the contrasts are orthogonal
contrasts(tx) <- ctr
m <- aov(rat$Weight.Gain ~ tx)
summary(m, split=list(tx=1:5))
## Compare those results with a two-way ANOVA including the interaction between type and amount.
summary(aov(Weight.Gain ~ Diet.Type * Diet.Amount, data=rat))
## Test the following post-hoc contrast: beef and pork at high dose (i.e., Beef/High
## and Pork/High) vs. the average of all other treatments. Is the result significant
## at the 5% level? What does this result suggest?
tx <- with(rat, interaction(Diet.Amount, Diet.Type, sep="/"))
C6 <- c(2,-1,2,-1,-1,-1)/6
summary(glht(aov(rat$Weight.Gain ~ tx), linfct=mcp(tx=C6)))
################
## Exercise 8 ##
################
## Build an interaction plot (`dose` on the x-axis) and comment.
tooth.mean = aggregate(len ~ dose + supp, data = ToothGrowth, mean)
ggplot(data = ToothGrowth, aes(x = dose, y = len, color = supp)) +
  geom_point(position = position_jitterdodge(jitter.width = .1, dodge.width = 0.25)) +
  geom_line(data = tooth.mean, aes(x = dose, y = len, color = supp)) +
  scale_color_manual(values = c("dodgerblue", "coral")) +
  labs(x = "Dose (mg/day)", y = "Length (oc. unit)")
## Run a Student t-test comparing the `OJ` and `VC` groups for `dose == 0.5`.
## Can the two groups be considered different at the $\alpha = 0.05$ level?
t.test(len ~ supp, data = subset(ToothGrowth, dose == 0.5))
## Run a two-way ANOVA with interaction. Can we conclude that there is a dose
## effect that depends on the mode of administration of vitamin C?
ToothGrowth$dose <- factor(ToothGrowth$dose)
m <- aov(len ~ supp * dose, data = ToothGrowth)
summary(m)
## Test the linearity of the dose effect using a linear regression model.
ToothGrowth$dose <- as.numeric(as.character(ToothGrowth$dose))
m <- lm(len ~ supp * dose, data = ToothGrowth)
summary(m)
################
## Exercise 9 ##
################
data(anorexia, package = "MASS")
## Draw a scatterplot (`Prewt` on the x-axis, `Postwt` on the y-axis) with
## conditional regression lines for each of the three groups.
ggplot(data = anorexia, aes(x = Prewt, y = Postwt, color = Treat)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE)
## Compare the 3 groups in the pre-treatment period using Student t-tests.
with(anorexia, pairwise.t.test(Prewt, Treat, p.adjust.method = "none", pool.sd = FALSE))
## Fit an ANCOVA model testing the `Treat:Prewt` interaction, with and without
## the control group.
m0 <- lm(Postwt ~ Prewt + Treat, data = anorexia)
m1 <- lm(Postwt ~ Prewt * Treat, data = anorexia)
anova(m0, m1)
m2 <- lm(Postwt ~ Prewt * Treat, data = subset(anorexia, Treat != "Cont"))
summary(m2)
################
## Exercise 10 ##
################
babies <- read.table("../data/babies.txt", header = TRUE, na.string = ".")
babies$baby <- factor(babies$baby)
babies$loginsp <- log(babies$inspirat)
## Repeated-measures ANOVA with baby as the error (blocking) stratum.
m1 <- aov(loginsp ~ maxp + Error(baby), data = babies[complete.cases(babies),])
summary(m1)
## Equivalent mixed-effects model with a random intercept per baby.
library(nlme)
m2 <- lme(loginsp ~ maxp, data=babies, random= ~ 1 | baby, na.action=na.omit)
anova(m2)
|
#' Save R session information
#'
#' Creates a dated text file (.txt) containing the contents of
#' \code{sessioninfo::\link[sessioninfo]{session_info}()}.
#'
#' The date and time when this function was run is included in the resulting
#' .txt file's name and first line. This date and time is obtained from
#' \code{\link{Sys.time}()}.
#'
#' For the file name, hyphens (-) are removed from the date, spaces are replaced
#' with underscores (_), and colons (:) are replaced with a modifier letter
#' colon (U+A789), since a plain colon is not a legal file-name character on
#' all platforms.
#'
#' @param path_dir The full path of the directory where the session information
#'   text file shall be written. If it doesn't exist, it is written with
#'   \code{fs::\link[fs]{dir_create}()}.
#'
#' @return A list of two:
#'
#'   \code{$ time :} the value of \code{\link{Sys.time}()} that the
#'   function used
#'
#'   \code{$ session_info() :} the value of
#'   \code{sessioninfo::\link[sessioninfo]{session_info}()} that the function
#'   used
#'
#' @export
save_session_info <- function(path_dir = here::here("progs", "session_info")) {
  # Create the output directory on first use.
  if (!fs::file_exists(path_dir)) {
    fs::dir_create(path_dir)
  }
  # Capture the timestamp once so the file name and the header line agree.
  time <- Sys.time()
  time_string <- as.character(time, usetz = TRUE)
  session_info <- sessioninfo::session_info()
  # Render the header line plus the printed session_info() output as text lines.
  txt_lines <-
    utils::capture.output(
      cat(paste0("Run time: ", time_string, "\n\n")),
      session_info
    )
  # write_lines() invisibly returns its input, so piping into cat() also
  # echoes the file's contents to the console.
  readr::write_lines(
    txt_lines,
    fs::path(
      path_dir,
      paste0(
        "session_info_",
        stringr::str_replace_all(
          time_string,
          c("-" = "", " " = "_", ":" = "\uA789")
        )
      ),
      ext = "txt"
    )
  ) %>%
    cat(sep = "\n")
  # Return the inputs used, invisibly, so the call is quiet but inspectable.
  invisible(
    list(
      time = time,
      session_info = session_info
    )
  )
}
#' Compress a project folder
#'
#' Creates a compressed file out of a user-specified project folder for sharing.
#'
#' Currently, this function uses \code{zip::\link[zip]{zipr}()}.
#'
#' @param project Project \code{id} or unambiguous substring of the project name
#'   from the \code{\link{projects}()} table.
#' @param zipfile Desired file path of the resulting compressed folder file,
#'   including the file's desired name and file extension. See the
#'   \code{zipfile} argument for the \code{zip::\link[zip]{zipr}()} function.
#' @param include_hidden Logical indicating whether or not to include hidden
#'   folders and files (e.g., those with names that begin with a period).
#'   Defaults to \code{FALSE}.
#' @param exclude Character vector of exact names of first-level subdirectories
#'   of the project folder to exclude from the resulting compressed folder file.
#'
#' @return The name of the created zip file, invisibly.
#'
#' @export
export_project <- function(project,
                           zipfile,
                           include_hidden = FALSE,
                           exclude = NULL) {
  # Empty strings would silently match nothing; reject them up front.
  # (as.character(NULL) is character(0), so the default passes this check.)
  exclude <- as.character(exclude)
  if (!all(nchar(exclude) > 0L)) {
    stop("Each element of exclude must have at least 1 character.")
  }
  # Resolve the project's folder path, erroring if `project` is ambiguous.
  project_dir <-
    validate_unique_entry(
      x = project,
      table = projects_internal(),
      what = "project"
    )$path
  # List first-level entries, then drop any whose base name is excluded.
  files <-
    fs::dir_ls(project_dir, all = include_hidden) %>%
    `[`(!(fs::path_file(.) %in% exclude))
  # zipr() recurses into the kept subdirectories and invisibly returns zipfile.
  zip::zipr(zipfile, files = files, recurse = TRUE)
}
| /R/reproducibility.R | permissive | NicholasMcKie/projects | R | false | false | 3,323 | r |
#' Save R session information
#'
#' Creates a dated text file (.txt) containing the contents of
#' \code{sessioninfo::\link[sessioninfo]{session_info}()}.
#'
#' The date and time when this function was run is included in the resulting
#' .txt file's name and first line. This date and time is obtained from
#' \code{\link{Sys.time}()}.
#'
#' For the file name, hyphens (-) are removed from the date, spaces are replaced
#' with underscores (_), and colons (:) are replaced with a modifier letter
#' colon (U+A789), since a plain colon is not a legal file-name character on
#' all platforms.
#'
#' @param path_dir The full path of the directory where the session information
#'   text file shall be written. If it doesn't exist, it is written with
#'   \code{fs::\link[fs]{dir_create}()}.
#'
#' @return A list of two:
#'
#'   \code{$ time :} the value of \code{\link{Sys.time}()} that the
#'   function used
#'
#'   \code{$ session_info() :} the value of
#'   \code{sessioninfo::\link[sessioninfo]{session_info}()} that the function
#'   used
#'
#' @export
save_session_info <- function(path_dir = here::here("progs", "session_info")) {
  # Create the output directory on first use.
  if (!fs::file_exists(path_dir)) {
    fs::dir_create(path_dir)
  }
  # Capture the timestamp once so the file name and the header line agree.
  time <- Sys.time()
  time_string <- as.character(time, usetz = TRUE)
  session_info <- sessioninfo::session_info()
  # Render the header line plus the printed session_info() output as text lines.
  txt_lines <-
    utils::capture.output(
      cat(paste0("Run time: ", time_string, "\n\n")),
      session_info
    )
  # write_lines() invisibly returns its input, so piping into cat() also
  # echoes the file's contents to the console.
  readr::write_lines(
    txt_lines,
    fs::path(
      path_dir,
      paste0(
        "session_info_",
        stringr::str_replace_all(
          time_string,
          c("-" = "", " " = "_", ":" = "\uA789")
        )
      ),
      ext = "txt"
    )
  ) %>%
    cat(sep = "\n")
  # Return the inputs used, invisibly, so the call is quiet but inspectable.
  invisible(
    list(
      time = time,
      session_info = session_info
    )
  )
}
#' Compress a project folder
#'
#' Creates a compressed file out of a user-specified project folder for sharing.
#'
#' Currently, this function uses \code{zip::\link[zip]{zipr}()}.
#'
#' @param project Project \code{id} or unambiguous substring of the project name
#'   from the \code{\link{projects}()} table.
#' @param zipfile Desired file path of the resulting compressed folder file,
#'   including the file's desired name and file extension. See the
#'   \code{zipfile} argument for the \code{zip::\link[zip]{zipr}()} function.
#' @param include_hidden Logical indicating whether or not to include hidden
#'   folders and files (e.g., those with names that begin with a period).
#'   Defaults to \code{FALSE}.
#' @param exclude Character vector of exact names of first-level subdirectories
#'   of the project folder to exclude from the resulting compressed folder file.
#'
#' @return The name of the created zip file, invisibly.
#'
#' @export
export_project <- function(project,
                           zipfile,
                           include_hidden = FALSE,
                           exclude = NULL) {
  # Empty strings would silently match nothing; reject them up front.
  # (as.character(NULL) is character(0), so the default passes this check.)
  exclude <- as.character(exclude)
  if (!all(nchar(exclude) > 0L)) {
    stop("Each element of exclude must have at least 1 character.")
  }
  # Resolve the project's folder path, erroring if `project` is ambiguous.
  project_dir <-
    validate_unique_entry(
      x = project,
      table = projects_internal(),
      what = "project"
    )$path
  # List first-level entries, then drop any whose base name is excluded.
  files <-
    fs::dir_ls(project_dir, all = include_hidden) %>%
    `[`(!(fs::path_file(.) %in% exclude))
  # zipr() recurses into the kept subdirectories and invisibly returns zipfile.
  zip::zipr(zipfile, files = files, recurse = TRUE)
}
|
# Draw random crossover designs until one allows estimation of the contrasts C
# under `model` (as judged by estimable_R), or give up after 1000 retries.
# When v.rep is missing, treatment replications are spread as evenly as
# possible over the s*p cells, with the remainder going to the first labels.
randomDesign <- function(s, p, v, v.rep, balance.s=FALSE, balance.p=FALSE, model, C) {
  if (missing(v.rep)) {
    extra <- (s*p) %% v
    v.rep <- rep((s*p) %/% v, v) + c(rep(1, extra), rep(0, v-extra))
  }
  attempts <- 0
  repeat {
    design <- randomDesignWithoutCheck(s, p, v, v.rep, balance.s, balance.p, model)
    if (estimable_R(design, v, model, C)) {
      return(design)
    }
    attempts <- attempts + 1
    if (attempts > 1000) {
      stop("Could not find design that allows estimation of contrasts after 1000 tries.")
    }
  }
}
# Generate one random p-period x s-subject crossover design matrix whose cells
# hold treatment labels 1..v, with treatment i appearing v.rep[i] times.
# balance.s permutes treatments within each subject (column); balance.p within
# each period (row); otherwise the whole pool is permuted freely.
# NOTE(review): the `model` argument is accepted but not used here -- presumably
# kept for a uniform signature with randomDesign(); confirm.
randomDesignWithoutCheck <- function(s, p, v, v.rep, balance.s=FALSE, balance.p=FALSE, model) {
  if (balance.s) {
    # Group the treatment pool by subject label, shuffle within each subject.
    design <- matrix(unlist(tapply(rep(1:v, v.rep), as.factor(rep(1:s,p)), sample)), p, s)
  } else if (balance.p) {
    # Group by period label and shuffle within each period; byrow=TRUE lays
    # each period's treatments out as a row.
    design <- matrix(unlist(tapply(rep(1:v, v.rep), as.factor(rep(1:p,s)), sample)), p, s, byrow=TRUE)
  } else {
    # Unconstrained: permute the whole pool and fill column-wise.
    design <- matrix(sample(rep(1:v, v.rep)), p, s)
  }
  return(design)
} | /pkg/Crossover/R/randomDesign.R | no_license | kornl/crossover | R | false | false | 1,056 | r | randomDesign <- function(s, p, v, v.rep, balance.s=FALSE, balance.p=FALSE, model, C) {
if (missing(v.rep)) {
v.rep <- rep((s*p) %/% v, v) + c(rep(1, (s*p) %% v), rep(0, v-((s*p) %% v)))
}
design <- randomDesignWithoutCheck(s, p, v, v.rep, balance.s, balance.p, model)
i <- 0
# We should disable the really rare warnings estimable_R could throw.
while (!estimable_R(design, v, model, C)) {
i <- i + 1
if (i>1000) stop("Could not find design that allows estimation of contrasts after 1000 tries.")
design <- randomDesignWithoutCheck(s, p, v, v.rep, balance.s, balance.p, model)
}
return(design)
}
# Generate one random p-period x s-subject crossover design matrix whose cells
# hold treatment labels 1..v, with treatment i appearing v.rep[i] times.
# balance.s shuffles treatments within each subject (column), balance.p within
# each period (row); otherwise the whole pool is permuted freely. The `model`
# argument is unused here (kept for a uniform signature with randomDesign).
randomDesignWithoutCheck <- function(s, p, v, v.rep, balance.s=FALSE, balance.p=FALSE, model) {
  pool <- rep(1:v, v.rep)
  if (balance.s) {
    subject <- as.factor(rep(1:s, p))
    return(matrix(unlist(tapply(pool, subject, sample)), p, s))
  }
  if (balance.p) {
    period <- as.factor(rep(1:p, s))
    return(matrix(unlist(tapply(pool, period, sample)), p, s, byrow = TRUE))
  }
  matrix(sample(pool), p, s)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit.R
\name{fit.model_spec}
\alias{fit.model_spec}
\alias{fit_xy.model_spec}
\title{Fit a Model Specification to a Dataset}
\usage{
\method{fit}{model_spec}(object, formula = NULL, data = NULL,
control = fit_control(), ...)
\method{fit_xy}{model_spec}(object, x = NULL, y = NULL,
control = fit_control(), ...)
}
\arguments{
\item{object}{An object of class \code{model_spec} that has a chosen engine
(via \code{\link[=set_engine]{set_engine()}}).}
\item{formula}{An object of class "formula" (or one that can
be coerced to that class): a symbolic description of the model
to be fitted.}
\item{data}{Optional, depending on the interface (see Details
below). A data frame containing all relevant variables (e.g.
outcome(s), predictors, case weights, etc). Note: when needed, a
\emph{named argument} should be used.}
\item{control}{A named list with elements \code{verbosity} and
\code{catch}. See \code{\link[=fit_control]{fit_control()}}.}
\item{...}{Not currently used; values passed here will be
ignored. Other options required to fit the model should be
passed using \code{set_engine}.}
\item{x}{A matrix or data frame of predictors.}
\item{y}{A vector, matrix or data frame of outcome data.}
}
\value{
A \code{model_fit} object that contains several elements:
\itemize{
\item \code{lvl}: If the outcome is a factor, this contains
the factor levels at the time of model fitting.
\item \code{spec}: The model specification object
(\code{object} in the call to \code{fit})
\item \code{fit}: when the model is executed without error,
this is the model object. Otherwise, it is a \code{try-error}
object with the error message.
\item \code{preproc}: any objects needed to convert between
a formula and non-formula interface (such as the \code{terms}
object)
}
The return value will also have a class related to the fitted model (e.g.
\code{"_glm"}) before the base class of \code{"model_fit"}.
}
\description{
\code{fit} and \code{fit_xy} take a model specification, translate the required
code by substituting arguments, and execute the model fit
routine.
}
\details{
\code{fit} and \code{fit_xy} substitute the current arguments in the model
specification into the computational engine's code, checks them
for validity, then fits the model using the data and the
engine-specific code. Different model functions have different
interfaces (e.g. formula or \code{x}/\code{y}) and these functions translate
between the interface used when \code{fit} or \code{fit_xy} were invoked and the one
required by the underlying model.
When possible, these functions attempt to avoid making copies of the
data. For example, if the underlying model uses a formula and
\code{fit} is invoked, the original data are referenced
when the model is fit. However, if the underlying model uses
something else, such as \code{x}/\code{y}, the formula is evaluated and
the data are converted to the required format. In this case, any
calls in the resulting model objects reference the temporary
objects used to fit the model.
}
\examples{
# Although `glm` only has a formula interface, different
# methods for specifying the model can be used
library(dplyr)
data("lending_club")
lr_mod <- logistic_reg()
lr_mod <- logistic_reg()
using_formula <-
lr_mod \%>\%
set_engine("glm") \%>\%
fit(Class ~ funded_amnt + int_rate, data = lending_club)
using_xy <-
lr_mod \%>\%
set_engine("glm") \%>\%
fit_xy(x = lending_club[, c("funded_amnt", "int_rate")],
y = lending_club$Class)
using_formula
using_xy
}
\seealso{
\code{\link[=set_engine]{set_engine()}}, \code{\link[=fit_control]{fit_control()}}, \code{model_spec}, \code{model_fit}
}
| /man/fit.Rd | no_license | malcolmbarrett/parsnip | R | false | true | 3,718 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit.R
\name{fit.model_spec}
\alias{fit.model_spec}
\alias{fit_xy.model_spec}
\title{Fit a Model Specification to a Dataset}
\usage{
\method{fit}{model_spec}(object, formula = NULL, data = NULL,
control = fit_control(), ...)
\method{fit_xy}{model_spec}(object, x = NULL, y = NULL,
control = fit_control(), ...)
}
\arguments{
\item{object}{An object of class \code{model_spec} that has a chosen engine
(via \code{\link[=set_engine]{set_engine()}}).}
\item{formula}{An object of class "formula" (or one that can
be coerced to that class): a symbolic description of the model
to be fitted.}
\item{data}{Optional, depending on the interface (see Details
below). A data frame containing all relevant variables (e.g.
outcome(s), predictors, case weights, etc). Note: when needed, a
\emph{named argument} should be used.}
\item{control}{A named list with elements \code{verbosity} and
\code{catch}. See \code{\link[=fit_control]{fit_control()}}.}
\item{...}{Not currently used; values passed here will be
ignored. Other options required to fit the model should be
passed using \code{set_engine}.}
\item{x}{A matrix or data frame of predictors.}
\item{y}{A vector, matrix or data frame of outcome data.}
}
\value{
A \code{model_fit} object that contains several elements:
\itemize{
\item \code{lvl}: If the outcome is a factor, this contains
the factor levels at the time of model fitting.
\item \code{spec}: The model specification object
(\code{object} in the call to \code{fit})
\item \code{fit}: when the model is executed without error,
this is the model object. Otherwise, it is a \code{try-error}
object with the error message.
\item \code{preproc}: any objects needed to convert between
a formula and non-formula interface (such as the \code{terms}
object)
}
The return value will also have a class related to the fitted model (e.g.
\code{"_glm"}) before the base class of \code{"model_fit"}.
}
\description{
\code{fit} and \code{fit_xy} take a model specification, translate the required
code by substituting arguments, and execute the model fit
routine.
}
\details{
\code{fit} and \code{fit_xy} substitute the current arguments in the model
specification into the computational engine's code, checks them
for validity, then fits the model using the data and the
engine-specific code. Different model functions have different
interfaces (e.g. formula or \code{x}/\code{y}) and these functions translate
between the interface used when \code{fit} or \code{fit_xy} were invoked and the one
required by the underlying model.
When possible, these functions attempt to avoid making copies of the
data. For example, if the underlying model uses a formula and
\code{fit} is invoked, the original data are referenced
when the model is fit. However, if the underlying model uses
something else, such as \code{x}/\code{y}, the formula is evaluated and
the data are converted to the required format. In this case, any
calls in the resulting model objects reference the temporary
objects used to fit the model.
}
\examples{
# Although `glm` only has a formula interface, different
# methods for specifying the model can be used
library(dplyr)
data("lending_club")
lr_mod <- logistic_reg()
lr_mod <- logistic_reg()
using_formula <-
lr_mod \%>\%
set_engine("glm") \%>\%
fit(Class ~ funded_amnt + int_rate, data = lending_club)
using_xy <-
lr_mod \%>\%
set_engine("glm") \%>\%
fit_xy(x = lending_club[, c("funded_amnt", "int_rate")],
y = lending_club$Class)
using_formula
using_xy
}
\seealso{
\code{\link[=set_engine]{set_engine()}}, \code{\link[=fit_control]{fit_control()}}, \code{model_spec}, \code{model_fit}
}
|
# Load shared helpers (remDr, site_url, context_of, test_connection) once per session.
if (!exists("context_of")) source("initialize.R")
# Page under test: the Modules tab of study SDY269.
pageURL <- paste0(site_url, "/project/Studies/SDY269/begin.view?pageId=Modules")
context_of(
  file = "test-modules.R",
  what = "Modules",
  url = pageURL
)
# Point the Selenium driver at the page and fail fast if it is unreachable.
test_connection(remDr, pageURL, "Modules: /Studies/SDY269")
test_that("'Active Modules' module is present", {
  # Exactly one module panel should render on the page.
  panel <- remDr$findElements(using = "class", value = "x-panel")
  expect_equal(length(panel), 1)
  # Each analysis module must be linked exactly once from the panel.
  dea_link <- remDr$findElements(using = "css selector", value = "a[href$='/DifferentialExpressionAnalysis/Studies/SDY269/begin.view']")
  expect_equal(length(dea_link), 1)  # was assigned but never asserted
  gee_link <- remDr$findElements(using = "css selector", value = "a[href$='/GeneExpressionExplorer/Studies/SDY269/begin.view']")
  expect_equal(length(gee_link), 1)
  gsea_link <- remDr$findElements(using = "css selector", value = "a[href$='/GeneSetEnrichmentAnalysis/Studies/SDY269/begin.view']")
  expect_equal(length(gsea_link), 1)
  irp_link <- remDr$findElements(using = "css selector", value = "a[href$='/ImmuneResponsePredictor/Studies/SDY269/begin.view']")
  expect_equal(length(irp_link), 1)
})
| /tests/test-modules.R | no_license | RGLab/UITesting | R | false | false | 1,065 | r | if (!exists("context_of")) source("initialize.R")
# Page under test: the Modules tab of study SDY269.
pageURL <- paste0(site_url, "/project/Studies/SDY269/begin.view?pageId=Modules")
context_of(
  file = "test-modules.R",
  what = "Modules",
  url = pageURL
)
# Point the Selenium driver at the page and fail fast if it is unreachable.
test_connection(remDr, pageURL, "Modules: /Studies/SDY269")
test_that("'Active Modules' module is present", {
  # Exactly one module panel should render on the page.
  panel <- remDr$findElements(using = "class", value = "x-panel")
  expect_equal(length(panel), 1)
  # Each analysis module must be linked exactly once from the panel.
  dea_link <- remDr$findElements(using = "css selector", value = "a[href$='/DifferentialExpressionAnalysis/Studies/SDY269/begin.view']")
  expect_equal(length(dea_link), 1)  # was assigned but never asserted
  gee_link <- remDr$findElements(using = "css selector", value = "a[href$='/GeneExpressionExplorer/Studies/SDY269/begin.view']")
  expect_equal(length(gee_link), 1)
  gsea_link <- remDr$findElements(using = "css selector", value = "a[href$='/GeneSetEnrichmentAnalysis/Studies/SDY269/begin.view']")
  expect_equal(length(gsea_link), 1)
  irp_link <- remDr$findElements(using = "css selector", value = "a[href$='/ImmuneResponsePredictor/Studies/SDY269/begin.view']")
  expect_equal(length(irp_link), 1)
})
|
library(rgdal)
library(raster)
library(gdistance)
library(readr)
# Create an empty global raster at 1/3-degree resolution
new <- raster(ncol = 360 * 3, nrow = 180 * 3)
projection(new) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"

# Import the country-boundary shapefile (Natural Earth 1:50m admin-0)
map <- readOGR(dsn = "~/ne_50m_admin_0_countries/", layer = "ne_50m_admin_0_countries")
plot(map)

# Rasterize the vector map onto the empty raster grid
r <- rasterize(map, new)

# Build the cost surface: sea cells (NA after rasterizing) are navigable
# (cost 1); land cells are effectively impassable (cost 99999)
values(r)[is.na(values(r))] <- 1
values(r)[values(r) > 1] <- 99999
plot(r)

# BUG FIX: import the port data BEFORE plotting it. The original script
# called points(port$...) here before `port` was read, which errors when
# the file is run top-to-bottom as a script.
port <- read_csv("~/port.csv")
View(port)
ports <- data.frame(port)
points(port$longitude, port$latitude, col = "red", cex = 0.01)

# Prepare the transition object for least-cost (shortest sea path) routing
p <- transition(r, function(x) { 1 / mean(x) }, 8)
p <- geoCorrection(p)

# Self-defined distance calculator: input two longitude/latitude pairs and
# get the shortest sea distance in nautical miles (km * 0.539957)
DistanceCalculator <- function(port1, port2) {
  path <- shortestPath(p, port1, port2, output = "SpatialLines")
  plot(r)
  lines(path)
  SpatialLinesLengths(path, longlat = TRUE) * 0.539957
}

# Timed smoke test
ptm <- proc.time()
DistanceCalculator(cbind(ports[2206, 2], ports[2206, 3]), cbind(ports[3505, 2], ports[3505, 3]))
proc.time() - ptm

# Loop to precalculate pairwise distances and store them in the DB
x <- data.frame(port1 = character(), port2 = character(), distance = numeric(), stringsAsFactors = FALSE)
for (i in 1:10) {
  for (j in 1:i) {
    x[nrow(x) + 1, ] <- c(ports$Port[j], ports$Port[1 + i],
                          DistanceCalculator(cbind(ports[j, 2], ports[j, 3]), cbind(ports[1 + i, 2], ports[1 + i, 3])))
  }
}

# Just some other tests, not necessary to run
path <- shortestPath(p, cbind(port[1, 2], port[1, 3]), cbind(port[2392, 2], port[2392, 3]), output = "SpatialLines")
SpatialLinesLengths(path, longlat = TRUE)
print(path)
lines(path)
| /PortsDistance.R | no_license | dimasanggafm/R-Ports-Distance-Calculator | R | false | false | 1,825 | r | library(rgdal)
library(raster)
library(gdistance)
library(readr)
# Create an empty global raster at 1/3-degree resolution
new <- raster(ncol = 360 * 3, nrow = 180 * 3)
projection(new) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"

# Import the country-boundary shapefile (Natural Earth 1:50m admin-0)
map <- readOGR(dsn = "~/ne_50m_admin_0_countries/", layer = "ne_50m_admin_0_countries")
plot(map)

# Rasterize the vector map onto the empty raster grid
r <- rasterize(map, new)

# Build the cost surface: sea cells (NA after rasterizing) are navigable
# (cost 1); land cells are effectively impassable (cost 99999)
values(r)[is.na(values(r))] <- 1
values(r)[values(r) > 1] <- 99999
plot(r)

# BUG FIX: import the port data BEFORE plotting it. The original script
# called points(port$...) here before `port` was read, which errors when
# the file is run top-to-bottom as a script.
port <- read_csv("~/port.csv")
View(port)
ports <- data.frame(port)
points(port$longitude, port$latitude, col = "red", cex = 0.01)

# Prepare the transition object for least-cost (shortest sea path) routing
p <- transition(r, function(x) { 1 / mean(x) }, 8)
p <- geoCorrection(p)

# Self-defined distance calculator: input two longitude/latitude pairs and
# get the shortest sea distance in nautical miles (km * 0.539957)
DistanceCalculator <- function(port1, port2) {
  path <- shortestPath(p, port1, port2, output = "SpatialLines")
  plot(r)
  lines(path)
  SpatialLinesLengths(path, longlat = TRUE) * 0.539957
}

# Timed smoke test
ptm <- proc.time()
DistanceCalculator(cbind(ports[2206, 2], ports[2206, 3]), cbind(ports[3505, 2], ports[3505, 3]))
proc.time() - ptm

# Loop to precalculate pairwise distances and store them in the DB
x <- data.frame(port1 = character(), port2 = character(), distance = numeric(), stringsAsFactors = FALSE)
for (i in 1:10) {
  for (j in 1:i) {
    x[nrow(x) + 1, ] <- c(ports$Port[j], ports$Port[1 + i],
                          DistanceCalculator(cbind(ports[j, 2], ports[j, 3]), cbind(ports[1 + i, 2], ports[1 + i, 3])))
  }
}

# Just some other tests, not necessary to run
path <- shortestPath(p, cbind(port[1, 2], port[1, 3]), cbind(port[2392, 2], port[2392, 3]), output = "SpatialLines")
SpatialLinesLengths(path, longlat = TRUE)
print(path)
lines(path)
|
# Data 608 Homework 3
# Armenoush Aslanian-Persico
# Problem 2
# UI Script
## Question 2
#Often you are asked whether particular States are improving their mortality rates (per cause)
#faster than, or slower than, the national average.
#Create a visualization that lets your clients see this for themselves
#for one cause of death at the time.
#Keep in mind that the national average should be weighted by the national population.
# Load data
# Read the cleaned CDC mortality data directly from GitHub.
# NOTE(review): this fetch runs at app start-up and requires network
# access -- the app will fail to launch offline; confirm whether a local
# cached copy is preferred.
df <- read.csv("https://raw.githubusercontent.com/charleyferrari/CUNY_DATA608/master/lecture3/data/cleaned-cdc-mortality-1999-2010-2.csv", header = TRUE, stringsAsFactors = FALSE)
# Unique causes of death and states used to populate the two dropdowns
allcauses<-unique(df$ICD.Chapter)
allstates<-unique(df$State)
# UI: two side-by-side select inputs (cause of death, state) above the plot
# rendered by the matching server.R output slot "myplot"
shinyUI(fluidPage(
  title = "State Mortality Rates Over Time",
  fluidRow(
    column(6, selectInput('causes', 'Cause of Death', choices=sort(allcauses)) ),
    column(6, selectInput('states', 'State', choices=sort(allstates)) )
  ),
  fluidRow(
    plotOutput('myplot')
  )
))
)) | /608-HW3/608hw3-2/ui.R | no_license | spsstudent15/2017-01-608 | R | false | false | 1,022 | r | # Data 608 Homework 3
# Armenoush Aslanian-Persico
# Problem 2
# UI Script
## Question 2
#Often you are asked whether particular States are improving their mortality rates (per cause)
#faster than, or slower than, the national average.
#Create a visualization that lets your clients see this for themselves
#for one cause of death at the time.
#Keep in mind that the national average should be weighted by the national population.
# Load data
# Read the cleaned CDC mortality data directly from GitHub.
# NOTE(review): this fetch runs at app start-up and requires network
# access -- the app will fail to launch offline; confirm whether a local
# cached copy is preferred.
df <- read.csv("https://raw.githubusercontent.com/charleyferrari/CUNY_DATA608/master/lecture3/data/cleaned-cdc-mortality-1999-2010-2.csv", header = TRUE, stringsAsFactors = FALSE)
# Unique causes of death and states used to populate the two dropdowns
allcauses<-unique(df$ICD.Chapter)
allstates<-unique(df$State)
# UI: two side-by-side select inputs (cause of death, state) above the plot
# rendered by the matching server.R output slot "myplot"
shinyUI(fluidPage(
  title = "State Mortality Rates Over Time",
  fluidRow(
    column(6, selectInput('causes', 'Cause of Death', choices=sort(allcauses)) ),
    column(6, selectInput('states', 'State', choices=sort(allstates)) )
  ),
  fluidRow(
    plotOutput('myplot')
  )
))
)) |
#' Make daily Holiday and Weekend date sequences
#'
#'
#' @param start_date Used to define the starting date for date sequence generation.
#' Provide in "YYYY-MM-DD" format.
#' @param end_date Used to define the ending date for date sequence generation.
#' Provide in "YYYY-MM-DD" format.
#' @param calendar The calendar to be used in Date Sequence calculations for Holidays
#' from the `timeDate` package.
#' Acceptable values are: `"NYSE"`, `"LONDON"`, `"NERC"`, `"TSX"`, `"ZURICH"`.
#' @param skip_values A daily date sequence to skip
#' @param insert_values A daily date sequence to insert
#' @param remove_holidays A logical value indicating whether or not to
#' remove common holidays from the date sequence
#' @param remove_weekends A logical value indicating whether or not to
#' remove weekends (Saturday and Sunday) from the date sequence
#'
#' @details
#'
#' __Start and End Date Specification__
#'
#' - Accept shorthand notation (i.e. `tk_make_timeseries()` specifications apply)
#' - Only available in Daily Periods.
#'
#' __Holiday Sequences__
#'
#' `tk_make_holiday_sequence()` is a wrapper for various holiday calendars from the `timeDate` package,
#' making it easy to generate holiday sequences for common business calendars:
#'
#' - New York Stock Exchange: `calendar = "NYSE"`
#' - London Stock Exchange: `"LONDON"`
#' - North American Reliability Council: `"NERC"`
#' - Toronto Stock Exchange: `"TSX"`
#' - Zurich Stock Exchange: `"ZURICH"`
#'
#' __Weekend and Weekday Sequences__
#'
#' These simply populate a daily date sequence of weekend (Saturday and Sunday) or weekday dates between the start and end dates.
#'
#'
#' @return A vector containing future dates
#'
#' @seealso
#' - Intelligent date or date-time sequence creation: [tk_make_timeseries()]
#' - Holidays and weekends: [tk_make_holiday_sequence()], [tk_make_weekend_sequence()], [tk_make_weekday_sequence()]
#' - Make future index from existing: [tk_make_future_timeseries()]
#'
#' @examples
#' library(dplyr)
#' library(timetk)
#'
#' # Set max.print to 50
#' options_old <- options()$max.print
#' options(max.print = 50)
#'
#'
#' # ---- HOLIDAYS & WEEKENDS ----
#'
#' # Business Holiday Sequence
#' tk_make_holiday_sequence("2017-01-01", "2017-12-31", calendar = "NYSE")
#'
#' tk_make_holiday_sequence("2017", calendar = "NYSE") # Same thing as above (just shorter)
#'
#' # Weekday Sequence
#' tk_make_weekday_sequence("2017", "2018", remove_holidays = FALSE)
#'
#' # Weekday Sequence + Removing Business Holidays
#' tk_make_weekday_sequence("2017", "2018", remove_holidays = TRUE)
#'
#'
#' # ---- COMBINE HOLIDAYS WITH MAKE FUTURE TIMESERIES FROM EXISTING ----
#' # - A common machine learning application is creating a future time series data set
#' # from an existing
#'
#' # Create index of days that FB stock will be traded in 2017 based on 2016 + holidays
#' FB_tbl <- FANG %>% filter(symbol == "FB")
#'
#' holidays <- tk_make_holiday_sequence(
#' start_date = "2016",
#' end_date = "2017",
#' calendar = "NYSE")
#'
#' weekends <- tk_make_weekend_sequence(
#' start_date = "2016",
#' end_date = "2017")
#'
#' # Remove holidays and weekends with skip_values
#' # We could also remove weekends with inspect_weekdays = TRUE
#' FB_tbl %>%
#' tk_index() %>%
#' tk_make_future_timeseries(length_out = 366,
#' skip_values = c(holidays, weekends))
#'
#'
#' options(max.print = options_old)
#'
#' @name tk_make_holiday_sequence
NULL
# DATE SEQUENCE ----
# HOLIDAYS -----
#' @rdname tk_make_holiday_sequence
#' @export
tk_make_holiday_sequence <- function(start_date, end_date,
                                     calendar = c("NYSE", "LONDON", "NERC", "TSX", "ZURICH"),
                                     skip_values = NULL, insert_values = NULL) {

    # Resolve the calendar name (case-insensitive; only the first element is
    # used) to the matching timeDate holiday-calendar generator.
    # NOTE(review): an unrecognized calendar makes switch() return NULL, and
    # the failure only surfaces below when fun() is called -- confirm whether
    # an explicit match.arg() guard is wanted.
    fun <- switch(
        tolower(calendar[1]),
        "nyse"   = timeDate::holidayNYSE,
        "london" = timeDate::holidayLONDON,
        "nerc"   = timeDate::holidayNERC,
        "tsx"    = timeDate::holidayTSX,
        "zurich" = timeDate::holidayZURICH
    )

    # Daily sequence spanning the requested window (shorthand dates allowed)
    date_seq <- tk_make_timeseries(
        start_date = start_date,
        end_date   = end_date,
        by         = "day")

    # Find holidays: generate them for every calendar year the window touches,
    # then keep only those that fall inside the window itself
    years            <- date_seq %>% lubridate::year() %>% unique()
    holiday_sequence <- fun(year = years) %>% lubridate::as_date()
    holidays         <- holiday_sequence[holiday_sequence %in% date_seq]

    # Apply user-supplied removals (skip_values) and additions (insert_values)
    holidays <- add_subtract_sequence(holidays, skip_values, insert_values)

    return(holidays)
}
# WEEKENDS ----
#' @rdname tk_make_holiday_sequence
#' @export
tk_make_weekend_sequence <- function(start_date, end_date) {

    # Full daily sequence covering the requested window
    daily_seq <- tk_make_timeseries(
        start_date = start_date,
        end_date   = end_date,
        by         = "day")

    # Day-of-week with the week starting on Sunday: Sun = 1, Sat = 7
    day_of_week <- lubridate::wday(daily_seq, week_start = 7)

    # Keep Saturdays and Sundays only
    daily_seq[day_of_week == 1 | day_of_week == 7]
}
# WEEKDAYS ----
#' @rdname tk_make_holiday_sequence
#' @export
tk_make_weekday_sequence <- function(start_date, end_date,
                                     remove_weekends = TRUE, remove_holidays = FALSE,
                                     calendar = c("NYSE", "LONDON", "NERC", "TSX", "ZURICH"),
                                     skip_values = NULL, insert_values = NULL
                                     ) {

    # Daily sequence spanning the requested window (shorthand dates allowed)
    date_sequence <- tk_make_timeseries(
        start_date = start_date,
        end_date   = end_date,
        by         = "day")

    # Remove weekends (Saturdays and Sundays) when requested (default TRUE)
    if (remove_weekends) {
        weekend_sequence <- tk_make_weekend_sequence(start_date, end_date)
        date_sequence    <- date_sequence[!date_sequence %in% weekend_sequence]
    }

    # Remove business holidays for the chosen calendar when requested
    if (remove_holidays) {
        holiday_sequence <- tk_make_holiday_sequence(start_date, end_date, calendar)
        date_sequence    <- date_sequence[!date_sequence %in% holiday_sequence]
    }

    # Apply user-supplied removals (skip_values) and additions (insert_values)
    date_sequence <- add_subtract_sequence(date_sequence, skip_values, insert_values)

    return(date_sequence)
}
| /R/make-tk_make_holiday_sequences.R | no_license | business-science/timetk | R | false | false | 6,102 | r | #' Make daily Holiday and Weekend date sequences
#'
#'
#' @param start_date Used to define the starting date for date sequence generation.
#' Provide in "YYYY-MM-DD" format.
#' @param end_date Used to define the ending date for date sequence generation.
#' Provide in "YYYY-MM-DD" format.
#' @param calendar The calendar to be used in Date Sequence calculations for Holidays
#' from the `timeDate` package.
#' Acceptable values are: `"NYSE"`, `"LONDON"`, `"NERC"`, `"TSX"`, `"ZURICH"`.
#' @param skip_values A daily date sequence to skip
#' @param insert_values A daily date sequence to insert
#' @param remove_holidays A logical value indicating whether or not to
#' remove common holidays from the date sequence
#' @param remove_weekends A logical value indicating whether or not to
#' remove weekends (Saturday and Sunday) from the date sequence
#'
#' @details
#'
#' __Start and End Date Specification__
#'
#' - Accept shorthand notation (i.e. `tk_make_timeseries()` specifications apply)
#' - Only available in Daily Periods.
#'
#' __Holiday Sequences__
#'
#' `tk_make_holiday_sequence()` is a wrapper for various holiday calendars from the `timeDate` package,
#' making it easy to generate holiday sequences for common business calendars:
#'
#' - New York Stock Exchange: `calendar = "NYSE"`
#' - London Stock Exchange: `"LONDON"`
#' - North American Reliability Council: `"NERC"`
#' - Toronto Stock Exchange: `"TSX"`
#' - Zurich Stock Exchange: `"ZURICH"`
#'
#' __Weekend and Weekday Sequences__
#'
#' These simply populate a daily date sequence of weekend (Saturday and Sunday) or weekday dates between the start and end dates.
#'
#'
#' @return A vector containing future dates
#'
#' @seealso
#' - Intelligent date or date-time sequence creation: [tk_make_timeseries()]
#' - Holidays and weekends: [tk_make_holiday_sequence()], [tk_make_weekend_sequence()], [tk_make_weekday_sequence()]
#' - Make future index from existing: [tk_make_future_timeseries()]
#'
#' @examples
#' library(dplyr)
#' library(timetk)
#'
#' # Set max.print to 50
#' options_old <- options()$max.print
#' options(max.print = 50)
#'
#'
#' # ---- HOLIDAYS & WEEKENDS ----
#'
#' # Business Holiday Sequence
#' tk_make_holiday_sequence("2017-01-01", "2017-12-31", calendar = "NYSE")
#'
#' tk_make_holiday_sequence("2017", calendar = "NYSE") # Same thing as above (just shorter)
#'
#' # Weekday Sequence
#' tk_make_weekday_sequence("2017", "2018", remove_holidays = FALSE)
#'
#' # Weekday Sequence + Removing Business Holidays
#' tk_make_weekday_sequence("2017", "2018", remove_holidays = TRUE)
#'
#'
#' # ---- COMBINE HOLIDAYS WITH MAKE FUTURE TIMESERIES FROM EXISTING ----
#' # - A common machine learning application is creating a future time series data set
#' # from an existing
#'
#' # Create index of days that FB stock will be traded in 2017 based on 2016 + holidays
#' FB_tbl <- FANG %>% filter(symbol == "FB")
#'
#' holidays <- tk_make_holiday_sequence(
#' start_date = "2016",
#' end_date = "2017",
#' calendar = "NYSE")
#'
#' weekends <- tk_make_weekend_sequence(
#' start_date = "2016",
#' end_date = "2017")
#'
#' # Remove holidays and weekends with skip_values
#' # We could also remove weekends with inspect_weekdays = TRUE
#' FB_tbl %>%
#' tk_index() %>%
#' tk_make_future_timeseries(length_out = 366,
#' skip_values = c(holidays, weekends))
#'
#'
#' options(max.print = options_old)
#'
#' @name tk_make_holiday_sequence
NULL
# DATE SEQUENCE ----
# HOLIDAYS -----
#' @rdname tk_make_holiday_sequence
#' @export
tk_make_holiday_sequence <- function(start_date, end_date,
                                     calendar = c("NYSE", "LONDON", "NERC", "TSX", "ZURICH"),
                                     skip_values = NULL, insert_values = NULL) {

    # Resolve the calendar name (case-insensitive; only the first element is
    # used) to the matching timeDate holiday-calendar generator.
    # NOTE(review): an unrecognized calendar makes switch() return NULL, and
    # the failure only surfaces below when fun() is called -- confirm whether
    # an explicit match.arg() guard is wanted.
    fun <- switch(
        tolower(calendar[1]),
        "nyse"   = timeDate::holidayNYSE,
        "london" = timeDate::holidayLONDON,
        "nerc"   = timeDate::holidayNERC,
        "tsx"    = timeDate::holidayTSX,
        "zurich" = timeDate::holidayZURICH
    )

    # Daily sequence spanning the requested window (shorthand dates allowed)
    date_seq <- tk_make_timeseries(
        start_date = start_date,
        end_date   = end_date,
        by         = "day")

    # Find holidays: generate them for every calendar year the window touches,
    # then keep only those that fall inside the window itself
    years            <- date_seq %>% lubridate::year() %>% unique()
    holiday_sequence <- fun(year = years) %>% lubridate::as_date()
    holidays         <- holiday_sequence[holiday_sequence %in% date_seq]

    # Apply user-supplied removals (skip_values) and additions (insert_values)
    holidays <- add_subtract_sequence(holidays, skip_values, insert_values)

    return(holidays)
}
# WEEKENDS ----
#' @rdname tk_make_holiday_sequence
#' @export
tk_make_weekend_sequence <- function(start_date, end_date) {

    # Full daily sequence covering the requested window
    daily_seq <- tk_make_timeseries(
        start_date = start_date,
        end_date   = end_date,
        by         = "day")

    # Day-of-week with the week starting on Sunday: Sun = 1, Sat = 7
    day_of_week <- lubridate::wday(daily_seq, week_start = 7)

    # Keep Saturdays and Sundays only
    daily_seq[day_of_week == 1 | day_of_week == 7]
}
# WEEKDAYS ----
#' @rdname tk_make_holiday_sequence
#' @export
tk_make_weekday_sequence <- function(start_date, end_date,
                                     remove_weekends = TRUE, remove_holidays = FALSE,
                                     calendar = c("NYSE", "LONDON", "NERC", "TSX", "ZURICH"),
                                     skip_values = NULL, insert_values = NULL
                                     ) {

    # Daily sequence spanning the requested window (shorthand dates allowed)
    date_sequence <- tk_make_timeseries(
        start_date = start_date,
        end_date   = end_date,
        by         = "day")

    # Remove weekends (Saturdays and Sundays) when requested (default TRUE)
    if (remove_weekends) {
        weekend_sequence <- tk_make_weekend_sequence(start_date, end_date)
        date_sequence    <- date_sequence[!date_sequence %in% weekend_sequence]
    }

    # Remove business holidays for the chosen calendar when requested
    if (remove_holidays) {
        holiday_sequence <- tk_make_holiday_sequence(start_date, end_date, calendar)
        date_sequence    <- date_sequence[!date_sequence %in% holiday_sequence]
    }

    # Apply user-supplied removals (skip_values) and additions (insert_values)
    date_sequence <- add_subtract_sequence(date_sequence, skip_values, insert_values)

    return(date_sequence)
}
|
\name{evaluate}
\Rdversion{1.1}
\alias{evaluate}
\alias{evaluate,evaluationScheme,character-method}
\alias{evaluate,evaluationScheme,list-method}
\title{
Evaluate Recommender Models
}
\description{
Evaluates a single or a list of recommender model given an evaluation scheme.
}
\usage{
evaluate(x, method, ...)
\S4method{evaluate}{evaluationScheme,character}(x, method, type="topNList",
n=1:10, parameter=NULL, progress = TRUE, keepModel=FALSE)
\S4method{evaluate}{evaluationScheme,list}(x, method, type="topNList",
n=1:10, parameter=NULL, progress = TRUE, keepModel=FALSE)
}
\arguments{
\item{x}{an evaluation scheme (class \code{"evaluationScheme"}).}
\item{method}{a character string or a list. If
a single character string is given it defines the recommender method
used for evaluation. If several recommender methods need to be compared,
\code{method} contains a nested list. Each element describes a recommender
method and consists of a list with two elements: a character string
named \code{"name"} containing the method and a list named
\code{"parameters"} containing the parameters used for this recommender method.
See \code{Recommender} for available methods.}
\item{type}{evaluate "topNList" or "ratings"?}
\item{n}{N (number of recommendations) of the top-N lists generated (only if type="topNList").}
\item{parameter}{a list with parameters for the recommender algorithm (only
used when \code{method} is a single method).}
\item{progress}{logical; report progress?}
\item{keepModel}{logical; store used recommender models?}
\item{\dots}{further arguments. }
}
\value{
Returns an object of class \code{"evaluationResults"}
or if \code{method} is a list an object of class \code{"evaluationResultList"}.
}
\seealso{
\code{\linkS4class{evaluationScheme}},
\code{\linkS4class{evaluationResults}}.
\code{\linkS4class{evaluationResultList}}.
}
\examples{
### evaluate top-N list recommendations on a 0-1 data set
## Note: we sample only 100 users to make the example run faster
data("MSWeb")
MSWeb10 <- sample(MSWeb[rowCounts(MSWeb) >10,], 100)
## create an evaluation scheme (10-fold cross validation, given-3 scheme)
es <- evaluationScheme(MSWeb10, method="cross-validation",
k=10, given=3)
## run evaluation
ev <- evaluate(es, "POPULAR", n=c(1,3,5,10))
ev
## look at the results (by the length of the topNList)
avg(ev)
plot(ev, annotate = TRUE)
## evaluate several algorithms with a list
algorithms <- list(
RANDOM = list(name = "RANDOM", param = NULL),
POPULAR = list(name = "POPULAR", param = NULL)
)
evlist <- evaluate(es, algorithms, n=c(1,3,5,10))
plot(evlist, legend="topright")
## select the first results
evlist[[1]]
### Evaluate using a data set with real-valued ratings
## Note: we sample only 100 users to make the example run faster
data("Jester5k")
es <- evaluationScheme(Jester5k[1:100], method="cross-validation",
k=10, given=10, goodRating=5)
## Note: goodRating is used to determine positive ratings
## predict top-N recommendation lists
## (results in TPR/FPR and precision/recall)
ev <- evaluate(es, "RANDOM", type="topNList", n=10)
avg(ev)
## predict missing ratings
## (results in RMSE, MSE and MAE)
ev <- evaluate(es, "RANDOM", type="ratings")
avg(ev)
}
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/evaluate.Rd | no_license | NanaAkwasiAbayieBoateng/recommenderlab | R | false | false | 3,322 | rd | \name{evaluate}
\Rdversion{1.1}
\alias{evaluate}
\alias{evaluate,evaluationScheme,character-method}
\alias{evaluate,evaluationScheme,list-method}
\title{
Evaluate Recommender Models
}
\description{
Evaluates a single or a list of recommender model given an evaluation scheme.
}
\usage{
evaluate(x, method, ...)
\S4method{evaluate}{evaluationScheme,character}(x, method, type="topNList",
n=1:10, parameter=NULL, progress = TRUE, keepModel=FALSE)
\S4method{evaluate}{evaluationScheme,list}(x, method, type="topNList",
n=1:10, parameter=NULL, progress = TRUE, keepModel=FALSE)
}
\arguments{
\item{x}{an evaluation scheme (class \code{"evaluationScheme"}).}
\item{method}{a character string or a list. If
a single character string is given it defines the recommender method
used for evaluation. If several recommender methods need to be compared,
\code{method} contains a nested list. Each element describes a recommender
method and consists of a list with two elements: a character string
named \code{"name"} containing the method and a list named
\code{"parameters"} containing the parameters used for this recommender method.
See \code{Recommender} for available methods.}
\item{type}{evaluate "topNList" or "ratings"?}
\item{n}{N (number of recommendations) of the top-N lists generated (only if type="topNList").}
\item{parameter}{a list with parameters for the recommender algorithm (only
used when \code{method} is a single method).}
\item{progress}{logical; report progress?}
\item{keepModel}{logical; store used recommender models?}
\item{\dots}{further arguments. }
}
\value{
Returns an object of class \code{"evaluationResults"}
or if \code{method} is a list an object of class \code{"evaluationResultList"}.
}
\seealso{
\code{\linkS4class{evaluationScheme}},
\code{\linkS4class{evaluationResults}}.
\code{\linkS4class{evaluationResultList}}.
}
\examples{
### evaluate top-N list recommendations on a 0-1 data set
## Note: we sample only 100 users to make the example run faster
data("MSWeb")
MSWeb10 <- sample(MSWeb[rowCounts(MSWeb) >10,], 100)
## create an evaluation scheme (10-fold cross validation, given-3 scheme)
es <- evaluationScheme(MSWeb10, method="cross-validation",
k=10, given=3)
## run evaluation
ev <- evaluate(es, "POPULAR", n=c(1,3,5,10))
ev
## look at the results (by the length of the topNList)
avg(ev)
plot(ev, annotate = TRUE)
## evaluate several algorithms with a list
algorithms <- list(
RANDOM = list(name = "RANDOM", param = NULL),
POPULAR = list(name = "POPULAR", param = NULL)
)
evlist <- evaluate(es, algorithms, n=c(1,3,5,10))
plot(evlist, legend="topright")
## select the first results
evlist[[1]]
### Evaluate using a data set with real-valued ratings
## Note: we sample only 100 users to make the example run faster
data("Jester5k")
es <- evaluationScheme(Jester5k[1:100], method="cross-validation",
k=10, given=10, goodRating=5)
## Note: goodRating is used to determine positive ratings
## predict top-N recommendation lists
## (results in TPR/FPR and precision/recall)
ev <- evaluate(es, "RANDOM", type="topNList", n=10)
avg(ev)
## predict missing ratings
## (results in RMSE, MSE and MAE)
ev <- evaluate(es, "RANDOM", type="ratings")
avg(ev)
}
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Startup message shown when the package is attached
.onAttach <- function(libname, pkgname) {
  citation_note <- "\nTo cite electionsBR in publications, use: citation('electionsBR')"
  website_note <- "To learn more, visit: http://electionsbr.com\n"
  packageStartupMessage(citation_note)
  packageStartupMessage(website_note)
}
#' Returns a vector with the abbreviations of all Brazilian states
#'
#' @export
uf_br <- function() {
  state_abbreviations <- c(
    "AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO",
    "MA", "MG", "MS", "MT", "PA", "PB", "PE", "PI", "PR",
    "RJ", "RN", "RO", "RR", "RS", "SC", "SE", "SP", "TO"
  )
  state_abbreviations
}
#' Returns a vector with the abbreviations of all Brazilian parties
#'
#' The character vector includes only parties that ran in elections in 2016.
#'
#' @export
parties_br <- function() {
  # BUG FIX: "SOLIEDARIEDADE" was a misspelling of "SOLIDARIEDADE"; the
  # typo'd entry could never match party names in TSE data.
  c("AVANTE", "CIDADANIA", "DC", "DEM", "MDB", "NOVO", "PATRIOTA",
    "PC do B", "PCB", "PCO", "PDT", "PEN", "PHS", "PMB", "PMN", "PODE",
    "PP", "PPL", "PPS", "PR", "PRB", "PROS", "PRP", "PRTB", "PSB",
    "PSC", "PSD", "PSDB", "PSDC", "PSL", "PSOL", "PSTU", "PT", "PT do B",
    "PTB", "PTC", "PTN", "PV", "REDE", "REPUBLICANOS", "SD", "SOLIDARIEDADE",
    "UP")
}
# Reads all per-state result files in the current working directory and
# row-binds them into a single tibble.
#   uf         - regex of state abbreviations (or "." for all) used to
#                filter file names
#   encoding   - character encoding passed to readr::locale()
#   br_archive - if TRUE keep only the nationwide ("BR") files, otherwise
#                drop them
#' @import dplyr
juntaDados <- function(uf, encoding, br_archive){
  # Candidate files: everything in the working directory except PDFs,
  # restricted to the requested states, and larger than 200 bytes (smaller
  # files are empty/placeholder downloads).
  # NOTE(review): grepl(".pdf", ...) treats "." as a regex wildcard, so it
  # also matches names like "Xpdf" -- presumably harmless for TSE files,
  # but fixed = TRUE would be stricter.
  archive <- Sys.glob("*")[grepl(".pdf", Sys.glob("*")) == FALSE] %>%
    .[grepl(uf, .)] %>%
    file.info() %>%
    .[.$size > 200, ] %>%
    row.names()
  # Keep or drop the nationwide "BR" archives depending on br_archive
  if(!br_archive){
    archive <- archive[grepl("BR", archive) == FALSE]
  } else {
    archive <- archive[grepl("BR", archive) == TRUE]
  }
  # .csv extracts ship with a header row; older extracts have none
  if(grepl(".csv", archive[1])){
    test_col_names <- TRUE
  }else{
    test_col_names <- FALSE
  }
  # Read each file (silently skipping any that fail to parse), then bind
  # all rows together and return the result as a tibble
  lapply(archive, function(x) tryCatch(
    suppressWarnings(readr::read_delim(x, col_names = test_col_names,
                                       delim = ";",
                                       locale = readr::locale(encoding = encoding),
                                       col_types = readr::cols(), progress = F,
                                       escape_double = F)),
    error = function(e) NULL)) %>%
    data.table::rbindlist() %>%
    dplyr::as_tibble()
}
# Converts character columns of electoral data from Latin-1 (or any other
# iconv-supported encoding) to plain ASCII via transliteration.
#' @import dplyr
to_ascii <- function(banco, encoding){
  # "Latin-1" is accepted as an alias for iconv's "latin1"
  if(encoding == "Latin-1") encoding <- "latin1"
  # FIX: dplyr::funs() is deprecated (and defunct in current dplyr); use a
  # purrr-style lambda, which mutate_if supports, instead.
  dplyr::mutate_if(banco, is.character, ~ iconv(.x, from = encoding, to = "ASCII//TRANSLIT"))
}
# Tests federal election year inputs
# Validates that `year` is a single numeric federal-election year
# (1994, 1998, ..., 2018); stops with an error otherwise.
test_fed_year <- function(year){
  valid_years <- seq(1994, 2018, 4)
  invalid <- !is.numeric(year) | length(year) != 1 | !year %in% valid_years
  if (invalid) {
    stop("Invalid input. Please, check the documentation and try again.")
  }
}
# Tests federal election year inputs
# Validates that `year` is a single numeric local-election year
# (1996, 2000, ..., 2020); stops with an error otherwise.
test_local_year <- function(year){
  valid_years <- seq(1996, 2020, 4)
  invalid <- !is.numeric(year) | length(year) != 1 | !year %in% valid_years
  if (invalid) {
    stop("Invalid input. Please, check the documentation and try again.")
  }
}
# Test federal positions
#test_fed_position <- function(position){
# position <- tolower(position)
# if(!is.character(position) | length(position) != 1 | !position %in% c("presidente",
# "governador",
# "senador",
# "deputado federal",
# "deputado estadual",
# "deputado distrital")) stop("Invalid input. Please, check the documentation and try again.")
#}
# Test federal positions
#test_local_position <- function(position){
# position <- tolower(position)
# if(!is.character(position) | length(position) != 1 | !position %in% c("prefeito",
# "vereador")) stop("Invalid input. Please, check the documentation and try again.")
#}
# Tests that the supplied encoding is one recognized by iconv (see iconvlist())
test_encoding <- function(encoding){
  # "Latin-1" is accepted as an alias for iconv's "latin1"
  if (encoding == "Latin-1") {
    encoding <- "latin1"
  }
  known_encodings <- tolower(iconvlist())
  if (!(encoding %in% known_encodings)) {
    stop("Invalid encoding. Check iconvlist() to view a list with all valid encodings.")
  }
}
# Test br types
# NOTE(review): unlike the other validators in this file (which stop()),
# this check only emits a message() and lets execution continue with an
# invalid br_archive value -- confirm whether a hard stop() is intended.
test_br <- function(br_archive){
  if(!is.logical(br_archive)) message("'br_archive' must be logical (TRUE or FALSE).")
}
# Tests state acronyms: normalizes input, validates it against the list of
# Brazilian state abbreviations (plus "ALL"), and returns a regex -- "." for
# "ALL", otherwise an alternation such as "SP|RJ".
test_uf <- function(uf) {
  # Normalize: strip spaces and upper-case
  cleaned <- toupper(gsub(" ", "", uf))
  valid_choices <- c("AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "MA",
                     "MG", "MS", "MT", "PA", "PB", "PE", "PI", "PR", "RJ", "RN",
                     "RO", "RR", "RS", "SC", "SE", "SP", "TO", "ALL")
  cleaned <- match.arg(cleaned, valid_choices, several.ok = TRUE)
  if ("ALL" %in% cleaned) {
    return(".")
  }
  paste(cleaned, collapse = "|")
}
# Replace position by cod position
# replace_position_cod <- function(position){
# position <- tolower(position)
# return(switch(position, "presidente" = 1,
# "governador" = 3,
# "senador" = 5,
# "deputado federal" = 6,
# "deputado estadual" = 7,
# "deputado distrital" = 8,
# "prefeito" = 11,
# "vereador" = 13))
# }
# Function to export data to .dta and .sav
# Writes the supplied data frame into the current working directory as
# both a Stata file (electoral_data.dta) and an SPSS file
# (electoral_data.sav), then reports where the files were saved.
# Side effects only; the return value is not meaningful.
export_data <- function(df) {
  haven::write_dta(df, "electoral_data.dta")
  haven::write_sav(df, "electoral_data.sav")
  message(paste0("Electoral data files were saved on: ", getwd(), ".\n"))
}
# Data download: fetch the zipped TSE archive into `dados` (unless it is
# already cached on disk) and extract it into a per-year subdirectory.
#   url       - sprintf() template for the download URL
#   dados     - local path of the zip archive (also the cache key)
#   filenames - value interpolated into the url template
#   year      - election year; files are extracted into "./<year>"
download_unzip <- function(url, dados, filenames, year){
  if (!file.exists(dados)) {
    # mode = "wb" keeps the binary zip intact on Windows
    download.file(sprintf(url, filenames), dados, mode = "wb")
  }
  # Both branches of the original if/else unzipped identically, so the
  # extraction step is hoisted out of the conditional.
  message("Processing the data...")
  unzip(dados, exdir = paste0("./", year))
}
# Avoid the R CMD check note about magrittr's dot
utils::globalVariables(".")
| /R/utils.R | no_license | omarbenites/electionsBR | R | false | false | 5,874 | r | # Startup message
.onAttach <-
function(libname, pkgname) {
packageStartupMessage("\nTo cite electionsBR in publications, use: citation('electionsBR')")
packageStartupMessage("To learn more, visit: http://electionsbr.com\n")
}
#' Returns a vector with the abbreviations of all Brazilian states
#'
#' @export
uf_br <- function() {
  state_abbreviations <- c(
    "AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO",
    "MA", "MG", "MS", "MT", "PA", "PB", "PE", "PI", "PR",
    "RJ", "RN", "RO", "RR", "RS", "SC", "SE", "SP", "TO"
  )
  state_abbreviations
}
#' Returns a vector with the abbreviations of all Brazilian parties
#'
#' The character vector includes only parties that ran in elections in 2016.
#'
#' @export
parties_br <- function() {
  # BUG FIX: "SOLIEDARIEDADE" was a misspelling of "SOLIDARIEDADE"; the
  # typo'd entry could never match party names in TSE data.
  c("AVANTE", "CIDADANIA", "DC", "DEM", "MDB", "NOVO", "PATRIOTA",
    "PC do B", "PCB", "PCO", "PDT", "PEN", "PHS", "PMB", "PMN", "PODE",
    "PP", "PPL", "PPS", "PR", "PRB", "PROS", "PRP", "PRTB", "PSB",
    "PSC", "PSD", "PSDB", "PSDC", "PSL", "PSOL", "PSTU", "PT", "PT do B",
    "PTB", "PTC", "PTN", "PV", "REDE", "REPUBLICANOS", "SD", "SOLIDARIEDADE",
    "UP")
}
# Reads all per-state result files in the current working directory and
# row-binds them into a single tibble.
#   uf         - regex of state abbreviations (or "." for all) used to
#                filter file names
#   encoding   - character encoding passed to readr::locale()
#   br_archive - if TRUE keep only the nationwide ("BR") files, otherwise
#                drop them
#' @import dplyr
juntaDados <- function(uf, encoding, br_archive){
  # Candidate files: everything in the working directory except PDFs,
  # restricted to the requested states, and larger than 200 bytes (smaller
  # files are empty/placeholder downloads).
  # NOTE(review): grepl(".pdf", ...) treats "." as a regex wildcard, so it
  # also matches names like "Xpdf" -- presumably harmless for TSE files,
  # but fixed = TRUE would be stricter.
  archive <- Sys.glob("*")[grepl(".pdf", Sys.glob("*")) == FALSE] %>%
    .[grepl(uf, .)] %>%
    file.info() %>%
    .[.$size > 200, ] %>%
    row.names()
  # Keep or drop the nationwide "BR" archives depending on br_archive
  if(!br_archive){
    archive <- archive[grepl("BR", archive) == FALSE]
  } else {
    archive <- archive[grepl("BR", archive) == TRUE]
  }
  # .csv extracts ship with a header row; older extracts have none
  if(grepl(".csv", archive[1])){
    test_col_names <- TRUE
  }else{
    test_col_names <- FALSE
  }
  # Read each file (silently skipping any that fail to parse), then bind
  # all rows together and return the result as a tibble
  lapply(archive, function(x) tryCatch(
    suppressWarnings(readr::read_delim(x, col_names = test_col_names,
                                       delim = ";",
                                       locale = readr::locale(encoding = encoding),
                                       col_types = readr::cols(), progress = F,
                                       escape_double = F)),
    error = function(e) NULL)) %>%
    data.table::rbindlist() %>%
    dplyr::as_tibble()
}
# Converts character columns of electoral data from Latin-1 (or any other
# iconv-supported encoding) to plain ASCII via transliteration.
#' @import dplyr
to_ascii <- function(banco, encoding){
  # "Latin-1" is accepted as an alias for iconv's "latin1"
  if(encoding == "Latin-1") encoding <- "latin1"
  # FIX: dplyr::funs() is deprecated (and defunct in current dplyr); use a
  # purrr-style lambda, which mutate_if supports, instead.
  dplyr::mutate_if(banco, is.character, ~ iconv(.x, from = encoding, to = "ASCII//TRANSLIT"))
}
# Tests federal election year inputs
# Validates that `year` is a single numeric federal-election year
# (1994, 1998, ..., 2018); stops with an error otherwise.
test_fed_year <- function(year){
  valid_years <- seq(1994, 2018, 4)
  invalid <- !is.numeric(year) | length(year) != 1 | !year %in% valid_years
  if (invalid) {
    stop("Invalid input. Please, check the documentation and try again.")
  }
}
# Tests local election year inputs
# Validates that `year` is a single numeric local-election year
# (1996, 2000, ..., 2020); stops with an error otherwise.
test_local_year <- function(year){
  valid_years <- seq(1996, 2020, 4)
  invalid <- !is.numeric(year) | length(year) != 1 | !year %in% valid_years
  if (invalid) {
    stop("Invalid input. Please, check the documentation and try again.")
  }
}
# Test federal positions
#test_fed_position <- function(position){
# position <- tolower(position)
# if(!is.character(position) | length(position) != 1 | !position %in% c("presidente",
# "governador",
# "senador",
# "deputado federal",
# "deputado estadual",
# "deputado distrital")) stop("Invalid input. Please, check the documentation and try again.")
#}
# Test federal positions
#test_local_position <- function(position){
# position <- tolower(position)
# if(!is.character(position) | length(position) != 1 | !position %in% c("prefeito",
# "vereador")) stop("Invalid input. Please, check the documentation and try again.")
#}
# Tests that 'encoding' names a character encoding known to iconv().
test_encoding <- function(encoding){
  if(encoding == "Latin-1") encoding <- "latin1"
  # Compare case-insensitively on BOTH sides: iconvlist() reports names such
  # as "UTF-8", so the user's input must be lowercased too. The original
  # lowercased only the list, rejecting valid spellings like "UTF-8".
  if(!tolower(encoding) %in% tolower(iconvlist())) stop("Invalid encoding. Check iconvlist() to view a list with all valid encodings.")
}
# Tests that 'br_archive' is a logical flag.
test_br <- function(br_archive){
  # The sibling test_* validators all stop() on invalid input; a message()
  # here let execution continue with a bad flag, so halt instead.
  if(!is.logical(br_archive)) stop("'br_archive' must be logical (TRUE or FALSE).")
}
# Tests state acronyms.
# Accepts one or more acronyms (case-insensitive, blanks ignored) and
# returns a regex: "." for "ALL" (matches every state), otherwise an
# alternation such as "SP|RJ". Stops via match.arg() on unknown acronyms.
test_uf <- function(uf) {
  # Normalise: strip blanks, then upper-case before validating.
  cleaned <- toupper(gsub(" ", "", uf))
  valid <- c("AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "MA",
             "MG", "MS", "MT", "PA", "PB", "PE", "PI", "PR", "RJ", "RN",
             "RO", "RR", "RS", "SC", "SE", "SP", "TO", "ALL")
  cleaned <- match.arg(cleaned, valid, several.ok = TRUE)
  if ("ALL" %in% cleaned) {
    "."
  } else {
    paste(cleaned, collapse = "|")
  }
}
# Replace position by cod position
# replace_position_cod <- function(position){
# position <- tolower(position)
# return(switch(position, "presidente" = 1,
# "governador" = 3,
# "senador" = 5,
# "deputado federal" = 6,
# "deputado estadual" = 7,
# "deputado distrital" = 8,
# "prefeito" = 11,
# "vereador" = 13))
# }
# Function to export data to .dta and .sav
# Writes 'df' to Stata (.dta) and SPSS (.sav) files in the current working
# directory, then reports where the files were saved. Called for its side
# effects only; the return value (from message()) is NULL.
export_data <- function(df) {
  haven::write_dta(df, "electoral_data.dta")
  haven::write_sav(df, "electoral_data.sav")
  message(paste0("Electoral data files were saved on: ", getwd(), ".\n"))
}
# Data download
# Downloads the zipped electoral archive (skipped when it already exists on
# disk) and extracts it into a subdirectory named after the election year.
#   url       - sprintf() template for the download URL
#   dados     - local file name of the zip archive
#   filenames - value interpolated into 'url'
#   year      - election year, used as the extraction directory name
download_unzip <- function(url, dados, filenames, year){
  if(!file.exists(dados)){
    # mode = "wb" keeps the zip intact on Windows (the default text mode
    # corrupts binary downloads there).
    download.file(sprintf(url, filenames), dados, mode = "wb")
  }
  # Extraction happens in either case (both branches of the original
  # duplicated these two lines).
  message("Processing the data...")
  unzip(dados, exdir = paste0("./", year))
}
# Avoid the R CMD check note about magrittr's dot
utils::globalVariables(".")
|
# ui.R -- Shiny front end for a UDPipe NLP workflow: upload a PDF or text
# document, point the app at a UDPipe English model on disk, pick the parts
# of speech of interest, then browse the annotated tokens, word clouds and
# a co-occurrence plot. Output ids ('Andoc', 'Data_cloudn', 'Data_cloudv',
# 'Co_oc') must match the corresponding render* calls in server.R.
library("shiny")
shinyUI(fluidPage(
titlePanel("Building a Shiny App around the UDPipe NLP workflow"), # end of title panel
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("file", "Choose File"),
# Horizontal line ----
tags$hr(),
# Input: declare whether the uploaded file is a PDF or plain text ----
radioButtons("disp", "Select File Type",
choices = c(PDF = "PDF",
Text = "Text"),
selected = "PDF"),
# Filesystem path to the .udpipe model file (including the extension).
textInput("Path", "Upload english model location along with model and extension"),
# Universal POS tags to include in the co-occurrence plot.
checkboxGroupInput("Checkbox","Select required UPOS for co-occurance plot",
choices = c("ADJ","NOUN","PROPN","ADV","VERB"),
selected = c("ADJ","NOUN","PROPN")) ,
# NOTE(review): the third positional argument (" ") lands in
# downloadButton's 'class' parameter -- presumably unintentional; confirm.
downloadButton("Download","Download Annotated dataset with csv extension"," ")
), #end of sidebar panel
mainPanel(
tabsetPanel(type = "tabs", # builds tab struc
tabPanel("Overview", # leftmost tab
h4(p("Data input")),
p("This app supports pdf or txt file for NLP processing.", align="justify"),
br(),
h4('How to use this App'),
p('To use this app, click on',
span(strong("Upload any text document")),
'You can also choose english model and POS for which you want to build co-occurance plot',
'and you have data cloud for nouns and verbs.')),
# second tab: full annotation table
tabPanel("Annotated documents",
tableOutput('Andoc')),
# third tab: word clouds (nouns and verbs)
tabPanel("Data cloud",
plotOutput('Data_cloudn'),plotOutput('Data_cloudv')),
# fourth tab: co-occurrence network plot
tabPanel("Co-occ",
plotOutput('Co_oc'))
) # end of tabsetPanel
) # end of mail panel
) #end of sidebar layout
) #end of fluidpage
) # end of shinyUI
| /ui.R | no_license | Madhusudhanbandi/Shiny-datacloud-and-co-occurence-plot | R | false | false | 2,717 | r | library("shiny")
shinyUI(fluidPage(
titlePanel("Building a Shiny App around the UDPipe NLP workflow"), # end of title panel
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
fileInput("file", "Choose File"),
# Horizontal line ----
tags$hr(),
# Input: Select number of rows to display ----
radioButtons("disp", "Select File Type",
choices = c(PDF = "PDF",
Text = "Text"),
selected = "PDF"),
textInput("Path", "Upload english model location along with model and extension"),
checkboxGroupInput("Checkbox","Select required UPOS for co-occurance plot",
choices = c("ADJ","NOUN","PROPN","ADV","VERB"),
selected = c("ADJ","NOUN","PROPN")) ,
downloadButton("Download","Download Annotated dataset with csv extension"," ")
), #end of sidebar panel
mainPanel(
tabsetPanel(type = "tabs", # builds tab struc
tabPanel("Overview", # leftmost tab
h4(p("Data input")),
p("This app supports pdf or txt file for NLP processing.", align="justify"),
br(),
h4('How to use this App'),
p('To use this app, click on',
span(strong("Upload any text document")),
'You can also choose english model and POS for which you want to build co-occurance plot',
'and you have data cloud for nouns and verbs.')),
# second tab coming up:
tabPanel("Annotated documents",
tableOutput('Andoc')),
# third tab coming up:
tabPanel("Data cloud",
plotOutput('Data_cloudn'),plotOutput('Data_cloudv')),
# fourth tab coming up:
tabPanel("Co-occ",
plotOutput('Co_oc'))
) # end of tabsetPanel
) # end of mail panel
) #end of sidebar layout
) #end of fluidpage
) # end of shinyUI
|
# Auto-generated fuzz-test case (RcppDeepState/AFL style): feeds a numeric
# vector -- one huge value, one subnormal value, and a run of zeros -- to
# the package-internal C++ harmonic-mean routine, then prints the result's
# structure. Run under valgrind to surface memory errors; the exact values
# matter only as regression inputs and must not be edited.
testlist <- list(x = c(1.36218194413714e+248, 9.56978493741917e-315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(myTAI:::cpp_harmonic_mean,testlist)
str(result)
result <- do.call(myTAI:::cpp_harmonic_mean,testlist)
str(result) |
# Names of the colour spaces supported by the package. The trailing "# n"
# comments record each space's zero-based position -- presumably these
# indices are shared with compiled (C++) code, so the ORDER of this vector
# must not change; TODO confirm against the native routines before editing.
colourspaces <- c(
  "cmy",       # 0
  "cmyk",      # 1
  "hsl",       # 2
  "hsb",       # 3
  "hsv",       # 4
  "lab",       # 5
  "hunterlab", # 6
  "lch",       # 7
  "luv",       # 8
  "rgb",       # 9
  "xyz",       # 10
  "yxy"        # 11
)
| /R/aaa.R | permissive | GapData/farver | R | false | false | 251 | r | colourspaces <- c(
"cmy", # 0
"cmyk", # 1
"hsl", # 2
"hsb", # 3
"hsv", # 4
"lab", # 5
"hunterlab", # 6
"lch", # 7
"luv", # 8
"rgb", # 9
"xyz", # 10
"yxy" # 11
)
|
library(shiny)
library(ggplot2)
library(plotly)
library(DT)
library(shinycssloaders)
library(shinyWidgets)
#setwd("/home/alicja.wolny-dominiak/covid")
##main function
# Fits an exponential growth curve y = a * exp(b * report) to cumulative
# case counts between two calendar dates, then extrapolates m days ahead.
#   data     - data.frame with columns: report (day index), all (cases), d (Date)
#   m        - number of days to forecast beyond the fitted window
#   cutL/cutU- start/end dates (coercible via as.Date) of the fitting window
#   date     - lookup table mapping report index to calendar date
#   startVal - starting value for the 'a' parameter of nls()
# Returns a list: the nls fit, prediction/fitted data.frames, the fit error,
# the coefficients, and the input data.
# NOTE(review): the returned element is named 'rsme' (typo for 'rmse');
# callers index it by that name, so it is left unchanged here.
covidExp <- function(data, m, cutL, cutU, date, startVal){
  # cuttL <- which(data$report == cutL)
  # cuttU <- which(data$report == cutU)
  # Translate calendar dates into report indices, then restrict the data
  # to that window.
  cuttL <- data$report[which(data$d == as.Date(cutL))]
  cuttU <- data$report[which(data$d == as.Date(cutU))]
  dataCut <- subset(data, report %in% cuttL:cuttU)
  # fit non-linear model y = a* e^(b*x)
  modE <- nls(dataCut$all ~ a * exp(b*dataCut$report), data = dataCut, start = list(a = startVal, b = 0.1))
  a <- summary(modE)$parameters[1]
  b <- summary(modE)$parameters[2]
  #fitted values (rounded to whole case counts)
  modEFit <- round(a * exp(b*dataCut$report))
  error <- modEFit - dataCut$all
  # NOTE(review): this is sqrt(sum of squared errors), i.e. root-SS, not a
  # true RMSE (no division by n) -- confirm whether that is intended.
  rmse <- sqrt(sum(error^2))
  dfFitted <- data.frame(dataCut, allFitted = modEFit)
  #predict m days beyond the last fitted report index
  x <- 1:m + dataCut$report[length(dataCut$report)]
  modEPr <- round(a * exp(b*x))
  dfPred <- data.frame(report = x, all = modEPr)
  dfPred$d <- subset(date, report %in% x)$d
  return(list(modE = modE, dfPred = dfPred, dfFitted = dfFitted, rsme = rmse, a = a, b = b, error = error,
              data = data, dataCut = dataCut))
}
###Date
# Lookup table mapping report number (1-100) to calendar date, starting at
# the date of the first WHO situation report (2020-01-21). Assigned with
# <<- so it is exported to the global environment for the shiny app;
# NOTE(review): plain <- would suffice when this file is sourced top-level.
datee <<- data.frame(report = 1:100, d = seq( as.Date("2020-01-21"), by=1, len = 100))
##WHO data
# Daily case counts fetched from the author's server (semicolon-separated,
# read.csv2); attach the calendar date for each report number.
dfcovv <- read.csv2('http://web.ue.katowice.pl/woali/Rdane/shinyCovid/data.csv')
dfcovv$d <- subset(datee, report %in% dfcovv$report)$d
| /global.R | no_license | woali/covid | R | false | false | 1,535 | r | library(shiny)
library(ggplot2)
library(plotly)
library(DT)
library(shinycssloaders)
library(shinyWidgets)
#setwd("/home/alicja.wolny-dominiak/covid")
##main function
covidExp <- function(data, m, cutL, cutU, date, startVal){
# cuttL <- which(data$report == cutL)
# cuttU <- which(data$report == cutU)
cuttL <- data$report[which(data$d == as.Date(cutL))]
cuttU <- data$report[which(data$d == as.Date(cutU))]
dataCut <- subset(data, report %in% cuttL:cuttU)
# fit non-linear model y = a* e^(b*x)
modE <- nls(dataCut$all ~ a * exp(b*dataCut$report), data = dataCut, start = list(a = startVal, b = 0.1))
a <- summary(modE)$parameters[1]
b <- summary(modE)$parameters[2]
#fitted values
modEFit <- round(a * exp(b*dataCut$report))
error <- modEFit - dataCut$all
rmse <- sqrt(sum(error^2))
dfFitted <- data.frame(dataCut, allFitted = modEFit)
#predict
x <- 1:m + dataCut$report[length(dataCut$report)]
modEPr <- round(a * exp(b*x))
dfPred <- data.frame(report = x, all = modEPr)
dfPred$d <- subset(date, report %in% x)$d
return(list(modE = modE, dfPred = dfPred, dfFitted = dfFitted, rsme = rmse, a = a, b = b, error = error,
data = data, dataCut = dataCut))
}
###Date
datee <<- data.frame(report = 1:100, d = seq( as.Date("2020-01-21"), by=1, len = 100))
##WHO data
dfcovv <- read.csv2('http://web.ue.katowice.pl/woali/Rdane/shinyCovid/data.csv')
dfcovv$d <- subset(datee, report %in% dfcovv$report)$d
|
#Forecast combination
#Consider, ETS, ARIMA, STL-ETS
# Hold out everything after Sep 2012 of the 'auscafe' series as the test
# period; h is the number of held-out observations to forecast.
train <- window(auscafe, end=c(2012,9))
h <- length(auscafe) - length(train)
ETS <- forecast(ets(train), h=h)
# lambda=0 applies a log transformation; biasadj=TRUE back-transforms to
# the mean rather than the median.
ARIMA <- forecast(auto.arima(train, lambda=0, biasadj=TRUE), h=h)
STL <- stlf(train, lambda=0, h=h, biasadj=TRUE) #estimate training set + forecast the training set into test set using ETS
# Equal-weight average of the three point forecasts (no prediction
# intervals are combined, only the means).
Combination <- (ETS[["mean"]] + ARIMA[["mean"]] + STL[["mean"]])/3
autoplot(auscafe) +
  autolayer(ETS, series="ETS", PI=FALSE) +
  autolayer(ARIMA, series="ARIMA", PI=FALSE) +
  autolayer(STL, series="STL", PI=FALSE) +
  autolayer(Combination, series="Combination") +
  xlab("Year") + ylab("$ billion") + ggtitle("Australian monthly expenditure on eating out")
# Compare test-set RMSE across the four candidates.
c(ETS = accuracy(ETS, auscafe)["Test set","RMSE"],
  ARIMA = accuracy(ARIMA, auscafe)["Test set","RMSE"],
  `STL-ETS` = accuracy(STL, auscafe)["Test set","RMSE"],
  Combination = accuracy(Combination, auscafe)["Test set","RMSE"])
#Combination has the lowest RMSE
#Dynamic Regression Models
library(fpp2)
library(series)
#In chapter 5, we look at the example of US personal consumption
autoplot(uschange[,1:2], facets=TRUE) +
xlab("Year") + ylab("") +
ggtitle("Quarterly changes in US consumption and personal income")
fit.ci <- tslm(Consumption ~ Income, data=uschange)
summary(fit.ci)
checkresiduals(fit.ci) #p-value is small, reject null hypothesis, significant autocorrelation
#Dynamic Regression Models - error allows autocorrelation
#All the variables in the model must first be stationary
#If any one variable is non-stationary, difference all variables to retain the form
#If a regression model has ARIMA errors, then this is equivalent to regression model in differences with ARMA errors
fit <- auto.arima(uschange[,"Consumption"], xreg=uschange[,"Income"])
summary(fit) #ARIMA(1,0,2)
#Recover estimates of both the nt and et using the residuals() function
cbind("Regression Errors" = residuals(fit, type="regression"), #nt
"ARIMA errors" = residuals(fit, type="innovation")) %>% autoplot(facet=TRUE) #et
checkresiduals(fit) #ACF plots have little significant spikes, p-value = 0.117, cannot reject null hypothesis - an improvement. Expect predition interval to be reliable
#Forecasting once errors look like white noise
fcast <- forecast(fit, xreg=rep(mean(uschange[,2])),8) #Assume that uschange for the next 8 periods is the historical mean
autoplot(fcast) + xlab("Year") + ylab("Percentage change")
#Using chnage of income as predictor, do not take into account the uncertainty for predictor variables
#Forecasting electricity and demand for next 14 days
qplot(Temperature, Demand, data=as.data.frame(elecdaily)) +
ylab("Elec Demand (GW)") + xlab("Max daily temperature (Celsius)") + ggtitle("Figure 9.5: Daily electricity demand versus maximum daily for the state of Victoria in Australia for 2014")
autoplot(elecdaily[,c(1,3)], facets=TRUE)
xreg <- cbind(MaxTemp = elecdaily[, "Temperature"],
MaxTempSq = elecdaily[, "Temperature"]^2,
Workday = elecdaily[,"WorkDay"])
fit <- auto.arima(elecdaily[,"Demand"], xreg=xreg)
checkresiduals(fit) #ARIMA(2,1,2)(2,0,0)[7] seasonality of 7 because daily data, can see significant spikes - autocorrelation in residuals
fcast <- forecast(fit,
xreg = cbind(MaxTemp=rep(26,14), maxTempSq=rep(26^2,14), #assume temp is constant at 26
Workday = c(0,1,0,0,1,1,1,1,1,0,0,1,1,1))) #Mon-Fri is 1, Sat-Sun + Public hols = 0
autoplot(fcast) + ylab("Electricity demand (GW")
#Shows the forecast from dynamic regression model when all future temperature set to 26 degrees, and working day dummy variable set to known future values
#Daily seasonality
#Point forecast looks reasonable for the next 2 weeks
#Deterministic vs Stochastic trend
autoplot(austa) + xlab("Year") +
ylab("millions of people") +
ggtitle("Total annual international visitors to Australia")
trend <- seq_along(austa)
(fit1 <- auto.arima(austa, d=0, xreg=trend)) #coefficient for t = 0.17, estimated growth in visitor numbers is 0.17mil per year
(fit2 <- auto.arima(austa, d=1)) #random walk model with drift
#ARIMA(0,1,1) with drift
#Although coefficient for trend is also 0.17, prediction interval varies alot
fc1 <- forecast(fit1,
xreg = length(austa) + 1:10) #10 period ahead forecast
fc2 <- forecast(fit2, h=10)
autoplot(austa) +
autolayer(fc2, series="Stochastic trend") +
autolayer(fc1, series="Deterministic trend") +
ggtitle("Forecasts from trend models") +
xlab("Year") + ylab("Visitors to Australia (millions)") +
guides(colour=guide_legend(title="Forecast"))
#PI for stochastic trend is so much wider than the deterministic trend
#Slope for deterministic trend is not changing over time
#If yt is stationary, use deterministic trend. If non-stationary, we difference yt to get stochastic trend
#Lagged predictors
autoplot(insurance, facets=TRUE) +
xlab("year") + ylab("") +
ggtitle("Insurance advertising and quotations")
#Lagged predictors. Test 0,1,2 or 3 lags.
Advert <- cbind(
AdLag0 = insurance[,"TV.advert"],
AdLag1 = stats::lag(insurance[,"TV.advert"], -1),
AdLag2 = stats::lag(insurance[,"TV.advert"], -2),
AdLag3 = stats::lag(insurance[,"TV.advert"], -3)) %>%
head(NROW(insurance)) #40 observations
#When comparing models, all models must use same training set
# Restrict data so models use same fitting period
fit1 <- auto.arima(insurance[4:40,1], xreg=Advert[4:40,1],
stationary=TRUE)
fit2 <- auto.arima(insurance[4:40,1], xreg=Advert[4:40,1:2],
stationary=TRUE)
fit3 <- auto.arima(insurance[4:40,1], xreg=Advert[4:40,1:3],
stationary=TRUE)
fit4 <- auto.arima(insurance[4:40,1], xreg=Advert[4:40,1:4],
stationary=TRUE)
#Choose the optimal lag length based on AICc
c(fit1[["aicc"]], fit2[["aicc"]], fit3[["aicc"]],fit4[["aicc"]])
#Best model with smalled AICc value has two lagged predictors (current + previous)
fit <- auto.arima(insurance[,1], xreg=Advert[,1:2], stationary=TRUE) #use autoarima to model
summary(fit) #AR3 model
#forecasts can be calculated using this model if the future values for advertising values are assumed
fc8 <- forecast(fit, h=20,
xreg=cbind(AdLag0 = rep(8,20),
AdLag1 = c(Advert[40,1], rep(8,19))))
autoplot(fc8) + ylab("Quotes") +
ggtitle("Forecast quotes with future advertising set to 8")
| /HE3022/Workbook/Seminar11_07042020.R | no_license | cmok1996/helpme | R | false | false | 6,444 | r | #Forecast combination
#Consider, ETS, ARIMA, STL-ETS
train <- window(auscafe, end=c(2012,9))
h <- length(auscafe) - length(train)
ETS <- forecast(ets(train), h=h)
ARIMA <- forecast(auto.arima(train, lambda=0, biasadj=TRUE), h=h)
STL <- stlf(train, lambda=0, h=h, biasadj=TRUE) #estimate training set + forecast the training set into test set using ETS
Combination <- (ETS[["mean"]] + ARIMA[["mean"]] + STL[["mean"]])/3
autoplot(auscafe) +
autolayer(ETS, series="ETS", PI=FALSE) +
autolayer(ARIMA, series="ARIMA", PI=FALSE) +
autolayer(STL, series="STL", PI=FALSE) +
autolayer(Combination, series="Combination") +
xlab("Year") + ylab("$ billion") + ggtitle("Australian monthly expenditure on eating out")
c(ETS = accuracy(ETS, auscafe)["Test set","RMSE"],
ARIMA = accuracy(ARIMA, auscafe)["Test set","RMSE"],
`STL-ETS` = accuracy(STL, auscafe)["Test set","RMSE"],
Combination = accuracy(Combination, auscafe)["Test set","RMSE"])
#Combination has the lowest RMSE
#Dynamic Regression Models
library(fpp2)
library(series)
#In chapter 5, we look at the example of US personal consumption
autoplot(uschange[,1:2], facets=TRUE) +
xlab("Year") + ylab("") +
ggtitle("Quarterly changes in US consumption and personal income")
fit.ci <- tslm(Consumption ~ Income, data=uschange)
summary(fit.ci)
checkresiduals(fit.ci) #p-value is small, reject null hypothesis, significant autocorrelation
#Dynamic Regression Models - error allows autocorrelation
#All the variables in the model must first be stationary
#If any one variable is non-stationary, difference all variables to retain the form
#If a regression model has ARIMA errors, then this is equivalent to regression model in differences with ARMA errors
fit <- auto.arima(uschange[,"Consumption"], xreg=uschange[,"Income"])
summary(fit) #ARIMA(1,0,2)
#Recover estimates of both the nt and et using the residuals() function
cbind("Regression Errors" = residuals(fit, type="regression"), #nt
"ARIMA errors" = residuals(fit, type="innovation")) %>% autoplot(facet=TRUE) #et
checkresiduals(fit) #ACF plots have little significant spikes, p-value = 0.117, cannot reject null hypothesis - an improvement. Expect predition interval to be reliable
#Forecasting once errors look like white noise
fcast <- forecast(fit, xreg=rep(mean(uschange[,2])),8) #Assume that uschange for the next 8 periods is the historical mean
autoplot(fcast) + xlab("Year") + ylab("Percentage change")
#Using chnage of income as predictor, do not take into account the uncertainty for predictor variables
#Forecasting electricity and demand for next 14 days
qplot(Temperature, Demand, data=as.data.frame(elecdaily)) +
ylab("Elec Demand (GW)") + xlab("Max daily temperature (Celsius)") + ggtitle("Figure 9.5: Daily electricity demand versus maximum daily for the state of Victoria in Australia for 2014")
autoplot(elecdaily[,c(1,3)], facets=TRUE)
xreg <- cbind(MaxTemp = elecdaily[, "Temperature"],
MaxTempSq = elecdaily[, "Temperature"]^2,
Workday = elecdaily[,"WorkDay"])
fit <- auto.arima(elecdaily[,"Demand"], xreg=xreg)
checkresiduals(fit) #ARIMA(2,1,2)(2,0,0)[7] seasonality of 7 because daily data, can see significant spikes - autocorrelation in residuals
fcast <- forecast(fit,
xreg = cbind(MaxTemp=rep(26,14), maxTempSq=rep(26^2,14), #assume temp is constant at 26
Workday = c(0,1,0,0,1,1,1,1,1,0,0,1,1,1))) #Mon-Fri is 1, Sat-Sun + Public hols = 0
autoplot(fcast) + ylab("Electricity demand (GW")
#Shows the forecast from dynamic regression model when all future temperature set to 26 degrees, and working day dummy variable set to known future values
#Daily seasonality
#Point forecast looks reasonable for the next 2 weeks
#Deterministic vs Stochastic trend
autoplot(austa) + xlab("Year") +
ylab("millions of people") +
ggtitle("Total annual international visitors to Australia")
trend <- seq_along(austa)
(fit1 <- auto.arima(austa, d=0, xreg=trend)) #coefficient for t = 0.17, estimated growth in visitor numbers is 0.17mil per year
(fit2 <- auto.arima(austa, d=1)) #random walk model with drift
#ARIMA(0,1,1) with drift
#Although coefficient for trend is also 0.17, prediction interval varies alot
fc1 <- forecast(fit1,
xreg = length(austa) + 1:10) #10 period ahead forecast
fc2 <- forecast(fit2, h=10)
autoplot(austa) +
autolayer(fc2, series="Stochastic trend") +
autolayer(fc1, series="Deterministic trend") +
ggtitle("Forecasts from trend models") +
xlab("Year") + ylab("Visitors to Australia (millions)") +
guides(colour=guide_legend(title="Forecast"))
#PI for stochastic trend is so much wider than the deterministic trend
#Slope for deterministic trend is not changing over time
#If yt is stationary, use deterministic trend. If non-stationary, we difference yt to get stochastic trend
#Lagged predictors
autoplot(insurance, facets=TRUE) +
xlab("year") + ylab("") +
ggtitle("Insurance advertising and quotations")
#Lagged predictors. Test 0,1,2 or 3 lags.
Advert <- cbind(
AdLag0 = insurance[,"TV.advert"],
AdLag1 = stats::lag(insurance[,"TV.advert"], -1),
AdLag2 = stats::lag(insurance[,"TV.advert"], -2),
AdLag3 = stats::lag(insurance[,"TV.advert"], -3)) %>%
head(NROW(insurance)) #40 observations
#When comparing models, all models must use same training set
# Restrict data so models use same fitting period
fit1 <- auto.arima(insurance[4:40,1], xreg=Advert[4:40,1],
stationary=TRUE)
fit2 <- auto.arima(insurance[4:40,1], xreg=Advert[4:40,1:2],
stationary=TRUE)
fit3 <- auto.arima(insurance[4:40,1], xreg=Advert[4:40,1:3],
stationary=TRUE)
fit4 <- auto.arima(insurance[4:40,1], xreg=Advert[4:40,1:4],
stationary=TRUE)
#Choose the optimal lag length based on AICc
c(fit1[["aicc"]], fit2[["aicc"]], fit3[["aicc"]],fit4[["aicc"]])
#Best model with smalled AICc value has two lagged predictors (current + previous)
fit <- auto.arima(insurance[,1], xreg=Advert[,1:2], stationary=TRUE) #use autoarima to model
summary(fit) #AR3 model
#forecasts can be calculated using this model if the future values for advertising values are assumed
fc8 <- forecast(fit, h=20,
xreg=cbind(AdLag0 = rep(8,20),
AdLag1 = c(Advert[40,1], rep(8,19))))
autoplot(fc8) + ylab("Quotes") +
ggtitle("Forecast quotes with future advertising set to 8")
|
## Clear Workspace
rm(list=ls())
## Load libraries
library(caret)
library(xgboost)
library(readr)
library(dplyr)
library(tidyr)
## Data
df_x <- read_csv("data/train_predictors.txt", col_names = FALSE)
df_y <- read_csv("data/train_labels.txt", col_names = FALSE)
df_x_final <- read_csv("data/test_predictors.txt", col_names = FALSE)
df_sub <- data.frame(read_csv("data/sample_submission.txt", col_names = TRUE))
df_x <- data.frame(df_x)
df_y <- data.frame(df_y)
df_x_final <- data.frame(df_x_final)
names(df_y) <- "Y"
# Bind together
df <- cbind(df_y, df_x)
rm(df_x, df_y)
## Split dataset into test and train
set.seed(123)
sample_id <- sample(row.names(df), size = nrow(df) * 0.8, replace = FALSE)
df_train <- df[row.names(df) %in% sample_id, ]
df_test <- df[!row.names(df) %in% sample_id, ]
# Omit missings
df_train <- na.omit(df_train)
## Tuning
# num.class = length(unique(y))
# xgboost parameters
param <- list("objective" = "binary:logistic", # multiclass classification
"max_depth" = 6, # maximum depth of tree
"eta" = 0.5, # step size shrinkage
"gamma" = 1.5, # minimum loss reduction
"subsample" = 1, # part of data instances to grow tree
"colsample_bytree" = 1, # subsample ratio of columns when constructing each tree
"min_child_weight" = 0.8, # minimum sum of instance weight needed in a child
"scale_pos_weight" = 50,
"max_delta_step" = 0
)
# set random seed, for reproducibility
set.seed(1234)
# 5-fold cross-validation over 100 boosting rounds.
# NOTE(review): CV runs on the FULL data 'df', not on 'df_train' from the
# earlier train/test split -- confirm this is intended.
bst.cv <- xgb.cv(param=param,
               data = as.matrix(df[, 2:length(df)]),
               label = df$Y,
               nfold=5,
               nrounds=100,
               prediction=TRUE,
               verbose=FALSE)
# Round with the lowest mean CV test error.
min.error.idx <- (1:length(bst.cv$evaluation_log$test_error_mean))[bst.cv$evaluation_log$test_error_mean == min(bst.cv$evaluation_log$test_error_mean)]
# NOTE(review): the line below discards the CV result and hard-codes the
# number of boosting rounds to 55 -- confirm whether this override is
# deliberate or leftover experimentation.
min.error.idx <- 55
# Testing
#--------
# Final model, again fitted on the full data set.
bst <- xgboost(param=param,
               data=as.matrix(df[, 2:length(df)]),
               label=df$Y,
               nrounds=min.error.idx,
               verbose=0)
# Predictions on the held-out competition set; 0.5 probability threshold.
preds = predict(bst, data.matrix(df_x_final))
label = ifelse(preds > 0.5, 1, 0)
df_sub <- data.frame(1:length(label), label)
names(df_sub) <- c('index', 'label')
# format(..., digits=0) converts every column to character before writing.
write_csv(format(df_sub, digits=0), path = "data/submission.txt")
# In-sample confusion table (training data, so this overstates accuracy).
preds = predict(bst, data.matrix(df[, 2:length(df)]))
label = ifelse(preds > 0.5, 1, 0)
table(df$Y, label)
| /hw8/kaggle/scripts/submission.R | no_license | greenore/ac209a-coursework | R | false | false | 2,608 | r | ## Clear Workspace
rm(list=ls())
## Load libraries
library(caret)
library(xgboost)
library(readr)
library(dplyr)
library(tidyr)
## Data
df_x <- read_csv("data/train_predictors.txt", col_names = FALSE)
df_y <- read_csv("data/train_labels.txt", col_names = FALSE)
df_x_final <- read_csv("data/test_predictors.txt", col_names = FALSE)
df_sub <- data.frame(read_csv("data/sample_submission.txt", col_names = TRUE))
df_x <- data.frame(df_x)
df_y <- data.frame(df_y)
df_x_final <- data.frame(df_x_final)
names(df_y) <- "Y"
# Bind together
df <- cbind(df_y, df_x)
rm(df_x, df_y)
## Split dataset into test and train
set.seed(123)
sample_id <- sample(row.names(df), size = nrow(df) * 0.8, replace = FALSE)
df_train <- df[row.names(df) %in% sample_id, ]
df_test <- df[!row.names(df) %in% sample_id, ]
# Omit missings
df_train <- na.omit(df_train)
## Tuning
# num.class = length(unique(y))
# xgboost parameters
param <- list("objective" = "binary:logistic", # multiclass classification
"max_depth" = 6, # maximum depth of tree
"eta" = 0.5, # step size shrinkage
"gamma" = 1.5, # minimum loss reduction
"subsample" = 1, # part of data instances to grow tree
"colsample_bytree" = 1, # subsample ratio of columns when constructing each tree
"min_child_weight" = 0.8, # minimum sum of instance weight needed in a child
"scale_pos_weight" = 50,
"max_delta_step" = 0
)
# set random seed, for reproducibility
set.seed(1234)
bst.cv <- xgb.cv(param=param,
data = as.matrix(df[, 2:length(df)]),
label = df$Y,
nfold=5,
nrounds=100,
prediction=TRUE,
verbose=FALSE)
min.error.idx <- (1:length(bst.cv$evaluation_log$test_error_mean))[bst.cv$evaluation_log$test_error_mean == min(bst.cv$evaluation_log$test_error_mean)]
min.error.idx <- 55
# Testing
#--------
bst <- xgboost(param=param,
data=as.matrix(df[, 2:length(df)]),
label=df$Y,
nrounds=min.error.idx,
verbose=0)
# Predictions
preds = predict(bst, data.matrix(df_x_final))
label = ifelse(preds > 0.5, 1, 0)
df_sub <- data.frame(1:length(label), label)
names(df_sub) <- c('index', 'label')
write_csv(format(df_sub, digits=0), path = "data/submission.txt")
preds = predict(bst, data.matrix(df[, 2:length(df)]))
label = ifelse(preds > 0.5, 1, 0)
table(df$Y, label)
|
# Empirical CDF of male heights vs. the normal approximation (dslabs data).
library(tidyverse)
library(dslabs)
data(heights)
x <- heights %>% filter(sex=="Male") %>% pull(height)
# Empirical CDF: proportion of observations at or below 'a'.
# NOTE(review): the name F masks R's built-in F (shorthand for FALSE) for
# the rest of the session -- a more distinctive name would be safer.
F <- function(a){
  mean(x <= a)
}
1 - F(70) # probability of male taller than 70 inches
# plot distribution of exact heights in data
plot(prop.table(table(x)), xlab = "a = Height in inches", ylab = "Pr(x = a)")
# probabilities in actual data over length 1 ranges containing an integer
mean(x <= 68.5) - mean(x <= 67.5)
mean(x <= 69.5) - mean(x <= 68.5)
mean(x <= 70.5) - mean(x <= 69.5)
# probabilities in normal approximation match well
pnorm(68.5, mean(x), sd(x)) - pnorm(67.5, mean(x), sd(x))
pnorm(69.5, mean(x), sd(x)) - pnorm(68.5, mean(x), sd(x))
pnorm(70.5, mean(x), sd(x)) - pnorm(69.5, mean(x), sd(x))
# probabilities in actual data over other ranges don't match normal approx as well
# (discretization: reported heights cluster on whole/half inches)
mean(x <= 70.9) - mean(x <= 70.1)
pnorm(70.9, mean(x), sd(x)) - pnorm(70.1, mean(x), sd(x))
# Plotting the probability density for the normal distribution.
# dnorm(z) gives the probability density f(z) of a certain z-score
# Note that dnorm() gives densities for the standard normal distribution by default.
# Probabilities for alternative normal distributions with mean mu
# and standard deviation sigma can be evaluated with:
x <- seq(-4, 4, length = 100)
data.frame( x, f = dnorm(x) ) %>%
  ggplot( aes(x, f) ) +
  geom_line()
library(dslabs)
data(heights)
x <- heights %>% filter(sex=="Male") %>% pull(height)
F <- function(a){
mean(x <= a)
}
1 - F(70) # probability of male taller than 70 inches
# plot distribution of exact heights in data
plot(prop.table(table(x)), xlab = "a = Height in inches", ylab = "Pr(x = a)")
# probabilities in actual data over length 1 ranges containing an integer
mean(x <= 68.5) - mean(x <= 67.5)
mean(x <= 69.5) - mean(x <= 68.5)
mean(x <= 70.5) - mean(x <= 69.5)
# probabilities in normal approximation match well
pnorm(68.5, mean(x), sd(x)) - pnorm(67.5, mean(x), sd(x))
pnorm(69.5, mean(x), sd(x)) - pnorm(68.5, mean(x), sd(x))
pnorm(70.5, mean(x), sd(x)) - pnorm(69.5, mean(x), sd(x))
# probabilities in actual data over other ranges don't match normal approx as well
mean(x <= 70.9) - mean(x <= 70.1)
pnorm(70.9, mean(x), sd(x)) - pnorm(70.1, mean(x), sd(x))
# Plotting the probability density for the normal distribution.
# dnorm(z) gives the probability density f(z) of a certain z-score
# Note that dnorm() gives densities for the standard normal distribution by default.
# Probabilities for alternative normal distributions with mean mu
# and standard deviation sigma can be evaluated with:
x <- seq(-4, 4, length = 100)
data.frame( x, f = dnorm(x) ) %>%
ggplot( aes(x, f) ) +
geom_line() |
# Reads two tab-separated point files from a hard-coded Windows path and
# draws each as a line plot: the "-l" variant in black, the plain file in
# red. NOTE(review): each plot() call starts a new plot, so the red curve
# replaces rather than overlays the black one -- use lines() to overlay.
pontos_l <-read.table(file="D:\\3o Semestre\\Calculo Numerico\\Trabalho\\00070-00080-l.txt", sep="\t")
plot(pontos_l$V1, pontos_l$V2, type="l", col="black")
pontos_d <-read.table(file="D:\\3o Semestre\\Calculo Numerico\\Trabalho\\00070-00080.txt", sep="\t")
plot(pontos_d$V1, pontos_d$V2, type="l", col="red")
| /teste-leitura.R | no_license | fer-ferreira/Interpolacao_Som | R | false | false | 312 | r |
pontos_l <-read.table(file="D:\\3o Semestre\\Calculo Numerico\\Trabalho\\00070-00080-l.txt", sep="\t")
plot(pontos_l$V1, pontos_l$V2, type="l", col="black")
pontos_d <-read.table(file="D:\\3o Semestre\\Calculo Numerico\\Trabalho\\00070-00080.txt", sep="\t")
plot(pontos_d$V1, pontos_d$V2, type="l", col="red")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.transcribeservice_operations.R
\name{list_vocabularies}
\alias{list_vocabularies}
\title{Returns a list of vocabularies that match the specified criteria}
\usage{
list_vocabularies(NextToken = NULL, MaxResults = NULL,
StateEquals = NULL, NameContains = NULL)
}
\arguments{
\item{NextToken}{If the result of the previous request to \code{ListVocabularies} was truncated, include the \code{NextToken} to fetch the next set of jobs.}
\item{MaxResults}{The maximum number of vocabularies to return in the response. If there are fewer results in the list, this response contains only the actual results.}
\item{StateEquals}{When specified, only returns vocabularies with the \code{VocabularyState} field equal to the specified state.}
\item{NameContains}{When specified, the vocabularies returned in the list are limited to vocabularies whose name contains the specified string. The search is case-insensitive, \code{ListVocabularies} will return both "vocabularyname" and "VocabularyName" in the response list.}
}
\description{
Returns a list of vocabularies that match the specified criteria. If no criteria are specified, returns the entire list of vocabularies.
}
\section{Accepted Parameters}{
\preformatted{list_vocabularies(
NextToken = "string",
MaxResults = 123,
StateEquals = "PENDING"|"READY"|"FAILED",
NameContains = "string"
)
}
}
| /service/paws.transcribeservice/man/list_vocabularies.Rd | permissive | CR-Mercado/paws | R | false | true | 1,436 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.transcribeservice_operations.R
\name{list_vocabularies}
\alias{list_vocabularies}
\title{Returns a list of vocabularies that match the specified criteria}
\usage{
list_vocabularies(NextToken = NULL, MaxResults = NULL,
StateEquals = NULL, NameContains = NULL)
}
\arguments{
\item{NextToken}{If the result of the previous request to \code{ListVocabularies} was truncated, include the \code{NextToken} to fetch the next set of jobs.}
\item{MaxResults}{The maximum number of vocabularies to return in the response. If there are fewer results in the list, this response contains only the actual results.}
\item{StateEquals}{When specified, only returns vocabularies with the \code{VocabularyState} field equal to the specified state.}
\item{NameContains}{When specified, the vocabularies returned in the list are limited to vocabularies whose name contains the specified string. The search is case-insensitive, \code{ListVocabularies} will return both "vocabularyname" and "VocabularyName" in the response list.}
}
\description{
Returns a list of vocabularies that match the specified criteria. If no criteria are specified, returns the entire list of vocabularies.
}
\section{Accepted Parameters}{
\preformatted{list_vocabularies(
NextToken = "string",
MaxResults = 123,
StateEquals = "PENDING"|"READY"|"FAILED",
NameContains = "string"
)
}
}
|
#' @title Sorting with an arbitrarily defined comparator ('<=' by
#' default).
#' @description A quicksort implementation. It's generic so with the
#' right comparator it will do dependency sorting on
#' function lists...
#' @param x The thing to be sorted.
#' @param cmp The comparator, '<=' or similar.
#' @return x, but sorted according to cmp.
#' @details
#' Naive, after reading a few web pages about how to do it...
#' I just need to sort a short list with a given comparator...
#' Based on:
#'
#' http://algs4.cs.princeton.edu/23quicksort/
#' http://en.wikipedia.org/wiki/Quicksort
#' http://rosettacode.org/wiki/Sorting_algorithms/Quicksort
#' Thanks internet, that Pascal intro course was a long time ago...
#' @examples
#' o <- quicksort(rbinom(n=30, size=15, prob=0.8))
#' @aliases quicksort.list
quicksort <- function(x, cmp = `<=`) {
  ## Work on a local copy; the recursive helper mutates it via `<<-`.
  vec <- x

  ## Return `v` with elements a and b exchanged.
  exchange <- function(v, a, b) {
    hold <- v[a]
    v[a] <- v[b]
    v[b] <- hold
    v
  }

  ## Recursive quicksort over vec[lo..hi] (Lomuto partition, leftmost pivot).
  sort_range <- function(lo, hi) {
    if (lo >= hi) {
      return(invisible(NULL))
    }
    ## Pivot is the leftmost element; park it at the right end while scanning.
    pivot <- vec[lo]
    vec <<- exchange(vec, hi, lo)
    boundary <- lo
    for (pos in lo:(hi - 1)) {
      if (cmp(vec[pos], pivot)) {
        vec <<- exchange(vec, pos, boundary)
        boundary <- boundary + 1
      }
    }
    ## Move the pivot into its final slot, then recurse on both sides.
    vec <<- exchange(vec, hi, boundary)
    if (boundary > 1) {
      sort_range(lo, boundary - 1)
    }
    if (boundary < length(vec)) {
      sort_range(boundary + 1, hi)
    }
    invisible(NULL)
  }

  sort_range(1, length(vec))
  vec
}
#' @title Sorting with an arbitrarily defined comparator ('<=' by
#' default).
#' @description A quicksort implementation. It's generic so with the
#' right comparator it will do dependency sorting on
#' function lists...
#' @param x The thing to be sorted.
#' @param cmp The comparator, '<=' or similar.
#' @return x, but sorted according to cmp.
#' @details
#' Naive, after reading a few web pages about how to do it...
#' I just need to sort a short list with a given comparator...
#' Based on:
#'
#' http://algs4.cs.princeton.edu/23quicksort/
#' http://en.wikipedia.org/wiki/Quicksort
#' http://rosettacode.org/wiki/Sorting_algorithms/Quicksort
#' Thanks internet, that Pascal intro course was a long time ago...
#' @examples
#' o <- quicksort.list(as.list(rbinom(n=30, size=15, prob=0.8)))
#' @aliases quicksort
quicksort.list <- function(x, cmp = `<=`) {
  A <- new.env()
  A[['x']] <- x
  nms <- names(x)
  ## Stash each element's name as an attribute so names travel with the
  ## elements while they are swapped around. seq_along() is safe for empty
  ## lists (the original `1:length(...)` produced c(1, 0) and errored), and
  ## skipping unnamed input avoids `names(l)[i] <- NULL`, which is an error.
  if (!is.null(nms)) {
    for (i in seq_along(A[['x']])) {
      attr(x = A[['x']][[i]], which = 'name') <- nms[i]
    }
  }
  ## Exchange two list elements. `[[<-` copies each element together with
  ## its attributes, so no explicit attribute shuffling is needed.
  swap <- function(i, j) {
    tmp <- A$x[[i]]
    A$x[[i]] <- A$x[[j]]
    A$x[[j]] <- tmp
  }
  ## Recursive quicksort over A$x[i..k].
  qs <- function(i, k) {
    if (i < k) {
      p <- partition(i, k)
      if (p != 1) {
        qs(i, p - 1)
      }
      if (p != length(A$x)) {
        qs(p + 1, k)
      }
    }
  }
  ## Lomuto partition: pivot moved to the right end, scan l..r-1.
  partition <- function(l, r) {
    pivot_index <- choose_pivot(l, r)
    pivot_value <- A$x[[pivot_index]]
    swap(r, pivot_index)
    store_index <- l
    for (i in l:(r - 1)) {
      if (cmp(A$x[[i]], pivot_value)) {
        swap(i, store_index)
        store_index <- store_index + 1
      }
    }
    swap(r, store_index)
    store_index
  }
  ## Leftmost pivot, matching quicksort().
  choose_pivot <- function(l, r) {
    l
  }
  qs(1, length(A$x))
  ## Recover names from the stashed attributes.
  if (!is.null(nms)) {
    for (i in seq_along(A[['x']])) {
      names(A$x)[i] <- attr(x = A[['x']][[i]], which = 'name')
    }
  }
  A$x
}
#' @title A comparator ('<=' equivalent) for quicksort.
#' @description Compares functions based on dependencies!
#' @param f A function.
#' @param g Another function.
#' @return Returns TRUE if g does not depend on f.
cmp_function_dependence <- function(f, g) {
  ## Comparator ("<=") for dependency sorting: TRUE means g may come after f.
  ## A function "depends on" another when it names it as a formal argument;
  ## each function's own name is expected in its 'name' attribute.
  args_f <- formalArgs(f)
  if (!is.null(args_f)) args_f <- sort(args_f)
  nom_g <- attr(g, 'name')
  args_g <- formalArgs(g)
  if (!is.null(args_g)) args_g <- sort(args_g)
  ## Zero-argument functions sort before functions with arguments.
  if (is.null(args_f) && !is.null(args_g)) return(TRUE)
  if (is.null(args_g) && !is.null(args_f)) return(FALSE)
  if (is.null(args_f) && is.null(args_g)) return(TRUE)
  ## Identical argument lists: treat as equal, keeping relative order.
  if (isTRUE(all.equal(args_f, args_g))) return(TRUE)
  ## f must come after g exactly when f's formals mention g by name.
  ## (Removed the unreachable stop("BAD THINGS.") dead code and the unused
  ## local nom_f from the original.)
  if (nom_g %in% args_f) {
    return(FALSE)
  }
  TRUE
}
#' @title Dependency resolver for function lists.
#' @description Returns the function list with independent functions coming
#'              first.
#' @param f_list A list of functions to be sorted.
#' @examples
#' test_list <- list(
#' g = function(f, q) { f+q },
#' f = function(x) { x^2 },
#' h = function() { 10 }
#' )
#'
#' sorted_test_list <- dependency_resolver(test_list)
#' @seealso cmp_function_dependence, quicksort.list
dependency_resolver <- function(f_list) {
  ## Sort the function list so that independent functions come first,
  ## delegating to the list quicksort with the dependency comparator.
  quicksort.list(f_list, cmp = cmp_function_dependence)
}
| /package_dir/R/simple-quicksort.R | no_license | sakrejda/data-integrator | R | false | false | 4,851 | r |
#' @title Sorting with an arbitrarily defined comparator ('<=' by
#' default).
#' @description A quicksort implementation. It's generic so with the
#' right comparator it will do dependency sorting on
#' function lists...
#' @param x The thing to be sorted.
#' @param cmp The comparator, '<=' or similar.
#' @return x, but sorted according to cmp.
#' @details
#' Naive, after reading a few web pages about how to do it...
#' I just need to sort a short list with a given comparator...
#' Based on:
#'
#' http://algs4.cs.princeton.edu/23quicksort/
#' http://en.wikipedia.org/wiki/Quicksort
#' http://rosettacode.org/wiki/Sorting_algorithms/Quicksort
#' Thanks internet, that Pascal intro course was a long time ago...
#' @examples
#' o <- quicksort(rbinom(n=30, size=15, prob=0.8))
#' @aliases quicksort.list
quicksort <- function(x, cmp = `<=`) {
  ## Local working copy; the recursive helper updates it through `<<-`.
  vec <- x

  ## Return `v` with positions a and b exchanged.
  exchange <- function(v, a, b) {
    hold <- v[a]
    v[a] <- v[b]
    v[b] <- hold
    v
  }

  ## Quicksort vec[lo..hi] in place (Lomuto partition, leftmost pivot).
  sort_range <- function(lo, hi) {
    if (lo >= hi) {
      return(invisible(NULL))
    }
    pivot <- vec[lo]
    vec <<- exchange(vec, hi, lo)   # park pivot at the right end
    boundary <- lo
    for (pos in lo:(hi - 1)) {
      if (cmp(vec[pos], pivot)) {
        vec <<- exchange(vec, pos, boundary)
        boundary <- boundary + 1
      }
    }
    vec <<- exchange(vec, hi, boundary)  # pivot into its final slot
    if (boundary > 1) {
      sort_range(lo, boundary - 1)
    }
    if (boundary < length(vec)) {
      sort_range(boundary + 1, hi)
    }
    invisible(NULL)
  }

  sort_range(1, length(vec))
  vec
}
#' @title Sorting with an arbitrarily defined comparator ('<=' by
#' default).
#' @description A quicksort implementation. It's generic so with the
#' right comparator it will do dependency sorting on
#' function lists...
#' @param x The thing to be sorted.
#' @param cmp The comparator, '<=' or similar.
#' @return x, but sorted according to cmp.
#' @details
#' Naive, after reading a few web pages about how to do it...
#' I just need to sort a short list with a given comparator...
#' Based on:
#'
#' http://algs4.cs.princeton.edu/23quicksort/
#' http://en.wikipedia.org/wiki/Quicksort
#' http://rosettacode.org/wiki/Sorting_algorithms/Quicksort
#' Thanks internet, that Pascal intro course was a long time ago...
#' @examples
#' o <- quicksort.list(as.list(rbinom(n=30, size=15, prob=0.8)))
#' @aliases quicksort
quicksort.list <- function(x, cmp = `<=`) {
  A <- new.env()
  A[['x']] <- x
  nms <- names(x)
  ## Stash each element's name as an attribute so names travel with the
  ## elements while they are swapped around. seq_along() handles empty
  ## lists (the original `1:length(...)` yielded c(1, 0) and errored), and
  ## skipping unnamed input avoids the `names(l)[i] <- NULL` error.
  if (!is.null(nms)) {
    for (i in seq_along(A[['x']])) {
      attr(x = A[['x']][[i]], which = 'name') <- nms[i]
    }
  }
  ## Exchange two list elements; `[[<-` copies elements with their
  ## attributes, so the original's explicit attribute copying was redundant.
  swap <- function(i, j) {
    tmp <- A$x[[i]]
    A$x[[i]] <- A$x[[j]]
    A$x[[j]] <- tmp
  }
  ## Recursive quicksort over A$x[i..k].
  qs <- function(i, k) {
    if (i < k) {
      p <- partition(i, k)
      if (p != 1) {
        qs(i, p - 1)
      }
      if (p != length(A$x)) {
        qs(p + 1, k)
      }
    }
  }
  ## Lomuto partition: pivot moved to the right end, scan l..r-1.
  partition <- function(l, r) {
    pivot_index <- choose_pivot(l, r)
    pivot_value <- A$x[[pivot_index]]
    swap(r, pivot_index)
    store_index <- l
    for (i in l:(r - 1)) {
      if (cmp(A$x[[i]], pivot_value)) {
        swap(i, store_index)
        store_index <- store_index + 1
      }
    }
    swap(r, store_index)
    store_index
  }
  ## Leftmost pivot, matching quicksort().
  choose_pivot <- function(l, r) {
    l
  }
  qs(1, length(A$x))
  ## Recover names from the stashed attributes.
  if (!is.null(nms)) {
    for (i in seq_along(A[['x']])) {
      names(A$x)[i] <- attr(x = A[['x']][[i]], which = 'name')
    }
  }
  A$x
}
#' @title A comparator ('<=' equivalent) for quicksort.
#' @description Compares functions based on dependencies!
#' @param f A function.
#' @param g Another function.
#' @return Returns TRUE if g does not depend on f.
cmp_function_dependence <- function(f, g) {
  ## Comparator ("<=") for dependency sorting: TRUE means g may come after f.
  ## A function "depends on" another when it names it as a formal argument;
  ## each function's own name is expected in its 'name' attribute.
  args_f <- formalArgs(f)
  if (!is.null(args_f)) args_f <- sort(args_f)
  nom_g <- attr(g, 'name')
  args_g <- formalArgs(g)
  if (!is.null(args_g)) args_g <- sort(args_g)
  ## Zero-argument functions sort before functions with arguments.
  if (is.null(args_f) && !is.null(args_g)) return(TRUE)
  if (is.null(args_g) && !is.null(args_f)) return(FALSE)
  if (is.null(args_f) && is.null(args_g)) return(TRUE)
  ## Identical argument lists: treat as equal, keeping relative order.
  if (isTRUE(all.equal(args_f, args_g))) return(TRUE)
  ## f must come after g exactly when f's formals mention g by name.
  ## (Unreachable stop("BAD THINGS.") and unused local nom_f removed.)
  if (nom_g %in% args_f) {
    return(FALSE)
  }
  TRUE
}
#' @title Dependency resolver for function lists.
#' @description Returns the function list with independent functions coming
#'              first.
#' @param f_list A list of functions to be sorted.
#' @examples
#' test_list <- list(
#' g = function(f, q) { f+q },
#' f = function(x) { x^2 },
#' h = function() { 10 }
#' )
#'
#' sorted_test_list <- dependency_resolver(test_list)
#' @seealso cmp_function_dependence, quicksort.list
dependency_resolver <- function(f_list) {
  ## Sort the function list so that independent functions come first,
  ## delegating to the list quicksort with the dependency comparator.
  quicksort.list(f_list, cmp = cmp_function_dependence)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels.R
\name{var_lab}
\alias{var_lab}
\alias{var_lab.data.frame}
\alias{var_lab<-}
\alias{has.label}
\alias{drop_lab}
\title{Set or get variable label}
\usage{
var_lab(x)
\method{var_lab}{data.frame}(x)
var_lab(x) <- value
has.label(x)
drop_lab(x)
}
\arguments{
\item{x}{Variable. In the most cases it is numeric vector.}
\item{value}{A character scalar - label for the variable x.}
}
\value{
\code{var_lab} returns the variable label. If the label doesn't exist it returns
NULL. \code{var_lab<-} returns the variable (vector x) with attribute "label" which equals the submitted value.
}
\description{
These functions set/get/drop variable labels. For
value labels see \link{val_lab}.
\itemize{
\item{\code{var_lab}}{ returns variable label or NULL if label doesn't
exist.}
\item{\code{var_lab<-}}{ set variable label.}
\item{\code{drop_lab}}{ drops variable label.}
\item{\code{has.label}}{ check if variable label exists.}
}
}
\details{
Variable label is stored in attribute "label" (\code{attr(x,"label")}).
To drop variable label use \code{var_lab(var) <- NULL} or \code{drop_lab(var)}.
}
\examples{
data(mtcars)
mtcars = within(mtcars,{
var_lab(mpg) = "Miles/(US) gallon"
var_lab(cyl) = "Number of cylinders"
var_lab(disp) = "Displacement (cu.in.)"
var_lab(hp) = "Gross horsepower"
var_lab(drat) = "Rear axle ratio"
var_lab(wt) = "Weight (lb/1000)"
var_lab(qsec) = "1/4 mile time"
var_lab(vs) = "V/S"
val_lab(vs) = c("V-shaped" = 0, "straight"=1)
var_lab(am) = "Transmission"
val_lab(am) = c(automatic = 0, manual=1)
var_lab(gear) = "Number of forward gears"
var_lab(carb) = "Number of carburetors"
})
table(mtcars$am)
}
\references{
This is a modified version from `expss` package.
}
| /man/var_lab.Rd | no_license | shug0131/cctu | R | false | true | 1,970 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels.R
\name{var_lab}
\alias{var_lab}
\alias{var_lab.data.frame}
\alias{var_lab<-}
\alias{has.label}
\alias{drop_lab}
\title{Set or get variable label}
\usage{
var_lab(x)
\method{var_lab}{data.frame}(x)
var_lab(x) <- value
has.label(x)
drop_lab(x)
}
\arguments{
\item{x}{Variable. In the most cases it is numeric vector.}
\item{value}{A character scalar - label for the variable x.}
}
\value{
\code{var_lab} returns the variable label. If the label doesn't exist it returns
NULL. \code{var_lab<-} returns the variable (vector x) with attribute "label" which equals the submitted value.
}
\description{
These functions set/get/drop variable labels. For
value labels see \link{val_lab}.
\itemize{
\item{\code{var_lab}}{ returns variable label or NULL if label doesn't
exist.}
\item{\code{var_lab<-}}{ set variable label.}
\item{\code{drop_lab}}{ drops variable label.}
\item{\code{has.label}}{ check if variable label exists.}
}
}
\details{
Variable label is stored in attribute "label" (\code{attr(x,"label")}).
To drop variable label use \code{var_lab(var) <- NULL} or \code{drop_lab(var)}.
}
\examples{
data(mtcars)
mtcars = within(mtcars,{
var_lab(mpg) = "Miles/(US) gallon"
var_lab(cyl) = "Number of cylinders"
var_lab(disp) = "Displacement (cu.in.)"
var_lab(hp) = "Gross horsepower"
var_lab(drat) = "Rear axle ratio"
var_lab(wt) = "Weight (lb/1000)"
var_lab(qsec) = "1/4 mile time"
var_lab(vs) = "V/S"
val_lab(vs) = c("V-shaped" = 0, "straight"=1)
var_lab(am) = "Transmission"
val_lab(am) = c(automatic = 0, manual=1)
var_lab(gear) = "Number of forward gears"
var_lab(carb) = "Number of carburetors"
})
table(mtcars$am)
}
\references{
This is a modified version from `expss` package.
}
|
library(ppitables)
### Name: ppiKHM2015_wb
### Title: ppiKHM2015_wb
### Aliases: ppiKHM2015_wb
### Keywords: datasets
### ** Examples
# Access Cambodia PPI table
ppiKHM2015_wb
# Given a specific PPI score (from 0 - 100), get the row of poverty
# probabilities from PPI table it corresponds to
ppiScore <- 50
ppiKHM2015_wb[ppiKHM2015_wb$score == ppiScore, ]
# Use subset() function to get the row of poverty probabilities corresponding
# to specific PPI score
ppiScore <- 50
subset(ppiKHM2015_wb, score == ppiScore)
# Given a specific PPI score (from 0 - 100), get a poverty probability
# based on a specific poverty definition. In this example, the national
# poverty line definition
ppiScore <- 50
ppiKHM2015_wb[ppiKHM2015_wb$score == ppiScore, "nl100"]
| /data/genthat_extracted_code/ppitables/examples/ppiKHM2015_wb.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 796 | r | library(ppitables)
### Name: ppiKHM2015_wb
### Title: ppiKHM2015_wb
### Aliases: ppiKHM2015_wb
### Keywords: datasets
### ** Examples
# Access Cambodia PPI table
ppiKHM2015_wb
# Given a specific PPI score (from 0 - 100), get the row of poverty
# probabilities from PPI table it corresponds to
ppiScore <- 50
ppiKHM2015_wb[ppiKHM2015_wb$score == ppiScore, ]
# Use subset() function to get the row of poverty probabilities corresponding
# to specific PPI score
ppiScore <- 50
subset(ppiKHM2015_wb, score == ppiScore)
# Given a specific PPI score (from 0 - 100), get a poverty probability
# based on a specific poverty definition. In this example, the national
# poverty line definition
ppiScore <- 50
ppiKHM2015_wb[ppiKHM2015_wb$score == ppiScore, "nl100"]
|
#' Plot frequency against mean for each feature
#'
#' Plot the frequency of expression (i.e., percentage of expressing cells) against the mean expression level for each feature in a SingleCellExperiment object.
#' This is deprecated in favour of directly using \code{\link{plotRowData}}.
#'
#' @param object A SingleCellExperiment object.
#' @param freq_exprs String specifying the row-level metadata field containing the number of expressing cells per feature.
#' Alternatively, an \link{AsIs} vector or data.frame, see \code{?\link{retrieveFeatureInfo}}.
#' @param mean_exprs String specifying the row-level metadata field containing the mean expression of each feature.
#' Alternatively, an \link{AsIs} vector or data.frame, see \code{?\link{retrieveFeatureInfo}}.
#' @param controls Deprecated and ignored.
#' @param exprs_values String specifying the assay used for the default \code{freq_exprs} and \code{mean_exprs}.
#' This can be set to, e.g., \code{"logcounts"} so that \code{freq_exprs} defaults to \code{"n_cells_by_logcounts"}.
#' @param by_show_single Deprecated and ignored.
#' @param show_smooth Logical scalar, should a smoothed fit be shown on the plot?
#' See \code{\link[ggplot2]{geom_smooth}} for details.
#' @param show_se Logical scalar, should the standard error be shown for a smoothed fit?
#' @param ... Further arguments passed to \code{\link{plotRowData}}.
#'
#' @details
#' This function plots gene expression frequency versus mean expression level, which can be useful to assess the effects of technical dropout in the dataset.
#' We fit a non-linear least squares curve for the relationship between expression frequency and mean expression.
#' We use this curve to define the number of genes above high technical dropout and the numbers of genes that are expressed in at least 50\% and at least 25\% of cells.
#'
#' @return A \link{ggplot} object.
#'
#' @seealso \code{\link{plotRowData}}
#'
#' @examples
#' example_sce <- mockSCE()
#' example_sce <- logNormCounts(example_sce)
#'
#' example_sce <- calculateQCMetrics(example_sce,
#' feature_controls = list(set1 = 1:500))
#' plotExprsFreqVsMean(example_sce)
#'
#' plotExprsFreqVsMean(example_sce, size_by = "is_feature_control")
#'
#' @export
#' @importFrom methods is
#' @importClassesFrom SingleCellExperiment SingleCellExperiment
#' @importFrom ggplot2 ggplot geom_hline geom_vline annotate geom_smooth scale_shape_manual
#' @importFrom stats nls coef
plotExprsFreqVsMean <- function(object, freq_exprs, mean_exprs, controls, exprs_values = "counts", by_show_single = FALSE, show_smooth = TRUE, show_se = TRUE, ...)
{
    # Deprecated entry point: all plotting is delegated to plotRowData().
    .Deprecated(new="plotRowData")
    if ( !is(object, "SingleCellExperiment") ) {
        stop("Object must be an SingleCellExperiment")
    }
    # If the caller did not name the QC fields, derive defaults from
    # `exprs_values` (e.g. "n_cells_by_counts" / "mean_counts").
    # NOTE(review): .qc_hunter() is a package-internal helper; it appears to
    # resolve the metric name against row-level data (mode = 'row') -- confirm
    # against its definition.
    if (missing(freq_exprs) || is.null(freq_exprs)) {
        freq_exprs <- .qc_hunter(object, paste0("n_cells_by_", exprs_values), mode = 'row')
    }
    if (missing(mean_exprs) || is.null(mean_exprs)) {
        mean_exprs <- .qc_hunter(object, paste0("mean_", exprs_values), mode = 'row')
    }
    # Calculating the percentage and storing it somewhere obscure.
    # freqs: per-feature expressing-cell count converted to % of all cells;
    # means: log2(mean expression + 1).
    freqs <- retrieveFeatureInfo(object, freq_exprs, search = "rowData")$val / ncol(object) * 100
    means <- log2(retrieveFeatureInfo(object, mean_exprs, search = "rowData")$val + 1)
    # Base scatter plot of frequency (y) vs mean (x); shapes 1 and 17 are
    # assigned to whatever two-level shape_by aesthetic the caller passes
    # through `...` (e.g. is_feature_control).
    plot_out <- plotRowData(object,
        y = data.frame(`Percentage of expressing cells`=freqs, check.names=FALSE),
        x = data.frame(`Mean expression` = means, check.names=FALSE),
        ...) + scale_shape_manual(values = c(1, 17))
    ## data frame with expression mean and frequency (fq on a 0-1 scale).
    mn_vs_fq <- data.frame(mn = means, fq = freqs / 100)
    # x position for the annotation text: 60% of the way across the x range.
    text_x_loc <- min(mn_vs_fq$mn) + 0.6 * diff(range(mn_vs_fq$mn))
    if ( show_smooth ) {
        # Overlay a smoothed conditional-mean fit of frequency on mean.
        tmp_tab <- mn_vs_fq
        plot_out <- plot_out + geom_smooth(aes_string(x = "mn", y = "100 * fq"), data = tmp_tab,
                                           colour = "firebrick", size = 1, se = show_se)
    }
    ## add annotations to existing plot: a 50% reference line plus counts of
    ## genes expressed in at least 50% / 25% of cells.
    plot_out <- plot_out +
        geom_hline(yintercept = 50, linetype = 2) + # 50% dropout
        annotate("text", x = text_x_loc, y = 40, label = paste(
            sum(mn_vs_fq$fq >= 0.5),
            " genes are expressed\nin at least 50% of cells", sep = "" )) +
        annotate("text", x = text_x_loc, y = 20, label = paste(
            sum(mn_vs_fq$fq >= 0.25),
            " genes are expressed\nin at least 25% of cells", sep = "" ))
    ## return the plot object
    plot_out
}
| /R/plotExprsFreqVsMean.R | no_license | liyueyikate/scater | R | false | false | 4,609 | r | #' Plot frequency against mean for each feature
#'
#' Plot the frequency of expression (i.e., percentage of expressing cells) against the mean expression level for each feature in a SingleCellExperiment object.
#' This is deprecated in favour of directly using \code{\link{plotRowData}}.
#'
#' @param object A SingleCellExperiment object.
#' @param freq_exprs String specifying the row-level metadata field containing the number of expressing cells per feature.
#' Alternatively, an \link{AsIs} vector or data.frame, see \code{?\link{retrieveFeatureInfo}}.
#' @param mean_exprs String specifying the row-level metadata field containing the mean expression of each feature.
#' Alternatively, an \link{AsIs} vector or data.frame, see \code{?\link{retrieveFeatureInfo}}.
#' @param controls Deprecated and ignored.
#' @param exprs_values String specifying the assay used for the default \code{freq_exprs} and \code{mean_exprs}.
#' This can be set to, e.g., \code{"logcounts"} so that \code{freq_exprs} defaults to \code{"n_cells_by_logcounts"}.
#' @param by_show_single Deprecated and ignored.
#' @param show_smooth Logical scalar, should a smoothed fit be shown on the plot?
#' See \code{\link[ggplot2]{geom_smooth}} for details.
#' @param show_se Logical scalar, should the standard error be shown for a smoothed fit?
#' @param ... Further arguments passed to \code{\link{plotRowData}}.
#'
#' @details
#' This function plots gene expression frequency versus mean expression level, which can be useful to assess the effects of technical dropout in the dataset.
#' We fit a non-linear least squares curve for the relationship between expression frequency and mean expression.
#' We use this curve to define the number of genes above high technical dropout and the numbers of genes that are expressed in at least 50\% and at least 25\% of cells.
#'
#' @return A \link{ggplot} object.
#'
#' @seealso \code{\link{plotRowData}}
#'
#' @examples
#' example_sce <- mockSCE()
#' example_sce <- logNormCounts(example_sce)
#'
#' example_sce <- calculateQCMetrics(example_sce,
#' feature_controls = list(set1 = 1:500))
#' plotExprsFreqVsMean(example_sce)
#'
#' plotExprsFreqVsMean(example_sce, size_by = "is_feature_control")
#'
#' @export
#' @importFrom methods is
#' @importClassesFrom SingleCellExperiment SingleCellExperiment
#' @importFrom ggplot2 ggplot geom_hline geom_vline annotate geom_smooth scale_shape_manual
#' @importFrom stats nls coef
plotExprsFreqVsMean <- function(object, freq_exprs, mean_exprs, controls, exprs_values = "counts", by_show_single = FALSE, show_smooth = TRUE, show_se = TRUE, ...)
{
    # Deprecated entry point: all plotting is delegated to plotRowData().
    .Deprecated(new="plotRowData")
    if ( !is(object, "SingleCellExperiment") ) {
        stop("Object must be an SingleCellExperiment")
    }
    # If the caller did not name the QC fields, derive defaults from
    # `exprs_values` (e.g. "n_cells_by_counts" / "mean_counts").
    # NOTE(review): .qc_hunter() is a package-internal helper; it appears to
    # resolve the metric name against row-level data (mode = 'row') -- confirm
    # against its definition.
    if (missing(freq_exprs) || is.null(freq_exprs)) {
        freq_exprs <- .qc_hunter(object, paste0("n_cells_by_", exprs_values), mode = 'row')
    }
    if (missing(mean_exprs) || is.null(mean_exprs)) {
        mean_exprs <- .qc_hunter(object, paste0("mean_", exprs_values), mode = 'row')
    }
    # Calculating the percentage and storing it somewhere obscure.
    # freqs: per-feature expressing-cell count converted to % of all cells;
    # means: log2(mean expression + 1).
    freqs <- retrieveFeatureInfo(object, freq_exprs, search = "rowData")$val / ncol(object) * 100
    means <- log2(retrieveFeatureInfo(object, mean_exprs, search = "rowData")$val + 1)
    # Base scatter plot of frequency (y) vs mean (x); shapes 1 and 17 are
    # assigned to whatever two-level shape_by aesthetic the caller passes
    # through `...` (e.g. is_feature_control).
    plot_out <- plotRowData(object,
        y = data.frame(`Percentage of expressing cells`=freqs, check.names=FALSE),
        x = data.frame(`Mean expression` = means, check.names=FALSE),
        ...) + scale_shape_manual(values = c(1, 17))
    ## data frame with expression mean and frequency (fq on a 0-1 scale).
    mn_vs_fq <- data.frame(mn = means, fq = freqs / 100)
    # x position for the annotation text: 60% of the way across the x range.
    text_x_loc <- min(mn_vs_fq$mn) + 0.6 * diff(range(mn_vs_fq$mn))
    if ( show_smooth ) {
        # Overlay a smoothed conditional-mean fit of frequency on mean.
        tmp_tab <- mn_vs_fq
        plot_out <- plot_out + geom_smooth(aes_string(x = "mn", y = "100 * fq"), data = tmp_tab,
                                           colour = "firebrick", size = 1, se = show_se)
    }
    ## add annotations to existing plot: a 50% reference line plus counts of
    ## genes expressed in at least 50% / 25% of cells.
    plot_out <- plot_out +
        geom_hline(yintercept = 50, linetype = 2) + # 50% dropout
        annotate("text", x = text_x_loc, y = 40, label = paste(
            sum(mn_vs_fq$fq >= 0.5),
            " genes are expressed\nin at least 50% of cells", sep = "" )) +
        annotate("text", x = text_x_loc, y = 20, label = paste(
            sum(mn_vs_fq$fq >= 0.25),
            " genes are expressed\nin at least 25% of cells", sep = "" ))
    ## return the plot object
    plot_out
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/move_fish_none.R
\name{move_fish_none}
\alias{move_fish_none}
\title{Move Fish None}
\usage{
move_fish_none(fish_area, max_prob, min_prob)
}
\arguments{
\item{fish_area}{Input matrix of distributed fish}
\item{max_prob}{not applicable to this function}
\item{min_prob}{not applicable to this function}
}
\description{
This function doesn't move any fish. Easiest to add no movement into the package
with this dummy function
}
\examples{
}
\keyword{movement}
| /man/move_fish_none.Rd | no_license | peterkuriyama/hlsimulator | R | false | true | 539 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/move_fish_none.R
\name{move_fish_none}
\alias{move_fish_none}
\title{Move Fish None}
\usage{
move_fish_none(fish_area, max_prob, min_prob)
}
\arguments{
\item{fish_area}{Input matrix of distributed fish}
\item{max_prob}{not applicable to this function}
\item{min_prob}{not applicable to this function}
}
\description{
This function doesn't move any fish. Easiest to add no movement into the package
with this dummy function
}
\examples{
}
\keyword{movement}
|
#### Econometrics Project - 12/4/2015 #####
options(warn=-1)
suppressMessages(library(dplyr))
suppressMessages(library(magrittr))
suppressMessages(library(car))
# **** place the file path to the dataset here ****
file_path <- "~/Desktop/full_dataset_final.csv"
# Load, clean dataset ####
fdi <- read.csv(file_path, stringsAsFactors=FALSE)
fdi[,-1] <- apply(fdi[,-1], 2, as.numeric)
fdi %<>% rename(electricity = electricitiy)
# replaced education with mean for: Australia, Bahrain, Bosnia and Herzegovina,
# Brazil, France, United Kingdom, Singapore, New Zealand
# imputed development vars for: Bahamas (2) & Qatar (1)
fdi$Dev.DV[fdi$Country == 'Qatar'] <- 1
fdi$Dev.DV[fdi$Country == 'Bahamas, The'] <- 2
fdi %<>% group_by(Dev.DV) %>%
summarise(edu = mean(education, na.rm = TRUE)) %>%
inner_join(fdi, .) %>%
mutate(education = ifelse(is.na(education), edu, education)) %>%
select(-edu)
# save full file
full <- fdi
fdi <- fdi[complete.cases(fdi),]
# make Dummy Variables factors
DVs <- which(grepl('DV$', colnames(fdi)))
fdi[,DVs] <- apply(fdi[,DVs], 2, as.factor)
rm(DVs)
# Model 1 (all variables - no logs) ####
fit1 <- lm(FDI.sum ~ tax + warehouse + tariff + broadband + education + electricity +
corrupt + law + politic + REER.vol + REER.avg + REER.chg + resource +
infl + GDP.avg + GDP.PPP.chg + GDP.pc.lvl.avg + Openness + Credit +
Dev.DV + ER.DV + FDI.ctrl.DV, data = fdi)
summary(fit1)
summary(step(fit1, trace = F))
# Model 2 (Logs & Select variables) ####
# add DV for each individual levels Development and Exch rate DVs
fdi %<>% mutate(ER.float.DV = ER.DV == '3', ER.mngd.DV = ER.DV == '2',
Dev2.DV = Dev.DV == '2', Dev3.DV = Dev.DV == '3')
fit2 <- lm(log(FDI.sum+1) ~ log(tax+1) + log(warehouse+1) + tariff + broadband +
I((education > 100)*log(education)) + I((education <= 100)*education) +
log(101 - electricity) + corrupt + law + politic + log(REER.vol+1) + REER.avg + REER.chg + resource +
infl + GDP.avg + log(GDP.pc.lvl.avg+1) + log(Openness+1) + Credit +
ER.float.DV + ER.mngd.DV + FDI.ctrl.DV, data = fdi)
summary(fit2)
summary(step(fit2, trace = F))
# paring down the stepwise ####
f <- 'log(FDI.sum+1) ~ log(warehouse + 1) + resource +
I((education > 100)*log(education + 1)) +
I((education <= 100)*education) +
politic + ER.float.DV + gdp.growth.2002 +
log(GDP.pc.lvl.2002 + 1) + log(Openness + 1)'
f <- as.formula(f)
fit3 <- lm(f, fdi)
summary(fit3)
summary(update(fit3, .~. -resource))
summary(update(fit3, .~. -resource -log(warehouse+1)))
summary(update(fit3, .~. -resource -log(warehouse+1) -I((education <= 100) * education)))
# go back to full dataset and get records where our explanatory variables are populated
# increases sample size significantly
full %<>% select(Country, FDI.sum, education, politic, ER.DV,
gdp.growth.2002, GDP.pc.lvl.2002, Openness) %>%
mutate(ER.float.DV = ER.DV == 3) %>%
select(-ER.DV) %>%
filter(complete.cases(.))
mod <- lm(log(FDI.sum+1) ~ I((education > 100)*log(education + 1)) +
politic + ER.float.DV + gdp.growth.2002 +
log(GDP.pc.lvl.2002 + 1) + log(Openness + 1), full)
summary(mod)
# covariance matrix
options(digits = 2)
vcov(mod)
# anova table
anova(mod)
## Distribution of Residuals ####
## leaving out studentized
resid <- residuals(mod)
rs <- e1071::skewness(resid)
#histogram
hist(resid, col = 'grey', 10, main = 'Distribution of Residuals',
xlab = paste('Skewness:', round(rs, 4)))
# qqPlot
qqPlot(mod)
# kernel density
d <- density(resid)
plot(d, main="Kernel Density of Residuals")
polygon(d, col="red", border="black")
# Tests ####
## Heteroskedasticity
## White test for heteroskedasticity (special form): regress the squared
## residuals on the fitted values and their squares, then compare the
## LM statistic n * R^2 against a chi-square distribution.
## Returns the p-value of the test.
white <- function(model) {
  u <- residuals(model)^2        # squared residuals
  yh <- fitted(model)
  ru2 <- summary(lm(u ~ yh + I(yh^2)))$r.squared
  lm_stat <- nrow(model$model) * ru2
  ## BUG FIX: the original called coef(mod), silently referencing the
  ## global object `mod` instead of the `model` argument, so the function
  ## only worked when a suitable `mod` happened to exist in the caller's
  ## environment.
  ## NOTE(review): the special-form White test is conventionally compared
  ## against chi-square with 2 df (the two auxiliary regressors); the
  ## original df convention (#coefficients - 1) is kept for continuity.
  p <- 1 - pchisq(lm_stat, length(coef(model)) - 1)
  return(p)
}
white(mod) # White
lmtest::bptest(mod)$p.value # Breusch-Pagan
lmtest::gqtest(mod)$p.value # GoldfeldโQuandt
## Multi-Collinearity
vif(mod) # variance inflation factors
cor(mod$model)
## Model Specification (Ramsey Reset test)
lmtest::reset(mod)
## Endogeneity
mod_new <- update(mod, .~. -log(GDP.pc.lvl.avg+1) + log(GDP.pc.lvl.2002+1) )
r <- Matrix::rankMatrix(vcov(mod_new) - vcov(mod))[1]
# Hausman-Wu test
hw <- t(coef(mod) - coef(mod_new)) %*%
MASS::ginv(vcov(mod_new) - vcov(mod)) %*%
t(t(coef(mod) - coef(mod_new)))
1-pchisq(hw, r) # not finding endogeneity
# histograms, plots ####
suppressMessages(require(ggplot2))
suppressMessages(require(reshape2))
# histogram of explanatory variables (pre-tranformations)
full %>%
select(-Country, -ER.float.DV) %>%
melt(.) %>%
ggplot(., aes(x = value)) +
facet_wrap(~variable, scales = "free") +
geom_histogram() +
xlab("Variable Values") +
ylab("Frequency") +
ggtitle("Explanatory Variable Distributions")
# histogram of explanatory variables (post-transformation)
mod$model %>%
select(-ER.float.DV) %>%
melt(.) %>%
filter(!(grepl('education', variable) & value == 0)) %>%
ggplot(., aes(x = value, color = ifelse(grepl('log', variable), 'red', NA),
fill = ifelse(grepl('log', variable), 'red', NA))) +
facet_wrap(~variable, scales = "free") +
geom_histogram() +
xlab("Post Transformation Values") +
ylab("Frequency") +
ggtitle(expression(atop("Explanatory Variable Distributions", atop(italic("Post Transformation"), "")))) +
theme(legend.position = 'none')
# plot of explanatory variables vs FDI (post-transformation)
mod$model %>%
select(-ER.float.DV) %>%
mutate(Country = full$Country) %>%
melt(., id.vars = 'Country') %>%
filter(!(grepl('education', variable) & value == 0)) %>%
inner_join(., select(fdi, Country, FDI.sum)) %>%
filter(variable != 'log(FDI.sum + 1)') %>%
ggplot(., aes(x = value, y = log(FDI.sum +1))) +
facet_wrap(~variable, scales = 'free') +
geom_point() +
geom_smooth(method = 'loess') + xlab("") +
ggtitle(expression(atop("X vs FDI Scatter Plots", atop(italic("with Conditional Mean"), ""))))
# histogram of coefficients after repeated sampling
n <- nrow(full)
p <- 1 # size of sample
c <- replicate(5000, coef(update(mod, subset = sample(n, n*p, replace = TRUE))))
c <- as.data.frame(t(c))
c %>% melt(.) %>%
ggplot(., aes(x = value)) +
facet_wrap(~variable, scales = "free") +
geom_histogram() + xlab("") +
ggtitle(expression(atop("Reapeated Sampling", atop(italic("Coefficient Distributions"), ""))))
# mean, stdev, skew of each beta distribution
c %>% melt(.) %>%
group_by(variable) %>%
summarise(beta_mean = round(mean(value), 3),
beta_sd = round(sd(value), 3),
beta_skew = round(e1071::skewness(value), 3)) %>%
mutate(conf_int_95 = paste0('(', round(beta_mean - beta_sd*1.96, 2), ', ',
round(beta_mean + beta_sd*1.96, 2), ')')) %>%
as.data.frame
| /fdi_model.R | no_license | cdschaller/fdi_code | R | false | false | 6,980 | r | #### Econometrics Project - 12/4/2015 #####
options(warn=-1)
suppressMessages(library(dplyr))
suppressMessages(library(magrittr))
suppressMessages(library(car))
# **** place the file path to the dataset here ****
file_path <- "~/Desktop/full_dataset_final.csv"
# Load, clean dataset ####
fdi <- read.csv(file_path, stringsAsFactors=FALSE)
fdi[,-1] <- apply(fdi[,-1], 2, as.numeric)
fdi %<>% rename(electricity = electricitiy)
# replaced education with mean for: Australia, Bahrain, Bosnia and Herzegovina,
# Brazil, France, United Kingdom, Singapore, New Zealand
# imputed development vars for: Bahamas (2) & Qatar (1)
fdi$Dev.DV[fdi$Country == 'Qatar'] <- 1
fdi$Dev.DV[fdi$Country == 'Bahamas, The'] <- 2
fdi %<>% group_by(Dev.DV) %>%
summarise(edu = mean(education, na.rm = TRUE)) %>%
inner_join(fdi, .) %>%
mutate(education = ifelse(is.na(education), edu, education)) %>%
select(-edu)
# save full file
full <- fdi
fdi <- fdi[complete.cases(fdi),]
# make Dummy Variables factors
DVs <- which(grepl('DV$', colnames(fdi)))
fdi[,DVs] <- apply(fdi[,DVs], 2, as.factor)
rm(DVs)
# Model 1 (all variables - no logs) ####
fit1 <- lm(FDI.sum ~ tax + warehouse + tariff + broadband + education + electricity +
corrupt + law + politic + REER.vol + REER.avg + REER.chg + resource +
infl + GDP.avg + GDP.PPP.chg + GDP.pc.lvl.avg + Openness + Credit +
Dev.DV + ER.DV + FDI.ctrl.DV, data = fdi)
summary(fit1)
summary(step(fit1, trace = F))
# Model 2 (Logs & Select variables) ####
# add DV for each individual levels Development and Exch rate DVs
fdi %<>% mutate(ER.float.DV = ER.DV == '3', ER.mngd.DV = ER.DV == '2',
Dev2.DV = Dev.DV == '2', Dev3.DV = Dev.DV == '3')
fit2 <- lm(log(FDI.sum+1) ~ log(tax+1) + log(warehouse+1) + tariff + broadband +
I((education > 100)*log(education)) + I((education <= 100)*education) +
log(101 - electricity) + corrupt + law + politic + log(REER.vol+1) + REER.avg + REER.chg + resource +
infl + GDP.avg + log(GDP.pc.lvl.avg+1) + log(Openness+1) + Credit +
ER.float.DV + ER.mngd.DV + FDI.ctrl.DV, data = fdi)
summary(fit2)
summary(step(fit2, trace = F))
# paring down the stepwise ####
f <- 'log(FDI.sum+1) ~ log(warehouse + 1) + resource +
I((education > 100)*log(education + 1)) +
I((education <= 100)*education) +
politic + ER.float.DV + gdp.growth.2002 +
log(GDP.pc.lvl.2002 + 1) + log(Openness + 1)'
f <- as.formula(f)
fit3 <- lm(f, fdi)
summary(fit3)
summary(update(fit3, .~. -resource))
summary(update(fit3, .~. -resource -log(warehouse+1)))
summary(update(fit3, .~. -resource -log(warehouse+1) -I((education <= 100) * education)))
# go back to full dataset and get records where our explanatory variables are populated
# increases sample size significantly
full %<>% select(Country, FDI.sum, education, politic, ER.DV,
gdp.growth.2002, GDP.pc.lvl.2002, Openness) %>%
mutate(ER.float.DV = ER.DV == 3) %>%
select(-ER.DV) %>%
filter(complete.cases(.))
mod <- lm(log(FDI.sum+1) ~ I((education > 100)*log(education + 1)) +
politic + ER.float.DV + gdp.growth.2002 +
log(GDP.pc.lvl.2002 + 1) + log(Openness + 1), full)
summary(mod)
# covariance matrix
options(digits = 2)
vcov(mod)
# anova table
anova(mod)
## Distribution of Residuals ####
## leaving out studentized
resid <- residuals(mod)
rs <- e1071::skewness(resid)
#histogram
hist(resid, col = 'grey', 10, main = 'Distribution of Residuals',
xlab = paste('Skewness:', round(rs, 4)))
# qqPlot
qqPlot(mod)
# kernel density
d <- density(resid)
plot(d, main="Kernel Density of Residuals")
polygon(d, col="red", border="black")
# Tests ####
## Heteroskedasticity
# White test for heteroskedasticity.
#
# Auxiliary regression of the squared residuals on the fitted values and
# their squares; the LM statistic n * R^2 is compared against a chi-squared
# distribution with (k - 1) degrees of freedom, where k is the number of
# coefficients in the fitted model.
#
# model: a fitted `lm` object.
# Returns the p-value of the test (small values suggest heteroskedasticity).
white <- function(model) {
  u <- residuals(model)^2   # squared residuals
  yh <- fitted(model)       # fitted values
  ru2 <- summary(lm(u ~ yh + I(yh^2)))$r.squared
  l <- nrow(model$model) * ru2  # LM statistic: n * R^2
  # BUG FIX: degrees of freedom previously referenced the global `mod`
  # instead of the `model` argument, so the function broke (or silently used
  # the wrong df) whenever `mod` was not the model being tested.
  p <- 1 - pchisq(l, length(coef(model)) - 1)
  return(p)
}
# Heteroskedasticity tests: each call prints a p-value
white(mod) # White
lmtest::bptest(mod)$p.value # Breusch-Pagan
lmtest::gqtest(mod)$p.value # Goldfeld-Quandt
## Multi-Collinearity
vif(mod) # variance inflation factors
# pairwise correlations of the model frame (rough collinearity check)
cor(mod$model)
## Model Specification (Ramsey Reset test)
lmtest::reset(mod)
## Endogeneity
# NOTE(review): `mod` already uses log(GDP.pc.lvl.2002 + 1) and has no
# GDP.pc.lvl.avg term, so this update may drop nothing and re-add an existing
# term -- confirm the intended alternative specification
mod_new <- update(mod, .~. -log(GDP.pc.lvl.avg+1) + log(GDP.pc.lvl.2002+1) )
# df for the test = rank of the difference of the two covariance matrices
r <- Matrix::rankMatrix(vcov(mod_new) - vcov(mod))[1]
# Hausman-Wu test
hw <- t(coef(mod) - coef(mod_new)) %*%
MASS::ginv(vcov(mod_new) - vcov(mod)) %*%
t(t(coef(mod) - coef(mod_new)))
1-pchisq(hw, r) # not finding endogeneity
# histograms, plots ####
suppressMessages(require(ggplot2))
suppressMessages(require(reshape2))
# histogram of explanatory variables (pre-tranformations)
full %>%
select(-Country, -ER.float.DV) %>%
melt(.) %>%
ggplot(., aes(x = value)) +
facet_wrap(~variable, scales = "free") +
geom_histogram() +
xlab("Variable Values") +
ylab("Frequency") +
ggtitle("Explanatory Variable Distributions")
# histogram of explanatory variables (post-transformation)
mod$model %>%
select(-ER.float.DV) %>%
melt(.) %>%
filter(!(grepl('education', variable) & value == 0)) %>%
ggplot(., aes(x = value, color = ifelse(grepl('log', variable), 'red', NA),
fill = ifelse(grepl('log', variable), 'red', NA))) +
facet_wrap(~variable, scales = "free") +
geom_histogram() +
xlab("Post Transformation Values") +
ylab("Frequency") +
ggtitle(expression(atop("Explanatory Variable Distributions", atop(italic("Post Transformation"), "")))) +
theme(legend.position = 'none')
# plot of explanatory variables vs FDI (post-transformation)
mod$model %>%
select(-ER.float.DV) %>%
mutate(Country = full$Country) %>%
melt(., id.vars = 'Country') %>%
filter(!(grepl('education', variable) & value == 0)) %>%
inner_join(., select(fdi, Country, FDI.sum)) %>%
filter(variable != 'log(FDI.sum + 1)') %>%
ggplot(., aes(x = value, y = log(FDI.sum +1))) +
facet_wrap(~variable, scales = 'free') +
geom_point() +
geom_smooth(method = 'loess') + xlab("") +
ggtitle(expression(atop("X vs FDI Scatter Plots", atop(italic("with Conditional Mean"), ""))))
# histogram of coefficients after repeated sampling
n <- nrow(full)
p <- 1 # size of sample
c <- replicate(5000, coef(update(mod, subset = sample(n, n*p, replace = TRUE))))
c <- as.data.frame(t(c))
# histogram of the bootstrap coefficient estimates, one panel per coefficient
# (fixed typo in plot title: "Reapeated" -> "Repeated")
c %>% melt(.) %>%
  ggplot(., aes(x = value)) +
  facet_wrap(~variable, scales = "free") +
  geom_histogram() + xlab("") +
  ggtitle(expression(atop("Repeated Sampling", atop(italic("Coefficient Distributions"), ""))))
# mean, stdev, skew of each beta distribution
c %>% melt(.) %>%
group_by(variable) %>%
summarise(beta_mean = round(mean(value), 3),
beta_sd = round(sd(value), 3),
beta_skew = round(e1071::skewness(value), 3)) %>%
mutate(conf_int_95 = paste0('(', round(beta_mean - beta_sd*1.96, 2), ', ',
round(beta_mean + beta_sd*1.96, 2), ')')) %>%
as.data.frame
|
# Run ACAT on miRNA families
# Author: Michael Geaghan(2020)
source("acat.R")
load("dbmts.RData")
# Read in GWAS data
gwas_data <- list(adhd.2018 = './adhd.2018.gwas.RData',
an.2019 = './an.2019.gwas.RData',
asd.2019 = './asd.2019.gwas.RData',
bip.2018 = './bip.2018.gwas.RData',
mdd.2019 = './mdd.2019.gwas.RData',
ocd.2018 = './ocd.2018.gwas.RData',
ptsd.2019 = './ptsd.2019.gwas.RData',
scz.clozuk2018 = './scz.clozuk2018.gwas.RData',
ts.2019 = './ts.2019.gwas.RData')
mirna.families <- unique(dbmts.agree.best.best.0.2.short$mirna.family)
acat_df <- list()
for(g in names(gwas_data)) {
acat_df[[g]] <- data.frame(fam = mirna.families, mbsv = NA, mbsv.gtex.eqtl = NA, mbsv.gtex.brain.eqtl = NA, mbsv.psychencode.eqtl = NA, row.names = 1)
}
acat_all_df <- data.frame(gwas = names(gwas_data), mbsv = NA, mbsv.gtex.eqtl = NA, mbsv.gtex.brain.eqtl = NA, mbsv.psychencode.eqtl = NA, row.names = 1)
minP <- .Machine$double.xmin
maxP <- 1 - .Machine$double.eps
# Main loop: run ACAT once per GWAS dataset, for each SNP-filter class.
for(g in names(gwas_data)) {
# load() brings this GWAS's `gwas.df` data frame into the workspace
load(gwas_data[[g]])
# Keep non-MHC SNPs with a defined variable class, drop the "mbsv.any.eqtl"
# class, and require |diffScore| >= 0.2 for all non-"GWAS" rows.
gwas.df <- gwas.df[!is.na(gwas.df$non.mhc) &
!is.na(gwas.df$variable) &
gwas.df$non.mhc &
gwas.df$variable != "mbsv.any.eqtl" &
(gwas.df$variable == "GWAS" | (!is.na(gwas.df$diffScore) & abs(gwas.df$diffScore) >= 0.2)),]
filters <- c("mbsv", "mbsv.gtex.eqtl", "mbsv.gtex.brain.eqtl", "mbsv.psychencode.eqtl")
for(f in filters) {
tmp_g <- gwas.df[gwas.df$variable == f,]
tmp_d <- dbmts.agree.best.best.0.2.short
# All MBSVs (weighted by |diffScore|)
# clamp p-values away from exactly 0/1 so ACAT's Cauchy transform stays finite
p = tmp_g$P[tmp_g$SNP %in% tmp_d$dbsnp]
p[p < minP] <- minP
p[p > maxP] <- maxP
w = abs(tmp_g$diffScore[tmp_g$SNP %in% tmp_d$dbsnp])
acat_all_df[g,f] = ACAT(p, w)
# Individual families (weighted by |diffScore|): one combined p-value per
# miRNA family, restricted to that family's SNPs
acat_df[[g]][f] <- do.call(c, lapply(mirna.families, function(x) {
tmp_d_m <- tmp_d[tmp_d$mirna.family == x,]
p = tmp_g$P[tmp_g$SNP %in% tmp_d_m$dbsnp]
p[p < minP] <- minP
p[p > maxP] <- maxP
w = abs(tmp_g$diffScore[tmp_g$SNP %in% tmp_d_m$dbsnp])
return(ACAT(p, w))
}))
}
}
save(acat_all_df, acat_df, mirna.families, file = "acat.RData")
write.table(acat_all_df, "acat.all.txt", quote = FALSE, sep = "\t", row.names = TRUE, col.names = TRUE)
for (g in names(acat_df)) {
write.table(acat_df[[g]], paste("acat.", g, ".txt", sep = ""), quote = FALSE, sep = "\t", row.names = TRUE, col.names = TRUE)
}
| /analysis/mbsv.analysis.9a.R | no_license | mgeaghan/mbsv | R | false | false | 2,603 | r | # Run ACAT on miRNA families
# Author: Michael Geaghan(2020)
source("acat.R")
load("dbmts.RData")
# Read in GWAS data
gwas_data <- list(adhd.2018 = './adhd.2018.gwas.RData',
an.2019 = './an.2019.gwas.RData',
asd.2019 = './asd.2019.gwas.RData',
bip.2018 = './bip.2018.gwas.RData',
mdd.2019 = './mdd.2019.gwas.RData',
ocd.2018 = './ocd.2018.gwas.RData',
ptsd.2019 = './ptsd.2019.gwas.RData',
scz.clozuk2018 = './scz.clozuk2018.gwas.RData',
ts.2019 = './ts.2019.gwas.RData')
mirna.families <- unique(dbmts.agree.best.best.0.2.short$mirna.family)
acat_df <- list()
for(g in names(gwas_data)) {
acat_df[[g]] <- data.frame(fam = mirna.families, mbsv = NA, mbsv.gtex.eqtl = NA, mbsv.gtex.brain.eqtl = NA, mbsv.psychencode.eqtl = NA, row.names = 1)
}
acat_all_df <- data.frame(gwas = names(gwas_data), mbsv = NA, mbsv.gtex.eqtl = NA, mbsv.gtex.brain.eqtl = NA, mbsv.psychencode.eqtl = NA, row.names = 1)
minP <- .Machine$double.xmin
maxP <- 1 - .Machine$double.eps
for(g in names(gwas_data)) {
load(gwas_data[[g]])
gwas.df <- gwas.df[!is.na(gwas.df$non.mhc) &
!is.na(gwas.df$variable) &
gwas.df$non.mhc &
gwas.df$variable != "mbsv.any.eqtl" &
(gwas.df$variable == "GWAS" | (!is.na(gwas.df$diffScore) & abs(gwas.df$diffScore) >= 0.2)),]
filters <- c("mbsv", "mbsv.gtex.eqtl", "mbsv.gtex.brain.eqtl", "mbsv.psychencode.eqtl")
for(f in filters) {
tmp_g <- gwas.df[gwas.df$variable == f,]
tmp_d <- dbmts.agree.best.best.0.2.short
# All MBSVs (weighted by |diffScore|)
p = tmp_g$P[tmp_g$SNP %in% tmp_d$dbsnp]
p[p < minP] <- minP
p[p > maxP] <- maxP
w = abs(tmp_g$diffScore[tmp_g$SNP %in% tmp_d$dbsnp])
acat_all_df[g,f] = ACAT(p, w)
# Individual families (weighted by |diffScore|)
acat_df[[g]][f] <- do.call(c, lapply(mirna.families, function(x) {
tmp_d_m <- tmp_d[tmp_d$mirna.family == x,]
p = tmp_g$P[tmp_g$SNP %in% tmp_d_m$dbsnp]
p[p < minP] <- minP
p[p > maxP] <- maxP
w = abs(tmp_g$diffScore[tmp_g$SNP %in% tmp_d_m$dbsnp])
return(ACAT(p, w))
}))
}
}
save(acat_all_df, acat_df, mirna.families, file = "acat.RData")
write.table(acat_all_df, "acat.all.txt", quote = FALSE, sep = "\t", row.names = TRUE, col.names = TRUE)
for (g in names(acat_df)) {
write.table(acat_df[[g]], paste("acat.", g, ".txt", sep = ""), quote = FALSE, sep = "\t", row.names = TRUE, col.names = TRUE)
}
|
\name{primeFactorize}
\alias{primeFactorize}
\title{
Vectorized Prime Factorization
}
\description{
Implementation of Pollard's rho algorithm for generating the prime factorization. The algorithm is based on the "factorize.c" source file from the gmp library found here \url{http://gmplib.org}.
}
\usage{
primeFactorize(v, namedList = FALSE, nThreads = NULL)
}
\arguments{
\item{v}{Vector of integers or numeric values. Non-integral values will be coerced to whole numbers.}
\item{namedList}{Logical flag. If \code{TRUE} and the \code{length(v) > 1}, a named list is returned. The default is \code{FALSE}.}
\item{nThreads}{Specific number of threads to be used. The default is \code{NULL}.}
}
\details{
As noted in the Description section above, this algorithm is based on the "factorize.c" source code from the gmp library. Much of the code in RcppAlgos::primeFactorize is a straightforward translation from multiple precision C data types to standard C++ data types. A crucial part of the algorithm's efficiency is based on quickly determining \href{https://en.wikipedia.org/wiki/Primality_test}{primality}, which is easily computed with gmp. However, with standard C++, this is quite challenging. Much of the research for RcppAlgos::primeFactorize was focused on developing an algorithm that could accurately and efficiently compute primality.
For more details, see the documentation for \code{\link{isPrimeRcpp}}.
}
\value{
\itemize{
\item{Returns an unnamed vector if \code{length(v) == 1} regardless of the value of \code{namedList}. If \eqn{v < 2^{31}}{v < 2^31}, the class of the returned vector will be integer, otherwise the class will be numeric.}
\item{If \code{length(v) > 1}, a named/unnamed list of vectors will be returned. If \code{max(v)} \eqn{< 2^{31}}{< 2^31}, the class of each vector will be integer, otherwise the class will be numeric.}
}
}
\references{
\itemize{
\item{\href{https://en.wikipedia.org/wiki/Pollard\%27s_rho_algorithm}{Pollard's rho algorithm}}
\item{\href{https://en.wikipedia.org/wiki/Miller-Rabin_primality_test}{Miller-Rabin primality test}}
\item{\href{https://codereview.stackexchange.com/questions/186751/accurate-modular-arithmetic-with-double-precision}{Accurate Modular Arithmetic with Double Precision}}
\item{\href{https://en.wikipedia.org/wiki/Double-precision_floating-point_format}{53-bit significand precision}}
}
}
\author{
Joseph Wood
}
\note{
The maximum value for each element in \eqn{v} is \eqn{2^{53} - 1}{2^53 - 1}.
}
\seealso{
\code{\link{primeFactorizeSieve}}, \code{\link[gmp]{factorize}}, \code{\link[numbers]{primeFactors}}
}
\examples{
## Get the prime factorization of a single number
primeFactorize(10^8)
## Or get the prime factorization of many numbers
set.seed(29)
myVec <- sample(-1000000:1000000, 1000)
system.time(pFacs <- primeFactorize(myVec))
## Return named list
pFacsWithNames <- primeFactorize(myVec, namedList = TRUE)
## Using nThreads
system.time(primeFactorize(myVec, nThreads = 2))
}
| /fuzzedpackages/RcppAlgos/man/primeFactorize.Rd | no_license | akhikolla/testpackages | R | false | false | 2,997 | rd | \name{primeFactorize}
\alias{primeFactorize}
\title{
Vectorized Prime Factorization
}
\description{
Implementation of Pollard's rho algorithm for generating the prime factorization. The algorithm is based on the "factorize.c" source file from the gmp library found here \url{http://gmplib.org}.
}
\usage{
primeFactorize(v, namedList = FALSE, nThreads = NULL)
}
\arguments{
\item{v}{Vector of integers or numeric values. Non-integral values will be cured to whole numbers.}
\item{namedList}{Logical flag. If \code{TRUE} and the \code{length(v) > 1}, a named list is returned. The default is \code{FALSE}.}
\item{nThreads}{Specific number of threads to be used. The default is \code{NULL}.}
}
\details{
As noted in the Description section above, this algorithm is based on the "factorize.c" source code from the gmp library. Much of the code in RcppAlgos::primeFactorize is a straightforward translation from multiple precision C data types to standard C++ data types. A crucial part of the algorithm's efficiency is based on quickly determining \href{https://en.wikipedia.org/wiki/Primality_test}{primality}, which is easily computed with gmp. However, with standard C++, this is quite challenging. Much of the research for RcppAlgos::primeFactorize was focused on developing an algorithm that could accurately and efficiently compute primality.
For more details, see the documentation for \code{\link{isPrimeRcpp}}.
}
\value{
\itemize{
\item{Returns an unnamed vector if \code{length(v) == 1} regardless of the value of \code{namedList}. If \eqn{v < 2^{31}}{v < 2^31}, the class of the returned vector will be integer, otherwise the class will be numeric.}
\item{If \code{length(v) > 1}, a named/unnamed list of vectors will be returned. If \code{max(bound1, bound2)} \eqn{< 2^{31}}{< 2^31}, the class of each vector will be integer, otherwise the class will be numeric.}
}
}
\references{
\itemize{
\item{\href{https://en.wikipedia.org/wiki/Pollard\%27s_rho_algorithm}{Pollard's rho algorithm}}
\item{\href{https://en.wikipedia.org/wiki/Miller-Rabin_primality_test}{Miller-Rabin primality test}}
\item{\href{https://codereview.stackexchange.com/questions/186751/accurate-modular-arithmetic-with-double-precision}{Accurate Modular Arithmetic with Double Precision}}
\item{\href{https://en.wikipedia.org/wiki/Double-precision_floating-point_format}{53-bit significand precision}}
}
}
\author{
Joseph Wood
}
\note{
The maximum value for each element in \eqn{v} is \eqn{2^{53} - 1}{2^53 - 1}.
}
\seealso{
\code{\link{primeFactorizeSieve}}, \code{\link[gmp]{factorize}}, \code{\link[numbers]{primeFactors}}
}
\examples{
## Get the prime factorization of a single number
primeFactorize(10^8)
## Or get the prime factorization of many numbers
set.seed(29)
myVec <- sample(-1000000:1000000, 1000)
system.time(pFacs <- primeFactorize(myVec))
## Return named list
pFacsWithNames <- primeFactorize(myVec, namedList = TRUE)
## Using nThreads
system.time(primeFactorize(myVec, nThreads = 2))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zeitzeiger_cv.R
\name{zeitzeigerSpcCv}
\alias{zeitzeigerSpcCv}
\title{Calculate sparse principal components of time-dependent variation
on cross-validation.}
\usage{
zeitzeigerSpcCv(fitResultList, nTime = 10, useSpc = TRUE, sumabsv = 1,
orth = TRUE)
}
\arguments{
\item{fitResultList}{Result from \code{zeitzeigerFitCv}.}
\item{nTime}{Number of time-points by which to discretize the time-dependent
behavior of each feature. Corresponds to the number of rows in the matrix for
which the SPCs will be calculated.}
\item{sumabsv}{L1-constraint on the SPCs, passed to \code{SPC}.}
\item{orth}{Logical indicating whether to require left singular vectors
be orthogonal to each other, passed to \code{SPC}.}
\item{useSpc}{Logical indicating whether to use \code{SPC} (default) or \code{svd}.}
}
\value{
A list consisting of the result from \code{zeitzeigerSpc} for each fold.
}
\description{
\code{zeitzeigerSpcCv} calls \code{zeitzeigerFit} for each fold of
cross-validation. This function uses \code{doParallel}, so prior to
running this function, use \code{registerDoParallel} to set the number of cores.
}
| /man/zeitzeigerSpcCv.Rd | no_license | xflicsu/zeitzeiger | R | false | true | 1,189 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zeitzeiger_cv.R
\name{zeitzeigerSpcCv}
\alias{zeitzeigerSpcCv}
\title{Calculate sparse principal components of time-dependent variation
on cross-validation.}
\usage{
zeitzeigerSpcCv(fitResultList, nTime = 10, useSpc = TRUE, sumabsv = 1,
orth = TRUE)
}
\arguments{
\item{fitResultList}{Result from \code{zeitzeigerFitCv}.}
\item{nTime}{Number of time-points by which to discretize the time-dependent
behavior of each feature. Corresponds to the number of rows in the matrix for
which the SPCs will be calculated.}
\item{sumabsv}{L1-constraint on the SPCs, passed to \code{SPC}.}
\item{orth}{Logical indicating whether to require left singular vectors
be orthogonal to each other, passed to \code{SPC}.}
\item{useSPC}{Logical indicating whether to use \code{SPC} (default) or \code{svd}.}
}
\value{
A list consisting of the result from \code{zeitzeigerSpc} for each fold.
}
\description{
\code{zeitzeigerSpcCv} calls \code{zeitzeigerFit} for each fold of
cross-validation. This function uses \code{doParallel}, so prior to
running this function, use \code{registerDoParallel} to set the number of cores.
}
|
# shinytest script for the async-download app.
# Fixed seed and a generous loadTimeout keep the snapshots deterministic and
# give the app time to start.
app <- ShinyDriver$new("../../", loadTimeout = 15000, seed = 100, shinyOptions = list(display.mode = "normal"))
app$snapshotInit("mytest")
# baseline download before triggering the error path
app$snapshotDownload("download")
# `throw = TRUE` presumably makes the download handler raise -- confirm in app.R
app$setInputs(throw = TRUE)
app$snapshotDownload("download")
# allow async work to settle before the final snapshot
Sys.sleep(4)
app$snapshot()
| /shinycoreci-apps-master/shinycoreci-apps-master/124-async-download/tests/shinytests/mytest.R | no_license | RohanYashraj/R-Tutorials-Code | R | false | false | 262 | r | app <- ShinyDriver$new("../../", loadTimeout = 15000, seed = 100, shinyOptions = list(display.mode = "normal"))
app$snapshotInit("mytest")
app$snapshotDownload("download")
app$setInputs(throw = TRUE)
app$snapshotDownload("download")
Sys.sleep(4)
app$snapshot()
|
# Fit a closed-population spatially explicit capture-recapture (SECR) model.
library(secr)
# inputs.RData is expected to supply `cptr_hst` (capture histories) and
# `maskClosed` (habitat mask) -- TODO confirm against the script that creates it
load("inputs.RData")
# is density affected by site
# D~session and g0~session let density and baseline detection vary by session;
# detectfn = 0 with the "_HN" object-name suffix suggests the halfnormal
# detection function (confirm against secr docs); CL = FALSE fits the full
# likelihood so D is estimated directly.
mClosedD2_HN <- secr.fit(cptr_hst, model=list(D~session, g0~session), mask=maskClosed, detectfn=0, CL=FALSE)
saveRDS(mClosedD2_HN, file = "mClosedD2_HN.rds") | /hpc61.R | no_license | samual-williams/Hyaena-density | R | false | false | 225 | r | library(secr)
load("inputs.RData")
# is density affected by site
mClosedD2_HN <- secr.fit(cptr_hst, model=list(D~session, g0~session), mask=maskClosed, detectfn=0, CL=FALSE)
saveRDS(mClosedD2_HN, file = "mClosedD2_HN.rds") |
#reading the data
#reding the table "features.txt" to rename the columns of the X_test and X_train datasets
features <- read.table("features.txt")
#selecting only the name of those features
feat <- as.character(features[,2])
#reading the name of the activities: to be used later to name
act <- read.table("activity_labels.txt")
#change to test subfolder
setwd("test")
#reading the subject data
sub <- read.table("subject_test.txt")
#rename the columns
colnames(sub) <- ("subject")
#reading the Acc and Gyro data
x <- read.table("X_test.txt")
#rename the columns
colnames(x) <- feat
#read the activity data
y <- read.table("y_test.txt")
#rename the column
colnames(y) <- ("activity")
#form the fisrt table_text
table_test <- cbind(sub,y,x)
setwd("..")
#change to train subfolder
setwd("train")
#reading the same three files to form the second table_train
sub <- read.table("subject_train.txt")
colnames(sub) <- ("subject")
x <- read.table("X_train.txt")
colnames(x) <- feat
y <- read.table("y_train.txt")
colnames(y) <- ("activity")
table_train <- cbind(sub,y,x)
##pay attention that the 4th step on the programing assignment has already been completed here by renaming the variables with descriptive names
setwd("..")
#making only one table
table <- rbind(table_test, table_train)
#removing unnecessary data
#the step one of the coursera programing assignment is here completed
#extract the column names from table (I culd have used the feat variable)
meas_name <- colnames(table)
# Step 2: keep only measurements on the mean and standard deviation.
# Vectorized replacement for the original grow-in-a-loop approach: grepl()
# evaluates the regex over every column name in one call, and a single
# logical subset avoids the O(n^2) cost of repeated cbind() calls (it also
# removes the dummy "empty" initialization column the loop needed).
keep <- grepl("mean\\(\\)|std\\(\\)", meas_name, perl = TRUE)
# first two columns are subject and activity; append the selected measures.
# Result is identical to the original `table2`: same columns, same order,
# same names.
table2 <- cbind(table[, 1:2], table[, keep, drop = FALSE])
#remove unnecesary data
#here the second task of the programming asignemtn is ready
#reset a counter
rows <- nrow(table2)
#make the loop to replace the number by the activity name
for (i in 1:rows){
#check the value
value <- table2[i,2]
#replace the value with string
table2[i,2] <- as.character(act[value,2])
}
#remove unnecesary variables
#completed the step number 4 of the programming assignment
#group first by subject and then by activty and obtain the mean value
av <- aggregate(table2[,3:68], list(table2$subject, table2$activity), mean)
av
##end of the 5th assignment
#clean up the memory
#rm(feat,sub,x,y,features,table_test, table_train,value,i,table, select,meas_name,names, column, rows, table2, act)
| /run_analysis.R | no_license | vivianel/Project_clean_data | R | false | false | 3,599 | r | #reading the data
#reding the table "features.txt" to rename the columns of the X_test and X_train datasets
features <- read.table("features.txt")
#selecting only the name of those features
feat <- as.character(features[,2])
#reading the name of the activities: to be used later to name
act <- read.table("activity_labels.txt")
#change to test subfolder
setwd("test")
#reading the subject data
sub <- read.table("subject_test.txt")
#rename the columns
colnames(sub) <- ("subject")
#reading the Acc and Gyro data
x <- read.table("X_test.txt")
#rename the columns
colnames(x) <- feat
#read the activity data
y <- read.table("y_test.txt")
#rename the column
colnames(y) <- ("activity")
#form the fisrt table_text
table_test <- cbind(sub,y,x)
setwd("..")
#change to train subfolder
setwd("train")
#reading the same three files to form the second table_train
sub <- read.table("subject_train.txt")
colnames(sub) <- ("subject")
x <- read.table("X_train.txt")
colnames(x) <- feat
y <- read.table("y_train.txt")
colnames(y) <- ("activity")
table_train <- cbind(sub,y,x)
##pay attention that the 4th step on the programing assignment has already been completed here by renaming the variables with descriptive names
setwd("..")
#making only one table
table <- rbind(table_test, table_train)
#removing unnecessary data
#the step one of the coursera programing assignment is here completed
#extract the column names from table (I culd have used the feat variable)
meas_name <- colnames(table)
#setting counter
column <- ncol(table)
#used for the size of the select dataset during initialization
rows <- nrow(table)
#initializing necessary variables
names <- "empty"
select <- data.frame(matrix(ncol = 1, nrow = rows))
#loop used to select the columns with names containing mean() or std()
for (i in 1:column){
# using regular expressions to select the write columns (result is 1 when it's TRUE)
if (length(grep("mean\\(\\)|std\\(\\)", meas_name[i], ignore.case = FALSE, perl = TRUE, value = FALSE, fixed = FALSE, useBytes = FALSE, invert = FALSE) != 0)) {
#select the right columns in the table
select <- cbind(select, table[,i])
#make a vector with the names selected
names <- c(names, meas_name[i])
}
}
#rename the columns of select (pay attention that names has one value that is "empty" to name the empty column used to initialize the data.frame
colnames(select) <- names
#reset the numver of columns <- counter
column <- ncol(select)
#make a new table with the fisrt results of table and select by cbind
table2 <- cbind(table[,1:2], select[,2:column])
#remove unnecesary data
#here the second task of the programming asignemtn is ready
#reset a counter
rows <- nrow(table2)
#make the loop to replace the number by the activity name
for (i in 1:rows){
#check the value
value <- table2[i,2]
#replace the value with string
table2[i,2] <- as.character(act[value,2])
}
#remove unnecesary variables
#completed the step number 4 of the programming assignment
#group first by subject and then by activty and obtain the mean value
av <- aggregate(table2[,3:68], list(table2$subject, table2$activity), mean)
av
##end of the 5th assignment
#clean up the memory
#rm(feat,sub,x,y,features,table_test, table_train,value,i,table, select,meas_name,names, column, rows, table2, act)
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
mtclimRun <- function(forcing_dataR, settings) {
.Call('_mtclimR_mtclimRun', PACKAGE = 'mtclimR', forcing_dataR, settings)
}
| /R/RcppExports.R | no_license | wietsefranssen/mtclimR | R | false | false | 257 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
mtclimRun <- function(forcing_dataR, settings) {
.Call('_mtclimR_mtclimRun', PACKAGE = 'mtclimR', forcing_dataR, settings)
}
|
#' Copy the demo script into the working directory
#'
#' Locates \code{demo_examples.R} inside the installed package and copies it
#' to the current working directory.
#' @export
#' @return The logical result of \code{file.copy}: \code{TRUE} if the file
#'   named \code{demo_examples.R} was copied to the working directory.
#' @examples
#' # demos()
#' @keywords functions
demos <- function() {
  demo_path <- system.file("extdata", "demo_examples.R", package = "oec")
  file.copy(from = demo_path, to = getwd())
}
| /r_package/R/demos.R | permissive | desval/oec | R | false | false | 278 | r | #' Copies the demo file
#' @export
#' @return Copies a file named \code{demo_examples.R} to the working directory.
#' @examples
#' # demos()
#' @keywords functions
demos <- function() {
file.copy(from=system.file("extdata", "demo_examples.R", package = "oec"), to=getwd())
}
|
\name{vareff-methods}
\docType{methods}
\alias{vareff-methods}
\alias{vareff,cold-method}
\title{Methods for function \code{vareff}}
\description{ Methods for function \code{vareff} extracting the variance estimates of random effects of a fitted model object. }
\section{Methods}{
\describe{
\item{\code{signature(object="cold")}:}{\code{vareff} for \code{\link{cold}} object.}
}}
\examples{
##### data = seizure
### indR
seiz0R <- cold(y ~ lage + lbase + trt + trt:lbase + v4, random = ~ 1,
data = seizure, dependence = "indR")
vareff(seiz0R)
}
\keyword{methods}
| /man/vareff-methods.Rd | no_license | cran/cold | R | false | false | 608 | rd | \name{vareff-methods}
\docType{methods}
\alias{vareff-methods}
\alias{vareff,cold-method}
\title{Methods for function \code{vareff}}
\description{ Methods for function \code{vareff} extracting the variance estimates of random effects of a fitted model object. }
\section{Methods}{
\describe{
\item{\code{signature(object="cold")}:}{\code{vareff} for \code{\link{cold}} object.}
}}
\examples{
##### data = seizure
### indR
seiz0R <- cold(y ~ lage + lbase + trt + trt:lbase + v4, random = ~ 1,
data = seizure, dependence = "indR")
vareff(seiz0R)
}
\keyword{methods}
|
library(here)
library(jsonlite)
source(here("Automation/00_Functions_automation.R"))
# assigning Drive user in case the script is verified manually
if (!"email" %in% ls()){
email <- "cimentadaj@gmail.com"
}
# info country and N drive address
ctr <- "Venezuela"
dir_n <- "N:/COVerAGE-DB/Automation/Hydra/"
# Drive credentials
drive_auth(email = email)
gs4_auth(email = email)
VE_rubric <- get_input_rubric() %>% filter(Short == "VE")
ss_i <- VE_rubric %>% dplyr::pull(Sheet)
ss_db <- VE_rubric %>% dplyr::pull(Source)
# reading data from Montreal and last date entered
db_drive <- read_sheet(ss_i, sheet = "database")
last_date_drive <-
db_drive %>%
mutate(date_f = dmy(Date)) %>%
dplyr::pull(date_f) %>%
max()
# reading data from the website
r <- GET("https://covid19.patria.org.ve/api/v1/summary")
a <- httr::content(r, "text", encoding = "ISO-8859-1")
b <- fromJSON(a)
r2 <- GET("https://covid19.patria.org.ve/api/v1/timeline")
a2 <- httr::content(r2, "text", encoding = "ISO-8859-1")
b2 <- fromJSON(a2)
date_f <-
b2 %>%
dplyr::pull(Date) %>%
max()
d <- paste(sprintf("%02d", day(date_f)), sprintf("%02d", month(date_f)), year(date_f), sep = ".")
if (date_f > last_date_drive) {
out <-
b$Confirmed$ByAgeRange %>%
bind_cols %>%
gather(key = age_g, value = Value) %>%
separate(age_g, c("Age", "res")) %>%
select(-res) %>%
bind_rows(tibble(Age = "TOT",
Value = b$Confirmed$Count)) %>%
mutate(Sex = "b") %>%
bind_rows(tibble(Age = "TOT",
Sex = c("f", "m"),
Value = c(b$Confirmed$ByGender$female, b$Confirmed$ByGender$male))) %>%
mutate(Measure = "Cases") %>%
bind_rows(tibble(Age = "TOT",
Sex = c("b"),
Measure = "Deaths",
Value = b$Deaths$Count)) %>%
mutate(Region = "All",
Date = d,
Country = "Venezuela",
Code = paste0("VE", Date),
AgeInt = case_when(Age == "TOT" ~ NA_real_,
Age == "90" ~ 15,
TRUE ~ 10),
Metric = "Count") %>%
select(Country, Region, Code, Date, Sex, Age, AgeInt, Metric, Measure, Value)
out
############################################
#### uploading database to Google Drive ####
############################################
# This command append new rows at the end of the sheet
sheet_append(out,
ss = ss_i,
sheet = "database")
log_update(pp = ctr, N = nrow(out))
############################################
#### uploading metadata to Google Drive ####
############################################
data_source <- paste0(dir_n,
"Data_sources/",
ctr,
"/",
ctr,
"_data_",
today(),
".txt")
writeLines(a, data_source)
} else {
cat(paste0("no new updates so far, last date: ", date_f))
log_update(pp = "Venezuela", N = 0)
}
| /Automation/00_hydra/Venezuela.R | permissive | yangboyubyron/covid_age | R | false | false | 3,106 | r | library(here)
library(jsonlite)
source(here("Automation/00_Functions_automation.R"))
# assigning Drive user in case the script is verified manually
if (!"email" %in% ls()){
email <- "cimentadaj@gmail.com"
}
# info country and N drive address
ctr <- "Venezuela"
dir_n <- "N:/COVerAGE-DB/Automation/Hydra/"
# Drive credentials
drive_auth(email = email)
gs4_auth(email = email)
VE_rubric <- get_input_rubric() %>% filter(Short == "VE")
ss_i <- VE_rubric %>% dplyr::pull(Sheet)
ss_db <- VE_rubric %>% dplyr::pull(Source)
# reading data from Montreal and last date entered
db_drive <- read_sheet(ss_i, sheet = "database")
last_date_drive <-
db_drive %>%
mutate(date_f = dmy(Date)) %>%
dplyr::pull(date_f) %>%
max()
# reading data from the website
r <- GET("https://covid19.patria.org.ve/api/v1/summary")
a <- httr::content(r, "text", encoding = "ISO-8859-1")
b <- fromJSON(a)
r2 <- GET("https://covid19.patria.org.ve/api/v1/timeline")
a2 <- httr::content(r2, "text", encoding = "ISO-8859-1")
b2 <- fromJSON(a2)
date_f <-
b2 %>%
dplyr::pull(Date) %>%
max()
d <- paste(sprintf("%02d", day(date_f)), sprintf("%02d", month(date_f)), year(date_f), sep = ".")
if (date_f > last_date_drive) {
out <-
b$Confirmed$ByAgeRange %>%
bind_cols %>%
gather(key = age_g, value = Value) %>%
separate(age_g, c("Age", "res")) %>%
select(-res) %>%
bind_rows(tibble(Age = "TOT",
Value = b$Confirmed$Count)) %>%
mutate(Sex = "b") %>%
bind_rows(tibble(Age = "TOT",
Sex = c("f", "m"),
Value = c(b$Confirmed$ByGender$female, b$Confirmed$ByGender$male))) %>%
mutate(Measure = "Cases") %>%
bind_rows(tibble(Age = "TOT",
Sex = c("b"),
Measure = "Deaths",
Value = b$Deaths$Count)) %>%
mutate(Region = "All",
Date = d,
Country = "Venezuela",
Code = paste0("VE", Date),
AgeInt = case_when(Age == "TOT" ~ NA_real_,
Age == "90" ~ 15,
TRUE ~ 10),
Metric = "Count") %>%
select(Country, Region, Code, Date, Sex, Age, AgeInt, Metric, Measure, Value)
out
############################################
#### uploading database to Google Drive ####
############################################
# This command append new rows at the end of the sheet
sheet_append(out,
ss = ss_i,
sheet = "database")
log_update(pp = ctr, N = nrow(out))
############################################
#### uploading metadata to Google Drive ####
############################################
data_source <- paste0(dir_n,
"Data_sources/",
ctr,
"/",
ctr,
"_data_",
today(),
".txt")
writeLines(a, data_source)
} else {
cat(paste0("no new updates so far, last date: ", date_f))
log_update(pp = "Venezuela", N = 0)
}
|
### ENSURE THAT THE SAME DATA SETS ARE CREATED EACH TIME THIS IS RUN
set.seed(12345)
### MAKE THE DIRECTORY TO HOLD THE DATA, SPLITS, AND RNG SEEDS
# showWarnings=FALSE keeps the script idempotent when the directories exist.
dir.create("problems", showWarnings=FALSE)
dir.create("problems/data", showWarnings=FALSE)
dir.create("problems/splits", showWarnings=FALSE)
dir.create("problems/seeds", showWarnings=FALSE)
### MAKE THE "MODIFIED BISHOP" DATA
mbishop <- function(x, a=0) 2.3 * (x - a) + sin(2 * pi * (x - a)^2)
# 100 training samples of 25 points each plus a shared 1000-point test set,
# all drawn from one pool of n uniform inputs on [0, 1].
n.samples <- 100
n.per.sample <- 25
n.test <- 1000
n <- n.samples * n.per.sample + n.test
p <- 1
X <- matrix(runif(n * p), ncol=p)
for (a in c(1, 5, 10)) {
  # Noise-free targets and their noisy observations (sd = 0.3).
  h <- mbishop(X, a)
  t <- h + rnorm(n, sd=0.3)
  # Bug fix: previously a second, unused rnorm draw was written instead of
  # t, so the saved targets did not match t and the RNG stream advanced
  # twice per dataset. Write t, as the friedman1 section below does.
  write.table(cbind(X, t), sprintf("problems/data/mbishop-%02d", a), row.names=FALSE, col.names=FALSE)
  # 256 random seed bytes per sample, for downstream experiment RNGs.
  writeBin(as.raw(as.integer(sample(0:255, 256 * n.samples, replace=TRUE))), sprintf("problems/seeds/mbishop-%02d", a))
  # One split string per sample: "0" = that sample's training rows,
  # "#" = other samples' rows (ignored), "1" = the shared test rows.
  # vapply (not sapply) pins the result to one character string per sample.
  idx <- vapply(seq_len(n.samples), function(i, nsamples, ntrain, ntest) {
    idx <- rep(c("#", "1"), times=c(nsamples*ntrain, ntest))
    idx[seq(ntrain) + (i - 1) * ntrain] <- "0"
    paste(idx, collapse="")
  }, character(1), nsamples=n.samples, ntrain=n.per.sample, ntest=n.test)
  write(idx, sprintf("problems/splits/mbishop-%02d", a), sep="\n")
}
### MAKE THE FRIEDMAN1 DATA
friedman1 <- function(X) 10 * sin(pi * X[, 1] * X[, 2]) + 20 * (X[, 3] - 0.5)^2 + 10 * X[, 4] + 5 * X[, 5]
# 100 training samples of 250 points each plus a shared 1000-point test
# set, drawn from one pool of n rows of 10 uniform predictors.
n.samples <- 100
n.per.sample <- 250
n.test <- 1000
n <- n.samples * n.per.sample + n.test
p <- 10
X <- matrix(runif(n * p), ncol=p)
# Noise-free response and unit-variance noisy observations.
h <- friedman1(X)
t <- h + rnorm(n)
write.table(cbind(X, t), "problems/data/friedman1", row.names=FALSE, col.names=FALSE)
# 256 random seed bytes per sample, for downstream experiment RNGs.
writeBin(as.raw(as.integer(sample(0:255, 256 * n.samples, replace=TRUE))), "problems/seeds/friedman1")
# One split string per sample: "0" = that sample's training rows,
# "#" = other samples' rows (ignored), "1" = the shared test rows.
# vapply (not sapply) pins the result to one character string per sample.
idx <- vapply(seq_len(n.samples), function(i, nsamples, ntrain, ntest) {
  idx <- rep(c("#", "1"), times=c(nsamples*ntrain, ntest))
  idx[seq(ntrain) + (i - 1) * ntrain] <- "0"
  paste(idx, collapse="")
}, character(1), nsamples=n.samples, ntrain=n.per.sample, ntest=n.test)
write(idx, "problems/splits/friedman1", sep="\n")
| /generate_data.R | no_license | grantdick/ECJ-Reproducibility-ErrorDecomp | R | false | false | 2,074 | r | ### ENSURE THAT THE SAME DATA SETS ARE CREATED EACH TIME THIS IS RUN
# Fixed seed so the same data sets are created each time this is run.
set.seed(12345)
### MAKE THE DIRECTORY TO HOLD THE DATA, SPLITS, AND RNG SEEDS
# showWarnings=FALSE keeps the script idempotent when the directories exist.
dir.create("problems", showWarnings=FALSE)
dir.create("problems/data", showWarnings=FALSE)
dir.create("problems/splits", showWarnings=FALSE)
dir.create("problems/seeds", showWarnings=FALSE)
### MAKE THE "MODIFIED BISHOP" DATA
mbishop <- function(x, a=0) 2.3 * (x - a) + sin(2 * pi * (x - a)^2)
# 100 training samples of 25 points each plus a shared 1000-point test set,
# all drawn from one pool of n uniform inputs on [0, 1].
n.samples <- 100
n.per.sample <- 25
n.test <- 1000
n <- n.samples * n.per.sample + n.test
p <- 1
X <- matrix(runif(n * p), ncol=p)
for (a in c(1, 5, 10)) {
  # Noise-free targets and their noisy observations (sd = 0.3).
  h <- mbishop(X, a)
  t <- h + rnorm(n, sd=0.3)
  # Bug fix: previously a second, unused rnorm draw was written instead of
  # t, so the saved targets did not match t and the RNG stream advanced
  # twice per dataset. Write t, as the friedman1 section below does.
  write.table(cbind(X, t), sprintf("problems/data/mbishop-%02d", a), row.names=FALSE, col.names=FALSE)
  # 256 random seed bytes per sample, for downstream experiment RNGs.
  writeBin(as.raw(as.integer(sample(0:255, 256 * n.samples, replace=TRUE))), sprintf("problems/seeds/mbishop-%02d", a))
  # One split string per sample: "0" = that sample's training rows,
  # "#" = other samples' rows (ignored), "1" = the shared test rows.
  # vapply (not sapply) pins the result to one character string per sample.
  idx <- vapply(seq_len(n.samples), function(i, nsamples, ntrain, ntest) {
    idx <- rep(c("#", "1"), times=c(nsamples*ntrain, ntest))
    idx[seq(ntrain) + (i - 1) * ntrain] <- "0"
    paste(idx, collapse="")
  }, character(1), nsamples=n.samples, ntrain=n.per.sample, ntest=n.test)
  write(idx, sprintf("problems/splits/mbishop-%02d", a), sep="\n")
}
### MAKE THE FRIEDMAN1 DATA
friedman1 <- function(X) 10 * sin(pi * X[, 1] * X[, 2]) + 20 * (X[, 3] - 0.5)^2 + 10 * X[, 4] + 5 * X[, 5]
# 100 training samples of 250 points each plus a shared 1000-point test
# set, drawn from one pool of n rows of 10 uniform predictors.
n.samples <- 100
n.per.sample <- 250
n.test <- 1000
n <- n.samples * n.per.sample + n.test
p <- 10
X <- matrix(runif(n * p), ncol=p)
# Noise-free response and unit-variance noisy observations.
h <- friedman1(X)
t <- h + rnorm(n)
write.table(cbind(X, t), "problems/data/friedman1", row.names=FALSE, col.names=FALSE)
# 256 random seed bytes per sample, for downstream experiment RNGs.
writeBin(as.raw(as.integer(sample(0:255, 256 * n.samples, replace=TRUE))), "problems/seeds/friedman1")
# One split string per sample: "0" = that sample's training rows,
# "#" = other samples' rows (ignored), "1" = the shared test rows.
# vapply (not sapply) pins the result to one character string per sample.
idx <- vapply(seq_len(n.samples), function(i, nsamples, ntrain, ntest) {
  idx <- rep(c("#", "1"), times=c(nsamples*ntrain, ntest))
  idx[seq(ntrain) + (i - 1) * ntrain] <- "0"
  paste(idx, collapse="")
}, character(1), nsamples=n.samples, ntrain=n.per.sample, ntest=n.test)
write(idx, "problems/splits/friedman1", sep="\n")
|
### ANALYSIS OF SPI TREND ########
#
# Edmondo Di Giuseppe
# April, 2017
###############################################################
# 1) read SPI(3)(4)(6)(12)_(distribution)_CRU.nc data in /Dati (period gen1901-dec2015: 1380 months)
# 2) select seasons:
# SPI3 -> Jan to Dec
# SPI6 -> Jan to Dec
# SPI12 -> Jan to Dec
# and five negative SPI classes:
# - moderately dry (-1>SPI>-1.5)
# - severely dry (-1.5>SPI>-2)
# - extremely dry (SPI< -2)
# - extremely&severely dry (-1.5 >SPI> -1000)
# - drought risk (-1 >SPI> -1000)
#
# 3) trend analysis for 12*3 time series listed in 2)
# 4) test of trend significance:::
# *1 for positive trend
# *-1 for negative trend
# *0 for no trend OR no drought events !!!!!!
#---------------------------------------------------------------
# Load libraries and helper functions:
#--------------------------
library(raster)
library(gdata) # for getYears()
# # graphics development libraries
library(ggplot2) # for advanced plots/maps
library(latticeExtra) # display of spatial maps of raster data
library(rasterVis) # makes Raster objects readable by ggplot2
# library(reshape2)
source("TrendFunctions.R")
#---------------
# Set dates, drought classes and test levels:
#------------------------------------------------
# Monthly time axis, Jan 1901 - Dec 2015 (1380 months), mid-month dates.
Tbegin<-as.Date(strptime("1901-01-16",format="%Y-%m-%d"))
Tend<-as.Date(strptime("2015-12-16",format="%Y-%m-%d"))
Tseries<-seq.Date(Tbegin,Tend,by="month")
#SPI time scale (filled in per input file inside the main loop):
scaleSPI<-c()
# Type-I error levels accepted by the trend significance test:
alpha=c(0.10,0.05)
# Drought class selection: each class is a pair of SPI bounds;
# -1000 acts as an effective -Inf lower bound.
moderateT<-c(-1.5,-1)
severeT<-c(-2,-1.5)
extremeT<-c(-1000,-2)
ExtremeSevereT<-c(-1000,-1.5)
DroughtT<-c(-1000,-1)
drought.classes<-list(moderateT,severeT,extremeT,ExtremeSevereT,DroughtT)
names(drought.classes)<-c("ModerateT","SevereT","ExtremeT","Severe+ExtremeT","DroughtT")
#------------------------------------------------
# World borders::
#--------------------------------------------
require(maptools)
world.map <- readShapeSpatial("Dati/TM_WORLD_BORDERS_SIMPL-0.3.shp")
world.map2 <- readShapeLines("Dati/TM_WORLD_BORDERS_SIMPL-0.3.shp")
class(world.map2)
proj4string(world.map2)<-"+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
#head(world.map@data)
require(maps)
# To overlay the WORLD map on the plots:
# NOTE(review): this overwrites the shapefile read into world.map above;
# only world.map2 is actually used for plot overlays later.
world.map <- maps::map("world")
world.map.shp <-maptools::map2SpatialLines(world.map)#, proj4string=projection(precsat.raster))
world<-maps::map("world",fill=TRUE,col="transparent", plot=F)
p4s<-CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
#SLworld<-map2SpatialLines(world,proj4string=p4s) # if LINES with PROVINCE detail are wanted
countries<-world$names
SPworld<-map2SpatialPolygons(world,IDs=countries,proj4string=p4s)
#------------------------------------------------
#------------------------------------------------
#Read data (period gen1901-dec2015: 1380 months):
#------------------------------------------------
DataList<-list.files(paste(getwd(),"/Outputs/SPI/IndexTimeSeries",sep=""),pattern = "Pearson")
# For every Pearson-fitted SPI brick: build one series per calendar month,
# count drought events per class, map the counts, then test the trend of
# the event counts at each significance level.
for(i in seq_along(DataList)){
  # Twelve monthly series per brick; the accumulation scale is inferred
  # from the file name (the "12" test runs before "3" so it is not shadowed).
  N.ts=12; monthSPI<-month.name
  if(grepl("12",DataList[i])==TRUE){scaleSPI=12}
  if(grepl("3",DataList[i])==TRUE){scaleSPI=3}
  if(grepl("4",DataList[i])==TRUE){scaleSPI=4}
  if(grepl("6",DataList[i])==TRUE){scaleSPI=6}
  # Load the monthly SPI brick and attach the 1901-2015 time axis.
  datax<-brick(paste(getwd(),"/Outputs/SPI/IndexTimeSeries/",DataList[i],sep=""))
  datax<-setZ(datax,Tseries)
  names(datax)<-paste("X",Tseries,sep="")
  for (m in 1:N.ts) {
    # Month selection: skip the first year (+12 offset) and then take one
    # layer per year for calendar month m.
    begin<-(match(monthSPI[m],month.name)+12)
    single_seq<-seq(begin,dim(datax)[3],by=12)
    # Season selection:
    datax1<-datax[[single_seq]]
    datax1<-setZ(datax1,as.Date(substr(names(datax1),2,10),format = "%Y.%m.%d"))
    # Cycling on drought classes:
    for (cl in seq_along(drought.classes)) {
      # Drop the trailing "T" from the class name for file/plot labels.
      ClassName<-substr(names(drought.classes[cl]),1,(nchar(names(drought.classes[cl]))-1))
      # Counting events by means of the plugged-in function (xSum=TRUE).
      EventsNum<-TrendSPI_Raster(x=datax1,filename=paste("Outputs/SPI/EventsNumber/CRU_EventsNumber_",monthSPI[m],"_SPI-Pearson-",scaleSPI,"_DroughtClass_",ClassName,".nc",sep = ""),
                                 threshold=drought.classes[[cl]],xSum=TRUE, monthSPI=monthSPI[m])
      #-----------------------------------------------------------
      # Maps (number of events)
      #-----------------------------------------------------------
      # Palette for the event counts:
      colori<-colorRampPalette(c("lightgreen","yellow","orange","red"))(20)
      p<-levelplot(EventsNum,
                   layer=1,
                   main=list(paste("Cumulated events in ",ClassName," class, ",monthSPI[m]," SPI-",scaleSPI, " months (",
                                   getYear(getZ(datax1)[1]),"-",getYear(getZ(datax1)[length(getZ(datax1))]),")",sep=""),cex=0.9),
                   interpolate=FALSE,
                   margin=F,
                   col.regions=colori,
                   contour=T,
                   xlab="Longitudine", ylab="Latitudine",
                   panel = panel.levelplot.raster
      )
      # Bug fix: the country-border overlay must be assigned back; the
      # original discarded `p + layer(...)`, so printed maps had no borders.
      p <- p + layer(sp.lines(world.map2,col="grey",fill="transparent"))
      trellis.device(png,file=paste("Plots/SPI/EventsNumber/SPI-",scaleSPI,"/CRU_EventsNumber_",monthSPI[m],"_SPI-Pearson-",scaleSPI,"_DroughtClass_",ClassName,".png",sep = ""))
      print(p)
      dev.off()
      # Testing trend by means of the plugged-in function, one run per
      # significance level.
      for(a in seq_along(alpha)){
        TrendTest<-TrendSPI_Raster(x=datax1,filename=paste("Outputs/SPI/TrendTest/CRU_TrendTest_alpha",alpha[a],"_",monthSPI[m],"_SPI-Pearson-",scaleSPI,"_DroughtClass_",ClassName,".nc",sep = ""),
                                   alpha=alpha[a],threshold=drought.classes[[cl]],xSum=FALSE, monthSPI=monthSPI[m])
        # Maps (time-trend): three-color key, neg / no sign / pos.
        colori<-colorRampPalette(c("green","lightgrey","red"))
        my.at=c(-1,-0.35,0.35,1)
        myColorkey <- list(at=my.at, ## where the colors change
                           labels=list(at=c(-0.70,0,0.70),labels=c("neg","no sign","pos"))) ## where to print labels
        p<-levelplot(TrendTest,
                     layer=1,
                     main=list(paste("Trend of events in ",ClassName," class, ",monthSPI[m]," SPI-Pearson-",scaleSPI, " months (",
                                     getYear(getZ(datax1)[1]),"-",getYear(getZ(datax1)[length(getZ(datax1))]),")",sep=""),
                               cex=0.9),
                     interpolate=FALSE,
                     margin=F,
                     col.regions=colori(3),
                     contour=T,
                     at=my.at,
                     colorkey=myColorkey,
                     xlab="Longitudine", ylab="Latitudine",
                     panel = panel.levelplot.raster
        )
        p <- p + layer(sp.lines(world.map2,col="grey",fill="transparent"))
        # Bug fix: the png file name previously pasted the whole `alpha`
        # vector (one path per level); use alpha[a] as in the .nc name.
        trellis.device(png,file=paste("Plots/SPI/TrendTest/SPI-",scaleSPI,"/CRU_TrendTest_alpha",alpha[a],"_",monthSPI[m],"_SPI-",scaleSPI,"_DroughtClass_",ClassName,".png",sep = ""))
        print(p)
        dev.off()
      } #closing "a" in alpha levels
    } #closing "cl" in drought.classes
  } #closing "m" in monthsSPI
} #closing "i" in DataList
| /CRU_SPI_TrendAnalysis.R | no_license | edidigiu/DROUGHT-SPEI | R | false | false | 7,941 | r | ### ANALISYS OF SPI TREND ########
#
# Edmondo Di Giuseppe
# April, 2017
###############################################################
# 1) read SPI(3)(4)(6)(12)_(distribution)_CRU.nc data in /Dati (period gen1901-dec2015: 1380 months)
# 2) select seasons:
# SPI3 -> Jan to Dec
# SPI6 -> Jan to Dec
# SPI12 -> Jan to Dec
# and five negative SPI classes:
# - moderately dry (-1>SPI>-1.5)
# - severely dry (-1.5>SPI>-2)
# - extremely dry (SPI< -2)
# - extremely&severely dry (-1.5 >SPI> -1000)
# - drought risk (-1 >SPI> -1000)
#
# 3) trend analysis for 12*3 time series listed in 2)
# 4) test of trend significance:::
# *1 for positive trend
# *-1 for negative trend
# *0 for no trend OR no drought events !!!!!!
#---------------------------------------------------------------
# Load libraries and helper functions:
#--------------------------
library(raster)
library(gdata) # for getYears()
# # graphics development libraries
library(ggplot2) # for advanced plots/maps
library(latticeExtra) # display of spatial maps of raster data
library(rasterVis) # makes Raster objects readable by ggplot2
# library(reshape2)
source("TrendFunctions.R")
#---------------
# Set dates, drought classes and test levels:
#------------------------------------------------
# Monthly time axis, Jan 1901 - Dec 2015 (1380 months), mid-month dates.
Tbegin<-as.Date(strptime("1901-01-16",format="%Y-%m-%d"))
Tend<-as.Date(strptime("2015-12-16",format="%Y-%m-%d"))
Tseries<-seq.Date(Tbegin,Tend,by="month")
#SPI time scale (filled in per input file inside the main loop):
scaleSPI<-c()
# Type-I error levels accepted by the trend significance test:
alpha=c(0.10,0.05)
# Drought class selection: each class is a pair of SPI bounds;
# -1000 acts as an effective -Inf lower bound.
moderateT<-c(-1.5,-1)
severeT<-c(-2,-1.5)
extremeT<-c(-1000,-2)
ExtremeSevereT<-c(-1000,-1.5)
DroughtT<-c(-1000,-1)
drought.classes<-list(moderateT,severeT,extremeT,ExtremeSevereT,DroughtT)
names(drought.classes)<-c("ModerateT","SevereT","ExtremeT","Severe+ExtremeT","DroughtT")
#------------------------------------------------
# World borders::
#--------------------------------------------
require(maptools)
world.map <- readShapeSpatial("Dati/TM_WORLD_BORDERS_SIMPL-0.3.shp")
world.map2 <- readShapeLines("Dati/TM_WORLD_BORDERS_SIMPL-0.3.shp")
class(world.map2)
proj4string(world.map2)<-"+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
#head(world.map@data)
require(maps)
# To overlay the WORLD map on the plots:
# NOTE(review): this overwrites the shapefile read into world.map above;
# only world.map2 is actually used for plot overlays later.
world.map <- maps::map("world")
world.map.shp <-maptools::map2SpatialLines(world.map)#, proj4string=projection(precsat.raster))
world<-maps::map("world",fill=TRUE,col="transparent", plot=F)
p4s<-CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
#SLworld<-map2SpatialLines(world,proj4string=p4s) # if LINES with PROVINCE detail are wanted
countries<-world$names
SPworld<-map2SpatialPolygons(world,IDs=countries,proj4string=p4s)
#------------------------------------------------
#------------------------------------------------
#Read data (period gen1901-dec2015: 1380 months):
#------------------------------------------------
DataList<-list.files(paste(getwd(),"/Outputs/SPI/IndexTimeSeries",sep=""),pattern = "Pearson")
# For every Pearson-fitted SPI brick: build one series per calendar month,
# count drought events per class, map the counts, then test the trend of
# the event counts at each significance level.
for(i in seq_along(DataList)){
  # Twelve monthly series per brick; the accumulation scale is inferred
  # from the file name (the "12" test runs before "3" so it is not shadowed).
  N.ts=12; monthSPI<-month.name
  if(grepl("12",DataList[i])==TRUE){scaleSPI=12}
  if(grepl("3",DataList[i])==TRUE){scaleSPI=3}
  if(grepl("4",DataList[i])==TRUE){scaleSPI=4}
  if(grepl("6",DataList[i])==TRUE){scaleSPI=6}
  # Load the monthly SPI brick and attach the 1901-2015 time axis.
  datax<-brick(paste(getwd(),"/Outputs/SPI/IndexTimeSeries/",DataList[i],sep=""))
  datax<-setZ(datax,Tseries)
  names(datax)<-paste("X",Tseries,sep="")
  for (m in 1:N.ts) {
    # Month selection: skip the first year (+12 offset) and then take one
    # layer per year for calendar month m.
    begin<-(match(monthSPI[m],month.name)+12)
    single_seq<-seq(begin,dim(datax)[3],by=12)
    # Season selection:
    datax1<-datax[[single_seq]]
    datax1<-setZ(datax1,as.Date(substr(names(datax1),2,10),format = "%Y.%m.%d"))
    # Cycling on drought classes:
    for (cl in seq_along(drought.classes)) {
      # Drop the trailing "T" from the class name for file/plot labels.
      ClassName<-substr(names(drought.classes[cl]),1,(nchar(names(drought.classes[cl]))-1))
      # Counting events by means of the plugged-in function (xSum=TRUE).
      EventsNum<-TrendSPI_Raster(x=datax1,filename=paste("Outputs/SPI/EventsNumber/CRU_EventsNumber_",monthSPI[m],"_SPI-Pearson-",scaleSPI,"_DroughtClass_",ClassName,".nc",sep = ""),
                                 threshold=drought.classes[[cl]],xSum=TRUE, monthSPI=monthSPI[m])
      #-----------------------------------------------------------
      # Maps (number of events)
      #-----------------------------------------------------------
      # Palette for the event counts:
      colori<-colorRampPalette(c("lightgreen","yellow","orange","red"))(20)
      p<-levelplot(EventsNum,
                   layer=1,
                   main=list(paste("Cumulated events in ",ClassName," class, ",monthSPI[m]," SPI-",scaleSPI, " months (",
                                   getYear(getZ(datax1)[1]),"-",getYear(getZ(datax1)[length(getZ(datax1))]),")",sep=""),cex=0.9),
                   interpolate=FALSE,
                   margin=F,
                   col.regions=colori,
                   contour=T,
                   xlab="Longitudine", ylab="Latitudine",
                   panel = panel.levelplot.raster
      )
      # Bug fix: the country-border overlay must be assigned back; the
      # original discarded `p + layer(...)`, so printed maps had no borders.
      p <- p + layer(sp.lines(world.map2,col="grey",fill="transparent"))
      trellis.device(png,file=paste("Plots/SPI/EventsNumber/SPI-",scaleSPI,"/CRU_EventsNumber_",monthSPI[m],"_SPI-Pearson-",scaleSPI,"_DroughtClass_",ClassName,".png",sep = ""))
      print(p)
      dev.off()
      # Testing trend by means of the plugged-in function, one run per
      # significance level.
      for(a in seq_along(alpha)){
        TrendTest<-TrendSPI_Raster(x=datax1,filename=paste("Outputs/SPI/TrendTest/CRU_TrendTest_alpha",alpha[a],"_",monthSPI[m],"_SPI-Pearson-",scaleSPI,"_DroughtClass_",ClassName,".nc",sep = ""),
                                   alpha=alpha[a],threshold=drought.classes[[cl]],xSum=FALSE, monthSPI=monthSPI[m])
        # Maps (time-trend): three-color key, neg / no sign / pos.
        colori<-colorRampPalette(c("green","lightgrey","red"))
        my.at=c(-1,-0.35,0.35,1)
        myColorkey <- list(at=my.at, ## where the colors change
                           labels=list(at=c(-0.70,0,0.70),labels=c("neg","no sign","pos"))) ## where to print labels
        p<-levelplot(TrendTest,
                     layer=1,
                     main=list(paste("Trend of events in ",ClassName," class, ",monthSPI[m]," SPI-Pearson-",scaleSPI, " months (",
                                     getYear(getZ(datax1)[1]),"-",getYear(getZ(datax1)[length(getZ(datax1))]),")",sep=""),
                               cex=0.9),
                     interpolate=FALSE,
                     margin=F,
                     col.regions=colori(3),
                     contour=T,
                     at=my.at,
                     colorkey=myColorkey,
                     xlab="Longitudine", ylab="Latitudine",
                     panel = panel.levelplot.raster
        )
        p <- p + layer(sp.lines(world.map2,col="grey",fill="transparent"))
        # Bug fix: the png file name previously pasted the whole `alpha`
        # vector (one path per level); use alpha[a] as in the .nc name.
        trellis.device(png,file=paste("Plots/SPI/TrendTest/SPI-",scaleSPI,"/CRU_TrendTest_alpha",alpha[a],"_",monthSPI[m],"_SPI-",scaleSPI,"_DroughtClass_",ClassName,".png",sep = ""))
        print(p)
        dev.off()
      } #closing "a" in alpha levels
    } #closing "cl" in drought.classes
  } #closing "m" in monthsSPI
} #closing "i" in DataList
|
# Residual-value curves by interior colour for 2017-model-year listings.
# Expects columns: year (model year), int_ (interior colour), dep_rate.
cars <- read.csv("~/Document/data_science/Visual/R_create_Visuals/dataset/dep_by_int.csv", header=TRUE, stringsAsFactors=FALSE, row.names = 1)
# Convert model year into vehicle age in years (relative to 2017).
cars['year'] <- 2017-cars['year']
#png(filename="~/Document/data_science/Visual/R_create_Visuals/dataset/tss.png")
# One subset per interior colour.
cars1<-subset(cars, int_=='black_')
cars2<-subset(cars, int_=='beige_')
cars3<-subset(cars, int_=='brown_')
cars4<-subset(cars, int_=='grey_')
#cars5<-subset(cars, capa_range==5)
# Empty canvas; the four series are added with lines() below.
plot(x=0, y=0, type="l", xlim=c(0, 20), ylim=c(0.1, 1), main="Residual Values of Cars", ylab="Residual Ratio", xlab="Years", las=1, lwd=2, bty="n", cex.axis=0.7)
lines(x=cars1$year, y=cars1$dep_rate, col="blue", type='l', lwd=2.5)
lines(x=cars2$year, y=cars2$dep_rate, col="red", type='l', lwd=2.5)
lines(x=cars3$year, y=cars3$dep_rate, col="black", type='l', lwd=2.5)
lines(x=cars4$year, y=cars4$dep_rate, col="yellow", type='l', lwd=2.5)
# Bug fix: the legend previously showed two unrelated labels ("LIA","ABB")
# for four plotted series; label each series with its interior colour.
legend("topright", inset=.05, c("black", "beige", "brown", "grey"), fill=c("blue", "red", "black", "yellow"), horiz=TRUE)
# NOTE(review): pairs with the png() call above, which is commented out;
# dev.off() closes the interactive device (or fails) when no png is open.
dev.off()
# Bucket a numeric value (or numeric string) into ordinal classes 1-5
# using upper bounds 2.2 / 2.4 / 2.8 / 3.2. The closing `return 5` of the
# else branch sits on the next line of the file.
def cat(curr):
x = float(curr)
if x<2.2:
return 1
elif x<2.4:
return 2
elif x<2.8:
return 3
elif x<3.2:
return 4
else:
return 5 | /R_create_Visuals/dep_by_int.R | no_license | lin1234227/EDA-Luxury-Sedan-Residual-Values | R | false | false | 1,110 | r | cars <- read.csv("~/Document/data_science/Visual/R_create_Visuals/dataset/dep_by_int.csv", header=TRUE, stringsAsFactors=FALSE, row.names = 1)
# Convert model year into vehicle age in years (relative to 2017);
# `cars` is read from dep_by_int.csv just above.
cars['year'] <- 2017-cars['year']
#png(filename="~/Document/data_science/Visual/R_create_Visuals/dataset/tss.png")
# One subset per interior colour.
cars1<-subset(cars, int_=='black_')
cars2<-subset(cars, int_=='beige_')
cars3<-subset(cars, int_=='brown_')
cars4<-subset(cars, int_=='grey_')
#cars5<-subset(cars, capa_range==5)
# Empty canvas; the four series are added with lines() below.
plot(x=0, y=0, type="l", xlim=c(0, 20), ylim=c(0.1, 1), main="Residual Values of Cars", ylab="Residual Ratio", xlab="Years", las=1, lwd=2, bty="n", cex.axis=0.7)
lines(x=cars1$year, y=cars1$dep_rate, col="blue", type='l', lwd=2.5)
lines(x=cars2$year, y=cars2$dep_rate, col="red", type='l', lwd=2.5)
lines(x=cars3$year, y=cars3$dep_rate, col="black", type='l', lwd=2.5)
lines(x=cars4$year, y=cars4$dep_rate, col="yellow", type='l', lwd=2.5)
# Bug fix: the legend previously showed two unrelated labels ("LIA","ABB")
# for four plotted series; label each series with its interior colour.
legend("topright", inset=.05, c("black", "beige", "brown", "grey"), fill=c("blue", "red", "black", "yellow"), horiz=TRUE)
# NOTE(review): pairs with the png() call above, which is commented out;
# dev.off() closes the interactive device (or fails) when no png is open.
dev.off()
def cat(curr):
    """Bucket a numeric value (or numeric string) into ordinal classes 1-5.

    Upper bounds are 2.2, 2.4, 2.8 and 3.2; anything >= 3.2 is class 5.
    """
    value = float(curr)
    for bucket, upper_bound in enumerate((2.2, 2.4, 2.8, 3.2), start=1):
        if value < upper_bound:
            return bucket
    return 5
# Duke University Co-lab Shiny Workshop, Session 1, Spring 2020
# Shiny App
# Generate a histogram from random normal values
# Version 1, one reactive variables
# Session-wide options for the app.
options(max.print=1000) # number of elements, not rows
options(stringsAsFactors=F)
options(scipen=999999) # avoid scientific notation in printed output
#options(device="windows")
library(shiny)
library(ggplot2)
# A Shiny app consists of ui() and server() functions
# ui() can contain R statements (open a database and query it to populate selection lists, etc.), but its primary
# purpose is to format your web page (notice the explicit use of HTML tags)
# The HTML() function instructs Shiny to pass contained text to the browser verbatim, and is useful for formatting
# your page
# server() is a function containing R statements and function calls
# Any base function, functions declared in loaded packages (importantly, Shiny, here), or functions that you create
# in global memory can be called
# runApp() is a Shiny function that launches your default browser, renders a page based on the ui() function passed,
# then executes the server() function
# Page layout: a title banner, a slider that controls the sample size, a
# spacer, and the histogram output. `req` is supplied by Shiny and unused.
ui <- function(req) {
  banner <- HTML("<br><b>Duke University Co-lab - Hello Shiny!<br><br>Generate Random, Normally Distributed Values</b><br><br>")
  # Prompt row: slider for the number of values to draw
  prompt_row <- fluidRow(width=12,
    column(width=5, sliderInput("n", "number to generate", min=0, max=50000, step=250, value=5000, width="90%"))
  )
  # Graph row: fixed-size histogram panel
  graph_row <- fluidRow(width=12,
    column(width=12, plotOutput("plot", width="600px", height="600px"))
  )
  fluidPage(banner, prompt_row, HTML("<br><br><br>"), graph_row)
}
# server(): binds the slider input as a reactive and renders the histogram.
server <- function(input, output, session) {
# Use of cat() displays messages in R console, stderr() causes display in red and writes to log (Shiny server)
#cat("AAA", file=stderr())
# Bind reactive variable(s)
# They are referenced as functions in a reactive context (renderPlot, renderText, renderPlotly, renderTable, etc.)
# Change in the value of reactive variables causes reactive function (renderPlot below) to be re-evaluated with new values
n <- reactive(input$n)
# Create and render plot
# The reference to n() causes re-execution of renderPlot() anytime input$n is modified
# This gives the "instantaneous" or "fluid" appearance to graph updates in response to on-screen inputs
output$plot <- renderPlot(
ggplot() +
# n() fresh standard-normal draws, histogrammed with white bin borders
geom_histogram(aes(x=rnorm(n())), color="white", fill="blue3") +
# thousands separators on the count axis
scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
# minimal theme: centered titles, no grid, bordered panel, bottom legend
theme(plot.title=element_text(size=14, hjust=0.5),
plot.subtitle=element_text(size=12, hjust=0.5),
plot.caption=element_text(size=12, hjust=0.5),
panel.background=element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor=element_blank(),
panel.border=element_rect(fill=NA, color="gray75"),
panel.spacing.x=unit(0, "lines"),
axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12),
axis.text.x=element_text(size=10),
axis.text.y=element_text(size=10),
strip.text=element_text(size=10),
strip.background=element_blank(),
legend.position="bottom",
legend.background=element_rect(color="gray"),
legend.key=element_rect(fill="white"),
legend.box="horizontal",
legend.text=element_text(size=8),
legend.title=element_text(size=8)) +
labs(title=paste(format(n(), big.mark=","), " normal(0, 1) pseudo-random values\n", sep=""), x="\nz", y="frequency\n")
)
}
# Execute
# launch.browser=T opens the default browser (TRUE is the safer spelling)
runApp(list("ui"=ui, "server"=server), launch.browser=T)
| /Session-1/App/NPDHist/NPDHist-1.r | no_license | tbalmat/Duke-Co-lab | R | false | false | 3,859 | r | # Duke University Co-lab Shiny Workshop, Session 1, Spring 2020
# Shiny App
# Generate a histogram from random normal values
# Version 1, one reactive variables
# Session-wide options for the app.
options(max.print=1000) # number of elements, not rows
options(stringsAsFactors=F)
options(scipen=999999) # avoid scientific notation in printed output
#options(device="windows")
library(shiny)
library(ggplot2)
# A Shiny app consists of ui() and server() functions
# ui() can contain R statements (open a database and query it to populate selection lists, etc.), but its primary
# purpose is to format your web page (notice the explicit use of HTML tags)
# The HTML() function instructs Shiny to pass contained text to the browser verbatim, and is useful for formatting
# your page
# server() is a function containing R statements and function calls
# Any base function, functions declared in loaded packages (importantly, Shiny, here), or functions that you create
# in global memory can be called
# runApp() is a Shiny function that launches your default browser, renders a page based on the ui() function passed,
# then executes the server() function
# Page layout: a title banner, a slider that controls the sample size, a
# spacer, and the histogram output. `req` is supplied by Shiny and unused.
ui <- function(req) {
  banner <- HTML("<br><b>Duke University Co-lab - Hello Shiny!<br><br>Generate Random, Normally Distributed Values</b><br><br>")
  # Prompt row: slider for the number of values to draw
  prompt_row <- fluidRow(width=12,
    column(width=5, sliderInput("n", "number to generate", min=0, max=50000, step=250, value=5000, width="90%"))
  )
  # Graph row: fixed-size histogram panel
  graph_row <- fluidRow(width=12,
    column(width=12, plotOutput("plot", width="600px", height="600px"))
  )
  fluidPage(banner, prompt_row, HTML("<br><br><br>"), graph_row)
}
# server(): binds the slider input as a reactive and renders the histogram.
server <- function(input, output, session) {
# Use of cat() displays messages in R console, stderr() causes display in red and writes to log (Shiny server)
#cat("AAA", file=stderr())
# Bind reactive variable(s)
# They are referenced as functions in a reactive context (renderPlot, renderText, renderPlotly, renderTable, etc.)
# Change in the value of reactive variables causes reactive function (renderPlot below) to be re-evaluated with new values
n <- reactive(input$n)
# Create and render plot
# The reference to n() causes re-execution of renderPlot() anytime input$n is modified
# This gives the "instantaneous" or "fluid" appearance to graph updates in response to on-screen inputs
output$plot <- renderPlot(
ggplot() +
# n() fresh standard-normal draws, histogrammed with white bin borders
geom_histogram(aes(x=rnorm(n())), color="white", fill="blue3") +
# thousands separators on the count axis
scale_y_continuous(labels=function(x) format(x, big.mark=",")) +
# minimal theme: centered titles, no grid, bordered panel, bottom legend
theme(plot.title=element_text(size=14, hjust=0.5),
plot.subtitle=element_text(size=12, hjust=0.5),
plot.caption=element_text(size=12, hjust=0.5),
panel.background=element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor=element_blank(),
panel.border=element_rect(fill=NA, color="gray75"),
panel.spacing.x=unit(0, "lines"),
axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12),
axis.text.x=element_text(size=10),
axis.text.y=element_text(size=10),
strip.text=element_text(size=10),
strip.background=element_blank(),
legend.position="bottom",
legend.background=element_rect(color="gray"),
legend.key=element_rect(fill="white"),
legend.box="horizontal",
legend.text=element_text(size=8),
legend.title=element_text(size=8)) +
labs(title=paste(format(n(), big.mark=","), " normal(0, 1) pseudo-random values\n", sep=""), x="\nz", y="frequency\n")
)
}
# Execute
# launch.browser=T opens the default browser (TRUE is the safer spelling)
runApp(list("ui"=ui, "server"=server), launch.browser=T)
|
# Stroke Comorbidity Searches in MedRxiv
# 03 June 2020
# ABB
### Search Terms from Torsten
#Search in bioRxiv and medRxiv
# Search terms for Diabetes:
# for abstract or title "stroke mouse diabetes" (match all words)
# for abstract or title "stroke rat diabetes" (match all words)
# for abstract or title "stroke rodent diabetes" (match all words)
#
# Search terms for obesity:
# for abstract or title "stroke mouse obesity" (match all words)
# for abstract or title "stroke rat obesity" (match all words)
# for abstract or title "stroke rodent obesity" (match all words)
#
# Search terms for metabolic syndrome:
# for abstract or title "stroke mouse metabolic syndrome" (match all words)
# for abstract or title "stroke rat metabolic syndrome" (match all words)
# for abstract or title "stroke rodent metabolic syndrome" (match all words)
############################################################################################
# install & load the required packages
# install.packages("devtools")
# NOTE(review): installing from GitHub on every run is slow and needs
# network access; consider guarding with requireNamespace("medrxivr").
devtools::install_github("mcguinlu/medrxivr")
library(medrxivr)
## get api info (full medRxiv metadata snapshot)
medrxiv_data <- mx_api_content()
# rough idea of numbers from searching on the web interface
topicD1 <- c("stroke", "mouse", "diabetes")
# 41 in webpage
topicD2 <- c("stroke rat diabetes")
# 72
topicD3 <- c("stroke rodent diabetes")
# 25
###################
### based on this documentation: https://mcguinlu.github.io/medrxivr/articles/building-complex-search-strategies.html#building-your-search-with-boolean-operators
####################
topicStroke <- c("stroke")
# mouse OR rat OR rodent
topicSpecies <- c("mouse", "rat", "rodent")
topicDisease <- c("diabetes", "obesity", "metabolic syndrome")
# Terms within one vector are OR-ed; vectors in the list are AND-ed:
# stroke AND (mouse OR rat OR rodent) AND (diabetes OR obesity OR ...)
query <- list(topicStroke, topicSpecies, topicDisease)
## run the search over titles and abstracts, deduplicating preprint versions
mx_results <- mx_search(data = medrxiv_data,
query = query,
from.date =20190625, to.date =20200703, NOT = c(""),
fields = c("title", "abstract"),
deduplicate = TRUE)
### print the results to a csv file
## write.csv2 already uses ';' as the separator (German-locale Excel).
## Bug fix: passing sep = ";" to write.csv2 is rejected with a warning,
## since csv2 variants do not allow overriding the separator.
write.csv2(mx_results, file="medrxivResults.csv", row.names = FALSE)
## Download the results PDFs - does not work with bioRxiv DOIs as the prefix URL is specific to medrxiv.org
mx_download(mx_results, directory = "pdf/", create = TRUE)
| /medRxivr.R | no_license | abannachbrown/MedrxivBiorxivSearches | R | false | false | 2,343 | r | # Stroke Comorbidity Searches in MedRxiv
# 03 June 2020
# ABB
### Search Terms from Torsten
#Search in bioRxiv and medRxiv
# Search terms for Diabetes:
# for abstract or title "stroke mouse diabetes" (match all words)
# for abstract or title "stroke rat diabetes" (match all words)
# for abstract or title "stroke rodent diabetes" (match all words)
#
# Search terms for obesity:
# for abstract or title "stroke mouse obesity" (match all words)
# for abstract or title "stroke rat obesity" (match all words)
# for abstract or title "stroke rodent obesity" (match all words)
#
# Search terms for metabolic syndrome:
# for abstract or title "stroke mouse metabolic syndrome" (match all words)
# for abstract or title "stroke rat metabolic syndrome" (match all words)
# for abstract or title "stroke rodent metabolic syndrome" (match all words)
############################################################################################
# install & load the required packages
# install.packages("devtools")
# NOTE(review): installing from GitHub on every run is slow and needs
# network access; consider guarding with requireNamespace("medrxivr").
devtools::install_github("mcguinlu/medrxivr")
library(medrxivr)
## get api info (full medRxiv metadata snapshot)
medrxiv_data <- mx_api_content()
# rough idea of numbers from searching on the web interface
topicD1 <- c("stroke", "mouse", "diabetes")
# 41 in webpage
topicD2 <- c("stroke rat diabetes")
# 72
topicD3 <- c("stroke rodent diabetes")
# 25
###################
### based on this documentation: https://mcguinlu.github.io/medrxivr/articles/building-complex-search-strategies.html#building-your-search-with-boolean-operators
####################
topicStroke <- c("stroke")
# mouse OR rat OR rodent
topicSpecies <- c("mouse", "rat", "rodent")
topicDisease <- c("diabetes", "obesity", "metabolic syndrome")
# Terms within one vector are OR-ed; vectors in the list are AND-ed:
# stroke AND (mouse OR rat OR rodent) AND (diabetes OR obesity OR ...)
query <- list(topicStroke, topicSpecies, topicDisease)
## run the search over titles and abstracts, deduplicating preprint versions
mx_results <- mx_search(data = medrxiv_data,
query = query,
from.date =20190625, to.date =20200703, NOT = c(""),
fields = c("title", "abstract"),
deduplicate = TRUE)
### print the results to a csv file
## write.csv2 already uses ';' as the separator (German-locale Excel).
## Bug fix: passing sep = ";" to write.csv2 is rejected with a warning,
## since csv2 variants do not allow overriding the separator.
write.csv2(mx_results, file="medrxivResults.csv", row.names = FALSE)
## Download the results PDFs - does not work with bioRxiv DOIs as the prefix URL is specific to medrxiv.org
mx_download(mx_results, directory = "pdf/", create = TRUE)
|
library(tm)
library(SnowballC)
library(wordcloud)
#The library function removeWords does not work, so this is a custom remove words function
# Remove every whole-word occurrence of `words` from the character vector
# x. Words are sorted in decreasing order before building the alternation
# so longer entries take precedence; matches become the empty string.
removeWordsCustom <- function(x, words) {
  ordered_words <- sort(words, decreasing = TRUE)
  pattern <- sprintf("\\b(%s)\\b", paste(ordered_words, collapse = "|"))
  gsub(pattern, "", x)
}
#Jessica's word cloud function, takes in dataframe, maximum number of words to consider,
#numer of words to display in the word cloud and option to display the wordCloud
#This function will generate a histogram and optional wordcloud
# Draw a word cloud from a character vector of documents.
# df: character vector (one element per document); scale: size of the
# largest word, forwarded to wordcloud() as scale=c(scale, .7).
# NOTE(review): despite the description above this function, no histogram is
# produced here - only the word cloud is drawn.
wordGen <- function(df, scale){
# , main='histogram'
df<- as.factor(df)
# wrap the documents in a tm corpus so tm_map transformations can be applied
df <- Corpus(VectorSource(df))
# df <- tm_map(df, PlainTextDocument)
df <- tm_map(df, content_transformer(tolower))
df <- tm_map(df, removePunctuation)
df <- tm_map(df, removeNumbers)
df = tm_map(df, stemDocument)
df = tm_map(df, stripWhitespace)
# boilerplate/salutation terms (mostly pre-stemmed forms, to match the
# stemDocument output) plus English stopwords; some entries are regexes,
# which removeWordsCustom passes straight into gsub()
rmWords <- c('the','this','From','Hi','Hello','Dear','xxxxxxxxx','email','master','msia',
'fwd','northwestern','rn','sciencernrn2145','8474918005','program','analyt',
'science','northwestern','univers','question','^applicat','\\S+[0-9]+\\S+',
'regard','thank','appli','applic','scienc','group','master','administrative',
'analyticsonuexchouexchang',stopwords('english'),'administr',
'fydibohfspdltcnrecipientscnomaeexnnnnnnnyellow','xxxxxxxxxxxxxxxxxx',
'montanari','mccormick','director','manag','lindsay','sheridan','please',
'xxxxxxxxxxxxxxxxxxxxxxxxxxx','please','victoria','xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
df <- tm_map(df, content_transformer(removeWordsCustom), rmWords)
wordcloud(df, max.words = 100, random.order = FALSE,colors=brewer.pal(6, "Dark2"),scale=c(scale,.7))
}
| /Code/wordGen.R | no_license | Jessicacjy/MSiAEmailQASystem | R | false | false | 1,813 | r |
library(tm)
library(SnowballC)
library(wordcloud)
#The library function removeWords does not work, so this is a custom remove words function
# Drop-in replacement for tm::removeWords: builds one word-boundary-delimited
# alternation from `words` and deletes every whole-word match from `x`.
# The decreasing sort puts longer words first in the alternation so they take
# precedence over words that are prefixes of them.
removeWordsCustom <- function(x, words) {
gsub(sprintf("\\b(%s)\\b", paste(sort(words, decreasing = TRUE),
collapse = "|")), "", x)
}
#Jessica's word cloud function, takes in dataframe, maximum number of words to consider,
#numer of words to display in the word cloud and option to display the wordCloud
#This function will generate a histogram and optional wordcloud
# Render a word cloud from a character vector of documents.
#
# The documents are wrapped in a tm corpus and normalised (lower-cased,
# punctuation and numbers removed, stemmed, whitespace collapsed), then
# boilerplate terms and English stopwords are stripped before drawing at
# most 100 words. `scale` sets the size of the largest word.
wordGen <- function(df, scale){
# , main='histogram'
  corpus <- Corpus(VectorSource(as.factor(df)))
# df <- tm_map(df, PlainTextDocument)
  corpus <- tm_map(corpus, content_transformer(tolower))
  corpus <- tm_map(corpus, removePunctuation)
  corpus <- tm_map(corpus, removeNumbers)
  corpus <- tm_map(corpus, stemDocument)
  corpus <- tm_map(corpus, stripWhitespace)
  # boilerplate/salutation terms plus English stopwords to exclude; some
  # entries are regexes that removeWordsCustom forwards into gsub()
  noise_terms <- c(
    'the','this','From','Hi','Hello','Dear','xxxxxxxxx','email','master','msia',
    'fwd','northwestern','rn','sciencernrn2145','8474918005','program','analyt',
    'science','northwestern','univers','question','^applicat','\\S+[0-9]+\\S+',
    'regard','thank','appli','applic','scienc','group','master','administrative',
    'analyticsonuexchouexchang',stopwords('english'),'administr',
    'fydibohfspdltcnrecipientscnomaeexnnnnnnnyellow','xxxxxxxxxxxxxxxxxx',
    'montanari','mccormick','director','manag','lindsay','sheridan','please',
    'xxxxxxxxxxxxxxxxxxxxxxxxxxx','please','victoria','xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
  corpus <- tm_map(corpus, content_transformer(removeWordsCustom), noise_terms)
  wordcloud(corpus, max.words = 100, random.order = FALSE,
            colors = brewer.pal(6, "Dark2"), scale = c(scale, .7))
}
|
@@ -0,0 +1,207 @@
##Script for running CV_protocol 100 times on datasets
library(phyloseq)
library(ROCR)
library(randomForest)
library(kernlab)
library(Hmisc)
# NOTE(review): absolute cluster paths; otus.rdata supplies the otu.subs /
# phylum.subs / dd* objects used below, pesame.r supplies table.wilcox.auc
# and wilcox.auc - confirm both are available where this runs.
load('/ifs/home/leeb01/otus.rdata')
source('/ifs/home/leeb01/pesame.r')
##FUNCTION
## k-fold cross-validated random-forest AUC.
##   x: sample x feature (OTU) matrix; y: binary class labels; k: number of
##   folds; ...: forwarded to randomForest().
## Per fold: univariate Wilcoxon screening keeps features with p < 0.05,
## then a random forest is trained on the remaining folds and its AUC is
## measured on the held-out fold via ROCR.
## Returns list(aucs, splits, splits.sample.size, selected.var).
rf.kfoldAUC = function(x, y, k=8, ...){
n=length(y)
# stratified fold assignment: shuffle indices within each class, then deal
# them round-robin into the k folds
split.idx = split(unlist(tapply(1:n, y, sample)), rep(1:k, length=n))
aucs = rep(NA, k)
split.sample = list()
# non-finite feature values (NA/Inf) are zeroed before modelling
x[!is.finite(x)] = 0
y = factor(y)
# feature x fold mask of which features passed the screening in each fold
selected = matrix(NA, nrow=dim(x)[2], ncol=k)
for(i in 1:k){
# 2x2 table of fold membership vs class; a zero cell means a class is
# missing from the fold (or its complement), in which case the fold is
# skipped and aucs[i] stays NA (hence na.rm=TRUE downstream)
sps = table(1:n %in% split.idx[[i]], y)
if(!any(sps==0)){
otus.x = table.wilcox.auc(x[-split.idx[[i]],], y[-split.idx[[i]]]);
sig_otu <- which(t(otus.x)[,"p.value"]<0.05)
## run the model only on OTUs w/p-value<0.05
selected[sig_otu, i] = 1
# degenerate case: nothing significant -> chance-level AUC
if(length(sig_otu)==0){
aucs[i] = 0.5
next
}
# degenerate case: a single feature -> its univariate AUC on the test fold
if(length(sig_otu)==1){
aucs[i]=wilcox.auc(c(x[split.idx[[i]], sig_otu]), y[split.idx[[i]]])["auc"]
next
}
rfm <- randomForest(x[-split.idx[[i]], sig_otu, drop=F], y[-split.idx[[i]]], ...)
# AUC of class-2 probabilities on the held-out fold
aucs[i] = performance(prediction(predict(rfm, x[split.idx[[i]], sig_otu, drop=F], type="prob")[,2],
y[split.idx[[i]]]), measure='auc')@y.values[[1]]
}
# NOTE(review): sps is appended even for skipped folds, and append()
# flattens the 2x2 table into its 4 cells rather than keeping one list
# entry per fold - confirm this is the intended bookkeeping.
split.sample = append(split.sample, sps)
}
list(aucs=aucs, splits = split.idx, splits.sample.size = split.sample, selected.var = selected)
}
##MALES
## Each dataset below is evaluated identically: rf.kfoldAUC() returns a
## 4-element list and replicate() simplifies the 100 runs into a 4 x 100
## list-matrix, so results[seq(1, 400, 4)] extracts every 4th element
## starting at 1, i.e. the 'aucs' component of each run.
##OTU + HOSP
d.merg.hosp.results <- replicate(100, rf.kfoldAUC(dd1, sample_data(otu.subs)$CASECTL, k=8))
d.merg.hosp.aucs <- d.merg.hosp.results[seq(1, 400, 4)]
##Evaluation
## min / median / 95th percentile / max over the 800 fold-level AUCs;
## na.rm=TRUE because folds missing a class leave NA entries
d.merg.hosp.aucs.q <- quantile(unlist(d.merg.hosp.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.hosp.aucs.mean <- mean(unlist(d.merg.hosp.aucs), na.rm = TRUE)
##OTU + AGE
d.merg.age.results <- replicate(100, rf.kfoldAUC(dd2, sample_data(otu.subs)$CASECTL, k=8))
d.merg.age.aucs <- d.merg.age.results[seq(1, 400, 4)]
##Evaluation
d.merg.age.aucs.q <- quantile(unlist(d.merg.age.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.age.aucs.mean <- mean(unlist(d.merg.age.aucs), na.rm = TRUE)
##OTU + RACE
d.merg.race.results <-replicate(100, rf.kfoldAUC(dd3, sample_data(otu.subs)$CASECTL, k=8))
d.merg.race.aucs <- d.merg.race.results[seq(1, 400, 4)]
##Evaluation
d.merg.race.aucs.q <- quantile(unlist(d.merg.race.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.race.aucs.mean <- mean(unlist(d.merg.race.aucs), na.rm = TRUE)
##ALL LINKED
d.merg.all.results <- replicate(100, rf.kfoldAUC(dd.merg.all, sample_data(otu.subs)$CASECTL, k=8))
d.merg.all.aucs <- d.merg.all.results[seq(1, 400, 4)]
##Evaluation
d.merg.all.aucs.q <- quantile(unlist(d.merg.all.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.all.aucs.mean <- mean(unlist(d.merg.all.aucs), na.rm = TRUE)
##UNLINKED
d.merg.unlinked.results <- replicate(100, rf.kfoldAUC(t(otu_table(otu.subs)), sample_data(otu.subs)$CASECTL, k=8))
d.merg.unlinked.aucs <- d.merg.unlinked.results[seq(1, 400, 4)]
##Evaluation
d.merg.unlinked.aucs.q <- quantile(unlist(d.merg.unlinked.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.unlinked.aucs.mean <- mean(unlist(d.merg.unlinked.aucs), na.rm = TRUE)
##PHYLUM
## taxonomic-rank runs: otu_table() is taxa x samples, so transpose into a
## samples x taxa matrix first.
## NOTE(review): nspecies() is the deprecated phyloseq name for ntaxa() -
## confirm the installed phyloseq version still provides it.
d.phylum.mat = matrix(t(otu_table(phylum.subs)), ncol=nspecies(phylum.subs), nrow=nsamples(phylum.subs))
d.phylum.results <- replicate(100, rf.kfoldAUC(d.phylum.mat, sample_data(phylum.subs)$CASECTL, k=8))
d.phylum.aucs <- d.phylum.results[seq(1, 400, 4)]
##Evaluation
d.phylum.aucs.q <- quantile(unlist(d.phylum.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.phylum.aucs.mean <- mean(unlist(d.phylum.aucs), na.rm = TRUE)
##CLASS
d.class.mat = matrix(t(otu_table(class.subs)), ncol=nspecies(class.subs), nrow=nsamples(class.subs))
d.class.results <- replicate(100, rf.kfoldAUC(d.class.mat, sample_data(class.subs)$CASECTL, k=8))
d.class.aucs <- d.class.results[seq(1, 400, 4)]
##Evaluation
d.class.aucs.q <- quantile(unlist(d.class.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.class.aucs.mean <- mean(unlist(d.class.aucs), na.rm = TRUE)
##ORDER
d.order.mat = matrix(t(otu_table(order.subs)), ncol=nspecies(order.subs), nrow=nsamples(order.subs))
d.order.results <- replicate(100, rf.kfoldAUC(d.order.mat, sample_data(order.subs)$CASECTL, k=8))
d.order.aucs <- d.order.results[seq(1, 400, 4)]
##Evaluation
d.order.aucs.q <- quantile(unlist(d.order.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.order.aucs.mean <- mean(unlist(d.order.aucs), na.rm = TRUE)
##FAMILY
d.family.mat = matrix(t(otu_table(family.subs)), ncol=nspecies(family.subs), nrow=nsamples(family.subs))
d.family.results <- replicate(100, rf.kfoldAUC(d.family.mat, sample_data(family.subs)$CASECTL, k=8))
d.family.aucs <- d.family.results[seq(1, 400, 4)]
##Evaluation
d.family.aucs.q <- quantile(unlist(d.family.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.family.aucs.mean <- mean(unlist(d.family.aucs), na.rm = TRUE)
##FEMALES
## identical pipeline on the female subset (objects suffixed .f / prefixed f.)
##OTU + HOSP
f.merg.hosp.results <- replicate(100, rf.kfoldAUC(ff1, sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.hosp.aucs <- f.merg.hosp.results[seq(1, 400, 4)]
##Evaluation
f.merg.hosp.aucs.q <- quantile(unlist(f.merg.hosp.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.hosp.aucs.mean <- mean(unlist(f.merg.hosp.aucs), na.rm = TRUE)
##OTU + AGE
f.merg.age.results <- replicate(100, rf.kfoldAUC(ff2, sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.age.aucs <- f.merg.age.results[seq(1, 400, 4)]
##Evaluation
f.merg.age.aucs.q <- quantile(unlist(f.merg.age.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.age.aucs.mean <- mean(unlist(f.merg.age.aucs), na.rm = TRUE)
#OTU + RACE
f.merg.race.results <- replicate(100, rf.kfoldAUC(ff3, sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.race.aucs <- f.merg.race.results[seq(1, 400, 4)]
##Evaluation
f.merg.race.aucs.q <- quantile(unlist(f.merg.race.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.race.aucs.mean <- mean(unlist(f.merg.race.aucs), na.rm = TRUE)
##ALL LINKED
f.merg.all.results <- replicate(100, rf.kfoldAUC(ff.merg.all, sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.all.aucs <- f.merg.all.results[seq(1, 400, 4)]
##Evaluation
f.merg.all.aucs.q <- quantile(unlist(f.merg.all.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.all.aucs.mean <- mean(unlist(f.merg.all.aucs), na.rm = TRUE)
##UNLINKED
f.merg.unlinked.results <- replicate(100, rf.kfoldAUC(t(otu_table(otu.subs.f)), sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.unlinked.aucs <- f.merg.unlinked.results[seq(1, 400, 4)]
##Evaluation
f.merg.unlinked.aucs.q <- quantile(unlist(f.merg.unlinked.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.unlinked.aucs.mean <- mean(unlist(f.merg.unlinked.aucs), na.rm = TRUE)
##PHYLUM
f.phylum.mat = matrix(t(otu_table(phylum.subs.f)), ncol=nspecies(phylum.subs.f), nrow=nsamples(phylum.subs.f))
f.phylum.results <- replicate(100, rf.kfoldAUC(f.phylum.mat, sample_data(phylum.subs.f)$CASECTL, k=8))
f.phylum.aucs <- f.phylum.results[seq(1, 400, 4)]
##Evaluation
f.phylum.aucs.q <- quantile(unlist(f.phylum.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.phylum.aucs.mean <- mean(unlist(f.phylum.aucs), na.rm = TRUE)
##CLASS
f.class.mat = matrix(t(otu_table(class.subs.f)), ncol=nspecies(class.subs.f), nrow=nsamples(class.subs.f))
f.class.results <- replicate(100, rf.kfoldAUC(f.class.mat, sample_data(class.subs.f)$CASECTL, k=8))
f.class.aucs <- f.class.results[seq(1, 400, 4)]
##Evaluation
f.class.aucs.q <- quantile(unlist(f.class.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.class.aucs.mean <- mean(unlist(f.class.aucs), na.rm = TRUE)
##ORDER
f.order.mat = matrix(t(otu_table(order.subs.f)), ncol=nspecies(order.subs.f), nrow=nsamples(order.subs.f))
f.order.results <- replicate(100, rf.kfoldAUC(f.order.mat, sample_data(order.subs.f)$CASECTL, k=8))
f.order.aucs <- f.order.results[seq(1, 400, 4)]
##Evaluation
f.order.aucs.q <- quantile(unlist(f.order.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.order.aucs.mean <- mean(unlist(f.order.aucs), na.rm = TRUE)
##FAMILY
f.family.mat = matrix(t(otu_table(family.subs.f)), ncol=nspecies(family.subs.f), nrow=nsamples(family.subs.f))
f.family.results <- replicate(100, rf.kfoldAUC(f.family.mat, sample_data(family.subs.f)$CASECTL, k=8))
f.family.aucs <- f.family.results[seq(1, 400, 4)]
##Evaluation
f.family.aucs.q <- quantile(unlist(f.family.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.family.aucs.mean <- mean(unlist(f.family.aucs), na.rm = TRUE)
##AUC EVALUATION AT DIFFERENT PERCENTILES
## default quartiles as a quick sanity check (printed, not stored)
quantile(unlist(d.merg.hosp.aucs))
## NOTE(review): absolute local macOS path, while the data/helpers above are
## loaded from cluster paths - confirm where this script is meant to run.
save.image('/Users/brianlee/Documents/CRC/CV_protocol_w_f_s_output/CV analyses results/merg.analysis.all.rdata')
| /final_script.r | no_license | angryhamburger/R | R | false | false | 8,693 | r | @@ -0,0 +1,207 @@
##Script for running CV_protocol 100 times on datasets
library(phyloseq)
library(ROCR)
library(randomForest)
library(kernlab)
library(Hmisc)
# NOTE(review): absolute cluster paths; otus.rdata supplies the otu.subs /
# phylum.subs / dd* objects used below, pesame.r supplies table.wilcox.auc
# and wilcox.auc - confirm both are available where this runs.
load('/ifs/home/leeb01/otus.rdata')
source('/ifs/home/leeb01/pesame.r')
##FUNCTION
## k-fold cross-validated random-forest AUC.
##   x: sample x feature (OTU) matrix; y: binary class labels; k: number of
##   folds; ...: forwarded to randomForest().
## Per fold: univariate Wilcoxon screening keeps features with p < 0.05,
## then a random forest is trained on the remaining folds and its AUC is
## measured on the held-out fold via ROCR.
## Returns list(aucs, splits, splits.sample.size, selected.var).
rf.kfoldAUC = function(x, y, k=8, ...){
n=length(y)
# stratified fold assignment: shuffle indices within each class, then deal
# them round-robin into the k folds
split.idx = split(unlist(tapply(1:n, y, sample)), rep(1:k, length=n))
aucs = rep(NA, k)
split.sample = list()
# non-finite feature values (NA/Inf) are zeroed before modelling
x[!is.finite(x)] = 0
y = factor(y)
# feature x fold mask of which features passed the screening in each fold
selected = matrix(NA, nrow=dim(x)[2], ncol=k)
for(i in 1:k){
# 2x2 table of fold membership vs class; a zero cell means a class is
# missing from the fold (or its complement), in which case the fold is
# skipped and aucs[i] stays NA (hence na.rm=TRUE downstream)
sps = table(1:n %in% split.idx[[i]], y)
if(!any(sps==0)){
otus.x = table.wilcox.auc(x[-split.idx[[i]],], y[-split.idx[[i]]]);
sig_otu <- which(t(otus.x)[,"p.value"]<0.05)
## run the model only on OTUs w/p-value<0.05
selected[sig_otu, i] = 1
# degenerate case: nothing significant -> chance-level AUC
if(length(sig_otu)==0){
aucs[i] = 0.5
next
}
# degenerate case: a single feature -> its univariate AUC on the test fold
if(length(sig_otu)==1){
aucs[i]=wilcox.auc(c(x[split.idx[[i]], sig_otu]), y[split.idx[[i]]])["auc"]
next
}
rfm <- randomForest(x[-split.idx[[i]], sig_otu, drop=F], y[-split.idx[[i]]], ...)
# AUC of class-2 probabilities on the held-out fold
aucs[i] = performance(prediction(predict(rfm, x[split.idx[[i]], sig_otu, drop=F], type="prob")[,2],
y[split.idx[[i]]]), measure='auc')@y.values[[1]]
}
# NOTE(review): sps is appended even for skipped folds, and append()
# flattens the 2x2 table into its 4 cells rather than keeping one list
# entry per fold - confirm this is the intended bookkeeping.
split.sample = append(split.sample, sps)
}
list(aucs=aucs, splits = split.idx, splits.sample.size = split.sample, selected.var = selected)
}
##MALES
## Each dataset below is evaluated identically: rf.kfoldAUC() returns a
## 4-element list and replicate() simplifies the 100 runs into a 4 x 100
## list-matrix, so results[seq(1, 400, 4)] extracts every 4th element
## starting at 1, i.e. the 'aucs' component of each run.
##OTU + HOSP
d.merg.hosp.results <- replicate(100, rf.kfoldAUC(dd1, sample_data(otu.subs)$CASECTL, k=8))
d.merg.hosp.aucs <- d.merg.hosp.results[seq(1, 400, 4)]
##Evaluation
## min / median / 95th percentile / max over the 800 fold-level AUCs;
## na.rm=TRUE because folds missing a class leave NA entries
d.merg.hosp.aucs.q <- quantile(unlist(d.merg.hosp.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.hosp.aucs.mean <- mean(unlist(d.merg.hosp.aucs), na.rm = TRUE)
##OTU + AGE
d.merg.age.results <- replicate(100, rf.kfoldAUC(dd2, sample_data(otu.subs)$CASECTL, k=8))
d.merg.age.aucs <- d.merg.age.results[seq(1, 400, 4)]
##Evaluation
d.merg.age.aucs.q <- quantile(unlist(d.merg.age.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.age.aucs.mean <- mean(unlist(d.merg.age.aucs), na.rm = TRUE)
##OTU + RACE
d.merg.race.results <-replicate(100, rf.kfoldAUC(dd3, sample_data(otu.subs)$CASECTL, k=8))
d.merg.race.aucs <- d.merg.race.results[seq(1, 400, 4)]
##Evaluation
d.merg.race.aucs.q <- quantile(unlist(d.merg.race.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.race.aucs.mean <- mean(unlist(d.merg.race.aucs), na.rm = TRUE)
##ALL LINKED
d.merg.all.results <- replicate(100, rf.kfoldAUC(dd.merg.all, sample_data(otu.subs)$CASECTL, k=8))
d.merg.all.aucs <- d.merg.all.results[seq(1, 400, 4)]
##Evaluation
d.merg.all.aucs.q <- quantile(unlist(d.merg.all.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.all.aucs.mean <- mean(unlist(d.merg.all.aucs), na.rm = TRUE)
##UNLINKED
d.merg.unlinked.results <- replicate(100, rf.kfoldAUC(t(otu_table(otu.subs)), sample_data(otu.subs)$CASECTL, k=8))
d.merg.unlinked.aucs <- d.merg.unlinked.results[seq(1, 400, 4)]
##Evaluation
d.merg.unlinked.aucs.q <- quantile(unlist(d.merg.unlinked.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.merg.unlinked.aucs.mean <- mean(unlist(d.merg.unlinked.aucs), na.rm = TRUE)
##PHYLUM
## taxonomic-rank runs: otu_table() is taxa x samples, so transpose into a
## samples x taxa matrix first.
## NOTE(review): nspecies() is the deprecated phyloseq name for ntaxa() -
## confirm the installed phyloseq version still provides it.
d.phylum.mat = matrix(t(otu_table(phylum.subs)), ncol=nspecies(phylum.subs), nrow=nsamples(phylum.subs))
d.phylum.results <- replicate(100, rf.kfoldAUC(d.phylum.mat, sample_data(phylum.subs)$CASECTL, k=8))
d.phylum.aucs <- d.phylum.results[seq(1, 400, 4)]
##Evaluation
d.phylum.aucs.q <- quantile(unlist(d.phylum.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.phylum.aucs.mean <- mean(unlist(d.phylum.aucs), na.rm = TRUE)
##CLASS
d.class.mat = matrix(t(otu_table(class.subs)), ncol=nspecies(class.subs), nrow=nsamples(class.subs))
d.class.results <- replicate(100, rf.kfoldAUC(d.class.mat, sample_data(class.subs)$CASECTL, k=8))
d.class.aucs <- d.class.results[seq(1, 400, 4)]
##Evaluation
d.class.aucs.q <- quantile(unlist(d.class.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.class.aucs.mean <- mean(unlist(d.class.aucs), na.rm = TRUE)
##ORDER
d.order.mat = matrix(t(otu_table(order.subs)), ncol=nspecies(order.subs), nrow=nsamples(order.subs))
d.order.results <- replicate(100, rf.kfoldAUC(d.order.mat, sample_data(order.subs)$CASECTL, k=8))
d.order.aucs <- d.order.results[seq(1, 400, 4)]
##Evaluation
d.order.aucs.q <- quantile(unlist(d.order.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.order.aucs.mean <- mean(unlist(d.order.aucs), na.rm = TRUE)
##FAMILY
d.family.mat = matrix(t(otu_table(family.subs)), ncol=nspecies(family.subs), nrow=nsamples(family.subs))
d.family.results <- replicate(100, rf.kfoldAUC(d.family.mat, sample_data(family.subs)$CASECTL, k=8))
d.family.aucs <- d.family.results[seq(1, 400, 4)]
##Evaluation
d.family.aucs.q <- quantile(unlist(d.family.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
d.family.aucs.mean <- mean(unlist(d.family.aucs), na.rm = TRUE)
##FEMALES
## identical pipeline on the female subset (objects suffixed .f / prefixed f.)
##OTU + HOSP
f.merg.hosp.results <- replicate(100, rf.kfoldAUC(ff1, sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.hosp.aucs <- f.merg.hosp.results[seq(1, 400, 4)]
##Evaluation
f.merg.hosp.aucs.q <- quantile(unlist(f.merg.hosp.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.hosp.aucs.mean <- mean(unlist(f.merg.hosp.aucs), na.rm = TRUE)
##OTU + AGE
f.merg.age.results <- replicate(100, rf.kfoldAUC(ff2, sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.age.aucs <- f.merg.age.results[seq(1, 400, 4)]
##Evaluation
f.merg.age.aucs.q <- quantile(unlist(f.merg.age.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.age.aucs.mean <- mean(unlist(f.merg.age.aucs), na.rm = TRUE)
#OTU + RACE
f.merg.race.results <- replicate(100, rf.kfoldAUC(ff3, sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.race.aucs <- f.merg.race.results[seq(1, 400, 4)]
##Evaluation
f.merg.race.aucs.q <- quantile(unlist(f.merg.race.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.race.aucs.mean <- mean(unlist(f.merg.race.aucs), na.rm = TRUE)
##ALL LINKED
f.merg.all.results <- replicate(100, rf.kfoldAUC(ff.merg.all, sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.all.aucs <- f.merg.all.results[seq(1, 400, 4)]
##Evaluation
f.merg.all.aucs.q <- quantile(unlist(f.merg.all.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.all.aucs.mean <- mean(unlist(f.merg.all.aucs), na.rm = TRUE)
##UNLINKED
f.merg.unlinked.results <- replicate(100, rf.kfoldAUC(t(otu_table(otu.subs.f)), sample_data(otu.subs.f)$CASECTL, k=8))
f.merg.unlinked.aucs <- f.merg.unlinked.results[seq(1, 400, 4)]
##Evaluation
f.merg.unlinked.aucs.q <- quantile(unlist(f.merg.unlinked.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.merg.unlinked.aucs.mean <- mean(unlist(f.merg.unlinked.aucs), na.rm = TRUE)
##PHYLUM
f.phylum.mat = matrix(t(otu_table(phylum.subs.f)), ncol=nspecies(phylum.subs.f), nrow=nsamples(phylum.subs.f))
f.phylum.results <- replicate(100, rf.kfoldAUC(f.phylum.mat, sample_data(phylum.subs.f)$CASECTL, k=8))
f.phylum.aucs <- f.phylum.results[seq(1, 400, 4)]
##Evaluation
f.phylum.aucs.q <- quantile(unlist(f.phylum.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.phylum.aucs.mean <- mean(unlist(f.phylum.aucs), na.rm = TRUE)
##CLASS
f.class.mat = matrix(t(otu_table(class.subs.f)), ncol=nspecies(class.subs.f), nrow=nsamples(class.subs.f))
f.class.results <- replicate(100, rf.kfoldAUC(f.class.mat, sample_data(class.subs.f)$CASECTL, k=8))
f.class.aucs <- f.class.results[seq(1, 400, 4)]
##Evaluation
f.class.aucs.q <- quantile(unlist(f.class.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.class.aucs.mean <- mean(unlist(f.class.aucs), na.rm = TRUE)
##ORDER
f.order.mat = matrix(t(otu_table(order.subs.f)), ncol=nspecies(order.subs.f), nrow=nsamples(order.subs.f))
f.order.results <- replicate(100, rf.kfoldAUC(f.order.mat, sample_data(order.subs.f)$CASECTL, k=8))
f.order.aucs <- f.order.results[seq(1, 400, 4)]
##Evaluation
f.order.aucs.q <- quantile(unlist(f.order.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.order.aucs.mean <- mean(unlist(f.order.aucs), na.rm = TRUE)
##FAMILY
f.family.mat = matrix(t(otu_table(family.subs.f)), ncol=nspecies(family.subs.f), nrow=nsamples(family.subs.f))
f.family.results <- replicate(100, rf.kfoldAUC(f.family.mat, sample_data(family.subs.f)$CASECTL, k=8))
f.family.aucs <- f.family.results[seq(1, 400, 4)]
##Evaluation
f.family.aucs.q <- quantile(unlist(f.family.aucs), probs=c(0, 0.5, 0.95, 1), na.rm = TRUE)
f.family.aucs.mean <- mean(unlist(f.family.aucs), na.rm = TRUE)
##AUC EVALUATION AT DIFFERENT PERCENTILES
## default quartiles as a quick sanity check (printed, not stored)
quantile(unlist(d.merg.hosp.aucs))
## NOTE(review): absolute local macOS path, while the data/helpers above are
## loaded from cluster paths - confirm where this script is meant to run.
save.image('/Users/brianlee/Documents/CRC/CV_protocol_w_f_s_output/CV analyses results/merg.analysis.all.rdata')
|
##################################################################################################################
############################################## find all direct repeats
## example dataset
s<-readDNAStringSet(".../Lakshamanan 294 mammals.fasta",format="fasta")
#
library("Biostrings")
library("stringr")
# get repeats for a set of species
# Repeat counts for a whole DNAStringSet of mitochondrial genomes, one row
# per species. With repeats_type="all" the columns are the four repeat
# classes (direct/mirror/inverted/everted) and `min` is ignored; otherwise
# the columns are the repeat lengths min..kmer_length. With
# return_repeat_seq=TRUE a per-species list of repeat sequences is returned
# instead of counts.
# NOTE(review): if ralgo != "perfect" (and repeats_type != "all"), `reps` is
# never created and return(reps) fails - confirm "perfect" is the only
# supported algorithm.
get_all_repeats_of_DNAStringset<-function(mtGenomes=s,min=10,kmer_length=13,unique_repeats=TRUE,repeats_type="direct",
remove_overlapping=TRUE, ralgo="perfect",
return_repeat_coord=FALSE, heuristic_exclude_DLoop=FALSE,return_repeat_seq=FALSE){
library(stringr)
library(stringi)
print("calculating repeats for species: ")
rcord<-list()
if(repeats_type=="all"){ ## this will then return DR, MR, IR and ER in that order, species=row, columns=repeat types
## this will totally ignore the min=x value
m <- matrix(0, ncol = 4, nrow = 0)## ncol is equal to 4 because there are 4 repeat types
reps<-data.frame(m)
for(a in 1:length(mtGenomes)){
print(names(mtGenomes)[a])
# get_all_repeat_types() is defined elsewhere in the project
repeats<-get_all_repeat_types(mtGenomes[a],kmer_length=kmer_length,unique_repeats=unique_repeats,
repeats_type=repeats_type,heuristic_exclude_DLoop=heuristic_exclude_DLoop)
repeats<-t(data.frame(repeats)) #need to transpose because per default the created df has only 1 column & 4 rows
reps<-rbind(reps,repeats)
print(a)
}
names(reps)<-c("direct","mirror","inverted","everted")
} else {
## this is the standard function that allows you to iterate between a min and max repeat length
if(ralgo=="perfect"){
m <- matrix(0, ncol = kmer_length-min+1, nrow = 0)
reps<-data.frame(m)
for(a in 1:length(mtGenomes)){
print(names(mtGenomes)[a])
repeats<-get_all_repeats_upto_kmer(mtGenomes[a],min=min,kmer_length=kmer_length,unique_repeats=unique_repeats,
repeats_type=repeats_type,remove_overlapping=remove_overlapping,
heuristic_exclude_DLoop=heuristic_exclude_DLoop,return_repeat_seq=return_repeat_seq)
if(return_repeat_seq==TRUE){
# collect the raw repeat sequences per species instead of counts
rcord[[a]]<-repeats
}else{
repeats<-t(data.frame(repeats)) # need to transpose because per default the created df has only 1 column & 4 rows
reps<-rbind(reps,repeats)
}
print(a)
}
}
}
if(return_repeat_seq==TRUE){return(rcord); }
return(reps)
}
# get repeats for a single species
# Count (or collect) repeats at every length from `min` to `kmer_length`
# for a single genome by calling repeats_of_given_kmer() once per length.
#
# Returns a numeric vector with one count per repeat length, or - when
# return_repeat_seq=TRUE - the concatenated repeat sequences themselves.
get_all_repeats_upto_kmer<-function(mtGenome=s[250],min=10,kmer_length=13,unique_repeats=TRUE,return_repeat_seq=FALSE,
                                    repeats_type="direct",ralgo="perfect",remove_overlapping=TRUE,
                                    return_repeat_coord=FALSE,heuristic_exclude_DLoop=FALSE){
  print("calculating repeats for kmer length of =")
  print(kmer_length)
  rep_seq <- vector()
  # preallocate one slot per repeat length (min .. kmer_length); defined
  # before the ralgo branch so an unknown ralgo returns zeros instead of
  # erroring on an undefined object
  repeat_results <- numeric((kmer_length - min) + 1)
  if (ralgo == "perfect") {
    counter <- 1
    for (xy in min:kmer_length) {
      # progress: report the repeat length being processed
      # (the original printed the literal string "xy" here by mistake)
      message("kmer length: ", xy)
      # single call shared by both return modes (the original duplicated it)
      res <- repeats_of_given_kmer(mtGenome, kmer_length = xy, return_repeat_seq = return_repeat_seq,
                                   unique_repeats = unique_repeats, repeats_type = repeats_type,
                                   remove_overlapping = remove_overlapping,
                                   return_repeat_coord = return_repeat_coord,
                                   heuristic_exclude_DLoop = heuristic_exclude_DLoop)
      if (return_repeat_seq == TRUE) {
        rep_seq <- c(rep_seq, res)
      } else {
        repeat_results[counter] <- res
      }
      counter <- counter + 1
      cat(xy)
    }
  }
  ## returns a vector for all repeats that have length between min and kmer_length
  if (return_repeat_seq == TRUE) { return(rep_seq) }
  return(repeat_results)
}
###############
# get repeats of fixed length for a single species
# Count repeats of exactly `kmer_length` nt in one mitochondrial genome.
#
# Strategy: enumerate every substring of length `kmer_length` by chunking the
# genome at each of the `kmer_length` possible frame offsets, deduplicate the
# candidates, then count/locate the ones that actually occur as repeats.
#
#   mtGenome               DNAString(Set) holding one genome
#   kmer_length            repeat length to test
#   clean                  run symbolClean() (external helper) on the sequence
#   return_repeat_seq      TRUE -> return repeat sequences, FALSE -> a count
#   unique_repeats         collapse identical repeat sequences before returning
#   remove_overlapping     drop repeats whose start overlaps an earlier repeat
#   heuristic_exclude_DLoop  crude D-loop exclusion: keep positions 1000-15400
#   repeats_type           "direct", "inverted", "mirror" or "everted"
repeats_of_given_kmer<-function(mtGenome=s[250],kmer_length=13, clean=TRUE, return_repeat_seq=FALSE,
                                unique_repeats=TRUE,remove_overlapping=TRUE,return_repeat_coord=FALSE,
                                heuristic_exclude_DLoop=FALSE,repeats_type="direct"){
  mtg<-DNAString(paste(mtGenome))
  if(heuristic_exclude_DLoop==TRUE){
    message("excluding D loop by heuristic approach")
    mtg<-mtg[1000:15400]
  }
  if(clean==TRUE){
    mtg<-symbolClean(DNAStringSet(mtg))
    mtg<-paste(mtg)
  }else{ mtg<-paste(mtg) }
  # NOTE(review): the transformed strand is built from the original,
  # uncleaned/untrimmed mtGenome rather than from mtg - confirm intended.
  if(repeats_type=="inverted"){ mtg_inverted<-reverseComplement(mtGenome) }
  if(repeats_type=="mirror"){ mtg_inverted<-reverse(mtGenome) }
  if(repeats_type=="everted"){ mtg_inverted<-Biostrings::complement(mtGenome) }
  ## generate the kmer_length frame-shifted copies of the genome; chunking
  ## each of them every kmer_length bases yields every possible kmer.
  ## (the original looped over 0:kmer_length, so the index-0 assignment was
  ## silently dropped and one slot kept its numeric initialiser - fixed here)
  truncated_strings <- character(kmer_length)
  for(i in seq_len(kmer_length)){
    truncated_strings[i] <- substring(mtg, i, nchar(mtg)) ## start=i, end=mtDNA end
  }
  ### split each shifted copy every kmer_length bases; fixed_split() (external
  ### helper) returns a list, so collect the results in a list with [[ ]]
  ### instead of assigning a list into an atomic vector with [ ]
  substrings <- vector("list", length(truncated_strings))
  for(n in seq_along(truncated_strings)){
    substrings[[n]] <- fixed_split(truncated_strings[n], kmer_length)
  }
  ## exclude all duplicates and strings < kmer length; these are the
  ## *potential* repeats
  substrings_unique<-unique(unlist(substrings))
  substrings_unique<-substrings_unique[nchar(substrings_unique)>=kmer_length]
  cat("Identified X unique substrings that could be potential repeats:")
  cat(length(substrings_unique))
  coords_of_repeatsv2<-vector()
  ## collect start coordinates of candidates that really are repeated
  if(repeats_type=="direct"){
    print("checking direct repeats")
    for(x in seq_along(substrings_unique)){
      count_repeats<-str_count(mtg,substrings_unique[x]) ## occurrences in the genome
      if(count_repeats>=2){  # >= 2 hits on the same strand = direct repeat
        interm<-str_locate_all(mtg, substrings_unique[x])[[1]][,1]
        coords_of_repeatsv2<-c(coords_of_repeatsv2,interm)
      }
    }
  }
  print("length(substrings_unique)")
  print(length(substrings_unique))
  if(repeats_type=="inverted" || repeats_type=="everted" || repeats_type=="mirror"){
    print("checking non direct repeats")
    for(x in seq_along(substrings_unique)){
      # a single hit on the transformed strand suffices for a non-direct repeat
      count_repeats<-str_count(mtg_inverted,substrings_unique[x])
      if(count_repeats>=1){
        interm<-str_locate_all(mtg, substrings_unique[x])[[1]][,1]
        coords_of_repeatsv2<-c(coords_of_repeatsv2,interm)
      }
    }
  }
  ## no repeats at this length
  if(length(coords_of_repeatsv2)==0){
    print("no repeats found")
    if(return_repeat_seq==TRUE) return("")
    return(0)
  }
  ### exclude OVERLAPPING repeats (redundant ones survive, so some double
  ### counting can remain)
  sr<-sort(coords_of_repeatsv2)
  if(remove_overlapping==TRUE){
    counter=1
    while(counter<=length(sr)){
      ## keep starts at/before the current start, or starts clear of the
      ## window [current, current + kmer_length)
      sr<-sr[sr<=sr[counter] | sr>=(sr[counter]+kmer_length)]
      counter=counter+1
    }
  }
  ## recover the repeat sequences from the retained start positions
  return_vector<-DNAStringSet(rep("A",length(sr)))
  mtg<-DNAString(mtg)
  for(i in seq_along(sr)){
    return_vector[[i]]<-mtg[sr[i]:(sr[i]+(kmer_length-1))]
  }
  ## optionally drop redundant (identical) repeat sequences
  if(unique_repeats==TRUE){
    return_vector<-levels(as.factor(return_vector))
  }
  if(return_repeat_seq==TRUE){return(return_vector)}
  return(length(return_vector))## number of (deduplicated) repeats at this length
}
| /repeat_detection.R | no_license | pabisk/aging_triplex2 | R | false | false | 8,860 | r | ##################################################################################################################
############################################## find all direct repeats
## example dataset
s<-readDNAStringSet(".../Lakshamanan 294 mammals.fasta",format="fasta")
#
library("Biostrings")
library("stringr")
# get repeats for a set of species
# Repeat counts for a whole DNAStringSet of mitochondrial genomes, one row
# per species. With repeats_type="all" the columns are the four repeat
# classes (direct/mirror/inverted/everted) and `min` is ignored; otherwise
# the columns are the repeat lengths min..kmer_length. With
# return_repeat_seq=TRUE a per-species list of repeat sequences is returned
# instead of counts.
# NOTE(review): if ralgo != "perfect" (and repeats_type != "all"), `reps` is
# never created and return(reps) fails - confirm "perfect" is the only
# supported algorithm.
get_all_repeats_of_DNAStringset<-function(mtGenomes=s,min=10,kmer_length=13,unique_repeats=TRUE,repeats_type="direct",
remove_overlapping=TRUE, ralgo="perfect",
return_repeat_coord=FALSE, heuristic_exclude_DLoop=FALSE,return_repeat_seq=FALSE){
library(stringr)
library(stringi)
print("calculating repeats for species: ")
rcord<-list()
if(repeats_type=="all"){ ## this will then return DR, MR, IR and ER in that order, species=row, columns=repeat types
## this will totally ignore the min=x value
m <- matrix(0, ncol = 4, nrow = 0)## ncol is equal to 4 because there are 4 repeat types
reps<-data.frame(m)
for(a in 1:length(mtGenomes)){
print(names(mtGenomes)[a])
# get_all_repeat_types() is defined elsewhere in the project
repeats<-get_all_repeat_types(mtGenomes[a],kmer_length=kmer_length,unique_repeats=unique_repeats,
repeats_type=repeats_type,heuristic_exclude_DLoop=heuristic_exclude_DLoop)
repeats<-t(data.frame(repeats)) #need to transpose because per default the created df has only 1 column & 4 rows
reps<-rbind(reps,repeats)
print(a)
}
names(reps)<-c("direct","mirror","inverted","everted")
} else {
## this is the standard function that allows you to iterate between a min and max repeat length
if(ralgo=="perfect"){
m <- matrix(0, ncol = kmer_length-min+1, nrow = 0)
reps<-data.frame(m)
for(a in 1:length(mtGenomes)){
print(names(mtGenomes)[a])
repeats<-get_all_repeats_upto_kmer(mtGenomes[a],min=min,kmer_length=kmer_length,unique_repeats=unique_repeats,
repeats_type=repeats_type,remove_overlapping=remove_overlapping,
heuristic_exclude_DLoop=heuristic_exclude_DLoop,return_repeat_seq=return_repeat_seq)
if(return_repeat_seq==TRUE){
# collect the raw repeat sequences per species instead of counts
rcord[[a]]<-repeats
}else{
repeats<-t(data.frame(repeats)) # need to transpose because per default the created df has only 1 column & 4 rows
reps<-rbind(reps,repeats)
}
print(a)
}
}
}
if(return_repeat_seq==TRUE){return(rcord); }
return(reps)
}
# get repeats for a single species
# Count (or collect) repeats at every length from `min` to `kmer_length`
# for a single genome by calling repeats_of_given_kmer() once per length.
#
# Returns a numeric vector with one count per repeat length, or - when
# return_repeat_seq=TRUE - the concatenated repeat sequences themselves.
get_all_repeats_upto_kmer<-function(mtGenome=s[250],min=10,kmer_length=13,unique_repeats=TRUE,return_repeat_seq=FALSE,
                                    repeats_type="direct",ralgo="perfect",remove_overlapping=TRUE,
                                    return_repeat_coord=FALSE,heuristic_exclude_DLoop=FALSE){
  print("calculating repeats for kmer length of =")
  print(kmer_length)
  rep_seq <- vector()
  # preallocate one slot per repeat length (min .. kmer_length); defined
  # before the ralgo branch so an unknown ralgo returns zeros instead of
  # erroring on an undefined object
  repeat_results <- numeric((kmer_length - min) + 1)
  if (ralgo == "perfect") {
    counter <- 1
    for (xy in min:kmer_length) {
      # progress: report the repeat length being processed
      # (the original printed the literal string "xy" here by mistake)
      message("kmer length: ", xy)
      # single call shared by both return modes (the original duplicated it)
      res <- repeats_of_given_kmer(mtGenome, kmer_length = xy, return_repeat_seq = return_repeat_seq,
                                   unique_repeats = unique_repeats, repeats_type = repeats_type,
                                   remove_overlapping = remove_overlapping,
                                   return_repeat_coord = return_repeat_coord,
                                   heuristic_exclude_DLoop = heuristic_exclude_DLoop)
      if (return_repeat_seq == TRUE) {
        rep_seq <- c(rep_seq, res)
      } else {
        repeat_results[counter] <- res
      }
      counter <- counter + 1
      cat(xy)
    }
  }
  ## returns a vector for all repeats that have length between min and kmer_length
  if (return_repeat_seq == TRUE) { return(rep_seq) }
  return(repeat_results)
}
###############
# get repeats of fixed length for a single species
# Count repeats of exactly `kmer_length` nt in one mitochondrial genome.
#
# Strategy: enumerate every substring of length `kmer_length` by chunking the
# genome at each of the `kmer_length` possible frame offsets, deduplicate the
# candidates, then count/locate the ones that actually occur as repeats.
#
#   mtGenome               DNAString(Set) holding one genome
#   kmer_length            repeat length to test
#   clean                  run symbolClean() (external helper) on the sequence
#   return_repeat_seq      TRUE -> return repeat sequences, FALSE -> a count
#   unique_repeats         collapse identical repeat sequences before returning
#   remove_overlapping     drop repeats whose start overlaps an earlier repeat
#   heuristic_exclude_DLoop  crude D-loop exclusion: keep positions 1000-15400
#   repeats_type           "direct", "inverted", "mirror" or "everted"
repeats_of_given_kmer<-function(mtGenome=s[250],kmer_length=13, clean=TRUE, return_repeat_seq=FALSE,
                                unique_repeats=TRUE,remove_overlapping=TRUE,return_repeat_coord=FALSE,
                                heuristic_exclude_DLoop=FALSE,repeats_type="direct"){
  mtg<-DNAString(paste(mtGenome))
  if(heuristic_exclude_DLoop==TRUE){
    message("excluding D loop by heuristic approach")
    mtg<-mtg[1000:15400]
  }
  if(clean==TRUE){
    mtg<-symbolClean(DNAStringSet(mtg))
    mtg<-paste(mtg)
  }else{ mtg<-paste(mtg) }
  # NOTE(review): the transformed strand is built from the original,
  # uncleaned/untrimmed mtGenome rather than from mtg - confirm intended.
  if(repeats_type=="inverted"){ mtg_inverted<-reverseComplement(mtGenome) }
  if(repeats_type=="mirror"){ mtg_inverted<-reverse(mtGenome) }
  if(repeats_type=="everted"){ mtg_inverted<-Biostrings::complement(mtGenome) }
  ## generate the kmer_length frame-shifted copies of the genome; chunking
  ## each of them every kmer_length bases yields every possible kmer.
  ## (the original looped over 0:kmer_length, so the index-0 assignment was
  ## silently dropped and one slot kept its numeric initialiser - fixed here)
  truncated_strings <- character(kmer_length)
  for(i in seq_len(kmer_length)){
    truncated_strings[i] <- substring(mtg, i, nchar(mtg)) ## start=i, end=mtDNA end
  }
  ### split each shifted copy every kmer_length bases; fixed_split() (external
  ### helper) returns a list, so collect the results in a list with [[ ]]
  ### instead of assigning a list into an atomic vector with [ ]
  substrings <- vector("list", length(truncated_strings))
  for(n in seq_along(truncated_strings)){
    substrings[[n]] <- fixed_split(truncated_strings[n], kmer_length)
  }
  ## exclude all duplicates and strings < kmer length; these are the
  ## *potential* repeats
  substrings_unique<-unique(unlist(substrings))
  substrings_unique<-substrings_unique[nchar(substrings_unique)>=kmer_length]
  cat("Identified X unique substrings that could be potential repeats:")
  cat(length(substrings_unique))
  coords_of_repeatsv2<-vector()
  ## collect start coordinates of candidates that really are repeated
  if(repeats_type=="direct"){
    print("checking direct repeats")
    for(x in seq_along(substrings_unique)){
      count_repeats<-str_count(mtg,substrings_unique[x]) ## occurrences in the genome
      if(count_repeats>=2){  # >= 2 hits on the same strand = direct repeat
        interm<-str_locate_all(mtg, substrings_unique[x])[[1]][,1]
        coords_of_repeatsv2<-c(coords_of_repeatsv2,interm)
      }
    }
  }
  print("length(substrings_unique)")
  print(length(substrings_unique))
  if(repeats_type=="inverted" || repeats_type=="everted" || repeats_type=="mirror"){
    print("checking non direct repeats")
    for(x in seq_along(substrings_unique)){
      # a single hit on the transformed strand suffices for a non-direct repeat
      count_repeats<-str_count(mtg_inverted,substrings_unique[x])
      if(count_repeats>=1){
        interm<-str_locate_all(mtg, substrings_unique[x])[[1]][,1]
        coords_of_repeatsv2<-c(coords_of_repeatsv2,interm)
      }
    }
  }
  ## no repeats at this length
  if(length(coords_of_repeatsv2)==0){
    print("no repeats found")
    if(return_repeat_seq==TRUE) return("")
    return(0)
  }
  ### exclude OVERLAPPING repeats (redundant ones survive, so some double
  ### counting can remain)
  sr<-sort(coords_of_repeatsv2)
  if(remove_overlapping==TRUE){
    counter=1
    while(counter<=length(sr)){
      ## keep starts at/before the current start, or starts clear of the
      ## window [current, current + kmer_length)
      sr<-sr[sr<=sr[counter] | sr>=(sr[counter]+kmer_length)]
      counter=counter+1
    }
  }
  ## recover the repeat sequences from the retained start positions
  return_vector<-DNAStringSet(rep("A",length(sr)))
  mtg<-DNAString(mtg)
  for(i in seq_along(sr)){
    return_vector[[i]]<-mtg[sr[i]:(sr[i]+(kmer_length-1))]
  }
  ## optionally drop redundant (identical) repeat sequences
  if(unique_repeats==TRUE){
    return_vector<-levels(as.factor(return_vector))
  }
  if(return_repeat_seq==TRUE){return(return_vector)}
  return(length(return_vector))## number of (deduplicated) repeats at this length
}
|
# Plot Italian COVID vaccination dose trends per region: second doses are
# shifted back by 21 rows so they can be compared against first doses.
# NOTE(review): absolute Windows path - not portable.
setwd("C:/Users/Roberto/Desktop/rstudio_default/covid/covid_vaccinazioni_ita/wd-vaccinazioni")
library(tidyverse)
library(data.table)
library(lubridate)
library(xts)
# NOTE(review): the CSV is read into vac_dosi_trend but never used below,
# while vac_all is used without being created in this script - presumably
# this line should assign to vac_all; confirm against the other scripts.
vac_dosi_trend <- fread("vac_reg_longer.csv")
vac_all
vac_lead <- vac_all %>%
#filter(data < today()) %>%
select(data, area, prima_dose, seconda_dose) %>%
group_by(area) %>%
# shift second doses 21 rows earlier per region - assumes one row per day
# and per area; TODO confirm
mutate(seconda_dose_meno21 = lead(seconda_dose, 21)) %>%
ungroup()
vac_lead_long = vac_lead %>%
pivot_longer(-c(data, area), names_to = "dose", values_to = "n") %>%
# NOTE(review): pivot_longer's names column cannot be NA, so this
# replace_na on `dose` is likely a no-op intended for `n` - confirm.
mutate(dose = replace_na(dose, 0),
data = as.POSIXct(data)) %>%
# human-readable Italian labels for the three dose series
mutate(dose = case_when(dose == "prima_dose" ~ "prima dose",
dose == "seconda_dose" ~ "seconda dose",
dose == "seconda_dose_meno21" ~ "seconda dose (-21 giorni)",
TRUE ~ ""))
class(vac_lead_long$data)
# NOTE(review): `vac` is created but never used afterwards.
vac <- as.xts(vac_lead_long)
# national trend: first doses vs back-shifted second doses
vac_lead_long %>%
filter(dose != "seconda dose" & area == "ITA") %>%
ggplot() +
aes(data, n, color = dose) +
geom_line(size = 2)
vac_lead_long %>%
filter(dose != "seconda dose" & area == "LAZ") %>%
ggplot() +
aes(data, n, color = dose) +
geom_line(size = 2) +
ggtitle("regione Lazio")
vac_lead_long %>%
filter(dose != "seconda dose" & area == "MOL") %>%
ggplot() +
aes(data, n, color = dose) +
geom_line(size = 2) +
ggtitle("regione Molise")
# per-region small multiples (national total excluded)
vac_lead_long %>%
filter(dose != "seconda dose" & area != "ITA") %>%
ggplot() +
aes(data, n, color = dose) +
geom_line(size = 2) +
facet_wrap(~ area, scales = "free_y")
| /code-vaccinazioni/old/vaccinazioni_trend_secondedosi_old.R | no_license | volperob/covid_vaccinazioni_ita | R | false | false | 1,580 | r | setwd("C:/Users/Roberto/Desktop/rstudio_default/covid/covid_vaccinazioni_ita/wd-vaccinazioni")
library(tidyverse)
library(data.table)
library(lubridate)
library(xts)
vac_dosi_trend <- fread("vac_reg_longer.csv")
vac_all
vac_lead <- vac_all %>%
#filter(data < today()) %>%
select(data, area, prima_dose, seconda_dose) %>%
group_by(area) %>%
mutate(seconda_dose_meno21 = lead(seconda_dose, 21)) %>%
ungroup()
vac_lead_long = vac_lead %>%
pivot_longer(-c(data, area), names_to = "dose", values_to = "n") %>%
mutate(dose = replace_na(dose, 0),
data = as.POSIXct(data)) %>%
mutate(dose = case_when(dose == "prima_dose" ~ "prima dose",
dose == "seconda_dose" ~ "seconda dose",
dose == "seconda_dose_meno21" ~ "seconda dose (-21 giorni)",
TRUE ~ ""))
class(vac_lead_long$data)
vac <- as.xts(vac_lead_long)
vac_lead_long %>%
filter(dose != "seconda dose" & area == "ITA") %>%
ggplot() +
aes(data, n, color = dose) +
geom_line(size = 2)
vac_lead_long %>%
filter(dose != "seconda dose" & area == "LAZ") %>%
ggplot() +
aes(data, n, color = dose) +
geom_line(size = 2) +
ggtitle("regione Lazio")
vac_lead_long %>%
filter(dose != "seconda dose" & area == "MOL") %>%
ggplot() +
aes(data, n, color = dose) +
geom_line(size = 2) +
ggtitle("regione Molise")
vac_lead_long %>%
filter(dose != "seconda dose" & area != "ITA") %>%
ggplot() +
aes(data, n, color = dose) +
geom_line(size = 2) +
facet_wrap(~ area, scales = "free_y")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimB.R
\name{giveTuples}
\alias{giveTuples}
\title{Gives tuples of B who check all constraints}
\usage{
giveTuples(face, pointX)
}
\arguments{
\item{face}{\code{data.table}, face for 3 countries: BE, DE and FR}
\item{pointX}{\code{data.table}, extreme points for 3 countries: BE, DE and FR}
}
\description{
Gives tuples of B who check all constraints
}
| /man/giveTuples.Rd | no_license | MarionLi0/antaresFlowbased | R | false | true | 430 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimB.R
\name{giveTuples}
\alias{giveTuples}
\title{Gives tuples of B who check all constraints}
\usage{
giveTuples(face, pointX)
}
\arguments{
\item{face}{\code{data.table}, face for 3 country, BE, DE anf FR}
\item{pointX}{\code{data.table}, extreme points for 3 country, BE, DE anf FR}
}
\description{
Gives tuples of B who check all constraints
}
|
# Build the sampled `individuals` dataset for the package:
# download the FEC 2016 individual-contributions bulk file, read it with the
# official header names, take a random sample of 1,000 rows, and save the
# result with usethis::use_data().  Run manually -- the download is large.
# have to run this
library(tidyverse)
# Download and unpack indiv16.zip into a temp directory; use_zip() returns
# the path of the unpacked directory.
dir <- usethis::use_zip(
"https://www.fec.gov/files/bulk-downloads/2016/indiv16.zip",
destdir = tempdir(), cleanup = TRUE
)
indiv_path <- fs::path(dir, "itcont.txt")
# The bulk data file has no header row; fetch the official header file and
# lower-case its column names to use as col_names below.
indiv_names <- read_csv("https://www.fec.gov/files/bulk-downloads/data_dictionaries/indiv_header_file.csv") %>%
names() %>%
tolower()
# Pipe-delimited bulk file; force the ID/memo/code columns to character so
# values such as leading zeros and alphanumeric codes are not mangled.
individuals_all <- read_delim(
indiv_path,
col_names = indiv_names,
col_types = cols(
zip_code = col_character(),
other_id = col_character(),
memo_cd = col_character(),
memo_text = col_character(),
sub_id = col_character(),
transaction_tp = col_character()
),
delim = "|"
)
# Drop bookkeeping columns, sample 1,000 contributions, and parse the
# transaction date (stored as mm/dd/yyyy).
# NOTE(review): there is no set.seed() before sample_n(), so the sampled
# dataset is not reproducible across runs -- confirm this is intentional.
individuals <- individuals_all %>%
select(-image_num, -sub_id, -memo_text, -memo_cd, -file_num) %>%
sample_n(1000) %>%
mutate(
transaction_dt = lubridate::mdy(transaction_dt)
)
usethis::use_data(individuals, overwrite = TRUE)
| /data-raw/process_individuals.R | no_license | baumer-lab/fec16 | R | false | false | 898 | r | # have to run this
library(tidyverse)
dir <- usethis::use_zip(
"https://www.fec.gov/files/bulk-downloads/2016/indiv16.zip",
destdir = tempdir(), cleanup = TRUE
)
indiv_path <- fs::path(dir, "itcont.txt")
indiv_names <- read_csv("https://www.fec.gov/files/bulk-downloads/data_dictionaries/indiv_header_file.csv") %>%
names() %>%
tolower()
individuals_all <- read_delim(
indiv_path,
col_names = indiv_names,
col_types = cols(
zip_code = col_character(),
other_id = col_character(),
memo_cd = col_character(),
memo_text = col_character(),
sub_id = col_character(),
transaction_tp = col_character()
),
delim = "|"
)
individuals <- individuals_all %>%
select(-image_num, -sub_id, -memo_text, -memo_cd, -file_num) %>%
sample_n(1000) %>%
mutate(
transaction_dt = lubridate::mdy(transaction_dt)
)
usethis::use_data(individuals, overwrite = TRUE)
|
# plot4.R -- draws a 2x2 panel (global active power, voltage, energy
# sub-metering, global reactive power vs. datetime) for 2007-02-01 and
# 2007-02-02 from the UCI household power consumption data, saved to
# plot4.png.
# Set local working parameters, unzip data file and read it in
setwd("/media/sf_Dropbox/Technology/R/coursera/4 Exploratory Data Analysis/assignment1/ExData_Plotting1/")
dat.file <- unz("./data/exdata-data-household_power_consumption.zip", "household_power_consumption.txt")
# NOTE(review): missing values in this dataset are coded as "?" in the raw
# text; read.table() is not told na.strings = "?", so any column containing
# "?" within the first 800000 rows is read as character -- confirm the two
# target dates are unaffected.
dat.0 <- read.table(dat.file, nrows = 800000, header = TRUE, quote = "\"", stringsAsFactors = FALSE, sep = ";", dec = ".") # nrows to keep it a bit smaller (size determined through looking at dataset - needs to encompass our dates)
# Subset only the data that is relevant to our interest
dat <- dat.0[dat.0$Date %in% c("1/2/2007", "2/2/2007"),]
# Converting date and time values (dates are day/month/year)
dat$datetime <- strptime(x = paste(dat$Date, dat$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
# Global Active Power as a numeric value
dat$Global_active_power <- as.numeric(dat$Global_active_power)
# Create a PNG with specified dimensions
png("plot4.png", width = 504, height = 504)
par(mfrow = c(2, 2)) # Setting parameters: 2x2 grid of plots, filled row-wise
# Plot Global Active Power vs datetime
plot(x = dat$datetime, y = dat$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
# Plot Voltage vs datetime
plot(x = dat$datetime, y = dat$Voltage, type = "l", ylab = "Voltage", xlab = "")
# Plot Energy sub metering vs datetime (three meters overlaid with a legend)
plot(x = dat$datetime, y = dat$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(x = dat$datetime, y = dat$Sub_metering_2, col = "red")
lines(x = dat$datetime, y = dat$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), box.lwd = 0, lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
# Plot Global reactive power vs datetime
plot(x = dat$datetime, y = dat$Global_reactive_power, type = "l", ylab = "Global reactive power", xlab = "")
dev.off()
| /plot4.R | no_license | thomasteoh/ExData_Plotting1 | R | false | false | 1,821 | r | # Set local working parameters, unzip data file and read it in
setwd("/media/sf_Dropbox/Technology/R/coursera/4 Exploratory Data Analysis/assignment1/ExData_Plotting1/")
dat.file <- unz("./data/exdata-data-household_power_consumption.zip", "household_power_consumption.txt")
dat.0 <- read.table(dat.file, nrows = 800000, header = TRUE, quote = "\"", stringsAsFactors = FALSE, sep = ";", dec = ".") # nrows to keep it a bit smaller (size determined through looking at dataset - needs to encompass our dates)
# Subset only the data that is relevant to our interest
dat <- dat.0[dat.0$Date %in% c("1/2/2007", "2/2/2007"),]
# Converting date and time values
dat$datetime <- strptime(x = paste(dat$Date, dat$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
# Global Active Power as a numeric value
dat$Global_active_power <- as.numeric(dat$Global_active_power)
# Create a PNG with specified dimensions
png("plot4.png", width = 504, height = 504)
par(mfrow = c(2, 2)) # Setting parameters
# Plot Global Active Power vs datetime
plot(x = dat$datetime, y = dat$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
# Plot Voltage vs datetime
plot(x = dat$datetime, y = dat$Voltage, type = "l", ylab = "Voltage", xlab = "")
# Plot Energy sub metering vs datetime
plot(x = dat$datetime, y = dat$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(x = dat$datetime, y = dat$Sub_metering_2, col = "red")
lines(x = dat$datetime, y = dat$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), box.lwd = 0, lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
# Plot Global reactive power vs datetime
plot(x = dat$datetime, y = dat$Global_reactive_power, type = "l", ylab = "Global reactive power", xlab = "")
dev.off()
|
test_that("multiplication works", {
  expect_equal(2 * 2, 4)
  # BUG FIX: the original expectation compared 1:3 against c(1, 4, 9),
  # which always fails.  This file is test-square.R, so the intended check
  # is that squaring 1:3 yields the first three square numbers.
  expect_equal((1:3)^2, c(1, 4, 9))
})
| /tests/testthat/test-square.R | no_license | xietian99/test_pkg1 | R | false | false | 94 | r | test_that("multiplication works", {
expect_equal(2 * 2, 4)
expect_equal(1:3, c(1,4,9))
})
|
# Geocode the 2019 addresses in the `rimini` data frame with the Google
# Maps API (via ggmap) and write the result, with longitude/latitude
# columns added, to rimini_geocoded.csv.
# NOTE(review): assumes `rimini` (input data frame with an indirizzo_2019
# column) and `key` (a Google API key) are already defined in the calling
# environment -- confirm where they are created.
#create test df
rimini_geocoded <- rimini
# Pick one random address to sanity-check the API before geocoding all rows
address <- base::sample(x = rimini_geocoded$indirizzo_2019,
size = 1,
replace = TRUE)
#register google key
ggmap::register_google(key = key)
#extract geocoded addresses with ggmap (test)
ggmap::geocode(location = address)
# Geocode every address; map_df row-binds the one-row results into a
# data frame aligned with rimini_geocoded's rows
geocoded <- purrr::map_df(.x = rimini_geocoded$indirizzo_2019, .f = ggmap::geocode)
#add longitude and latitude data to df
rimini_geocoded$lng <- geocoded$lon
rimini_geocoded$lat <- geocoded$lat
write.csv(rimini_geocoded, "rimini_geocoded.csv", row.names = F)
| /geocode_test.R | no_license | vchiara/google_api_address | R | false | false | 594 | r | #create test df
rimini_geocoded <- rimini
address <- base::sample(x = rimini_geocoded$indirizzo_2019,
size = 1,
replace = TRUE)
#register google key
ggmap::register_google(key = key)
#extract geocoded addresses with ggmap (test)
ggmap::geocode(location = address)
geocoded <- purrr::map_df(.x = rimini_geocoded$indirizzo_2019, .f = ggmap::geocode)
#add longitude and latitude data to df
rimini_geocoded$lng <- geocoded$lon
rimini_geocoded$lat <- geocoded$lat
write.csv(rimini_geocoded, "rimini_geocoded.csv", row.names = F)
|
#!/usr/bin/env Rscript
# Run with 'Rscript data_cleaning_and_exploration.r'
## Data Cleaning and Standardization:
## 1. Inital Exploration
## a. Data set dimensions (number of rows and columns)
## b. Summary of variables
## c. Identify potential problems
## d. Quick visualization
## 1. Why visualize? Look at Anscombeโs quartet for an example.
## 2. Observe outlying values
## 3. Observe and understand the shape of the distributions
## 2. Fixing errors
## a. Remove irrelevant columns or rows
## b. Identify and deal with missing values
## c. Look for and remove incorrect data (impossible values, duplicates, typos, and extra spaces)
## d. Errors vs. Artifacts
## 3. Standardize values
## a. Scaling (changing the range of data)
## b. Normalization
## 4. Dimensionality reduction: Can you get rid of any columns?
## a. High ratio of missing values (based on a determined threshold)
## b. High correlation with other variable(s)
## c. Various methods discussed here
## 5. Repeat visualization
## 6. Write a cleaned data frame to a .csv file
## 7. Convert your df to a tibble
##
## Data Exploration:
## 1. Descriptive Stats
## 2. Exploratory Data Analysis (EDA)
## 3. Visual presentation
# Load the dataset
# Identifies missing data more accurately. Why is this?
library(readr)
companies <- read_csv("/Users/mbc400/Box Sync/GitHub/data-cleaning-with-r/datasets/company_dataset.csv")
##
## 1. Initial Exploration
## a. Summary of variables
## b. Data set dimensions (number of rows and columns)
## c. Identify potential problems
## d. Quick visualization
## 1. Why visualize? Look at Anscombeโs quartet for an example.
## 2. Observe outlying values
## 3. Observe and understand the shape of the distributions
# Look at the data frame
View(companies)
# Summarize the data
summary(companies)
# Look at the data structure
str(companies)
# Type of data structure in R
class(companies)
# Data set dimensions
dim(companies)
# Quick Visual Exploration
#
# This helps us get an idea about the distribution of values for different variables and lets us know if we have any outliers
hist(companies$Gross_Income_2013)
boxplot(companies$Num_widgets)
## 2. Fixing Errors
##
## a. Remove irrelevant columns or rows
##
# Add some content here
## 2. Fixing Errors
##
## b. Identify and deal with missing values
##
## IMPORTANT NOTE: Verify that all missing values are actually missing. If you notice more missing values
## than expected, make sure there wasn't a problem at the data import step.
###
# Count the number of missing values
###
# Checking for missing values; returns a logical data frame with TRUE if value is missing, FALSE otherwise
is.na(companies)
# Returns the number of missing values in each column
sapply(companies, function(x) sum(is.na(x)))
# Returns just columns with missing values
missing_vals <- sapply(companies, function(x) sum(is.na(x)))
missing_vals[missing_vals > 0]
# List all records that have any missing values
companies[!complete.cases(companies),]
###
# Decide what to do with missing values
###
# Ignore
#
# If you choose to ignore missing values remember to use `na.rm = TRUE` for aggregate functions or you will
# get `NA` as a result:
mean(companies$Num_widgets, na.rm=TRUE)
# Exclude
#
# Return all rows with missing values in at least one column
companies[!complete.cases(companies),]
# Create a new data frame containing only rows with no missing data
companies_new <- na.omit(companies)
# Replace
#
# Eliminate all missing values from a data frame
na.omit(companies)
# Eliminate all missing values from all values in a column
na.omit(companies$Status)
# Replace all NA's in a data frame with '0'
companies[is.na(companies)] <- 0
# Replace all NA's in a data frame column with '0'
companies$Num_widgets[is.na(companies$Num_widgets)] <- 0
# Replace all NA's in a column with the median value of the column.
# BUG FIX: median() returns NA when the vector still contains NA unless
# na.rm = TRUE is given, so the original line replaced NAs with NA (a no-op).
companies$Num_widgets[is.na(companies$Num_widgets)] <- median(companies$Num_widgets, na.rm = TRUE)
## 2. Fixing Errors
##
## c. Look for and remove incorrect data (impossible values, duplicates, typos, and extra spaces)
##
# Recode impossible values to missing
#
# If you know that a particular range of values for a variable is invalid, you can set those values
# as missing so as not to throw off your analysis later on:
# Recode negative (impossible) values in the Num_widgets column to NA
companies$Num_widgets[companies$Num_widgets < 0] <- NA
# Duplicates
#
# Are any of the rows in our data frame duplicated?
# Returns the array index of the first duplicate if any, otherwise 0.
anyDuplicated(companies)
# a Dufus & Dingus Ltd. 500000 1 yes 1/1/14 10:00
# b Snooty Pants Fashion silver 100000 5 no 1/1/14 13:24
# b Snooty Pants Fashion silver 100000 5 no 1/1/14 13:24
# b Snooty Pants Fashion silver 100000 5 no 1/1/14 13:24
# c Harry Ham Handlers gold 123409 67 no 1/1/14 15:05
# Logical vector that shows duplicates
duplicated(companies)
# Row indeces of duplicates
# https://stackoverflow.com/questions/12495345/find-indices-of-duplicated-rows
which(duplicated(companies) | duplicated(companies[nrow(companies):1, ],fromLast = TRUE)[nrow(companies):1])
# Works too. Why use the above command?
which(duplicated(companies))
# Create a new data frame of only unique rows
unique_companies <- unique(companies)
# Same using the dplyr package
library(dplyr)
unique_companies <- companies %>% distinct()
# Remove duplicated rows based on duplication in a specific column
companies %>% distinct(Account_Name, .keep_all = TRUE)
# For more details you can use a table to find duplicated values in columns that should contain unique
# values only. Then you can look at the associated full records to see what kind of duplication it is
# (e.g., full row vs. mistakenly entered Account_Name)
# Lists the values of 'Account_Name' and the number of times they occur
occurrences <- data.frame(table(companies$Account_Name))
# tells you which ids occurred more than once.
occurrences[occurrences$Freq > 1,]
# Returns the records that contain the duplicated 'Account_Name'
companies[companies$Account_Name %in% occurrences$Var1[occurrences$Freq > 1],]
# Typos
#
# Identifiy typos in status categories...
table(companies$Status, useNA = "ifany")
# then fix them
companies$Status[companies$Status == "bornze"] <- "bronze"
# Same for the 'Complete' column
table(companies$Complete, useNA = "ifany")
companies$Complete[companies$Complete == "yess"] <- "yes"
companies$Complete[companies$Complete == "NOT"] <- "no"
# Removing Extra Spaces
#
# Eliminate all leading and trailing white space from every value in the data frame
# sapply returns a matrix, so we have to cast companies_no_ws back as a data frame.
companies_no_ws <- as.data.frame(sapply(companies, function(x) trimws(x)), stringsAsFactors = FALSE)
# Works on columns
# Removes leading and trailing white space from specific columns
library(stringr)
companies$Complete <- str_trim(companies$Complete)
# Remove extra spaces within account names
companies$Account_Name <- gsub("\\s+"," ", companies$Account_Name)
# Fixing case issues with categorical data
#
# Look at the different status categories again...
table(companies$Status, useNA = "ifany")
# ...and change them all to lower case.
# tolower() is already vectorized over character vectors, so the original
# sapply() loop was unnecessary; the result is identical.
companies$Status <- tolower(companies$Status)
# check your work
table(companies$Status, useNA = "ifany")
## 2. Fixing Errors
##
## d. Errors vs. Artifacts
##
# Sometimes during the import, organization, or cleaning stages of a project you inadvertantly introduce
# artifacts into your data. For example, when you import data in Excel it sometimes chooses the wrong data
# type for one or more of your columns (assigning a column with eight digit numeric values as type 'date').
# Keep this in mind so you don't carry these artifacts into your analysis steps.
## 3. Standardize values
##
## a. Scaling (changing the range of data)
##
# https://www.thekerneltrip.com/statistics/when-scale-my-data/
# Scaling vs. normalization: https://www.quora.com/When-should-you-perform-feature-scaling-and-mean-normalization-on-the-given-data-What-are-the-advantages-of-these-techniques
# If you want to compare columns that are on a different scale, you need to change both sets of values to
# use a common scale. Algorithms such as SVM and KNN treat a change of '1' in a value with the same importance.
# https://stackoverflow.com/questions/15215457/standardize-data-columns-in-r
dat <- data.frame(x = rnorm(10, 30, .2), y = runif(10, 3, 5))
scaled.dat <- scale(dat)
# check that we get mean of 0 and sd of 1
colMeans(scaled.dat) # faster version of apply(scaled.dat, 2, mean)
apply(scaled.dat, 2, sd)
## 3. Standardize values
##
## b. Normalization (changing the shape of the distribution of the data)
##
# Needed when running algorithms that assume a normal distribution such as t-test, ANOVA, linear regression,
# LDA, and Gaussian Naive Bayes.
# We can make a "right-skewed" variable in the following manner:
# [a] drawing from a (standard)-normal distribution, and then:
# [b] exponentiating the results
x <- exp(rnorm(100,0,1)) # Combined [a] and [b]
hist(x) # Plot the original right-skewed variable;
hist(log(x)) # plot the logged-version of the variable.
## 4. Dimensionality reduction: Can you get rid of any columns?
## a. High ratio of missing values (based on a determined threshold)
## b. High correlation with other variable(s)
## c. Various methods discussed here
## 5. Repeat visualization
library(ggplot2)
ggplot(companies, aes(x = Status, fill = Complete)) + geom_bar()
## 6. Write a cleaned data frame to a .csv file
write.csv(companies, "/Users/mbc400/Box Sync/GitHub/data-cleaning-with-r/output/companies_cleaned.csv", row.names = FALSE)
## 7. Convert your data frame to a tibble
# With the 'dplyr' package, you can convert your data frame to a tibble.
# BUG FIX: the original called as_tibble(companies_cleaned), but
# `companies_cleaned` is not defined until two statements later -- the
# cleaned data lives in `companies` at this point.
companies_tbl <- as_tibble(companies)
companies_tbl
# Check out the newly created tibble with 'glimpse'
glimpse(companies_tbl)
# Convert back to a data frame if you like
companies_cleaned <- as.data.frame(companies_tbl)
# Median 2013 gross income across all companies
# (na.rm = TRUE guards against any remaining missing values)
median_gross <- companies %>%
  summarize(median_gross = median(Gross_Income_2013, na.rm = TRUE))
| /data_cleaning_and_exploration.r | permissive | AMRI-Ibrahim/data-cleaning-with-r | R | false | false | 10,373 | r | #!/usr/bin/env Rscript
# Run with 'Rscript data_cleaning_and_exploration.r'
## Data Cleaning and Standardization:
## 1. Inital Exploration
## a. Data set dimensions (number of rows and columns)
## b. Summary of variables
## c. Identify potential problems
## d. Quick visualization
## 1. Why visualize? Look at Anscombeโs quartet for an example.
## 2. Observe outlying values
## 3. Observe and understand the shape of the distributions
## 2. Fixing errors
## a. Remove irrelevant columns or rows
## b. Identify and deal with missing values
## c. Look for and remove incorrect data (impossible values, duplicates, typos, and extra spaces)
## d. Errors vs. Artifacts
## 3. Standardize values
## a. Scaling (changing the range of data)
## b. Normalization
## 4. Dimensionality reduction: Can you get rid of any columns?
## a. High ratio of missing values (based on a determined threshold)
## b. High correlation with other variable(s)
## c. Various methods discussed here
## 5. Repeat visualization
## 6. Write a cleaned data frame to a .csv file
## 7. Convert your df to a tibble
##
## Data Exploration:
## 1. Descriptive Stats
## 2. Exploratory Data Analysis (EDA)
## 3. Visual presentation
# Load the dataset
# Identifies missing data more accurately. Why is this?
library(readr)
companies <- read_csv("/Users/mbc400/Box Sync/GitHub/data-cleaning-with-r/datasets/company_dataset.csv")
##
## 1. Initial Exploration
## a. Summary of variables
## b. Data set dimensions (number of rows and columns)
## c. Identify potential problems
## d. Quick visualization
## 1. Why visualize? Look at Anscombeโs quartet for an example.
## 2. Observe outlying values
## 3. Observe and understand the shape of the distributions
# Look at the data frame
View(companies)
# Summarize the data
summary(companies)
# Look at the data structure
str(companies)
# Type of data structure in R
class(companies)
# Data set dimensions
dim(companies)
# Quick Visual Exploration
#
# This helps us get an idea about the distribution of values for different variables and lets us know if we have any outliers
hist(companies$Gross_Income_2013)
boxplot(companies$Num_widgets)
## 2. Fixing Errors
##
## a. Remove irrelevant columns or rows
##
# Add some content here
## 2. Fixing Errors
##
## b. Identify and deal with missing values
##
## IMPORTANT NOTE: Verify that all missing values are actually missing. If you notice more missing values
## than expected, make sure there wasn't a problem at the data import step.
###
# Count the number of missing values
###
# Checking for missing values; returns a logical data frame with TRUE if value is missing, FALSE otherwise
is.na(companies)
# Returns the number of missing values in each column
sapply(companies, function(x) sum(is.na(x)))
# Returns just columns with missing values
missing_vals <- sapply(companies, function(x) sum(is.na(x)))
missing_vals[missing_vals > 0]
# List all records that have any missing values
companies[!complete.cases(companies),]
###
# Decide what to do with missing values
###
# Ignore
#
# If you choose to ignore missing values remember to use `na.rm = TRUE` for aggregate functions or you will
# get `NA` as a result:
mean(companies$Num_widgets, na.rm=TRUE)
# Exclude
#
# Return all rows with missing values in at least one column
companies[!complete.cases(companies),]
# Create a new data frame containing only rows with no missing data
companies_new <- na.omit(companies)
# Replace
#
# Eliminate all missing values from a data frame
na.omit(companies)
# Eliminate all missing values from all values in a column
na.omit(companies$Status)
# Replace all NA's in a data frame with '0'
companies[is.na(companies)] <- 0
# Replace all NA's in a data frame column with '0'
companies$Num_widgets[is.na(companies$Num_widgets)] <- 0
# Replace all NA's in a column with the median value of the column
companies$Num_widgets[is.na(companies$Num_widgets)] <- median(companies$Num_widgets)
## 2. Fixing Errors
##
## c. Look for and remove incorrect data (impossible values, duplicates, typos, and extra spaces)
##
# Recode impossible values to missing
#
# If you know that a particular range of values for a variable is invalid, you can set those values
# as missing so as not to throw off your analysis later on:
# Recode negative (impossible) values in the Num_widgets column to NA
companies$Num_widgets[companies$Num_widgets < 0] <- NA
# Duplicates
#
# Are any of the rows in our data frame duplicated?
# Returns the array index of the first duplicate if any, otherwise 0.
anyDuplicated(companies)
# a Dufus & Dingus Ltd. 500000 1 yes 1/1/14 10:00
# b Snooty Pants Fashion silver 100000 5 no 1/1/14 13:24
# b Snooty Pants Fashion silver 100000 5 no 1/1/14 13:24
# b Snooty Pants Fashion silver 100000 5 no 1/1/14 13:24
# c Harry Ham Handlers gold 123409 67 no 1/1/14 15:05
# Logical vector that shows duplicates
duplicated(companies)
# Row indeces of duplicates
# https://stackoverflow.com/questions/12495345/find-indices-of-duplicated-rows
which(duplicated(companies) | duplicated(companies[nrow(companies):1, ],fromLast = TRUE)[nrow(companies):1])
# Works too. Why use the above command?
which(duplicated(companies))
# Create a new data frame of only unique rows
unique_companies <- unique(companies)
# Same using the dplyr package
library(dplyr)
unique_companies <- companies %>% distinct()
# Remove duplicated rows based on duplication in a specific column
companies %>% distinct(Account_Name, .keep_all = TRUE)
# For more details you can use a table to find duplicated values in columns that should contain unique
# values only. Then you can look at the associated full records to see what kind of duplication it is
# (e.g., full row vs. mistakenly entered Account_Name)
# Lists the values of 'Account_Name' and the number of times they occur
occurrences <- data.frame(table(companies$Account_Name))
# tells you which ids occurred more than once.
occurrences[occurrences$Freq > 1,]
# Returns the records that contain the duplicated 'Account_Name'
companies[companies$Account_Name %in% occurrences$Var1[occurrences$Freq > 1],]
# Typos
#
# Identifiy typos in status categories...
table(companies$Status, useNA = "ifany")
# then fix them
companies$Status[companies$Status == "bornze"] <- "bronze"
# Same for the 'Complete' column
table(companies$Complete, useNA = "ifany")
companies$Complete[companies$Complete == "yess"] <- "yes"
companies$Complete[companies$Complete == "NOT"] <- "no"
# Removing Extra Spaces
#
# Eliminate all leading and trailing white space from every value in the data frame
# sapply returns a matrix, so we have to cast companies_no_ws back as a data frame.
companies_no_ws <- as.data.frame(sapply(companies, function(x) trimws(x)), stringsAsFactors = FALSE)
# Works on columns
# Removes leading and trailing white space from specific columns
library(stringr)
companies$Complete <- str_trim(companies$Complete)
# Remove extra spaces within account names
companies$Account_Name <- gsub("\\s+"," ", companies$Account_Name)
# Fixing case issues with categorical data
#
# Look at the different status categories again...
table(companies$Status, useNA = "ifany")
# ...and change them all to lower case
companies$Status <- sapply(companies$Status, tolower)
# check your work
table(companies$Status, useNA = "ifany")
## 2. Fixing Errors
##
## d. Errors vs. Artifacts
##
# Sometimes during the import, organization, or cleaning stages of a project you inadvertantly introduce
# artifacts into your data. For example, when you import data in Excel it sometimes chooses the wrong data
# type for one or more of your columns (assigning a column with eight digit numeric values as type 'date').
# Keep this in mind so you don't carry these artifacts into your analysis steps.
## 3. Standardize values
##
## a. Scaling (changing the range of data)
##
# https://www.thekerneltrip.com/statistics/when-scale-my-data/
# Scaling vs. normalization: https://www.quora.com/When-should-you-perform-feature-scaling-and-mean-normalization-on-the-given-data-What-are-the-advantages-of-these-techniques
# If you want to compare columns that are on a different scale, you need to change both sets of values to
# use a common scale. Algorithms such as SVM and KNN treat a change of '1' in a value with the same importance.
# https://stackoverflow.com/questions/15215457/standardize-data-columns-in-r
dat <- data.frame(x = rnorm(10, 30, .2), y = runif(10, 3, 5))
scaled.dat <- scale(dat)
# check that we get mean of 0 and sd of 1
colMeans(scaled.dat) # faster version of apply(scaled.dat, 2, mean)
apply(scaled.dat, 2, sd)
## 3. Standardize values
##
## b. Normalization (changing the shape of the distribution of the data)
##
# Needed when running algorithms that assume a normal distribution such as t-test, ANOVA, linear regression,
# LDA, and Gaussian Naive Bayes.
# We can make a "right-skewed" variable in the following manner:
# [a] drawing from a (standard)-normal distribution, and then:
# [b] exponentiating the results
x <- exp(rnorm(100,0,1)) # Combined [a] and [b]
hist(x) # Plot the original right-skewed variable;
hist(log(x)) # plot the logged-version of the variable.
## 4. Dimensionality reduction: Can you get rid of any columns?
## a. High ratio of missing values (based on a determined threshold)
## b. High correlation with other variable(s)
## c. Various methods discussed here
## 5. Repeat visualization
library(ggplot2)
ggplot(companies, aes(x = Status, fill = Complete)) + geom_bar()
## 6. Write a cleaned data frame to a .csv file
write.csv(companies, "/Users/mbc400/Box Sync/GitHub/data-cleaning-with-r/output/companies_cleaned.csv", row.names = FALSE)
## 7. Convert your data frame to a tibble
# With the 'dplyr' package, you can convert your data from to a tibble
companies_tbl <- as_tibble(companies_cleaned)
companies_tbl
# Check out the newly created tibble with 'glimpse'
glimpse(companies_tbl)
# Convert back to a data frame if you like
companies_cleaned <- as.data.frame(companies_tbl)
# test
median_gross <- companies %>%
summarize (median_gross = median(Gross_Income_2013))
|
/ๆข็ดขๆงๆฐๆฎๅๆR/Germancreditshort.R | no_license | yechafengyun/Lesson-Code | R | false | false | 1,132 | r | ||
\name{add.Inds}
\alias{add.Inds}
\title{Function to add missing individuals to a pedigree}
\description{
Function add.Inds() adds missing individuals to a pedigree and returns the
complete pedigree as a data.frame with the same headers as the
original pedigree. Remember to check for errors beforehand with
function \code{errors.ped}. Unknown parents should be coded as NA.
}
\usage{
add.Inds(ped)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ped}{\code{data.frame} with three columns: id,id parent1,id parent2 }
}
\value{
data.frame of three columns with identical header as input.
}
\author{Albart Coster, Albart.Coster@wur.nl}
\seealso{ \code{\link{orderPed}}}
\examples{
ID <- 3:5
DAM <- c(1,1,3)
SIRE <- c(2,2,4)
pedigree <- data.frame(ID,DAM,SIRE)
pedigree <- add.Inds(pedigree)
}
\keyword{utilities}
| /man/add.Inds.Rd | no_license | cran/pedigree | R | false | false | 850 | rd | \name{add.Inds}
\alias{add.Inds}
\title{Function to add missing individuals to a pedigree}
\description{
Function add.Inds() adds missing individuals to a pedigree and returns the
complete pedigree as a data.frame with the same headers as the
original pedigree. Remeber to check for errors beforehand with
function \code{errors.ped}. Unknown parents should be coded as NA.
}
\usage{
add.Inds(ped)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ped}{\code{data.frame} with three columns: id,id parent1,id parent2 }
}
\value{
data.frame of three columns with identical header as input.
}
\author{Albart Coster, Albart.Coster@wur.nl}
\seealso{ \code{\link{orderPed}}}
\examples{
ID <- 3:5
DAM <- c(1,1,3)
SIRE <- c(2,2,4)
pedigree <- data.frame(ID,DAM,SIRE)
pedigree <- add.Inds(pedigree)
}
\keyword{utilities}
|
library(lubridate)
library(BH)  # NOTE(review): BH ships Boost C++ headers only; attaching it loads no R functions -- confirm this is needed
##########################
# Actual intervention dates (Scott County, Indiana HIV outbreak)
first_dx_date = mdy("11/18/2014")
investigation_begin_date = mdy("01/23/2015")
intvxdate_actual_date = mdy("03/15/2015") # actual intervention date
emergency_declared_date = mdy("03/26/2015")
clinic_opened_date = mdy("03/31/2015") # do we use this?
sep_started_date = mdy("04/04/2015")
##########################
# setup and scenario dates
zero_date = mdy("04/01/2011") # Beginning of HIV incidence calculations
intvx_actual_date = first_dx_date
intvx_mid_date = mdy("01/01/2013")
intvx_early_date = zero_date+2
end_date = mdy("08/11/2015") # this is last date from incidence data
dateseq = seq(zero_date, end_date, by="day")
###########################
# translated days: all dates expressed as integer day offsets from zero_date
first_dx_day = as.numeric(first_dx_date - zero_date)
investigation_begin_day = as.numeric(investigation_begin_date - zero_date)
emergency_declared_day = as.numeric(emergency_declared_date - zero_date)
clinic_opened_day = as.numeric(clinic_opened_date - zero_date)
sep_started_day = as.numeric(sep_started_date - zero_date)
zero_day = 0
intvx_actual_day = as.numeric(intvx_actual_date - zero_date)
intvx_mid_day = as.numeric(intvx_mid_date - zero_date)
intvx_early_day = as.numeric(intvx_early_date - zero_date)
end_day = as.numeric(end_date - zero_date)
dayseq = zero_day:end_day
ndays = length(dayseq)
#######################
# load scott county diagnosis data:
# weekly case counts -> cumulative count, carried forward to a daily step
# function via constant interpolation; days before the first record get 0
casesbyweek = read.csv("data/scott_county_cases_by_week.csv",stringsAsFactors=FALSE)
cumcases = cumsum(casesbyweek$Cases)
casesbyweek$Date = mdy(casesbyweek$Date)
casesbyweek$day = as.numeric(casesbyweek$Date - zero_date)
dx = approx(casesbyweek$day, cumcases, xout=dayseq, method="constant")$y
dx[is.na(dx)] = 0
########################################
# load estimated incidence (points digitized from published infection curves)
incidence_extracted = read.csv("data/extracted_infection_curves.csv", header=FALSE)
names(incidence_extracted) = c("Date.Decimal", "Infections")
incidence_extracted$date = date_decimal(incidence_extracted$Date.Decimal)
incidence_extracted$day = difftime(incidence_extracted$date, zero_date, units="days")
incidence_extracted$Infections = pmax(incidence_extracted$Infections,0)  # clamp digitization noise below zero
ord = order(incidence_extracted$day)
incidence_extracted = incidence_extracted[ord,]
n_time_points = dim(incidence_extracted)[1]
infections_lo_tmp = rep(0, n_time_points)
infections_hi_tmp = rep(NA, n_time_points)
infections_lo_tmp[1] = incidence_extracted$Infections[1]
infections_hi_tmp[1] = incidence_extracted$Infections[1]
# First pass: hi = running maximum up to point i (monotone upper envelope);
# lo = minimum over points i..n (suffix minimum, includes point i)
for(i in 2:n_time_points) {
infections_hi_tmp[i] = max(c(infections_hi_tmp[1:(i-1)], incidence_extracted$Infections[i]))
infections_lo_tmp[i] = min(incidence_extracted$Infections[i:n_time_points])
}
# Second pass shifts the suffix minimum left by one position, so lo[i] becomes
# the minimum over strictly LATER time points.
# NOTE(review): this makes the lower envelope exclude the current point --
# confirm that is intended rather than an off-by-one
for(i in 1:(n_time_points-1)) {
infections_lo_tmp[i] = min(infections_lo_tmp[(i+1):n_time_points])
}
# Carry both envelopes onto the daily grid via constant interpolation
infections_lo = approx(incidence_extracted$day, infections_lo_tmp, xout=dayseq, method="constant")$y
infections_hi = approx(incidence_extracted$day, infections_hi_tmp, xout=dayseq, method="constant")$y
#########################################
# create month markers for plot axes (full study span and outbreak detail window)
monthseq = mdy(c("04/01/2011", "01/01/2012", "01/01/2013", "01/01/2014", "01/01/2015", "01/01/2016"))
monthdayseq = difftime(monthseq, zero_date, units="days")
monthlabseq = format(monthseq, "%B %Y")
detail_monthseq = seq(mdy("11/10/2014"), mdy("09/01/2015"), by="month")
detail_monthdayseq = difftime(detail_monthseq, zero_date, units="days")
detail_monthlabseq = format(detail_monthseq, "%b %Y")
#########################################
# create raw data frame: one row per day with cumulative diagnoses and the
# lower/upper incidence envelopes
dat = data.frame(day=dayseq, date=dateseq, cases=dx, infections_lo=infections_lo, infections_hi=infections_hi)
| /indiana-hiv-load.R | permissive | fcrawford/indiana-hiv | R | false | false | 3,788 | r | library(lubridate)
# NOTE(review): this block is a verbatim duplicate of the same file content
# stored earlier in this dataset dump (the dataset repeats each file's text;
# the leading library(lubridate) line is fused into the preceding metadata row).
library(BH)  # NOTE(review): BH ships Boost C++ headers only; attaching it loads no R functions
##########################
# Actual intervention dates
first_dx_date = mdy("11/18/2014")
investigation_begin_date = mdy("01/23/2015")
intvxdate_actual_date = mdy("03/15/2015") # actual intervention date
emergency_declared_date = mdy("03/26/2015")
clinic_opened_date = mdy("03/31/2015") # do we use this?
sep_started_date = mdy("04/04/2015")
##########################
# setup and scenario dates
zero_date = mdy("04/01/2011") # Beginning of HIV incidence calculations
intvx_actual_date = first_dx_date
intvx_mid_date = mdy("01/01/2013")
intvx_early_date = zero_date+2
end_date = mdy("08/11/2015") # this is last date from incidence data
dateseq = seq(zero_date, end_date, by="day")
###########################
# translated days: dates as integer offsets from zero_date
first_dx_day = as.numeric(first_dx_date - zero_date)
investigation_begin_day = as.numeric(investigation_begin_date - zero_date)
emergency_declared_day = as.numeric(emergency_declared_date - zero_date)
clinic_opened_day = as.numeric(clinic_opened_date - zero_date)
sep_started_day = as.numeric(sep_started_date - zero_date)
zero_day = 0
intvx_actual_day = as.numeric(intvx_actual_date - zero_date)
intvx_mid_day = as.numeric(intvx_mid_date - zero_date)
intvx_early_day = as.numeric(intvx_early_date - zero_date)
end_day = as.numeric(end_date - zero_date)
dayseq = zero_day:end_day
ndays = length(dayseq)
#######################
# load scott county diagnosis data:
casesbyweek = read.csv("data/scott_county_cases_by_week.csv",stringsAsFactors=FALSE)
cumcases = cumsum(casesbyweek$Cases)
casesbyweek$Date = mdy(casesbyweek$Date)
casesbyweek$day = as.numeric(casesbyweek$Date - zero_date)
dx = approx(casesbyweek$day, cumcases, xout=dayseq, method="constant")$y
dx[is.na(dx)] = 0
########################################
# load estimated incidence
incidence_extracted = read.csv("data/extracted_infection_curves.csv", header=FALSE)
names(incidence_extracted) = c("Date.Decimal", "Infections")
incidence_extracted$date = date_decimal(incidence_extracted$Date.Decimal)
incidence_extracted$day = difftime(incidence_extracted$date, zero_date, units="days")
incidence_extracted$Infections = pmax(incidence_extracted$Infections,0)
ord = order(incidence_extracted$day)
incidence_extracted = incidence_extracted[ord,]
n_time_points = dim(incidence_extracted)[1]
infections_lo_tmp = rep(0, n_time_points)
infections_hi_tmp = rep(NA, n_time_points)
infections_lo_tmp[1] = incidence_extracted$Infections[1]
infections_hi_tmp[1] = incidence_extracted$Infections[1]
# hi = running maximum (upper envelope); lo = suffix minimum, then shifted
# left one position by the second loop (minimum over strictly later points)
for(i in 2:n_time_points) {
infections_hi_tmp[i] = max(c(infections_hi_tmp[1:(i-1)], incidence_extracted$Infections[i]))
infections_lo_tmp[i] = min(incidence_extracted$Infections[i:n_time_points])
}
for(i in 1:(n_time_points-1)) {
infections_lo_tmp[i] = min(infections_lo_tmp[(i+1):n_time_points])
}
infections_lo = approx(incidence_extracted$day, infections_lo_tmp, xout=dayseq, method="constant")$y
infections_hi = approx(incidence_extracted$day, infections_hi_tmp, xout=dayseq, method="constant")$y
#########################################
# create month markers
monthseq = mdy(c("04/01/2011", "01/01/2012", "01/01/2013", "01/01/2014", "01/01/2015", "01/01/2016"))
monthdayseq = difftime(monthseq, zero_date, units="days")
monthlabseq = format(monthseq, "%B %Y")
detail_monthseq = seq(mdy("11/10/2014"), mdy("09/01/2015"), by="month")
detail_monthdayseq = difftime(detail_monthseq, zero_date, units="days")
detail_monthlabseq = format(detail_monthseq, "%b %Y")
#########################################
# create raw data frame
dat = data.frame(day=dayseq, date=dateseq, cases=dx, infections_lo=infections_lo, infections_hi=infections_hi)
# Define two functions for segmenting an EVI time series
#
# The functions are:
# segmentEVI: Take an EVI time series, clean it and
# segment it into linear components
# assignPhenophase: Take the output of segmentEVI and identify three
# phenophases that obey qualitative conditions for each phase.
# The three phases are: Start of season (SOS),
# peak of season (POS) and end of season (EOS)
#
# Jon yearsley (Jon.Yearsley@ucd.ie)
# Dec 2021
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +++++++++++++ Start of function definitions ++++++++++++++++++++++++++
# *************************************
# Function to perform segmentation on the whole time series ----
# Fit piecewise-linear (segmented) models to an EVI time series.
#
# Arguments:
#   d_sub               data.frame with at least columns 'doy' (day of year)
#                       and 'evi' (EVI value)
#   nBreaks             number of breakpoints requested from segmented()
#   useHighQuality      NOTE(review): never referenced inside this function --
#                       confirm whether it can be removed
#   knots               basis dimension 'k' for mgcv::s(); -1 = mgcv default
#   use_raw             if TRUE, also fit a segmented model to the raw EVI
#   sd_filter_threshold points more than this many GAM standard errors BELOW
#                       the initial smooth are dropped before refitting
#                       (only low outliers are removed, high ones are kept)
#
# Returns a list of five elements:
#   [[1]]     segmented fit on raw EVI (NULL when use_raw=FALSE or fit failed)
#   [[2]]     segmented fit on GAM-smoothed EVI (NULL on failure)
#   filtered  logical vector: which rows of d_sub were retained
#   d_sub     input data with an added 'evi_smooth' column
#   m_gam2    the refitted GAM used for smoothing (NULL if the first GAM failed)
segmentEVI = function(d_sub,
                      nBreaks = 4,
                      useHighQuality=FALSE,
                      knots=-1,
                      use_raw = FALSE,
                      sd_filter_threshold=4) {
  # This function fits a segmented linear model to raw data and smoothed data;
  # the smoothed data gives fewer NA's and more consistent results.
  require(segmented)
  require(mgcv)
  # +++++++++++++++++++++++++++++++++++++++++++++++++
  # Perform segmentation on data smoothed using a GAM
  # Fit initial GAM; any error or warning yields NULL instead of aborting
  m_gam = tryCatch(gam(evi~s(doy, bs='cr',k=knots),
                       data=d_sub,
                       gamma=1), # Gamma controls over/under fitting
                   error = function(e) {
                     NULL
                   },
                   warning = function(e) {
                     NULL
                   })
  if (!is.null(m_gam)) {
    # Remove EVI data points that are more than sd_filter_threshold SE below the prediction from m_gam
    tmp = predict(m_gam, se.fit = TRUE, newdata = d_sub)
    evi_deviation = ((d_sub$evi-tmp$fit)/tmp$se.fit)
    filter_ind = evi_deviation>-sd_filter_threshold
    # # Option to visualise the filtered data
    # plot(d_sub$doy, d_sub$evi)
    # points(d_sub$doy[filter_ind], d_sub$evi[filter_ind], pch=20)
    # Smooth data after removing data more than sd_filter_threshold se below prediction
    m_gam2 = gam(evi~s(doy, bs='cr',k=knots),
                 data=d_sub[filter_ind,],
                 gamma=1) # Gamma controls over/under fitting
    # Add smoothed predictions to the data frame (filtered-out rows stay NA)
    d_sub$evi_smooth = NA
    d_sub$evi_smooth[filter_ind] = predict(m_gam2)
    # Segmenting the smoothed predictions; again, error/warning -> NULL
    m_smooth = lm(evi_smooth~doy, data=d_sub[filter_ind,]) # Create base lm model
    m_seg_smooth = tryCatch(segmented(m_smooth,
                                      seg.Z = ~doy,
                                      npsi = nBreaks,
                                      control=seg.control(it.max=50,
                                                          fix.npsi=TRUE,
                                                          n.boot=15,
                                                          display=FALSE)),
                            error = function(e) {
                              NULL
                            },
                            warning = function(e) {
                              NULL
                            })
  } else {
    # Initial GAM failed: no smoothing possible, keep every data point
    m_seg_smooth = NULL
    m_gam2 = NULL
    filter_ind = rep(TRUE, times=nrow(d_sub))
  }
  if (use_raw) {
    # +++++++++++++++++++++++++++++++++++++++++++++++++
    # Perform segmentation on raw evi data (same filtered rows as above)
    m_raw = lm(evi~doy, data=d_sub[filter_ind,])
    m_seg_raw = tryCatch(segmented(m_raw,
                                   seg.Z = ~doy,
                                   npsi=nBreaks,
                                   control=seg.control(it.max=50,
                                                       fix.npsi=TRUE,
                                                       n.boot=15,
                                                       display=FALSE)),
                         error = function(e) {
                           NULL
                         },
                         warning = function(e) {
                           NULL
                         })
  } else {
    m_seg_raw = NULL
  }
  # Outputs are segmented model using raw data, segmented model using smoothed data,
  # indices for data kept in analysis, the original data, the gam used for smoothing
  return(list(m_seg_raw, m_seg_smooth, filtered=filter_ind, d_sub=d_sub, m_gam2=m_gam2))
}
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Function to assign phenophases by applying conditions for SOS, POS and EOS -------
assignPhenophase = function(input_data) {
  # A function to assign phenophases based upon criteria for Start of season (SOS),
  # peak of season (POS) and end of season (EOS).
  # input_data is a data frame with one row per breakpoint, assumed to be in
  # increasing time order, and variables:
  #     t          time of break point estimate (doy)
  #     slope      slope of segment following the breakpoint
  #     slopeprev  slope of segment preceding the breakpoint
  # output_data is a data frame (same number of rows) with variables:
  #     phase    estimated phenophase per breakpoint: 1=SOS, 2=POS, 3=EOS;
  #              1.5/2.5 mark extra breakpoints lying between two phases,
  #              4 marks breakpoints after EOS, NA = unassigned
  #     warning  TRUE if SOS is after 1st June or EOS after end of year
  output_data = data.frame(phase = rep(NA, times=nrow(input_data)),
                           warning = rep(FALSE, times=nrow(input_data)))
  # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  # Phenophase 1 (SOS) candidate condition:
  #   starts in the year (t > 0)
  #   at the first positive slope
  #   (a warning is raised later if it falls after 1st June)
  test_cond_phase1 = input_data$t>0 & input_data$slope>0
  # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  # Phenophase 2 (POS) candidate condition:
  #   a maximum within the year (slope positive before, negative after)
  test_cond_phase2 = input_data$t>0 &
    input_data$t<366 &
    input_data$slopeprev>0 &
    input_data$slope<0
  # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  # Phenophase 3 (EOS) candidate condition:
  #   a negative slope before
  #   a slope after that is less steep (or maybe positive) compared to before
  #   (a warning is raised later if it falls beyond the year)
  test_cond_phase3 = input_data$t>0 &
    input_data$slopeprev<0 &
    input_data$slopeprev< input_data$slope
  # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  # Find one break point index for each phenophase (NA = none found)
  phase1_ind = NA
  phase2_ind = NA
  phase3_ind = NA
  # Phenophase 1 (SOS) ++++++++++++++++++++++++++++++++++++++++++
  if (any(test_cond_phase1)) {
    phase1_ind = which(test_cond_phase1)[1] # Take first valid phase 1 (SOS) break point
    # The SOS breakpoint itself gets phase 1; any earlier breakpoints count
    # backwards as 0, -1, -2, ... (2 - rev(1:k) ends in 1 at position k)
    output_data$phase[1:phase1_ind] = 2-rev(c(1:phase1_ind))
  }
  # Phenophase 2 (POS) ++++++++++++++++++++++++++++++++++++++++++
  # Pick phenophase 2 only if it occurs after phenophase 1
  if (any(test_cond_phase2) & is.na(phase1_ind)) {
    # No phenophase 1 defined
    phase2_ind = which(test_cond_phase2)[1]
  } else if (!is.na(phase1_ind)) {
    if (any(test_cond_phase2[-c(1:phase1_ind)]) ) {
      # Phenophase 1 defined: first POS candidate strictly after it
      phase2_ind = which(test_cond_phase2[-c(1:phase1_ind)])[1] + phase1_ind
    }
  }
  # Adjust any breakpoints between phenophases 1 and 2 to be a value of 1.5
  if (!is.na(phase2_ind)) {
    output_data$phase[phase2_ind] = 2 # Set first valid phase 2 breakpoint (POS)
    if (!is.na(phase1_ind) & phase2_ind>(phase1_ind+1)) {
      # Adjust breakpoints between phase 1 and 2 to be 1.5
      output_data$phase[(phase1_ind+1):(phase2_ind-1)] = 1.5
    }
    if (is.na(phase1_ind)) {
      # If no phase 1, set all phases before 2 to NA
      output_data$phase[1:(phase2_ind-1)] = NA
    }
  }
  # Phenophase 3 (EOS) ++++++++++++++++++++++++++++++++++++++++++
  # Pick phenophase 3 only if it occurs after phenophases 1 and 2
  if (any(test_cond_phase3) & is.na(phase2_ind) & is.na(phase1_ind)) {
    # No phenophases 1 & 2 defined
    phase3_ind = which(test_cond_phase3)[1]
  }
  if (!is.na(phase2_ind)) {
    # Phenophase 2 defined: first EOS candidate strictly after it
    if (any(test_cond_phase3[-c(1:phase2_ind)])) {
      phase3_ind = which(test_cond_phase3[-c(1:phase2_ind)])[1] + phase2_ind
    }
  } else if (!is.na(phase1_ind)) {
    # No phenophase 2 but phenophase 1 defined
    if (any(test_cond_phase3[-c(1:phase1_ind)])) {
      phase3_ind = which(test_cond_phase3[-c(1:phase1_ind)])[1] + phase1_ind
    }
  }
  if (!is.na(phase3_ind)) {
    output_data$phase[phase3_ind] = 3 # Set first valid phase 3 breakpoint (EOS)
    if (!is.na(phase2_ind) & phase3_ind>(phase2_ind+1)) {
      # Adjust any breakpoints between phenophases 2 and 3 to be a value of 2.5
      output_data$phase[(phase2_ind+1):(phase3_ind-1)] = 2.5
    }
    if (is.na(phase2_ind) & !is.na(phase1_ind) & phase3_ind - phase1_ind>1) {
      # If no phase 2, set all phases between phase 1 and phase 3 to NA
      output_data$phase[(phase1_ind+1):(phase3_ind-1)] = NA
    }
    if (is.na(phase2_ind) & is.na(phase1_ind)) {
      # If no phase 1 or phase 2, set all phases before 3 to NA
      output_data$phase[1:(phase3_ind-1)] = NA
    }
    if (phase3_ind<nrow(output_data)) {
      # Set all breaks after phase 3 to be phase 4
      output_data$phase[-c(1:phase3_ind)] = 4
    }
  }
  # Flag warnings +++++++++++++++++++++++
  # If phase one is later than day 152 (1st June on non leap years) then raise a warning
  if (is.finite(phase1_ind)) {
    if (input_data$t[phase1_ind]>=152) {
      output_data$warning[phase1_ind] = TRUE
    }
  }
  # If phase three is later than day 365 (31st December on non leap years) then raise a warning
  if (is.finite(phase3_ind)) {
    if (input_data$t[phase3_ind]>365) {
      output_data$warning[phase3_ind] = TRUE
    }
  }
  return(output_data)
}
| /RemoteSensingAnalysis/segmentation_functions.R | no_license | DrJonYearsley/Phenograss | R | false | false | 10,237 | r | # Define two functions for segmenting an EVI time series
#
# The functions are:
# segmentEVI: Take an EVI time series, clean it and
# segment it into linear components
# assignPhenophase: Take the output of segmentEVI and identify three
# phenophases that obey qualitative conditions for each phase.
# The three phases are: Start of season (SOS),
# peak of season (POS) and end of season (EOS)
#
# Jon yearsley (Jon.Yearsley@ucd.ie)
# Dec 2021
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +++++++++++++ Start of function definitions ++++++++++++++++++++++++++
# *************************************
# Function to perform segmentation on the whole time series ----
# NOTE(review): this block is a verbatim duplicate of the same file content
# stored earlier in this dataset dump (the dataset repeats each file's text).
# NOTE(review): the 'useHighQuality' parameter is never referenced in the body.
segmentEVI = function(d_sub,
                      nBreaks = 4,
                      useHighQuality=FALSE,
                      knots=-1,
                      use_raw = FALSE,
                      sd_filter_threshold=4) {
  # This function fits a segmented linear model to raw data and smoothed data;
  # the smoothed data gives fewer NA's and more consistent results.
  require(segmented)
  require(mgcv)
  # +++++++++++++++++++++++++++++++++++++++++++++++++
  # Perform segmentation on data smoothed using a GAM
  # Fit initial GAM; any error or warning yields NULL instead of aborting
  m_gam = tryCatch(gam(evi~s(doy, bs='cr',k=knots),
                       data=d_sub,
                       gamma=1), # Gamma controls over/under fitting
                   error = function(e) {
                     NULL
                   },
                   warning = function(e) {
                     NULL
                   })
  if (!is.null(m_gam)) {
    # Remove EVI data points that are more than sd_filter_threshold SE below the prediction from m_gam
    tmp = predict(m_gam, se.fit = TRUE, newdata = d_sub)
    evi_deviation = ((d_sub$evi-tmp$fit)/tmp$se.fit)
    filter_ind = evi_deviation>-sd_filter_threshold
    # # Option to visualise the filtered data
    # plot(d_sub$doy, d_sub$evi)
    # points(d_sub$doy[filter_ind], d_sub$evi[filter_ind], pch=20)
    # Smooth data after removing data more than sd_filter_threshold se below prediction
    m_gam2 = gam(evi~s(doy, bs='cr',k=knots),
                 data=d_sub[filter_ind,],
                 gamma=1) # Gamma controls over/under fitting
    # Add smoothed predictions to the data frame (filtered-out rows stay NA)
    d_sub$evi_smooth = NA
    d_sub$evi_smooth[filter_ind] = predict(m_gam2)
    # Segmenting the smoothed predictions; again, error/warning -> NULL
    m_smooth = lm(evi_smooth~doy, data=d_sub[filter_ind,]) # Create base lm model
    m_seg_smooth = tryCatch(segmented(m_smooth,
                                      seg.Z = ~doy,
                                      npsi = nBreaks,
                                      control=seg.control(it.max=50,
                                                          fix.npsi=TRUE,
                                                          n.boot=15,
                                                          display=FALSE)),
                            error = function(e) {
                              NULL
                            },
                            warning = function(e) {
                              NULL
                            })
  } else {
    # Initial GAM failed: no smoothing possible, keep every data point
    m_seg_smooth = NULL
    m_gam2 = NULL
    filter_ind = rep(TRUE, times=nrow(d_sub))
  }
  if (use_raw) {
    # +++++++++++++++++++++++++++++++++++++++++++++++++
    # Perform segmentation on raw evi data (same filtered rows as above)
    m_raw = lm(evi~doy, data=d_sub[filter_ind,])
    m_seg_raw = tryCatch(segmented(m_raw,
                                   seg.Z = ~doy,
                                   npsi=nBreaks,
                                   control=seg.control(it.max=50,
                                                       fix.npsi=TRUE,
                                                       n.boot=15,
                                                       display=FALSE)),
                         error = function(e) {
                           NULL
                         },
                         warning = function(e) {
                           NULL
                         })
  } else {
    m_seg_raw = NULL
  }
  # Outputs are segmented model using raw data, segmented model using smoothed data,
  # indices for data kept in analysis, the original data, the gam used for smoothing
  return(list(m_seg_raw, m_seg_smooth, filtered=filter_ind, d_sub=d_sub, m_gam2=m_gam2))
}
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Function to assign phenophases by applying conditions for SOS, POS and EOS -------
# NOTE(review): this block is a verbatim duplicate of the same file content
# stored earlier in this dataset dump (the dataset repeats each file's text).
assignPhenophase = function(input_data) {
  # A function to assign phenophases based upon criteria for Start of season (SOS),
  # peak of season (POS) and end of season (EOS).
  # input_data is a data frame with one row per breakpoint (in time order):
  #     t          time of break point estimate (doy)
  #     slope      slope of segment following the breakpoint
  #     slopeprev  slope of segment preceding the breakpoint
  # output_data is a data frame with variables:
  #     phase    estimated phenophases (1=SOS, 2=POS, 3=EOS; 1.5/2.5 mark
  #              breakpoints between phases, 4 marks breakpoints after EOS)
  #     warning  TRUE if SOS is after 1st June or EOS after end of year
  output_data = data.frame(phase = rep(NA, times=nrow(input_data)),
                           warning = rep(FALSE, times=nrow(input_data)))
  # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  # Phenophase 1
  # starts in the year
  # at the first positive slope
  # a warning if it is after 1st June
  test_cond_phase1 = input_data$t>0 & input_data$slope>0
  # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  # Phenophase 2
  # is the first maximum (i.e. slope is positive before and negative after)
  test_cond_phase2 = input_data$t>0 &
    input_data$t<366 &
    input_data$slopeprev>0 &
    input_data$slope<0
  # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  # Phenophase 3
  #
  # has a negative slope before
  # a slope after that is less steep (or maybe positive) compared to before
  # A warning if it is not within the year
  test_cond_phase3 = input_data$t>0 &
    input_data$slopeprev<0 &
    input_data$slopeprev< input_data$slope
  # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
  # Find possible break points for each phenophase
  phase1_ind = NA
  phase2_ind = NA
  phase3_ind = NA
  # Phenophase 1 (SOS) ++++++++++++++++++++++++++++++++++++++++++
  if (any(test_cond_phase1)) {
    phase1_ind = which(test_cond_phase1)[1] # Take first valid phase 1 (SOS) break point
    # SOS breakpoint gets phase 1; earlier breakpoints count down 0, -1, ...
    output_data$phase[1:phase1_ind] = 2-rev(c(1:phase1_ind))
  }
  # Phenophase 2 (POS) ++++++++++++++++++++++++++++++++++++++++++
  # Pick phenophase 2 if it is after phenophase 1
  if (any(test_cond_phase2) & is.na(phase1_ind)) {
    # No phenophase 1 defined
    phase2_ind = which(test_cond_phase2)[1]
  } else if (!is.na(phase1_ind)) {
    if (any(test_cond_phase2[-c(1:phase1_ind)]) ) {
      # Phenophase 1 defined
      phase2_ind = which(test_cond_phase2[-c(1:phase1_ind)])[1] + phase1_ind
    }
  }
  # Adjust any breakpoints between phenophases 1 and 2 to be a value of 1.5
  if (!is.na(phase2_ind)) {
    output_data$phase[phase2_ind] = 2 # Set first valid phase 2 breakpoint (POS)
    if (!is.na(phase1_ind) & phase2_ind>(phase1_ind+1)) {
      # Adjust breakpoints between phase 1 and 2 to be 1.5
      output_data$phase[(phase1_ind+1):(phase2_ind-1)] = 1.5
    }
    if (is.na(phase1_ind)) {
      # If no phase 1 set all phases before 2 to NA
      output_data$phase[1:(phase2_ind-1)] = NA
    }
  }
  # Phenophase 3 (EOS) ++++++++++++++++++++++++++++++++++++++++++
  # Pick phenophase 3 if it is after phenophase 1 and phenophase 2
  if (any(test_cond_phase3) & is.na(phase2_ind) & is.na(phase1_ind)) {
    # No phenophases 1 & 2 defined
    phase3_ind = which(test_cond_phase3)[1]
  }
  if (!is.na(phase2_ind)) {
    # Phenophase 2 defined
    if (any(test_cond_phase3[-c(1:phase2_ind)])) {
      phase3_ind = which(test_cond_phase3[-c(1:phase2_ind)])[1] + phase2_ind
    }
  } else if (!is.na(phase1_ind)) {
    # No phenophase 2 but phenophase 1 defined
    if (any(test_cond_phase3[-c(1:phase1_ind)])) {
      phase3_ind = which(test_cond_phase3[-c(1:phase1_ind)])[1] + phase1_ind
    }
  }
  if (!is.na(phase3_ind)) {
    output_data$phase[phase3_ind] = 3 # Set first valid phase 3 breakpoint (EOS)
    if (!is.na(phase2_ind) & phase3_ind>(phase2_ind+1)) {
      # Adjust any breakpoints between phenophases 2 and 3 to be a value of 2.5
      output_data$phase[(phase2_ind+1):(phase3_ind-1)] = 2.5
    }
    if (is.na(phase2_ind) & !is.na(phase1_ind) & phase3_ind - phase1_ind>1) {
      # If no phase 2, set all phases between phase 1 and phase 3 to NA
      output_data$phase[(phase1_ind+1):(phase3_ind-1)] = NA
    }
    if (is.na(phase2_ind) & is.na(phase1_ind)) {
      # If no phase 1 or phase 2, set all phases before 3 to NA
      output_data$phase[1:(phase3_ind-1)] = NA
    }
    if (phase3_ind<nrow(output_data)) {
      # Set all breaks after phase 3 to be phase 4
      output_data$phase[-c(1:phase3_ind)] = 4
    }
  }
  # Flag warnings +++++++++++++++++++++++
  # If phase one is later than day 152 (1st June on non leap years) then raise a warning
  if (is.finite(phase1_ind)) {
    if (input_data$t[phase1_ind]>=152) {
      output_data$warning[phase1_ind] = TRUE
    }
  }
  # If phase three is later than day 365 (31st December on non leap years) then raise a warning
  if (is.finite(phase3_ind)) {
    if (input_data$t[phase3_ind]>365) {
      output_data$warning[phase3_ind] = TRUE
    }
  }
  return(output_data)
}
|
## Load household survey data from an Excel workbook, recode the voivodeship
## variable using the codebook sheet, and plot the share of each settlement
## class (klm) within every voivodeship as stacked horizontal bars.
## Requires the XLConnect, dplyr and ggplot2 packages and the workbook
## WIRDS/datasets/gospodarstwa.xls relative to the working directory.
library(XLConnect)
wb <- loadWorkbook("WIRDS/datasets/gospodarstwa.xls")
gosp <- readWorksheet(wb, sheet = "gospodarstwa")
# Codebook sheet; fixed mojibake in the sheet name ("wariantรณw" was a
# mis-encoded "wariantów" and would not match the workbook's sheet)
cechy <- readWorksheet(wb, sheet = "opis wariantów cech")
# Rows 8-23, columns 2-3 hold the voivodeship code -> name lookup table
woj <- cechy[8:23, 2:3]
gosp$woj <- factor(gosp$woj, levels = woj$Col2, labels = woj$Col3)
library(dplyr)
# as_tibble() replaces the deprecated tbl_df(); behaviour is the same
gosp <- as_tibble(gosp)
library(ggplot2)
# Per-voivodeship share of each settlement class, one facet per voivodeship
gosp %>%
  count(klm, woj) %>%
  group_by(woj) %>%
  mutate(procent = n/sum(n)) %>%
  ggplot(data = .,
         aes(x = "",
             y = procent,
             group = klm,
             fill = klm)) +
  geom_bar(stat = "identity",
           col = "black") +
  facet_wrap(~ woj, ncol = 1) +
  coord_flip() +
  xlab("Województwa") +  # fixed mis-encoded Polish axis label
  theme_bw() +
  ggtitle("tytuł")       # fixed mis-encoded placeholder title
| /WIRDS/homework/praca_domowa_1_woropaj.R | no_license | tomasznierychly/Dydaktyka | R | false | false | 702 | r | library(XLConnect)
## Duplicate copy of the same script stored twice in this dataset dump
## (its leading library(XLConnect) call is fused into the preceding metadata
## row). Same mojibake fixes applied as to the first copy.
wb <- loadWorkbook("WIRDS/datasets/gospodarstwa.xls")
gosp <- readWorksheet(wb, sheet = "gospodarstwa")
# Fixed mojibake in the sheet name: "wariantรณw" was a mis-encoded "wariantów"
cechy <- readWorksheet(wb, sheet = "opis wariantów cech")
# Rows 8-23, columns 2-3 hold the voivodeship code -> name lookup table
woj <- cechy[8:23, 2:3]
gosp$woj <- factor(gosp$woj, levels = woj$Col2, labels = woj$Col3)
library(dplyr)
# as_tibble() replaces the deprecated tbl_df(); behaviour is the same
gosp <- as_tibble(gosp)
library(ggplot2)
# Per-voivodeship share of each settlement class, one facet per voivodeship
gosp %>%
  count(klm, woj) %>%
  group_by(woj) %>%
  mutate(procent = n/sum(n)) %>%
  ggplot(data = .,
         aes(x = "",
             y = procent,
             group = klm,
             fill = klm)) +
  geom_bar(stat = "identity",
           col = "black") +
  facet_wrap(~ woj, ncol = 1) +
  coord_flip() +
  xlab("Województwa") +  # fixed mis-encoded Polish axis label
  theme_bw() +
  ggtitle("tytuł")       # fixed mis-encoded placeholder title
|
#1) plot a heatmap for the bottom 100 expressed genes in euploid shoots on chr1A
# is the pattern similar to for the top 100 expressed genes?
#2) plot a heatmap for both roots and shoots for chr1A,
# use the top 100 expressed genes in euploid shoots
# add a dendrogram for the columns. You will need to allow the columns to re-order using the Colv argument
# which samples are more related?
#3) CHALLENGE plot the heatmap from #2) using the "aheatmap" function in NMF
# can you add on an annotation column showing which samples are roots, and which are shoots?
# HINT: you will need to make an annotation dataframe and pass it to annCol
# http://nmf.r-forge.r-project.org/vignettes/heatmaps.pdf part 1.4 shows an example
| /Exercises/heatmap_exercises.R | no_license | mudiboevans/module2_R_biostats | R | false | false | 719 | r | #1) plot a heatmap for the bottom 100 expressed genes in euploid shoots on chr1A
# is the pattern similar to for the top 100 expressed genes?
#2) plot a heatmap for both roots and shoots for chr1A,
# use the top 100 expressed genes in euploid shoots
# add a dendrogram for the columns. You will need to allow the columns to re-order using the Colv argument
# which samples are more related?
#3) CHALLENGE plot the heatmap from #2) using the "aheatmap" function in NMF
# can you add on an annotation column showing which samples are roots, and which are shoots?
# HINT: you will need to make an annotation dataframe and pass it to annCol
# http://nmf.r-forge.r-project.org/vignettes/heatmaps.pdf part 1.4 shows an example
|
# Auto-extracted example script for the 'plot.mvr' help page of the 'pls'
# package (see the ### Name / ### Title header below).
library(pls)
### Name: plot.mvr
### Title: Plot Method for MVR objects
### Aliases: plot.mvr
### Keywords: regression multivariate hplot
### ** Examples
# Fit a 9-component principal component regression with cross-validation
data(yarn)
nir.pcr <- pcr(density ~ NIR, ncomp = 9, data = yarn, validation = "CV")
## Not run: 
##D plot(nir.pcr, ncomp = 5) # Plot of cross-validated predictions
##D plot(nir.pcr, "scores") # Score plot
##D plot(nir.pcr, "loadings", comps = 1:3) # The three first loadings
##D plot(nir.pcr, "coef", ncomp = 5) # Coefficients
##D plot(nir.pcr, "val") # RMSEP curves
##D plot(nir.pcr, "val", val.type = "MSEP", estimate = "CV") # CV MSEP
## End(Not run)
| /data/genthat_extracted_code/pls/examples/plot.mvr.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 614 | r | library(pls)
# NOTE(review): duplicate of the same extracted example stored twice in this
# dataset dump (its leading library(pls) call is fused into the metadata row).
### Name: plot.mvr
### Title: Plot Method for MVR objects
### Aliases: plot.mvr
### Keywords: regression multivariate hplot
### ** Examples
# Fit a 9-component principal component regression with cross-validation
data(yarn)
nir.pcr <- pcr(density ~ NIR, ncomp = 9, data = yarn, validation = "CV")
## Not run: 
##D plot(nir.pcr, ncomp = 5) # Plot of cross-validated predictions
##D plot(nir.pcr, "scores") # Score plot
##D plot(nir.pcr, "loadings", comps = 1:3) # The three first loadings
##D plot(nir.pcr, "coef", ncomp = 5) # Coefficients
##D plot(nir.pcr, "val") # RMSEP curves
##D plot(nir.pcr, "val", val.type = "MSEP", estimate = "CV") # CV MSEP
## End(Not run)
|
##############################
# Clean Console and Environment and Load Required Library
##############################
cat("\014")      # clears the RStudio console
rm(list = ls())  # NOTE(review): wipes the global environment -- discouraged in scripts
library("gbm")
##############################
# Activate Working Directory
##############################
# NOTE(review): machine-specific absolute path; fails on any other machine
WD<-setwd("C:\\Users\\Nikitas Marios\\Desktop\\Data Mining Techniques\\Assignment_2\\FinalTrainingAndForecasting")
##############################
# Load Trained Models and Testing Dataset
##############################
# Provides the fitted gbm models LM_i_j (i = Pclass 1..6, j = consumer 1..4)
# and the 24 per-segment testing data frames combined below
load("TrainedLamdaMart.RData")
load("TestingData.RData")
cd_test<-rbind(testing_filtered_data_1_1,testing_filtered_data_1_2,testing_filtered_data_1_3,testing_filtered_data_1_4,
testing_filtered_data_2_1,testing_filtered_data_2_2,testing_filtered_data_2_3,testing_filtered_data_2_4,
testing_filtered_data_3_1,testing_filtered_data_3_2,testing_filtered_data_3_3,testing_filtered_data_3_4,
testing_filtered_data_4_1,testing_filtered_data_4_2,testing_filtered_data_4_3,testing_filtered_data_4_4,
testing_filtered_data_5_1,testing_filtered_data_5_2,testing_filtered_data_5_3,testing_filtered_data_5_4,
testing_filtered_data_6_1,testing_filtered_data_6_2,testing_filtered_data_6_3,testing_filtered_data_6_4)
##############################
# Prediction
##############################
# For every (Pclass i, consumer j) pair: score the matching test rows with
# model LM_i_j at the CV-selected number of trees (gbm.perf), attach the
# srch_id/prop_id columns, sort by srch_id and descending score, and store
# the result as Prediction_i_j in the global environment via assign()
for (i in seq(1,6,1) ){
for (j in seq(1,4,1)){
Pr_name <- paste("Prediction",i,j, sep = "_")
predictionTest<-data.frame(predict(get(paste("LM",i,j,sep="_")),cd_test[cd_test$Pclass==i&cd_test$consumer==j,],
gbm.perf(get(paste("LM",i,j,sep="_")),method='cv')))
predictionTest<-cbind(cd_test$srch_id[cd_test$Pclass==i&cd_test$consumer==j],cd_test$prop_id[cd_test$Pclass==i&cd_test$consumer==j],predictionTest)
names(predictionTest)[3]<-paste("Prediction",i,j,sep="_")
names(predictionTest)[2]<-paste("prop_id")
names(predictionTest)[1]<-paste("srch_id")
predictionTest<-predictionTest[order(predictionTest[,1],-predictionTest[,3]),]
assign(Pr_name, predictionTest)
}} | /assignment-2/R/LamdaMART R Code/LambdaMART_P_C_class_Forecasting.R | no_license | arashparnia/Data-Mining | R | false | false | 2,112 | r | ##############################
#Clean Console and Environment and Load Required Library
##############################
# "\014" is the form-feed character; printing it clears the console.
cat("\014")
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace -- only
# tolerable because this file is meant to run as a standalone analysis script.
rm(list = ls())
# gbm provides predict() and gbm.perf() for the trained LambdaMART models.
library("gbm")
##############################
# Activate Working Directory
##############################
# NOTE(review): hard-coded absolute path; adapt when running on another machine.
WD<-setwd("C:\\Users\\Nikitas Marios\\Desktop\\Data Mining Techniques\\Assignment_2\\FinalTrainingAndForecasting")
##############################
# Load Trained Models and Testing Dataset
##############################
# TrainedLamdaMart.RData is expected to provide the fitted models LM_<i>_<j>
# (retrieved below via get()); TestingData.RData the 24 test subsets
# testing_filtered_data_<i>_<j>, i = price class 1..6, j = consumer type 1..4.
load("TrainedLamdaMart.RData")
load("TestingData.RData")
# Stack all per-segment test subsets into a single table.
cd_test<-rbind(testing_filtered_data_1_1,testing_filtered_data_1_2,testing_filtered_data_1_3,testing_filtered_data_1_4,
               testing_filtered_data_2_1,testing_filtered_data_2_2,testing_filtered_data_2_3,testing_filtered_data_2_4,
               testing_filtered_data_3_1,testing_filtered_data_3_2,testing_filtered_data_3_3,testing_filtered_data_3_4,
               testing_filtered_data_4_1,testing_filtered_data_4_2,testing_filtered_data_4_3,testing_filtered_data_4_4,
               testing_filtered_data_5_1,testing_filtered_data_5_2,testing_filtered_data_5_3,testing_filtered_data_5_4,
               testing_filtered_data_6_1,testing_filtered_data_6_2,testing_filtered_data_6_3,testing_filtered_data_6_4)
##############################
# Prediction
##############################
# For every (price class i, consumer type j) pair: score the matching slice of
# cd_test with model LM_i_j at the CV-optimal number of boosting iterations
# (gbm.perf(..., method = 'cv')), attach the srch_id/prop_id keys, sort by
# search id (ascending) and predicted score (descending), and store the ranked
# result in a variable named Prediction_i_j.
for (i in seq(1,6,1) ){
for (j in seq(1,4,1)){
Pr_name <- paste("Prediction",i,j, sep = "_")
predictionTest<-data.frame(predict(get(paste("LM",i,j,sep="_")),cd_test[cd_test$Pclass==i&cd_test$consumer==j,],
                                   gbm.perf(get(paste("LM",i,j,sep="_")),method='cv')))
predictionTest<-cbind(cd_test$srch_id[cd_test$Pclass==i&cd_test$consumer==j],cd_test$prop_id[cd_test$Pclass==i&cd_test$consumer==j],predictionTest)
names(predictionTest)[3]<-paste("Prediction",i,j,sep="_")
names(predictionTest)[2]<-paste("prop_id")
names(predictionTest)[1]<-paste("srch_id")
# Rank properties within each search: best predicted score first.
predictionTest<-predictionTest[order(predictionTest[,1],-predictionTest[,3]),]
assign(Pr_name, predictionTest)
}}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/which_true_onwards.R
\name{which_true_onwards}
\alias{which_true_onwards}
\title{At which point are all values true onwards}
\usage{
which_true_onwards(x)
}
\arguments{
\item{x}{A logical vector. \code{NA} values are not permitted.}
}
\value{
The position of the first \code{TRUE} value in \code{x} at which all
the following values are \code{TRUE}.
}
\description{
At which point are all values true onwards
}
\examples{
which_true_onwards(c(TRUE, FALSE, TRUE, TRUE, TRUE))
}
| /hutilscpp/man/which_true_onwards.Rd | no_license | akhikolla/InformationHouse | R | false | true | 578 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/which_true_onwards.R
\name{which_true_onwards}
\alias{which_true_onwards}
\title{At which point are all values true onwards}
\usage{
which_true_onwards(x)
}
\arguments{
\item{x}{A logical vector. \code{NA} values are not permitted.}
}
\value{
The position of the first \code{TRUE} value in \code{x} at which all
the following values are \code{TRUE}.
}
\description{
At which point are all values true onwards
}
\examples{
which_true_onwards(c(TRUE, FALSE, TRUE, TRUE, TRUE))
}
|
library(ggplot2)
library(png)
library(grid)
library(hexSticker)
#' Build a ggplot2 polygon layer covering a radial segment of a hexagon.
#'
#' @param x x offset of the hexagon's center
#'
#' @param y y offset of the hexagon's center
#'
#' @param radius the radius (side length) of the hexagon.
#'
#' @param from_radius from where should the segment be drawn? defaults to the center
#'
#' @param to_radius to where should the segment be drawn? defaults to the radius
#'
#' @param from_angle from which angle should we draw?
#'
#' @param to_angle to which angle should we draw?
#'
#' @param fill fill color
#'
#' @param color line color
#'
#' @param size size of the line
#'
#' @return A \code{geom_polygon} layer that can be added to a ggplot.
hex_segment2 <- function(x = 1, y = 1, radius = 1, from_radius = 0,
                         to_radius = radius, from_angle = 30, to_angle = 90,
                         fill = NA, color = NA, size = 1.2) {
  ## Angles are supplied in degrees; cos()/sin() expect radians.
  from_angle <- from_angle * pi / 180
  to_angle <- to_angle * pi / 180
  ## The four polygon corners: inner and outer radius at each of the two
  ## bounding angles, ordered so the vertices form a closed quadrilateral.
  coords <- data.frame(x = x + c(from_radius * cos(from_angle),
                                 to_radius * cos(from_angle),
                                 to_radius * cos(to_angle),
                                 from_radius * cos(to_angle)),
                       y = y + c(from_radius * sin(from_angle),
                                 to_radius * sin(from_angle),
                                 to_radius * sin(to_angle),
                                 from_radius * sin(to_angle))
                       )
  ## aes_() is deprecated in ggplot2; the .data pronoun is the supported way
  ## to refer to columns of the layer's own data.
  geom_polygon(aes(x = .data$x, y = .data$y), data = coords,
               fill = fill, color = color, size = size)
}
# Load the pre-drawn package artwork and wrap it in a raster grob so it can
# be placed onto the sticker; width > 1 lets the image bleed over the hex.
img <- readPNG("images/MsCoreUtils-drawing.png")
img <- rasterGrob(img, width = 1.4, x = 0.5, y = 0.6,
                  interpolate = TRUE)
## Manually define the sticker colour palette.
col_blue = "#246abe"
col_grey = "#95959c"
col_grey = "#838289" # The color after Gimp converting the color scheme
col_purple = "#9200fc"
col_orange = "#f4810b"
col_yellow = "#fef14e"
col_white = "#ffffff"
## colored beams.
# Compose the sticker: white hexagon, a translucent blue segment on the left
# (hex_segment2, 150-210 degrees), three translucent triangles fanning from
# the hexagon centre (1, 1) towards the upper-right edge, the grey border
# redrawn on top, the artwork subview, then the URL and package name.
# The two-digit suffix appended by paste0() ("60"/"40") acts as the alpha
# channel of the 8-digit hex colour.
hex <- ggplot() +
    geom_hexagon(size = 1.2, fill = col_white, color = NA) +
    hex_segment2(size = 0, fill = paste0(col_blue, 60),
                 from_radius = 0, to_radius = 1,
                 from_angle = 150, to_angle = 210) +
    geom_polygon(data = data.frame(x = c(1, 1 + sqrt(3)/2, 1 + sqrt(3)/3),
                                   y = c(1, 1.5, 1.5 + 1/6)),
                 aes(x = x, y = y),
                 fill = paste0(col_yellow, 40)) +
    geom_polygon(data = data.frame(x = c(1, 1 + sqrt(3)/3, 1 + sqrt(3)/6),
                                   y = c(1, 1.5 + 1/6, 1.5 + 1/3)),
                 aes(x = x, y = y),
                 fill = paste0(col_orange, 40)) +
    geom_polygon(data = data.frame(x = c(1, 1 + sqrt(3)/6, 1),
                                   y = c(1, 1.5 + 1/3, 2)),
                 aes(x = x, y = y),
                 fill = paste0(col_purple, 40)) +
    geom_hexagon(size = 1.2, fill = NA, color = col_grey) +
    geom_subview(subview = img, x = 0.95, y = 0.64,
                 width = 1.0, height = 1.7) +
    geom_url("www.bioconductor.org", color = col_grey, size = 5.5) +
    geom_pkgname("MsCoreUtils", y = 1.38, size = 23,
                 color = col_grey, family = "Aller") +
    theme_sticker()
# Write the finished sticker to disk.
save_sticker(filename = "MsCoreUtils.png", hex)
| /MsCoreUtils/MsCoreUtils.R | permissive | Bioconductor/BiocStickers | R | false | false | 3,231 | r | library(ggplot2)
library(png)
library(grid)
library(hexSticker)
#' Build a ggplot2 polygon layer covering a radial segment of a hexagon.
#'
#' @param x x offset of the hexagon's center
#'
#' @param y y offset of the hexagon's center
#'
#' @param radius the radius (side length) of the hexagon.
#'
#' @param from_radius from where should the segment be drawn? defaults to the center
#'
#' @param to_radius to where should the segment be drawn? defaults to the radius
#'
#' @param from_angle from which angle should we draw?
#'
#' @param to_angle to which angle should we draw?
#'
#' @param fill fill color
#'
#' @param color line color
#'
#' @param size size of the line?
#'
#' @return A \code{geom_polygon} layer that can be added to a ggplot.
hex_segment2 <- function(x = 1, y = 1, radius = 1, from_radius = 0,
                         to_radius = radius, from_angle = 30, to_angle = 90,
                         fill = NA, color = NA, size = 1.2) {
  ## Angles are supplied in degrees; cos()/sin() expect radians.
  from_angle <- from_angle * pi / 180
  to_angle <- to_angle * pi / 180
  ## The four polygon corners: inner and outer radius at each of the two
  ## bounding angles, ordered so the vertices form a closed quadrilateral.
  coords <- data.frame(x = x + c(from_radius * cos(from_angle),
                                 to_radius * cos(from_angle),
                                 to_radius * cos(to_angle),
                                 from_radius * cos(to_angle)),
                       y = y + c(from_radius * sin(from_angle),
                                 to_radius * sin(from_angle),
                                 to_radius * sin(to_angle),
                                 from_radius * sin(to_angle))
                       )
  ## NOTE(review): aes_() is deprecated in ggplot2; aes(.data$x, .data$y) is
  ## the modern equivalent -- confirm the ggplot2 version before changing.
  geom_polygon(aes_(x = ~x, y = ~y), data = coords,
               fill = fill, color = color, size = size)
}
# Load the pre-drawn package artwork and wrap it in a raster grob so it can
# be placed onto the sticker; width > 1 lets the image bleed over the hex.
img <- readPNG("images/MsCoreUtils-drawing.png")
img <- rasterGrob(img, width = 1.4, x = 0.5, y = 0.6,
                  interpolate = TRUE)
## Manually define the sticker colour palette.
col_blue = "#246abe"
col_grey = "#95959c"
col_grey = "#838289" # The color after Gimp converting the color scheme
col_purple = "#9200fc"
col_orange = "#f4810b"
col_yellow = "#fef14e"
col_white = "#ffffff"
## colored beams.
# Compose the sticker: white hexagon, a translucent blue segment on the left
# (hex_segment2, 150-210 degrees), three translucent triangles fanning from
# the hexagon centre (1, 1) towards the upper-right edge, the grey border
# redrawn on top, the artwork subview, then the URL and package name.
# The two-digit suffix appended by paste0() ("60"/"40") acts as the alpha
# channel of the 8-digit hex colour.
hex <- ggplot() +
    geom_hexagon(size = 1.2, fill = col_white, color = NA) +
    hex_segment2(size = 0, fill = paste0(col_blue, 60),
                 from_radius = 0, to_radius = 1,
                 from_angle = 150, to_angle = 210) +
    geom_polygon(data = data.frame(x = c(1, 1 + sqrt(3)/2, 1 + sqrt(3)/3),
                                   y = c(1, 1.5, 1.5 + 1/6)),
                 aes(x = x, y = y),
                 fill = paste0(col_yellow, 40)) +
    geom_polygon(data = data.frame(x = c(1, 1 + sqrt(3)/3, 1 + sqrt(3)/6),
                                   y = c(1, 1.5 + 1/6, 1.5 + 1/3)),
                 aes(x = x, y = y),
                 fill = paste0(col_orange, 40)) +
    geom_polygon(data = data.frame(x = c(1, 1 + sqrt(3)/6, 1),
                                   y = c(1, 1.5 + 1/3, 2)),
                 aes(x = x, y = y),
                 fill = paste0(col_purple, 40)) +
    geom_hexagon(size = 1.2, fill = NA, color = col_grey) +
    geom_subview(subview = img, x = 0.95, y = 0.64,
                 width = 1.0, height = 1.7) +
    geom_url("www.bioconductor.org", color = col_grey, size = 5.5) +
    geom_pkgname("MsCoreUtils", y = 1.38, size = 23,
                 color = col_grey, family = "Aller") +
    theme_sticker()
# Write the finished sticker to disk.
save_sticker(filename = "MsCoreUtils.png", hex)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comparedf.control.R
\name{comparedf.control}
\alias{comparedf.control}
\title{Control settings for \code{comparedf} function}
\usage{
comparedf.control(tol.logical = "none", tol.num = c("absolute",
"percent", "pct"), tol.num.val = sqrt(.Machine$double.eps),
int.as.num = FALSE, tol.char = c("none", "trim", "case", "both"),
tol.factor = c("none", "levels", "labels"), factor.as.char = FALSE,
tol.date = "absolute", tol.date.val = 0, tol.other = "none",
tol.vars = "none", max.print.vars = NA, max.print.obs = NA,
max.print.diffs.per.var = 10, max.print.diffs = 50,
max.print.attrs = NA, ..., max.print.diff = 10)
}
\arguments{
\item{tol.logical, tol.num, tol.char, tol.factor, tol.date, tol.other}{A function or one of the shortcut character strings,
denoting the tolerance function to use for a given data type. See "details", below.}
\item{tol.num.val}{Numeric; maximum value of differences allowed in numerics (fed to the function given in \code{tol.num}).}
\item{int.as.num}{Logical; should integers be coerced to numeric before comparison? Default FALSE.}
\item{factor.as.char}{Logical; should factors be coerced to character before comparison? Default FALSE.}
\item{tol.date.val}{Numeric; maximum value of differences allowed in dates (fed to the function given in \code{tol.date}).}
\item{tol.vars}{Either \code{"none"} (the default), denoting that variable names are to be matched as-is,
a named vector manually specifying variable names to compare (where the names correspond to columns of
\code{x} and the values correspond to columns of \code{y}), or a
character vector denoting equivalence classes for characters in the variable names. See "details", below.}
\item{max.print.vars}{Integer denoting maximum number of variables to report in the "variables not shared" and "variables not compared"
output. \code{NA} will print all differences.}
\item{max.print.obs}{Integer denoting maximum number of not-shared observations to report. \code{NA} will print all differences.}
\item{max.print.diffs.per.var, max.print.diffs}{Integers denoting the maximum number of differences to report for each variable or overall.
\code{NA} will print all differences for each variable or overall.}
\item{max.print.attrs}{Integer denoting the maximum number of non-identical attributes to report. \code{NA} will print all differences.}
\item{...}{Other arguments (not in use at this time).}
\item{max.print.diff}{Deprecated.}
}
\value{
A list containing the necessary parameters for the \code{\link{comparedf}} function.
}
\description{
Control tolerance definitions for the \code{\link{comparedf}} function.
}
\details{
The following character strings are accepted:
\itemize{
\item{\code{tol.logical = "none"}: compare logicals exactly as they are.}
\item{\code{tol.num = "absolute"}: compare absolute differences in numerics.}
\item{\code{tol.num = "percent"}, \code{tol.num = "pct"} compare percent differences in numerics.}
\item{\code{tol.char = "none"}: compare character strings exactly as they are.}
\item{\code{tol.char = "trim"}: left-justify and trim all trailing white space.}
\item{\code{tol.char = "case"}: allow differences in upper/lower case.}
\item{\code{tol.char = "both"}: combine \code{"trim"} and \code{"case"}.}
\item{\code{tol.factor = "none"}: match both character labels and numeric levels.}
\item{\code{tol.factor = "levels"}: match only the numeric levels.}
\item{\code{tol.factor = "labels"}: match only the labels.}
\item{\code{tol.date = "absolute"}: compare absolute differences in dates.}
\item{\code{tol.other = "none"}: expect objects of other classes to be exactly identical.}
}
\code{tol.vars}: If not set to \code{"none"} (the default) or a named vector,
the \code{tol.vars} argument is a character vector denoting equivalence classes
for the characters in the variable names. A single character in this vector means to replace that character
with \code{""}. All other strings in this vector are split by character and replaced by the first character in the string.
E.g., a character vector \code{c("._", "aA", " ")} would denote that the dot and underscore are equivalent (to be translated to a dot),
that "a" and "A" are equivalent (to be translated to "a"), and that spaces should be removed.
The special character string \code{"case"} in this vector is the same as specifying \code{paste0(letters, LETTERS)}.
}
\examples{
cntl <- comparedf.control(
tol.num = "pct", # calculate percent differences
tol.vars = c("case", # ignore case
"._", # set all underscores to dots.
"e") # remove all letter e's
)
}
\seealso{
\code{\link{comparedf}}, \code{\link{comparedf.tolerances}}, \code{\link{summary.comparedf}}
}
\author{
Ethan Heinzen
}
| /man/comparedf.control.Rd | no_license | oterium/arsenal-1 | R | false | true | 4,849 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comparedf.control.R
\name{comparedf.control}
\alias{comparedf.control}
\title{Control settings for \code{comparedf} function}
\usage{
comparedf.control(tol.logical = "none", tol.num = c("absolute",
"percent", "pct"), tol.num.val = sqrt(.Machine$double.eps),
int.as.num = FALSE, tol.char = c("none", "trim", "case", "both"),
tol.factor = c("none", "levels", "labels"), factor.as.char = FALSE,
tol.date = "absolute", tol.date.val = 0, tol.other = "none",
tol.vars = "none", max.print.vars = NA, max.print.obs = NA,
max.print.diffs.per.var = 10, max.print.diffs = 50,
max.print.attrs = NA, ..., max.print.diff = 10)
}
\arguments{
\item{tol.logical, tol.num, tol.char, tol.factor, tol.date, tol.other}{A function or one of the shortcut character strings,
denoting the tolerance function to use for a given data type. See "details", below.}
\item{tol.num.val}{Numeric; maximum value of differences allowed in numerics (fed to the function given in \code{tol.num}).}
\item{int.as.num}{Logical; should integers be coerced to numeric before comparison? Default FALSE.}
\item{factor.as.char}{Logical; should factors be coerced to character before comparison? Default FALSE.}
\item{tol.date.val}{Numeric; maximum value of differences allowed in dates (fed to the function given in \code{tol.date}).}
\item{tol.vars}{Either \code{"none"} (the default), denoting that variable names are to be matched as-is,
a named vector manually specifying variable names to compare (where the names correspond to columns of
\code{x} and the values correspond to columns of \code{y}), or a
character vector denoting equivalence classes for characters in the variable names. See "details", below.}
\item{max.print.vars}{Integer denoting maximum number of variables to report in the "variables not shared" and "variables not compared"
output. \code{NA} will print all differences.}
\item{max.print.obs}{Integer denoting maximum number of not-shared observations to report. \code{NA} will print all differences.}
\item{max.print.diffs.per.var, max.print.diffs}{Integers denoting the maximum number of differences to report for each variable or overall.
\code{NA} will print all differences for each variable or overall.}
\item{max.print.attrs}{Integer denoting the maximum number of non-identical attributes to report. \code{NA} will print all differences.}
\item{...}{Other arguments (not in use at this time).}
\item{max.print.diff}{Deprecated.}
}
\value{
A list containing the necessary parameters for the \code{\link{comparedf}} function.
}
\description{
Control tolerance definitions for the \code{\link{comparedf}} function.
}
\details{
The following character strings are accepted:
\itemize{
\item{\code{tol.logical = "none"}: compare logicals exactly as they are.}
\item{\code{tol.num = "absolute"}: compare absolute differences in numerics.}
\item{\code{tol.num = "percent"}, \code{tol.num = "pct"} compare percent differences in numerics.}
\item{\code{tol.char = "none"}: compare character strings exactly as they are.}
\item{\code{tol.char = "trim"}: left-justify and trim all trailing white space.}
\item{\code{tol.char = "case"}: allow differences in upper/lower case.}
\item{\code{tol.char = "both"}: combine \code{"trim"} and \code{"case"}.}
\item{\code{tol.factor = "none"}: match both character labels and numeric levels.}
\item{\code{tol.factor = "levels"}: match only the numeric levels.}
\item{\code{tol.factor = "labels"}: match only the labels.}
\item{\code{tol.date = "absolute"}: compare absolute differences in dates.}
\item{\code{tol.other = "none"}: expect objects of other classes to be exactly identical.}
}
\code{tol.vars}: If not set to \code{"none"} (the default) or a named vector,
the \code{tol.vars} argument is a character vector denoting equivalence classes
for the characters in the variable names. A single character in this vector means to replace that character
with \code{""}. All other strings in this vector are split by character and replaced by the first character in the string.
E.g., a character vector \code{c("._", "aA", " ")} would denote that the dot and underscore are equivalent (to be translated to a dot),
that "a" and "A" are equivalent (to be translated to "a"), and that spaces should be removed.
The special character string \code{"case"} in this vector is the same as specifying \code{paste0(letters, LETTERS)}.
}
\examples{
cntl <- comparedf.control(
tol.num = "pct", # calculate percent differences
tol.vars = c("case", # ignore case
"._", # set all underscores to dots.
"e") # remove all letter e's
)
}
\seealso{
\code{\link{comparedf}}, \code{\link{comparedf.tolerances}}, \code{\link{summary.comparedf}}
}
\author{
Ethan Heinzen
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addSigline.R
\name{addSigline}
\alias{addSigline}
\title{addSigline}
\usage{
addSigline(
point.left.up = c(1, 100),
point.right.up = c(2, 100),
point.left.bottom = c(1, 50),
point.right.bottom = c(2, 50),
line.width = 0.5,
lwd = 1,
sig.label = "***",
n.y = 1.01,
cex = 1.3
)
}
\arguments{
\item{point.left.up}{Left up point coordinates.}
\item{point.right.up}{Right up point coordinates}
\item{point.left.bottom}{Left bottom point coordinates.}
\item{point.right.bottom}{Right bottom point coordinates}
\item{line.width}{Width of lines.}
\item{lwd}{lwd}
\item{sig.label}{Significant label.}
\item{n.y}{Position for the significance label.}
\item{cex}{Size for significant label.}
}
\description{
Add significance lines in boxplots or other plots.
}
\author{
Xiaotao Shen
}
| /man/addSigline.Rd | permissive | jaspershen/sxtTools | R | false | true | 914 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addSigline.R
\name{addSigline}
\alias{addSigline}
\title{addSigline}
\usage{
addSigline(
point.left.up = c(1, 100),
point.right.up = c(2, 100),
point.left.bottom = c(1, 50),
point.right.bottom = c(2, 50),
line.width = 0.5,
lwd = 1,
sig.label = "***",
n.y = 1.01,
cex = 1.3
)
}
\arguments{
\item{point.left.up}{Left up point coordinates.}
\item{point.right.up}{Right up point coordinates}
\item{point.left.bottom}{Left bottom point coordinates.}
\item{point.right.bottom}{Right bottom point coordinates}
\item{line.width}{Width of lines.}
\item{lwd}{lwd}
\item{sig.label}{Significant label.}
\item{n.y}{Position for the significance label.}
\item{cex}{Size for significant label.}
}
\description{
Add significance lines in boxplots or other plots.
}
\author{
Xiaotao Shen
}
|
.TPP_importFct_CheckDataFormat <- function (files, dataframes, expNames){
    ## Internal helper (copied from the TPP package to avoid importing
    ## non-exported functions): require exactly one of 'files' or
    ## 'dataframes', then normalise both into named containers keyed by
    ## 'expNames'. Returns list(files = ..., dataframes = ...).
    haveFrames <- !is.null(dataframes)
    haveFiles <- !is.null(files)
    if (haveFrames && haveFiles) {
        stop("Data import function received a",
             " filename AND a dataframe object. \n",
             "Please specify only one.")
    }
    if (!haveFrames && !haveFiles) {
        stop("Data import function requires a",
             " filename or a dataframe object. \n",
             "Please specify one.")
    }
    if (haveFrames) {
        if (is.data.frame(dataframes)) {
            ## A single data.frame is wrapped into a one-element named list.
            dataframes <- list(dataframes)
            names(dataframes) <- expNames
        } else if (is.list(dataframes)) {
            ## A list must contain data.frames only; report the offending
            ## positions so the user can fix the call.
            isFrame <- vapply(dataframes, inherits, logical(1),
                              what = "data.frame")
            if (!all(isFrame)) {
                stop(paste("Argument 'dataframes' contains",
                           "elements that are not of type",
                           "'data.frame' at the following positions: "),
                     paste(which(!isFrame), collapse = ", "), ".")
            }
        } else {
            stop("Argument 'dataframes' must be either an object of class \n
            'data.frame', or a list of such objects.")
        }
    }
    if (haveFiles) {
        files <- as.character(files)
        names(files) <- expNames
    }
    return(list(files = files, dataframes = dataframes))
}
#' @importFrom utils read.delim
#' @importFrom RCurl url.exists
.TPP_importFct_readFiles <- function (files, naStrs){
    ## Internal helper (copied from the TPP package): read each tab-separated
    ## experiment file (local path or URL) into a data.frame, keeping the
    ## experiment names of 'files' as the names of the returned list.
    readOne <- function(path) {
        ## Accept either an existing local file or a reachable URL.
        if (!(file.exists(path) || url.exists(path))) {
            stop("File ", path, " could not be found.")
        }
        read.delim(path, as.is = TRUE, na.strings = naStrs, quote = "")
    }
    expNames <- names(files)
    data <- vector("list", length(files))
    names(data) <- expNames
    for (nm in expNames) {
        data[[nm]] <- readOne(files[[nm]])
    }
    return(data)
}
#' @import dplyr
.TPP_importFct_removeDuplicates <- function(inDF, refColName,
                                            nonNAColNames, qualColName){
    ## Internal helper (copied from the TPP package): collapse rows that
    ## share an identifier in 'refColName' down to a single representative
    ## row. Among the duplicated rows that carry at least one non-NA value
    ## in the measurement columns 'nonNAColNames', the row with the highest
    ## value in 'qualColName' is kept (the first informative row when
    ## 'qualColName' is NULL). Identifiers whose rows are all-NA are dropped.
    ##
    ## Bug fixes relative to the previous copy:
    ##  * the duplicated identifiers were extracted as a one-column tibble,
    ##    so '%in%' against it and 'for (nU in nonUniques)' silently
    ##    misbehaved whenever two or more identifiers were duplicated; a
    ##    plain vector is used now.
    ##  * the best-quality row index was computed relative to the subset of
    ##    informative rows but applied to the full per-identifier block; it
    ##    is now mapped back through 'nonNArows'.
    message("Removing duplicate identifiers using quality column '",
            qualColName, "'...")
    refVals <- inDF[[refColName]]
    nonUniques <- unique(refVals[duplicated(refVals)])
    ## Keep all rows whose identifier occurs exactly once ...
    retDF <- inDF[!(refVals %in% nonUniques), , drop = FALSE]
    ## ... and append one representative row per duplicated identifier.
    for (nU in nonUniques) {
        tmpDF <- inDF[refVals == nU, , drop = FALSE]
        ## Rows with at least one non-missing measurement value.
        hasValue <- apply(!is.na(tmpDF[, nonNAColNames, drop = FALSE]),
                          1, any)
        nonNArows <- which(hasValue)
        if (length(nonNArows) == 0) {
            ## No informative row at all: drop this identifier entirely.
            next
        }
        if (length(nonNArows) > 1 && !is.null(qualColName)) {
            qualVals <- tmpDF[nonNArows, qualColName]
            useRow <- nonNArows[which.max(qualVals)]
        } else {
            useRow <- nonNArows[1]
        }
        retDF <- rbind(retDF, tmpDF[useRow, , drop = FALSE])
    }
    message(nrow(retDF), " out of ", nrow(inDF),
            " rows kept for further analysis.")
    return(retDF)
}
.TPP_replaceZeros <- function(x){
    ## Internal helper (copied from the TPP package): mask exact zeros as NA.
    ## which() is used deliberately so that pre-existing NAs in 'x' do not
    ## yield NA subscripts during the assignment.
    zeroIdx <- which(x == 0)
    x[zeroIdx] <- NA
    x
}
.TPP_importFct_rmZeroSias <- function(data.list,
                                      intensityStr){
    ## Internal helper (copied from the TPP package): in every experiment
    ## table of 'data.list', coerce the reporter-intensity columns (those
    ## whose names match the regular expression 'intensityStr') to numeric
    ## and replace exact zeros by NA via .TPP_replaceZeros(), so that empty
    ## channels do not enter downstream statistics. Names are preserved.
    ## Rewritten in base R; the previous version used the superseded
    ## dplyr::mutate_all() chain.
    out <- lapply(names(data.list), function(tableName) {
        datTmp <- data.list[[tableName]]
        intensityCols <- grep(intensityStr, colnames(datTmp), value = TRUE)
        if (length(intensityCols) > 0) {
            datTmp[intensityCols] <- lapply(
                datTmp[intensityCols],
                function(colVals) {
                    ## as.character() first mirrors the original behaviour
                    ## for factor columns (label -> number, not level code).
                    .TPP_replaceZeros(as.numeric(as.character(colVals)))
                })
        }
        datTmp
    })
    names(out) <- names(data.list)
    return(out)
}
.TPP_importFct_checkExperimentCol <- function(expCol){
    ## Internal helper (copied from the TPP package): validate the
    ## 'Experiment' column of the config table and sanitise its entries so
    ## they can be used as R object names (non-alphanumeric chars -> '_').
    if (is.null(expCol)) {
        m <- paste("Config table needs an 'Experiment'",
                   "column with unique experiment IDs.")
        stop(m, "\n")
    }
    sanitized <- gsub("([^[:alnum:]])", "_", expCol)
    changed <- expCol != sanitized
    if (any(changed)) {
        ## Tell the user which names were modified so downstream references
        ## (e.g. comparison columns) can be adjusted accordingly.
        m1 <- paste("Replaced non-alphanumeric characters",
                    "in the 'Experiment' column entries:")
        m2 <- paste("'", paste(expCol[changed], collapse = "', '"),
                    "'\nby\n'", paste(sanitized[changed], collapse = "', '"),
                    sep = "")
        message(m1, "\n", m2, "\n")
    }
    return(sanitized)
}
.TPP_importFct_checkComparisons <- function(confgTable){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Detects the "Comparison*" columns of the import config table and turns
    # each valid one (exactly two experiments flagged with an alphanumeric
    # entry) into a label of the form "<treatment>_vs_<reference>".
    # Returns a character vector of labels named by the comparison columns
    # (empty when no valid comparison column exists).
    expConds <- confgTable$Condition
    expNames <- confgTable$Experiment
    # All columns whose name contains "Comparison" (case-insensitive).
    compCols <- grep("Comparison", colnames(confgTable), ignore.case = TRUE,
                     value = TRUE)
    # Per comparison column: the number of non-empty (alphanumeric) entries.
    compChars <- apply(confgTable[compCols], 2, function(x) {
        length(grep("[[:alnum:]]", x, value = TRUE))
    })
    # Exactly two experiments must be flagged for a comparison to be valid.
    comp_unequal_two <- compChars != 2
    if (any(comp_unequal_two)) {
        warning(paste("\nThe following comparison columns could not be evaluated",
                      "because they did not contain exactly two entries:\n\t\t"),
                paste(compCols[comp_unequal_two], collapse = ",\n\t\t"))
    }
    validCompCols <- compCols[!comp_unequal_two]
    allCompStrs <- c()
    if (length(validCompCols)) {
        message("Comparisons will be performed between the following experiments:")
        for (colName in validCompCols) {
            current_compEntries <- confgTable[[colName]]
            # Rows (experiments) participating in this comparison.
            current_compRows <- grep("[[:alnum:]]", current_compEntries)
            current_compExps <- expNames[current_compRows]
            # Default: first flagged experiment is the reference, second the
            # treatment (by row order) ...
            compRef <- current_compExps[1]
            compTreatm <- current_compExps[2]
            # ... unless a Condition column labels them explicitly as
            # Vehicle (reference) and Treatment.
            if ("Condition" %in% names(confgTable)) {
                current_compConds <- expConds[current_compRows]
                if ("Vehicle" %in% current_compConds && "Treatment" %in%
                    current_compConds) {
                    compRef <- current_compExps[current_compConds ==
                                                  "Vehicle"]
                    compTreatm <- current_compExps[current_compConds ==
                                                     "Treatment"]
                }
            }
            compStr <- paste(compTreatm, "_vs_", compRef, sep = "")
            names(compStr) <- colName
            message(compStr)
            allCompStrs <- c(allCompStrs, compStr)
        }
        message("\n")
    }
    return(allCompStrs)
}
#' @importFrom stringr str_to_title
.TPP_importFct_checkConditions <- function(condInfo,
                                           expectedLength){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Validates the optional 'Condition' column of the config table: either
    # NULL (then a vector of NAs of length 'expectedLength' is returned) or
    # a vector whose entries normalise (title case) to "Treatment"/"Vehicle".
    # Returns the normalised character vector; stops on any other value.
    #
    # NOTE(review): 'flagGenerateConds' is assigned but never used here.
    flagGenerateConds <- FALSE
    if (is.null(condInfo)) {
        message("No information about experimental conditions given.",
                "Assigning NA instead.\n",
                "Reminder: recognition of Vehicle and Treatment groups",
                "during pairwise \n",
                "comparisons is only possible when they are specified ",
                "in the config table.\n")
        condInfo <- rep(NA_character_, expectedLength)
    }
    else {
        # Title case ("vehicle" -> "Vehicle") so the membership check below
        # is effectively case-insensitive.
        condInfo <- as.character(condInfo) %>%
            stringr::str_to_title()
        condLevels <- unique(condInfo)
        invalidLevels =
            setdiff(condLevels, c("Treatment", "Vehicle"))
        if (length(invalidLevels) > 0) {
            # NOTE(review): with more than one invalid level the vector is
            # spliced into the error message element-wise.
            stop("The entry '", invalidLevels,
                 paste("' in the condition column is invalid.",
                       "Only the values 'Treatment' and",
                       "'Vehicle' are allowed. Please correct",
                       "this and start again."))
        }
    }
    return(condInfo)
}
.TPP_checkFunctionArgs <-
    function(functionCall, expectedArguments){
        ## Internal helper (copied from the TPP package): assert that every
        ## argument name in 'expectedArguments' was explicitly supplied in
        ## 'functionCall' (a match.call() result); stop at the first one
        ## that is missing.
        suppliedArgs <- names(functionCall)
        lapply(expectedArguments, function(requiredArg) {
            if (!requiredArg %in% suppliedArgs) {
                stop("Error in ", paste(functionCall)[1],
                     ": argument '",
                     requiredArg, "' is missing, with no default",
                     call. = FALSE)
            }
        })
    }
.TPP_nonLabelColumns <- function(){
    ## Internal helper (copied from the TPP package): catalogue of config
    ## table columns that do NOT correspond to isotope labels, together with
    ## the experiment type ("TR", "CCR", "2D") they belong to and whether
    ## they are obligatory / exclusive for that type.
    out <- data.frame(
        column = c(rep(c("Experiment", "Path"), each = 3),
                   "Condition", "Replicate", "Compound", "Temperature",
                   "RefCol"),
        type = c(rep(c("TR", "CCR", "2D"), times = 2),
                 "TR", "TR", "2D", "2D", "2D"),
        obligatory = c(rep(TRUE, 3), rep(FALSE, 3),
                       TRUE, FALSE, TRUE, TRUE, TRUE),
        exclusive = c(rep(FALSE, 6), rep(TRUE, 5)))
    return(out)
}
.TPP_detectLabelColumnsInConfigTable <-
    function(allColumns){
        # internal function copied from TPP package to avoid
        # import of non-exported package functions
        #
        # Given all column names of a config table, return the ones
        # holding isobaric labels: everything except the known
        # administrative columns and any "comparison" columns.
        .TPP_checkFunctionArgs(match.call(), c("allColumns"))
        adminCols <- unique(as.character(.TPP_nonLabelColumns()$column))
        comparisonCols <- grep("comparison", allColumns,
                               value = TRUE, ignore.case = TRUE)
        setdiff(allColumns, c(adminCols, comparisonCols))
    }
.TPP_importCheckTemperatures <- function(temp){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Convert the temperature columns to a matrix without row names
    # and fail if any row consists solely of missing values.
    tempMat <- as.matrix(temp)
    rownames(tempMat) <- NULL
    allMissing <- rowSums(!is.na(tempMat)) == 0
    if (any(allMissing)) {
        badRows <- paste(which(allMissing), collapse = ", ")
        stop("Row(s) ", badRows,
             " in the configuration table contain",
             " only missing temperature values.")
    }
    return(tempMat)
}
#' @importFrom openxlsx read.xlsx
#' @importFrom utils read.table
.TPP_importFct_readConfigTable <- function(cfg){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # If 'cfg' is a file path, read it according to its extension
    # (txt = tab separated, csv = comma separated, xlsx = Excel).
    # Anything that is not a character value (e.g. a data frame
    # passed in directly) is returned unchanged.
    if (!is.character(cfg)) {
        return(cfg)
    }
    if (!file.exists(cfg)) {
        stop("Error during data import: ", cfg,
             " does not belong to a valid configuration file.")
    }
    pathChunks <- strsplit(cfg, "\\.")[[1]]
    ext <- pathChunks[length(pathChunks)]
    if (ext == "txt" || ext == "csv") {
        # txt and csv differ only in the field separator
        sepChar <- if (ext == "txt") "\t" else ","
        cfg <- read.table(file = cfg, header = TRUE,
                          check.names = FALSE,
                          stringsAsFactors = FALSE,
                          sep = sepChar)
    } else if (ext == "xlsx") {
        cfg <- openxlsx::read.xlsx(cfg)
    } else {
        stop("Error during data import: ", cfg,
             " does not belong to a valid configuration file.")
    }
    return(cfg)
}
#' Import and check configuration table
#'
#' @param infoTable character string of a file path to
#' a config table (excel,txt or csv file) or data frame
#' containing a config table
#' @param type character string indicating dataset type
#' default is 2D
#'
#' @return data frame with config table
#'
#' @examples
#' data("config_tab")
#' TPP_importCheckConfigTable(config_tab, type = "2D")
#' @export
TPP_importCheckConfigTable <- function(infoTable, type = "2D"){
    .TPP_checkFunctionArgs(match.call(), c("infoTable", "type"))
    # declare column names used in NSE verbs to silence R CMD check
    Experiment <- Path <- Compound <- NULL
    # a data frame input is valid if it has an 'Experiment' column
    # plus at least one other column
    isValidDF <- FALSE
    if (is.data.frame(infoTable)) {
        if ((ncol(infoTable) > 1) &
            ("Experiment" %in% colnames(infoTable))) {
            isValidDF <- TRUE
        }
    }
    if (!is.character(infoTable) & !isValidDF) {
        stop("'infoTable' must either be a data frame",
             " with an 'Experiment' column \n",
             "and at least one isobaric label column,",
             "or a filename pointing at a \n",
             "table that fulfills the same criteria")
    }
    isValidType <- type %in% c("2D")
    if (!isValidType) {
        stop("'type' must have this value: '2D'")
    }
    # read from disk if a path was given, sanitize experiment IDs and
    # drop rows without an experiment name
    infoTable <- .TPP_importFct_readConfigTable(cfg = infoTable)
    infoTable$Experiment <-
        .TPP_importFct_checkExperimentCol(infoTable$Experiment)
    infoTable <- subset(infoTable, Experiment != "")
    givenPaths <- NULL
    if (any("Path" %in% colnames(infoTable))) {
        if (all(infoTable$Path == "") || all(is.na(infoTable$Path))) {
            message("Removing empty 'Path' column from config table")
            infoTable <- infoTable %>% select(-Path)
        }
        else {
            givenPaths <- infoTable$Path
        }
    }
    compStrs <- NA
    infoTable$Condition <- NULL
    # label columns whose entries cannot be coerced to numeric are
    # considered invalid and removed
    allCols <- colnames(infoTable)
    labelCols <- .TPP_detectLabelColumnsInConfigTable(allColumns = allCols)
    labelValues <- infoTable[, labelCols]
    labelValuesNum <- suppressWarnings(labelValues %>% apply(2,
        as.numeric))
    if (is.matrix(labelValuesNum)) {
        isInvalid <- labelValuesNum %>% apply(2, is.na) %>% apply(2,
            all)
    }
    else if (is.vector(labelValuesNum)) {
        # single-row config table: apply() collapsed to a vector
        isInvalid <- is.na(labelValuesNum)
    }
    invalidLabels <- labelCols[isInvalid]
    infoTable[, invalidLabels] <- NULL
    labelColsNew <- labelCols[!isInvalid]
    labelStr <- paste(labelColsNew, collapse = ", ")
    message("The following valid label columns were detected:\n",
        labelStr, ".")
    if (type == "2D") {
        temperatures <- infoTable$Temperature
        if (is.null(temperatures) | length(temperatures) < 2) {
            m1 <- paste("Insufficient temperatures (<2)",
                "specified in config file.")
            m2 <- paste("Does your configuration table",
                "have the correct column names?")
            stop(m1, "\n", m2)
        }
        else if (length(which(!infoTable$RefCol %in% labelColsNew)) !=
            0) {
            stop(paste("Labels in reference column not found",
                "in any of the label columns."))
        }
        hasCompoundCol <- any(allCols == "Compound")
        if (!hasCompoundCol) {
            m <- paste("Config table of a 2D-TPP experiment",
                "needs a 'Compound' column.")
            stop(m, "\n")
        }
        else {
            # sanitize compound names the same way as experiment IDs
            infoTable <- infoTable %>%
                mutate(Compound =
                    gsub("([^[:alnum:]])", "_", Compound))
        }
        out <- infoTable
    }
    else {
        # non-2D branch (currently unreachable because 'type' is
        # restricted to "2D" above); returns a list like the TPP
        # package does for TR/CCR configs
        temperatures <- subset(infoTable, select = labelColsNew)
        tempMatrix <- .TPP_importCheckTemperatures(temp = temperatures)
        infoList <- list(
            expNames = as.character(infoTable$Experiment),
            expCond = infoTable$Condition, files = givenPaths,
            compStrs = compStrs, labels = labelColsNew,
            tempMatrix = tempMatrix)
        out <- infoList
    }
    return(out)
}
#' Import 2D-TPP dataset main function
#'
#' @param configTable character string of a file path to a config table
#' @param data possible list of datasets from different MS runs
#' corresponding to a 2D-TPP dataset, circumvents loading datasets
#' referenced in config table, default is NULL
#' @param idVar character string indicating which data column provides the
#' unique identifiers for each protein.
#' @param intensityStr character string indicating which columns contain
#' raw intensities measurements
#' @param fcStr character string indicating which columns contain the actual
#' fold change values. Those column names containing the suffix \code{fcStr}
#' will be regarded as containing fold change values.
#' @param naStrs character vector indicating missing values in the data table.
#' When reading data from file, this value will be passed on to the argument
#' \code{na.strings} in function \code{read.delim}.
#' @param addCol character string indicating additional column to import
#' @param nonZeroCols column like default qssm that should be imported and
#' requested to be non-zero in analyzed data
#' @param qualColName character string indicating which column can be used for
#' additional quality criteria when deciding between different non-unique
#' protein identifiers.
#'
#' @return list of data frames containing different
#' datasets
#'
#' @examples
#' data("config_tab")
#' data("raw_dat_list")
#' dataList <- import2dMain(configTable = config_tab,
#'                          data = raw_dat_list,
#'                          idVar = "protein_id",
#'                          fcStr = "rel_fc_",
#'                          addCol = "gene_name",
#'                          naStrs = NA,
#'                          intensityStr = "signal_sum_",
#'                          nonZeroCols = "qusm",
#'                          qualColName = "qupm")
#' @export
import2dMain <- function(configTable, data, idVar, fcStr,
                         addCol, naStrs, intensityStr,
                         qualColName, nonZeroCols){
    # internal import main function, adapted from TPP package
    #
    # Reads (or accepts pre-loaded) MS run tables, selects the
    # relevant id/quality/signal/fold-change columns per config row,
    # removes duplicate protein ids and zero intensities, and returns
    # one data frame per (experiment, temperature) combination.
    files <- configTable$Path
    if (!is.null(files)) {
        # any empty path invalidates file-based loading entirely
        if (any(files == "")) {
            files <- NULL
        }
    }
    Experiment <- Compound <- Temperature <- RefCol <- NULL
    expNames <- configTable$Experiment
    argList <- .TPP_importFct_CheckDataFormat(dataframes = data,
                                              files = files,
                                              expNames = expNames)
    data <- argList[["dataframes"]]
    files <- argList[["files"]]
    if (!is.null(files)) {
        # each file is read only once even if referenced by several rows
        files2 <- files[!duplicated(names(files))]
        data <- .TPP_importFct_readFiles(files = files2,
                                         naStrs = naStrs)
    }
    iVec <- seq_len(nrow(configTable))
    dataList <- lapply(iVec, function(iTmp) {
        rowTmp <- configTable[iTmp, ]
        expTmp <- rowTmp$Experiment
        message("Importing 2D-TPP dataset: ", expTmp)
        tTmp <- rowTmp$Temperature
        dataTmp <- data[[expTmp]]
        # administrative columns of the config row; the remaining
        # columns are treated as label columns
        noFCCols <- c("Compound", "Experiment", "Temperature",
                      "RefCol", "Path", "Condition")
        allCols <- colnames(rowTmp)
        labelCols <- setdiff(allCols, noFCCols)
        # keep only label columns holding numeric concentrations
        labelValues <- suppressWarnings(rowTmp[, labelCols] %>%
                                            as.numeric)
        labelColsNum <- labelCols[!is.na(labelValues)]
        signalCols <- paste(intensityStr, labelColsNum, sep = "")
        relevant.cols <- c(idVar, qualColName, nonZeroCols, addCol,
                           signalCols) %>% unique
        if (!is.null(fcStr)) {
            fcCols <- paste(fcStr, labelColsNum, sep = "")
            relevant.cols <- c(relevant.cols, fcCols)
            dataCols <- fcCols
        }
        else {
            # without fold-change columns, dedup on the raw signals
            dataCols <- signalCols
        }
        if (!all(relevant.cols %in% colnames(dataTmp))) {
            notFound <- paste(setdiff(relevant.cols, colnames(dataTmp)),
                              collapse = "', '")
            stop("The following columns could not be found: '",
                 notFound, paste("'. Please check the suffices and the",
                                 "additional column names you have specified."))
        }
        dataFiltered <- .TPP_importFct_removeDuplicates(
            inDF = dataTmp,refColName = idVar,
            nonNAColNames = dataCols,
            qualColName = qualColName[1])
        idsTmp <- as.character(dataFiltered[, idVar])
        # ids are made unique across experiments and temperatures
        idsAnnotated <- paste(expTmp, tTmp, idsTmp, sep = "_")
        dataFinal <- dataFiltered %>% subset(select = relevant.cols) %>%
            mutate(temperature = tTmp, experiment = expTmp, unique_ID = idsAnnotated)
        return(dataFinal)
    })
    # list elements are named "<experiment>_<temperature>"
    newNames <- vapply(seq(nrow(configTable)), function(iTmp) {
        rowTmp <- configTable[iTmp, ]
        tTmp <- rowTmp$Temperature
        expTmp <- rowTmp$Experiment
        newName <- paste(expTmp, tTmp, sep = "_")
        return(newName)
    }, "")
    names(dataList) <- newNames
    out <- .TPP_importFct_rmZeroSias(data.list = dataList,
                                     intensityStr = intensityStr)
    return(out)
}
#' Transform configuration table from wide to long
#'
#' @param configWide data frame containing a config table
#' @return data frame containing config table in long format,
#' with one row per (experiment, label) and columns `label`
#' and `conc`; channels marked "-" are dropped
#'
#' @importFrom tidyr gather
#' @examples
#' data("config_tab")
#' configWide2Long(configWide = config_tab)
#'
#' @export
configWide2Long <- function(configWide){
    Path <- label <- conc <- Compound <- Experiment <-
        Temperature <- RefCol <- NULL
    # drop the optional 'Path' column first so both cases can share
    # the same reshaping code below
    if(any(grepl("Path", colnames(configWide)))){
        configWide <- dplyr::select(configWide, -Path)
    }
    configLong <- configWide %>%
        gather(label, conc, -Compound,
               -Experiment, -Temperature, -RefCol) %>%
        filter(conc != "-")
    # explicit return: the previous implementation ended in an
    # assignment inside if/else, so the value was returned invisibly
    return(configLong)
}
#' Annotate imported data list using a config table
#' @param dataList list of datasets from different MS runs
#' corresponding to a 2D-TPP dataset
#' @param geneNameVar character string of the column name that describes
#' the gene name of a given protein in the raw data files
#' @param configLong long formatted data frame of a corresponding
#' config table
#' @param intensityStr character string indicating which columns contain
#' raw intensities measurements
#' @param fcStr character string indicating which columns contain the actual
#' fold change values. Those column names containing the suffix \code{fcStr}
#' will be regarded as containing fold change values.
#'
#' @return data frame containing all data annotated
#' by information supplied in the config table
#'
#' @importFrom tidyr spread
#'
#' @examples
#' data("config_tab")
#' data("raw_dat_list")
#' dataList <- import2dMain(configTable = config_tab,
#'                          data = raw_dat_list,
#'                          idVar = "protein_id",
#'                          fcStr = "rel_fc_",
#'                          addCol = "gene_name",
#'                          naStrs = NA,
#'                          intensityStr = "signal_sum_",
#'                          nonZeroCols = "qusm",
#'                          qualColName = "qupm")
#' configLong <- configWide2Long(configWide = config_tab)
#' annotateDataList(dataList = dataList,
#'                  geneNameVar = "gene_name",
#'                  configLong = configLong,
#'                  intensityStr = "signal_sum_",
#'                  fcStr = "rel_fc_")
#' @export
annotateDataList <- function(dataList, geneNameVar, configLong,
                             intensityStr, fcStr){
    # declare NSE column names to silence R CMD check
    channel <- signal <- Temperature <- RefCol <- label <-
        conc <- unique_ID <- spread_var <- NULL
    combinedTab <- bind_rows(lapply(dataList, function(dat){
        # reshape each run: one row per (protein, channel), join the
        # concentration info from the long config table, then spread
        # raw intensities and fold changes into 'raw_value' and
        # 'rel_value' columns
        datLong <- dat %>% as_tibble() %>%
            gather(channel, signal, matches(intensityStr), matches(fcStr)) %>%
            # strip both prefixes so 'label' matches the config table
            mutate(label = gsub(fcStr, "", gsub(intensityStr, "", channel))) %>%
            left_join(configLong %>%
                          dplyr::select(Temperature, RefCol, label, conc),
                      by = c("temperature" = "Temperature", "label")) %>%
            mutate(spread_var =
                       ifelse(grepl(fcStr, channel), "rel_value", "raw_value")) %>%
            dplyr::select(-channel, -unique_ID) %>%
            spread(spread_var, signal)
    }))
    return(combinedTab)
}
#' Filter out contaminants
#'
#' @param dataLong long format data frame of imported dataset
#'
#' @return data frame containing full dataset filtered to
#' contain no contaminants
#'
#' @examples
#' data("simulated_cell_extract_df")
#' filterOutContaminants(simulated_cell_extract_df)
#'
#' @export
filterOutContaminants <- function(dataLong){
    # internal function to filter out contaminants
    representative <- NULL
    # rows whose 'representative' id contains "##" are treated as
    # contaminants and removed
    filter(dataLong, !grepl("##", representative))
}
.checkRatioRef <- function(dataLong, idVar, concFactor = 1e6){
    # internal function to check that protein
    # fold changes are computed
    # relative to the correct TMT channel
    #
    # If the reference channel's rel_value is not 1 everywhere, all
    # rel_values are re-normalized to that channel per protein and
    # temperature. In both branches, rows without a raw intensity are
    # dropped and log10 concentrations (scaled by 'concFactor') are
    # added.
    label <- RefCol <- rel_value <- raw_value <- conc <- NULL
    if(!all(filter(dataLong, label == RefCol)$rel_value == 1,
            na.rm = TRUE)){
        message("Recomputing ratios!")
        dataOut <- dataLong %>%
            # NOTE(review): group_by(.dots = ...) is soft-deprecated
            # in recent dplyr; confirm it still groups by idVar and
            # temperature as intended
            group_by(.dots = c(idVar, "temperature")) %>%
            mutate(rel_value = rel_value/rel_value[label == RefCol]) %>%
            ungroup %>%
            filter(!is.na(raw_value)) %>%
            mutate(conc = as.numeric(conc)) %>%
            mutate(log_conc = log10(conc/concFactor))
        return(dataOut)
    }else{
        message("Ratios were correctly computed!")
        return(dataLong %>%
                   filter(!is.na(raw_value)) %>%
                   mutate(conc = as.numeric(conc)) %>%
                   mutate(log_conc = log10(conc/concFactor)))
    }
}
#' @importFrom stats median
.medianNormalizeRatios <- function(dataLong){
    # internal function to perform median normalization
    # of ratios
    #
    # Within each (temperature, conc) group, each fold change is
    # divided by the group's median. The unnormalized values are kept
    # in 'raw_rel_value'.
    rel_value <- temperature <- conc <-
        raw_rel_value <- NULL
    dataOut <- dataLong %>%
        rename(raw_rel_value = rel_value) %>%
        group_by(temperature, conc) %>%
        mutate(rel_value = raw_rel_value /
                   median(raw_rel_value, na.rm = TRUE)) %>%
        ungroup()
    return(dataOut)
}
#' Rename columns of imported data frame
#'
#' @param dataLong long format data frame of imported dataset
#' @param idVar character string indicating which data column provides the
#' unique identifiers for each protein.
#' @param geneNameVar character string of the column name that describes
#' the gene name of a given protein in the raw data files
#'
#' @return data frame containing imported data with renamed
#' columns
#'
#' @examples
#' data("config_tab")
#' data("raw_dat_list")
#'
#' dataList <- import2dMain(configTable = config_tab,
#'                          data = raw_dat_list,
#'                          idVar = "protein_id",
#'                          fcStr = "rel_fc_",
#'                          addCol = "gene_name",
#'                          naStrs = NA,
#'                          intensityStr = "signal_sum_",
#'                          nonZeroCols = "qusm",
#'                          qualColName = "qupm")
#' configLong <- configWide2Long(configWide = config_tab)
#' annoDat <- annotateDataList(dataList = dataList,
#'                             geneNameVar = "gene_name",
#'                             configLong = configLong,
#'                             intensityStr = "signal_sum_",
#'                             fcStr = "rel_fc_")
#' renameColumns(annoDat,
#'               idVar = "protein_id",
#'               geneNameVar = "gene_name")
#' @export
renameColumns <- function(dataLong, idVar, geneNameVar){
    clustername <- representative <- NULL
    # rename the user-specified id and gene-name columns to the fixed
    # names 'representative' and 'clustername' used downstream, then
    # merge all "|"-separated representative ids per gene into one
    # deduplicated "|"-joined string (via the package helper
    # .paste_rmNA, which drops NAs)
    dplyr::rename(dataLong, "representative" = idVar,
                  "clustername" = geneNameVar) %>%
        group_by(clustername) %>%
        mutate(representative =
                   .paste_rmNA(unique(unlist(strsplit(representative,
                                                      split = "\\|"))),
                               sep = "|")) %>%
        ungroup()
}
#' Import 2D-TPP dataset using a config table
#'
#' @param configTable character string of a file path to a config table
#' @param data possible list of datasets from different MS runs
#' corresponding to a 2D-TPP dataset, circumvents loading datasets
#' referencend in config table, default is NULL
#' @param idVar character string indicating which data column provides the
#' unique identifiers for each protein.
#' @param intensityStr character string indicating which columns contain
#' raw intensities measurements
#' @param fcStr character string indicating which columns contain the actual
#' fold change values. Those column names containing the suffix \code{fcStr}
#' will be regarded as containing fold change values.
#' @param naStrs character vector indicating missing values in the data table.
#' When reading data from file, this value will be passed on to the argument
#' \code{na.strings} in function \code{read.delim}.
#' @param qualColName character string indicating which column can be used for
#' additional quality criteria when deciding between different non-unique
#' protein identifiers.
#' @param medianNormalizeFC perform median normalization (default: TRUE).
#' @param addCol character string indicating additional column to import
#' @param filterContaminants boolean variable indicating whether data
#' should be filtered to exclude contaminants (default: TRUE).
#' @param nonZeroCols column like default qssm that should be imported and
#' requested to be non-zero in analyzed data
#' @param geneNameVar character string of the column name that describes
#' the gene name of a given protein in the raw data files
#' @param concFactor numeric value that indicates how concentrations need to
#' be adjusted to yield total unit e.g. default mmol - 1e6
#'
#' @return tidy data frame representing a 2D-TPP dataset
#'
#' @examples
#' data("config_tab")
#' data("raw_dat_list")
#' import_df <- import2dDataset(configTable = config_tab,
#' data = raw_dat_list,
#' idVar = "protein_id",
#' intensityStr = "signal_sum_",
#' fcStr = "rel_fc_",
#' nonZeroCols = "qusm",
#' geneNameVar = "gene_name",
#' addCol = NULL,
#' qualColName = "qupm",
#' naStrs = c("NA", "n/d", "NaN"),
#' concFactor = 1e6,
#' medianNormalizeFC = TRUE,
#' filterContaminants = TRUE)
#'
#' @export
import2dDataset <- function(configTable, data,
idVar = "representative",
intensityStr = "sumionarea_protein_",
fcStr = "rel_fc_protein_",
nonZeroCols = "qssm",
geneNameVar = "clustername",
addCol = NULL,
qualColName = "qupm",
naStrs = c("NA", "n/d", "NaN"),
concFactor = 1e6,
medianNormalizeFC = TRUE,
filterContaminants = TRUE){
raw_value <- rel_value <- NULL
configWide <- TPP_importCheckConfigTable(
infoTable = configTable, type = "2D")
configLong <- configWide2Long(configWide = configWide)
dataList <- import2dMain(configTable = configWide,
data = data,
idVar = idVar,
fcStr = fcStr,
addCol = c(geneNameVar, addCol),
naStrs = naStrs,
intensityStr = intensityStr,
nonZeroCols = nonZeroCols,
qualColName = qualColName)
dataLong <- annotateDataList(dataList = dataList,
geneNameVar = geneNameVar,
configLong = configLong,
intensityStr = intensityStr,
fcStr = fcStr)
dataRatioChecked <- .checkRatioRef(dataLong, idVar = idVar,
concFactor = concFactor)
if(medianNormalizeFC){
message("Median normalizing fold changes...")
dataNorm <- .medianNormalizeRatios(dataRatioChecked)
}else{
dataNorm <- dataRatioChecked
}
dataOut <- renameColumns(dataNorm,
idVar = idVar,
geneNameVar = geneNameVar)
if(filterContaminants){
dataOut <- filterOutContaminants(dataOut)
}
dataOut <- filter(dataOut, !is.na(raw_value), !is.na(rel_value))
return(dataOut)
} | /R/import_funcs.R | no_license | nkurzaw/TPP2D | R | false | false | 32,329 | r | .TPP_importFct_CheckDataFormat <- function (files, dataframes, expNames){
# internal function copied from TPP package to avoid
# import of non-exported package functions
. <- NULL
isDF <- !is.null(dataframes)
isF <- !is.null(files)
isBoth <- isDF & isF
isNone <- !(isDF | isF)
if (isBoth) {
stop("Data import function received a",
" filename AND a dataframe object. \n",
"Please specify only one.")
}
else if (isNone) {
stop("Data import function requires a",
" filename or a dataframe object. \n",
"Please specify one.")
}
if (isDF) {
isClassList <- is.list(dataframes) && !is.data.frame(dataframes)
isClassDF <- is.data.frame(dataframes)
if (isClassList) {
classesInList <- dataframes %>%
vapply(. %>% inherits(., "data.frame"), TRUE)
if (!all(classesInList)) {
stop(paste("Argument 'dataframes' contains",
"elements that are not of type",
"'data.frame' at the following positions: "),
which(!classesInList) %>% paste(collapse = ", "), ".")
}
}
else if (isClassDF) {
dataframes <- list(dataframes)
names(dataframes) <- expNames
}
else {
stop("Argument 'dataframes' must be either an object of class \n
'data.frame', or a list of such objects.")
}
}
if (isF) {
files <- as.character(files)
names(files) <- expNames
}
return(list(files = files, dataframes = dataframes))
}
#' @importFrom utils read.delim
#' @importFrom RCurl url.exists
.TPP_importFct_readFiles <- function (files, naStrs){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Reads each tab-separated file (local path or URL) in 'files'
    # into a data frame; the result list is named by experiment.
    # Stops on the first path that is neither an existing file nor a
    # reachable URL.
    expNames <- names(files)
    data <- vector("list", length(files))
    names(data) <- expNames
    for (expName in expNames) {
        fTmp <- files[[expName]]
        if (file.exists(fTmp) || url.exists(fTmp)) {
            # quote = "" keeps embedded quote characters verbatim
            data[[expName]] <- read.delim(fTmp, as.is = TRUE,
                na.strings = naStrs, quote = "")
        }
        else {
            stop("File ", fTmp, " could not be found.")
        }
    }
    return(data)
}
#' @import dplyr
.TPP_importFct_removeDuplicates <- function(inDF, refColName,
                                            nonNAColNames, qualColName){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # For protein ids occurring multiple times in 'inDF', keep a
    # single row: among rows with at least one non-NA value in
    # 'nonNAColNames', choose the one with the highest 'qualColName'
    # value (or the first one if no quality column is given).
    message("Removing duplicate identifiers using quality column '",
            qualColName, "'...")
    nonUniques <- unique(as_tibble(inDF)[duplicated(inDF[[refColName]]),
                                         refColName])
    # NOTE(review): nonUniques is a one-column tibble here, so
    # '%in% nonUniques' matches against a list/tibble and
    # 'for (nU in nonUniques)' below iterates over columns (i.e. once,
    # with nU = the whole vector of duplicated ids) rather than over
    # individual ids — confirm this matches the intended per-id logic
    retDF <- subset(inDF, !(get(refColName) %in% nonUniques))
    if(nrow(nonUniques)){
        for (nU in nonUniques) {
            tmpDF <- subset(inDF, get(refColName) == nU)
            # collect rows that carry at least one usable data value
            nonNArows <- NULL
            for (r in seq_len(nrow(tmpDF))) {
                if (any(!is.na(tmpDF[r, nonNAColNames]))) {
                    nonNArows <- c(nonNArows, r)
                }
            }
            if (length(nonNArows) > 1) {
                if (is.null(qualColName)) {
                    useRow <- 1
                }
                else {
                    # pick the row with the best quality value
                    qualVals <- tmpDF[nonNArows, qualColName]
                    useRow <- match(max(qualVals), qualVals)
                }
            }
            else {
                useRow <- nonNArows[1]
            }
            retDF <- rbind(retDF, tmpDF[useRow, ])
        }
    }
    message(nrow(retDF), " out of ", nrow(inDF),
            " rows kept for further analysis.")
    return(retDF)
}
.TPP_replaceZeros <- function(x){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Intensities of exactly zero are treated as missing values;
    # existing NAs are left untouched.
    isZero <- !is.na(x) & x == 0
    x[isZero] <- NA
    x
}
.TPP_importFct_rmZeroSias <- function(data.list,
                                      intensityStr){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # In every dataset of 'data.list', coerce all intensity columns
    # (those whose names contain 'intensityStr') to numeric and
    # replace zero intensities by NA.
    out <- lapply(names(data.list), function(l.name) {
        datTmp <- data.list[[l.name]]
        colsTmp <- colnames(datTmp)
        intensity.cols <- grep(intensityStr, colsTmp, value = TRUE)
        # double coercion via character handles factor columns too
        intensity.df <- subset(datTmp, select = intensity.cols) %>%
            mutate_all(as.character) %>% mutate_all(as.numeric)
        new.intensity.df <- intensity.df %>% mutate_all(.TPP_replaceZeros)
        datTmp[, intensity.cols] <- new.intensity.df
        return(datTmp)
    })
    names(out) <- names(data.list)
    return(out)
}
.TPP_importFct_checkExperimentCol <- function(expCol){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Validates the 'Experiment' column and sanitizes its entries by
    # replacing every non-alphanumeric character with an underscore;
    # a message lists all entries that were changed.
    if (is.null(expCol)) {
        stop(paste("Config table needs an 'Experiment'",
                   "column with unique experiment IDs."), "\n")
    }
    sanitized <- gsub("([^[:alnum:]])", "_", expCol)
    changed <- sanitized != expCol
    if (any(changed)) {
        message(
            paste("Replaced non-alphanumeric characters",
                  "in the 'Experiment' column entries:"),
            "\n",
            paste("'", paste(expCol[changed], collapse = "', '"),
                  "'\nby\n'", paste(sanitized[changed], collapse = "', '"),
                  sep = ""),
            "\n")
    }
    return(sanitized)
}
.TPP_importFct_checkComparisons <- function(confgTable){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Scans the "Comparison*" columns of a config table and builds,
    # for each valid column (exactly two marked experiments), a
    # "<treatment>_vs_<reference>" string named after that column.
    # If a 'Condition' column labels the two experiments as Vehicle
    # and Treatment, those roles are used; otherwise the first marked
    # experiment is the reference and the second the treatment.
    expConds <- confgTable$Condition
    expNames <- confgTable$Experiment
    compCols <- grep("Comparison", colnames(confgTable), ignore.case = TRUE,
                     value = TRUE)
    # an experiment is "marked" if its cell contains any alphanumeric
    compChars <- apply(confgTable[compCols], 2, function(x) {
        length(grep("[[:alnum:]]", x, value = TRUE))
    })
    comp_unequal_two <- compChars != 2
    if (any(comp_unequal_two)) {
        warning(paste("\nThe following comparison columns could not be evaluated",
                      "because they did not contain exactly two entries:\n\t\t"),
                paste(compCols[comp_unequal_two], collapse = ",\n\t\t"))
    }
    validCompCols <- compCols[!comp_unequal_two]
    allCompStrs <- c()
    if (length(validCompCols)) {
        message("Comparisons will be performed between the following experiments:")
        for (colName in validCompCols) {
            current_compEntries <- confgTable[[colName]]
            current_compRows <- grep("[[:alnum:]]", current_compEntries)
            current_compExps <- expNames[current_compRows]
            # default role assignment by row order
            compRef <- current_compExps[1]
            compTreatm <- current_compExps[2]
            if ("Condition" %in% names(confgTable)) {
                current_compConds <- expConds[current_compRows]
                if ("Vehicle" %in% current_compConds && "Treatment" %in%
                    current_compConds) {
                    compRef <- current_compExps[current_compConds ==
                        "Vehicle"]
                    compTreatm <- current_compExps[current_compConds ==
                        "Treatment"]
                }
            }
            compStr <- paste(compTreatm, "_vs_", compRef, sep = "")
            names(compStr) <- colName
            message(compStr)
            allCompStrs <- c(allCompStrs, compStr)
        }
        message("\n")
    }
    return(allCompStrs)
}
#' @importFrom stringr str_to_title
.TPP_importFct_checkConditions <- function(condInfo,
                                           expectedLength){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Validates the 'Condition' column: returns a character vector of
    # length 'expectedLength' containing only "Treatment", "Vehicle"
    # or NA (NAs when no condition information was supplied).
    # NOTE(review): flagGenerateConds is assigned but never used
    flagGenerateConds <- FALSE
    if (is.null(condInfo)) {
        message("No information about experimental conditions given.",
                "Assigning NA instead.\n",
                "Reminder: recognition of Vehicle and Treatment groups",
                "during pairwise \n",
                "comparisons is only possible when they are specified ",
                "in the config table.\n")
        condInfo <- rep(NA_character_, expectedLength)
    }
    else {
        # normalize capitalization, e.g. "vehicle" -> "Vehicle"
        condInfo <- as.character(condInfo) %>%
            stringr::str_to_title()
        condLevels <- unique(condInfo)
        invalidLevels =
            setdiff(condLevels, c("Treatment", "Vehicle"))
        if (length(invalidLevels) > 0) {
            # NOTE(review): with more than one invalid level the
            # vector is concatenated into the message without
            # separators — confirm whether a collapse is wanted
            stop("The entry '", invalidLevels,
                 paste("' in the condition column is invalid.",
                       "Only the values 'Treatment' and",
                       "'Vehicle' are allowed. Please correct",
                       "this and start again."))
        }
    }
    return(condInfo)
}
.TPP_checkFunctionArgs <-
    function(functionCall, expectedArguments){
        # internal function copied from TPP package to avoid
        # import of non-exported package functions
        #
        # Stops (without including the call in the message) as soon
        # as one of the names in 'expectedArguments' is absent from
        # the matched call 'functionCall'.
        myArgs <- names(functionCall)
        lapply(expectedArguments, function(arg) {
            if (!arg %in% myArgs) {
                stop("Error in ", paste(functionCall)[1],
                     ": argument '",
                     arg, "' is missing, with no default",
                     call. = FALSE)
            }
        })
    }
.TPP_nonLabelColumns <- function(){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Lookup table of config-table columns that never hold isobaric
    # labels, with the experiment type they apply to and whether they
    # are obligatory/exclusive for that type.
    out <- data.frame(
        column = c("Experiment", "Experiment",
                   "Experiment", "Path", "Path",
                   "Path", "Condition", "Replicate",
                   "Compound", "Temperature", "RefCol"),
        type = c("TR", "CCR", "2D", "TR", "CCR", "2D",
                 "TR", "TR", "2D", "2D", "2D"),
        obligatory = c(TRUE, TRUE, TRUE, FALSE, FALSE,
                       FALSE, TRUE, FALSE, TRUE, TRUE, TRUE),
        exclusive = c(FALSE, FALSE, FALSE, FALSE, FALSE,
                      FALSE, TRUE, TRUE, TRUE, TRUE, TRUE))
    return(out)
}
.TPP_detectLabelColumnsInConfigTable <-
    function(allColumns){
        # internal function copied from TPP package to avoid
        # import of non-exported package functions
        #
        # Given all column names of a config table, return the ones
        # holding isobaric labels: everything except the known
        # administrative columns and any "comparison" columns.
        .TPP_checkFunctionArgs(match.call(), c("allColumns"))
        noLabelCols <- .TPP_nonLabelColumns()$column %>%
            as.character %>%
            unique
        compCols <- grep("comparison", allColumns, value = TRUE,
                         ignore.case = TRUE)
        noLabelCols <- c(noLabelCols, compCols)
        labelCols <- setdiff(allColumns, noLabelCols)
        return(labelCols)
    }
.TPP_importCheckTemperatures <- function(temp){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # Converts the temperature columns to a matrix without row names
    # and fails if any row consists solely of missing values.
    tempMatrix <- as.matrix(temp)
    rownames(tempMatrix) <- NULL
    naRows <- apply(is.na(tempMatrix), 1, all)
    if (any(naRows)) {
        stop("Row(s) ", paste(which(naRows), collapse = ", "),
             " in the configuration table contain",
             " only missing temperature values.")
    }
    return(tempMatrix)
}
#' @importFrom openxlsx read.xlsx
#' @importFrom utils read.table
.TPP_importFct_readConfigTable <- function(cfg){
    # internal function copied from TPP package to avoid
    # import of non-exported package functions
    #
    # If 'cfg' is a file path, read it according to its extension
    # (txt = tab separated, csv = comma separated, xlsx = Excel).
    # Anything that is not a character value (e.g. a data frame
    # passed in directly) is returned unchanged.
    if (is.character(cfg)) {
        if (file.exists(cfg)) {
            # extension = last dot-separated chunk of the path
            strChunks <- strsplit(cfg, "\\.")[[1]]
            fileExtension <- strChunks[length(strChunks)]
            if (fileExtension == "txt") {
                tab <- read.table(
                    file = cfg, header = TRUE,
                    check.names = FALSE, stringsAsFactors = FALSE,
                    sep = "\t")
            }
            else if (fileExtension == "csv") {
                tab <- read.table(
                    file = cfg, header = TRUE,
                    check.names = FALSE, stringsAsFactors = FALSE,
                    sep = ",")
            }
            else if (fileExtension == "xlsx") {
                tab <- openxlsx::read.xlsx(cfg)
            }
            else {
                stop("Error during data import: ", cfg,
                     " does not belong to a valid configuration file.")
            }
        }
        else {
            stop("Error during data import: ", cfg,
                 " does not belong to a valid configuration file.")
        }
        cfg <- tab
    }
    return(cfg)
}
#' Import and check configuration table
#'
#' @param infoTable character string of a file path to
#' a config table (excel,txt or csv file) or data frame
#' containing a config table
#' @param type character string indicating dataset type
#' default is 2D
#'
#' @return data frame with config table
#'
#' @examples
#' data("config_tab")
#' TPP_importCheckConfigTable(config_tab, type = "2D")
#' @export
TPP_importCheckConfigTable <- function(infoTable, type = "2D"){
    .TPP_checkFunctionArgs(match.call(), c("infoTable", "type"))
    # declare column names used in NSE verbs to silence R CMD check
    Experiment <- Path <- Compound <- NULL
    # a data frame input is valid if it has an 'Experiment' column
    # plus at least one other column
    isValidDF <- FALSE
    if (is.data.frame(infoTable)) {
        if ((ncol(infoTable) > 1) &
            ("Experiment" %in% colnames(infoTable))) {
            isValidDF <- TRUE
        }
    }
    if (!is.character(infoTable) & !isValidDF) {
        stop("'infoTable' must either be a data frame",
             " with an 'Experiment' column \n",
             "and at least one isobaric label column,",
             "or a filename pointing at a \n",
             "table that fulfills the same criteria")
    }
    isValidType <- type %in% c("2D")
    if (!isValidType) {
        stop("'type' must have this value: '2D'")
    }
    # read from disk if a path was given, sanitize experiment IDs and
    # drop rows without an experiment name
    infoTable <- .TPP_importFct_readConfigTable(cfg = infoTable)
    infoTable$Experiment <-
        .TPP_importFct_checkExperimentCol(infoTable$Experiment)
    infoTable <- subset(infoTable, Experiment != "")
    givenPaths <- NULL
    if (any("Path" %in% colnames(infoTable))) {
        if (all(infoTable$Path == "") || all(is.na(infoTable$Path))) {
            message("Removing empty 'Path' column from config table")
            infoTable <- infoTable %>% select(-Path)
        }
        else {
            givenPaths <- infoTable$Path
        }
    }
    compStrs <- NA
    infoTable$Condition <- NULL
    # label columns whose entries cannot be coerced to numeric are
    # considered invalid and removed
    allCols <- colnames(infoTable)
    labelCols <- .TPP_detectLabelColumnsInConfigTable(allColumns = allCols)
    labelValues <- infoTable[, labelCols]
    labelValuesNum <- suppressWarnings(labelValues %>% apply(2,
        as.numeric))
    if (is.matrix(labelValuesNum)) {
        isInvalid <- labelValuesNum %>% apply(2, is.na) %>% apply(2,
            all)
    }
    else if (is.vector(labelValuesNum)) {
        # single-row config table: apply() collapsed to a vector
        isInvalid <- is.na(labelValuesNum)
    }
    invalidLabels <- labelCols[isInvalid]
    infoTable[, invalidLabels] <- NULL
    labelColsNew <- labelCols[!isInvalid]
    labelStr <- paste(labelColsNew, collapse = ", ")
    message("The following valid label columns were detected:\n",
        labelStr, ".")
    if (type == "2D") {
        temperatures <- infoTable$Temperature
        if (is.null(temperatures) | length(temperatures) < 2) {
            m1 <- paste("Insufficient temperatures (<2)",
                "specified in config file.")
            m2 <- paste("Does your configuration table",
                "have the correct column names?")
            stop(m1, "\n", m2)
        }
        else if (length(which(!infoTable$RefCol %in% labelColsNew)) !=
            0) {
            stop(paste("Labels in reference column not found",
                "in any of the label columns."))
        }
        hasCompoundCol <- any(allCols == "Compound")
        if (!hasCompoundCol) {
            m <- paste("Config table of a 2D-TPP experiment",
                "needs a 'Compound' column.")
            stop(m, "\n")
        }
        else {
            # sanitize compound names the same way as experiment IDs
            infoTable <- infoTable %>%
                mutate(Compound =
                    gsub("([^[:alnum:]])", "_", Compound))
        }
        out <- infoTable
    }
    else {
        # non-2D branch (currently unreachable because 'type' is
        # restricted to "2D" above); returns a list like the TPP
        # package does for TR/CCR configs
        temperatures <- subset(infoTable, select = labelColsNew)
        tempMatrix <- .TPP_importCheckTemperatures(temp = temperatures)
        infoList <- list(
            expNames = as.character(infoTable$Experiment),
            expCond = infoTable$Condition, files = givenPaths,
            compStrs = compStrs, labels = labelColsNew,
            tempMatrix = tempMatrix)
        out <- infoList
    }
    return(out)
}
#' Import 2D-TPP dataset main function
#'
#' Reads (or receives in-memory) one raw data table per MS run listed in
#' the config table, keeps only the relevant id/quality/signal columns,
#' resolves duplicate protein identifiers and returns one data frame per
#' experiment/temperature combination.
#'
#' @param configTable character string of a file path to a config table
#' @param data possible list of datasets from different MS runs
#' corresponding to a 2D-TPP dataset, circumvents loading datasets
#' referenced in config table, default is NULL
#' @param idVar character string indicating which data column provides the
#' unique identifiers for each protein.
#' @param intensityStr character string indicating which columns contain
#' raw intensities measurements
#' @param fcStr character string indicating which columns contain the actual
#' fold change values. Those column names containing the suffix \code{fcStr}
#' will be regarded as containing fold change values.
#' @param naStrs character vector indicating missing values in the data table.
#' When reading data from file, this value will be passed on to the argument
#' \code{na.strings} in function \code{read.delim}.
#' @param addCol character string indicating additional column to import
#' @param nonZeroCols column like default qssm that should be imported and
#' requested to be non-zero in analyzed data
#' @param qualColName character string indicating which column can be used for
#' additional quality criteria when deciding between different non-unique
#' protein identifiers.
#'
#' @return list of data frames containing different
#' datasets, named "<experiment>_<temperature>"
#'
#' @examples
#' data("config_tab")
#' data("raw_dat_list")
#' dataList <- import2dMain(configTable = config_tab,
#'                         data = raw_dat_list,
#'                         idVar = "protein_id",
#'                         fcStr = "rel_fc_",
#'                         addCol = "gene_name",
#'                         naStrs = NA,
#'                         intensityStr = "signal_sum_",
#'                         nonZeroCols = "qusm",
#'                         qualColName = "qupm")
#' @export
import2dMain <- function(configTable, data, idVar, fcStr,
                         addCol, naStrs, intensityStr,
                         qualColName, nonZeroCols){
    # internal import main function, adapted from TPP package
    # An empty string in any Path entry means the data come in via `data`,
    # so file paths are discarded altogether.
    files <- configTable$Path
    if (!is.null(files)) {
        if (any(files == "")) {
            files <- NULL
        }
    }
    # bind column names used with non-standard evaluation (R CMD check)
    Experiment <- Compound <- Temperature <- RefCol <- NULL
    expNames <- configTable$Experiment
    # reconcile in-memory data frames with file paths; exactly one of the
    # two sources is used downstream
    argList <- .TPP_importFct_CheckDataFormat(dataframes = data,
                                              files = files,
                                              expNames = expNames)
    data <- argList[["dataframes"]]
    files <- argList[["files"]]
    if (!is.null(files)) {
        # each file only needs reading once even if several config rows
        # reference it
        files2 <- files[!duplicated(names(files))]
        data <- .TPP_importFct_readFiles(files = files2,
                                         naStrs = naStrs)
    }
    # one config row per experiment/temperature combination
    iVec <- seq_len(nrow(configTable))
    dataList <- lapply(iVec, function(iTmp) {
        rowTmp <- configTable[iTmp, ]
        expTmp <- rowTmp$Experiment
        message("Importing 2D-TPP dataset: ", expTmp)
        tTmp <- rowTmp$Temperature
        dataTmp <- data[[expTmp]]
        # every config column that is not metadata is a TMT label column
        noFCCols <- c("Compound", "Experiment", "Temperature",
                      "RefCol", "Path", "Condition")
        allCols <- colnames(rowTmp)
        labelCols <- setdiff(allCols, noFCCols)
        # labels whose config entry is numeric (a concentration) are active
        # in this row; non-numeric entries (e.g. "-") are skipped
        labelValues <- suppressWarnings(rowTmp[, labelCols] %>%
                                            as.numeric)
        labelColsNum <- labelCols[!is.na(labelValues)]
        signalCols <- paste(intensityStr, labelColsNum, sep = "")
        relevant.cols <- c(idVar, qualColName, nonZeroCols, addCol,
                           signalCols) %>% unique
        # duplicate removal keys off the fold-change columns when
        # available, otherwise off the raw intensity columns
        if (!is.null(fcStr)) {
            fcCols <- paste(fcStr, labelColsNum, sep = "")
            relevant.cols <- c(relevant.cols, fcCols)
            dataCols <- fcCols
        }
        else {
            dataCols <- signalCols
        }
        if (!all(relevant.cols %in% colnames(dataTmp))) {
            notFound <- paste(setdiff(relevant.cols, colnames(dataTmp)),
                              collapse = "', '")
            stop("The following columns could not be found: '",
                 notFound, paste("'. Please check the suffices and the",
                                 "additional column names you have specified."))
        }
        # keep at most one row per protein id, preferring the entry with
        # the best quality score (see helper)
        dataFiltered <- .TPP_importFct_removeDuplicates(
            inDF = dataTmp,refColName = idVar,
            nonNAColNames = dataCols,
            qualColName = qualColName[1])
        idsTmp <- as.character(dataFiltered[, idVar])
        # unique_ID disambiguates a protein across runs/temperatures
        idsAnnotated <- paste(expTmp, tTmp, idsTmp, sep = "_")
        dataFinal <- dataFiltered %>% subset(select = relevant.cols) %>%
            mutate(temperature = tTmp, experiment = expTmp, unique_ID = idsAnnotated)
        return(dataFinal)
    })
    # name each list element "<experiment>_<temperature>"
    newNames <- vapply(seq(nrow(configTable)), function(iTmp) {
        rowTmp <- configTable[iTmp, ]
        tTmp <- rowTmp$Temperature
        expTmp <- rowTmp$Experiment
        newName <- paste(expTmp, tTmp, sep = "_")
        return(newName)
    }, "")
    names(dataList) <- newNames
    # NOTE(review): helper name suggests it drops entries whose intensity
    # ("sumionarea") columns are zero -- confirm against TPP sources
    out <- .TPP_importFct_rmZeroSias(data.list = dataList,
                                     intensityStr = intensityStr)
    return(out)
}
#' Transform configuration table from wide to long
#'
#' Gathers the label columns of a 2D-TPP config table into
#' (label, concentration) pairs, dropping the optional \code{Path} column
#' and any "-" placeholder concentrations.
#'
#' @param configWide data frame containing a config table in wide format
#' @return data frame containing the config table in long format with
#' columns \code{Compound}, \code{Experiment}, \code{Temperature},
#' \code{RefCol}, \code{label} and \code{conc}
#'
#' @importFrom tidyr gather
#' @examples
#' data("config_tab")
#' configWide2Long(configWide = config_tab)
#'
#' @export
configWide2Long <- function(configWide){
  # bind NSE column names (R CMD check)
  Path <- label <- conc <- Compound <- Experiment <-
    Temperature <- RefCol <- NULL
  # the Path column (if present) is irrelevant for the long format;
  # dropping it up front lets both cases share a single gather() pipeline
  # (previously duplicated in two if/else branches)
  if (any(grepl("Path", colnames(configWide)))) {
    configWide <- dplyr::select(configWide, -Path)
  }
  configLong <- configWide %>%
    gather(label, conc, -Compound,
           -Experiment, -Temperature, -RefCol) %>%
    filter(conc != "-")
  # return visibly: the original ended on an assignment, whose value is
  # returned invisibly, so interactive calls printed nothing
  return(configLong)
}
#' Annotate imported data list using a config table
#' @param dataList list of datasets from different MS runs
#' corresponding to a 2D-TPP dataset
#' @param geneNameVar character string of the column name that describes
#' the gene name of a given protein in the raw data files
#' @param configLong long formatted data frame of a corresponding
#' config table
#' @param intensityStr character string indicating which columns contain
#' raw intensities measurements
#' @param fcStr character string indicating which columns contain the actual
#' fold change values. Those column names containing the suffix \code{fcStr}
#' will be regarded as containing fold change values.
#'
#' @return data frame containing all data annotated
#' by information supplied in the config table
#'
#' @importFrom tidyr spread
#'
#' @examples
#' data("config_tab")
#' data("raw_dat_list")
#' dataList <- import2dMain(configTable = config_tab,
#'                         data = raw_dat_list,
#'                         idVar = "protein_id",
#'                         fcStr = "rel_fc_",
#'                         addCol = "gene_name",
#'                         naStrs = NA,
#'                         intensityStr = "signal_sum_",
#'                         nonZeroCols = "qusm",
#'                         qualColName = "qupm")
#' configLong <- configWide2Long(configWide = config_tab)
#' annotateDataList(dataList = dataList,
#'                 geneNameVar = "gene_name",
#'                 configLong = configLong,
#'                 intensityStr = "signal_sum_",
#'                 fcStr = "rel_fc_")
#' @export
annotateDataList <- function(dataList, geneNameVar, configLong,
                             intensityStr, fcStr){
  # bind NSE column names (R CMD check)
  channel <- signal <- Temperature <- RefCol <- label <-
    conc <- unique_ID <- spread_var <- NULL
  annotatedList <- lapply(dataList, function(runTab){
    longTab <- runTab %>% as_tibble() %>%
      # one row per (protein, channel): channels cover both the raw
      # intensity and the fold-change columns
      gather(channel, signal, matches(intensityStr), matches(fcStr)) %>%
      # strip both prefixes to recover the bare TMT label name
      mutate(label = gsub(fcStr, "", gsub(intensityStr, "", channel))) %>%
      # attach reference channel and concentration from the config table
      left_join(configLong %>%
                  dplyr::select(Temperature, RefCol, label, conc),
                by = c("temperature" = "Temperature", "label")) %>%
      mutate(spread_var =
               ifelse(grepl(fcStr, channel), "rel_value", "raw_value")) %>%
      dplyr::select(-channel, -unique_ID) %>%
      # back to one row per measurement, with raw_value/rel_value columns
      spread(spread_var, signal)
    longTab
  })
  combinedTab <- bind_rows(annotatedList)
  return(combinedTab)
}
#' Filter out contaminants
#'
#' Removes entries whose identifier is flagged as a contaminant.
#'
#' @param dataLong long format data frame of imported dataset
#'
#' @return data frame containing the full dataset without
#' contaminant entries
#'
#' @examples
#' data("simulated_cell_extract_df")
#' filterOutContaminants(simulated_cell_extract_df)
#'
#' @export
filterOutContaminants <- function(dataLong){
  # contaminant entries carry "##" in their representative id
  representative <- NULL
  dataLong %>%
    filter(!grepl("##", representative))
}
.checkRatioRef <- function(dataLong, idVar, concFactor = 1e6){
  # Internal: ensure protein fold changes are expressed relative to the
  # correct reference TMT channel (the channel named in `RefCol`).
  #
  # dataLong:   annotated long-format 2D-TPP data with columns label,
  #             RefCol, rel_value, raw_value, conc and temperature
  # idVar:      name of the protein identifier column
  # concFactor: divisor converting `conc` to the unit used for log10
  #             concentrations (default 1e6, i.e. mmol)
  #
  # Returns dataLong without raw_value NAs, with numeric `conc` and an
  # added `log_conc` column; fold changes are re-referenced if needed.
  label <- RefCol <- rel_value <- raw_value <- conc <- NULL
  # fold changes are correct iff every reference channel ratio equals 1
  refRatiosAreOne <- all(
    filter(dataLong, label == RefCol)$rel_value == 1,
    na.rm = TRUE)
  if (!refRatiosAreOne) {
    message("Recomputing ratios!")
    # re-reference per protein and temperature; across(all_of(...))
    # replaces the deprecated `group_by(.dots = ...)` idiom
    dataLong <- dataLong %>%
      group_by(dplyr::across(dplyr::all_of(c(idVar, "temperature")))) %>%
      mutate(rel_value = rel_value/rel_value[label == RefCol]) %>%
      ungroup()
  } else {
    message("Ratios were correctly computed!")
  }
  # shared post-processing (previously duplicated in both branches)
  dataLong %>%
    filter(!is.na(raw_value)) %>%
    mutate(conc = as.numeric(conc)) %>%
    mutate(log_conc = log10(conc/concFactor))
}
#' @importFrom stats median
.medianNormalizeRatios <- function(dataLong){
  # Internal: median-center fold changes within each
  # (temperature, concentration) group. The unnormalized values are kept
  # under `raw_rel_value`; `rel_value` becomes value / group median.
  rel_value <- temperature <- conc <-
    raw_rel_value <- NULL
  dataLong %>%
    rename(raw_rel_value = rel_value) %>%
    group_by(temperature, conc) %>%
    mutate(rel_value = raw_rel_value /
             median(raw_rel_value, na.rm = TRUE)) %>%
    ungroup()
}
#' Rename columns of imported data frame
#'
#' Standardizes the protein id and gene name columns to
#' \code{representative} and \code{clustername}, and collapses all
#' representative ids belonging to one gene into a single
#' "|"-separated string.
#'
#' @param dataLong long format data frame of imported dataset
#' @param idVar character string indicating which data column provides the
#' unique identifiers for each protein.
#' @param geneNameVar character string of the column name that describes
#' the gene name of a given protein in the raw data files
#'
#' @return data frame containing imported data with renamed
#' columns
#'
#' @examples
#' data("config_tab")
#' data("raw_dat_list")
#'
#' dataList <- import2dMain(configTable = config_tab,
#'                         data = raw_dat_list,
#'                         idVar = "protein_id",
#'                         fcStr = "rel_fc_",
#'                         addCol = "gene_name",
#'                         naStrs = NA,
#'                         intensityStr = "signal_sum_",
#'                         nonZeroCols = "qusm",
#'                         qualColName = "qupm")
#' configLong <- configWide2Long(configWide = config_tab)
#' annoDat <- annotateDataList(dataList = dataList,
#'                            geneNameVar = "gene_name",
#'                            configLong = configLong,
#'                            intensityStr = "signal_sum_",
#'                            fcStr = "rel_fc_")
#' renameColumns(annoDat,
#'              idVar = "protein_id",
#'              geneNameVar = "gene_name")
#' @export
renameColumns <- function(dataLong, idVar, geneNameVar){
  # bind NSE column names (R CMD check)
  clustername <- representative <- NULL
  # harmonize the id / gene name columns used downstream
  renamedTab <- dplyr::rename(dataLong,
                              "representative" = idVar,
                              "clustername" = geneNameVar)
  # per gene, merge all distinct representative ids into one
  # "|"-separated string (NAs removed by the .paste_rmNA helper)
  renamedTab %>%
    group_by(clustername) %>%
    mutate(representative =
             .paste_rmNA(unique(unlist(strsplit(representative,
                                                split = "\\|"))),
                         sep = "|")) %>%
    ungroup()
}
#' Import 2D-TPP dataset using a config table
#'
#' @param configTable character string of a file path to a config table
#' @param data possible list of datasets from different MS runs
#' corresponding to a 2D-TPP dataset, circumvents loading datasets
#' referenced in config table, default is NULL
#' @param idVar character string indicating which data column provides the
#' unique identifiers for each protein.
#' @param intensityStr character string indicating which columns contain
#' raw intensities measurements
#' @param fcStr character string indicating which columns contain the actual
#' fold change values. Those column names containing the suffix \code{fcStr}
#' will be regarded as containing fold change values.
#' @param naStrs character vector indicating missing values in the data table.
#' When reading data from file, this value will be passed on to the argument
#' \code{na.strings} in function \code{read.delim}.
#' @param qualColName character string indicating which column can be used for
#' additional quality criteria when deciding between different non-unique
#' protein identifiers.
#' @param medianNormalizeFC perform median normalization (default: TRUE).
#' @param addCol character string indicating additional column to import
#' @param filterContaminants boolean variable indicating whether data
#' should be filtered to exclude contaminants (default: TRUE).
#' @param nonZeroCols column like default qssm that should be imported and
#' requested to be non-zero in analyzed data
#' @param geneNameVar character string of the column name that describes
#' the gene name of a given protein in the raw data files
#' @param concFactor numeric value that indicates how concentrations need to
#' be adjusted to yield total unit e.g. default mmol - 1e6
#'
#' @return tidy data frame representing a 2D-TPP dataset
#'
#' @examples
#' data("config_tab")
#' data("raw_dat_list")
#' import_df <- import2dDataset(configTable = config_tab,
#'                              data = raw_dat_list,
#'                              idVar = "protein_id",
#'                              intensityStr = "signal_sum_",
#'                              fcStr = "rel_fc_",
#'                              nonZeroCols = "qusm",
#'                              geneNameVar = "gene_name",
#'                              addCol = NULL,
#'                              qualColName = "qupm",
#'                              naStrs = c("NA", "n/d", "NaN"),
#'                              concFactor = 1e6,
#'                              medianNormalizeFC = TRUE,
#'                              filterContaminants = TRUE)
#'
#' @export
import2dDataset <- function(configTable, data,
                            idVar = "representative",
                            intensityStr = "sumionarea_protein_",
                            fcStr = "rel_fc_protein_",
                            nonZeroCols = "qssm",
                            geneNameVar = "clustername",
                            addCol = NULL,
                            qualColName = "qupm",
                            naStrs = c("NA", "n/d", "NaN"),
                            concFactor = 1e6,
                            medianNormalizeFC = TRUE,
                            filterContaminants = TRUE){
  # bind NSE column names (R CMD check)
  raw_value <- rel_value <- NULL
  # 1) validate the config table and derive its long representation
  checkedConfig <- TPP_importCheckConfigTable(
    infoTable = configTable, type = "2D")
  longConfig <- configWide2Long(configWide = checkedConfig)
  # 2) import the per-run raw tables
  rawDataList <- import2dMain(configTable = checkedConfig,
                              data = data,
                              idVar = idVar,
                              fcStr = fcStr,
                              addCol = c(geneNameVar, addCol),
                              naStrs = naStrs,
                              intensityStr = intensityStr,
                              nonZeroCols = nonZeroCols,
                              qualColName = qualColName)
  # 3) annotate with temperatures / concentrations from the config
  annotatedData <- annotateDataList(dataList = rawDataList,
                                    geneNameVar = geneNameVar,
                                    configLong = longConfig,
                                    intensityStr = intensityStr,
                                    fcStr = fcStr)
  # 4) make sure fold changes refer to the reference channel
  ratioChecked <- .checkRatioRef(annotatedData, idVar = idVar,
                                 concFactor = concFactor)
  # 5) optional median normalization of fold changes
  if (medianNormalizeFC) {
    message("Median normalizing fold changes...")
    normData <- .medianNormalizeRatios(ratioChecked)
  } else {
    normData <- ratioChecked
  }
  # 6) harmonize column names and (optionally) drop contaminants
  tidyData <- renameColumns(normData,
                            idVar = idVar,
                            geneNameVar = geneNameVar)
  if (filterContaminants) {
    tidyData <- filterOutContaminants(tidyData)
  }
  # keep only rows with both a raw and a relative quantification
  dataOut <- filter(tidyData, !is.na(raw_value), !is.na(rel_value))
  return(dataOut)
}
#' Plot the registry's quality indicators over time
#'
#' Draws a horizontal bar chart of the indicator proportion per centre for
#' the most recent year, with points for the previous year, an aggregated
#' "Nasjonalt" (national) row, optional target / minimum-requirement
#' reference lines and binomial confidence intervals.
#'
#' @param indikatordata A data frame with the columns:
#' - AvdRESH
#' - year
#' - var (0/1 indicator variable, summed per centre and year)
#' - SenterKortNavn (short centre name)
#' @param tittel Plot title.
#' @param terskel Minimum number of registrations (N) required before a
#'   proportion is displayed.
#' @param minstekrav Minimum-requirement reference value, or NA for none.
#' @param maal Target reference value, or NA for none.
#' @param skriftStr Font size scaling factor.
#' @param pktStr Point size scaling factor.
#' @param legPlass Legend placement: 'top', 'topleft', 'topright' or 'nede'.
#' @param minstekravTxt Label printed at the minimum-requirement line.
#' @param maalTxt Label printed at the target line.
#' @param graaUt Centre names whose bars are greyed out, or NA for none.
#' @param decreasing Intended bar sort order. NOTE(review): currently has
#'   no effect -- both branches of the if/else below are identical.
#' @param outfile Output file name; '' plots to the active device.
#' @param lavDG Centre names with too low coverage to be shown, or NA.
#' @param width,height Figure dimensions in pixels.
#' @param maalretn 'hoy' if high values are good, 'lav' if low values are.
#' @param desimal NOTE(review): accepted but never used in the body --
#'   confirm whether decimal formatting of pst_txt was intended.
#' @param xmax NOTE(review): accepted but unconditionally overwritten
#'   inside the function -- confirm intended behavior.
#' @param lavDGtekst Text shown instead of a value for centres in lavDG.
#'
#' @export
#'
nraFigIndikator_v2 <- function(indikatordata, tittel='', terskel=30, minstekrav = NA, maal = NA, skriftStr=1.3, pktStr=1.4,
                               legPlass='top', minstekravTxt='Min.', maalTxt='Mรฅl', graaUt=NA, decreasing=F, outfile = '',
                               lavDG=NA, width=800, height=700, maalretn='hoy', desimal=FALSE, xmax=NA,
                               lavDGtekst='Dekningsgrad < 60 %')
{
  # tittel='testtittel'; terskel=5; minstekrav = NA; maal = 30; skriftStr=1.3; pktStr=1.4;
  # legPlass='top'; minstekravTxt='Min.'; maalTxt='Mรฅl'; graaUt=NA; decreasing=F; outfile = '';
  # lavDG=NA; width=800; height=700; inkl_konf=T; maalretn='hoy'; lavDGtekst='Dekningsgrad < 60 %'
  indikatordata <- indikatordata[indikatordata$year > max(indikatordata$year)-3, ] # keep only the last 3 years
  # per centre and year: number of events (Antall), registrations (N), percentage
  Tabell <- indikatordata %>% dplyr::group_by(SenterKortNavn, year) %>%
    dplyr::summarise(Antall = sum(var),
                     N = n(),
                     Andel = Antall/N*100)
  # wide tables (one column per year) with a national total row appended
  AntTilfeller <- tidyr::spread(Tabell[, -c(4,5)], 'year', 'Antall')
  AntTilfeller <- dplyr::bind_cols(SenterKortNavn=c(as.character(AntTilfeller[["SenterKortNavn"]]), "Nasjonalt"),
                                   dplyr::bind_rows(AntTilfeller[,-1], colSums(AntTilfeller[,-1], na.rm = T)))
  N <- tidyr::spread(Tabell[, -c(3,5)], 'year', 'N')
  N <- dplyr::bind_cols(SenterKortNavn=c(as.character(N[["SenterKortNavn"]]), "Nasjonalt"),
                        dplyr::bind_rows(N[,-1], colSums(N[,-1], na.rm = T)))
  N[is.na(N)] <- 0
  # add a "firstyear-lastyear" column summing across all years, then keep
  # only that aggregate column and the last single year
  AntTilfeller[,paste0(names(AntTilfeller)[2], '-', names(AntTilfeller)[dim(AntTilfeller)[2]])] <-
    rowSums(AntTilfeller[,-1], na.rm = T)
  AntTilfeller <- AntTilfeller[, c(1, (dim(AntTilfeller)[2]-1):dim(AntTilfeller)[2])]
  AntTilfeller <- as.data.frame(AntTilfeller)
  row.names(AntTilfeller) <- AntTilfeller[["SenterKortNavn"]]
  AntTilfeller <- AntTilfeller[, -1]
  # same aggregation for the registration counts
  N[,paste0(names(N)[2], '-', names(N)[dim(N)[2]])] <-
    rowSums(N[,-1], na.rm = T)
  N <- N[, c(1, (dim(N)[2]-1):dim(N)[2])]
  N <- as.data.frame(N)
  row.names(N) <- N[["SenterKortNavn"]]
  N <- N[, -1]
  # column order: aggregate period first, most recent year last
  AntTilfeller <- AntTilfeller[,c(2,1)]; N <- N[,c(2,1)]
  andeler <- AntTilfeller/N * 100
  # suppress proportions based on too few registrations or low coverage
  andeler[N < terskel] <- NA
  andeler[rownames(andeler) %in% lavDG, ] <- NA
  # NOTE(review): both branches are identical, so `decreasing` only
  # affects the order() argument itself -- TODO confirm intent
  if (decreasing){
    rekkefolge <- order(andeler[, dim(andeler)[2]], decreasing = decreasing, na.last = F)
  } else {
    rekkefolge <- order(andeler[, dim(andeler)[2]], decreasing = decreasing, na.last = F)
  }
  andeler <- andeler[rekkefolge, ]
  N <- N[rekkefolge, ]
  # open the output device (pdf/png/window) via the rapFigurer helper
  FigTypUt <- rapFigurer::figtype(outfile=outfile, width=width, height=height, pointsizePDF=11, fargepalett='BlaaOff')
  farger <- FigTypUt$farger
  if (max(N[,2], na.rm = T) < terskel) {
    # not enough national registrations in the most recent year:
    # show a message instead of the figure
    plot.new()
    # title(tittel) #, line=-6)
    # legend('topleft',utvalgTxt, bty='n', cex=0.9, text.col=farger[1])
    text(0.5, 0.6, paste0("Det er fรฆrre enn ", terskel,
                          " registreringer av indikatoren nasjonalt i ", names(N)[2], "."), cex=1.2)
  } else {
    andeler[N[, dim(andeler)[2]]<terskel, 1:2] <- NA
    # binomial confidence intervals (in percent) for the last year
    KI <- binomkonf(AntTilfeller[rekkefolge, dim(andeler)[2]], N[, dim(andeler)[2]])*100
    KI[, is.na(andeler[, dim(andeler)[2]])] <- NA
    # per-bar value labels; replaced by "N<threshold" / low-coverage text
    pst_txt <- paste0(sprintf('%.0f', andeler[, dim(andeler)[2]]), ' %')
    pst_txt[N[, dim(andeler)[2]]<terskel] <- paste0('N<', terskel)
    pst_txt[rownames(andeler) %in% lavDG] <- lavDGtekst
    # pad with NAs for the spacer rows added around the bars below
    pst_txt <- c(NA, pst_txt, NA, NA)
    soyleFarger <- rep(farger[3], length(andeler[,dim(andeler)[2]]))
    soyleFarger[which(rownames(andeler)=='Norge')] <- farger[4]
    if (!is.na(graaUt[1])) {soyleFarger[which(rownames(andeler) %in% graaUt)] <- 'gray88'}
    soyleFarger <- c(NA, soyleFarger)
    # remember graphics state so it can be restored before returning
    oldpar_mar <- par()$mar
    oldpar_fig <- par()$fig
    oldpar_oma <- par()$oma
    cexgr <- skriftStr
    # append " (N)" to each centre label and add a footer row for N
    rownames(andeler) <- paste0(rownames(andeler), ' (', N[, dim(N)[2]], ') ')
    andeler <- rbind(andeler, c(NA,NA,NA))
    rownames(andeler)[dim(andeler)[1]] <- paste0('(N, ', names(andeler)[dim(andeler)[2]], ') ')
    KI <- cbind(c(NA, NA), KI, c(NA, NA))
    # widen the left figure margin to fit the longest row label
    vmarg <- max(0, strwidth(rownames(andeler), units='figure', cex=cexgr)*0.75)
    par('fig'=c(vmarg, 1, 0, 1))
    # par('mar'=c(5.1, 4.1, 5.1, 9.1))
    par('oma'=c(0,1,0,0))
    par('mar'=c(5.1, 4.1, 5.1, 2.1))
    # NOTE(review): this overwrites the xmax parameter unconditionally
    xmax <- min(max(KI, max(andeler, na.rm = T), na.rm = T)*1.15,100)
    # blank spacer rows above and below the bars
    andeler <- rbind(c(NA,NA), andeler, c(NA,NA))
    rownames(andeler)[dim(andeler)[1]] <- ' '
    rownames(andeler)[1] <- ' '
    # first barplot pass: establishes the coordinate system (ypos)
    ypos <- barplot( t(andeler[,dim(andeler)[2]]), beside=T, las=1,
                     xlim=c(0,xmax),
                     names.arg=rep('',dim(andeler)[1]),
                     horiz=T, axes=F, space=c(0,0.3),
                     col=soyleFarger, border=NA, xlab = 'Andel (%)') # '#96BBE7'
    # background band colors: target (green), minimum requirement (yellow)
    fargerMaalNiva <- c('aquamarine3','#fbf850', 'red')
    if (maal > minstekrav & !is.na(maal) & !is.na(minstekrav)) {
      rect(xleft=minstekrav, ybottom=1, xright=maal, ytop=max(ypos)-1.6, col = fargerMaalNiva[2], border = NA)
      rect(xleft=maal, ybottom=1, xright=min(xmax, 100), ytop=max(ypos)-1.6, col = fargerMaalNiva[1], border = NA)}
    if (maal < minstekrav & !is.na(maal) & !is.na(minstekrav)) {
      rect(xleft=maal, ybottom=1, xright=minstekrav, ytop=max(ypos)-1.6, col = fargerMaalNiva[2], border = NA)
      rect(xleft=0, ybottom=1, xright=maal, ytop=max(ypos)-1.6, col = fargerMaalNiva[1], border = NA)}
    if (!is.na(maal) & is.na(minstekrav) & maalretn=='lav') {
      rect(xleft=0, ybottom=1, xright=maal, ytop=max(ypos)-1.6, col = fargerMaalNiva[1], border = NA)}
    if (!is.na(maal) & is.na(minstekrav) & maalretn=='hoy') {
      rect(xleft=maal, ybottom=1, xright=min(xmax, 100), ytop=max(ypos)-1.6, col = fargerMaalNiva[1], border = NA)}
    # second pass: redraw the bars on top of the background bands
    barplot( t(andeler[,dim(andeler)[2]]), beside=T, las=1,
             names.arg=rep('',dim(andeler)[1]),
             horiz=T, axes=F, space=c(0,0.3),
             col=soyleFarger, border=NA, xlab = 'Andel (%)', add=TRUE)
    title(main = tittel, cex.main=skriftStr*1.1)
    ypos <- as.numeric(ypos) #as.vector(ypos)
    yposOver <- max(ypos)-2 + 0.5*diff(ypos)[1]
    # vertical reference line + rotated label for the minimum requirement
    if (!is.na(minstekrav)) {
      lines(x=rep(minstekrav, 2), y=c(-1, yposOver), col=fargerMaalNiva[2], lwd=2)
      par(xpd=TRUE)
      text(x=minstekrav, y=yposOver, labels = minstekravTxt,
           pos = 4, cex=cexgr*0.65, srt = 90)
      par(xpd=FALSE)
    }
    # vertical reference line + rotated label for the target
    if (!is.na(maal)) {
      lines(x=rep(maal, 2), y=c(-1, yposOver), col=fargerMaalNiva[1], lwd=2)
      barplot( t(andeler[, dim(andeler)[2]]), beside=T, las=1,
               names.arg=rep('',dim(andeler)[1]),
               horiz=T, axes=F, space=c(0,0.3),
               col=soyleFarger, border=NA, xlab = 'Andel (%)', add=TRUE)
      par(xpd=TRUE)
      text(x=maal, y=yposOver, labels = maalTxt, pos = 4, cex=cexgr*0.65, srt = 90) #paste0(maalTxt,maal,'%')
      par(xpd=FALSE)
    }
    # confidence intervals drawn as horizontal error bars
    arrows(x0 = KI[1,], y0 = ypos, x1 = KI[2,], y1 = ypos,
           length=0.5/max(ypos), code=3, angle=90, lwd=1.8, col='gray') #, col=farger[1])
    legend('bottom', cex=0.9*cexgr, bty='n',
           lwd=1.8, lty = 1, pt.cex=1.8, col='gray',
           legend=paste0('Konfidensintervall ', names(N)[dim(N)[2]]))
    axis(1,cex.axis=0.9)
    mtext( rownames(andeler), side=2, line=0.2, las=1, at=ypos, col=1, cex=cexgr)
    # NOTE(review): antAar is assigned but never used
    antAar <- dim(andeler)[2]
    if (dim(andeler)[2]==2) {
      # two columns: points show the aggregate period, bars the last year
      par(xpd=TRUE)
      points(y=ypos, x=andeler[,1],cex=pktStr, pch= 19)
      par(xpd=FALSE)
      if (legPlass=='nede'){
        legend('bottomright', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',
               lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
               legend=names(N), ncol = 1)}
      if (legPlass=='top'){
        # legend('top', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
        #        lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
        #        legend=names(N), ncol = dim(andeler)[2])
        # legend text includes the national percentage and N per period
        legend('top', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
               lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
               legend=paste0(names(N), " (", as.character(round(as.numeric(andeler[substr(rownames(andeler), 1, 9) == "Nasjonalt", ]), 1)),
                             "%, N = ", N[rownames(N) == "Nasjonalt", ], ")"),
               ncol = 1)}
      if (legPlass=='topleft'){
        legend('topleft', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
               lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
               legend=names(N), ncol = dim(andeler)[2])}
      if (legPlass=='topright'){
        legend('topright', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
               lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
               legend=names(N), ncol = dim(andeler)[2])}
    } else {
      # three columns: open points, filled points, then bars
      par(xpd=TRUE)
      points(y=ypos, x=andeler[,1],cex=pktStr) #'#4D4D4D'
      points(y=ypos, x=andeler[,2],cex=pktStr,pch= 19)
      par(xpd=FALSE)
      if (legPlass=='nede'){
        legend(x=82, y=ypos[2]+1 ,xjust=0, cex=cexgr, bty='n', #bg='white', box.col='white',
               lwd=c(NA,NA,NA), pch=c(1,19,15), pt.cex=c(1.2,1.2,1.8), col=c('black','black',farger[3]),
               legend=names(N) )}
      if (legPlass=='top'){
        legend('top', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
               lwd=c(NA,NA,NA), pch=c(1,19,15), pt.cex=c(1.2,1.2,1.8), col=c('black','black',farger[3]),
               legend=names(N), ncol = dim(andeler)[2])
      }
    }
    # value labels at the left edge of each bar
    text(x=0, y=ypos, labels = pst_txt, cex=skriftStr, pos=4)#
    # restore graphics state and close the device if writing to file
    par('mar'= oldpar_mar)
    par('fig'= oldpar_fig)
    par('oma'= oldpar_oma)
    if ( outfile != '') {dev.off()}
  }
}
| /R/nraFigIndikator_v2.R | no_license | Rapporteket/nra | R | false | false | 10,146 | r | #' Gi en visuell fremstilling av registerets indikatorer over tid
#'
#' @param indikatordata En dataramme med fรธlgende kolonner:
#' - AvdRESH
#' - year
#' - var
#' - SenterKortNavn
#'
#' @export
#'
nraFigIndikator_v2 <- function(indikatordata, tittel='', terskel=30, minstekrav = NA, maal = NA, skriftStr=1.3, pktStr=1.4,
legPlass='top', minstekravTxt='Min.', maalTxt='Mรฅl', graaUt=NA, decreasing=F, outfile = '',
lavDG=NA, width=800, height=700, maalretn='hoy', desimal=FALSE, xmax=NA,
lavDGtekst='Dekningsgrad < 60 %')
{
# tittel='testtittel'; terskel=5; minstekrav = NA; maal = 30; skriftStr=1.3; pktStr=1.4;
# legPlass='top'; minstekravTxt='Min.'; maalTxt='Mรฅl'; graaUt=NA; decreasing=F; outfile = '';
# lavDG=NA; width=800; height=700; inkl_konf=T; maalretn='hoy'; lavDGtekst='Dekningsgrad < 60 %'
indikatordata <- indikatordata[indikatordata$year > max(indikatordata$year)-3, ] # behold bare siste 3 รฅr
Tabell <- indikatordata %>% dplyr::group_by(SenterKortNavn, year) %>%
dplyr::summarise(Antall = sum(var),
N = n(),
Andel = Antall/N*100)
AntTilfeller <- tidyr::spread(Tabell[, -c(4,5)], 'year', 'Antall')
AntTilfeller <- dplyr::bind_cols(SenterKortNavn=c(as.character(AntTilfeller[["SenterKortNavn"]]), "Nasjonalt"),
dplyr::bind_rows(AntTilfeller[,-1], colSums(AntTilfeller[,-1], na.rm = T)))
N <- tidyr::spread(Tabell[, -c(3,5)], 'year', 'N')
N <- dplyr::bind_cols(SenterKortNavn=c(as.character(N[["SenterKortNavn"]]), "Nasjonalt"),
dplyr::bind_rows(N[,-1], colSums(N[,-1], na.rm = T)))
N[is.na(N)] <- 0
AntTilfeller[,paste0(names(AntTilfeller)[2], '-', names(AntTilfeller)[dim(AntTilfeller)[2]])] <-
rowSums(AntTilfeller[,-1], na.rm = T)
AntTilfeller <- AntTilfeller[, c(1, (dim(AntTilfeller)[2]-1):dim(AntTilfeller)[2])]
AntTilfeller <- as.data.frame(AntTilfeller)
row.names(AntTilfeller) <- AntTilfeller[["SenterKortNavn"]]
AntTilfeller <- AntTilfeller[, -1]
N[,paste0(names(N)[2], '-', names(N)[dim(N)[2]])] <-
rowSums(N[,-1], na.rm = T)
N <- N[, c(1, (dim(N)[2]-1):dim(N)[2])]
N <- as.data.frame(N)
row.names(N) <- N[["SenterKortNavn"]]
N <- N[, -1]
AntTilfeller <- AntTilfeller[,c(2,1)]; N <- N[,c(2,1)]
andeler <- AntTilfeller/N * 100
andeler[N < terskel] <- NA
andeler[rownames(andeler) %in% lavDG, ] <- NA
if (decreasing){
rekkefolge <- order(andeler[, dim(andeler)[2]], decreasing = decreasing, na.last = F)
} else {
rekkefolge <- order(andeler[, dim(andeler)[2]], decreasing = decreasing, na.last = F)
}
andeler <- andeler[rekkefolge, ]
N <- N[rekkefolge, ]
FigTypUt <- rapFigurer::figtype(outfile=outfile, width=width, height=height, pointsizePDF=11, fargepalett='BlaaOff')
farger <- FigTypUt$farger
if (max(N[,2], na.rm = T) < terskel) {
plot.new()
# title(tittel) #, line=-6)
# legend('topleft',utvalgTxt, bty='n', cex=0.9, text.col=farger[1])
text(0.5, 0.6, paste0("Det er fรฆrre enn ", terskel,
" registreringer av indikatoren nasjonalt i ", names(N)[2], "."), cex=1.2)
} else {
andeler[N[, dim(andeler)[2]]<terskel, 1:2] <- NA
KI <- binomkonf(AntTilfeller[rekkefolge, dim(andeler)[2]], N[, dim(andeler)[2]])*100
KI[, is.na(andeler[, dim(andeler)[2]])] <- NA
pst_txt <- paste0(sprintf('%.0f', andeler[, dim(andeler)[2]]), ' %')
pst_txt[N[, dim(andeler)[2]]<terskel] <- paste0('N<', terskel)
pst_txt[rownames(andeler) %in% lavDG] <- lavDGtekst
pst_txt <- c(NA, pst_txt, NA, NA)
soyleFarger <- rep(farger[3], length(andeler[,dim(andeler)[2]]))
soyleFarger[which(rownames(andeler)=='Norge')] <- farger[4]
if (!is.na(graaUt[1])) {soyleFarger[which(rownames(andeler) %in% graaUt)] <- 'gray88'}
soyleFarger <- c(NA, soyleFarger)
oldpar_mar <- par()$mar
oldpar_fig <- par()$fig
oldpar_oma <- par()$oma
cexgr <- skriftStr
rownames(andeler) <- paste0(rownames(andeler), ' (', N[, dim(N)[2]], ') ')
andeler <- rbind(andeler, c(NA,NA,NA))
rownames(andeler)[dim(andeler)[1]] <- paste0('(N, ', names(andeler)[dim(andeler)[2]], ') ')
KI <- cbind(c(NA, NA), KI, c(NA, NA))
vmarg <- max(0, strwidth(rownames(andeler), units='figure', cex=cexgr)*0.75)
par('fig'=c(vmarg, 1, 0, 1))
# par('mar'=c(5.1, 4.1, 5.1, 9.1))
par('oma'=c(0,1,0,0))
par('mar'=c(5.1, 4.1, 5.1, 2.1))
xmax <- min(max(KI, max(andeler, na.rm = T), na.rm = T)*1.15,100)
andeler <- rbind(c(NA,NA), andeler, c(NA,NA))
rownames(andeler)[dim(andeler)[1]] <- ' '
rownames(andeler)[1] <- ' '
ypos <- barplot( t(andeler[,dim(andeler)[2]]), beside=T, las=1,
xlim=c(0,xmax),
names.arg=rep('',dim(andeler)[1]),
horiz=T, axes=F, space=c(0,0.3),
col=soyleFarger, border=NA, xlab = 'Andel (%)') # '#96BBE7'
fargerMaalNiva <- c('aquamarine3','#fbf850', 'red')
if (maal > minstekrav & !is.na(maal) & !is.na(minstekrav)) {
rect(xleft=minstekrav, ybottom=1, xright=maal, ytop=max(ypos)-1.6, col = fargerMaalNiva[2], border = NA)
rect(xleft=maal, ybottom=1, xright=min(xmax, 100), ytop=max(ypos)-1.6, col = fargerMaalNiva[1], border = NA)}
if (maal < minstekrav & !is.na(maal) & !is.na(minstekrav)) {
rect(xleft=maal, ybottom=1, xright=minstekrav, ytop=max(ypos)-1.6, col = fargerMaalNiva[2], border = NA)
rect(xleft=0, ybottom=1, xright=maal, ytop=max(ypos)-1.6, col = fargerMaalNiva[1], border = NA)}
if (!is.na(maal) & is.na(minstekrav) & maalretn=='lav') {
rect(xleft=0, ybottom=1, xright=maal, ytop=max(ypos)-1.6, col = fargerMaalNiva[1], border = NA)}
if (!is.na(maal) & is.na(minstekrav) & maalretn=='hoy') {
rect(xleft=maal, ybottom=1, xright=min(xmax, 100), ytop=max(ypos)-1.6, col = fargerMaalNiva[1], border = NA)}
barplot( t(andeler[,dim(andeler)[2]]), beside=T, las=1,
names.arg=rep('',dim(andeler)[1]),
horiz=T, axes=F, space=c(0,0.3),
col=soyleFarger, border=NA, xlab = 'Andel (%)', add=TRUE)
title(main = tittel, cex.main=skriftStr*1.1)
ypos <- as.numeric(ypos) #as.vector(ypos)
yposOver <- max(ypos)-2 + 0.5*diff(ypos)[1]
if (!is.na(minstekrav)) {
lines(x=rep(minstekrav, 2), y=c(-1, yposOver), col=fargerMaalNiva[2], lwd=2)
par(xpd=TRUE)
text(x=minstekrav, y=yposOver, labels = minstekravTxt,
pos = 4, cex=cexgr*0.65, srt = 90)
par(xpd=FALSE)
}
if (!is.na(maal)) {
lines(x=rep(maal, 2), y=c(-1, yposOver), col=fargerMaalNiva[1], lwd=2)
barplot( t(andeler[, dim(andeler)[2]]), beside=T, las=1,
names.arg=rep('',dim(andeler)[1]),
horiz=T, axes=F, space=c(0,0.3),
col=soyleFarger, border=NA, xlab = 'Andel (%)', add=TRUE)
par(xpd=TRUE)
text(x=maal, y=yposOver, labels = maalTxt, pos = 4, cex=cexgr*0.65, srt = 90) #paste0(maalTxt,maal,'%')
par(xpd=FALSE)
}
arrows(x0 = KI[1,], y0 = ypos, x1 = KI[2,], y1 = ypos,
length=0.5/max(ypos), code=3, angle=90, lwd=1.8, col='gray') #, col=farger[1])
legend('bottom', cex=0.9*cexgr, bty='n',
lwd=1.8, lty = 1, pt.cex=1.8, col='gray',
legend=paste0('Konfidensintervall ', names(N)[dim(N)[2]]))
axis(1,cex.axis=0.9)
mtext( rownames(andeler), side=2, line=0.2, las=1, at=ypos, col=1, cex=cexgr)
antAar <- dim(andeler)[2]
if (dim(andeler)[2]==2) {
par(xpd=TRUE)
points(y=ypos, x=andeler[,1],cex=pktStr, pch= 19)
par(xpd=FALSE)
if (legPlass=='nede'){
legend('bottomright', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',
lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
legend=names(N), ncol = 1)}
if (legPlass=='top'){
# legend('top', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
# lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
# legend=names(N), ncol = dim(andeler)[2])
legend('top', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
legend=paste0(names(N), " (", as.character(round(as.numeric(andeler[substr(rownames(andeler), 1, 9) == "Nasjonalt", ]), 1)),
"%, N = ", N[rownames(N) == "Nasjonalt", ], ")"),
ncol = 1)}
if (legPlass=='topleft'){
legend('topleft', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
legend=names(N), ncol = dim(andeler)[2])}
if (legPlass=='topright'){
legend('topright', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
lwd=c(NA,NA), pch=c(19,15), pt.cex=c(1.2,1.8), col=c('black',farger[3]),
legend=names(N), ncol = dim(andeler)[2])}
} else {
par(xpd=TRUE)
points(y=ypos, x=andeler[,1],cex=pktStr) #'#4D4D4D'
points(y=ypos, x=andeler[,2],cex=pktStr,pch= 19)
par(xpd=FALSE)
if (legPlass=='nede'){
legend(x=82, y=ypos[2]+1 ,xjust=0, cex=cexgr, bty='n', #bg='white', box.col='white',
lwd=c(NA,NA,NA), pch=c(1,19,15), pt.cex=c(1.2,1.2,1.8), col=c('black','black',farger[3]),
legend=names(N) )}
if (legPlass=='top'){
legend('top', cex=0.9*cexgr, bty='n', #bg='white', box.col='white',y=max(ypos),
lwd=c(NA,NA,NA), pch=c(1,19,15), pt.cex=c(1.2,1.2,1.8), col=c('black','black',farger[3]),
legend=names(N), ncol = dim(andeler)[2])
}
}
text(x=0, y=ypos, labels = pst_txt, cex=skriftStr, pos=4)#
par('mar'= oldpar_mar)
par('fig'= oldpar_fig)
par('oma'= oldpar_oma)
if ( outfile != '') {dev.off()}
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.