blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f9fa2417d52c6bfc7bad8f2b20fd2d097f1a5c3 | bebf7cfba981ca362a4ca728c380d7aeb0cb8b89 | /r/PCA.r | 69720adaeee057c8bd4cc58f3b9cdcc5291a13ef | [] | no_license | flying-bear/modeling_schizo | d5bb75ef64c10c0792f6e04b87f960685772e2cf | 4dc2dd7b73c40a810f53aa8c2a1df9762dc28f66 | refs/heads/master | 2021-06-20T06:07:34.468091 | 2021-01-10T16:20:31 | 2021-01-10T16:20:31 | 158,845,232 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,604 | r | PCA.r | library(tidyverse)
library(factoextra)
library(FactoMineR)
library(languageR)
library(ggfortify)
library(plyr)

# ---- Load and prepare data -------------------------------------------------
to_analyze <- read_csv('metrics_child.csv')
meta <- read_csv('All_psych.csv')
to_analyze <- left_join(meta, to_analyze)
to_analyze <- column_to_rownames(to_analyze, var = "ID")
# Recode the numeric diagnosis codes into readable factor labels
to_analyze$diagnosis = revalue(as.factor(meta$diagnosis), c('1'="schizophrenia", '0'="control", '2'='schizoaffective disorder', '3'='control with schizo-spectrum tendencies'))
only_analyze = select(to_analyze, -diagnosis)

# Clinical/demographic covariates included in the "with PANSS" variants
add_names <- c('PANSS_Total', 'PANSS_O', 'PANSS_P', 'PANSS_N', 'TD', 'age', 'education')

# Build one PCA biplot (points coloured by diagnosis) and save it to disk.
#   metric_names : embedding-metric columns to include
#   extra_cols   : additional covariate columns (character(0) for none)
#   title        : plot title
#   outfile      : jpeg filename for ggsave()
make_pca_biplot <- function(metric_names, extra_cols, title, outfile) {
  feats <- select(only_analyze, c(metric_names, extra_cols))
  feats <- scale(na.omit(feats), center = TRUE, scale = TRUE)
  dd <- na.omit(select(to_analyze, c(metric_names, extra_cols, 'diagnosis')))
  p <- autoplot(prcomp(feats), data = dd,
                colour = 'diagnosis', label = TRUE, label.size = 5,
                loadings = TRUE, loadings.colour = 'blue', loadings.label.repel = T,
                loadings.label = TRUE, loadings.label.size = 5) + ggtitle(title)
  # Pass the plot explicitly: inside a function the plot object is never
  # auto-printed, so ggsave()'s default of last_plot() would not pick it up.
  ggsave(outfile, plot = p, width = 50, height = 27, units = "cm")
}

# ---- One biplot with and one without PANSS covariates per embedding type ---
embeddings <- c(bert = "BERT", sif = "SIF", elmo = "ELMo")
for (key in names(embeddings)) {
  label <- embeddings[[key]]
  # data.frame() is kept here (as in the original) so grep sees syntactic names
  nm <- names(data.frame(only_analyze))
  metric_names <- nm[grep(key, nm)]
  base_title <- paste0("Principal Correspondence Analysis using ggfortify, biplot on child task ", label)
  make_pca_biplot(metric_names, add_names, paste0(base_title, " with PANSS"),
                  paste0("child_PANSS_", label, ".jpeg"))
  make_pca_biplot(metric_names, character(0), base_title,
                  paste0("child_", label, ".jpeg"))
}
|
c6b9107b5544fc426f6ff3e2177a176c2187d360 | f39590573307a87a2092282a686fccae9dd15b06 | /Data_mining/BUS_decoders/BUS_decoders/Code/Bus A seperation.R | 655026b8e4ae3ea46424df7bce782e64bdf60eaf | [] | no_license | dwipam/code | dec1e8df712af029da370ad231382bbf50140318 | 0426e9bf4be48b6ff78dcc17f42f452f40a4e812 | refs/heads/master | 2021-01-18T02:23:38.642813 | 2020-03-29T07:26:13 | 2020-03-29T07:26:13 | 45,200,803 | 7 | 8 | null | 2020-03-29T07:26:14 | 2015-10-29T17:50:53 | HTML | UTF-8 | R | false | false | 77 | r | Bus A seperation.R | A_bus <- subset(clean_data, route_id=="A")
# Drop the row-identifier columns from the route-A subset; they carry no
# analytic information
A_bus$ID1 <- NULL
A_bus$id <- NULL
66854d3e58851e8fa3852e4e0ce05105372b4e5e | f4e01ceee3421c07036a1779d7e078a4548cbc90 | /ui.R | 76d4b35f6304321f4d70e7868fd01b78a59b8c00 | [] | no_license | lstaugler/ShinyApp | 10fa035f011ff90df017336e53222f49e6688321 | 2108fc0db9336b8703f16a6d4a6bd1c9eb9c36f3 | refs/heads/main | 2023-06-13T02:58:10.070791 | 2021-07-05T15:09:13 | 2021-07-05T15:09:13 | 382,706,595 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,069 | r | ui.R | library(shinydashboard)
# UI definition for the "Student Success in Math Class" shinydashboard app.
library(xlsx)
library(DT)
# Student performance data (UCI "student-mat", sheet 1).
# NOTE(review): df is not referenced anywhere in this UI definition --
# presumably the matching server script uses the same data; confirm whether
# this load is needed here.
df <- read.xlsx("student-mat.xlsx", 1)
# Page layout: purple header + sidebar with four menu entries + one body tab
# per entry (tabName values must match between menuItem and tabItem).
dashboardPage(skin="purple",
              dashboardHeader(title = "Student Success in Math Class", titleWidth=350),
              dashboardSidebar(sidebarMenu(
                menuItem("Dashboard", tabName = "dashboard", icon = icon("door-open")),
                menuItem("Histogram", tabName = "histogram", icon = icon("chart-bar")),
                menuItem("Regression", tabName = "regression", icon = icon("chart-line")),
                menuItem("Data Table", tabName = "datatable", icon = icon("table"))
              )),
              dashboardBody(
                tabItems(
                  # Landing tab: static text describing the data set and app usage
                  tabItem(tabName = "dashboard",
                          h1("Overview of this Dashboard"),
                          br(),
                          p(strong(tags$u("What is this dashboard all about?"))),
                          p("Here in this dashboard I will be evaluating student progress on math exams
                            given a variety of attributes."),
                          br(),
                          p(strong(tags$u("Where is the data from?"))),
                          p("Data was used from the UCI Machine Learning Repository. The attributes can be described as follows:"),
                          p(span("school", style="color:red"), "- student's school",
                            span("sex", style="color:red"),"- student's sex",
                            span("age",style="color:red")," - student's age",
                            span("address",style="color:red")," - student's home address type",
                            span("famsize",style="color:red")," - family size",
                            span("Pstatus",style="color:red")," - parent's cohabitation status",
                            span("Medu",style="color:red")," - mother's education",
                            span("Fedu",style="color:red")," - father's education",
                            span("Mjob",style="color:red"), "- mother's job",
                            span("Fjob",style="color:red"), "- father's job",
                            span("reason",style="color:red"), "- reason to choose this school",
                            span("guardian",style="color:red"), "- student's guardian",
                            span("traveltime",style="color:red"), "- home to school travel time",
                            span("studytime",style="color:red"), "- weekly study time",
                            span("failures",style="color:red"), "- number of past class failures",
                            span("schoolsup",style="color:red"), "- extra educational support",
                            span("famsup",style="color:red"), "- family educational support",
                            span("paid",style="color:red"), "- extra paid classes within the course subject",
                            span("activities",style="color:red"), "- extra-curricular activities",
                            span("nursery",style="color:red"), "- attended nursery school",
                            span("higher",style="color:red"), "- wants to take higher education",
                            span("internet",style="color:red"), "- Internet access at home",
                            span("romantic",style="color:red"), "- with a romantic relationship",
                            span("famrel",style="color:red"), "- quality of family relationships",
                            span("freetime",style="color:red"), "- free time after school",
                            span("goout",style="color:red"), "- going out with friends",
                            span("Dalc",style="color:red"), "- workday alcohol consumption",
                            span("Walc",style="color:red"), "- weekend alcohol consumption",
                            span("health",style="color:red"), "- current health status",
                            span("absences",style="color:red"), "- number of school absences",
                            # these grades are related with the course subject, Math or Portuguese:
                            span("G1",style="color:red"), "- first period grade",
                            span("G2",style="color:red"), "- second period grade",
                            span("G3",style="color:red"), "- final grade"),
                          br(),
                          p(strong(tags$u("How can I use this dashboard?"))),
                          p("The histogram contains a histogram of the students' final scores on their math exams.
                            The regression tab goes through linear regression for all of the attributes. You have the
                            choice to run simple linear regression or multiple linear regression.")
                  ),
                  # Histogram of final scores; the server renders "distPlot"
                  # using the bin count chosen with the "bins" slider
                  tabItem(tabName = "histogram",
                          fluidRow(
                            h1("Histogram of Final Scores"),
                            box(plotOutput(outputId = "distPlot")),
                            box(
                              title = "# of Students broken into bins",
                              sliderInput(inputId = "bins",
                                          label = "Number of bins:",
                                          min = 2,
                                          max = 10,
                                          value = 4)
                            )
                          )
                  ),
                  # Regression tab: variable pickers ("xvariable"/"yvariable")
                  # are rendered server-side; outputs are the lm summary and
                  # diagnostic plot
                  tabItem(tabName = "regression",
                          fluidRow(
                            h1("Linear Regression"),
                            sidebarPanel(
                              uiOutput("xvariable"),
                              uiOutput("yvariable")
                            ),
                            mainPanel(
                              fluidRow(column(6, verbatimTextOutput('LMSum')) , column(6, plotOutput('diagPlot')))
                            )
                          )
                  ),
                  # Raw data table rendered server-side as DT output "studentTable"
                  tabItem(
                    tabName = "datatable",
                    fluidRow(
                      h1("Student Table"),
                      DT::dataTableOutput("studentTable")
                    )
                  )
                )
              )
)
64570877fcb97f79c2660944a6358dc6d93eb661 | 0c6d2f47a296217105f38ae32ad5cf8e5deef1e2 | /man/printMEC.Rd | c815b7f9dfa0b8c8de32167a2d512b946232cf08 | [] | no_license | cran/RFOC | 338616505b90e1f3c9511ae9b55a1d17f73d1bd6 | b2c97a028b6cf9802439a74d5a6140a3ce9d07e4 | refs/heads/master | 2021-06-01T20:24:37.900822 | 2018-01-18T10:13:23 | 2018-01-18T10:13:23 | 17,713,811 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 454 | rd | printMEC.Rd | \name{printMEC}
\alias{printMEC}
\title{Print focal mechanism}
\description{Prints the parameters of a focal mechanism (MEC) list in a readable form.
}
\usage{printMEC(x, digits = max(3, getOption("digits") - 3), ...)
}
\arguments{
\item{x}{Mechanism list}
\item{digits}{digits for numeric information}
\item{\dots}{standard printing parameters}
}
\value{
No return value; called for the side effect of printing the mechanism.
}
\author{Jonathan M. Lees <jonathan.lees@unc.edu>}
\examples{
mc = CONVERTSDR(65, 32, -34 )
printMEC(mc)
}
\keyword{misc}
|
4e3f314037cadb8210a01a2cfe40adfccd5394de | 99ffac052ed1a630a9572b166d0b1f8f1f27fa63 | /simOcc/pkgBuild/dependencies.R | 6e6d7d66fd9d871152065a743db7d20f3c405fcd | [] | no_license | rBatt/trawl | a253ed1cf4912c2dbf6c7599292283610f4b28a6 | 0d11503d900ddc5bf6777fbebcf01f585d6d264f | refs/heads/master | 2021-06-04T03:50:35.595206 | 2016-07-13T02:29:42 | 2016-07-13T02:29:42 | 25,880,485 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 389 | r | dependencies.R |
# Register this package's dependencies in the DESCRIPTION file.
# Each package is added via devtools::use_package() exactly as before, in the
# same order (which is also the order they appear in DESCRIPTION).
update_dependencies <- function() {
  # (package, dependency type) pairs, in registration order
  pkg_specs <- list(
    c("raster",         "Depends"),   # basis for handling all data sets
    c("trawlData",      "Suggests"),  # meets basic requirements for data content and format
    c("rbLib",          "Suggests"),
    c("trawlDiversity", "Suggests"),
    c("fields",         "Depends")
  )
  for (spec in pkg_specs) {
    devtools::use_package(spec[1], type = spec[2])
  }
}
|
dd3f72e28f601af594bb51068301befb03185ddb | 5b6d5398e2739f1eac348c0d8b5fdcd104788ca7 | /plot3.R | b60abfc8a4b48d5d468c700b56d3377d9002ccf0 | [] | no_license | mariasu11/ExData_Plotting2 | 4df91c5cb7cdff86f0048cc26988dcf8f575b40c | 167f52d2e32f04411ed5cca0b82d849d58b95cc4 | refs/heads/master | 2016-09-06T14:18:49.296968 | 2014-12-20T20:54:07 | 2014-12-20T20:54:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 822 | r | plot3.R | ##Read data in R
## Read data (NEI = emissions records, SCC = source classification table;
## SCC is loaded for reference even though this plot only needs NEI)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Subset for Baltimore City, fips == 24510
baltimore <- NEI[NEI$fips== "24510", ]
## Total emissions in Baltimore City by year and source type
balemissions <- aggregate(baltimore$Emissions, list(baltimore$year, baltimore$type), FUN = "sum")
## Rename columns
names(balemissions) <- c("Year", "Type", "Emissions")
## Load ggplot2 -- fail with a clear message instead of calling
## install.packages() unconditionally on every run (installing from inside a
## script is a heavy side effect and breaks in non-interactive sessions
## without a configured CRAN mirror)
if (!requireNamespace("ggplot2", quietly = TRUE)) {
  stop("Package 'ggplot2' is required; install it with install.packages('ggplot2')")
}
library(ggplot2)
## Create plot. print() is required so the ggplot is actually rendered into
## the png device: when this file is run via source(), the bare expression
## is not auto-printed and plot3.png would come out blank.
png(filename = "plot3.png",width = 480, height = 480,units = "px")
g <- ggplot(balemissions, aes(Year,Emissions))
print(
  g + geom_line(aes(color=Type)) + facet_grid(.~Type) + xlab("Year") +
    ylab(expression('Total PM'[2.5]*" Emissions")) +
    ggtitle("Total Emissions in Baltimore City from 1999 to 2008 by Source Type")
)
dev.off()
|
8729e7c8eb96e70d9e2d4d0a607310682e6b8bb1 | 8e219366398ff627114e7486e548801a3dfd73c3 | /R/perm_dist.R | 96f552852c3686325c363d0237802fbc1708bc9f | [
"MIT"
] | permissive | vroni-g/change_point_detection | 1730190075c0292da3c81ee49839c6142b1b0c45 | 7465dc6fdaaa1c811aed901105ba046d9d986900 | refs/heads/main | 2023-04-12T06:08:37.934998 | 2021-08-23T14:05:18 | 2021-08-23T14:05:18 | 394,594,258 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,495 | r | perm_dist.R | #' Permutation distribution
#'
#' Obtains the permutation distribution of the maximum statistic and the STCS,
#' named maxT and stcs, respectively. Each is a vector of length nperm where
#' entry n is the maximum over the nth permutation. The (1-alpha)th
#' percentile of a distribution is the threshold of significance adjusted
#' for multiple testing.
#'
#' @param data Data in a 3d array, where the first two dimensions are the
#' physical dimensions (e.g., lon and lat), and the third one is time.
#' @param fx function to be applied at each grid cell. Should be self
#' sufficient - no extra arguments needed. Should return only the test
#' statistic
#' @param nperm number of permutations. Defaults to 1000
#' @param alpha_local cell-level significance threshold forwarded to get_stcs
#' @param alpha_global global significance level (currently not used inside
#' this function; kept for interface compatibility)
#' @param null_distribution null distribution forwarded to get_stcs
#' @param block_size optional block length for block permutations, forwarded
#' to perm_matrix (NULL for unrestricted permutations)
#' @param seed random seed forwarded to perm_matrix
#' @param verbose Counter returning when the function is done with 10 function
#' calls
#' @return a list with the permutation distributions maxT, stcs, wt (combined
#' cluster statistic) and peak_intensity, plus original_results (the statistic
#' map of the last permutation), original_wt (its per-cluster combined
#' statistics) and original_cluster (its cluster table)
#' @export perm_dist
perm_dist<- function(data, fx, nperm=1000,
                     alpha_local, alpha_global, null_distribution,
                     block_size = NULL, seed, verbose = TRUE){

  perms <- perm_matrix(nobs = dim(data)[3], nperm = nperm, block_size = block_size, seed = seed)

  maxT <- vector(length = nperm)
  stcs <- vector(length = nperm)
  stcs_maxT <- vector(length = nperm)
  peak_intensity <- vector(length = nperm)
  # cluster tables, kept per permutation to derive cluster p-values later
  perm_results <- vector(length = nperm, mode = 'list')

  cat("starting permutations:\n")
  # convert data to 2d matrix; $Y holds the values, the remaining elements
  # describe the grid layout and are loop-invariant, so extract them once
  data <- array_to_matrix(data)
  data_info <- data[2:5]

  for(i in seq_len(nperm)){
    cat("Permutation",i,"\n")
    tmp1 <- apply(data$Y[perms[i,],], 2, fx)
    maxT[i] <- max(abs(as.vector(tmp1)), na.rm = TRUE)

    # reinsert NA values at grid cells that array_to_matrix dropped
    tmp <- matrix(NA, ncol = data_info$ncol, nrow = data_info$nrow)
    tmp[data_info$wh.sel] <- tmp1

    tmp_stcs <- get_stcs(tmp, alpha_local, null_distribution)
    perm_results[i] <- list(tmp_stcs$clusters)
    stcs[i] <- tmp_stcs$stcs
    stcs_maxT[i] <- tmp_stcs$stcs_maxT
    peak_intensity[i] <- tmp_stcs$peak_intensity
    rm(tmp_stcs)

    if(verbose) if((i%%10)==0) cat(i,"\n")
  }

  # empirical distributions of the three cluster statistics
  dis_peak_intensity <- ecdf(peak_intensity)
  dis_stcs <- ecdf(stcs)
  dis_stcs_maxT <- ecdf(stcs_maxT)

  # For one permutation's cluster table: combine the three per-cluster
  # p-values (peak intensity, cluster size, cluster maximum) Tippett-style on
  # the log scale and return the maximum combined statistic; with
  # last = TRUE also return the per-cluster values themselves.
  get_wt <- function(clust_perm, dis_peak_intensity, dis_stcs, dis_stcs_maxT, nperm, last = FALSE){

    # p-values for cluster j; 1/nperm is added so no p-value is exactly zero.
    # BUG FIX: dis_stcs_maxT is now an explicit parameter -- it was missing
    # from this signature, so the map() call below failed with an
    # "unused argument" error.
    get_p <- function(j, clust_perm, dis_peak_intensity, dis_stcs, dis_stcs_maxT, nperm){
      p_peak_intensity <- 1 - dis_peak_intensity(clust_perm$cluster.max[j]) + 1/nperm
      if(p_peak_intensity <= 0) p_peak_intensity <- 0.000001
      p_stcs <- 1 - dis_stcs(clust_perm$cluster.count[j]) + 1/nperm
      if(p_stcs <= 0) p_stcs <- 0.000001
      p_stcs_maxT <- 1 - dis_stcs_maxT(clust_perm$cluster.max[j]) + 1/nperm
      if(p_stcs_maxT <= 0) p_stcs_maxT <- 0.000001
      # combine in new test statistic (minimum log p-value, Tippett-style)
      w <- 1 - min(log(p_peak_intensity), log(p_stcs), log(p_stcs_maxT))
      if (is.finite(w)) w else 0
    }

    # seq_len() instead of seq(1:n): behaves correctly for an empty table
    js <- seq_len(length(clust_perm$cluster.count))
    # map over each cluster of this permutation
    w_tmp <- purrr::map(js, get_p, clust_perm = clust_perm, dis_peak_intensity = dis_peak_intensity,
                        dis_stcs = dis_stcs, dis_stcs_maxT = dis_stcs_maxT, nperm = nperm)
    # return the maximum Tippett value; for the last permutation also return
    # the individual per-cluster values
    if(last){
      list(max(unlist(w_tmp), na.rm = TRUE), unlist(w_tmp))
    } else {
      max(unlist(w_tmp), na.rm = TRUE)
    }
  }

  # Maximum combined statistic for every permutation but the last.
  # BUG FIX: dis_stcs_maxT was not forwarded in either call below, which made
  # get_wt fail with a "missing argument" error at run time.
  wt <- unlist(purrr::map(perm_results[1:(nperm-1)], get_wt,
                          dis_peak_intensity = dis_peak_intensity, dis_stcs = dis_stcs,
                          dis_stcs_maxT = dis_stcs_maxT, nperm = nperm))

  # last permutation: also keep the per-cluster combined statistics
  l <- get_wt(perm_results[[nperm]], dis_peak_intensity = dis_peak_intensity,
              dis_stcs = dis_stcs, dis_stcs_maxT = dis_stcs_maxT, last = TRUE, nperm = nperm)
  # append the last maximum Tippett value to the rest
  wt <- c(wt, l[[1]])

  cat("finished!\n\n")
  # BUG FIX: original_cluster used to be passed as a *second argument* to
  # return() ("multi-argument returns are not permitted"); it is now a
  # proper element of the returned list.
  return(list(maxT = maxT, stcs = stcs, wt = wt, peak_intensity = peak_intensity,
              original_results = tmp, original_wt = l[[2]],
              original_cluster = perm_results[[nperm]]))
}
|
9cb8731e05c43bb05093871aae759c0566b78bad | a8d4b6aaecd5dd77e48c96d90c37ae17cf15a30c | /man/fm.Mobius.Rd | e618e998c882f9692bb16e267a8f8f7edcf7f9cc | [] | no_license | cran/Rfmtool | 2c3d74fa00707d9c61e186897a04e6ce5aa49729 | 02ec854ac714c4def265b2c290bdd1843a047c94 | refs/heads/master | 2023-02-08T12:10:31.096293 | 2023-02-04T08:32:30 | 2023-02-04T08:32:30 | 54,157,875 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,157 | rd | fm.Mobius.Rd | \name{fm.Mobius}
\alias{fm.Mobius}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Mobius transform function
}
\description{Calculates the Mobius representation of a general fuzzy measure.
 Both the input and the output are arrays of size 2^n = m in binary ordering.
}
\usage{
fm.Mobius(v,env=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{v}{Fuzzy measure value in standard representation.}
\item{env}{Environment variable obtained from fm.Init(n).}
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
\item{output}{The output is the fuzzy measure in Mobius representation.}
}
\author{
%% ~~who you are~~
Gleb Beliakov, Andrei Kelarev, Quan Vu, Daniela L. Calderon, Deakin University
}
\examples{
env<-fm.Init(3)
fm.Mobius(c(0, 0.3, 0.5, 0.6, 0.4, 0.8, 0.7, 1),env)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Mobius }
|
40a1962dac109aea8a07bf9546d8ad34c96bb7f5 | 34ff9f015f50f206d2c9a565286fe7e95696b77b | /Debutant/d.R | c24eeff8c3d3ee58b47f798b41659da182afeca8 | [] | no_license | bthieurmel/lesson | b6da235a720b4f2fab887b998ec1ee68cc088d47 | aa345d0cae259bd654bce5a8c66bbbe9b3e00e48 | refs/heads/master | 2023-07-07T02:05:06.008079 | 2023-06-21T14:59:56 | 2023-06-21T14:59:56 | 80,403,481 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 56 | r | d.R |
# Add two fixed values and return their sum (always 3), then call it once so
# the result is displayed when the script is run.
f <- function() {
  first <- 1
  second <- 2
  first + second
}
f()
|
5737a1e6b40e778c7bfb21675b76354da2af39dd | 15c7d2e44f6e0073cc8f822870697a8c392d9377 | /Identify.R | 5604beebee7588e71f33beb2b6b072086cf8adf5 | [] | no_license | lapinskm/pageIdentification | c7ef8cf350cbd39beeef5ee186778a2aa83c82da | 09f135b9c27815951b17f7aa701a75b20af292ac | refs/heads/master | 2021-04-30T18:08:14.423727 | 2017-01-29T02:52:00 | 2017-01-29T02:52:00 | 80,325,744 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,851 | r | Identify.R | library(stringi)
library(base)
library(plyr)
library(htmltidy)
library(xml2)
library(compiler)
library(randomForest)
library(e1071)
library(class)
# Read the category label of one training file: the category is the first
# whitespace-separated token on the file's first line (the second metadata
# line holds the source URL and is ignored here).
readCategory <- function(filename) {
  # readLines() on a path opens and closes the file itself, so no connection
  # can be leaked if an error occurs mid-read (the original explicit
  # file()/close() pair leaked the connection on error)
  descrLines <- readLines(filename, n = 2)
  category <- strsplit(descrLines[1], " ")[[1]][1]
  return(category)
}
# Read one raw training file, drop its two leading metadata lines, and return
# the remaining HTML content cleaned up by tidy_html().
readDataFile <- function(filename) {
  raw <- readChar(filename, file.info(filename)$size)
  # the first two lines hold category/URL metadata, not page content
  without_meta <- stri_replace_first_regex(raw, "^.*?\n.*?\n", "")
  tidy_html(without_meta)
}
# HTML tags whose occurrence counts become features; the commented-out tags
# were tried and are currently excluded from the feature set
tagNames=c(#'video', 'audio', 'object','canvas','blockquote',
           'article', 'span',
           'input', 'form', 'link',
           # 'table', 'h1', 'h2', 'h3', 'li', 'ul', 'tr', 'hr',
           'a' )
# substrings counted (as regexes) in each page's displayed text
wordCountRegexes=c("forum", "blog","video")
# Extract a named feature vector from one page's (tidied) HTML content.
# Features cover page and tag sizes, per-tag counts, DOM shape, inline
# css/js sizes, image statistics, displayed-text statistics, keyword counts
# and the OpenGraph og:type meta value. Because metaOgType is a string, c()
# coerces the whole result to a character vector; the caller converts it
# back to numeric with data.matrix().
extractFeatures <- function(fileContent) {
  # size of the tidied page
  cleanPageSize <- stri_length(fileContent)
  names(cleanPageSize) <- "cleanPageSize"

  # parse once; all tag statistics come from this tree
  x_html <- read_html(fileContent)
  allNodes <- xml_find_all(x_html, "//*")
  totalTagCount <- length(allNodes)
  names(totalTagCount) <- "totalTagCount"

  # OpenGraph type: content of the first <meta property="og:type"> element.
  # xml_find_first() returns a missing node when there is no match, in which
  # case xml_attr() yields NA and the default is kept.
  # BUG FIX: the original `for(i in 1:length(metaNodes))` loop failed on
  # pages without any <meta> tags (1:0 iterates over c(1, 0)).
  metaOgType <- "NO_OG_TYPE"
  ogContent <- xml_attr(xml_find_first(x_html, "//meta[@property='og:type']"), "content")
  if (!is.na(ogContent)) {
    metaOgType <- ogContent
  }
  names(metaOgType) <- "metaOgType"

  # DOM shape: children per element, vectorised via xml_length() (equivalent
  # to length(xml_children()) per node, without the index loop)
  childCounts <- xml_length(allNodes)
  leaveCount <- sum(childCounts == 0)  # childless nodes
  names(leaveCount) <- "leaveCount"
  # NOTE(review): as in the original code this is the *fraction of nodes
  # that have children* (one is added per parent node), not the mean number
  # of children; kept as-is so feature values match previously trained
  # models -- confirm the intended semantics before renaming.
  meanChildCount <- sum(childCounts > 0) / totalTagCount
  names(meanChildCount) <- "meanChildCount"

  # raw counts of the selected tags (tagNames is defined at file scope)
  tagCounts <- laply(tagNames,
                     function(tagName) {
                       length(xml_find_all(x_html, paste0("//", tagName)))
                     }
                     )
  names(tagCounts) <- stri_paste(tagNames, "TagCount", collapse = NULL)
  # relative counts are computed but currently excluded from the vector below
  relTagCounts <- tagCounts / totalTagCount
  names(relTagCounts) <- stri_paste(tagNames, "RelTagCount", collapse = NULL)

  # text size of the whole document (markup stripped)
  totalTaglessSize <- stri_length(xml_text(x_html))
  names(totalTaglessSize) <- "totalTaglessSize"

  cssNodes <- xml_find_all(x_html, "//style")
  jsNodes <- xml_find_all(x_html, "//script")
  headNodes <- xml_find_all(x_html, "//head")
  inlineCssSize <- sum(stri_length(xml_text(cssNodes)))
  names(inlineCssSize) <- "inlineCssSize"
  inlineJsSize <- sum(stri_length(xml_text(jsNodes)))
  names(inlineJsSize) <- "inlineJsSize"
  taglessHeadSize <- sum(stri_length(xml_text(headNodes)))
  names(taglessHeadSize) <- "taglessHeadSize"

  # image statistics; positional indices ("[2]") are stripped from the DOM
  # paths so images under the same parent chain collapse onto one path
  imgNodes <- xml_find_all(x_html, "//img")
  imgTagCount <- length(imgNodes)
  names(imgTagCount) <- "imgTagCount"
  imgPaths <- xml_path(imgNodes)
  imgPaths <- stri_replace_all_regex(str = imgPaths, replacement = "", pattern = "\\[[0-9]*?\\]")
  # BUG FIX: table(imgPaths)[1] returned the count of the alphabetically
  # first path, not the most frequent one; take the maximum count instead
  # (0 when the page has no images)
  imgPathCounts <- table(imgPaths)
  mostCommonImgPathCount <- if (length(imgPathCounts) > 0) max(imgPathCounts) else 0
  names(mostCommonImgPathCount) <- "mostCommonImgPathCount"

  # remove nodes that are not displayed directly, so that xml_text() below
  # yields only the visible text
  if (length(jsNodes))
    xml_remove(jsNodes)
  if (length(cssNodes))
    xml_remove(cssNodes)
  if (length(headNodes))
    xml_remove(headNodes)

  displayedText <- xml_text(x_html)
  displayedTextSize <- stri_length(displayedText)
  names(displayedTextSize) <- "displayedTextSize"
  displToTotalRatio <- displayedTextSize / cleanPageSize
  names(displToTotalRatio) <- "displToTotalRatio"
  headToTotalRatio <- taglessHeadSize / cleanPageSize
  names(headToTotalRatio) <- "headToTotalRatio"

  # keyword occurrence counts in the visible text (wordCountRegexes is
  # defined at file scope)
  wordCounts <- stri_count_regex(displayedText, wordCountRegexes)
  names(wordCounts) <- paste0("wordCount_", seq_along(wordCounts))

  # assemble the feature vector; element order must stay stable because the
  # downstream models address features positionally/by name
  c(cleanPageSize,
    totalTaglessSize,
    inlineCssSize,
    inlineJsSize,
    taglessHeadSize,
    mostCommonImgPathCount,
    imgTagCount,
    displayedTextSize,
    headToTotalRatio,
    displToTotalRatio,
    tagCounts,
    # relTagCounts,  # relative tag counts, currently disabled
    totalTagCount,
    meanChildCount,
    leaveCount,
    wordCounts,
    metaOgType)
}
#compile function to speed up execution
extractFeatures_ <- cmpfun(extractFeatures)
#read list of files
filenames <- paste0("train/",list.files("train/"))
#read files content
# one category label and one tidied-HTML string per training file
categories <- ldply( .data = filenames, .fun = readCategory )
categories <- as.factor(unlist(categories))
filesContent <- ldply( .data = filenames, .fun = readDataFile )
#extact feature vectors from file content
featureData <- ldply(.data = unlist (filesContent), .fun = extractFeatures_)
featureData$.id <- NULL
# metaOgType is the only non-numeric feature; turn it into a factor so
# data.matrix() below encodes it as integer levels
featureData["metaOgType"] <- as.factor(unlist(featureData["metaOgType"]))
#Data size
dataSize <- nrow(featureData)
vectSize <- ncol(featureData)
#Split data to learning and test sets
# NOTE(review): no set.seed() before sample(), so this 90/10 split (and all
# model results below) is not reproducible across runs -- consider seeding.
testIdx <- sample(dataSize,dataSize * 0.1)
xlearn <- featureData[-testIdx,]
xtest <- featureData[ testIdx,]
ylearn <- categories[-testIdx]
ytest <- categories[ testIdx]
#Some models needs to be feed by matrix
xlearn_matrix <- data.matrix(xlearn)
xtest_matrix <- data.matrix(xtest )
# try SVM model
svm_model <- svm(x=xlearn_matrix, y=ylearn)
ytest_pred <- predict(svm_model, xtest_matrix)
ylearn_pred <- predict(svm_model, xlearn_matrix)
# compare result of prediction for learn and test data
# (each expression is the fraction of correct predictions, i.e. train and
# test accuracy; bare expressions display only when run interactively or
# via Rscript, not when source()d)
sum(rep(1,length(ylearn_pred)) [ylearn_pred == ylearn])/length(ylearn_pred)
sum(rep(1,length(ytest_pred)) [ytest_pred == ytest] )/length(ytest_pred)
#try naive bayes model
bayes_model <- naiveBayes(x=xlearn_matrix, y=ylearn)
ytest_pred <- predict(bayes_model, xtest_matrix)
ylearn_pred <- predict(bayes_model, xlearn_matrix)
# compare result of prediction for learn and test data
sum(rep(1,length(ylearn_pred)) [ylearn_pred == ylearn])/length(ylearn_pred)
sum(rep(1,length(ytest_pred)) [ytest_pred == ytest] )/length(ytest_pred)
#try random forest # (still the best we have)
# randomForest gets the raw data frame plus the held-out set, so its
# printout includes the test-set error estimate
forest = randomForest(xlearn, ylearn, xtest, ytest, keep.forest = TRUE)
forest
#which variables were revelant
forest$importance
8b7981e310c1ca714625e57eda6a3690d98492d7 | c02d409a3740360ded99c47e888358923fbf41c1 | /create_pred_data.R | a412f6453888ef8f5ffc9900c64dbd40f60e2ae2 | [] | no_license | PeerChristensen/explain_h2o | d3eb95524ad3a17231658d727d6be4de856716ff | d1548b7ef4a5996899a8944b766d27678bd40e4c | refs/heads/master | 2023-03-29T00:30:12.425952 | 2021-04-09T09:55:31 | 2021-04-09T09:55:31 | 254,924,575 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,420 | r | create_pred_data.R | # create data for prediction table
library(tidyverse)
# Build the per-customer prediction table: churn score plus marketing segments.
# NOTE(review): `preds` (model predictions with columns p1/predict) is used
# below but never created in this file -- it must already exist in the
# session from a prior scoring step; confirm before running standalone.
df <- read_csv("test.csv") %>%
  select(income,eqpdays,totmrc_Mean,rev_Mean) %>%
  mutate(CustomerID = row_number()) %>%
  select(CustomerID,everything()) %>%
  mutate(ChurnProbability = round(preds$p1,2),
         ChurnPrediction = if_else(preds$predict == 1,"Yes","No"))
# Derive the marketing segments:
#  - incomeSegment: low (<=3) / mid (4-6) / high (>=7) income codes
#  - CLVSegment: revenue terciles labelled Bronze/Silver/Gold
#  - RFSegment: recency x frequency quartile grid
#    NOTE(review): "recency" is the quartile of eqpdays (days since
#    equipment purchase), so larger values mean *older* equipment -- verify
#    the intended direction before interpreting the segment labels.
df <- df %>%
  mutate(incomeSegment = case_when(income <= 3 ~ "low",
                                   income >= 4 & income <=6 ~ "mid",
                                   income >= 7 ~"high"),
         CLVSegment = ntile(rev_Mean,3)) %>%
  mutate(CLVSegment = case_when(CLVSegment == 3 ~ "Gold",
                                CLVSegment == 2 ~ "Silver",
                                CLVSegment == 1 ~ "Bronze")) %>%
  mutate(recency = ntile(eqpdays,4),
         frequency = ntile(totmrc_Mean,4)) %>%
  mutate(RFSegment = case_when(recency >= 3 & frequency >= 3 ~ "Best",
                               recency >= 3 & frequency == 2 ~ "Potential",
                               recency >= 3 & frequency == 1 ~ "New",
                               recency <= 2 & frequency >= 3 ~ "Don't lose",
                               recency <= 2 & frequency == 2 ~ "At risk",
                               recency <= 2 & frequency == 1 ~ "Lost"))
# Keep only the columns needed in the output file
df <- df %>%
  select(CustomerID,ChurnPrediction,ChurnProbability,RFSegment,incomeSegment)
write_csv(df,"prediction_table.csv")
|
b14cb3b70081554edabaf08db1ac3cd714a55d23 | 6e5cd7df066b99b49f9390048af923b1fdeb8548 | /canbank_portfolio_compare.r | 11144caea7725f3c90ba051f4a4001835033b062 | [] | no_license | clueso/RScripts | 9c8d185c2721449f7fc0b94a3478a24cf540c3c0 | 2127f2725c5ff8ae4a21f678cd0e047adee807bc | refs/heads/master | 2020-12-24T05:54:28.730434 | 2017-01-31T06:05:54 | 2017-01-31T06:05:54 | 33,393,337 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,495 | r | canbank_portfolio_compare.r | source("etfs/etf_portfolio_functions.r");
# Compare an equal-weighted Canadian bank portfolio against the S&P 500:
# grow a $10000 investment in each strategy and plot both series together.

# Tickers and plot styling
canbank <- c("RY.TO", "TD.TO", "BMO.TO", "CM.TO")
sp <- c("SPY")
colours <- c("red", "blue", "green", "black", "darkmagenta")
line_width <- rep(2, 5)
fractions <- rep(0.25, 4)

# Download both series and clip the S&P data to the banks' date range
full_canbank_data <- download_clip_data(canbank)
full_sp_data <- download_clip_data(sp)
full_sp_data <- clip_data(full_sp_data, start(full_canbank_data), end(full_canbank_data))

# Value of a $10000 investment in each strategy, merged on a common index
# with gaps carried forward
port_growth_data <- calc_portfolio_value(10000, full_canbank_data, fractions)
inv_growth_data <- calc_single_stock_value(10000, full_sp_data)
full_growth_data <- merge(inv_growth_data, port_growth_data)
full_growth_data <- na.locf(full_growth_data)

# Plot both growth curves and place the legend inside the chart area
legend_entries <- c("SP500", "Bank portfolio")
plot(full_growth_data, type="l", plot.type = "single", xlab = "Time", ylab = "Value of $10000 investment", col = colours, lwd = line_width)
legend(x = min(index(full_growth_data[,2])), y = floor(max(full_growth_data[,2])*0.75), legend_entries, col = colours, lwd = line_width)
|
c615696d9bb40b85a4315a40448b9f2c911421bf | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/nbc4va/examples/summary.nbc.Rd.R | fb82bc2ac4da7c04ec8cbb7bb8f1402a4ff3d250 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 506 | r | summary.nbc.Rd.R | library(nbc4va)
### Name: summary.nbc
### Title: Summarize a NBC model with metrics
### Aliases: summary.nbc
### ** Examples
# (machine-extracted example script; library(nbc4va) was already attached above)
library(nbc4va)
data(nbc4vaData)
# Run naive bayes classifier on train and test data
# (first 50 rows train, next 50 rows test -- a fixed split, not a random one)
train <- nbc4vaData[1:50, ]
test <- nbc4vaData[51:100, ]
results <- nbc(train, test)
# Obtain a summary for the results
brief <- summary(results, top=2) # top 2 causes by CSMF for all test data
briefID <- summary(results, id="v48") # top 5 causes by probability for case "v48"
|
02e6b97289008fd93f853aa0fb9617e045555017 | fee8acace1633a97fc40a5c85d4228de5ec4a1f4 | /createOutput.R | 02e7b6d831ea20cdf26d070c407f3eaa5a0a12f5 | [] | no_license | coforfe/Oscars | dd764131c7468da02384d8bc12e542817eb1eee5 | 4cab692aebf0c9242c50a5483579d447c70a3231 | refs/heads/master | 2021-01-09T06:34:16.249937 | 2017-02-05T18:26:34 | 2017-02-05T18:26:34 | 81,011,716 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 786 | r | createOutput.R | library(stringr)
# Parse the raw Academy Awards text dump into (ceremony, category, entry) rows.
datIn <- readLines("Oscars_All_.txt")

# Collect rows in a list and bind once at the end; the original grew a
# data.frame row-by-row inside the loop, which is O(n^2).  The unused 'anio'
# ceremony counter of the original was dropped.
rows <- vector("list", length(datIn))
yea_tmp <- NA_character_   # most recent ceremony header seen
cat_tmp <- NA_character_   # most recent category header seen
j <- 0

for (i in seq_along(datIn)) {
  print(i)
  # NOTE(review): "\\(*st\\)" matches "st)" with *optional* "(" -- looser
  # than the intended "(...st)" pattern; kept as-is to preserve behaviour.
  if (str_detect(datIn[i], "\\(*st\\)|\\(*nd\\)|\\(*rd\\)|\\(*th\\)") &&
      word(datIn[i], 1) != "SCIENTIFIC") {
    # Ceremony header, e.g. "1st (1929)".
    yea_tmp <- datIn[i]
  } else if (str_detect(datIn[i], "NOTE")) {
    # Editorial notes carry no award data.
    next
  } else if (str_detect(word(datIn[i], 1), "[[:upper:]]+$")) {
    # Category header: first word ends in capital letters.
    cat_tmp <- datIn[i]
  } else {
    # Winner/nominee entry under the current ceremony and category.
    j <- j + 1
    rows[[j]] <- data.frame(year = yea_tmp, categ = cat_tmp, win = datIn[i],
                            stringsAsFactors = FALSE)
  }
}

# Fixed: the original initialised data.frame(year = 0, categ = 0, year = 0);
# the duplicated name silently became "year.1".  The third column holds the
# winning entry, so it is named "win" here (matching the output file name).
os_end <- do.call(rbind, rows[seq_len(j)])

write.table(os_end, file = "cat_year_win.txt", sep = "|", row.names = FALSE)
|
b8b789af06ec630a7d35a77df2478499aea30b39 | 583629c9d566252fee16e7f0aa56881231eb61fe | /cycle_2/p3/hist.R | 6ee3f5c91e921e42a4952aa9c42ebf911917b996 | [] | no_license | sashuu69/cet-mca-s1-r_lab | e01f91e35910dbfa9594744cca1e3972bd036217 | cc948fd3d27218fbf03921686234406611ac1533 | refs/heads/master | 2023-06-14T08:02:50.610743 | 2018-12-19T18:34:29 | 2018-12-19T18:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 238 | r | hist.R |
# Demonstrate the sampling distribution of the mean for several sample
# sizes drawn from a Uniform(0, 1) population (one histogram per size).
data <- runif(1000, min = 0, max = 1)
n <- c(20, 10, 50, 2)

# Mean of one simple random sample (without replacement) of size k.
# Fixed vs. the original: returns the value instead of writing a global
# with `<<-`, and spells out replace = FALSE instead of positional `F`.
myfunction <- function(k) {
  mean(sample(data, k, replace = FALSE))
}

par(mfrow = c(2, 2))
for (i in n) {
  # 1000 replicate sample means; replicate() avoids the original's
  # O(n^2) pattern of growing y with c() inside a loop.
  y <- replicate(1000, myfunction(i))
  hist(y)
}
|
d4f3cc6a48fe87b59c34e541595a9cc36b39a022 | 9e6c6d3ea78d408a6746fcdeca6ff0d3a8a3308c | /R/mycolorbar.R | 150ff61396423170051f692f58933f350984f890 | [] | no_license | stineb/rbeni | 36f28d38f58301d2af24255e9d63fe5ac6809ebe | 2f9d26d0a286c550cb90ee9d30a1f2b6c3b112f6 | refs/heads/master | 2023-02-18T22:18:52.856980 | 2023-02-16T17:29:09 | 2023-02-16T17:29:09 | 167,402,490 | 3 | 6 | null | 2020-09-25T09:35:32 | 2019-01-24T16:49:15 | R | UTF-8 | R | false | false | 5,977 | r | mycolorbar.R | mycolorbar <- function( col, # a vector of colors from which to interpolate
                        lev,        # levels either as c(min, max, N levels) or a vector of length > 3 giving explicit level margins
                        plot=TRUE,  # if FALSE, nothing is drawn but colors, levels and margins are still computed and returned
                        alpha=NA,   # transparency value, 0 = fully transparent, 1 = opaque
                        orient="h", # orientation of colorbar: "h" horizontal, "v" vertical
                        maxval=NA,  # maximum value, overrides upper margin (in the returned $margins)
                        minval=NA,  # minimum value, overrides lower margin
                        mincol=NA,  # overrides color for lowest level
                        dolabels=TRUE, # add labels for margins to colorbar
                        doticks=TRUE, # add tick marks at margins to colorbar
                        cex.axis=1.0, # magnification of axis tickmarks
                        cex.lab=1.0 # magnification of axis labels
                        ) {
  ## /////////////////////////////////////////////////////////////////////////
  ## Function 'mycolorbar' draws a colorbar based on the image function.
  ## 'layout' must be called before to arrange the colorbar in a subplot as
  ## defined by 'layout'.
  ## Beni Stocker, 3.6.2013
  ## -------------------------------------------------------------------------
  ## NOTE(review): only gplots::col2hex() is used from gplots (for 'mincol'
  ## below); attaching the whole package here is heavy-handed.
  library(gplots)
  ## 'explicit' mode: 'lev' lists the level margins themselves.
  if (length(lev)>3){
    explicit <- TRUE
  } else {
    explicit <- FALSE
  }
  if (explicit){
    ## Assume 'lev' declares explicit margins
    # print("MYCOLORBAR: assuming explicit margins provided")
    len <- length(lev)
    # print(paste("len=",len))
    # margins.eff is used for labels at color key and is returned as $margins
    margins.eff <- lev
    # print(paste("length of margins.eff",length(margins.eff)))
    # print(margins.eff)
    # margins defines where margins.eff are to be labelled: the color key is
    # drawn on an index axis 0..(len-1), one unit per level cell
    margins <- seq( from=0, to=(len-1), by=1 )
    margins.lab <- margins.eff
    # print(paste("length of margins",length(margins)))
    # print(margins)
    if (!is.na(maxval)) {
      margins.eff[length(margins)] <- maxval
    }
    if (!is.na(minval)){
      margins.eff[1] <- minval
    }
    ## Define color key centers (mid-points between margins)
    centers <- seq( from=0.5, to=(len-1.5), by=1 )
    # print(paste("length of centers",length(centers)))
    # print(centers)
    ## Define color range
    colors <- colorRampPalette( col )( length(centers) )
  } else {
    ## Assume 'lev' declares (min,max,number of levels)
    ## Define color key margins: N+1 equally spaced values from min to max
    len <- lev[3]
    margins <- seq( from=lev[1], to=lev[2], by=(lev[2]-lev[1])/len )
    margins.eff <- margins
    margins.lab <- margins
    if (!is.na(maxval)) {
      margins.eff[length(margins)] <- maxval
      # labels keep showing the regular margin; only $margins is overridden
      margins.lab[length(margins)] <- margins[length(margins)]
    }
    if (!is.na(minval)){
      margins.eff[1] <- minval
      margins.lab[1] <- margins[1]
    }
    ## Define color key centers (mid-points between margins)
    centers <- seq( from=lev[1]+(lev[2]-lev[1])/(2*len), to=lev[2]-(lev[2]-lev[1])/(2*len), by=(lev[2]-lev[1])/len )
    ## Define color range
    colors <- colorRampPalette( col )( lev[3] )
  }
  ## Optionally force the color of the lowest level; col2hex() is from gplots.
  if (!is.na(mincol)){
    colors[1] <- col2hex(mincol)
  }
  ## Alpha is transparency value: re-encode every color with the given alpha.
  if (!is.na(alpha)){
    colors <- col2rgb( colors, alpha=TRUE )/255
    colors[4,] <- alpha
    colors <- rgb(colors[1,],colors[2,],colors[3,],colors[4,])
  }
  if (plot) {
    ## Build the margin labels (FALSE suppresses labels in axis() below).
    ## NOTE(review): the explicit/non-explicit branches below are identical;
    ## kept as-is to leave behaviour untouched.
    if (dolabels==FALSE) {
      labels=FALSE
    } else {
      if (orient=="h"){
        if (explicit) {
          labels <- as.character(margins.lab)
        } else {
          labels <- as.character(margins.lab)
        }
      } else if (orient=="v") {
        if (explicit) {
          labels <- as.character(margins.lab)
        } else {
          labels <- as.character(margins.lab)
        }
      } else {
        ## NOTE(review): with an invalid 'orient' and dolabels=TRUE, 'labels'
        ## is never assigned (harmless only because axis() is skipped below).
        print("argument 'orient' must be either 'v' for vertical or 'h' for horizontal.")
      }
    }
    ## Draw the color key itself as a one-row (or one-column) image.
    if (orient=="h"){
      if (explicit) {
        # xlim <- c(lev[1],lev[len])
        xlim <- c(margins[1],margins[length(margins)])
        image( centers, 0.5, as.matrix(centers), xlim=xlim, col=colors, axes=FALSE, ylab="", xlab="", cex.axis=cex.axis, cex.lab=cex.lab )
        box()
      } else {
        xlim <- c(lev[1],lev[2])
        image( centers, 0.5, as.matrix(centers), col=colors, axes=FALSE, xlim=xlim, ylab="", xlab="", cex.axis=cex.axis, cex.lab=cex.lab )
        box()
      }
      # NOTE(review): box() is called a second time here (redundant, harmless)
      box()
    } else if (orient=="v") {
      if (explicit) {
        ylim <- c(margins[1],margins[length(margins)])
        image( 0.5, centers, as.matrix(t(centers)), col=colors, axes=FALSE, ylim=ylim, xlab="",ylab="", cex.axis=cex.axis, cex.lab=cex.lab )
        box()
        # Hack by subtracting 1 in the following 2 lines (commented out original lines above)
        # ylim <- c(lev[1],lev[len])
        # image( 0.5, centers-1, as.matrix(t(centers)), col=colors, axes=FALSE, ylim=ylim, xlab="",ylab="", cex.axis=cex.axis, cex.lab=cex.lab )
        # axis( 2, at=margins-1, labels=as.character(lev) )
      } else {
        ylim <- c(lev[1],lev[2])
        image( 0.5, centers, as.matrix(t(centers)), col=colors, axes=FALSE, ylim=ylim, xlab="",ylab="", cex.axis=cex.axis, cex.lab=cex.lab )
        box()
      }
      box()
    } else {
      print("argument 'orient' must be either 'v' for vertical or 'h' for horizontal.")
    }
    ## Tick marks / labels along the long side of the colorbar.
    if (doticks) {
      if (orient=="h"){
        axis( 1, at=margins, labels=labels, cex.axis=cex.axis, cex.lab=cex.lab )
      } else if (orient=="v") {
        axis( 2, at=margins, labels=labels, cex.axis=cex.axis, cex.lab=cex.lab )
      }
    }
  }
  ## Return the computed colors, the (possibly min/max-overridden) level
  ## margins and the cell centers for reuse by the caller.
  out.mycolorbar <- list()
  out.mycolorbar$colors <- colors
  out.mycolorbar$margins <- margins.eff
  out.mycolorbar$centers <- centers
  return(out.mycolorbar)
}
|
f949d40f2806c268c617482daec40f2bdc463b60 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /HRM/man/dot-E4.Rd | cb3e62add382bb76aef158fb63bca9f8ff233295 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 273 | rd | dot-E4.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{.E4}
\alias{.E4}
\title{Unbiased estimator}
\usage{
.E4(M_i, M_j)
}
\arguments{
\item{M_i}{a matrix for group i (see usage: .E4(M_i, M_j))}
\item{M_j}{a matrix for group j}
}
\description{
Unbiased estimator
}
\keyword{internal}
|
1874243f753a52957a854c712cc65b10057666d4 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/phytools/examples/countSimmap.Rd.R | 3f3687a020506086c8cd39c6ae9228037a161713 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 394 | r | countSimmap.Rd.R | library(phytools)
### Name: countSimmap
### Title: Counts the number of character changes on a SIMMAP style tree or
###   set of trees
### Aliases: countSimmap
### Keywords: phylogenetics utilities
### ** Examples
# Simulate a 100-taxon pure-birth tree, rescaled to total depth 1.
tree<-pbtree(n=100,scale=1)
# Symmetric 3-state rate matrix: off-diagonal rates 1, diagonals -2.
Q<-matrix(c(-2,1,1,1,-2,1,1,1,-2),3,3)
colnames(Q)<-rownames(Q)<-c("A","B","C")
# Simulate a stochastic character history on the tree under Q.
mtree<-sim.history(tree,Q)
# Count the state changes in the simulated history.
countSimmap(mtree,states=rownames(Q))
|
e6d5d8dc00075a1abb2606a7e55a74d55a946ac2 | 33fa49cc77538be7cfccaca011df5f9952d489df | /Rcode/UR Pipeline/stratified_BB8.R | 23270b7c1174614d64f527a4b2aa6c1b516b931f | [] | no_license | HoujieWang/SUMMER-DHS | b47ac75d1a5d8616b1809f7e0dd174cdf4e514c9 | e9c16d122a0f85c0e7102ea5ad47ac69f9e05b41 | refs/heads/main | 2023-08-15T19:52:54.526515 | 2021-10-12T15:56:05 | 2021-10-12T15:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,816 | r | stratified_BB8.R | ################################################################
#########   load libraries
################################################################
rm(list = ls())  # NOTE(review): wipes the whole workspace; assumes this
                 # script is always run stand-alone.
#### Libraries ####
library(SUMMER)
library(classInt)
library(RColorBrewer)
library(dplyr)
library(tidyr)
library(rgdal)
library(scales)
library(INLA)
library(survey)
library(ggplot2)
library(raster)
library(maptools)
library(gridExtra)
library(mgcv)
library(caret)
library(geosphere)
library(rgeos)
library(haven)
library(labelled)
library(data.table)
options(gsubfn.engine = "R")  # set before attaching sqldf (original ordering kept)
library(sqldf)
library(sp)
library(gstat)
# Locate this script via RStudio so that all paths can be resolved relative
# to the repository root (three directory levels up from the script).
code.path <- rstudioapi::getActiveDocumentContext()$path
code.path.splitted <- strsplit(code.path, "/")[[1]]
home_dir <- paste(code.path.splitted[1: (length(code.path.splitted)-3)], collapse = "/")
# Fixed: the original read `countries <- countries <- scan(...)` -- a
# redundant double assignment with identical effect.
countries <- scan(paste0(home_dir, "/countries_implemented.txt"), character(), quote = "")
country <- countries[length(countries)] # retrieve the country being analyzed
info.name <- paste0(country, "_general_info.Rdata")
# Fixed: paste0() has no `sep` argument; the original `sep=''` was silently
# treated as one more (empty) string to concatenate.  Same path results.
load(file = paste0(home_dir, '/Info/', country, '/', info.name)) # load the country info
################################################################
#########   set directories
################################################################
# All inputs live under Data/<country>; all outputs under Results/<country>.
data_dir <- paste0(home_dir,'/Data/', country) # set the directory to store the data
res_dir <- paste0(home_dir,'/Results/', country) # set the directory to store the results (e.g. fitted R objects, figures, tables in .csv etc.)
################################################################
#########   load files
################################################################
setwd(data_dir)
# GADM shapefile layer names follow the pattern gadm36_<abbrev>_<level>;
# `gadm.abbrev` is presumably defined by the country info file loaded above
# (it is not defined in this script) -- confirm.
poly.path <- paste0("shapeFiles_gadm") # specify the folder of the country shape files
poly.layer.adm0 <- paste('gadm36', gadm.abbrev,
                         '0', sep = "_") # specify the name of the national shape file
poly.layer.adm1 <- paste('gadm36', gadm.abbrev,
                         '1', sep = "_") # specify the name of the admin1 shape file
poly.layer.adm2 <- paste('gadm36', gadm.abbrev,
                         '2', sep = "_") # specify the name of the admin2 shape file
# NOTE(review): rgdal::readOGR belongs to the retired rgdal package;
# sf::st_read is the maintained replacement -- but downstream code may
# expect sp objects, so confirm before migrating.
poly.adm0 <- readOGR(dsn = poly.path,
                     layer = as.character(poly.layer.adm0)) # load the national shape file
poly.adm1 <- readOGR(dsn = poly.path,
                     layer = as.character(poly.layer.adm1)) # load the shape file of admin-1 regions
poly.adm2 <- readOGR(dsn = poly.path,
                     layer = as.character(poly.layer.adm2)) # load the shape file of admin-2 regions
# Force all three admin levels onto the same CRS (taken from the admin-2 layer).
proj4string(poly.adm0) <- proj4string(poly.adm1) <- proj4string(poly.adm2)
load(paste0('shapeFiles_gadm/', country, '_Amat.rda')) # load the adjacency matrix
load(paste0('shapeFiles_gadm/', country, '_Amat_Names.rda')) # load names of admin1 and admin2 regions
################################################################
#########   Final preprocessing
################################################################
# load the DHS survey cluster data
load(paste0(country,'_cluster_dat.rda'),
     envir = .GlobalEnv)
# Fixed: dropped two redundant load() calls -- the same two Amat .rda files
# were already loaded from 'shapeFiles_gadm/' a few lines above (poly.path
# resolves to the identical location).
# Keep only the survey of interest, and years from beg.year onward.
mod.dat <- mod.dat[mod.dat$survey == as.character(survey_year), ] # filter the data of the recent survey
mod.dat$years <- as.numeric(as.character(mod.dat$years))
mod.dat <- mod.dat[as.numeric(mod.dat$years) >= beg.year, ]
# The urban/rural indicator doubles as the stratification variable.
mod.dat$strata <- mod.dat$urban
mod.dat$country <- as.character(country)
################################################################
#########   National stratified BB8
################################################################
setwd(paste0(res_dir))
# fit the national BB8 stratified model
# Beta-binomial cluster-level model; Amat = NULL means no spatial component
# at the national level.  The age.* vectors describe six age groups and how
# they share random-walk / fixed effects (presumably months 0, 1-11, 12-23,
# ... -- confirm against the SUMMER documentation).
fit.natl.strat <- smoothCluster(data = mod.dat, family = "betabinomial",
                                Amat = NULL, strata.time.effect =  TRUE,
                                year_label = beg.year:end.year,
                                time.model = "rw2",
                                overdisp.mean = -7.5,
                                overdisp.prec = 0.39,
                                age.groups = levels(mod.dat$age),
                                age.n = c(1,11,12,12,12,12),
                                age.rw.group = c(1,2,3,3,3,3),
                                age.strata.fixed.group = c(1,2,3,3,3,3),
                                survey.effect = FALSE)
saveRDS(fit.natl.strat, paste0('Betabinomial/',
                               country,'_fit.strat.natl.3UR.rds'))
# load the national urban population fraction, this is needed to weight urban/rural estimators
natl.urb.weights <- readRDS(paste0('UR/U5_fraction/','natl_urban_weights.rds'))
natl.urb.weights$rural <- 1- natl.urb.weights$urban
# sample for national U5MR estimators and compute the estimates
# (1000 posterior draws; draws are saved for possible reuse downstream)
res.natl.strat <- getSmoothed(inla_mod = fit.natl.strat,
                              year_range = beg.year:end.year,
                              year_label = beg.year:end.year, nsim = 1000,
                              weight.strata = natl.urb.weights,
                              weight.frame = NULL,
                              draws = NULL, save.draws = TRUE)
saveRDS(res.natl.strat, paste0('Betabinomial/',
                               country,'_res.strat.natl.3UR.rds'))
################################################################
#########   Admin-1 stratified model
################################################################
# Refit at admin-1 level: region becomes the admin-1 identifier and
# admin1.mat supplies the spatial adjacency structure.
mod.dat$region <- mod.dat$admin1.char
# fit the admin1 BB8 stratified model
fit.strat.admin1 <- smoothCluster(data = mod.dat, family = "betabinomial",
                                  Amat = admin1.mat, strata.time.effect =  TRUE,
                                  year_label = beg.year:end.year,
                                  time.model = "rw2", st.time.model = "ar1",
                                  pc.st.slope.u = 1, pc.st.slope.alpha = 0.01,
                                  age.strata.fixed.group = c(1,2,3,3,3,3),
                                  type.st = type.st,
                                  bias.adj.by = adj.varnames,
                                  survey.effect = FALSE)
saveRDS(fit.strat.admin1,paste0('Betabinomial/',
                                country,'_fit.strat.admin1.3UR.rds'))
# load the admin1 urban population fraction, this is needed to weight urban/rural estimators
weight.strata.adm1 <- readRDS(paste0('UR/U5_fraction/','admin1_urban_weights.rds'))
# sample for admin-1 U5MR estimators and compute the estimates
# (the original comment said "national" here; this section is admin-1)
res.strat.admin1 <- getSmoothed(inla_mod = fit.strat.admin1,
                                year_range = beg.year:end.year,
                                year_label = beg.year:end.year, nsim = 1000,
                                weight.strata = weight.strata.adm1,
                                weight.frame = NULL,
                                draws = NULL, save.draws = TRUE)
saveRDS(res.strat.admin1,paste0('Betabinomial/',
                                country,'_res.strat.admin1.3UR.rds'))
################################################################
#########   Admin-2 stratified model
################################################################
# Same model at admin-2 level.  NOTE(review): unlike the admin-1 fit above,
# this fit passes no bias.adj.by argument -- confirm that is intentional.
mod.dat$region <- mod.dat$admin2.char
# fit the admin2 BB8 stratified model
fit.strat.admin2 <- smoothCluster(data = mod.dat, family = "betabinomial",
                                  Amat = admin2.mat, strata.time.effect =  TRUE,
                                  year_label = beg.year:end.year,
                                  time.model = "rw2", st.time.model = "ar1",
                                  pc.st.slope.u = 1, pc.st.slope.alpha = 0.01,
                                  type.st = type.st,
                                  age.strata.fixed.group = c(1,2,3,3,3,3),
                                  survey.effect = FALSE)
saveRDS(fit.strat.admin2,paste0('Betabinomial/',
                                country,'_fit.strat.admin2.3UR.rds'))
# load the admin2 urban population fraction, this is needed to weight urban/rural estimators
weight.strata.adm2 <- readRDS(paste0('UR/U5_fraction/','admin2_urban_weights.rds'))
# sample for admin2 U5MR estimators and compute the estimates
res.strat.admin2 <- getSmoothed(inla_mod = fit.strat.admin2,
                                year_range = beg.year:end.year,
                                year_label = beg.year:end.year, nsim = 1000,
                                weight.strata = weight.strata.adm2,
                                weight.frame = NULL,
                                draws = NULL, save.draws = TRUE)
saveRDS(res.strat.admin2,paste0('Betabinomial/',
                                country,'_res.strat.admin2.3UR.rds'))
|
37f2fce2d4c95b5201d4fa41fb13a09af2795656 | da4ca72079bfd05af352713cf67f04d61b2bb4b2 | /man/gskat_seq_cont.Rd | 2aac7b9b8dc026165164d744d912aa28e59d07e9 | [] | no_license | xfwang/gskat | 6ffe60a7c48a0e38b001d8d2a632c1f234b54426 | eee1f7806d71990669f2f218a2b52e89669a705b | refs/heads/master | 2021-01-11T07:26:54.426256 | 2016-08-26T01:01:22 | 2016-08-26T01:01:22 | 14,637,867 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,592 | rd | gskat_seq_cont.Rd | \name{gskat_seq_cont}
\alias{gskat_seq_cont}
\title{
Perform GEE_KM Score Test with sequencing data for continuous traits
}
\description{
This function performs family based marker set based association (score test) based on the GEE_KM.
}
\usage{
gskat_seq_cont(y,XC,Z,ID,impute.method="fixed",SNP.weights=NULL,
w_a=1,w_b=25,resampling=TRUE,pw="Rade",Uc=TRUE,sW=FALSE,np=10000)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{y}{binary phenotype coded as 0, 1}
\item{XC}{covariate matrix, including the intercept column}
\item{Z}{SNP genotypes coded 0/1/2 (minor allele count)}
\item{ID}{Pedigree ID matrix whose four columns are the Family ID (FID), Individual ID (IID),
Paternal ID (FAT), and Maternal ID (MAT), respectively.}
\item{impute.method}{default is the "fixed" method, i.e. missing genotypes are filled with means}
\item{SNP.weights}{If NULL, the default beta(1, 25) density will be used; otherwise a customized weight vector may be supplied}
\item{w_a}{The first parameter of the beta density in the weight function}
\item{w_b}{The second parameter of the beta density in the weight function}
\item{resampling}{If TRUE, resampling will be applied}
\item{pw}{r.v. used in the perturbation, "Norm"=Normal , "Rade"=Rademacher}
\item{Uc}{Score centered or not}
\item{sW}{standardize weights or not}
\item{np}{No. of perturbed samples}
}
\value{
\item{p1}{Returns asymptotic p-value}
\item{p2}{Returns resampling p-value}
}
%% \references{
%% ~put references to the literature/web site here ~
%% }
\author{
Xuefeng Wang
}
\examples{
# gskat_seq_cont(y,XC=X,Z,ID)
}
|
6d36f648a3893aa031f90e425c4001fa9f4f35d5 | 3bb59aeead30b4084bc7ecdff47d2b922f9fdd83 | /inst/doc/exclusionflowcharts.R | bb10aba5c6e12b1ff7bceef2eca5d775fac3478b | [] | no_license | cran/PRISMAstatement | 1bff459791a895277d88aa08e47e5f7b1824097b | 4191ad275dd6c9515f2300f7e7b2356f5dc8ad7f | refs/heads/master | 2021-01-10T23:08:13.433660 | 2019-05-08T21:50:13 | 2019-05-08T21:50:13 | 70,641,515 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,276 | r | exclusionflowcharts.R | ## ----setup, include = FALSE----------------------------------------------
# knitr-purled vignette code for PRISMAstatement exclusion flow charts.
library(PRISMAstatement)
suppressPackageStartupMessages(library(DiagrammeR))
knitr::opts_chunk$set(
  collapse = TRUE,
  fig.width = 3
)
## ----genericflowexamples, fig.cap = "Fictitious study data, presenting just counts"----
# Basic chart: raw counts only, one inclusion/exclusion label per step.
flow_exclusions(
  incl_counts = c(972, 132, 77, 14),
  total_label = "Total Screened",
  incl_labels = c("Consented", "Completed Study", "BMI <= 30"),
  excl_labels = c("Declined Consent", "Failed to Complete", "BMI > 30")
)
## ----percoftotal, fig.cap = "Percentages of the Total figure"------------
flow_exclusions(c(1000, 300, 150, 75, 38), percent_of_total = TRUE)
## ----percofprev, fig.cap = "Percentages of the previous box"-------------
# Percent of the previous box, with the raw counts hidden.
flow_exclusions(c(100000, 3000, 1666, 411, 38),
                percent_of_prev = TRUE,
                show_count = FALSE)
## ----genericflowexamples2, fig.cap = "Demonstrating other options"-------
# Combined demonstration: labels, percent-of-total, no counts, larger font.
flow_exclusions(
  incl_counts = c(972, 132, 77, 14),
  total_label = "Total Screened",
  incl_labels = c("Consented", "Completed Study", "BMI <= 30"),
  excl_labels = c("Declined Consent", "Failed to Complete", "BMI > 30"),
  percent_of_total = TRUE,
  percent_of_prev = FALSE,
  show_count = FALSE,
  font_size = 14)
|
f9b1b3ea6b46890390833f2af1f86c765c2f4454 | 3cd621de223b237c7a7ce3556d139def186db049 | /Rplay.R | ea6fb2db6b3412a97fa3a02c5643e58adfc47553 | [] | no_license | enilmik/gitplay | 9026e168c5cf5f8085ca82c30ebe9f2031aa92b8 | 01f4621de0d12e599fab0a0f185fb308495730e0 | refs/heads/master | 2021-01-10T12:37:36.461697 | 2015-06-06T05:19:35 | 2015-06-06T05:19:35 | 36,587,047 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,414 | r | Rplay.R | #IMPORTANT NOTE
# If you downloaded and installed R in a location other than the United
# States, you might encounter formatting issues later in this class due to
# language differences.  To fix this, run in your R console:
Sys.setlocale("LC_ALL", "C")
# This only changes the locale for the current R session, so re-run it in
# any session that depends on English (e.g. names of the days of the week).

# NOTE(review): hard-coded absolute Windows paths; adjust for your machine.
USDA <- read.csv("C:/Users/enilmik/Downloads/USDA.csv")

# Flag foods above the mean for each nutrient (NAs excluded from the means).
USDA$HighSodium  <- as.numeric(USDA$Sodium > mean(USDA$Sodium, na.rm = TRUE))
USDA$HighProtein <- as.numeric(USDA$Protein > mean(USDA$Protein, na.rm = TRUE))
USDA$HighFat     <- as.numeric(USDA$TotalFat > mean(USDA$TotalFat, na.rm = TRUE))
USDA$HighCarb    <- as.numeric(USDA$Carbohydrate > mean(USDA$Carbohydrate, na.rm = TRUE))

# tapply(arg1, arg2, arg3): group arg1 by arg2 and apply arg3.
# Average amount of iron, split by high/low protein:
tapply(USDA$Iron, USDA$HighProtein, mean, na.rm = TRUE)

mvt <- read.csv("C:/Users/enilmik/Downloads/mvtWeek1.csv")
# Fixed: the original referenced an undefined object `mvtWeek1`; the data
# was read into `mvt` above.  Parse the Date column once, then reuse it.
DateConvert <- as.Date(strptime(mvt$Date, "%m/%d/%y %H:%M"))
mvt$Date    <- DateConvert
mvt$Month   <- months(DateConvert)
mvt$Weekday <- weekdays(DateConvert)
12bf04818f96963e15164bb4626b7b355f55520c | ab2c51d4ad6b8a7f86d89fff3bcc0ec54ec850c7 | /Shiny_Proj1.R | 43c9409b5bc8bdc3cc0b57e7706982dead836018 | [] | no_license | stephensh24/Visualization_Shiny_proj | 368b0674e46126be5dafaf4b41f99af36d916571 | 657a82209217bd5ded83138f0536bfeabe31bc1c | refs/heads/master | 2021-06-18T23:27:20.612077 | 2021-01-08T22:23:04 | 2021-01-08T22:23:04 | 131,326,949 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,243 | r | Shiny_Proj1.R | tanz = read.csv("Pipe_ind.csv")
# Attach libraries before any dplyr verbs run -- the original called
# inner_join() first and only attached dplyr afterwards, which errors in a
# fresh session.  (tanz itself was read from Pipe_ind.csv just above.)
library(dplyr)
library(ggplot2)
library(plotly)

# Human-readable labels for each pump, joined on the shared id.
Pipe_labels <- read.csv("Pipe_labels.csv")
tanz <- inner_join(tanz, Pipe_labels, by = "id")
#setwd("C:/Users/Stephen/Desktop/git_proj/shiny_proj")
head(tanz)
#Data Clean
#-------------------------------------------------------------------------------------------
# Zeros in longitude/gps_height (and latitudes > -1) are sentinel missing
# values in this data set; replace them with the pre-computed regional means
# below.  The original's long chains of per-region mutate(ifelse(...)) calls
# are collapsed into one named-vector lookup per variable (same result).

#longitude
mean(tanz$longitude)
tanz %>% filter(amount_tsh != 0, region == "Mwanza") %>% summarise(avg = mean(amount_tsh))
# Converting longitude into the regional mean (1812 zero rows in total:
# 807 in Mwanza, 1005 in Shinyanga).
long_means <- c("Mwanza" = 33.09156, "Shinyanga" = 33.24012)
tanz <- tanz %>%
  mutate(longitude = ifelse(longitude == 0 & region %in% names(long_means),
                            long_means[as.character(region)], longitude))

#GPS Height: regional means (several regions share the 1018.861 value,
# matching the original line-by-line replacements).
gps_means <- c("Mwanza" = 1199.643, "Shinyanga" = 1350.982,
               "Tabora" = 1018.861, "Dodoma" = 1018.861, "Mbeya" = 1018.861,
               "Tanga" = 667.9148, "Kagera" = 1018.861,
               "Morogoro" = 405.2507, "Mtwara" = 258.4103,
               "Pwani" = 667.9148, "Dar es Salaam" = 31.22195,
               "Lindi" = 213.4183)
tanz <- tanz %>%
  mutate(gps_height = ifelse(gps_height == 0 & region %in% names(gps_means),
                             gps_means[as.character(region)], gps_height))

#latitude---------------------------------------------------
tanz %>% filter(latitude > -1, region == "Kagera") %>% summarise(avg = mean(amount_tsh))
tanz %>% filter(latitude <= -1, region == "Kagera") %>% summarise(avg = mean(latitude))
# Latitudes above -1 are out of range for these regions of Tanzania.
lat_means <- c("Mwanza" = -2.620502, "Shinyanga" = -3.495696,
               "Kagera" = -1.963501)
tanz <- tanz %>%
  mutate(latitude = ifelse(latitude > -1 & region %in% names(lat_means),
                           lat_means[as.character(region)], latitude))
#Remove duplicate columns (the original header said "rows", but these drop
#redundant *_group / *_type columns duplicating the kept variables) --------
tanz$payment_type = NULL
tanz$waterpoint_type_group = NULL
tanz$extraction_type_group = NULL
tanz$source = NULL
tanz = tanz %>% rename(source = source_type)
tanz$quality_group = NULL
tanz$quantity_group = NULL
tanz$management_group = NULL
tanz$extraction_type = NULL
# keep the coarser "class" version under the familiar name
tanz = tanz %>% rename(extraction_type = extraction_type_class)
tanz$num_private = NULL
# Reduced column set for modelling/plotting.  NOTE: assigned to tanz_sel;
# tanz itself keeps all remaining columns.
tanz_sel = tanz %>% select(status_group, id, gps_height, longitude, latitude, basin, region, subvillage, construction_year,
                           extraction_type, payment, water_quality, quantity, waterpoint_type)
colnames(tanz)
#-------------------------------------------------------
head(tanz)
# Frequency polygon of construction year by pump status; the x-axis is
# clipped to 1940-2013 (zeros -- presumably missing years -- fall outside).
ggplot(tanz, aes(x = construction_year))+
  geom_freqpoly(aes(color=status_group),bins=200)+coord_cartesian(xlim = c(1940,2013))
# Number of distinct wells per basin, most wells first.
c_basin <- arrange(summarise(group_by(tanz, basin),
                             TotalWells = length(unique(id)) ), desc(TotalWells) )
head(c_basin, n = 10)
#
# length(tanz$amount_tsh[tanz$amount_tsh == 0])
#
# sum(tanz$construction_year == 0)
#
# summary(tanz$region)
# ggplot(tanz, aes(x = basin))+ geom_bar(aes(fill=status_group),
#                                        position = "dodge")
#
#
# ggplot(tanz, aes(x = region ,y = population))+ geom_bar(stat = "identity")
#
#
# ggplot(tanz, aes(x = waterpoint_type))+ geom_bar(aes(fill=status_group),
#                                        position = "dodge")
colnames(tanz)
# Wells with amount_tsh strictly above 1000 and up to 10000.
atsh_10k = tanz$amount_tsh[tanz$amount_tsh > 1000 & tanz$amount_tsh <= 10000]
length(atsh_10k)
# NOTE(review): geom_density() on the categorical status_group column is
# almost certainly not the intended plot -- a bar chart may be meant; confirm.
ggplot() +geom_density(aes(x = tanz$status_group), fill = "yellow")
# NOTE(review): View() is interactive-only; it fails/no-ops non-interactively.
View(tanz)
## Making a bar chart of perc shared between function, non-function, etc.
# Overall share of pumps by status.
tanz2 <- tanz %>% group_by(status_group) %>%
  summarise(count = n())
tanz2
status_group_sum <- sum(tanz2$count)
status_group_perc_graph <- tanz2 %>% mutate(perc = (count/status_group_sum))
status_group_perc_graph %>% ggplot(., aes(x= status_group, y = perc, fill = status_group)) +
  geom_bar(stat = "identity")
#By basin: within-basin share of each status.
tanzb = tanz %>% group_by(basin,status_group) %>% summarise(count = n())
tanzb_perc_graph = tanzb %>% group_by(basin) %>% mutate(perc = count/sum(count))
# Non-functional rows only (used for the mean reference line below).
tanzf = tanzb_perc_graph[tanzb$status_group == "non functional",]
tanzb_perc_graph[tanzb_perc_graph$status_group == "non functional",]
#### MEAN LINE: horizontal line at the mean non-functional share.
tanzf %>% ggplot(aes(x = basin, y = perc, fill = status_group)) +
  geom_bar(stat="identity","position" = "dodge") + geom_hline(aes(yintercept = mean(perc)))
tanzb_perc_graph %>% ggplot(aes(x = basin, y = perc, fill = status_group)) +
  geom_bar(stat="identity","position" = "dodge")
#Extraction type class: same pattern, grouped by extraction_type.
tanze = tanz %>% group_by(extraction_type,status_group) %>% summarise(count = n())
tanze_perc_graph = tanze %>% group_by(extraction_type) %>% mutate(perc = count/sum(count))
tanzg = tanze_perc_graph[tanze$status_group == "non functional",]
tanze_perc_graph[tanze_perc_graph$status_group == "non functional",]
tanzg %>% ggplot(aes(x = extraction_type, y = perc, fill = status_group)) +
  geom_bar(stat="identity","position" = "dodge") + geom_hline(aes(yintercept = mean(perc)))
tanze_perc_graph %>% ggplot(aes(x = extraction_type, y = perc, fill = status_group)) +
  geom_bar(stat="identity","position" = "dodge")
#Histogram of gps_height stacked by pump status.
tanz %>% ggplot(aes(x= gps_height, fill = status_group)) + geom_histogram(bins = 30)
#Region-----------------------------------
# Within-region share of each status.  NOTE(review): tanzR already contains
# perc; tanzR_perc_graph recomputes the identical column.
tanzR = tanz %>% group_by(region,status_group) %>% summarise(count = n()) %>%
  group_by(region) %>% mutate(perc = count/sum(count))
tanzR_perc_graph = tanzR %>% group_by(region) %>% mutate(perc = count/sum(count))
# NOTE(review): this row filter uses tanze$status_group (the extraction-type
# table) instead of tanzR's own column -- likely a copy-paste slip; the two
# tables need not have the same number/order of rows.  Confirm intent.
tanzG = tanzR_perc_graph[tanze$status_group == "non functional",]
tanzR_perc_graph[tanzR_perc_graph$status_group == "non functional",]
tanzG %>% ggplot(aes(x = region, y = perc, fill = status_group)) +
  geom_bar(stat="identity","position" = "dodge")
tanzR %>% ggplot(aes(x = region, y = perc, fill = status_group)) +
  geom_bar(stat="identity","position" = "dodge")
#Water quality------------------------------
tanzW = tanz %>% group_by(water_quality,status_group) %>% summarise(count = n()) %>%
  group_by(water_quality) %>% mutate(perc = count/sum(count))
tanzW %>% ggplot(aes(x = water_quality, y = perc, fill = status_group)) +
  geom_bar(stat="identity","position" = "dodge")
# Payment section.  NOTE(review): tanzW is reassigned here to a plotly
# object (shadowing the table above), and the trace names 'SF Zoo'/'LA Zoo'
# appear to be leftovers from a plotly documentation example.
tanzW = tanz %>% group_by(payment,status_group) %>% summarise(count = n()) %>%
  group_by(payment) %>% mutate(perc = count/sum(count)) %>%
  plot_ly(x = ~payment, y = ~status_group, type = 'bar', name = 'SF Zoo') %>%
  add_trace(y = ~status_group, name = 'LA Zoo') %>%
  layout(yaxis = list(title = 'Count'), barmode = 'group')
tanzW
#-----------
#By waterpoint_type
tanzWP = tanz %>% group_by(waterpoint_type,status_group) %>% summarise(count = n())
tanzWP_perc_graph = tanzWP %>% group_by(waterpoint_type) %>% mutate(perc = count/sum(count))
# NOTE(review): these two subscripts filter with tanzb/tanzb_perc_graph (the
# basin tables) rather than tanzWP's own status column -- likely copy-paste.
tanzP = tanzWP_perc_graph[tanzb$status_group == "non functional",]
tanzWP_perc_graph[tanzb_perc_graph$status_group == "non functional",]
tanzP %>% ggplot(aes(x = waterpoint_type, y = perc, fill = status_group)) +
  geom_bar(stat="identity","position" = "dodge")
tanzWP_perc_graph %>% ggplot(aes(x = waterpoint_type, y = perc, fill = status_group)) +
  geom_bar(stat="identity","position" = "dodge")
#-------------------------
# Fixed: the original plotted tanzfunc one line *before* defining it; define
# the non-functional subset first, then plot it.
tanzfunc = tanz %>% filter(status_group == "non functional")
tanzfunc %>% ggplot(aes(x= basin, fill = status_group)) + geom_histogram(bins = 50, stat = "count")
str(tanz$status_group)
colnames(tanz)
# NOTE(review): bare column access -- prints the column (if it still exists
# after the column-dropping section) but has no other effect.
tanz$source_class
tanz %>% ggplot(aes(x= water_quality, fill= status_group)) + geom_bar(position = "dodge")
#----------------
summary(tanz$region)
# NOTE(review): menuItem()/icon() look like shinydashboard UI constructors;
# calling one here, outside any dashboard UI definition, has no effect.
menuItem("Region Pump Funcionality", tabName = "data", icon = icon("bar-chart-o"))
#MAPING --------------------------------
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; consider guarding with requireNamespace().
install.packages("ggmap")
library(ggmap)
library(rworldmap)
# Static world map cropped to roughly Tanzania's bounding box, with pump
# coordinates overlaid as points.
newmap <- getMap(resolution = "low")
plot(newmap, xlim = c(29, 41), ylim = c(-12, -1), asp = 1)
points(tanz$longitude, tanz$latitude, col = "blue", cex = .6)
summary(tanz$latitude)
summary(tanz$longitude)
# Interactive leaflet map centred on Tanzania.
map <- leaflet(width = 600, height = 400)
map <- addTiles(map)
map <- setView(map, lng = 35.09,
               lat = -5.801,
               zoom = 7)
map
# NOTE(review): gvisGeoChart() is given column names ("Country", "Profit")
# that do not appear in this dataset -- likely copied from a googleVis
# example; confirm before use.
Geo=gvisGeoChart(tanz, locationvar="Country",
                 colorvar="Profit",
                 options=list(projection="kavrayskiy-vii"))
plot(Geo)
install.packages("leaflet.extras")
library(leaflet.extras)
head(tanz)
# Clustered markers per pump; popup shows the region name.
tanz_region = tanz %>% group_by(region, status_group)
tanz_region %>% leaflet() %>%
  addProviderTiles("Stamen.Watercolor") %>%
  addMarkers(lat = latitude,
             lng = longitude,
             clusterOptions = markerClusterOptions(),
             popup = as.character(region))
map
colnames(tanz_region)
# Stacked percentage bars of functionality over (small) population values.
# NOTE(review): the chart title says "Extraction Type" but the x axis is
# population -- title looks copied from another plot.
tanz %>% filter(population > 0 & population < 1000) %>% group_by(population,status_group) %>% summarise(count = n()) %>%
  group_by(population) %>% mutate(perc = count/sum(count)) %>%
  plot_ly(x = ~population, y = ~perc, color = ~status_group, type='bar') %>%
  layout(barmode = 'stack', title = "Functionality by Extraction Type")
# NOTE(review): `tanz_histo` is not defined in this chunk -- TODO confirm it
# is created elsewhere before this line runs.
tanz_histo %>% filter(population > 0 & population < 1000) %>% plot_ly(alpha = 0.6) %>%
  add_histogram(x = ~population, color = ~status_group) %>%
  layout(barmode = "overlay", title = "Functionality by GPS Height")
|
06984b3320637f6ec65fa9e5f140986c450fbbc0 | 907e3add04e83b6b7fb22dca1c167cad8242de73 | /man/make_filename.Rd | 3eccf31b98cb7fa0b48c22721f88404ea38e8349 | [] | no_license | afortuny/fars | 2fdde85bbd77a558a786fa99d84ba909d563a182 | 39e5f11fcced213ad258d53a959b531987f54773 | refs/heads/master | 2021-04-30T11:35:55.035675 | 2018-02-16T16:26:42 | 2018-02-16T16:26:42 | 121,257,691 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 627 | rd | make_filename.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars.R
\name{make_filename}
\alias{make_filename}
\title{make_filename
make_filename create the filename based on the year from which we want to extract the information}
\usage{
make_filename(year)
}
\arguments{
\item{year}{An integer that indicates the year from which we want to extract the information}
}
\value{
The filename string for that specific year
}
\description{
make_filename
make_filename create the filename based on the year from which we want to extract the information
}
\examples{
\dontrun{
make_filename<-make_filename(2015)
}
}
|
b6de59532447e1cc5274dfcabb9bf9442377f700 | 64fabdfd8e2670eb57e3cdf162f94d71b58fe6b4 | /R/genes2hugo.R | 560ed81731486b24f93f996d48396d257a1ab7f5 | [] | no_license | d3b-center/celllines-profiling-analysis | 564db86b6fb6c5bc8258fdf84bf12a1ea47963a1 | 66396cc64c094d08903ef8666353e85aad7eebd1 | refs/heads/master | 2021-06-23T15:32:27.993408 | 2021-04-07T23:07:29 | 2021-04-07T23:07:29 | 212,658,648 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 679 | r | genes2hugo.R | # Maps ENSEMBL IDs to HUGO
# Use srcType = "ensembl_gene_id" for Ensembl IDs
# Use srcType = "entrezgene" for Entrez IDs
#' Map gene identifiers to HUGO symbols via Ensembl BioMart.
#'
#' @param v Character vector of source IDs (e.g. Ensembl gene IDs).
#' @param srcType BioMart filter name describing the IDs in `v`:
#'   "ensembl_gene_id" (default) or "entrezgene".
#' @return A data.frame whose first column is the source ID and whose second
#'   column is `hgnc_symbol`; rows with an empty HUGO symbol are dropped.
genes2hugo <- function( v, srcType = "ensembl_gene_id" )
{
  ## Retrieve the ENSEMBL -> HUGO mapping from the Ensembl BioMart service
  ensembl <- biomaRt::useMart( "ENSEMBL_MART_ENSEMBL", host="www.ensembl.org", dataset="hsapiens_gene_ensembl" )
  ID <- biomaRt::getBM( attributes=c(srcType, "hgnc_symbol"), filters=srcType, values=v, mart=ensembl )
  ## Make sure there was at least one mapping.
  ## (bug fix: the original called the undefined function `top()`, which would
  ## raise "could not find function" instead of the intended error message)
  if( nrow(ID) < 1 ) stop( "No IDs mapped successfully" )
  ## Drop rows whose HUGO symbol came back empty
  j <- which( ID[,2] == "" )
  if( length(j) > 0 ) ID <- ID[-j,]
  ## Sanity check: every returned source ID must be one we asked for
  stopifnot( all( ID[,1] %in% v ) )
  ID
}
e5db49af28057458a8ca4415d21581fcac276584 | 8d0b7388b86293205382e29c9a202bd40e93824b | /tests/testthat.R | 9d3eb879081e8ed703d00c4b500b598895b32060 | [] | no_license | benjcunningham/govtrackr | bd7bd903f6df4cac69e7ae41d255b38362cd707f | b0bb8db58d03e6e87bc6f60b06d303e54cacaa8a | refs/heads/master | 2016-08-11T21:45:58.701119 | 2016-01-27T04:02:06 | 2016-01-27T04:02:06 | 50,258,465 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 62 | r | testthat.R | library(testthat)
# Standard testthat driver: loads the package under test and runs its
# tests/testthat/ suite (invoked by R CMD check).
library(govtrackr)
test_check("govtrackr")
|
164853877d06758fcd8da0fc94b8344b69f88f17 | 5e613fdaaf680b7220a9331133d79a7dcbca8acd | /R/deps/taxize-master/man/getcommentdetailfromtsn.Rd | f1d60f4f32ad77843123b3d7cf523e523c6188be | [
"MIT"
] | permissive | hmarx/Alpine-Sky-Islands | df0fd965ca4e1d4e3071aa9362ee615a5510175d | 72ab7d914fea6c76c9ae105e042e11088a9be87f | refs/heads/master | 2021-05-01T02:44:59.818086 | 2017-08-08T15:02:45 | 2017-08-08T15:02:45 | 39,544,747 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 550 | rd | getcommentdetailfromtsn.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/itis.R
\name{getcommentdetailfromtsn}
\alias{getcommentdetailfromtsn}
\title{Get comment detail from TSN}
\usage{
getcommentdetailfromtsn(tsn, ...)
}
\arguments{
\item{tsn}{TSN for a taxonomic group (numeric)}
\item{...}{optional additional curl options (debugging tools mostly)}
}
\value{
A data.frame with results.
}
\description{
Get comment detail from TSN
}
\examples{
\dontrun{
getcommentdetailfromtsn(tsn=180543, config=timeout(4))
}
}
\keyword{internal}
|
66738f2e1836055bad1217f91983e9445f546f75 | c427a5b3c9c434de9765e9cf4c287a711f744d53 | /man/get_core_option_list_elements.Rd | 40eb94703e96016219266c9b5e7bdf1ac9a1ac39 | [] | no_license | bdevoe/iformr | 69ac4b6b1e762d4a855b8cd81bf338c33618da36 | fe6cbc4b1334edff5acf84785fc75308f9a8f405 | refs/heads/master | 2021-06-06T01:31:21.954796 | 2019-06-05T18:50:38 | 2019-06-05T18:50:38 | 140,438,573 | 0 | 0 | null | 2018-08-17T19:36:21 | 2018-07-10T13:45:16 | R | UTF-8 | R | false | true | 1,512 | rd | get_core_option_list_elements.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/option_lists.R
\name{get_core_option_list_elements}
\alias{get_core_option_list_elements}
\title{Get core elements in an option list}
\usage{
get_core_option_list_elements(server_name, profile_id, optionlist_id,
limit = 1000, offset = 0, access_token)
}
\arguments{
\item{server_name}{The server name as encoded in the url:
`https://server_name.iformbuilder.com`}
\item{profile_id}{The id number of your profile}
\item{optionlist_id}{The id number for the option list}
\item{limit}{The maximum number of option list items to return}
\item{offset}{The number of options to skip before beginning to return results}
\item{access_token}{The access_token required to establish communication with
the API}
}
\value{
A dataframe of the core option list elements
}
\description{
Sends a request to the iFormBuilder API to return the core option list
elements. Function will return the id, sort_order, label, key_value, and
condition_value.
}
\examples{
\dontrun{
# Get access_token
access_token <- get_iform_access_token(
server_name = "your_server_name",
client_key_name = "your_client_key_name",
client_secret_name = "your_client_secret_name")
# Get the core elements in an option list
core_elements <- get_core_option_list_elements(
server_name = "your_server_name",
profile_id = 123456,
optionlist_id = your_option_list_id,
access_token = access_token)
# Inspect the first five core elements
head(core_elements, 5)
}
}
|
ef680bf0b1aa7fa8bd9a39e3195ed8d2b8aae252 | 6312f33861a761734b53eb140c73e5b31b3bbbf1 | /oneRep/oneRep-GLMFPCA.R | 0385ba2306e414052fd1d7e7852ae10fd3239b71 | [] | no_license | yehanxuan/MultitaskSpline | dd45623cbd79ff7a43610f3823c4fe2c4b8396cd | e0417504aa72e63ca554abd116498ffae48619b7 | refs/heads/main | 2023-08-08T09:53:34.164574 | 2021-09-09T02:58:31 | 2021-09-09T02:58:31 | 394,359,903 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 360 | r | oneRep-GLMFPCA.R | ## Generate Mixed data
# Shape/smoothness parameter forwarded to the data generators (its exact
# semantics are defined by GenMulti_Mixed* elsewhere in the project).
nu = 1.5
# Dispatch on the experiment type set by the driver script.
# NOTE(review): there is no else branch -- an unrecognised DataType leaves
# simuData undefined and the unpacking below will fail.
if (DataType == "GLMexact"){
    simuData = GenMulti_Mixed(Mont, simu_n, M, sigma_e, nu, TypeVec)
} else if (DataType == "GLMHybrid"){
    simuData = GenMulti_Mixed_Hybrid(Mont, simu_n, M, sigma_e, nu, TypeVec)
}
# Evaluation grid: Mont equally spaced points on (0, 1].
grid = seq(1/Mont, 1, 1/Mont)
# Unpack simulated responses, covariates, covariance, and true coefficients.
Ymat = simuData$Y
Xmat = simuData$X
CovMat = simuData$C
beta_true = simuData$beta
|
270cb4d783205cc84e97ca90d451c7c68f9955d6 | 57bcba211c26f1837a2216dd3be7e3dbf3d8217e | /R/rcopula.R | 48bfb0f56f0f9721441278be0d3b20f4982edf1c | [] | no_license | gsoutinho/survCopula | 434383f4f27476961f6d8dcdec045dd8cf909ecd | b2f64b84370db702dbdc9c3ae122d13b42b9283e | refs/heads/master | 2021-08-22T01:56:54.609031 | 2020-08-31T10:38:15 | 2020-08-31T10:38:15 | 218,321,926 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,685 | r | rcopula.R | #' Random Number Generation for Copula Functions.
#' @description Random Number Generation for Bivariate Copula Functions. Returns a number of pairs of random values
#' from a bivariate copula with marginal distributions X and Y.
#' @param typeCopula Type of copula. Possible options are \code{"clayton"}, \code{"frank"} \code{"FGM"}, \code{"AMH"},
#' \code{"gumbel-hougaard"} and \code{"joe"}. Defaults to \code{"clayton"}.
#' @param theta A numeric value for the space parameter.
#' @param typeX Type of marginal distribution. Possible options are \code{"Exp"}, \code{"Norm"} \code{"Unif"} and
#' \code{"Gamma"}. Defaults to \code{"Exp"}.
#' @param num1_X A numeric value for the first parameter of the first marginal distribution.
#' @param num2_X A numeric value for the second parameter of the first marginal distribution.
#' Only required for two parameter distributions.
#' @param typeY Type of marginal distribution. Possible options are \code{"Exp"}, \code{"Norm"} \code{"Unif"} and \code{"Gamma"}.
#' Defaults to \code{"Exp"}.
#' @param num1_Y A numeric value for the first parameter of the second marginal distribution.
#' @param num2_Y A numeric value for the second parameter of the second marginal distribution.
#' Only required for two parameter distributions.
#' @param nsim Number of observations to be generated.
#'
#' @return
#' 2-dimensional random vector with the results of the simulation.
#' @examples
#'
#' res<-rcopula(typeCopula = 'clayton', theta = 2, typeX='Exp', num1_X=0.9,
#' typeY='Exp', num1_Y=0.3, nsim=1000)
#'
#' res
#'
#' res2<-rcopula(typeCopula = 'AMH', theta = 2, typeX='Norm', num1_X=0.9, num2_X=0.3,
#' typeY='Gamma', num1_Y=3, num2_Y=2, nsim=1000)
#'
#' res2[,2]
#'
#' @author Gustavo Soutinho, Luis Meira-Machado
rcopula <- function(typeCopula = 'clayton', theta = 1, typeX='Exp', num1_X=1, num2_X=NULL,
                    typeY='Exp', num1_Y=1, num2_Y=NULL, nsim=500){
  # Preallocate one slot per simulated pair instead of growing a matrix with
  # rbind() inside the loop (the original was O(nsim^2) in copies).
  rows <- vector("list", nsim)
  # seq_len() also makes nsim = 0 a safe no-op (1:nsim would iterate 1, 0).
  for (i in seq_len(nsim)) {
    # Draw a pair of independent U(0, 1) variates and push them through the
    # copula with the requested marginal distributions (copula() is defined
    # elsewhere in this package).
    v1 <- runif(1, 0, 1)
    v2 <- runif(1, 0, 1)
    res <- copula(v1, v2, theta = theta, type = typeCopula,
                  typeX = typeX, num1_X = num1_X, num2_X = num2_X,
                  typeY = typeY, num1_Y = num1_Y, num2_Y = num2_Y)
    # unname() keeps row names of the result frame clean (plain 1..nsim).
    rows[[i]] <- c(i, unname(res[1]), unname(res[2]))
  }
  TAB <- as.data.frame(do.call(rbind, rows))
  colnames(TAB) <- c('ID', 'X', 'Y')
  # Return only the simulated (X, Y) columns; the ID column is internal.
  TAB[, 2:3]
}
774bdb281fcf580cf66fb61814cdcea98150ca47 | 62e1665efcbd67bc0de0d9be749d5d2b222c80ce | /man/sinpoint.Rd | df0ad774a1119f8f2c54a1447f53f48ce4ee0a12 | [] | no_license | sewouter/StratigrapheR | 25e669143eeb73051e79e0b4cb490e6060ed0d4b | 2d19b6cc5dbbb4bade454ad83b61842d2f8871e1 | refs/heads/main | 2021-09-28T00:26:51.110494 | 2021-09-24T12:23:45 | 2021-09-24T12:23:45 | 341,558,856 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,300 | rd | sinpoint.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sinpoint.R
\name{sinpoint}
\alias{sinpoint}
\title{Gives a table of equally sampled points following a sinusoidal function}
\usage{
sinpoint(x, y, delta, x0 = 0, pos = 1, phase = 1.5, nwave = 1, nint = 50)
}
\arguments{
\item{x}{the x value of the end of the interval}
\item{y}{the y offset (see next parameter)}
\item{delta}{the difference between the min- and maxima in y}
\item{x0}{the x value of the beginning of the interval (0 as default)}
\item{pos}{an integer specifying the kind of vertical offset; should
the sinusoidal function be shifted so that y is the first value (pos = 1, is
the default), the last value (2), the minimum (3) or the maximum (4) of the
function}
\item{phase}{the phase of the function at x0 in multiples of pi (1.5
as default; begins at its lowest)}
\item{nwave}{number of complete sinuses waves (1 as default)}
\item{nint}{number of intervals for the sampling (50 as default)}
}
\value{
a table of points following a sinusoidal function
}
\description{
Gives a table of equally sampled points following a sinusoidal function
}
\examples{
res <- sinpoint(c(4,5), 5, 1, x0 = c(0,1), pos = 3)
plot(res$x, res$y)
multilines(res$i, res$x, res$y, col = c("black" ,"red"), type = "o")
}
|
375e56edb1a3583c6becd4b8e1021722e9588732 | 6172d8feb1bf4e1b7112023eec206b89095b7c2b | /plot2.R | 5c82974195b8cded93d0f2a121cc2b0fbfd4fa33 | [] | no_license | LifeWareGit/ExData_Plotting1 | 285cd80159d2a6cb0ca8173d937fa8e4f6eed5da | 367a3d60d7eadafc7bf0ccfc5531dc9a06e3be9c | refs/heads/master | 2021-01-18T22:39:19.546234 | 2016-01-09T03:30:34 | 2016-01-09T03:30:34 | 49,216,739 | 0 | 0 | null | 2016-01-07T16:39:26 | 2016-01-07T16:39:26 | null | UTF-8 | R | false | false | 879 | r | plot2.R | # Check for required packages and install if necessary
# Vector of currently installed package names.
curPackages <- installed.packages()[,1]
reqPackages <- c("data.table","dplyr","lubridate","sqldf")
# Install any required package that is missing.
for (pkg in reqPackages){
    if (!is.element(pkg,curPackages))
        install.packages(pkg)
}
# Load the required packages if necessary
library(data.table)
library(dplyr)
library(lubridate)
library(sqldf)
# Load a subset of the data and combine the Date and Time columns
# (sqldf reads the file connection directly, so only the two target days are
# ever parsed into memory).
dataFile <- file("./household_power_consumption.txt")
pwr_dt <- sqldf("select * from dataFile where Date in ('1/2/2007','2/2/2007')",
                file.format = list(header = TRUE, sep = ";")) %>%
    mutate(datetime=dmy_hms(paste(Date,Time)))
close(dataFile)
# Plot 2 Code
# Global active power over time, written to a 480x480 PNG device.
png(file="plot2.png",width=480,height=480)
par(mar=c(3,4,4,2))
with(pwr_dt,plot(datetime,Global_active_power,type="l",ylab="Global Active Power (Kilowatts)"))
dev.off()
8bd0d1573bd1dfb7ef012e1dc90624ebd0cedccd | 458162de244feea036cdda22d2415085ad303e1c | /ui.R | 159cdfbf716abeb275b256a14875c3f24f7d954c | [] | no_license | map0logo/devdataprod-011 | f9d0eede5d566bba1175614cacdd08a5fce9dc1e | 21af1626484dd78e4ee5c9ee8277c11b4ed8dcef | refs/heads/master | 2020-06-04T11:00:59.331385 | 2015-02-23T03:49:46 | 2015-02-23T03:49:46 | 31,187,404 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,273 | r | ui.R |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Loads the `data` data frame used to populate the input choices below.
load("./data.Rdata")
shinyUI(fluidPage(
  # Application title
  titlePanel("Índice Nacional de Precios al Consumidor"),
  # Sidebar with a slider input for number of bins
  # NOTE(review): comment above is stale -- the sidebar actually holds a date
  # range picker and two multi-select inputs, not a slider.
  sidebarLayout(
    sidebarPanel(
      # Date range limited to the span of the loaded data.
      dateRangeInput("dates",
                     "Rango de fechas:",
                     language = "es",
                     start = min(data$date),
                     end = max(data$date),
                     min = min(data$date),
                     max = max(data$date)),
      # City selector -- presumably the first 12 columns of `data` are the
      # city series; TODO confirm against the data file.
      selectInput("cities",
                  "Ciudades:",
                  choices = as.list(names(data[1:12])),
                  multiple = TRUE,
                  selected = c("Nacional")),
      # Study-domain selector driven by the factor levels in the data.
      selectInput("domains",
                  "Dominios:",
                  choices = levels(data$group),
                  multiple = TRUE,
                  selected = c("GENERAL"))
    ),
    # Show a plot of the generated distribution
    mainPanel(
      h2("IPC por grupo según dominio de estudio, 2008 - Mayo 2014"),
      plotOutput("distPlot")
    )
  )
))
|
f7679c5b5f25b27ef8d1ba7a7cc79331f878d337 | 05f59cd61540ce4c9961d91377cff9008b78c009 | /R/rank_integer_index.R | 81437b2f73241043d55a6f2e512abb2ceafda998 | [
"MIT"
] | permissive | rzgross/uRbanmatching | e3abddab1946e2f84c32378820585b96abe4840d | 356b3d1ac10d72e3c5a20de08903f137c2c22965 | refs/heads/master | 2023-04-12T21:20:24.938362 | 2021-05-09T19:04:41 | 2021-05-09T19:04:41 | 355,984,522 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,013 | r | rank_integer_index.R | #' rank_integer_index
#'
#' Converts \code{rank_cols} in all allowed forms to an integer index. Takes in ranked cols, either given by integer, logical, or named index. Converts to integer index.
#'
#' @param rank_cols Integer/number, or logical, or names within \code{colnames(x_mat)}
#' @param x_mat the x matrix of interest
#' @export
rank_integer_index <- function(rank_cols, x_mat) {
  # NULL means "no ranked columns": return an empty integer index.
  if (is.null(rank_cols)) {
    return(integer(0))
  }
  # Handle logical indices before the numeric test. In the original the
  # numeric membership test ran first, so an all-TRUE logical vector (TRUE
  # coerces to 1 under %in%) was returned unconverted, violating the
  # documented "converts to integer index" contract.
  if (is.logical(rank_cols)) {
    if (length(rank_cols) == ncol(x_mat)) {
      return(which(rank_cols))
    } else {
      stop("logical `rank_cols` must be same length as `ncol(x_mat)`")
    }
  }
  # Numeric indices are validated against the column count and returned
  # as-is. The is.numeric() guard prevents character strings like "2" from
  # slipping through via %in% coercion.
  if (is.numeric(rank_cols) && all(rank_cols %in% seq_len(ncol(x_mat)))) {
    return(rank_cols)
  }
  # Otherwise treat `rank_cols` as column names.
  if (is.null(colnames(x_mat))) {
    stop("x_mat must have colnames to use named rank_cols")
  }
  if (any(!(rank_cols %in% colnames(x_mat)))) {
    stop("not all rank_cols are present in x_mat colnames")
  }
  which(colnames(x_mat) %in% rank_cols)
}
|
54ee5ec9870efa85f71fb7d8ae760f2c63d64205 | 457db4c1e02e976477d9177801c8589835e302f4 | /test.R | 87776f725ef6301746de6c8090970e65ccbd8d54 | [] | no_license | asousos/test | f3c48b1741ca1e0630490d03f53004b04ac303c9 | 1ae2f3ac9481fdd76f8610bfc8987443f1d43895 | refs/heads/master | 2021-05-10T10:16:09.575979 | 2018-01-21T22:49:52 | 2018-01-21T22:49:52 | 118,377,651 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 88 | r | test.R | # This is a script
# to test rstudio and git integration
# This is a third comment line
|
0d468f3ea84ca6149c3917c3717d22c775d7087e | 3629a3a9c89f2bb28aa9fb39657e3f855862fcf2 | /man/pkg_check.Rd | 365b7a7040eae6d6bb3a171d1f3cf3a677636741 | [] | no_license | AkselA/R-projector | 48c47655edb656508a9faebe7f51b4e2d2035937 | 8ae70dfe3d3e91aff4061a5b83e2abf84976afcb | refs/heads/master | 2021-03-19T03:48:52.619153 | 2020-06-06T18:22:40 | 2020-06-06T18:22:40 | 247,130,462 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,072 | rd | pkg_check.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkg_xyz.R
\name{pkg_check}
\alias{pkg_check}
\title{Package check}
\usage{
pkg_check(projname = ".", bopt = c("--no-manual"),
copt = c("--no-manual", "--timings"), ropt = c("--vanilla"),
rm.src = TRUE)
}
\arguments{
\item{projname}{path to the project file}
\item{bopt}{a character vector, or space-delimited character string, of options
to be passed to \code{\link{build}}. By default \code{--no-manual}}
\item{copt}{a character vector, or space-delimited character string, of options
to be passed to \code{\link{check}}. By default \code{--no-manual} and
\code{--timings}}
\item{ropt}{a character vector, or space-delimited character string, of options
to be passed to \code{R}. By default \code{--vanilla} to run in a baseline R
session, reading no profile or environment files, and no saving of workspace}
\item{rm.src}{remove source file}
}
\description{
Build and run diagnostic checks on the package
}
|
defa3ffacaf830166e7f3b8315cdfe9c82c58c2f | 5ea3d789fae96cd90e06334ee3e5e9bfa7f89227 | /global.R | 513167d255229931b089160757d34ca1af838e4f | [] | no_license | Blackbawks/WSTC6_Scheduler | 85fe891598af7862bdb2d0602967b1d02e0c6bc2 | 72fd45a80a8c6b1fc7892e4291317c8dd3aa69d9 | refs/heads/master | 2022-12-20T12:41:25.111062 | 2020-09-04T13:36:00 | 2020-09-04T13:36:00 | 263,449,167 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 47 | r | global.R | testfunction <- function(x){
return("this")
} |
63c4f738748f4b94738e433d275954a03e4193aa | ee0689132c92cf0ea3e82c65b20f85a2d6127bb8 | /32-basicGraphs/12b-graphs2.R | 073c1b6d52945e12da158ca936fdec6799b53093 | [] | no_license | DUanalytics/rAnalytics | f98d34d324e1611c8c0924fbd499a5fdac0e0911 | 07242250a702631c0d6a31d3ad8568daf9256099 | refs/heads/master | 2023-08-08T14:48:13.210501 | 2023-07-30T12:27:26 | 2023-07-30T12:27:26 | 201,704,509 | 203 | 29 | null | null | null | null | UTF-8 | R | false | false | 743 | r | 12b-graphs2.R |
# Teaching script: basic base-graphics examples on built-in datasets.
women
plot(women)
# Scatter plot with custom symbol, axis labels, colour, and connected points.
plot(x=women$weight, y=women$height, pch=15, xlab='Weight', ylab='Height', col='red', cex=2, type='b')
title(main='Main Title', sub='Sub Title')
boxplot(women$height)
# Horizontal reference lines at selected heights.
abline(h=c(58, 62,65,68,72))
?boxplot
summary(women)
quantile(women$height)
hist(women$height, breaks=10)
# Random normal sample to illustrate histograms and density overlays.
x = rnorm(100,50,100)
hist(x)
hist(x, freq=F)
lines(density(x))
plot(density(x), col='red')
#pie
x = c(10,20,40,50)
pie(x)
xlabels = c('A','B','C','D')
pie(x, labels=xlabels)
barplot(x,col=1:4)
barplot(x,col=1:4, horiz = T)
pairs(women)
head(mtcars)
pairs(mtcars)
cor(women$height,women$weight)
options(digits=4)
plot(women)
# Regression line of weight on height overlaid on the scatter plot.
abline(lm(women$weight ~ women$height), col='red', lty=2, lwd=4)
mosaic::qdist("norm", p = 0.95, mean = 0, sd = 1)
|
44197644ef774227d4bb0500656193306225b72a | 33e47772221e64495af2f76fbe946106cd9dd5b5 | /doc/paper/figure2/enrichment_plots.R | 705fc2b962f2e5ac4804965b74fc3ed1acb68bfa | [] | no_license | mikelaff/mirna-eqtl-manuscript | 8ddaed87bd3eee1923afa6f5ef0ebd80de7c205c | a87deb4dc063e03e5371ff7c4d19db782e295e12 | refs/heads/main | 2023-08-31T20:01:11.324071 | 2023-08-24T23:44:31 | 2023-08-24T23:44:31 | 590,512,745 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,884 | r | enrichment_plots.R |
library(here)
library(dplyr)
library(readr)
library(magrittr)
library(ggplot2)
#library(DESeq2)
#library(RColorBrewer)
#library(qqman)
library(mikelaffr)
# FIGURE ###############################################################################################################
# figure 2B: miRNA-eQTL enrichment in chrom annotations
output.odds.pdf <- paste0(here("doc/paper/figure2/pdfs/"), "figure2B_enrichment_odds.pdf")
output.pval.pdf <- paste0(here("doc/paper/figure2/pdfs/"), "figure2B_enrichment_pval.pdf")
# OUTPUT FILES #########################################################################################################
# output directory for pdf files
#dir.pdf <- here("doc/paper/figure2/pdfs/")
# INPUT FILES ##########################################################################################################
# mirQTL enrichment on chromHMM using mirQTL-MIXED maf/ld and minimum P-value
minP.results.txt <- here("results/garfield/hg38-mirQTL-MIXED_mirQTL-minP_chromHMM/garfield.test.hg38-mirQTL-MIXED_mirQTL-minP_chromHMM.out")
# p-value file
minP.pvalue.txt <- here("results/garfield/hg38-mirQTL-MIXED_mirQTL-minP_chromHMM/garfield.Meff.hg38-mirQTL-MIXED_mirQTL-minP_chromHMM.out")
# GLOBALS ##############################################################################################################
# Import Results #######################################################################################################
df.minP.results <- read_table2(minP.results.txt)
minP.pvalue.thresh <- as.numeric(strsplit(read_lines(minP.pvalue.txt), "\t")[[2]][2])
df.minP.results %<>%
mutate(Annotation_Name = sapply(strsplit(Annotation, "_"), `[`, 2),
Annotation_Sex = sapply(strsplit(Annotation, "_"), `[`, 3),
Annotation_Number = as.integer(sapply(strsplit(Annotation, "_"), `[`, 1)),
SIG = Pvalue <= minP.pvalue.thresh)
df.minP.results$Beta[df.minP.results$SE > 10] <- NA
df.minP.results$CI95_lower[df.minP.results$SE > 10] <- NA
df.minP.results$CI95_upper[df.minP.results$SE > 10] <- NA
df.minP.results$Pvalue[df.minP.results$SE > 10] <- NA
df.minP.results %>%
filter(PThresh == 1.434e-6) %>%
ggplot(mapping = aes(x = reorder(Annotation_Name, Annotation_Number), y = Beta, color = Annotation_Sex, alpha = SIG)) +
geom_point(position = position_dodge(0.7), shape = 15, size = 1) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5),
legend.position = "bottom",
legend.title = element_text(size = 4),
legend.text = element_text(size = 2),
axis.text = element_text(size = 4),
axis.title = element_text(size = 4),
plot.caption = element_text(size = 4)) +
scale_alpha_manual(values = c(0.3,1)) +
scale_color_manual(values = c("green", "navy")) +
geom_errorbar(aes(ymin=CI95_lower, ymax=CI95_upper), position = position_dodge(0.7), width = 0.2, size = 0.5) +
geom_hline(yintercept = 0) +
labs(y = "Log Odds Ratio (95% Conf. Int.)",
x = "ChromHMM 15-state Mnemonic",
color = "Sex",
alpha = "Significant Enrichment P-value",
#title = "GARFIELD: mirQTL Enrichment within Fetal Brain Chromatin States",
#subtitle = "(minimum eQTL P-value at each variant)",
caption = "Annotations with high error (SE>10) removed. Using mirQTL (MIXED ancestry) MAF and LD.")
ggsave(filename = output.odds.pdf, width = 6.5, height = 2, units = "in")
df.minP.results %>%
filter(PThresh == 1.434e-6) %>%
ggplot(aes(x = reorder(Annotation_Name, Annotation_Number), y = -log10(Pvalue), fill = Annotation_Sex)) +
geom_col(position = "dodge") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5),
legend.position = "bottom") +
scale_fill_manual(values = c("green", "navy")) +
geom_hline(aes(yintercept = -log10(minP.pvalue.thresh)), linetype = "dashed") +
labs(y = "-Log10(P-value)",
x = "ChromHMM 15-state Mnemonic",
fill = "Sex",
title = "GARFIELD: mirQTL Enrichment within Fetal Brain Chromatin States",
subtitle = "(minimum eQTL P-value at each variant)",
caption = "Annotations with high error (SE>10) removed. Using mirQTL (MIXED ancestry) MAF and LD.")
ggsave(filename = output.pval.pdf, width = 6.5, height = 2, units = "in")
theme_bw() +
theme(legend.position = "none",
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
panel.spacing.x = unit(1, "mm"),
panel.border = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank(),
strip.background = element_blank(),
axis.title.x = element_blank(),
axis.title = element_text(size = 18),
axis.text = element_text(size = 14),
strip.text = element_text(size = 10)) +
|
79410e75ac03e2e3f10aa81e99569d00699d8ce7 | 74453745dd2a15c8e310e8f4446ccada9702435e | /tests/testthat/test-install.R | ef5ff37ba69c5e52d17e7eba8d496ea6595114b1 | [
"MIT"
] | permissive | rstudio/renv | ffba012525e8b1e42094899c3df9952b54ecb945 | 8c10553e700cad703ddf4dd086104f9d80178f3a | refs/heads/main | 2023-08-29T08:45:28.288471 | 2023-08-28T22:01:19 | 2023-08-28T22:01:19 | 159,560,389 | 958 | 169 | MIT | 2023-09-14T00:55:28 | 2018-11-28T20:25:39 | R | UTF-8 | R | false | false | 16,005 | r | test-install.R |
# TODO: This test assumes 'pak' integration is disabled?
test_that("install works when DESCRIPTION contains no dependencies", {
renv_tests_scope()
desc <- c("Type: Package", "Package: test", "Version: 1.0")
writeLines(desc, con = "DESCRIPTION")
expect_length(install(), 0L)
})
test_that("requested version in DESCRIPTION file is honored", {
renv_tests_scope()
desc <- c(
"Type: Package",
"Package: test",
"Imports: bread (== 0.1.0), toast"
)
writeLines(desc, con = "DESCRIPTION")
install()
expect_true(renv_package_version("bread") == "0.1.0")
})
test_that("installation failure is well-reported", {
# TODO: test seems to fail because a connection gets
# left open by utils::package.skeleton()
skip_on_os("windows")
renv_scope_tempdir()
# init dummy library
library <- renv_scope_tempfile("renv-library-")
ensure_directory(library)
# dummy environment
envir <- new.env(parent = emptyenv())
envir[["hello"]] <- function() {}
# prepare dummy package
package <- "renv.dummy.package"
unlink(package, recursive = TRUE)
suppressMessages(utils::package.skeleton(package, environment = envir))
# remove broken man files
unlink("renv.dummy.package/Read-and-delete-me")
unlink("renv.dummy.package/man", recursive = TRUE)
# give the package a build-time error
writeLines("parse error", con = file.path(package, "R/error.R"))
# try to build it and confirm error
record <- list(Package = package, Path = package)
expect_error(renv_install_package_impl(record))
})
test_that("install forces update of dependencies as needed", {
# TODO: this fails on CRAN presumedly because the wrong
# version of the breakfast package is searched for; need
# to figure out where the repositories are getting changed.
skip_on_cran()
renv_tests_scope("breakfast")
# install the breakfast package
install("breakfast")
# ensure its dependencies were installed
packages <- c("bread", "oatmeal", "toast")
for (package in packages)
expect_true(file.exists(renv_package_find(package)))
# remove breakfast
remove("breakfast")
# modify 'toast' so that it's now too old
path <- renv_package_find("toast")
descpath <- file.path(path, "DESCRIPTION")
desc <- renv_description_read(descpath)
desc$Version <- "0.1.0"
renv_dcf_write(desc, file = descpath)
# try to install 'breakfast' again
install("breakfast")
# validate that 'toast' was updated to 1.0.0
desc <- renv_description_read(package = "toast")
expect_equal(desc$Version, "1.0.0")
})
test_that("packages can be installed from sources", {
renv_tests_scope()
init()
# get path to package sources in local repos
repos <- getOption("repos")[["CRAN"]]
tarball <- file.path(repos, "src/contrib/bread_1.0.0.tar.gz")
# try to install it
install(tarball)
expect_true(renv_package_version("bread") == "1.0.0")
})
test_that("various remote styles can be used during install", {
skip_if_no_github_auth()
renv_tests_scope()
init()
# install CRAN latest
install("bread")
expect_true(renv_package_installed("bread"))
expect_true(renv_package_version("bread") == "1.0.0")
# install from archive
install("bread@0.1.0")
expect_true(renv_package_installed("bread"))
expect_true(renv_package_version("bread") == "0.1.0")
# install from github
install("kevinushey/skeleton")
expect_true(renv_package_installed("skeleton"))
expect_true(renv_package_version("skeleton") == "1.0.1")
# install from github PR
install("kevinushey/skeleton#1")
expect_true(renv_package_installed("skeleton"))
expect_true(renv_package_version("skeleton") == "1.0.2")
# install from branch
install("kevinushey/skeleton@feature/version-bump")
expect_true(renv_package_installed("skeleton"))
expect_true(renv_package_version("skeleton") == "1.0.2")
# install from subdir
install("kevinushey/subdir:subdir")
expect_true(renv_package_installed("subdir"))
expect_true(renv_package_version("subdir") == "0.0.0.9000")
# install from URL to zip
install("https://github.com/kevinushey/skeleton/archive/master.zip")
expect_true(renv_package_installed("skeleton"))
expect_true(renv_package_version("skeleton") == "1.0.1")
})
test_that("Remotes fields in a project DESCRIPTION are respected", {
skip_if_no_github_auth()
renv_tests_scope()
init()
desc <- c(
"Type: Package",
"Package: renv.test.package",
"Suggests: skeleton",
"Remotes: kevinushey/skeleton"
)
writeLines(desc, con = "DESCRIPTION")
install()
record <- renv_snapshot_description(package = "skeleton")
expect_true(record$Source == "GitHub")
})
test_that("source packages in .zip files can be installed", {
renv_tests_scope()
location <- download.packages("bread", destdir = renv_scope_tempfile())
path <- location[1, 2]
renv_archive_decompress(path, exdir = "bread")
zippath <- file.path(getwd(), "bread_1.0.0.zip")
setwd("bread")
status <- catchall(zip(zippath, files = ".", extras = "-q"))
setwd("..")
if (inherits(status, "condition"))
skip("could not zip archive")
install(zippath)
expect_true(renv_package_installed("bread"))
})
test_that("renv warns when installing an already-loaded package", {
skip_on_cran()
renv_tests_scope()
install("bread@1.0.0")
renv_namespace_load("bread")
defer(renv_namespace_unload("bread"))
expect_snapshot(install("bread@0.1.0"))
})
test_that("install() writes out Github fields for backwards compatibility", {
skip_if_no_github_auth()
renv_tests_scope()
install("rstudio/packrat")
descpath <- file.path(.libPaths()[1], "packrat/DESCRIPTION")
dcf <- renv_description_read(descpath)
expect_equal(dcf$RemoteRepo, dcf$GithubRepo)
expect_equal(dcf$RemoteUsername, dcf$GithubUsername)
expect_equal(dcf$RemoteRef, dcf$GithubRef)
expect_equal(dcf$RemoteSha, dcf$GithubSHA1)
})
# On Windows, R CMD INSTALL can fail when the library path contains
# characters special to cmd.exe (e.g. '&'), so renv substitutes a "safe"
# path. Verify that safe paths are left alone, unsafe paths are rewritten,
# and installation into an unsafe library still succeeds.
test_that("renv uses safe library paths on Windows", {
  skip_if_not(renv_platform_windows())
  renv_tests_scope()

  # a path without problematic characters should be used as-is
  goodlib <- "Research and Development"
  expect_true(renv_libpaths_safe(goodlib) == goodlib)

  # a path containing '&' is unsafe, so the safe path must differ
  # (bug fix: the original asserted the opposite via a double negative,
  # `expect_false(... != badlib)`; compare the sibling test below)
  badlib <- "R&D"
  expect_false(renv_libpaths_safe(badlib) == badlib)

  # installation into the unsafe library should still succeed, and the
  # package should land in the original (unsafe) location
  ensure_directory(badlib)
  renv_libpaths_set(badlib)
  install("bread")
  descpath <- file.path(getwd(), "R&D/bread")
  desc <- renv_description_read(descpath)
  expect_true(desc$Package == "bread")
  expect_true(desc$Version == "1.0.0")
})
# A library path containing single quotes is unsafe on every platform, so
# the computed safe path must differ from it.
test_that("renv uses safe library path when needed", {
renv_tests_scope()
badlib <- file.path(getwd(), "Has'Single'Quote")
dir.create(badlib)
expect_false(renv_libpaths_safe(badlib) == badlib)
})
# The 'bitbucket::' remote prefix should resolve and install from Bitbucket.
test_that("renv can install packages from Bitbucket", {
skip_on_cran()
renv_tests_scope()
install("bitbucket::kevinushey/skeleton")
expect_true(renv_package_installed("skeleton"))
})
# The 'pkg@version' syntax should install exactly the requested version.
test_that("install via version succeeds", {
skip_on_cran()
renv_tests_scope()
install("bread@0.0.1")
expect_true(renv_package_installed("bread"))
expect_true(renv_package_version("bread") == "0.0.1")
})
# With no arguments, install() infers dependencies from project sources;
# a second call should be a no-op once everything is installed.
test_that("install() installs inferred dependencies", {
skip_on_cran()
renv_tests_scope("breakfast")
# try installing packages
records <- install()
# validate that we've installed breakfast + deps
expect_length(records, 4L)
expect_true(renv_package_installed("breakfast"))
# try calling install once more; nothing should happen
records <- install()
expect_length(records, 0L)
})
# When RENV_PATHS_CELLAR names directories (including a nonexistent one),
# packages found in the cellar should be preferred over the repositories,
# and the recorded source URL should be a file:// URI into the cellar.
test_that("install() prefers cellar when available", {
skip_on_cran()
renv_tests_scope()
locals <- paste(
renv_tests_path("nowhere"),
renv_tests_path("local"),
sep = ";"
)
renv_scope_options(renv.config.cache.enabled = FALSE)
renv_scope_envvars(RENV_PATHS_CELLAR = locals)
records <- install("skeleton")
record <- records$skeleton
expect_equal(record$Source, "Cellar")
path <- renv_path_normalize(renv_tests_path("local/skeleton"))
prefix <- if (renv_platform_windows()) "file:///" else "file://"
uri <- paste0(prefix, path)
expect_equal(attr(record, "url"), uri)
})
# Installing an archived (non-current) version should work when downloads
# go through the libcurl method.
test_that("packages can be installed from the archive w/libcurl", {
skip_on_cran()
# validate that we have libcurl
ok <- identical(capabilities("libcurl"), c(libcurl = TRUE))
skip_if(!ok, "libcurl is not available")
# perform test
renv_tests_scope()
renv_scope_envvars(RENV_DOWNLOAD_FILE_METHOD = "libcurl")
install("bread@0.1.0")
expect_true(renv_package_installed("bread"))
expect_equal(renv_package_version("bread"), "0.1.0")
})
# Regression test: a 'configure.vars' option with a named entry for another
# package must not break installation (see renv issue #609).
test_that("issue #609", {
skip_on_cran()
renv_tests_scope()
renv_scope_options(configure.vars = c(breakfast = ""))
install("bread")
expect_true(renv_package_installed("bread"))
})
# Installing from a subdirectory of a git remote, then restoring it from
# the lockfile. Currently skipped as unreliable in CI.
test_that("we can install packages from git remotes within subdirs", {
skip_on_cran()
skip_on_ci()
skip("unreliable test")
renv_tests_scope("subdir")
install("git@github.com:kevinushey/subdir.git:subdir", rebuild = TRUE)
expect_true(renv_package_installed("subdir"))
snapshot()
remove("subdir")
expect_false(renv_package_installed("subdir"))
restore(packages = "subdir", rebuild = TRUE)
expect_true(renv_package_installed("subdir"))
})
# A package installed from a path inside the project should record that
# relative path verbatim as its RemoteUrl in the lockfile.
test_that("packages embedded in the project use a project-local RemoteURL", {
skip_if(getRversion() < "4.1")
skip_if_not_installed("usethis")
renv_tests_scope("example")
usethis <- renv_namespace_load("usethis")
skip_if(is.null(usethis$create_package))
renv_scope_options(usethis.quiet = TRUE)
unlink("example", recursive = TRUE)
usethis$create_package("example", rstudio = FALSE, open = FALSE)
install("./example")
lockfile <- snapshot(lockfile = NULL)
expect_equal(lockfile$Packages$example$RemoteUrl, "./example")
# TODO: if the user provides a "weird" path, we'll use it as-is.
# is that okay? what about relative paths that resolve outside of
# the project root directory?
install("./././example")
lockfile <- snapshot(lockfile = NULL)
expect_equal(lockfile$Packages$example$RemoteUrl, "./././example")
})
# Passing a direct path to a tarball that lives inside a cellar directory
# should record "Cellar" as the package source, both in the install record
# and in a subsequent snapshot.
test_that("packages installed from cellar via direct path", {
skip_on_cran()
renv_tests_scope("skeleton")
locals <- paste(
renv_tests_path("nowhere"),
renv_tests_path("local"),
sep = ";"
)
renv_scope_options(renv.config.cache.enabled = FALSE)
renv_scope_envvars(RENV_PATHS_CELLAR = locals)
path <- renv_tests_path("local/skeleton/skeleton_1.0.1.tar.gz")
records <- install(path, rebuild = TRUE)
expect_equal(records$skeleton$Source, "Cellar")
lockfile <- snapshot(lockfile = NULL)
expect_equal(lockfile$Packages$skeleton$Source, "Cellar")
})
# The temporary staging library used during installation must inherit the
# permissions of the real library (checked with a permissive umask).
test_that("staging library path has same permissions as library path", {
skip_on_cran()
skip_on_windows()
renv_tests_scope()
library <- renv_paths_library()
ensure_directory(library)
renv_scope_libpaths(library)
umask <- Sys.umask("0")
Sys.chmod(library, "0775")
Sys.umask(umask)
staging <- renv_install_staged_library_path()
expect_equal(file.mode(staging), file.mode(library))
})
# A package installed from a remote subdirectory should be cached so that a
# second install can be satisfied from the cache.
test_that("packages installed from a RemoteSubdir can be retrieved from cache", {
skip_on_windows()
skip_slow()
renv_tests_scope()
cachepath <- renv_scope_tempfile("renv-cache-")
ensure_directory(cachepath)
renv_scope_envvars(RENV_PATHS_CACHE = cachepath)
init()
# install first from remote
install("kevinushey/subdir:subdir")
# remove, and re-install from cache
remove("subdir")
install("kevinushey/subdir:subdir")
expect_true(renv_package_installed("subdir"))
})
# Two different packages hosted in subdirectories of the same repository
# should both be installable via the 'user/repo:subdir' syntax.
test_that("repositories containing multiple packages can be installed", {
skip_on_windows()
skip_slow()
renv_tests_scope()
install("kevinushey/subdir:pkgA")
expect_true(renv_package_installed("pkgA"))
install("kevinushey/subdir:pkgB")
expect_true(renv_package_installed("pkgB"))
})
# Adding "Suggests" to the dependency-fields project setting should pull in
# suggested dependencies ('egg' is only suggested by 'breakfast').
test_that("Suggest dependencies are used when requested", {
renv_tests_scope("breakfast")
fields <- c("Imports", "Depends", "LinkingTo", "Suggests")
settings$package.dependency.fields(fields)
install("breakfast")
expect_true(renv_package_installed("egg"))
})
# The 'dependencies' argument accepts both the "strong" shorthand and
# custom Config/Needs/* fields.
test_that("custom dependency fields in install are supported", {
skip_on_cran()
skip_on_windows()
renv_tests_scope()
install("breakfast", dependencies = "strong")
expect_false(renv_package_installed("egg"))
install("breakfast", dependencies = c("strong", "Config/Needs/protein"))
expect_true(renv_package_installed("egg"))
})
# Snapshot-test the console output of install(): once for a fresh install
# and once when everything is restored from the cache.
test_that("install has user-friendly output", {
renv_scope_libpaths()
renv_scope_envvars(RENV_PATHS_CACHE = renv_scope_tempfile("renv-tempcache-"))
renv_tests_scope("breakfast")
expect_snapshot(install())
renv_tests_scope("breakfast")
expect_snapshot(install())
})
# A SOURCE package repacked into a '<pkg>_<sha>.zip' archive (as produced
# by GitHub downloads on Windows) should install correctly.
test_that("package sources of the form <pkg>_<sha>.zip can be installed", {
skip_on_cran()
skip_if(!renv_platform_windows())
renv_tests_scope()
renv_tests_scope_repos()
# get path to .tar.gz
source <- download.packages("bread", type = "source")[1, 2]
# repack as a .zip archive
exdir <- renv_scope_tempfile("bread-")
ensure_directory(exdir)
renv_archive_decompress(source, exdir = exdir)
zipfile <- file.path(tempdir(), "bread_f96a78e23d44d68d329c2dbf168a4dee1882a1c6.zip")
local({
renv_scope_wd(exdir)
zip(zipfile, files = "bread")
})
# now try to install it
install(zipfile)
expect_true(renv_package_installed("bread"))
})
# A BINARY package (zipped from an installed library) named with a SHA
# suffix should likewise be recognized and installed.
test_that("package binaries of the form <pkg>_<sha>.zip can be installed", {
skip_on_cran()
skip_if(!renv_platform_windows())
renv_tests_scope()
renv_tests_scope_repos()
# install bread
install("bread")
# create a zipfile from the installed package
library <- renv_libpaths_active()
zipfile <- file.path(tempdir(), "bread_f96a78e23d44d68d329c2dbf168a4dee1882a1c6.zip")
local({
renv_scope_wd(library)
zip(zipfile, files = "bread", extras = "-q")
})
# remove bread
remove("bread")
expect_false(renv_package_installed("bread"))
# now try to install from zipfile
install(zipfile)
expect_true(renv_package_installed("bread"))
})
# Installing a binary whose package fails to load should be reported as an
# error, unless the load test is explicitly disabled via --no-test-load.
test_that("install() reports failure when a 'bad' binary is installed", {
  skip_on_cran()
  renv_tests_scope()

  # force the post-install load test in this scope on all platforms
  renv_scope_envvars(RENV_INSTALL_TEST_LOAD = TRUE)

  # install bread
  install("bread")

  # copy the installed package, and create a 'broken' binary whose R code
  # signals an error as soon as the package is loaded
  src <- renv_package_find("bread")
  tgt <- file.path(tempdir(), "bread")
  renv_file_copy(src, tgt)
  local({
    renv_scope_wd(tgt)
    dir.create("R")
    writeLines("stop('oh no')", con = "R/bread")
  })

  # installing the broken binary should fail, leaving the package absent
  remove("bread")
  expect_false(renv_package_installed("bread"))
  expect_error(install(tgt))
  expect_false(renv_package_installed("bread"))

  # skipping the load test allows installation to succeed, but loading the
  # namespace afterwards should still error.
  # bug fix: the package name must be passed as a string; the bare symbol
  # `bread` made expect_error() pass for the wrong reason (object not found)
  renv_scope_options(INSTALL_opts = c(bread = "--no-test-load"))
  install(tgt)
  expect_true(renv_package_installed("bread"))
  expect_error(renv_namespace_load("bread"))
  remove("bread")
})
# With dependencies = "Imports", only Imports from the project DESCRIPTION
# should be installed -- not Depends or Suggests.
test_that("install() respects dependencies argument", {
skip_on_cran()
project <- renv_tests_scope()
init()
contents <- heredoc("
Type: Project
Depends: coffee
Imports: bread
Suggests: muffin
")
writeLines(contents, con = "DESCRIPTION")
install(dependencies = "Imports")
expect_true(renv_package_installed("bread"))
expect_false(renv_package_installed("coffee"))
expect_false(renv_package_installed("muffin"))
})
# An unreachable repository in the 'repos' option must not prevent
# installation from the remaining, working repositories.
test_that("install() succeeds even some repositories cannot be queried", {
renv_tests_scope()
repos <- getOption("repos")
repos[["NARC"]] <- file.path(repos[["CRAN"]], "missing")
renv_scope_options(repos = repos)
init()
install("bread")
expect_true(renv_package_installed("bread"))
})
# GitHub installs through the libcurl download method should not attach the
# Authorization header twice (which breaks the request).
test_that("install() doesn't duplicate authentication headers", {
renv_scope_envvars(RENV_DOWNLOAD_METHOD = "libcurl")
project <- renv_tests_scope()
init()
install("kevinushey/skeleton")
expect_true(renv_package_installed("skeleton"))
})
|
72d989fc7b7f22f301cd0d31cdba615a9fbd09c3 | 891b26bc2b3dfaeaf58d3980c7c99fa48d4d74e8 | /code/plot4.R | adfef0ccbb17796767083a3e86a49c29360fd774 | [] | no_license | asuresh4/Power-Consumption-Analysis | a3000789183a668bf88521029bd4c0e2d72a49b1 | 259cccf1ec74398f5ee0f1666fc8d08f6ef2d182 | refs/heads/master | 2021-01-17T05:55:08.605607 | 2016-07-04T03:00:34 | 2016-07-04T03:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,552 | r | plot4.R | ## Download file
# Download the household power consumption dataset (zipped) in binary mode.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", mode = "wb",
destfile = "./data/Course4/ExData_Plotting1/household_power_consumption.zip")
## Unzip downloaded file
## NOTE(review): unzip() extracts into the current working directory, while
## read.csv() below reads from ./data/Course4/ -- confirm these match.
unzip("./data/Course4/ExData_Plotting1/household_power_consumption.zip")
## Read data from textfile (semicolon-separated)
rawData <- read.csv("./data/Course4/household_power_consumption.txt", sep = ";", header = TRUE)
## Extract required data: only the two days of interest (1-2 Feb 2007)
reqData <- subset(rawData,Date == "1/2/2007" | Date == "2/2/2007")
## Change format of Date and Time fields: combine into POSIXlt timestamps,
## then replace Date with the weekday name and rename the column to "Day"
reqData$Time <- strptime(paste(reqData$Date,reqData$Time),"%d/%m/%Y %H:%M:%S")
reqData$Date <- weekdays(as.Date(reqData$Date,"%d/%m/%Y"))
colnames(reqData)[1] <- "Day"
## Variable containing vector of Global Active Power
## Convert factor variables to numeric vectors via their levels.
## NOTE(review): this idiom assumes read.csv() produced factors, i.e.
## stringsAsFactors = TRUE (the default only before R 4.0) -- verify.
## For figure (1,1)
reqData$Global_active_power <- as.numeric(levels(reqData$Global_active_power)[reqData$Global_active_power])
## For figure (2,1)
reqData$Sub_metering_1 <- as.numeric(levels(reqData$Sub_metering_1)[reqData$Sub_metering_1])
reqData$Sub_metering_2 <- as.numeric(levels(reqData$Sub_metering_2)[reqData$Sub_metering_2])
## reqData$Sub_metering_3 is already in numeric format
## For figure (1,2)
reqData$Voltage <- as.numeric(levels(reqData$Voltage)[reqData$Voltage])
## For figure (2,2)
reqData$Global_reactive_power <- as.numeric(levels(reqData$Global_reactive_power)[reqData$Global_reactive_power])
## Select device to plot: a PNG file with transparent background
png(file = "./data/Course4/ExData_Plotting1/figures/plot4.png", bg = "transparent")
## Make the 2x2 grid for plots
par(mfrow = c(2,2))
## Plot line graphs
## Figure (1,1)
with(reqData,plot(Time,Global_active_power,type = "l", xlab = "", ylab = "Global Active Power"))
## Figure (1,2)
with(reqData,plot(Time,Voltage,type = "l", xlab = "", ylab = "Voltage"))
## Figure (2,1): three sub-metering series overlaid in black/red/blue
with(reqData,plot(Time,Sub_metering_1,type = "l", xlab = "", ylab = "Energy sub metering"))
with(reqData,lines(Time,Sub_metering_2, col = "red"))
with(reqData,lines(Time,Sub_metering_3, col = "blue"))
## Legend
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col = c("black","red","blue"), lwd = 1, box.col = "transparent")
## Figure (2,2)
with(reqData,plot(Time,Global_reactive_power,type = "l", xlab = "", ylab = "Global Reactive Power"))
## Switch off the device
dev.off()
fc979266a76b0abae3899b2e68ae5ace212e5195 | 4bb50955e639c6fa4c516e81d46f13b4f2b7f6e4 | /docs/plot_results.R | 03f914bce6c3207d94d7fff953d0959b8300e74a | [] | no_license | danielhstahl/credit_faas_demo | 984bfa481ed1d246e3e27b76d2ed185da790318d | 7508859cb99984605b58df039583a717597d94cb | refs/heads/master | 2023-01-15T14:04:02.654172 | 2020-11-27T13:07:48 | 2020-11-27T13:07:48 | 147,034,307 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 568 | r | plot_results.R | #to run, use setwd([this directory])
# Load the JSON parsing package.
# NOTE(review): library() is generally preferred over require() for hard
# dependencies, since require() only warns (returns FALSE) on failure.
require(jsonlite)
# Read the two loss-density estimates: full loan-level model vs aggregated.
res_full=fromJSON('./loan_density_full.json')
res_aggr=fromJSON('./loan_density_aggr.json')
# Return the single largest value appearing in either numeric vector.
get_max_vectors <- function(vec1, vec2) {
  # take the maximum of each input, then the maximum of the two results
  per_vector_max <- vapply(list(vec1, vec2), max, numeric(1))
  max(per_vector_max)
}
# Save the comparison plot as a JPEG: full-model density in blue,
# aggregated-model density in red, axes sized to fit both curves.
jpeg('density_compare.jpg')
plot(res_full$x, res_full$density, type='l',
col='blue', xlab="Dollar Losses", ylab="Density",
ylim=c(0, get_max_vectors(res_aggr$density, res_full$density)*1.05),
xlim=c(.3*min(res_aggr$x), 0)
)
lines(res_aggr$x, res_aggr$density, col='red')
dev.off()
046661a6755e35f4d12bf71703893bdc79836c87 | fb9cf1a37ac3847b136e152269b477f83f4bf723 | /R/hqa.r | 7d60881264e04f48ff24d4143cbc1c36965e4769 | [] | no_license | cran/qha | 58bc0f91ff55cf8e1281c16ab7c7b3694c5acb9d | 3e7b893dbf412f0411afde9c5158ea566c80a4a2 | refs/heads/master | 2020-12-22T17:35:12.268824 | 2016-09-10T01:29:30 | 2016-09-10T01:29:30 | 236,876,607 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,475 | r | hqa.r | ##########################################################################################################
## Funcion de enlace: Combinaci?n del AAC y el AFM ##
## ##
## Elaborado por: Martha Luc?a Corrales ##
## Revisado y modificado por: Campo El?as Pardo INGL?S Fecha ##
## Universidad Nacional de Colombia ##
## ##
## requiere:ade4,FactoMineR,FactoClass library(ade4) ##
## library(FactoMineR) ##
## library(FactoClass) ##
## hqa ( base = a data frame with individuals and 3 or 4 columns ##
## with the longitudinal active variable. ##
## La estructura de base es ID,MOD,DURATION or ##
## ID, START-TIME, END-TIME, MODALIDAD ##
## o la tabla ya con los conteos realizados ##
## conteos = boolean, if TRUE do data frame ID,MOD,DURATION (default TRUE) ##
## vector = a vector containing the number of categories for each fuzzy variable ##
## ilustra = data frame with the illustrative variables (deafault NULL) ##
## ilustc = a vector containing the number of variables in each ilustrative group ##
## ilustype = tipo de variables que contiene ##
## nfact = number of axes to use into the factorial analysis (by default 5) ##
## nfcl = number of dimensions for the classification(default 5 ) ##
## k.clust = number of class (default NULL) ##
## combinat = boolean, if TRUE do combination HQA and MFA (default TRUE) ##
## durat = boolean, if TRUE calculate duration by the function (default FALSE) ##
## units = time: "secs", "mins", "hours", "days", "weeks", "months", ##
## "years" (default = NULL) ##
## periodos = a vector containing the duration of each period of time ##
## tableclass = boolean, if TRUE Suggests the number of axes to realize the classificacion ##
## clasifica = boolean, if TRUE realizes the classificacion ##
## pesos = a vector of row weights (default NULL) ##
##########################################################################################################
# Qualitative Harmonic Analysis (QHA), optionally combined with Multiple
# Factor Analysis (MFA), followed by an optional Ward classification of the
# individuals on their factorial coordinates.
#
# base       : data frame / matrix of longitudinal records or counts
# conteos    : if TRUE, build the counts table from (ID, state, duration)
# units      : time units passed to duration() when durat = TRUE
# durat      : if TRUE, compute state durations from the raw records first
# periodos   : vector with the duration of each time period (for counts)
# pesos      : optional vector of row weights
# ilustra    : data frame of illustrative variables (or NULL)
# ilustc     : number of variables in each illustrative group
# ilust.type : type of the illustrative variables
# nfact      : number of axes for the factorial analysis
# nfcl       : number of dimensions used for the classification
# k.clust    : number of classes (chosen automatically when NULL)
# combinat   : if TRUE, combine QHA with MFA; otherwise QHA only
# vector     : number of categories of each fuzzy variable
# tableclass : if TRUE, let tableclass() suggest the number of axes
# clasifica  : if TRUE, perform the classification step
#
# Returns list(HQA, Clases, Active, Ilust).
hqa <- function(base, conteos=FALSE, units=NULL,
                durat=FALSE, periodos=NULL, pesos = NULL,
                ilustra=NULL, ilustc = NULL, ilust.type = NULL,
                nfact=5, nfcl=5, k.clust=NULL, combinat=TRUE,
                vector, tableclass=FALSE, clasifica=TRUE)
{
  ## ---- input validation --------------------------------------------------
  if (!is.matrix(base) && !is.data.frame(base)) {
    stop("base is not a matrix nor a dataframe\n")
  }
  if (is.data.frame(base) && prod(dim(base)) == 0) {
    stop("base is an empty data frame\n")
  }

  ## ---- optional preprocessing --------------------------------------------
  if (durat) {
    # compute the time spent in each state of the longitudinal variable
    base <- duration(base, units)
  }
  if (conteos) {
    # build the counts table from the duration records
    cnt <- durationtotableA(base, periodos)
    base <- data.frame(cnt$Conteo)
  }

  ## ---- factorial analysis ------------------------------------------------
  if (combinat) {
    # combination of QHA and MFA; AAC is an MFA object
    AAC <- combination(base, vector, ilustra, ilustc, ilust.type, pesos)
    tabla <- AAC$global.pca$ind$coord
    # choose the number of axes at the largest gap between consecutive
    # eigenvalues (combined analysis only).
    # bug fix: the original loop `for (i in 1:n-1)` parsed as (1:n)-1 and so
    # iterated from 0; diff() computes the same gaps correctly.  which.max()
    # also guarantees a single index (the original which(== max) could
    # return several on ties).
    val.propios <- AAC$eig[, 1]
    dif.valores <- abs(diff(val.propios))
    nfcl <- which.max(dif.valores)
  } else {
    # qualitative harmonic analysis only; AAC is a dudi object
    AAC <- fuzzy(base, vector, nfact, pesos)
    nfcl <- ncol(AAC$li)
    tabla <- AAC$li
  }

  ## ---- classification ----------------------------------------------------
  if (clasifica) {
    if (is.null(k.clust)) {
      # initial Ward clustering used only to suggest the number of classes:
      # cut the dendrogram at the largest jump between merge heights
      coo <- tabla[, 1:nfcl]
      W <- dist(coo)^2 / nrow(coo) / 2
      HW <- hclust(W, method = "ward.D")
      coord <- as.vector(HW$height)
      # bug fix: same 1:n-1 precedence issue as above; use diff()
      dif.saltos <- abs(diff(coord))
      k.clust <- which.max(dif.saltos)
      k.clust <- length(coord) - k.clust + 1
    }
    if (tableclass) {
      # let tableclass() suggest the number of axes for the classification
      # (note: the logical argument shadows the function name, but R's
      # function-call lookup still finds the function)
      tablaclases <- tableclass(tabla, nfcl, k.clust)
      nfcl <- tablaclases[which.min(tablaclases[, 2]), 1]
    }
    # final classification on the factorial coordinates
    clasificar <- classification(tabla, k.clust, nfcl, pesos)
    # characterize the clusters with the illustrative / active variables
    if (!is.null(ilustra)) {
      Ilust <- cluster.carac(ilustra, clasificar$cluster, tipo.v = "n")
    } else {
      Ilust <- NULL
    }
    Active <- cluster.carac(base, clasificar$cluster, tipo.v = "co")
  } else {
    clasificar <- NULL
    Active <- NULL
    Ilust <- NULL
  }

  return(list(HQA = AAC, Clases = clasificar, Active = Active, Ilust = Ilust))
}
####################################################################################################
|
c3c72a366e6a78e1571e05ebc43a4ceca7f97dfa | b952752e8dee1e3ae599fdb4b940c78d561afb49 | /man/zqtl-ld-svd.Rd | 8b9985a636ca226afe1fa039a04a6b064bb59c67 | [] | no_license | rintukutum/zqtl | d802762c84ee09dd14010d0400a4e3d0093bf8a3 | 6b7310023320bbe88ee36ea169daeef33525e1eb | refs/heads/master | 2021-08-16T22:07:28.490036 | 2017-11-20T11:20:17 | 2017-11-20T11:20:17 | 111,323,826 | 0 | 0 | null | 2017-11-19T19:08:02 | 2017-11-19T19:08:02 | null | UTF-8 | R | false | false | 502 | rd | zqtl-ld-svd.Rd | \name{SVD of LD matrix}
\alias{take.ld.svd}
\title{Calculate covariance matrix and perform SVD}
\description{
Decompose X/sqrt(n) = U D V' so that
LD = V D^2 V' for subsequent analysis.
}
\usage{
take.ld.svd(X, options=list())
}
\arguments{
\item{X}{n x p matrix}
\item{options=list(eigen.tol = 1e-8)}{numerical tolerance of SVD}
\item{options=list(do.stdize = TRUE)}{standardized LD matrix}
}
\value{
returns list(D, V.t)
}
\details{
}
\references{
}
\examples{
}
\author{Yongjin Park}
|
8d9610f3ba0bb001f909310447b897c5511ecfea | 29585dff702209dd446c0ab52ceea046c58e384e | /ggtern/R/plot-construction.R | 1f249da0b42994ad31d801f1fe2a7e03588f0fdb | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 527 | r | plot-construction.R | #' Plot Construction
#'
#' \code{"+.gg"} is a local copy of the ggplot2 add function, no change other than exporting from the ggtern namespace
#' @param e1 first object
#' @param e2 second object
#' @rdname plotconstruction
#' @export
"+.gg" <- function(e1, e2){
# Get the name of what was passed in as e2, and pass along so that it
# can be displayed in error messages
e2name <- deparse(substitute(e2))
if (is.theme(e1)) add_theme( e1, e2, e2name)
else if (is.ggplot(e1)) ggint$add_ggplot(e1, e2, e2name)
}
|
dfc58dd6121aab9346d76a68d68a83e1e8bbbe1b | 7e6464d860d03df8262ba7093735a1a1e2a96c71 | /transcripts.R | e9192080dae697c8c76fd7c87ee7edadf471d1f7 | [] | no_license | transcripts-rproject/transcripts_rproject | 45d51a65de0afef320b9f54a6298829bf0d919d8 | 58e7c46c50c5576b43eb8907b217ff90a36d5808 | refs/heads/master | 2022-04-23T01:04:52.387750 | 2020-04-17T23:21:16 | 2020-04-17T23:21:16 | 254,750,177 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 237 | r | transcripts.R | library(tidyverse)
library(dplyr)
getwd()
setwd("../transcripts_rproject")
getwd()
transcripts17<-read_csv("UCLA_CS_CLASS_OF_2017_NO NAMES_NO IDs.csv")
view(transcripts17)
transcripts17<-transcripts17 %>% select(5:12)
#we did it! yay! |
25979058d5be10fc4bb57aa466c984e7e0049cee | 50a4e150c9d5e44475df2afe24072097f5e16c6b | /another simulation approach.R | 66f4165f6dd3f0649f7a3b323d9c55f24bb771bf | [] | no_license | eamonn2014/PARTIAL-LIKELIHOOD-METHOD | a2f8f2e08da8f9e3569cd976078f3ac4115e5294 | ad7f35da84421c38c3dce5a56f3212d8f2d0a627 | refs/heads/master | 2023-04-17T07:48:33.987805 | 2021-04-12T10:54:10 | 2021-04-12T10:54:10 | 316,730,058 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,731 | r | another simulation approach.R | # Survival3Notes in survival simulation paper folder on one drive slide 20
library(survival)
lambda1=0.058
lambda2=0.116
n=80
timeev1=rexp(n,lambda1)
timeev2=rexp(n,lambda2)
timeev=c(timeev1,timeev2)
timecensor=runif((2*n),1,10)
time=apply(cbind(timeev,timecensor),1,min)
stat=(time==timeev)+0
x=c(rep(0,n),rep(1,n))
fitcox=coxph(Surv(time,stat)~x)
fitkm=survfit(Surv(time,stat)~x)
hr=exp(fitcox$coef)
est=summary(fitkm,times=5)$surv
pvwald=summary(fitcox)$waldtest[3]
pvlrt=summary(fitcox)$logtest[3]
pvscore=summary(fitcox)$sctest[3]
out=data.frame(hr,estlowrisk=est[1],esthighrisk=est[2],pvwald,pvlrt,pvscore)
out
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
library(survival)
# Simulate one two-arm survival trial and summarise a Cox model fit.
#
# i       : simulation index (used only for progress reporting via print)
# n       : number of subjects per arm
# lambda1 : exponential event rate in the low-risk arm
# lambda2 : exponential event rate in the high-risk arm
# f, a    : lower / upper bounds of the uniform censoring distribution
#
# Returns a one-row data.frame with the estimated hazard ratio, the number
# of observed events, the Kaplan-Meier survival estimates at time 5 in each
# arm, and the Wald / likelihood-ratio / score test p-values.
fsim = function(i, n, lambda1, lambda2, f, a)
{
  # exponential event times for each arm
  # (bug fix: the original concatenated timeev twice on consecutive lines)
  timeev1 = rexp(n, lambda1)
  timeev2 = rexp(n, lambda2)
  timeev = c(timeev1, timeev2)
  # independent uniform censoring times
  timecensor = runif((2 * n), f, a)
  # observed time is the earlier of event and censoring; pmin() is the
  # vectorized equivalent of the original apply(cbind(...), 1, min)
  time = pmin(timeev, timecensor)
  stat = (time == timeev) + 0      # 1 = event observed, 0 = censored
  x = c(rep(0, n), rep(1, n))      # arm indicator
  fitcox = coxph(Surv(time, stat) ~ x)
  fitkm = survfit(Surv(time, stat) ~ x)
  hr = exp(fitcox$coef)
  est = summary(fitkm, times = 5)$surv
  pvwald = summary(fitcox)$waldtest[3]
  pvlrt = summary(fitcox)$logtest[3]
  pvscore = summary(fitcox)$sctest[3]
  numev <- sum(fitkm$n.event)
  out = data.frame(hr, numev,
                   estlowrisk = est[1],
                   esthighrisk = est[2],
                   pvwald, pvlrt, pvscore)
  print(i)  # progress indicator for use with sapply()
  return(out)
}
# Smoke-test a single simulated trial.
fsim(i=1, n=80, lambda1=0.058, lambda2= 0.116, f=1, a=9)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Run the full Monte Carlo: nsim trials, then summarise empirical power for
# each test and the average estimates.
set.seed(123)
nsim=1000 ## try it first with 10
a=sapply(c(1:nsim),fsim,n=80,lambda1=0.058, lambda2=0.116,f=1,a=10)
#a # when you try it with 10, check to see how it looks
# sapply() returns a matrix with one column per simulation; flatten it back
# into a data frame with one row per simulation.
bcr=data.frame(apply(a,1,unlist))
head(bcr)
# empirical power (rejection rate at the 5% level) for each test
sum(bcr$pvwald<=0.05)/nsim
sum(bcr$pvlrt<=0.05)/nsim
sum(bcr$pvscore<=0.05)/nsim
# average hazard ratio, survival estimates, and event count
mean(bcr$hr)
mean(bcr$estlowrisk)
mean(bcr$esthighrisk)
mean(bcr$numev)
# The accrual should not be longer than 6 years.
# The added follow-up at the end of accrual can be as long as 2 years but no longer.
# A difference of 10% at 5 years is considered clinically important.
# The accrual rate is about 50/year.
alpha=0.05
beta=0.2
zalpha=qnorm(1-alpha/2)
zbeta=qnorm(1-beta)
sigma=1/2
S0=0.65
S1=0.75
HR=log(S0)/log(S1)
HR
HR=1.5
sqrtnev=(zalpha+zbeta)/(sigma*log(HR))
sqrtnev^2
ac=5
fup=2#3
lambda0=0.058
lambda1=0.0856255 # guess work
Pev0=1-(exp(-lambda0*fup)-exp(-lambda0*(ac+fup)))/(lambda0*ac)
Pev1=1-(exp(-lambda1*fup)-exp(-lambda1*(ac+fup)))/(lambda1*ac)
Pev=mean(c(Pev0,Pev1))
N=191/Pev #705.3517
N
##########################
|
b2f19eb9902647edcddd198e339945d824567ead | fe8bc17d3ade21dab0745ce88f85371b7e075531 | /2.R | 6683d08a85df993b47b4f3c4d26afa60420fad69 | [] | no_license | reptyrax/R | 6ec42c208689781297ca0b1e9057ef68f4775b1d | 1cb2a49119aaee4921c92f4b876c2fd0e6a6e679 | refs/heads/main | 2023-01-07T11:55:15.095196 | 2020-11-12T08:03:04 | 2020-11-12T08:03:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,081 | r | 2.R | #1
regions <- c("Московская область", "Тульская область")
years <- c(2015, 2016)
months <- c("январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь")
comb.num. <- length(regions)*length(years)*length(months) # 2*2*12=48
region <- rep(regions, comb.num./length(region))
years <- rep(years, comb.num./length(years))
months <- rep(months, comb.num./length(months))
file.names <- paste(region, paste(years, paste(months, ".csv", sep = ""), sep = "_"), sep = "_Y")
#2
n <- 27
flow <- c("импорт", "экспорт", "реимпорт", "реэкспорт")
okpd.code <- 22
c <- length(regions)*length(flow)*length(okpd.code) # 2 * 4 * 1 = 8
num <- seq(from = n, to = n + (c - 1))
regions <- rep(regions, c/length(regions))
flow <- rep(flow, each = c/length(flow))
df.seq <- data.frame(num, region, flow, okpd.code)
#3
set.seed(n*10)
df.rand <- data.frame(x.n = rnorm(9, 25, 8), x.u = runif(9, -3, 16), x.t = rt(9, 8))
|
f5b0ad45a66e200fc9e4b5eb7d0afc5975e05e4a | bdac8e85849ee8fae35f2a49e0a25f467bfbf000 | /R/canvas.R | dd7092ed447cffaf0839825ff4bdc52be16a8ba9 | [] | no_license | dshea89/uba | 04483c6430988d2099db32a0170703330b15e88c | 4fde73b307d28c187f20a48e69e2f8ba97d0a8ac | refs/heads/master | 2021-08-30T18:28:18.485705 | 2017-12-19T01:04:57 | 2017-12-19T01:04:57 | 114,703,119 | 0 | 0 | null | 2017-12-19T01:00:45 | 2017-12-19T01:00:45 | null | UTF-8 | R | false | false | 2,435 | r | canvas.R |
## ======================================================================
## All the canvas setting for plots
##
## ======================================================================
## Build the per-series graphical settings (colours, plot characters, line
## types) used by the plotting routines.
##
## n       : number of series
## col     : if TRUE, ask canvas.cols() for colours; otherwise all black (1)
## pch     : pool of plotting symbols, recycled across the n series
## colored : passed through to canvas.cols()
## ...     : further arguments forwarded to canvas.cols()
canvas.setup <- function(n, col = T, pch = c(21:25,4:6,8:7), colored = T, ...) {
  # colours: either a colour ramp or plain black for every series
  if (col) {
    series.cols <- canvas.cols(n, colored = colored, ...)
  } else {
    series.cols <- rep(1, n)
  }

  # plot characters: recycle the supplied symbols over the n series,
  # falling back to symbol 21 when an empty vector is given
  n.sym <- length(pch)
  if (!n.sym) pch <- 21
  if (n.sym > 1) {
    series.pchs <- pch[(0:(n - 1) %% n.sym) + 1]
  } else {
    series.pchs <- rep(pch, n)
  }

  list(cols = series.cols, pchs = series.pchs, lty = 1:n)
}
## ======================================================================
##
##
## ======================================================================
## Draw a standalone legend panel on a fresh, otherwise-empty plot.
## leg.text : character vector of legend labels
## cols     : colours (used both as line colour and as point fill)
## pchs     : plotting characters for the legend keys
canvas.leg <- function(leg.text,cols,pchs) {
plot.new()
# wide bottom margin, no left margin -- the panel holds only the legend
par(mar=c(5,0,2,1)+0.1)
plot.window(c(0,1), c(0,1))
legend(x=0,y=1,leg.text,
lwd=0.5,y.intersp=2,
col=cols,pt.bg=cols,
lty=rep(1,length(cols)),pt.lwd=0.5,
pch=pchs,
merge=T,text.width=0.9,
bg=c("lightgrey"),
bty="n",
cex=0.6)
}
## ======================================================================
##
##
## ======================================================================
## Return a vector of n colours for plotting.
##
## n       : number of colours requested
## colored : if TRUE use a colour ramp, otherwise a grayscale ramp
## surf    : if TRUE (and colored), use the surface palette 'orskblu7'
##           instead of the default 'RdOrBlu'
## ...     : ignored (kept for call-site compatibility)
##
## Fixes: T/F defaults replaced with TRUE/FALSE (T/F are reassignable), the
## dangling-else ladder made explicit with braces, and the colour vector is
## now returned visibly (the original's trailing assignment returned it
## invisibly).
canvas.cols <- function(n = 64, colored = TRUE, surf = FALSE, ...) {
  if (colored) {
    scheme <- if (surf) 'orskblu7' else 'RdOrBlu'
  } else {
    scheme <- 'mygrayscale'
  }
  color.scheme(scheme, n)
}
## RColorBrewer
## Look up a named colour scheme and interpolate it to n colours via
## grDevices::colorRampPalette. Palette names follow RColorBrewer-style
## conventions; an unknown name is an error.
color.scheme <- function(name, n = 64) {
  # switch() yields the anchor colours for the requested scheme; the ramp
  # is then applied once, outside the switch
  base.cols <- switch(name,
    'rdyblu7' = c('#d73027','#fc8d59','#fee090',
                  '#ffffbf', '#e0f3f8','#91bfdb','#4575b4'),
    'orskblu7' = c('#FF6600','#FFCC00','#FFFF66',
                   '#CCFFFF','#99FFFF','#33FFFF','#0099FF'),
    'RdYlBlu' = c('#d73027', '#CCFFFF','#0099FF'),
    'RdOrBlu' = c('#C11A1A', '#F8B230','#4D2ADE'),
    'mygrayscale' = c('#CACACA','#8C8C8C','#373737')
  )
  cs <- colorRampPalette(base.cols)(n)
  cs
}
## Convert a perspective keyword into a viewing angle (theta).
## The four corner views (x0y0, x1y0, x1y1, x0y1) are expressed as 90-degree
## offsets from theta.init; the two edge-on views ("x01", "y01") are
## absolute angles independent of theta.init.
canvas.theta <- function(theta.persp, theta.init = -45) {
  corner.offset <- c(x0y0 = 0, x1y0 = 90, x1y1 = 180, x0y1 = 270)
  if (theta.persp %in% names(corner.offset)) {
    theta <- theta.init + corner.offset[[theta.persp]]
  } else {
    theta <- switch(theta.persp, "x01" = 0, "y01" = 90)
  }
  theta
}
|
6a2e6a4e8923b6bab3d1960b579cb4210889eb75 | edd4f57fd1bc652a9fba6d62e22e7cc6be46ed19 | /code/functions/dataGen.R | 8c125604a6720e9c6053b85252a21671e69d41a7 | [] | no_license | EdoardoCostantini/latentMAR | 2031dae042f3caf423982b65ac3ce6e621fa9860 | 91e778620a32c67a53f49039c9b12c836a5fa257 | refs/heads/master | 2023-07-13T05:22:15.124900 | 2021-08-25T07:46:42 | 2021-08-25T07:46:42 | 390,039,896 | 0 | 0 | null | 2021-08-25T07:44:20 | 2021-07-27T15:45:54 | R | UTF-8 | R | false | false | 2,219 | r | dataGen.R | # Project: latentMAR
# Objective: function to generate data according to a CFA model
# Author: Edoardo Costantini
# Created: 2021-07-27
# Modified: 2021-07-27
dataGen <- function(parms, cond){
  # Generate data from a confirmatory factor analysis (CFA) model with a
  # simple measurement structure (each item loads on exactly one factor).
  #
  # parms : list with L (number of latent variables), P (number of items),
  #         J (items per factor, so P = L * J), N (sample size),
  #         lv_cov / lv_var / lv_mean (latent covariance, variance, mean),
  #         item_var / item_mean (observed-item rescaling).
  # cond  : list (or one-row data frame) with lambda, the common loading.
  #
  # Returns list(dat_ob, dat_lv, Phi, Theta, Lambda): the observed data,
  # the latent scores, and the population matrices.
  #
  # Improvements over the original: the per-row loop computing the observed
  # scores is replaced with a single matrix product, and the element-wise
  # Theta loop is vectorized; results are numerically equivalent.

  # Latent-variable covariance matrix (compound symmetric)
  Phi <- matrix(parms$lv_cov, parms$L, parms$L)
  diag(Phi) <- parms$lv_var

  # Factor loadings (constant across items)
  lambda <- rep(cond$lambda, parms$P)

  # Uncorrelated item errors: residual variance chosen so each item has
  # unit variance given its loading and the latent variance
  Theta <- diag(1 - lambda^2 * Phi[1, 1], nrow = parms$P)

  # Simple-structure loading matrix, factor complexity 1 (Bollen 1989 p234)
  Lambda <- matrix(0, nrow = parms$P, ncol = parms$L)
  for (j in seq_len(parms$L)) {
    rows <- ((j - 1) * parms$J + 1):(j * parms$J)
    Lambda[rows, j] <- lambda[rows]
  }

  # Sample latent scores and item errors
  scs_lv <- mvrnorm(parms$N, rep(parms$lv_mean, parms$L), Phi)
  scs_delta <- mvrnorm(parms$N, rep(0, parms$P), Theta)

  # Observed scores: x = eta %*% t(Lambda) + delta, all rows at once
  x <- scs_lv %*% t(Lambda) + scs_delta

  # Rescale observed scores to the requested item variance and mean
  x <- as.data.frame(x * sqrt(parms$item_var) + parms$item_mean)

  # Give meaningful names
  colnames(x) <- paste0("z", seq_len(ncol(x)))
  colnames(scs_lv) <- paste0("lv", seq_len(ncol(scs_lv)))

  return(
    list(dat_ob = as.data.frame(x),
         dat_lv = as.data.frame(scs_lv),
         Phi = Phi,
         Theta = Theta,
         Lambda = Lambda)
  )
}
6199158ec902193765e0257f809620d131b1e453 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed_and_cleaned/10139_0/rinput.R | b1b6eac7f1ff4580f0cf2c58913a7648379e643b | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
testtree <- read.tree("10139_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10139_0_unrooted.txt") |
22f8de8b2cf462af50de855cb44b8040ead1d686 | 6814891e77ca4c264b3e6dfdedb0b623ce1461b1 | /R/greedy.R | 6ab79fcd8180a270f6a6d1d91595820ad52452e9 | [] | no_license | kshirley/summarytrees | b276ea49d5a785e2311b69e2c6c8c6cb1a2035aa | 3bb1f6c65da6c7c8783c03225d4977d48a7f58b0 | refs/heads/master | 2020-05-17T07:58:40.698466 | 2016-01-13T03:33:52 | 2016-01-13T03:33:52 | 30,562,705 | 38 | 9 | null | null | null | null | UTF-8 | R | false | false | 8,904 | r | greedy.R | #' Compute summary trees using greedy algorithm
#'
#' Compute a series of summary trees for a given
#' input tree using a greedy algorithm. This algorithm returns summary
#' trees whose entropies are typically within 5\% of the maximum
#' possible entropy (based on a series of experiments described by
#' the paper cited in the references), but there is no performance guarantee
#' such as that provided by the approximation algorithm. The greedy
#' algorithm runs substantially faster than the optimal algorithm, which
#' is also described in the paper in the references.
#'
#' @param node integer vector containing a set of positive, unique integers
#' denoting the identities of the nodes of the tree
#'
#' @param parent integer vector containing the (positive integer) id of the
#' parent of each node in the tree. One and only one element of
#' \code{parent} must be set to zero, indicating which node is the root of
#' the tree.
#'
#' @param weight numeric vector containing non-negative weight of each node
#' in the tree.
#'
#' @param label character vector containing the label for each node in the tree
#'
#' @param K integer. The number of nodes in the largest summary tree.
#'
#' @return A list of five objects:
#' \enumerate{
#' \item \code{data} a data frame containing the re-ordered input data,
#' as returned by the \code{\link{order.nodes}} function, including the level
#' of each node.
#' \item \code{tree} a matrix containing the structure of the tree as
#' returned by the \code{\link{order.nodes}} function.
#' \item \code{summary.trees} a length-K list of matrices, where each matrix
#' represents the
#' summary tree with K nodes. The kth matrix contains k rows and 5 columns:
#' \enumerate{
#' \item the first column contains the node ID, which is NA if the node in
#' the summary tree is an 'other' cluster
#' \item the second column contains the ID of the node's parent
#' \item the third column contains the weight of the node in the summary
#' tree
#' \item the fourth column contains the 'type' of node in the summary tree,
#' where 1 indicates a singleton (whose weight in the summary tree is equal
#' to its weight in the input tree), 2 indicates a subtree (whose weight in
#' the summary tree is equal to the sum of the weights of the nodes in its
#' subtree in the input tree), and 3 indicates an 'other cluster'.
#' \item the fifth column contains the label, which is the same as the input
#' label for summary tree nodes of type = 1 or type = 2, but for summary
#' tree nodes of type 3, the label is 'x others' where 'x' indicates how
#' many sibling nodes comprise the 'other' cluster.
#' }
#' \item \code{entropy} a (K x 2) matrix containing the entropy of each
#' summary tree
#' \item \code{order} the ordering applied to the original rows of data to
#' produce the 'data' object returned by this function
#' }
#'
#' @references \url{http://www.research.att.com/~kshirley/papers/KarloffShirleyWebsite.pdf}
#'
#' @useDynLib summarytrees
#'
#' @export
#'
#' @examples
#' data(Gauss) # load the sample from the Math Genealogy tree
#'
#' # abbreviate the original full-name labels by first initial + last name:
#' last <- sapply(strsplit(Gauss[, "label"], " "), function(x) x[length(x)])
#' first.initial <- substr(Gauss[, "label"], 1, 1)
#' new.label <- paste(first.initial, last, sep = " ")
#'
#' x <- greedy(node = Gauss[, "node"],
#' parent = Gauss[, "parent"],
#' weight = Gauss[, "weight"],
#' label = new.label,
#' K = 50)
#'
#' # look at a few pieces of the output:
#'
#' # The first 10 rows of the re-ordered input data
#' x$data[1:10, ]
#'
#' # The first 10 rows of the 'tree' representation of the data:
#' x$tree[1:10, ]
#'
#' # The 5-node summary tree
#' x$summary.trees[[5]]
#'
#' # The 20-node summary tree:
#' x$summary.trees[[20]]
#'
#' # The entropy sequence, a (K x 2) matrix with entropies in the second column
#' x$entropy
#'
#' # If you want to reconcile your original copy of the data with the newly
#' # ordered version, check it:
#' s <- sample(dim(Gauss)[1], 10) # randomly select a few rows
#' Gauss[x$order, ][s, ]
#' x$data[s, ]
#' # the node IDs and parent IDs will be different, but the weights and labels
#' # will match.
#'
#'
greedy <- function(node = integer(), parent = integer(), weight = numeric(),
label = character(), K = integer()) {
# convert the node-parent input data to the parent-children data required by
# the C program:
print("Running order.nodes() function to prepare data")
new <- order.nodes(node = node, parent = parent,
weight = weight, label = label)
tree <- new$tree
data <- new$data
# check the input tree:
if (any(duplicated(tree[, 1]))) {
stop("Error: The elements of the first column of 'tree' must be unique.")
}
if (any(duplicated(tree[, 2]))) {
stop("Error: The elements of the second column of 'tree' must be unique.")
}
if (any(duplicated(tree[, 3]))) {
stop("Error: The elements of the third column of 'tree' must be unique.")
}
if (any(tree[, 2] <= tree[, 1])) {
stop("Error: At least one of the children nodes has an equal or lower index
than its parent")
}
if (any(tree[, 3] < tree[, 2])) {
stop("Error: At least one of the 'last child' nodes has a smaller index
than the 'first child' nodes. Child nodes of each parent must
be indexed in increasing order.")
}
# read in total number of nodes:
n <- as.integer(length(weight))
# Check that length of weights is equal to length of labels:
if (n != length(label)) {
stop("Error: The length of the 'weight' vector must be equal to the length
of the 'label' vector.")
}
# check that K <= n:
if (K > n) {
stop("Error: K must be less than or equal to n, the number of nodes in the
input tree.")
}
if (K < 1) stop("Error: K must be greater than or equal to 1.")
# check that the weights are non-negative:
if (any(weight < 0)) stop("Error: All weights must be non-negative.")
# collect some variables and force types:
numparents <- as.integer(dim(tree)[1])
childindex <- as.integer(tree[, 1])
childstart <- as.integer(tree[, 2])
childend <- as.integer(tree[, 3])
# Run the C function:
print("Running C function to compute summary trees")
tmp <- capture.output(.C("Rgreedy",
R_K = as.integer(K),
R_n = as.integer(n),
R_numparents = as.integer(numparents),
R_weight = as.double(data[, "weight"]),
R_childindex = as.integer(childindex),
R_childstart = as.integer(childstart),
R_childend = as.integer(childend),
PACKAGE = "summarytrees"))
print("Computation finished; now formatting output")
# gather the output
output <- tmp[1:(which(tmp == "$R_K") - 1)]
# parse the output to gather it into a list of length K:
out <- parse.greedy.stdout(output)$x
# convert each summary tree into the node-parent-weight-label format:
final <- vector("list", K)
for (k in 1:K) {
# get original labels for these nodes:
lab.vec <- data[out[[k]][, "node"], "label"]
# set up the labels for the 'other' clusters in the summary tree:
# type = 3 indicates that this node is an 'other' cluster
sel <- out[[k]][, "type"] == 3
if (sum(sel) > 0) {
parent.vec <- data[out[[k]][sel, "node"], "parent"]
for (i in 1:sum(sel)){
total.children <- tree[tree[, 1] == parent.vec[i], 3] - tree[tree[, 1] == parent.vec[i], 2] + 1
not.in.cluster <- sum(data[out[[k]][, "node"], "parent"] == parent.vec[i]) - 1
num.other.cluster <- total.children - not.in.cluster
lab.vec[sel][i] <- paste(num.other.cluster, "others", sep=" ")
}
}
# assemble in a data.frame:
final[[k]] <- data.frame(node = out[[k]][, "node"],
parent = data[out[[k]][, "node"], "parent"],
weight = out[[k]][, "weight"],
type = out[[k]][, "type"],
label = lab.vec,
stringsAsFactors = FALSE)
final[[k]] <- final[[k]][order(final[[k]][, "type"],
final[[k]][, "node"]), ]
rownames(final[[k]]) <- 1:k
final[[k]][final[[k]][, "type"] == 3, 1] <- NA
}
# Compute entropy of each of the K trees:
norm <- function(x) if (sum(x) == 0) numeric(length(x)) else x/sum(x)
xlogx <- function(x) ifelse(x == 0, 0, x*log(x, 2))
ent <- function(x) -sum(xlogx(norm(x)))
ent.vec <- sapply(final, function(x) ent(x[, "weight"]))
entropy <- cbind(k = 1:K, entropy = ent.vec)
return(list(data = data,
tree = tree,
summary.trees = final,
entropy = entropy,
order = new$order))
}
|
05de0db6fef5fda38bbc42a1992f514b78e664c9 | e9424b5876e734e6b0b8e50e3e3cf05a869446a2 | /RRegrs/R/RRegrs_Functions.R | 058290250187e6f075af286c2d3ba787c29db111 | [
"BSD-2-Clause"
] | permissive | enanomapper/RRegrs | a200ef8981230b696bdecf9e4cc00d4024eb65fd | e5194f812e949be7694023ede0e62e27264a13b1 | refs/heads/master | 2021-01-15T16:57:13.454179 | 2018-04-23T09:51:46 | 2018-04-23T09:51:46 | 23,657,385 | 21 | 15 | null | 2015-07-24T07:56:42 | 2014-09-04T10:02:04 | R | UTF-8 | R | false | false | 196,536 | r | RRegrs_Functions.R | # ======================================================================
# RRegrs - R Regressions
# ======================================================================
# Get the best regression models for one dataset using R caret methods
# eNanoMapper.net
# -------------------------------------------------------------------------------------------------------------
# AUTHORS:
# -------------------------------------------------------------------------------------------------------------
# Georgia Tsiliki: ChemEng - NTUA, Greece, g_tsiliki@hotmail.com
# Cristian R. Munteanu: RNASA-IMEDIR, University of A Coruna, Spain, muntisa@gmail.com
# Jose A. Seoane: Stanford Cancer Institute, USA, seoane@stanford.edu
# Carlos Fernandez-Lozano: RNASA-IMEDIR, University of A Coruna, Spain, carlos.fernandez@udc.es
# Haralambos Sarimveis: ChemEng - NTUA, Greece, hsarimv@central.ntua.gr
# Egon Willighagen: BiGCaT - Maastricht University, Netherlands, egon.willighagen@gmail.com
# -------------------------------------------------------------------------------------------------------------
#library(caret)
#======================================================================================================================
# General functions
#======================================================================================================================
r2.adj.t.funct<- function(obs,pred,num.pred){
  # Adjusted R-squared using the traditional (sum-of-squares) formula.
  # obs      = observed values (y)
  # pred     = predicted values
  # num.pred = number of independent variables (predictors) in the model
  n <- length(obs)
  ss.res <- sum((obs - pred)^2)       # residual sum of squares
  ss.tot <- sum((obs - mean(obs))^2)  # total sum of squares
  r2 <- 1 - ss.res / ss.tot           # plain (traditional) R-squared
  1 - (1 - r2) * ((n - 1) / (n - num.pred - 1))
}
#----------------------------------------------------------------------------------------------------------------------
r2.adj.funct<- function(obs,pred,num.pred){
  # Adjusted R-squared based on the squared Pearson correlation
  # between observed and predicted values.
  # obs      = observed values (y)
  # pred     = predicted values
  # num.pred = number of independent variables (predictors) in the model
  n <- length(obs)
  r2 <- cor(obs, pred)^2
  1 - (1 - r2) * ((n - 1) / (n - num.pred - 1))
}
#----------------------------------------------------------------------------------------------------------------------
rmse.funct<- function(obs,pred){
  # Root mean squared error between observed (obs) and predicted (pred).
  sqrt(mean((pred - obs)^2))
}
#----------------------------------------------------------------------------------------------------------------------
r2.funct<- function(obs,pred){
  # R-squared as the squared Pearson correlation between obs and pred.
  cor(obs, pred)^2
}
#----------------------------------------------------------------------------------------------------------------------
r2.t.funct<- function(obs,pred){
  # R-squared via the traditional 1 - SSres/SStot formula.
  # obs = observed values (y), pred = predicted values
  ss.res <- sum((obs - pred)^2)
  ss.tot <- sum((obs - mean(obs))^2)
  1 - ss.res / ss.tot
}
#----------------------------------------------------------------------------------------------------------------------
AppendList2CSv <- function(l,csvFile) {
  #--------------------------------------------------------------------
  # Append a named LIST of tables to a CSV file.
  #
  # l       = named list; each element must be accepted by write.table
  #           (data frame, matrix, ...)
  # csvFile = path of the CSV file to append to (created if missing)
  #
  # For each element: the element name is written on its own line,
  # followed by the element itself, comma-separated, with row names
  # and an empty header cell above the row-name column (col.names = NA).
  #--------------------------------------------------------------------
  out_file <- file(csvFile, open = "a")  # open connection in append mode
  # FIX: close the connection even if write.table() fails mid-loop;
  # the original closed it only on the success path (resource leak).
  on.exit(close(out_file), add = TRUE)
  for (i in seq_along(l)) {
    # writes the name of the list elements ("A", "B", etc.)
    write.table(names(l)[i], file = out_file, sep = ",", dec = ".",
                quote = FALSE, col.names = FALSE, row.names = FALSE)
    # writes the element contents (the data frames)
    write.table(l[[i]], file = out_file, sep = ",", dec = ".",
                quote = FALSE, col.names = NA, row.names = TRUE)
  }
}
#----------------------------------------------------------------------------------------------------------------------
AppendList2txt <- function(l,csvFile) {
  #--------------------------------------------------------------------
  # Append a named LIST of tables to a space-separated TXT file.
  #
  # l       = named list; each element must be accepted by write.table
  # csvFile = path of the text file to append to (created if missing)
  #
  # Same layout as AppendList2CSv but with a space separator.
  #--------------------------------------------------------------------
  out_file <- file(csvFile, open = "a")  # open connection in append mode
  # FIX: close the connection even if write.table() fails mid-loop;
  # the original closed it only on the success path (resource leak).
  on.exit(close(out_file), add = TRUE)
  for (i in seq_along(l)) {
    # writes the name of the list elements ("A", "B", etc.)
    write.table(names(l)[i], file = out_file, sep = " ", dec = ".",
                quote = FALSE, col.names = FALSE, row.names = FALSE)
    # writes the element contents (the data frames)
    write.table(l[[i]], file = out_file, sep = " ", dec = ".",
                quote = FALSE, col.names = NA, row.names = TRUE)
  }
}
# ************************************
# RRegrs Specific functions
# ************************************
RemNear0VarCols <- function(ds,fDet=FALSE,outFile="ds3.No0Var.csv") {
  #================================================
  # Removal of near zero variance columns (Step 3)
  #================================================
  # inputs:
  # - ds      = dataset frame
  # - fDet    = flag for details (TRUE/FALSE); if TRUE and columns were
  #             removed, the filtered dataset is also written to outFile
  # - outFile = new file name (it could include the path)
  # output = ds without the near-zero-variance columns found by
  #          caret::nearZeroVar(); unchanged if none are found
  ds.Rem0NearVar <- ds        # default output without any modification
  ds.var <- nearZeroVar(ds)   # indices of the near-zero-variance columns
  # FIX: the original guard was "!length(ds.var) == FALSE", which only
  # works via operator precedence ((!length(ds.var)) == FALSE);
  # test the length explicitly instead.
  if (length(ds.var) > 0) {
    ds.Rem0NearVar <- ds[,-(ds.var)]  # keep only the unproblematic columns
    if (fDet == TRUE) {
      # write the corrected dataset as a detail file
      write.csv(ds.Rem0NearVar, outFile, row.names = FALSE, quote = FALSE)
    }
  }
  return(as.data.frame(ds.Rem0NearVar)) # data frame without near-zero-variance cols
}
#----------------------------------------------------------------------------------------------------------------------
ScalingDS <- function(ds,s=1,c=2,fDet=FALSE,outFileName="ds4.scaled.csv") {
  #===========================
  # Scaling dataset (Step 4)
  #===========================
  # s = type of scaling: 1 = normalization (min-max to [0,1]),
  #     2 = standardization (zero mean / unit variance),
  #     3 = reserved placeholder (currently a no-op);
  #     any other value returns the data unscaled.
  # c = first column to scale:
  #     c = 2 -> only the features (column 1, the dependent variable,
  #              is kept as-is and re-attached in front);
  #     otherwise the whole dataset (dependent variable included).
  # fDet        = if TRUE, write the scaled dataset to outFileName
  # outFileName = output file name (it could include the path)
  scaled <- ds  # fall-through: an unknown s leaves the data untouched
  if (s == 1) {
    # NORMALIZATION: (x - min) / (max - min), column-wise
    if (c == 2) {
      feats <- ds[c:ncol(ds)]
      col.max <- apply(feats, 2, max)
      col.min <- apply(feats, 2, min)
      feats.norm <- scale(feats, center = col.min, scale = col.max - col.min)
      scaled <- cbind(ds[,1], feats.norm)  # dependent variable back in front
    } else {
      col.max <- apply(ds, 2, max)
      col.min <- apply(ds, 2, min)
      scaled <- scale(ds, center = col.min, scale = col.max - col.min)
    }
  } else if (s == 2) {
    # STANDARDIZATION: zero mean, unit variance, column-wise
    if (c == 2) {
      feats.std <- scale(ds[c:ncol(ds)], center = TRUE, scale = TRUE)
      scaled <- cbind(ds[,1], feats.std)   # dependent variable back in front
    } else {
      scaled <- scale(ds, center = TRUE, scale = TRUE)
    }
  }
  # s == 3: placeholder for a future scaling type (intentionally no-op)
  if (fDet == TRUE) {
    # write the result into a separate file
    write.csv(scaled, outFileName, row.names = FALSE, quote = FALSE)
  }
  return(as.data.frame(scaled))  # return the scaled data frame
}
#----------------------------------------------------------------------------------------------------------------------
RemCorrs <- function(ds,fDet,cutoff,outFile) {
  # ========================================
  # Remove the correlated columns (Step 5)
  # ========================================
  # ds      = dataset frame
  # fDet    = flag for details (TRUE/FALSE)
  # cutoff  = correlation cut off (ex: 0.9) passed to caret::findCorrelation
  # outFile = new file name (it could include the path)
  # When fDet is TRUE it generates detail files:
  #  - the dataset without the correlated columns
  #  - the initial correlation matrix (CSV) and its plot (PNG)
  #  - (the after-removal matrix/plot code is currently commented out below)
  # Returns the dataset without the correlated columns; if none exceed
  # the cutoff, the input dataset is returned unchanged.
  # ------------------------------------------------------------------------
  # another version of this function should be implemented using
  # pairwise test between i and j descriptors- if(r2>=0.9){remove the j descriptor}
  # using findCorelations() from caret
  #library(corrplot)  #corrplot: the library to compute correlation matrix.
  #library(caret)
  DataSet <- ds                  # input dataset
  DataSetFiltered.scale <- ds    # default result without any modification
  # calculate the correlation matrix for the entire file!
  # NOTE(review): the original authors flagged this themselves — cor() is
  # computed over ALL columns, including the dependent variable (column 1),
  # so findCorrelation may drop predictors correlated with the response.
  # Left as-is to preserve behavior; TODO confirm intended semantics.
  corrMat <- cor(DataSet)        # correlation matrix
  if (fDet==TRUE) {
    CorrMatFile <- paste(outFile,".corrMAT.csv",sep='')
    # write the initial correlation matrix as an output file
    write.csv(corrMat, CorrMatFile, row.names=F, quote=F)
    # Plot the matrix, clustering features by correlation index
    # corrplot(corrMat, order = "hclust")
    # plot the correlation plot BEFORE correlation removal
    CorrPlotFile <- paste(outFile,".corrs.png",sep='')
    png(height=1200, width=1200, pointsize=25, filename=CorrPlotFile)
    col1 <-rainbow(100, s = 1, v = 1, start = 0, end = 0.9, alpha = 1)
    corrplot(corrMat,tl.cex=3,title="Initial feature correlation matrix",
             method="circle",is.corr=FALSE,#type="full",
             cl.lim=c(-1,1),cl.cex=2,addgrid.col="red",
             addshade="positive",col=col1,
             addCoef.col = rgb(0,0,0, alpha = 0.6), mar=c(0,0,1,0), diag= FALSE)
    dev.off()
  }
  highlyCor <- findCorrelation(corrMat, cutoff) # indices of correlated columns
  # if no correlation found, return the original dataset
  if (length(highlyCor) == 0){
    return (ds)
  }
  # Apply correlation filter with the cutoff only if correlations exist,
  # removing every variable correlated above the cutoff
  DataSetFiltered.scale <- DataSet[,-highlyCor]
  if (fDet==TRUE) {
    # recompute correlations on the remaining columns
    # (only used by the commented-out plotting/CSV code below)
    corrMat <- cor(DataSetFiltered.scale)
    # plot again the rest of correlations after removing the correlated columns
    #corrplot(corrMat, order = "hclust")
    # plot the correlation plot AFTER correlation removal
    #CorrPlotFile2 = paste(outFile,".afterRemCorr.png",sep='')
    #png(height=1200, width=1200, pointsize=25, file=CorrPlotFile2)
    #col1 <-rainbow(100, s = 1, v = 1, start = 0, end = 0.9, alpha = 1)
    #corrplot(corrMat,tl.cex=3,title="Correlation matrix after removing correlated features",
    #         method="circle",is.corr=FALSE,type="full",
    #         cl.lim=c(-1,1),cl.cex=2,addgrid.col="red",
    #         addshade="positive",col=col1,
    #         addCoef.col = rgb(0,0,0, alpha = 0.6), mar=c(0,0,1,0), diag= FALSE)
    #dev.off()
    # correlation matrix for the rest of the columns after removal
    #CorrMatFile2 <- paste(outFile,".corrMAT4Selected.csv",sep='')
    # write correlation matrix as output file
    #write.csv(corrMat, CorrMatFile2, row.names=F, quote=F)
    # write the new dataset without the correlated features
    write.csv(DataSetFiltered.scale, outFile, row.names=F, quote=F)
  }
  return(as.data.frame(DataSetFiltered.scale))
}
#----------------------------------------------------------------------------------------------------------------------
DsSplit <- function(ds,trainFrac=3/4,fDet=FALSE,PathDataSet="",iSeed) {
  # ===============================================
  # Dataset splitting in Training and Test (Step 6)
  # ===============================================
  # Inputs
  # - ds          = frame dataset object (dependent variable in column 1)
  # - trainFrac   = fraction of rows assigned to the training set
  # - fDet        = flag for details (TRUE/FALSE); if TRUE, the two
  #                 subsets are written as CSV files into PathDataSet
  # - PathDataSet = pathway for results
  # - iSeed       = RNG seed so each split is reproducible
  # Output = list with training ("train") and test ("test") data frames
  #          (to be used for regressions in other functions)
  set.seed(iSeed)
  # groups = 2 forces createDataPartition NOT to partition based on
  # quantiles of numeric values; the split is on plain row indices
  inTrain <- createDataPartition(1:dim(ds)[1],p = trainFrac,list = FALSE,groups=2)
  ds.train <- ds[inTrain,]   # TRAIN dataset frame
  ds.test  <- ds[-inTrain,]  # TEST dataset frame
  if (fDet == TRUE) {
    # write the TRAIN and TEST set files without row indices
    # FIX: paste() with its default sep=" " produced file names containing
    # spaces (e.g. "ds.Train.split 1 .csv"); paste0() removes them.
    outTrain <- file.path(PathDataSet, paste0("ds.Train.split", iSeed, ".csv"))
    write.csv(ds.train, outTrain, row.names = FALSE)
    outTest <- file.path(PathDataSet, paste0("ds.Test.split", iSeed, ".csv"))
    write.csv(ds.test, outTest, row.names = FALSE)
  }
  return(list("train" = ds.train, "test" = ds.test))
}
# *************************************
# REGRESSION METHODS
# *************************************
LMreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="") {
  #==================
  # 8.1. Basic LM
  #==================
  # Multiple linear regression (caret method "lm") for one dataset split.
  # Inputs:
  # - my.datf.train, my.datf.test = training and test dataset frames
  #   (dependent variable in column 1)
  # - sCV     = cross-validation type ("repeatedcv", "LOOCV", ...)
  # - iSplit  = index of the split; also used to seed the RNG
  # - fDet    = flag for details (TRUE/FALSE); if TRUE, summaries,
  #             diagnostics (residuals, leverage, Cook's distance,
  #             influence) and a PDF of plots are written out
  # - outFile = output file for the LM details
  # Output: list with the statistics row ("stat.values") and the fitted
  #         caret model ("model"); tr = train, ts = test,
  #         both = tr + ts = full dataset
  # NOTE(review): the formula net.c~. below appears to assume the first
  # column of the training frame is itself named "net.c" (the RRegrs
  # convention); with a different first-column name, "." would also
  # include the response column as a predictor — TODO confirm callers.
  net.c = my.datf.train[,1] # dependent variable = first column of training set
  RegrMethod <- "lm" # type of regression
  # Define the CV conditions (10 folds, 10 repeats when sCV="repeatedcv")
  ctrl<- trainControl(method=sCV, number=10,repeats=10,
                      summaryFunction=defaultSummary)
  # Train the model using only the training set (seeded for reproducibility)
  set.seed(iSplit)
  lm.fit<- train(net.c~.,data=my.datf.train,
                 method='lm', tuneLength = 10,trControl=ctrl,
                 metric='RMSE')
  #------------------------------
  # Training RESULTS
  #------------------------------
  # caret's $results columns 2..5 hold RMSE, Rsquared, RMSESD, RsquaredSD
  RMSE.tr <- lm.fit$results[,2]
  R2.tr <- lm.fit$results[,3]
  if (sCV == "repeatedcv"){ # if repeated 10-fold CV
    RMSEsd.tr <- lm.fit$results[,4]
    R2sd.tr <- lm.fit$results[,5]
  }
  if (sCV == "LOOCV"){ # if LOOCV: no resampling SDs; zeros are placeholders
    RMSEsd.tr <- 0 # formulas will be added later!
    R2sd.tr <- 0 # formulas will be added later!
  }
  # NOTE(review): any other sCV value leaves RMSEsd.tr / R2sd.tr
  # undefined, and building my.stats below would fail — TODO confirm
  # callers only pass "repeatedcv" or "LOOCV".
  #------------------------------------------------
  # RMSE & R^2, for train/test respectively
  #------------------------------------------------
  lm.train.res <- getTrainPerf(lm.fit)
  lm.test.res <- postResample(predict(lm.fit,my.datf.test),my.datf.test[,1])
  #------------------------------------------------
  # Adj R2, Pearson correlation
  #------------------------------------------------
  pred.tr <- predict(lm.fit,my.datf.train) # predicted Y for training
  pred.ts <- predict(lm.fit,my.datf.test)  # predicted Y for test
  noFeats.fit <- length(predictors(lm.fit)) # no. of features from the fitted model
  Feats.fit <- paste(predictors(lm.fit),collapse="+") # string with the features included in the fitted model
  ds.full <- rbind(my.datf.train,my.datf.test) # pooled train + test data
  pred.both <- predict(lm.fit,ds.full) # predicted Y for the pooled data
  adjR2.tr <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
  adjR2.ts <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
  corP.ts <- cor(my.datf.test[,1],pred.ts) # Pearson correlation on the test set
  adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
  RMSE.both <- rmse.funct(ds.full[,1],pred.both)
  r2.both <- r2.funct(ds.full[,1],pred.both)
  # Generate the output list with statistics for each cross-validation type
  # -------------------------------------------------------------------------
  my.stats <- list("RegrMeth" = RegrMethod,
                   "Split No" = as.numeric(iSplit), # from function param
                   "CVtype" = sCV,                  # from function param
                   "NoModelFeats" = as.numeric(noFeats.fit),
                   "ModelFeats" = Feats.fit,
                   "adjR2.tr" = as.numeric(adjR2.tr),
                   "RMSE.tr" = as.numeric(RMSE.tr),
                   "R2.tr" = as.numeric(R2.tr),
                   "RMSEsd.tr" = as.numeric(RMSEsd.tr),
                   "R2sd.tr" = as.numeric(R2sd.tr),
                   "adjR2.ts"= as.numeric(adjR2.ts),
                   "RMSE.ts" = as.numeric((lm.test.res["RMSE"][[1]])),
                   "R2.ts" = as.numeric((lm.test.res["Rsquared"][[1]])),
                   "corP.ts" = as.numeric(corP.ts),
                   "adjR2.both" = as.numeric(adjR2.both),
                   "RMSE.both" = as.numeric(RMSE.both),
                   "R2.both" = as.numeric(r2.both))
  #---------------------------------------------------------------------
  # Write to file DETAILS for LM for each cross-validation method
  #---------------------------------------------------------------------
  if (fDet==T) { # if the details flag is TRUE, print details about every result
    write("RRegr package | eNanoMapper", file=outFile, append=T)
    write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Fitting Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(data.frame(summary(lm.fit)$coefficients), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(predictors(lm.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
    # NOTE(review): "Trainig" typo below is in the emitted runtime text;
    # left untouched because downstream parsers may match it.
    write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(predictors(lm.train.res),file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(predictors(lm.test.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
    # Variable Importance (max top 20)
    FeatImp <- varImp(lm.fit, scale = F)
    # NOTE(review): length() of a varImp object counts its list slots,
    # not the number of features; nrow(FeatImp$importance) was probably
    # intended. "components" is also unused because the plot call that
    # consumed it is commented out below — TODO confirm.
    components = length(FeatImp) # default plot all feature importance
    if (length(FeatImp)>20){ # if the number of features is greater than 20, use only 20
      components = 20
    }
    # Append feature importance to output details
    AppendList2CSv(FeatImp,outFile)
    fitModel <- lm.fit$finalModel # underlying lm object for the diagnostics below
    # =============================================================================
    # Assessment of Applicability Domain (plot leverage)
    # =============================================================================
    # Residuals of the underlying lm fit
    resids <- residuals(fitModel) # residuals
    write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
    # Leverage / Hat values
    hat.fit <- hatvalues(fitModel) # hat values
    hat.fit.df <- as.data.frame(hat.fit) # hat data frame
    hat.mean <- mean(hat.fit) # mean hat value
    # flag points whose leverage exceeds 3x / 2x the mean hat value
    hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
    write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
    # THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
    thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage threshold
    hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever]) # points with high leverage
    write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
    # Cook's distance (cutoff: the common 4/(n - k - 2) rule)
    cook.dists<- cooks.distance(fitModel)
    cutoff.Cook <- 4/((nrow(my.datf.train)-length(fitModel$coefficients)-2)) # Cook's distance cutoff
    write.table("Cook's distances output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("Cook's distance cutoff: ", cutoff.Cook), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Cook's distances: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(data.frame(cook.dists), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write Cook's distances
    # Influence measures of each observation on the fit
    infl <- influence(fitModel) # produces several statistics of the kind
    write.table("Point influence output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Influences: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(data.frame(infl), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write influence values
    # PDF with the diagnostic plots
    # --------------------------------------------------------------
    pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
    # par(mfrow = c(3, 4)) # all plots into one page!
    plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred") # plot 1
    plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred") # plot 2
    # NOTE(review): length(is.na(x)) always equals length(x), so this
    # condition (x <= x - 3) is always FALSE and the dotchart never
    # draws; sum(is.na(...)) was probably intended — flagged, not changed.
    if(length(is.na(c(FeatImp$importance$Overall)))<=(length(c(FeatImp$importance$Overall))-3)){
      dotchart(as.matrix(FeatImp$importance),main="Feature Importance")} # plot 3
    # Fitted vs Residuals - plot 4
    plot(fitted(fitModel),residuals(fitModel),
         main="Fitted vs. Residuals for Fitted Model",
         xlab="Fitted", ylab="Residuals")
    abline(h = 0, lty = 2)
    # Leverage plots - plot 5
    plot(hat.fit, type = "h",
         main="Leverage for Fitted Model",
         xlab="Index", ylab="Hat")
    abline(h = thresh.lever, lty = 2, col="red") # leverage threshold line
    # Cook's distance - plot 6
    # NOTE(review): same always-FALSE condition as above — this branch
    # (and the 6 standard lm plots inside it) never executes.
    if(length(is.na(cook.dists))<=(length(cook.dists)-3)){
      plot(cook.dists,
           main="Cook's Distance for Fitted Model",
           xlab="Index", ylab="Cook Distance")
      for (p in 1:6) {
        plot(fitModel, which=p, cook.levels=cutoff.Cook) # 6 standard fitting plots
      }
    }
    # plot(FeatImp, top = components,main="Feature Importance") # ERROR !
    dev.off()
    # --------------------------------------------------------------
  }
  return(list(stat.values=my.stats, model=lm.fit)) # return a list with statistics and the full model
}
#----------------------------------------------------------------------------------------------------------------------
GLMreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="") {
#======================================================
# 8.2- GLM stepwise regression - based on AIC (caret)
#======================================================
# Inputs:
# - my.datf.train,my.datf.test = training and test dataset frames;
#   the dependent variable must be the FIRST column of both
# - sCV = type of cross-validation such as repeatedcv, LOOCV, etc.
# - iSplit = index of the dataset split (also used as the RNG seed)
# - fDet = flag for details (TRUE/F); if TRUE, append detailed statistics
#          to outFile and write a per-split PDF with diagnostic plots
# - outFile = output file for GLM details
# Output:
# - list of statistics equal with the header introduced in the main script and the full model
# (tr = train, ts = test, both = tr+ts = full dataset)
# FIX: the guards before the feature-importance dotchart and the Cook's
# distance plot previously tested length(is.na(x)); is.na() returns a
# logical vector the same length as x, so length(is.na(x)) == length(x)
# and the condition "<= length(x)-3" could never be TRUE, silently
# suppressing both plots. The guards now test sum(is.na(x)), i.e. they
# require at least 3 non-NA values before plotting.
# -----------------------------------------------------------------------------------------------
#library(caret)
#attach(my.datf.train) # make available the names of variables from training dataset
net.c = my.datf.train[,1] # dependent variable is the first column in Training set
RegrMethod <- "glmStepAIC" # type of regression
# Define the CV conditions
ctrl<- trainControl(method=sCV, number=10,repeats=10,
                    summaryFunction=defaultSummary)
# Train the model using only training set
set.seed(iSplit) # seed by split index so each split is reproducible
glm.fit<- train(net.c~.,data=my.datf.train,
                method='glmStepAIC', tuneLength=10, trControl=ctrl,
                metric='RMSE')
#------------------------------
# Training RESULTS
#------------------------------
RMSE.tr <- glm.fit$results[,2]
R2.tr <- glm.fit$results[,3]
if (sCV == "repeatedcv"){ # if 10-fold CV
  RMSEsd.tr <- glm.fit$results[,4]
  R2sd.tr <- glm.fit$results[,5]
}
if (sCV == "LOOCV"){ # if LOOCV
  RMSEsd.tr <- 0 # formulas will be added later!
  R2sd.tr <- 0 # formulas will be added later!
}
#------------------------------------------------
# RMSE & R^2, for train/test respectively
#------------------------------------------------
lm.train.res <- getTrainPerf(glm.fit)
lm.test.res <- postResample(predict(glm.fit,my.datf.test),my.datf.test[,1])
#------------------------------------------------
# Adj R2, Pearson correlation
#------------------------------------------------
pred.tr <- predict(glm.fit,my.datf.train) # predicted Y
pred.ts <- predict(glm.fit,my.datf.test) # predicted Y
noFeats.fit <- length(predictors(glm.fit)) # no. of features from the fitted model
Feats.fit <- paste(predictors(glm.fit),collapse="+") # string with the features included in the fitted model
ds.full <- rbind(my.datf.train,my.datf.test)
pred.both <- predict(glm.fit,ds.full) # predicted Y
adjR2.tr <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
adjR2.ts <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
corP.ts <- cor(my.datf.test[,1],pred.ts)
adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
RMSE.both <- rmse.funct(ds.full[,1],pred.both)
r2.both <- r2.funct(ds.full[,1],pred.both)
# Generate the output list with statistics for each cross-validation type
# -------------------------------------------------------------------------
my.stats <- list("RegrMeth" = RegrMethod,
                 "Split No" = as.numeric(iSplit), # from function param
                 "CVtype" = sCV, # from function param
                 "NoModelFeats" = as.numeric(noFeats.fit),
                 "ModelFeats" = Feats.fit,
                 "adjR2.tr" = as.numeric(adjR2.tr),
                 "RMSE.tr" = as.numeric(RMSE.tr),
                 "R2.tr" = as.numeric(R2.tr),
                 "RMSEsd.tr" = as.numeric(RMSEsd.tr),
                 "R2sd.tr" = as.numeric(R2sd.tr),
                 "adjR2.ts"= as.numeric(adjR2.ts),
                 "RMSE.ts" = as.numeric((lm.test.res["RMSE"][[1]])),
                 "R2.ts" = as.numeric((lm.test.res["Rsquared"][[1]])),
                 "corP.ts" = as.numeric(corP.ts),
                 "adjR2.both" = as.numeric(adjR2.both),
                 "RMSE.both" = as.numeric(RMSE.both),
                 "R2.both" = as.numeric(r2.both))
#---------------------------------------------------------------------
# Write to file DETAILS for GLM for each cross-validation method
#---------------------------------------------------------------------
if (fDet==T) { # if flag for details if T, print details about any resut
  write("RRegr package | eNanoMapper", file=outFile,append=T)
  write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Fitting Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(data.frame(summary(glm.fit)$coefficients), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(predictors(glm.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
  # NOTE: "Trainig" typo kept as-is for consistency with the sibling
  # regression functions' output labels (downstream parsing may match it).
  write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(predictors(lm.train.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(predictors(lm.test.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
  # Variable Importance (max top 20)
  FeatImp <- varImp(glm.fit, scale = F)
  components = length(FeatImp) # default plot all feature importance
  if (length(FeatImp)>20){ # if the number of features is greater than 20, use only 20
    components = 20
  }
  # Append feature importance to output details
  AppendList2CSv(FeatImp,outFile)
  fitModel <- glm.fit$finalModel
  # =============================================================================
  # Assessment of Applicability Domain (plot leverage)
  # =============================================================================
  # Residuals
  resids <- residuals(fitModel) # residuals
  write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
  # Leverage / Hat values
  hat.fit <- hatvalues(fitModel) # hat values
  hat.fit.df <- as.data.frame(hat.fit) # hat data frame
  hat.mean <- mean(hat.fit) # mean hat values
  hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
  write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
  #THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
  thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
  hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever]) # points with high leverage
  write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
  # Cook's distance
  cook.dists<- cooks.distance(fitModel)
  cutoff.Cook <- 4/((nrow(my.datf.train)-length(fitModel$coefficients)-2)) # Cook's distance cutoff
  write.table("Cook's distances output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(paste("Cook's distance cutoff: ", cutoff.Cook), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table("Cook's distances: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(data.frame(cook.dists), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
  # Influence
  infl <- influence(fitModel)#produces several statistics of the kind
  write.table("Point influence output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table("Influences: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(data.frame(infl), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
  # PDF with 12 plots
  # --------------------------------------------------------------
  pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
  # par(mfrow = c(3, 4)) # all plots into one page!
  plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred") # plot 1
  plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred") # plot 2
  # FIX: require at least 3 non-NA importance values (was length(is.na(...)),
  # which always equals the vector length and never allowed this plot)
  if(sum(is.na(c(FeatImp$importance$Overall)))<=(length(c(FeatImp$importance$Overall))-3)){
    dotchart(as.matrix(FeatImp$importance),main="Feature Importance")} # plot 3
  # Fitted vs Residuals - plot 4
  plot(fitted(fitModel),residuals(fitModel),
       main="Fitted vs. Residuals for Fitted Model",
       xlab="Fitted", ylab="Residuals")
  abline(h = 0, lty = 2)
  # Leverage plots - plot 5
  plot(hat.fit, type = "h",
       main="Leverage for Fitted Model",
       xlab="Index", ylab="Hat")
  abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
  # Cook's distance - plot 6
  # FIX: same length(is.na(...)) -> sum(is.na(...)) correction as above
  if(sum(is.na(cook.dists))<=(length(cook.dists)-3)){
    plot(cook.dists,
         main="Cook's Distance for Fitted Model",
         xlab="Index", ylab="Cook Distance")
    # 6 standard fitting plots (kept inside the guard, as in the original flow)
    for (p in 1:6) {
      plot(fitModel, which=p, cook.levels=cutoff.Cook)
    }
  }
  # plot(FeatImp, top = components,main="Feature Importance") # ERROR !
  dev.off()
  # --------------------------------------------------------------
}
return(list(stat.values=my.stats, model=glm.fit)) # return a list with statistics and the full model
}
#----------------------------------------------------------------------------------------------------------------------
PLSreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="") {
#================================
# 8.3. PLS regression (caret)
#================================
# Partial Least Squares regression via caret::train(method = 'pls').
# Inputs:
# - my.datf.train, my.datf.test = training and test data frames;
#   the dependent variable must be the FIRST column of both
# - sCV = cross-validation type such as repeatedcv, LOOCV, etc.
# - iSplit = index of the dataset split (also used as the RNG seed)
# - fDet = if TRUE, append detailed statistics to outFile and write a
#          per-split PDF with diagnostic plots
# - outFile = output file for details
# Output:
# - list(stat.values = named list of statistics, model = caret fit)
#   (tr = train, ts = test, both = tr+ts = full dataset)
#library(caret)
net.c = my.datf.train[,1] # dependent variable is the first column in Training set
RegrMethod <- "pls" # type of regression
# Define the CV conditions
ctrl<- trainControl(method = sCV, number = 10,repeats = 10,
                    summaryFunction = defaultSummary)
# Train the model using only training set
set.seed(iSplit)
# Tune ncomp from 1 up to floor((no. of predictors)/5), but at least 1
floor.param<- floor((dim(my.datf.train)[2]-1)/5)
if(floor.param<1){floor.param <- 1}
pls.fit<- train(net.c~.,data=my.datf.train,
                method = 'pls', tuneLength = 10, trControl = ctrl,
                metric = 'RMSE',
                tuneGrid=expand.grid(.ncomp=c(1:floor.param)))
#------------------------------
# Training RESULTS
#------------------------------
RMSE.tr <- pls.fit$results[,2]
R2.tr <- pls.fit$results[,3]
if (sCV == "repeatedcv"){ # if 10-fold CV
  RMSEsd.tr <- pls.fit$results[,4]
  R2sd.tr <- pls.fit$results[,5]
}
if (sCV == "LOOCV"){ # if LOOCV
  RMSEsd.tr <- 0 # formulas will be added later!
  R2sd.tr <- 0 # formulas will be added later!
}
#------------------------------------------------
# RMSE & R^2, for train/test respectively
#------------------------------------------------
lm.train.res <- getTrainPerf(pls.fit)
lm.test.res <- postResample(predict(pls.fit,my.datf.test),my.datf.test[,1])
#------------------------------------------------
# Adj R2, Pearson correlation
#------------------------------------------------
pred.tr <- predict(pls.fit,my.datf.train) # predicted Y
pred.ts <- predict(pls.fit,my.datf.test) # predicted Y
noFeats.fit <- length(predictors(pls.fit)) # no. of features from the fitted model
Feats.fit <- paste(predictors(pls.fit),collapse="+") # string with the features included in the fitted model
ds.full <- rbind(my.datf.train,my.datf.test)
pred.both <- predict(pls.fit,ds.full) # predicted Y
adjR2.tr <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
adjR2.ts <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
corP.ts <- cor(my.datf.test[,1],pred.ts)
adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
RMSE.both <- rmse.funct(ds.full[,1],pred.both)
r2.both <- r2.funct(ds.full[,1],pred.both)
# Generate the output list with statistics for each cross-validation type
# --------------------------------------------------------------------
my.stats <- list("RegrMeth" = RegrMethod,
                 "Split No" = as.numeric(iSplit), # from function param
                 "CVtype" = sCV, # from function param
                 "NoModelFeats" = as.numeric(noFeats.fit),
                 "ModelFeats" = Feats.fit,
                 "adjR2.tr" = as.numeric(adjR2.tr),
                 "RMSE.tr" = as.numeric(min(RMSE.tr)), # these 4 lines correspond to the min of RMSE.tr !!!
                 "R2.tr" = as.numeric(R2.tr[which.min(RMSE.tr)]),
                 "RMSEsd.tr" = as.numeric(RMSEsd.tr[which.min(RMSE.tr)]),
                 "R2sd.tr" = as.numeric(R2sd.tr[which.min(RMSE.tr)]),
                 "adjR2.ts"= as.numeric(adjR2.ts),
                 "RMSE.ts" = as.numeric((lm.test.res["RMSE"][[1]])),
                 "R2.ts" = as.numeric((lm.test.res["Rsquared"][[1]])),
                 "corP.ts" = as.numeric(corP.ts),
                 "adjR2.both" = as.numeric(adjR2.both),
                 "RMSE.both" = as.numeric(RMSE.both),
                 "R2.both" = as.numeric(r2.both))
#---------------------------------------------------------------------
# Write to file DETAILS for GLM for each cross-validation method
#---------------------------------------------------------------------
if (fDet==T) { # if flag for details is TRUE, print details about any result
  write("RRegr package | eNanoMapper", file=outFile,append=T)
  write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(predictors(pls.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(predictors(lm.train.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(predictors(lm.test.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
  # Variable Importance (max top 20)
  # NOTE(review): length(FeatImp) is the number of components of the varImp
  # object, not the number of features, so the 20-feature cap below most
  # likely never triggers -- confirm intended behavior.
  FeatImp <- varImp(pls.fit, scale = F)
  components = length(FeatImp) # default plot all feature importance
  if (length(FeatImp)>20){ # if the number of features is greater than 20, use only 20
    components = 20
  }
  # Append feature importance to output details
  AppendList2CSv(FeatImp,outFile)
  fitModel <- pls.fit$finalModel
  # =============================================================================
  # Assessment of Applicability Domain (plot leverage)
  # =============================================================================
  # Residuals
  resids <- residuals(fitModel) # residuals
  write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
  # ADDED !
  # Hat values are computed by hand here (H = X (X'X)^-1 X') instead of
  # hatvalues(); the very small tol forces solve() to proceed on
  # near-singular cross-products; det() is checked first to skip the
  # exactly-singular case.
  # NOTE(review): the matrices below include column 1 (the response), so
  # the leverage is computed on Y+X jointly -- confirm this is intended.
  # NOTE(review): predVals.pls.ad and Hat.train are computed but unused.
  predVals.pls.ad <- pred.ts
  Traind.pls= as.matrix(my.datf.train)
  Testd.pls = as.matrix(my.datf.test)
  mat.Traind.pls<- t(Traind.pls) %*%(Traind.pls)
  det.Traind.pls<- det(mat.Traind.pls)
  if(det.Traind.pls!=0){
    Hat.train = diag(Traind.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Traind.pls))
    Hat.test = diag(Testd.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Testd.pls))
    # Leverage / Hat values
    hat.fit <- Hat.test # hat values
    hat.fit.df <- as.data.frame(hat.fit) # hat data frame
    hat.mean <- mean(hat.fit) # mean hat values
    hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
    write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
    #THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
    thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
    hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever]) # points with high leverage
    write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
    # Cook's distance ?
  }
  # Influence ?
  # PDF plots
  # --------------------------------------------------------------
  pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
  plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred")
  plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred")
  dotchart(as.matrix(FeatImp$importance),main="Feature Importance")
  # Fitted vs Residuals
  plot(fitted(fitModel),residuals(fitModel),
       main="Fitted vs. Residuals for Fitted Model",
       xlab="Fitted", ylab="Residuals")
  abline(h = 0, lty = 2)
  # Leverage plots (only available when the hat matrix could be computed)
  if(det.Traind.pls!=0){
    plot(hat.fit, type = "h",
         main="Leverage for Fitted Model",
         xlab="Index", ylab="Hat")
    abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
  }
  dev.off()
  # --------------------------------------------------------------
}
return(list(stat.values=my.stats, model=pls.fit)) # return a list with statistics and the full model
}
#----------------------------------------------------------------------------------------------------------------------
LASSOreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="") {
#================================
# 8.4 Lasso Regression (caret)
#================================
# Lasso regression via caret::train(method = 'lasso').
# Inputs:
# - my.datf.train, my.datf.test = training and test data frames;
#   the dependent variable must be the FIRST column of both
# - sCV = cross-validation type such as repeatedcv, LOOCV, etc.
# - iSplit = index of the dataset split (also used as the RNG seed)
# - fDet = if TRUE, append detailed statistics to outFile and write a
#          per-split PDF with diagnostic plots
# - outFile = output file for details
# Output:
# - list(stat.values = named list of statistics, model = caret fit)
#   (tr = train, ts = test, both = tr+ts = full dataset)
#library(caret)
net.c = my.datf.train[,1] # dependent variable is the first column in Training set
RegrMethod <- "lasso.RMSE" # type of regression (label used in the output)
# Define the CV conditions
ctrl<- trainControl(method = sCV, number = 10,repeats = 10,
                    summaryFunction = defaultSummary)
# Train the model using only training set
set.seed(iSplit)
las.fit<- train(net.c~.,data=my.datf.train,
                method='lasso', tuneLength = 10, trControl = ctrl,
                metric='RMSE' ) #,tuneGrid=expand.grid(.fraction= seq(0.1,1,by=0.1)))
#------------------------------
# Training RESULTS
#------------------------------
RMSE.tr <- las.fit$results[,2]
R2.tr <- las.fit$results[,3]
if (sCV == "repeatedcv"){ # if 10-fold CV
  RMSEsd.tr <- las.fit$results[,4]
  R2sd.tr <- las.fit$results[,5]
}
if (sCV == "LOOCV"){ # if LOOCV
  RMSEsd.tr <- 0 # formulas will be added later!
  R2sd.tr <- 0 # formulas will be added later!
}
#------------------------------------------------
# RMSE & R^2, for train/test respectively
#------------------------------------------------
lm.train.res <- getTrainPerf(las.fit)
lm.test.res <- postResample(predict(las.fit,my.datf.test),my.datf.test[,1])
#------------------------------------------------
# Adj R2, Pearson correlation
#------------------------------------------------
pred.tr <- predict(las.fit,my.datf.train) # predicted Y
pred.ts <- predict(las.fit,my.datf.test) # predicted Y
noFeats.fit <- length(predictors(las.fit)) # no. of features from the fitted model
Feats.fit <- paste(predictors(las.fit),collapse="+") # string with the features included in the fitted model
ds.full <- rbind(my.datf.train,my.datf.test)
pred.both <- predict(las.fit,ds.full) # predicted Y
adjR2.tr <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
adjR2.ts <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
corP.ts <- cor(my.datf.test[,1],pred.ts)
adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
RMSE.both <- rmse.funct(ds.full[,1],pred.both)
r2.both <- r2.funct(ds.full[,1],pred.both)
# Generate the output list with statistics for each cross-validation type
# --------------------------------------------------------------------
my.stats <- list("RegrMeth" = RegrMethod,
                 "Split No" = as.numeric(iSplit), # from function param
                 "CVtype" = sCV, # from function param
                 "NoModelFeats" = as.numeric(noFeats.fit),
                 "ModelFeats" = Feats.fit,
                 "adjR2.tr" = as.numeric(adjR2.tr),
                 "RMSE.tr" = as.numeric(min(RMSE.tr)), # these 4 lines correspond to the min of RMSE.tr !!!
                 "R2.tr" = as.numeric(R2.tr[which.min(RMSE.tr)]),
                 "RMSEsd.tr" = as.numeric(RMSEsd.tr[which.min(RMSE.tr)]),
                 "R2sd.tr" = as.numeric(R2sd.tr[which.min(RMSE.tr)]),
                 "adjR2.ts"= as.numeric(adjR2.ts),
                 "RMSE.ts" = as.numeric((lm.test.res["RMSE"][[1]])),
                 "R2.ts" = as.numeric((lm.test.res["Rsquared"][[1]])),
                 "corP.ts" = as.numeric(corP.ts),
                 "adjR2.both" = as.numeric(adjR2.both),
                 "RMSE.both" = as.numeric(RMSE.both),
                 "R2.both" = as.numeric(r2.both))
#---------------------------------------------------------------------
# Write to file DETAILS for GLM for each cross-validation method
#---------------------------------------------------------------------
if (fDet==T) { # if flag for details is TRUE, print details about any result
  write("RRegr package | eNanoMapper", file=outFile,append=T)
  write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(predictors(las.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  AppendList2CSv(predictors(lm.train.res),outFile)
  #write.table(predictors(lm.train.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
  # NOTE(review): unlike the sibling calls above, row.names=F is missing
  # here, so a "1" row name is written in front of "Test Results:".
  write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,quote=F)
  AppendList2CSv(predictors(lm.test.res),outFile)
  #write.table(predictors(lm.test.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
  write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
  # Variable Importance (max top 20)
  # NOTE(review): length(FeatImp) is the number of components of the varImp
  # object, not the number of features, so the 20-feature cap below most
  # likely never triggers -- confirm intended behavior.
  FeatImp <- varImp(las.fit, scale = F)
  components = length(FeatImp) # default plot all feature importance
  if (length(FeatImp)>20){ # if the number of features is greater than 20, use only 20
    components = 20
  }
  # Append feature importance to output details
  AppendList2CSv(FeatImp,outFile)
  fitModel <- las.fit$finalModel
  # =============================================================================
  # Assessment of Applicability Domain (plot leverage)
  # =============================================================================
  # Residuals
  # NOTE(review): residuals here are over the FULL dataset (train+test),
  # unlike the sibling functions which use residuals(fitModel) on the
  # training fit -- confirm this difference is intended.
  resids <- pred.both - ds.full[,1] # residuals
  write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
  write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
  # Hat values are computed by hand here (H = X (X'X)^-1 X'); the very
  # small tol forces solve() on near-singular cross-products and det() is
  # checked first to skip the exactly-singular case.
  # NOTE(review): the matrices below include column 1 (the response), so
  # the leverage is computed on Y+X jointly -- confirm this is intended.
  # NOTE(review): predVals.pls.ad and Hat.train are computed but unused.
  predVals.pls.ad <- pred.ts
  Traind.pls= as.matrix(my.datf.train)
  Testd.pls = as.matrix(my.datf.test)
  mat.Traind.pls<- t(Traind.pls) %*%(Traind.pls)
  det.Traind.pls<- det(mat.Traind.pls)
  if(det.Traind.pls!=0){
    Hat.train = diag(Traind.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Traind.pls))
    Hat.test = diag(Testd.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Testd.pls))
    # Leverage / Hat values
    hat.fit <- Hat.test # hat values
    hat.fit.df <- as.data.frame(hat.fit) # hat data frame
    hat.mean <- mean(hat.fit) # mean hat values
    hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
    write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
    #THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
    thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
    hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever]) # points with high leverage
    write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
    # Cook's distance ?
  }
  # Influence ?
  # PDF plots
  # --------------------------------------------------------------
  pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
  plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred")
  plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred")
  dotchart(as.matrix(FeatImp$importance),main="Feature Importance")
  # Fitted vs Residuals (full-dataset predictions vs. full-dataset residuals)
  plot(pred.both,resids,
       main="Fitted vs. Residuals for Fitted Model",
       xlab="Fitted", ylab="Residuals")
  abline(h = 0, lty = 2)
  # Leverage plots (only available when the hat matrix could be computed)
  if(det.Traind.pls!=0){
    plot(hat.fit, type = "h",
         main="Leverage for Fitted Model",
         xlab="Index", ylab="Hat")
    abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
  }
  dev.off()
  # --------------------------------------------------------------
}
return(list(stat.values=my.stats, model=las.fit)) # return a list with statistics and the full model
}
#----------------------------------------------------------------------------------------------------------------------
# RBF_DDAreg <- function(my.datf.train,my.datf.test,sCV,negThrStep=0.5,iSplit=1,fDet=F,outFile="") {
# #============================================================
# # 8.5. RBF network with the DDA algorithm regression (caret)
# #============================================================
#
# library(caret)
#
# net.c = my.datf.train[,1] # dependent variable is the first column in Training set
# RegrMethod <- "rbfDDA" # type of regression
#
# # Define the CV conditions
# ctrl<- trainControl(method=sCV,number=10,repeats=10,
# summaryFunction=defaultSummary)
#
# # Train the model using only training set
# set.seed(iSplit)
#
# rbf.fit<- train(net.c~.,data=my.datf.train,
# method='rbfDDA',trControl=ctrl,
# tuneGrid=expand.grid(.negativeThreshold=seq(0,1,negThrStep)))
#
# #------------------------------
# # Training RESULTS
# #------------------------------
# RMSE.tr <- rbf.fit$results[,2]
# R2.tr <- rbf.fit$results[,3]
# if (sCV == "repeatedcv"){ # if 10-fold CV
# RMSEsd.tr <- rbf.fit$results[,4]
# R2sd.tr <- rbf.fit$results[,5]
# }
# if (sCV == "LOOCV"){ # if LOOCV
# RMSEsd.tr <- 0 # formulas will be added later!
# R2sd.tr <- 0 # formulas will be added later!
# }
#
# #------------------------------------------------
# # RMSE & R^2, for train/test respectively
# #------------------------------------------------
# lm.train.res <- getTrainPerf(rbf.fit)
# lm.test.res <- postResample(predict(rbf.fit,my.datf.test),my.datf.test[,1])
#
# #------------------------------------------------
# # Adj R2, Pearson correlation
# #------------------------------------------------
# pred.tr <- predict(rbf.fit,my.datf.train) # predicted Y
# pred.ts <- predict(rbf.fit,my.datf.test) # predicted Y
# noFeats.fit <- length(predictors(rbf.fit)) # no. of features from the fitted model
# Feats.fit <- paste(predictors(rbf.fit),collapse="+") # string with the features included in the fitted model
#
# ds.full <- rbind(my.datf.train,my.datf.test)
# pred.both <- predict(rbf.fit,ds.full) # predicted Y
# adjR2.tr <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
# adjR2.ts <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
# corP.ts <- cor(my.datf.test[,1],pred.ts)
#
# adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
# RMSE.both <- rmse.funct(ds.full[,1],pred.both)
# r2.both <- r2.funct(ds.full[,1],pred.both)
#
# # Generate the output list with statistics for each cross-validation type
# # --------------------------------------------------------------------
# my.stats <- list("RegrMeth" = RegrMethod,
# "Split No" = as.numeric(iSplit), # from function param
# "CVtype" = sCV, # from function param
# "NoModelFeats" = as.numeric(noFeats.fit),
# "ModelFeats" = Feats.fit,
# "adjR2.tr" = as.numeric(adjR2.tr),
#
# "RMSE.tr" = as.numeric(min(RMSE.tr)), # these 4 lines correspond to the min of RMSE.tr !!!
# "R2.tr" = as.numeric(R2.tr[which.min(RMSE.tr)]),
# "RMSEsd.tr" = as.numeric(RMSEsd.tr[which.min(RMSE.tr)]),
# "R2sd.tr" = as.numeric(R2sd.tr[which.min(RMSE.tr)]),
#
# "adjR2.ts"= as.numeric(adjR2.ts),
# "RMSE.ts" = as.numeric((lm.test.res["RMSE"][[1]])),
# "R2.ts" = as.numeric((lm.test.res["Rsquared"][[1]])),
# "corP.ts" = as.numeric(corP.ts),
# "adjR2.both" = as.numeric(adjR2.both),
# "RMSE.both" = as.numeric(RMSE.both),
# "R2.both" = as.numeric(r2.both))
# #---------------------------------------------------------------------
# # Write to file DETAILS for GLM for each cross-validation method
# #---------------------------------------------------------------------
# if (fDet==T) { # if flag for details if true, print details about any resut
# write("RRegr package | eNanoMapper", file=outFile,append=T)
# write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
# write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
#
#
# write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(predictors(rbf.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
#
# write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(predictors(lm.train.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
# write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(predictors(lm.test.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
#
# write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,row.names=F,quote=F)
#
# # Variable Importance (max top 20)
# FeatImp <- varImp(rbf.fit, scale = F)
# components = length(FeatImp) # default plot all feature importance
# if (length(FeatImp)>20){ # if the number of features is greater than 20, use only 20
# components = 20
# }
# # Append feature importance to output details
# AppendList2CSv(FeatImp,outFile)
#
# fitModel <- rbf.fit$finalModel
#
# # =============================================================================
# # Assessment of Applicability Domain (plot leverage)
# # =============================================================================
#
# # Residuals
# resids <- residuals(fitModel) # residuals
# write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
#
# predVals.pls.ad <- pred.ts
# Traind.pls= as.matrix(my.datf.train)
# Testd.pls = as.matrix(my.datf.test)
# mat.Traind.pls<- t(Traind.pls) %*%(Traind.pls)
# det.Traind.pls<- det(mat.Traind.pls)
#
# if(det.Traind.pls!=0){
#
# Hat.train = diag(Traind.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Traind.pls))
# Hat.test = diag(Testd.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Testd.pls))
#
# # Leverage / Hat values
# hat.fit <- Hat.test # hat values
# hat.fit.df <- as.data.frame(hat.fit) # hat data frame
# hat.mean <- mean(hat.fit) # mean hat values
# hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
#
# write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
#
# #THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
# thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
# hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever]) # points with high leverage
#
# write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
#
# # Cook's distance ?
# }
#
# # Influence ?
#
# # PDF plots
# # --------------------------------------------------------------
# pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
# plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred")
# plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred")
# dotchart(as.matrix(FeatImp$importance),main="Feature Importance")
#
# # Fitted vs Residuals
# plot(fitted(fitModel),residuals(fitModel),
# main="Fitted vs. Residuals for Fitted Model",
# xlab="Fitted", ylab="Residuals")
# abline(h = 0, lty = 2)
#
# # Leverage plots
# if(det.Traind.pls!=0){
# plot(hat.fit, type = "h",
# main="Leverage for Fitted Model",
# xlab="Index", ylab="Hat")
# abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
# }
# dev.off()
# # --------------------------------------------------------------
# }
# return(list(stat.values=my.stats, model=rbf.fit)) # return a list with statistics and the full model
# }
#----------------------------------------------------------------------------------------------------------------------
# SVRMreg: SVM regression with a radial (RBF) kernel, trained via caret's
# "svmRadial" method.
#
# Args:
#   my.datf.train: training data.frame; the dependent variable MUST be column 1.
#   my.datf.test : test data.frame with the same column layout.
#   sCV          : caret resampling method name ("repeatedcv" or "LOOCV").
#   iSplit       : index of the train/test split; also used as the RNG seed.
#   fDet         : if TRUE, append detailed results to outFile and write PDF plots.
#   outFile      : output file path used when fDet is TRUE (CSV-style appends).
#   cs           : numeric vector of SVM cost (C) values to tune over.
#
# Returns: list(stat.values = <named list of fit statistics>,
#               model       = <fitted caret "train" object>).
SVRMreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="",cs=c(1,5,10,15,20)) {
        #====================================
        # 8.6 SVM Radial Regression (caret)
        #====================================
        #library(caret)
        #library(kernlab)
        cs = as.numeric(cs)
        net.c = my.datf.train[,1]           # dependent variable is the first column in Training set
        RegrMethod <- "svmRadial"           # type of regression
        # Define the CV conditions
        ctrl<- trainControl(method=sCV,number=10,repeats=10,
                            summaryFunction=defaultSummary)
        # Train the model using only training set
        set.seed(iSplit)                    # seed tied to the split index for reproducibility
        # Kernel width fixed from the data (kernlab::sigest); only C is tuned below
        sigma = sigest (as.matrix(my.datf.train[,-1]))[2]
        svmL.fit<- train(net.c~.,data=my.datf.train,
                         method='svmRadial',tuneLength=10,trControl=ctrl,
                         metric='RMSE',
                         tuneGrid=expand.grid(.sigma=sigma,.C= cs))
        #------------------------------
        # Training RESULTS
        #------------------------------
        # Columns are looked up by name (not position) in the caret results table
        RMSE.tr    <- svmL.fit$results[,which(names(svmL.fit$results)=='RMSE')]#2]
        R2.tr      <- svmL.fit$results[,which(names(svmL.fit$results)=='Rsquared')]#3]
        if (sCV == "repeatedcv"){ # if 10-fold CV
          RMSEsd.tr <- svmL.fit$results[,which(names(svmL.fit$results)=='RMSESD')]##4]
          R2sd.tr   <- svmL.fit$results[,which(names(svmL.fit$results)=='RsquaredSD')]#5]
        }
        if (sCV == "LOOCV"){ # if LOOCV; LOOCV produces no resampling SD columns
          RMSEsd.tr <- 0 # formulas will be added later!
          R2sd.tr   <- 0 # formulas will be added later!
        }
        #------------------------------------------------
        # RMSE & R^2, for train/test respectively
        #------------------------------------------------
        lm.train.res <- getTrainPerf(svmL.fit)
        lm.test.res  <- postResample(predict(svmL.fit,my.datf.test),my.datf.test[,1])
        #------------------------------------------------
        # Adj R2, Pearson correlation
        #------------------------------------------------
        pred.tr <- predict(svmL.fit,my.datf.train)            # predicted Y
        pred.ts <- predict(svmL.fit,my.datf.test)             # predicted Y
        noFeats.fit <- length(predictors(svmL.fit))           # no. of features from the fitted model
        Feats.fit <- paste(predictors(svmL.fit),collapse="+") # string with the features included in the fitted model
        ds.full <- rbind(my.datf.train,my.datf.test)          # pooled train+test set for "both" statistics
        pred.both <- predict(svmL.fit,ds.full)                # predicted Y
        adjR2.tr  <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
        adjR2.ts  <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
        corP.ts   <- cor(my.datf.test[,1],pred.ts)
        adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
        RMSE.both  <- rmse.funct(ds.full[,1],pred.both)
        r2.both    <- r2.funct(ds.full[,1],pred.both)
        # Generate the output list with statistics for each cross-validation type
        # ------------------------------------------------------------------------
        my.stats <- list("RegrMeth" = RegrMethod,
                         "Split No" = as.numeric(iSplit),            # from function param
                         "CVtype" = sCV,                             # from function param
                         "NoModelFeats" = as.numeric(noFeats.fit),
                         "ModelFeats" = Feats.fit,
                         "adjR2.tr" = as.numeric(adjR2.tr),
                         "RMSE.tr" = as.numeric(min(RMSE.tr)),       # these 4 lines correspond to the min of RMSE.tr !!!
                         "R2.tr" = as.numeric(R2.tr[which.min(RMSE.tr)]),
                         "RMSEsd.tr" = as.numeric(RMSEsd.tr[which.min(RMSE.tr)]),
                         "R2sd.tr" = as.numeric(R2sd.tr[which.min(RMSE.tr)]),
                         "adjR2.ts"= as.numeric(adjR2.ts),
                         "RMSE.ts" = as.numeric((lm.test.res["RMSE"][[1]])),
                         "R2.ts" = as.numeric((lm.test.res["Rsquared"][[1]])),
                         "corP.ts" = as.numeric(corP.ts),
                         "adjR2.both" = as.numeric(adjR2.both),
                         "RMSE.both" = as.numeric(RMSE.both),
                         "R2.both" = as.numeric(r2.both))
        #---------------------------------------------------------------------
        # Write to file DETAILS for GLM for each cross-validation method
        #---------------------------------------------------------------------
        if (fDet==T) { # if the details flag is TRUE, append full details to outFile
          write("RRegr package | eNanoMapper", file=outFile,append=T)
          write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(predictors(svmL.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
          # NOTE(review): predictors() is applied to performance tables here
          # (getTrainPerf / postResample results), not to a model — verify this
          # writes the intended content.
          write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(predictors(lm.train.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(predictors(lm.test.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
          # Variable Importance (max top 20)
          FeatImp <- varImp(svmL.fit, scale = F)
          components = length(FeatImp)      # default: plot all feature importances
          if (length(FeatImp)>20){          # if the number of features is greater than 20, use only 20
            components = 20
          }
          # Append feature importance to output details
          AppendList2CSv(FeatImp,outFile)
          fitModel <- svmL.fit$finalModel   # underlying kernlab model, used for plots below
          # =============================================================================
          # Assessment of Applicability Domain (plot leverage)
          # =============================================================================
          # Residuals (predicted - observed on the training set)
          resids = pred.tr-svmL.fit$trainingData$.outcome # residuals
          write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
          predVals.pls.ad <- pred.ts        # NOTE(review): assigned but not used below
          Traind.pls= as.matrix(my.datf.train)
          Testd.pls = as.matrix(my.datf.test)
          mat.Traind.pls<- t(Traind.pls) %*%(Traind.pls)
          det.Traind.pls<- det(mat.Traind.pls)
          if(det.Traind.pls!=0){ # hat matrix only computable when X'X is invertible
            # tol=1e-40 forces solve() through near-singular X'X — TODO confirm intended
            Hat.train = diag(Traind.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Traind.pls))
            Hat.test  = diag(Testd.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Testd.pls))
            # Leverage / Hat values (test-set leverage is what gets reported)
            hat.fit <- Hat.test                  # hat values
            hat.fit.df <- as.data.frame(hat.fit) # hat data frame
            hat.mean <- mean(hat.fit)            # mean hat values
            # Flag points whose leverage exceeds 3x (x3) or 2x (x2) the mean
            hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
            write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
            #THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
            thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
            hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever])           # points with high leverage
            write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
            # Cook's distance ?
          }
          # Influence ?
          # PDF plots
          # --------------------------------------------------------------
          pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
          plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred")
          plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred")
          dotchart(as.matrix(FeatImp$importance),main="Feature Importance")
          # Fitted vs Residuals
          plot(fitted(fitModel),resids,
               main="Fitted vs. Residuals for Fitted Model",
               xlab="Fitted", ylab="Residuals")
          abline(h = 0, lty = 2)
          # Leverage plots (only when the hat matrix was computable above)
          if(det.Traind.pls!=0){
            plot(hat.fit, type = "h",
                 main="Leverage for Fitted Model",
                 xlab="Index", ylab="Hat")
            abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
          }
          dev.off()
          # --------------------------------------------------------------
        }
        return(list(stat.values=my.stats, model=svmL.fit)) # return a list with statistics and the full model
}
#----------------------------------------------------------------------------------------------------------------------
# NNreg: single-hidden-layer neural network regression via caret's "nnet"
# method (linear output units).
#
# Args:
#   my.datf.train: training data.frame; the dependent variable MUST be column 1.
#   my.datf.test : test data.frame with the same column layout.
#   sCV          : caret resampling method name ("repeatedcv" or "LOOCV").
#   iSplit       : index of the train/test split; also used as the RNG seed.
#   fDet         : if TRUE, append detailed results to outFile and write PDF plots.
#   outFile      : output file path used when fDet is TRUE (CSV-style appends).
#
# Returns: list(stat.values = <named list of fit statistics>,
#               model       = <fitted caret "train" object>).
NNreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="") {
        #========================================
        # 8.8 Neural Network Regression (caret)
        #========================================
        #library(caret)
        net.c = my.datf.train[,1]           # dependent variable is the first column in Training set
        RegrMethod <- "nnet"                # type of regression
        # Define the CV conditions
        ctrl<- trainControl(method = sCV, number = 10,repeats = 10,
                            summaryFunction = defaultSummary)
        # Train the model using only training set
        set.seed(iSplit)                    # seed tied to the split index for reproducibility
        nn.fit<- train(net.c~.,data=my.datf.train,
                       method = 'nnet',trControl = ctrl,
                       linout=T, trace = F,MaxNWts=20000,
                       #Grid of tuning parameters to try:
                       tuneGrid=expand.grid(.size=c(1,5,10,15),.decay=c(0,0.001,0.1)))
        #Grid parameters are appearing at the print out of the model
        #size==#of units in hidden layer, decay==parameter of weight decay (default:0)
        #------------------------------
        # Training RESULTS
        #------------------------------
        # Columns are looked up by name (not position) in the caret results table
        RMSE.tr    <- nn.fit$results[,which(names(nn.fit$results)=='RMSE')]#2]
        R2.tr      <- nn.fit$results[,which(names(nn.fit$results)=='Rsquared')]#3]
        if (sCV == "repeatedcv"){ # if 10-fold CV
          RMSEsd.tr <- nn.fit$results[,which(names(nn.fit$results)=='RMSESD')]#4]
          R2sd.tr   <- nn.fit$results[,which(names(nn.fit$results)=='RsquaredSD')]#5]
        }
        if (sCV == "LOOCV"){ # if LOOCV; LOOCV produces no resampling SD columns
          RMSEsd.tr <- 0 # formulas will be added later!
          R2sd.tr   <- 0 # formulas will be added later!
        }
        #------------------------------------------------
        # RMSE & R^2, for train/test respectively
        #------------------------------------------------
        lm.train.res <- getTrainPerf(nn.fit)
        lm.test.res  <- postResample(predict(nn.fit,my.datf.test),my.datf.test[,1])
        #------------------------------------------------
        # Adj R2, Pearson correlation
        #------------------------------------------------
        pred.tr <- predict(nn.fit,my.datf.train)            # predicted Y
        pred.ts <- predict(nn.fit,my.datf.test)             # predicted Y
        noFeats.fit <- length(predictors(nn.fit))           # no. of features from the fitted model
        Feats.fit <- paste(predictors(nn.fit),collapse="+") # string with the features included in the fitted model
        ds.full <- rbind(my.datf.train,my.datf.test)        # pooled train+test set for "both" statistics
        pred.both <- predict(nn.fit,ds.full)                # predicted Y
        adjR2.tr  <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
        adjR2.ts  <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
        corP.ts   <- cor(my.datf.test[,1],pred.ts)
        adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
        RMSE.both  <- rmse.funct(ds.full[,1],pred.both)
        r2.both    <- r2.funct(ds.full[,1],pred.both)
        # Generate the output list with statistics for each cross-validation type
        # ----------------------------------------------------------------------------
        my.stats <- list("RegrMeth" = RegrMethod,
                         "Split No" = as.numeric(iSplit),            # from function param
                         "CVtype" = sCV,                             # from function param
                         "NoModelFeats" = as.numeric(noFeats.fit),
                         "ModelFeats" = Feats.fit,
                         "adjR2.tr" = as.numeric(adjR2.tr),
                         "RMSE.tr" = as.numeric(min(RMSE.tr)),       # these 4 lines correspond to the min of RMSE.tr !!!
                         "R2.tr" = as.numeric(R2.tr[which.min(RMSE.tr)]),
                         "RMSEsd.tr" = as.numeric(RMSEsd.tr[which.min(RMSE.tr)]),
                         "R2sd.tr" = as.numeric(R2sd.tr[which.min(RMSE.tr)]),
                         "adjR2.ts"= as.numeric(adjR2.ts),
                         "RMSE.ts" = as.numeric((lm.test.res["RMSE"][[1]])),
                         "R2.ts" = as.numeric((lm.test.res["Rsquared"][[1]])),
                         "corP.ts" = as.numeric(corP.ts),
                         "adjR2.both" = as.numeric(adjR2.both),
                         "RMSE.both" = as.numeric(RMSE.both),
                         "R2.both" = as.numeric(r2.both))
        #---------------------------------------------------------------------
        # Write to file DETAILS for GLM for each cross-validation method
        #---------------------------------------------------------------------
        if (fDet==T) { # if the details flag is TRUE, append full details to outFile
          write("RRegr package | eNanoMapper", file=outFile,append=T)
          write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(predictors(nn.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
          # NOTE(review): predictors() is applied to performance tables here
          # (getTrainPerf / postResample results), not to a model — verify this
          # writes the intended content.
          write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(predictors(lm.train.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(predictors(lm.test.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
          # Variable Importance (max top 20)
          FeatImp <- varImp(nn.fit, scale = F)
          components = length(FeatImp)      # default: plot all feature importances
          if (length(FeatImp)>20){          # if the number of features is greater than 20, use only 20
            components = 20
          }
          # Append feature importance to output details
          AppendList2CSv(FeatImp,outFile)
          fitModel <- nn.fit$finalModel     # underlying nnet model, used for plots below
          # =============================================================================
          # Assessment of Applicability Domain (plot leverage)
          # =============================================================================
          # Residuals (taken from the final nnet fit)
          resids <- residuals(fitModel)     # residuals
          write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
          predVals.pls.ad <- pred.ts        # NOTE(review): assigned but not used below
          Traind.pls= as.matrix(my.datf.train)
          Testd.pls = as.matrix(my.datf.test)
          mat.Traind.pls<- t(Traind.pls) %*%(Traind.pls)
          det.Traind.pls<- det(mat.Traind.pls)
          if(det.Traind.pls!=0){ # hat matrix only computable when X'X is invertible
            # tol=1e-40 forces solve() through near-singular X'X — TODO confirm intended
            Hat.train = diag(Traind.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Traind.pls))
            Hat.test  = diag(Testd.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Testd.pls))
            # Leverage / Hat values (test-set leverage is what gets reported)
            hat.fit <- Hat.test                  # hat values
            hat.fit.df <- as.data.frame(hat.fit) # hat data frame
            hat.mean <- mean(hat.fit)            # mean hat values
            # Flag points whose leverage exceeds 3x (x3) or 2x (x2) the mean
            hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
            write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
            #THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
            thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
            hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever])           # points with high leverage
            write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
            write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
            # Cook's distance ?
          }
          # Influence ?
          # PDF plots
          # --------------------------------------------------------------
          pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
          plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred")
          plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred")
          dotchart(as.matrix(FeatImp$importance),main="Feature Importance")
          # Fitted vs Residuals
          plot(fitted(fitModel),residuals(fitModel),
               main="Fitted vs. Residuals for Fitted Model",
               xlab="Fitted", ylab="Residuals")
          abline(h = 0, lty = 2)
          # Leverage plots (only when the hat matrix was computable above)
          if(det.Traind.pls!=0){
            plot(hat.fit, type = "h",
                 main="Leverage for Fitted Model",
                 xlab="Index", ylab="Hat")
            abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
          }
          dev.off()
          # --------------------------------------------------------------
        }
        return(list(stat.values=my.stats, model=nn.fit)) # return a list with statistics and the full model
}
# **************************************
# WRAPPER METHODS
# **************************************
# PLSregWSel: PLS regression with wrapper feature selection via caret's
# recursive feature elimination (rfe).
#
# Args:
#   my.datf.train: training data.frame; the dependent variable MUST be column 1.
#   my.datf.test : test data.frame with the same column layout.
#   sCV          : caret resampling method name ("repeatedcv" or "LOOCV").
#   iSplit       : index of the train/test split; also used as the RNG seed.
#   fDet         : if TRUE, append detailed results to outFile and write PDF plots.
#   outFile      : output file path used when fDet is TRUE (CSV-style appends).
#
# Returns: list(stat.values = <named list of fit statistics>,
#               model       = <fitted caret "rfe" object>).
PLSregWSel <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="") {
        #====================================================================================================
        # 8.3W. PLS regression with filter feature selection (caret)
        #====================================================================================================
        #library(caret)
        net.c = my.datf.train[,1]           # dependent variable is the first column in Training set
        RegrMethod <- "pls.WSel"            # type of regression
        # Define the CV conditions: outer loop (rfe) uses bootstrap, inner loop uses sCV
        ctrlw <- rfeControl(method = 'boot', number = 25,saveDetails=T)#number=10,repeats=10
        ctrl  <- trainControl(method = sCV, number = 10,repeats = 1,#numebr=10,repeats=10,
                              summaryFunction = defaultSummary,savePredictions=T)
        # Candidate feature-subset sizes for RFE: 2, 12, 22, ... up to the feature count
        subsetsx<- seq(2,dim(my.datf.train)[2]-1, by = 10)
        # Train the model using only training set
        set.seed(iSplit)                    # seed tied to the split index for reproducibility
        # Max number of PLS components: 1/5 of the feature count, at least 1
        floor.param<- floor((dim(my.datf.train)[2]-1)/5)
        if(floor.param<1){floor.param <- 1}
        pls.fit<- rfe(net.c~.,data=my.datf.train,
                      method = 'pls',
                      rfeControl = ctrlw, trControl=ctrl, sizes=subsetsx, importance=T,
                      metric = 'RMSE',
                      tuneGrid=expand.grid(.ncomp=c(1:floor.param)))#ncomp=c(1:5)
        #------------------------------
        # Training RESULTS
        #------------------------------
        # NOTE(review): the subset size is hard-coded to 5 variables here, which
        # need not match the size RFE actually selected (pls.fit$optsize /
        # pls.fit$bestSubset) — TODO confirm intended.
        pls.fit.best <- subset(pls.fit$results, pls.fit$results$Variables == 5) # best selected fit
        RMSE.tr <- pls.fit.best$RMSE
        R2.tr   <- pls.fit.best$Rsquared
        if (sCV == "repeatedcv"){ # if 10-fold CV
          RMSEsd.tr <- pls.fit.best$RMSESD
          R2sd.tr   <- pls.fit.best$RsquaredSD
        }
        if (sCV == "LOOCV"){ # if LOOCV; LOOCV produces no resampling SD columns
          RMSEsd.tr <- 0 # formulas will be added later!
          R2sd.tr   <- 0 # formulas will be added later!
        }
        #------------------------------------------------
        # RMSE & R^2, for train/test respectively
        #------------------------------------------------
        lm.train.res <- pls.fit # ?? (the whole rfe object, unlike getTrainPerf in the other wrappers)
        lm.test.res  <- postResample(predict(pls.fit,my.datf.test),my.datf.test[,1])
        #------------------------------------------------
        # Adj R2, Pearson correlation
        #------------------------------------------------
        pred.tr <- predict(pls.fit,my.datf.train)            # predicted Y
        pred.ts <- predict(pls.fit,my.datf.test)             # predicted Y
        noFeats.fit <- length(predictors(pls.fit))           # no. of features from the fitted model
        Feats.fit <- paste(predictors(pls.fit),collapse="+") # string with the features included in the fitted model
        ds.full <- rbind(my.datf.train,my.datf.test)         # pooled train+test set for "both" statistics
        pred.both <- predict(pls.fit,ds.full)                # predicted Y
        adjR2.tr  <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
        adjR2.ts  <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
        corP.ts   <- cor(my.datf.test[,1],pred.ts)
        adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
        RMSE.both  <- rmse.funct(ds.full[,1],pred.both)
        r2.both    <- r2.funct(ds.full[,1],pred.both)
        # Generate the output list with statistics for each cross-validation type
        # -------------------------------------------------------------------------
        my.stats <- list("RegrMeth" = RegrMethod,
                         "Split No" = as.numeric(iSplit),            # from function param
                         "CVtype" = sCV,                             # from function param
                         "NoModelFeats" = as.numeric(noFeats.fit),
                         "ModelFeats" = Feats.fit,
                         "adjR2.tr" = as.numeric(adjR2.tr),
                         "RMSE.tr" = as.numeric(RMSE.tr),            # these 4 lines correspond to the min of RMSE.tr !!!
                         "R2.tr" = as.numeric(R2.tr),
                         "RMSEsd.tr" = as.numeric(RMSEsd.tr),
                         "R2sd.tr" = as.numeric(R2sd.tr),
                         "adjR2.ts"= as.numeric(adjR2.ts),
                         "RMSE.ts" = as.numeric((lm.test.res["RMSE"][[1]])),
                         "R2.ts" = as.numeric((lm.test.res["Rsquared"][[1]])),
                         "corP.ts" = as.numeric(corP.ts),
                         "adjR2.both" = as.numeric(adjR2.both),
                         "RMSE.both" = as.numeric(RMSE.both),
                         "R2.both" = as.numeric(r2.both))
        #---------------------------------------------------------------------
        # Write to file DETAILS for GLM for each cross-validation method
        #---------------------------------------------------------------------
        if (fDet==T) { # if the details flag is TRUE, append full details to outFile
          write("RRegr package | eNanoMapper", file=outFile,append=T)
          write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(summary(my.datf.test),file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(predictors(pls.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(predictors(lm.train.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          write.table(predictors(lm.test.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
          # NOTE(review): the "NNet variable importance" label below looks like a
          # copy-paste leftover from NNreg (this is a PLS model); the string is
          # runtime output and is left unchanged here.
          write.table("NNet variable importance: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
          AppendList2txt(varImp(pls.fit),outFile)
          write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=T,quote=F)
          write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
          # Variable Importance (max top 20)
          FeatImp <- varImp(pls.fit, scale = F)
          components = length(FeatImp)      # default: plot all feature importances
          if (length(FeatImp)>20){          # if the number of features is greater than 20, use only 20
            components = 20
          }
          # Append feature importance to output details
          AppendList2CSv(FeatImp,outFile)
          # PDF plots (2x2 panel layout)
          pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
          par(mfrow = c(2, 2))
          plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred")
          plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred")
          dotchart(as.matrix(FeatImp$importance),main="Feature Importance")
          #plot(FeatImp, top = components,main="Feature Importance")
          dev.off()
        }
        return(list(stat.values=my.stats, model=pls.fit)) # return a list with statistics and the full model
}
#----------------------------------------------------------------------------------------------------------------------
# Y-randomization (Y-scrambling) test for the best regression model (Step 12).
#
# The response column of the dataset is shuffled once, then the best method is
# re-fitted noYrand times on fresh train/test splits. The test-set R2 of each
# randomized fit is compared against best.R2.ts; a real model should degrade
# strongly under scrambled responses.
#
# Args:
#   dss              : full dataset; the dependent variable MUST be column 1.
#   trainFrac        : fraction of rows used for training in each split.
#   best.reg         : caret method name of the best model (e.g. "pls", "nnet").
#   best.R2.ts       : test-set R2 of the best (non-randomized) model.
#   noYrand          : number of Y-randomization repetitions.
#   ResBestF         : output file for the detailed report (and .Yrand.Hist.pdf).
#   rfe_SVM_param_c  : SVM cost values, forwarded to SVRMreg / SVMRFEreg.
#   rfe_SVM_param_eps: SVM epsilon values, forwarded to SVMRFEreg.
#
# Returns: vector of |R2 differences| / best.R2.ts, or NULL when the last
#          randomized fit produced an NA test R2.
#
# NOTE(review): DsSplit is called with the global PathDataSet (not a parameter)
# — TODO confirm it is defined by the caller's environment.
Yrandom<- function(dss,trainFrac,best.reg,best.R2.ts,noYrand,ResBestF,rfe_SVM_param_c,rfe_SVM_param_eps){
  cat("-> Best model Y-Randomization ...\n")
  dss[,1] <- sample(dss[,1])        # scramble the response once for the whole test
  rand.r2 <- NULL                   # test-set R2 collected from every repetition
  for (k in seq_len(noYrand)) {
    # New train/test split per repetition; k doubles as the split seed.
    split.sets <- DsSplit(dss, trainFrac, F, PathDataSet, k)
    ds.train <- split.sets$train
    ds.test  <- split.sets$test
    # Re-fit the best method on the scrambled data (no details, repeated CV only);
    # we only need the resulting test-set R2.
    if (best.reg == "lm") {
      stats.k <- LMreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF)$stat.values
    } else if (best.reg == "glmStepAIC") {
      stats.k <- GLMreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF)$stat.values
    } else if (best.reg == "pls") {
      stats.k <- PLSreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF)$stat.values
    } else if (best.reg == "lasso.RMSE") {
      stats.k <- LASSOreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF)$stat.values
    } else if (best.reg == "svmRadial") {
      stats.k <- SVRMreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF, rfe_SVM_param_c)$stat.values
    } else if (best.reg == "nnet") {
      stats.k <- NNreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF)$stat.values
    } else if (best.reg == "rf") {
      stats.k <- RFreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF)$stat.values
    } else if (best.reg == "svmRFE") {
      stats.k <- SVMRFEreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF, rfe_SVM_param_c, rfe_SVM_param_eps)$stat.values
    } else if (best.reg == "glmnet") {
      stats.k <- ENETreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF)$stat.values
    } else if (best.reg == "rfRFE") {
      stats.k <- RFRFEreg(ds.train, ds.test, "repeatedcv", k, F, ResBestF)$stat.values
    }
    rand.r2 <- c(rand.r2, stats.k$R2.ts)
  }
  r2.diff.ratio <- NULL
  # NA check uses the LAST repetition's R2 (original behavior preserved).
  if (is.na(stats.k$R2.ts)) {
    cat(" --> Y-Randomization error due to NA values!\n")
    write("Y-Randomization error due to NA values!", file=ResBestF, append=T)
  } else {
    # Absolute R2 gaps between the best model and each randomized fit,
    # plus the same gaps as a fraction of the best R2.
    r2.abs.diff   <- abs(rand.r2 - best.R2.ts)
    r2.diff.ratio <- abs(r2.abs.diff / best.R2.ts)
    # Histogram of the ratio, saved as PDF.
    pdf(file=paste0(ResBestF, ".Yrand.Hist.pdf"))
    Yrand.hist <- hist(r2.diff.ratio)
    dev.off()
    write.table("Y randomization test: ", file=ResBestF, append=T, sep=",", col.names=F, row.names=F, quote=F)
    write.table("=====================", file=ResBestF, append=T, sep=",", col.names=F, row.names=F, quote=F)
    write.table("Diffs R2 (Best Model - Y rand):", file=ResBestF, append=T, sep=",", col.names=F, row.names=F, quote=F)
    AppendList2CSv(r2.abs.diff, ResBestF)
    write.table("Summary Difs:", file=ResBestF, append=T, sep=",", col.names=F, row.names=F, quote=F)
    AppendList2CSv(summary(r2.abs.diff), ResBestF)
    write.table("Ratio Diffs R2 / Best R2 (Best Model - Y rand):", file=ResBestF, append=T, sep=",", col.names=F, row.names=F, quote=F)
    AppendList2CSv(r2.diff.ratio, ResBestF)
    write.table("Summary Difs %:", file=ResBestF, append=T, sep=",", col.names=F, row.names=F, quote=F)
    AppendList2CSv(summary(r2.diff.ratio), ResBestF)
  }
  return(r2.diff.ratio)
}
# -----------------------------------------------------------------------
# svm regression function helper
# -----------------------------------------------------------------------
# jseoane
# use:
# svmFuncsGradW: RAKOTOMAMONJY gradient w
# Pre-trained svmRadial regression model shipped with the package, loaded into
# the workspace for the RFE helpers below.
load(system.file("models", "model.svmRadialReg.RData", package = "RRegrs"))

# Helper-function set for caret::rfe: standard ranking via the SVM weight
# vector w, built on top of caret's default caretFuncs.
svmFuncsW <- caretFuncs
svmFuncsW$fit <- function(x, y, first, last, ..., tuneGrid) {
  # Fix the RBF kernel width from the data (kernlab::sigest), then let caret
  # tune only cost C and epsilon over the values supplied in tuneGrid.
  rbf.sigma <- sigest(x)[2]
  svm.grid <- expand.grid(.C = tuneGrid$.C,
                          .sigma = rbf.sigma,
                          .epsilon = tuneGrid$.epsilon)
  train(x, y, ..., tuneGrid = svm.grid)
}
#--------------------------------------------------------------------------------
svmFuncsW$rank=function(object,x,y){
alphas = alpha(object$finalModel)
alpha.idxs = alphaindex(object$finalModel)
y.sv = as.numeric(y[alpha.idxs])
w = (y.sv * alphas) %*% xmatrix(object$finalModel)
sig = ifelse(object$finalModel@fitted>y,yes=1,no=-1)
avImp = t(w*w)
out = data.frame(avImp)
colnames(out) = "Overall"
out = out[order(out$Overall, decreasing = TRUE), , drop = FALSE]
out$var <- rownames(out)
out
}
#--------------------------------------------------------------------------------
svmFuncsW$pred= function(object, x)
{
tmp = predict(object, newdata=x)
if(object$modelType == "Classification" &
!is.null(object$modelInfo$prob))
{
out1 =cbind(data.frame(pred = tmp),
as.data.frame(predict(object$finalModel, newdata=x, type = "prob")))
} else out1 <- tmp
out1
}
#--------------------------------------------------------------------------------
# Feature ranking based on the gradient of the SVM margin w.r.t. each input
# feature (Rakotomamonjy-style criterion for nonlinear SVM-RFE). Inherits
# fit/pred from svmFuncsW above and overrides only the ranking function.
svmFuncsGradW = svmFuncsW
svmFuncsGradW$rank=function(object,x,y){ # RAKOTOMAMONJY gradient w
  alphas = alpha(object$finalModel)#[[1]]
  alpha.idxs = alphaindex(object$finalModel)#[[1]]
  y.sv = y[alpha.idxs]                   # targets of the support vectors
  krnFun = kernelf(object$finalModel)    # fitted kernel function (RBF here)
  kernel = kernelMatrix(krnFun,x)        # full kernel matrix on x
  sigma = krnFun@kpar$sigma              # RBF width, used in the derivative
  xmat = xmatrix(object$finalModel)[[1]] # NOTE(review): computed but never used
  kerSV = kernel[alpha.idxs,alpha.idxs]  # kernel restricted to support vectors
  nSV = length(alpha.idxs)
  nfeat = dim(x)[2]
  avImp = numeric(nfeat)
  names(avImp) = colnames(x)
  for(i in 1:nfeat){
    # pairwise differences of feature i between support vectors
    deraux = ( x[alpha.idxs,i] %*% t(as.matrix(rep(1,nSV))) ) - (as.matrix(rep(1,nSV)) %*% t(x[alpha.idxs,i]) )
    # derivative of the RBF kernel w.r.t. feature i (both orientations)
    kernelDeriv1 = -(deraux * kerSV) / (sigma^2)
    kernelDeriv2 = (deraux * kerSV) / (sigma^2)
    gradMarg1= -t(y.sv*alphas) %*% kernelDeriv1 %*% (y.sv*alphas)
    gradMarg2= -t(y.sv*alphas) %*% kernelDeriv2 %*% (y.sv*alphas)
    avImp[i] = gradMarg1^2 + gradMarg2^2 # importance = squared margin gradients
  }
  # return a data.frame sorted by decreasing importance, as caret::rfe expects
  out = data.frame(avImp)
  colnames(out) = "Overall"
  out = out[order(out$Overall, decreasing = TRUE), , drop = FALSE]
  out$var <- rownames(out)
  out
}
#----------------------------------------------------------------------------------------------------------------------
# RFreg: random-forest regression (caret method "rf") for one train/test split.
#   my.datf.train, my.datf.test == data frames whose FIRST column is the
#                                  dependent variable; remaining columns = features
#   sCV    == caret resampling method: "repeatedcv" (10x10 CV) or "LOOCV"
#   iSplit == split number; also seeds the RNG so each split is reproducible
#   fDet   == if TRUE, append detailed results to outFile and write diagnostic
#             plots to <outFile>.<sCV>.split<iSplit>.pdf
# Returns list(stat.values = named list of fit statistics, model = caret train object).
RFreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="") {
  #======================================
  # Basic RandomForest
  #======================================
  net.c = my.datf.train[,1] # dependent variable = first column of training set
  RegrMethod <- "rf" # type of regression
  # Define the CV conditions
  ctrl<- trainControl(method=sCV, number=10,repeats=10,#number=10,repeats=10,
                      summaryFunction=defaultSummary)
  # mtry grid: p/3 (regression default), p/2 and p
  tuneParam = data.frame(.mtry=c(ncol(my.datf.train)/3,ncol(my.datf.train)/2,ncol(my.datf.train)))
  # Train the model using only training set
  set.seed(iSplit)
  rf.fit<- train(net.c~.,data=my.datf.train,
                 method='rf', trControl=ctrl,
                 metric='RMSE',ntree=1500,tuneGrid =tuneParam)
  #------------------------------
  # Training RESULTS
  #------------------------------
  # caret results table columns 2..5 = RMSE, Rsquared, RMSESD, RsquaredSD
  RMSE.tr <- rf.fit$results[rownames(rf.fit$bestTune),2]
  R2.tr <- rf.fit$results[rownames(rf.fit$bestTune),3]
  if (sCV == "repeatedcv"){ # if 10-fold CV
    RMSEsd.tr <- rf.fit$results[rownames(rf.fit$bestTune),4]
    R2sd.tr <- rf.fit$results[rownames(rf.fit$bestTune),5]
  }
  if (sCV == "LOOCV"){ # if LOOCV: no SD available from a single resample
    RMSEsd.tr <- 0 # formulas will be added later!
    R2sd.tr <- 0 # formulas will be added later!
  }
  #------------------------------------------------
  # RMSE & R^2, for train/test respectively
  #------------------------------------------------
  rf.train.res <- getTrainPerf(rf.fit)
  rf.test.res <- postResample(predict(rf.fit,my.datf.test),my.datf.test[,1])
  #------------------------------------------------
  # Adj R2, Pearson correlation
  #------------------------------------------------
  pred.tr <- predict(rf.fit,my.datf.train) # predicted Y for training
  pred.ts <- predict(rf.fit,my.datf.test) # predicted Y for test
  noFeats.fit <- length(predictors(rf.fit)) # no. of features from the fitted model
  Feats.fit <- paste(predictors(rf.fit),collapse="+") # string with the features included in the fitted model
  ds.full <- rbind(my.datf.train,my.datf.test)
  pred.both <- predict(rf.fit,ds.full) # predicted Y
  adjR2.tr <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
  adjR2.ts <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
  corP.ts <- cor(my.datf.test[,1],pred.ts)
  adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
  RMSE.both <- rmse.funct(ds.full[,1],pred.both)
  r2.both <- r2.funct(ds.full[,1],pred.both)
  # Generate the output list with statistics for each cross-validation type
  # ------------------------------------------------------------------------------
  my.stats <- list("RegrMeth" = RegrMethod,
                   "Split No" = as.numeric(iSplit), # from function param
                   "CVtype" = sCV, # from function param
                   "NoModelFeats" = as.numeric(noFeats.fit),
                   "ModelFeats" = Feats.fit,
                   "adjR2.tr" = as.numeric(adjR2.tr),
                   "RMSE.tr" = as.numeric(RMSE.tr),
                   "R2.tr" = as.numeric(R2.tr),
                   "RMSEsd.tr" = as.numeric(RMSEsd.tr),
                   "R2sd.tr" = as.numeric(R2sd.tr),
                   "adjR2.ts"= as.numeric(adjR2.ts),
                   "RMSE.ts" = as.numeric((rf.test.res["RMSE"])),
                   "R2.ts" = as.numeric((rf.test.res["Rsquared"])),
                   "corP.ts" = as.numeric(corP.ts),
                   "adjR2.both" = as.numeric(adjR2.both),
                   "RMSE.both" = as.numeric(RMSE.both),
                   "R2.both" = as.numeric(r2.both))
  #---------------------------------------------------------------------
  # Write to file DETAILS for each cross-validation method
  #---------------------------------------------------------------------
  if (fDet==T) { # if flag for details is T, print details about the results
    write("RRegr package | eNanoMapper", file=outFile, append=T)
    write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(predictors(rf.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    # BUGFIX: was predictors(rf.train.res) -- predictors() applied to a
    # performance data.frame; SVMRFEreg/RFRFEreg write the table directly
    write.table(rf.train.res,file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    # BUGFIX: was predictors(rf.test.res) -- same problem as above
    write.table(rf.test.res, file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
    # Variable Importance (max top 20)
    FeatImp <- importance(rf.fit$finalModel, scale = T)
    FeatImp = FeatImp[order(FeatImp,decreasing=T),]
    components = length(FeatImp) # default plot all feature importance
    if (length(FeatImp)>20){ # if the number of features is greater than 20, use only 20
      components = 20
    }
    # Append feature importance to output details
    AppendList2CSv(FeatImp,outFile)
    fitModel <- rf.fit$finalModel
    # =============================================================================
    # Assessment of Applicability Domain (plot leverage)
    # =============================================================================
    # Residuals (NOTE(review): randomForest's $predicted are presumably the
    # out-of-bag predictions, making these OOB residuals -- confirm)
    resids <- fitModel$predicted-fitModel$y # residuals
    write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
    predVals.pls.ad <- pred.ts
    Traind.pls= as.matrix(my.datf.train)
    Testd.pls = as.matrix(my.datf.test)
    mat.Traind.pls<- t(Traind.pls) %*%(Traind.pls)
    det.Traind.pls<- det(mat.Traind.pls)
    if(det.Traind.pls!=0){ # hat values only computable when X'X is invertible
      # leverage = diag(X (X'X)^-1 X'), evaluated on train and test matrices
      Hat.train = diag(Traind.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Traind.pls))
      Hat.test = diag(Testd.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Testd.pls))
      # Leverage / Hat values
      hat.fit <- Hat.test # hat values
      hat.fit.df <- as.data.frame(hat.fit) # hat data frame
      hat.mean <- mean(hat.fit) # mean hat values
      hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
      write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
      #THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
      thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
      hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever]) # points with high leverage
      write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
    }
    # PDF with diagnostic plots
    # --------------------------------------------------------------
    pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
    # par(mfrow = c(3, 4)) # all plots into one page!
    plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred") # plot 1
    plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred") # plot 2
    dotchart(as.matrix(FeatImp),main="Feature Importance") # plot 3
    # Fitted vs Residuals - plot 4
    plot(fitModel$predicted,resids,
         main="Fitted vs. Residuals for Fitted Model",
         xlab="Fitted", ylab="Residuals")
    abline(h = 0, lty = 2)
    # Leverage plots - plot 5
    if(det.Traind.pls!=0){
      plot(hat.fit, type = "h",
           main="Leverage for Fitted Model",
           xlab="Index", ylab="Hat")
      abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
    }
    # plot(FeatImp, top = components,main="Feature Importance") # ERROR !
    dev.off()
    # --------------------------------------------------------------
  }
  return(list(stat.values=my.stats, model=rf.fit)) # return a list with statistics and the full model
}
#----------------------------------------------------------------------------------------------------------------------
# SVMRFEreg: radial-kernel SVM regression with recursive feature elimination
# (caret::rfe driven by svmFuncsGradW) for one train/test split.
#   my.datf.train, my.datf.test == data frames; FIRST column is the dependent
#                                  variable, remaining columns = features
#   sCV    == caret resampling method: "repeatedcv" or "LOOCV"
#   iSplit == split number; also seeds the RNG for reproducibility
#   fDet   == if TRUE, append detailed results to outFile and write diagnostic
#             plots to <outFile>.<sCV>.split<iSplit>.pdf
#   cs,eps == candidate values for the SVM cost C and the epsilon tube
# Returns list(stat.values = named list of fit statistics, model = rfe object).
SVMRFEreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="",cs=c(1,5,10,15,20),eps=c(0.01,0.1,0.3)) {
  #===========================================
  # SVM-RFE
  #===========================================
  net.c = my.datf.train[,1] # dependent variable = first column of training set
  RegrMethod <- "svmRFE" # type of regression
  # Define the CV conditions
  ctrl<- trainControl(method=sCV, number=3,repeats=1,#number=10,repeats=10,
                      summaryFunction=defaultSummary,verboseIter = F)
  rfeCtr = rfeControl(functions=svmFuncsGradW,method="cv",number=10,repeats=10, saveDetails = T, verbose=T,rerank = T,allowParallel=T)#number=10,repeats=10,
  sigma = sigest (as.matrix(my.datf.train[,-1]))[2]
  # candidate subset sizes: powers of two up to sqrt(p)
  sizes = 2^(1:sqrt(ncol(my.datf.train)-1))
  tuneVars = expand.grid(.C=cs, .sigma=sigma, .epsilon=eps)
  # Train the model using only training set
  set.seed(iSplit)
  rfesvm.fit = rfe(as.matrix(my.datf.train[,-1]),net.c,sizes = sizes,rfeControl=rfeCtr,prob.model =F,method=svmRadialReg,tuneGrid = tuneVars,trControl=ctrl ,allowParallel=T)
  # warn when the selected tuning value sits on the edge of the search grid
  if(rfesvm.fit$fit$bestTune$C %in% cs[c(1,length(cs))])
    warning("Best fitted value of C=",rfesvm.fit$fit$bestTune$C," is a extreme value in your possible c values. You may want to reset your C paramenter options", call. = FALSE)
  # BUGFIX: the epsilon boundary check previously indexed eps with length(cs);
  # when cs and eps differ in length (defaults: 5 vs 3) it read NA instead of
  # the last epsilon candidate, so this warning could not fire correctly
  if(rfesvm.fit$fit$bestTune$epsilon %in% eps[c(1,length(eps))])
    warning("Best fitted value of eps=",rfesvm.fit$fit$bestTune$epsilon," is a extreme value in your possible epsilon values. You may want to reset your eps paramenter options", call. = FALSE)
  #------------------------------
  # Training RESULTS
  #------------------------------
  # results table columns 2..5 = RMSE, Rsquared, RMSESD, RsquaredSD (best subset)
  RMSE.tr <- rfesvm.fit$results[rfesvm.fit$results$Variables ==rfesvm.fit$bestSubset,2]
  R2.tr <- rfesvm.fit$results[rfesvm.fit$results$Variables ==rfesvm.fit$bestSubset,3]
  if (sCV == "repeatedcv"){ # if 10-fold CV
    RMSEsd.tr <- rfesvm.fit$results[rfesvm.fit$results$Variables ==rfesvm.fit$bestSubset,4]
    R2sd.tr <- rfesvm.fit$results[rfesvm.fit$results$Variables ==rfesvm.fit$bestSubset,5]
  }
  if (sCV == "LOOCV"){ # if LOOCV
    RMSEsd.tr <- 0 # formulas will be added later! TODOOOOOOOOOOOOOO
    R2sd.tr <- 0 # formulas will be added later!
  }
  #------------------------------------------------
  # RMSE & R^2, for train/test respectively
  #------------------------------------------------
  rfesvm.train.res <- getTrainPerf(rfesvm.fit$fit)
  rfesvm.test.res <- postResample(predict(rfesvm.fit,my.datf.test),my.datf.test[,1])
  #------------------------------------------------
  # Adj R2, Pearson correlation
  #------------------------------------------------
  pred.tr <- predict(rfesvm.fit,my.datf.train) # predicted Y for training
  pred.ts <- predict(rfesvm.fit,my.datf.test) # predicted Y for test
  noFeats.fit <- length(predictors(rfesvm.fit)) # no. of features from the fitted model
  Feats.fit <- paste(predictors(rfesvm.fit),collapse="+") # string with the features included in the fitted model
  ds.full <- rbind(my.datf.train,my.datf.test)
  pred.both <- predict(rfesvm.fit,ds.full) # predicted Y
  adjR2.tr <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
  adjR2.ts <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
  corP.ts <- cor(my.datf.test[,1],pred.ts)
  adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
  RMSE.both <- rmse.funct(ds.full[,1],pred.both)
  r2.both <- r2.funct(ds.full[,1],pred.both)
  # Generate the output list with statistics for each cross-validation type
  # -----------------------------------------------------------------------------
  my.stats <- list("RegrMeth" = RegrMethod,
                   "Split No" = as.numeric(iSplit), # from function param
                   "CVtype" = sCV, # from function param
                   "NoModelFeats" = as.numeric(noFeats.fit),
                   "ModelFeats" = Feats.fit,
                   "adjR2.tr" = as.numeric(adjR2.tr),
                   "RMSE.tr" = as.numeric(RMSE.tr),
                   "R2.tr" = as.numeric(R2.tr),
                   "RMSEsd.tr" = as.numeric(RMSEsd.tr),
                   "R2sd.tr" = as.numeric(R2sd.tr),
                   "adjR2.ts"= as.numeric(adjR2.ts),
                   "RMSE.ts" = as.numeric((rfesvm.test.res["RMSE"])),
                   "R2.ts" = as.numeric((rfesvm.test.res["Rsquared"])),
                   "corP.ts" = as.numeric(corP.ts),
                   "adjR2.both" = as.numeric(adjR2.both),
                   "RMSE.both" = as.numeric(RMSE.both),
                   "R2.both" = as.numeric(r2.both))
  #---------------------------------------------------------------------
  # Write to file DETAILS for each cross-validation method
  #---------------------------------------------------------------------
  if (fDet==T) { # if flag for details is T, print details about the results
    write("RRegr package | eNanoMapper", file=outFile, append=T)
    write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(predictors(rfesvm.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(rfesvm.train.res,file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(rfesvm.test.res, file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
    # Variable Importance (max top 20)
    FeatImp <- svmFuncsGradW$rank(rfesvm.fit$fit,as.matrix(ds.full[,-1]),ds.full[,1])
    FeatImp = FeatImp[order(FeatImp[,1],decreasing=T),]
    components = length(FeatImp) # default plot all feature importance (currently unused)
    if (length(FeatImp)>20){ # if the number of features is greater than 20, use only 20
      components = 20
    }
    # Append feature importance to output details
    AppendList2CSv(FeatImp,outFile)
    fitModel <- rfesvm.fit$fit
    # =============================================================================
    # Assessment of Applicability Domain (plot leverage)
    # =============================================================================
    # Residuals (predicted minus observed over train+test)
    resids <- pred.both-ds.full[,1] # residuals
    write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
    predVals.pls.ad <- pred.ts
    Traind.pls= as.matrix(my.datf.train)
    Testd.pls = as.matrix(my.datf.test)
    mat.Traind.pls<- t(Traind.pls) %*%(Traind.pls)
    det.Traind.pls<- det(mat.Traind.pls)
    if(det.Traind.pls!=0){ # hat values only computable when X'X is invertible
      # leverage = diag(X (X'X)^-1 X'), evaluated on train and test matrices
      Hat.train = diag(Traind.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Traind.pls))
      Hat.test = diag(Testd.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Testd.pls))
      # Leverage / Hat values
      hat.fit <- Hat.test # hat values
      hat.fit.df <- as.data.frame(hat.fit) # hat data frame
      hat.mean <- mean(hat.fit) # mean hat values
      hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
      write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
      #THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
      thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
      hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever]) # points with high leverage
      write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
    }
    # PDF with diagnostic plots
    # --------------------------------------------------------------
    pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
    # par(mfrow = c(3, 4)) # all plots into one page!
    plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred") # plot 1
    plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred") # plot 2
    fi = as.matrix(FeatImp[,1])
    rownames(fi)=FeatImp[,2]
    dotchart(fi,main="Feature Importance") # plot 3
    # Fitted vs Residuals - plot 4
    plot(pred.both,resids,
         main="Fitted vs. Residuals for Fitted Model",
         xlab="Fitted", ylab="Residuals")
    abline(h = 0, lty = 2)
    # Leverage plots - plot 5
    if(det.Traind.pls!=0){
      plot(hat.fit, type = "h",
           main="Leverage for Fitted Model",
           xlab="Index", ylab="Hat")
      abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
    }
    # plot(FeatImp, top = components,main="Feature Importance") # ERROR !
    dev.off()
    # --------------------------------------------------------------
  }
  return(list(stat.values=my.stats, model=rfesvm.fit)) # return a list with statistics and the full model
}
#----------------------------------------------------------------------------------------------------------------------
# RFRFEreg: random forest with recursive feature elimination (caret::rfe with
# rfFuncs) for one train/test split.
#   my.datf.train, my.datf.test == data frames; FIRST column is the dependent
#                                  variable, remaining columns = features
#   sCV    == caret resampling method: "repeatedcv" or "LOOCV"
#   iSplit == split number; also seeds the RNG for reproducibility
#   fDet   == if TRUE, append detailed results to outFile and write diagnostic
#             plots to <outFile>.<sCV>.split<iSplit>.pdf
# Returns list(stat.values = named list of fit statistics, model = rfe object).
RFRFEreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="") {
  #=============================
  # Random Forest-RFE
  #=============================
  net.c = my.datf.train[,1] # dependent variable = first column of training set
  RegrMethod <- "rfRFE" # type of regression
  # Define the CV conditions
  ctrl<- trainControl(method=sCV, number=5,repeats=1,#number=10,repeats=10,
                      summaryFunction=defaultSummary,verboseIter = F)
  rfeCtr = rfeControl(functions = rfFuncs,method="cv",number=10,repeats=10, saveDetails = T, verbose=T,rerank = F,allowParallel=T)#number=10,repeats=10,
  # candidate subset sizes: powers of two up to sqrt(p)
  sizes = 2^(1:sqrt(ncol(my.datf.train)-1))
  # Train the model using only training set
  set.seed(iSplit)
  input = as.matrix(my.datf.train[,2:ncol(my.datf.train)])
  rferf.fit = rfe(input,net.c,sizes = sizes,rfeControl=rfeCtr,prob.model =F,trControl=ctrl ,allowParallel=T, tuneGrid=expand.grid(.mtry=c(floor(sqrt(ncol(input))),ncol(input))), metric='RMSE')
  #------------------------------
  # Training RESULTS
  #------------------------------
  # results table columns 2..5 = RMSE, Rsquared, RMSESD, RsquaredSD (best subset)
  RMSE.tr <- rferf.fit$results[rferf.fit$results$Variables ==rferf.fit$bestSubset,2]
  R2.tr <- rferf.fit$results[rferf.fit$results$Variables ==rferf.fit$bestSubset,3]
  if (sCV == "repeatedcv"){ # if 10-fold CV
    RMSEsd.tr <- rferf.fit$results[rferf.fit$results$Variables ==rferf.fit$bestSubset,4]
    R2sd.tr <- rferf.fit$results[rferf.fit$results$Variables ==rferf.fit$bestSubset,5]
  }
  if (sCV == "LOOCV"){ # if LOOCV
    RMSEsd.tr <- 0 # formulas will be added later! TODOOOOOOOOOOOOOO
    R2sd.tr <- 0 # formulas will be added later!
  }
  #------------------------------------------------
  # RMSE & R^2, for train/test respectively
  #------------------------------------------------
  # NOTE(review): the locals below are named rfesvm.* (copied from SVMRFEreg)
  # but hold random-forest results here
  rfesvm.train.res <- rferf.fit$results[ rferf.fit$results$Variables== rferf.fit$bestSubset, c(2,3)]
  rfesvm.test.res <- postResample(predict(rferf.fit,my.datf.test),my.datf.test[,1])
  #------------------------------------------------
  # Adj R2, Pearson correlation
  #------------------------------------------------
  pred.tr <- predict(rferf.fit,my.datf.train) # predicted Y for training
  pred.ts <- predict(rferf.fit,my.datf.test) # predicted Y for test
  noFeats.fit <- length(predictors(rferf.fit)) # no. of features from the fitted model
  Feats.fit <- paste(predictors(rferf.fit),collapse="+") # string with the features included in the fitted model
  ds.full <- rbind(my.datf.train,my.datf.test)
  pred.both <- predict(rferf.fit,ds.full) # predicted Y
  adjR2.tr <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
  adjR2.ts <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
  corP.ts <- cor(my.datf.test[,1],pred.ts)
  adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
  RMSE.both <- rmse.funct(ds.full[,1],pred.both)
  r2.both <- r2.funct(ds.full[,1],pred.both)
  # Generate the output list with statistics for each cross-validation type
  # ----------------------------------------------------------------------------
  my.stats <- list("RegrMeth" = RegrMethod,
                   "Split No" = as.numeric(iSplit), # from function param
                   "CVtype" = sCV, # from function param
                   "NoModelFeats" = as.numeric(noFeats.fit),
                   "ModelFeats" = Feats.fit,
                   "adjR2.tr" = as.numeric(adjR2.tr),
                   "RMSE.tr" = as.numeric(RMSE.tr),
                   "R2.tr" = as.numeric(R2.tr),
                   "RMSEsd.tr" = as.numeric(RMSEsd.tr),
                   "R2sd.tr" = as.numeric(R2sd.tr),
                   "adjR2.ts"= as.numeric(adjR2.ts),
                   "RMSE.ts" = as.numeric((rfesvm.test.res["RMSE"])),
                   "R2.ts" = as.numeric((rfesvm.test.res["Rsquared"])),
                   "corP.ts" = as.numeric(corP.ts),
                   "adjR2.both" = as.numeric(adjR2.both),
                   "RMSE.both" = as.numeric(RMSE.both),
                   "R2.both" = as.numeric(r2.both))
  #---------------------------------------------------------------------
  # Write to file DETAILS for each cross-validation method
  #---------------------------------------------------------------------
  if (fDet==T) { # if flag for details is T, print details about the results
    write("RRegr package | eNanoMapper", file=outFile, append=T)
    write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(predictors(rferf.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(rfesvm.train.res,file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(rfesvm.test.res, file=outFile,append=T,sep=",",col.names=T,quote=F)
    write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
    # Variable Importance (max top 20)
    FeatImp <- importance(rferf.fit$fit, scale = T)
    FeatImp = FeatImp[order(FeatImp,decreasing=T),]
    components = length(FeatImp) # default plot all feature importance (currently unused)
    if (length(FeatImp)>20){ # if the number of features is greater than 20, use only 20
      components = 20
    }
    # Append feature importance to output details
    AppendList2CSv(FeatImp,outFile)
    fitModel <- rferf.fit$fit
    # =============================================================================
    # Assessment of Applicability Domain (plot leverage)
    # =============================================================================
    # Residuals (predicted minus observed over train+test)
    resids <- pred.both-ds.full[,1] # residuals
    write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
    write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
    predVals.pls.ad <- pred.ts
    Traind.pls= as.matrix(my.datf.train)
    Testd.pls = as.matrix(my.datf.test)
    mat.Traind.pls<- t(Traind.pls) %*%(Traind.pls)
    det.Traind.pls<- det(mat.Traind.pls)
    if(det.Traind.pls!=0){ # hat values only computable when X'X is invertible
      # leverage = diag(X (X'X)^-1 X'), evaluated on train and test matrices
      Hat.train = diag(Traind.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Traind.pls))
      Hat.test = diag(Testd.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Testd.pls))
      # Leverage / Hat values
      hat.fit <- Hat.test # hat values
      hat.fit.df <- as.data.frame(hat.fit) # hat data frame
      hat.mean <- mean(hat.fit) # mean hat values
      hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
      write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
      #THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
      thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
      hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever]) # points with high leverage
      write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
      write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
    }
    # PDF with diagnostic plots
    # --------------------------------------------------------------
    pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
    # par(mfrow = c(3, 4)) # all plots into one page!
    plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred") # plot 1
    plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred") # plot 2
    dotchart(FeatImp,main="Feature Importance") # plot 3
    # Fitted vs Residuals - plot 4
    plot(pred.both,resids,
         main="Fitted vs. Residuals for Fitted Model",
         xlab="Fitted", ylab="Residuals")
    abline(h = 0, lty = 2)
    # Leverage plots - plot 5
    if(det.Traind.pls!=0){
      plot(hat.fit, type = "h",
           main="Leverage for Fitted Model",
           xlab="Index", ylab="Hat")
      abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
    }
    # plot(FeatImp, top = components,main="Feature Importance") # ERROR !
    dev.off()
    # --------------------------------------------------------------
  }
  return(list(stat.values=my.stats, model=rferf.fit)) # return a list with statistics and the full model
}
findResamps.funct<- function(caret.obj){
  #=============================================================================
  # Count the resampling iterations stored in a caret train, rfe or sbf object
  #=============================================================================
  # caret.obj == fitted object of class train, rfe or sbf; its control$index
  #              component holds one entry per resample, so its length is the
  #              number of resamples.
  length(caret.obj$control$index)
}
impute.funct<- function(ds,FUN=mean){
  #=============================================================================
  # Impute missing values in each column of a matrix or data frame, replacing
  # the NAs of a column with a per-column summary statistic (mean by default).
  #=============================================================================
  # ds  == data.frame or matrix to be imputed (NA columns must accept FUN)
  # FUN == summary function taking na.rm=; its value replaces the column's NAs
  # returns ds with all NAs replaced; columns without NAs are left untouched
  # BUGFIX: the previous version referenced a misspelled local (`ds.imputI` vs
  # `ds.imputeI`), so it errored whenever columns had unequal NA counts (apply
  # returned a list) and imputed only the first NA column otherwise; it also
  # failed on input with no missing values.
  ind.na <- which(vapply(seq_len(ncol(ds)), function(j) anyNA(ds[, j]), logical(1)))
  for (j in ind.na) {
    miss <- is.na(ds[, j])
    ds[miss, j] <- FUN(ds[, j], na.rm = TRUE)
  }
  return(ds)
}
ENETreg <- function(my.datf.train,my.datf.test,sCV,iSplit=1,fDet=F,outFile="") {
#================================
# 8.4 ElasticNet Regression (caret/glmnet)
#================================
# Fits an elastic-net regression (via a custom caret model spec built on
# glmnet) on the training set, evaluates it on the test set and on the pooled
# data, and returns list(stat.values = <named list of statistics>,
#                        model       = <fitted caret object>).
#
# my.datf.train: training data frame; column 1 is the dependent variable.
# my.datf.test : test data frame with the same column layout.
# sCV          : caret resampling type ("repeatedcv" or "LOOCV").
# iSplit       : split number; also used as the RNG seed before train().
# fDet         : if TRUE, append detailed results to outFile and emit PDF plots.
# outFile      : path of the CSV details file (used only when fDet == TRUE).
#library(caret)
#library(glmnet)
# Load the custom caret model specification object 'glmnetModel' shipped
# inside the RRegrs package
load(system.file("models", "glmnetModel.RData", package = "RRegrs"))
net.c = my.datf.train[,1] # dependent variable is the first column in Training set
RegrMethod <- "glmnet" # type of regression
# Define the CV conditions
ctrl<- trainControl(method = sCV, number = 10,repeats = 10,verboseIter=F,#number=10,repeats=10,
summaryFunction = defaultSummary)
# NOTE(review): lambda is fixed at 99 in the grid, so tuning effectively
# searches over alpha only - confirm this is intended
tuneGrid=expand.grid(.alpha = seq(0.1,1,length=10),.lambda=99 )
# Train the model using only training set
set.seed(iSplit)
enet.fit<- train(net.c~.,data=my.datf.train,
method=glmnetModel, tuneLength = 20, trControl = ctrl,family="gaussian",
metric='RMSE',tuneGrid=tuneGrid)
# Index of the lambda in the glmnet path closest to the optimal one;
# NOTE(review): 'pos' is never used below - possible dead code
pos = which.min(abs(enet.fit$finalModel$lambda-enet.fit$finalModel$lambdaOpt))
#------------------------------
# Training RESULTS
#------------------------------
# NOTE(review): assumes columns 3..6 of enet.fit$results are
# RMSE, Rsquared, RMSESD, RsquaredSD - verify against the caret version in use
RMSE.tr <- enet.fit$results[,3]
R2.tr <- enet.fit$results[,4]
if (sCV == "repeatedcv"){ # if 10-fold CV
RMSEsd.tr <- enet.fit$results[,5]
R2sd.tr <- enet.fit$results[,6]
}
if (sCV == "LOOCV"){ # if LOOCV
# SD of the resampled metrics is not defined for LOOCV; placeholders for now
RMSEsd.tr <- 0 # formulas will be added later!
R2sd.tr <- 0 # formulas will be added later!
}
#------------------------------------------------
# RMSE & R^2, for train/test respectively
#------------------------------------------------
lm.train.res <- getTrainPerf(enet.fit)
lm.test.res <- postResample(predict(enet.fit,my.datf.test),my.datf.test[,1])
#------------------------------------------------
# Adj R2, Pearson correlation
#------------------------------------------------
pred.tr <- predict(enet.fit,my.datf.train) # predicted Y
pred.ts <- predict(enet.fit,my.datf.test) # predicted Y
noFeats.fit <- length(predictors(enet.fit)) # no. of features from the fitted model
Feats.fit <- paste(predictors(enet.fit),collapse="+") # string with the features included in the fitted model
# Statistics over the pooled train + test rows ("both")
ds.full <- rbind(my.datf.train,my.datf.test)
pred.both <- predict(enet.fit,ds.full) # predicted Y
adjR2.tr <- r2.adj.funct(my.datf.train[,1],pred.tr,noFeats.fit)
adjR2.ts <- r2.adj.funct(my.datf.test[,1],pred.ts,noFeats.fit)
corP.ts <- cor(my.datf.test[,1],pred.ts)
adjR2.both <- r2.adj.funct(ds.full[,1],pred.both,noFeats.fit)
RMSE.both <- rmse.funct(ds.full[,1],pred.both)
r2.both <- r2.funct(ds.full[,1],pred.both)
# Generate the output list with statistics for each cross-validation type
# (same field layout as the other RRegrs regression wrappers, so results can
# be appended row-wise into the shared dfRes accumulator)
# ------------------------------------------------------------------------------
my.stats <- list("RegrMeth" = RegrMethod,
"Split No" = as.numeric(iSplit), # from function param
"CVtype" = sCV, # from function param
"NoModelFeats" = as.numeric(noFeats.fit),
"ModelFeats" = Feats.fit,
"adjR2.tr" = as.numeric(adjR2.tr),
"RMSE.tr" = as.numeric(min(RMSE.tr)), # these 4 lines correspond to the min of RMSE.tr !!!
"R2.tr" = as.numeric(R2.tr[which.min(RMSE.tr)]),
"RMSEsd.tr" = as.numeric(RMSEsd.tr[which.min(RMSE.tr)]),
"R2sd.tr" = as.numeric(R2sd.tr[which.min(RMSE.tr)]),
"adjR2.ts"= as.numeric(adjR2.ts),
"RMSE.ts" = as.numeric((lm.test.res["RMSE"][[1]])),
"R2.ts" = as.numeric((lm.test.res["Rsquared"][[1]])),
"corP.ts" = as.numeric(corP.ts),
"adjR2.both" = as.numeric(adjR2.both),
"RMSE.both" = as.numeric(RMSE.both),
"R2.both" = as.numeric(r2.both))
#---------------------------------------------------------------------
# Write to file DETAILS for GLM for each cross-validation method
# (all writes below append to outFile, which accumulates across calls)
#---------------------------------------------------------------------
if (fDet==T) { # if flag for details if true, print details about any resut
write("RRegr package | eNanoMapper", file=outFile,append=T)
write.table(paste("Regression method: ", RegrMethod), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(paste("Split no.: ", iSplit), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(paste("CV type: ", sCV), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table("Training Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(summary(my.datf.train), file=outFile,append=T,sep=",",col.names=T,quote=F)
write.table("Test Set Summary: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(summary(my.datf.test), file=outFile,append=T,sep=",",col.names=T,quote=F)
write.table("Predictors: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(predictors(enet.fit), file=outFile,append=T,sep=",",col.names=T,quote=F)
write.table("Trainig Results: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
AppendList2CSv(lm.train.res,outFile)
#write.table(predictors(lm.train.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
write.table("Test Results: ", file=outFile,append=T,sep=",",col.names=F,quote=F)
AppendList2CSv(lm.test.res,outFile)
#write.table(predictors(lm.test.res), file=outFile,append=T,sep=",",col.names=T,quote=F)
write.table("Full Statistics: ", file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(my.stats, file=outFile,append=T,sep=",",col.names=T,quote=F)
# Variable Importance (max top 20)
FeatImp <- varImp(enet.fit, scale = F)
# NOTE(review): 'components' is computed but never used below - leftover
# from a commented-out varImp plot in the sibling method wrappers
components = length(FeatImp) # default plot all feature importance
if (length(FeatImp)>20){ # if the number of features is greater than 20, use only 20
components = 20
}
# Append feature importance to output details
AppendList2CSv(FeatImp,outFile)
# NOTE(review): 'fitModel' and 'predVals.pls.ad' below appear unused -
# presumably copied from the PLS variant of this wrapper
fitModel <- enet.fit$finalModel
# =============================================================================
# Assessment of Applicability Domain (plot leverage)
# =============================================================================
# Residuals
# NOTE(review): relies on residuals()/fitted() dispatching usefully on a
# caret 'train' object - confirm these return training-set residuals here
resids <- residuals(enet.fit) # residuals
write.table("Residuals of the fitted model: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(data.frame(resids), file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write residuals
predVals.pls.ad <- pred.ts
Traind.pls= as.matrix(my.datf.train)
Testd.pls = as.matrix(my.datf.test)
# Guard: leverage is only computed when X'X is invertible
mat.Traind.pls<- t(Traind.pls) %*%(Traind.pls)
det.Traind.pls<- det(mat.Traind.pls)
if(det.Traind.pls!=0){
# Hat values h_i = diag(X (X'X)^-1 X'); tol=1e-40 loosens solve()'s
# singularity check for near-singular X'X.
# NOTE(review): 'Hat.train' is computed but never used (only Hat.test is)
Hat.train = diag(Traind.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Traind.pls))
Hat.test = diag(Testd.pls %*% solve(t(Traind.pls) %*%(Traind.pls), tol=1e-40) %*% t(Testd.pls))
# Leverage / Hat values
hat.fit <- Hat.test # hat values
hat.fit.df <- as.data.frame(hat.fit) # hat data frame
hat.mean <- mean(hat.fit) # mean hat values
# Flag test points whose leverage exceeds 2x or 3x the mean hat value
hat.fit.df$warn <- ifelse(hat.fit.df[, 'hat.fit']>3*hat.mean, 'x3',ifelse(hat.fit.df[, 'hat.fit']>2*hat.mean, 'x2', '-' ))
write.table("Leverage output: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(paste("Mean of hat values: ", hat.mean), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table("Leverage / Hat values with warnings (X3 & X2 = values 3 & 2 times than hat mean): ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(hat.fit.df, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F) # write hat values and the levels X3, X2 (of hat mean)
#THRESHOLD values: 3m/n, where m is the number of parameters, and n number of observations
thresh.lever<- (3*(dim(my.datf.train)[2]-1))/dim(my.datf.train)[1] # leverage thresh
hat.problems<- data.frame(hat.fit[hat.fit>thresh.lever]) # points with high leverage
write.table(paste("Leverage Threshold: ", thresh.lever), file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table("Points with leverage > threshold: ",file=outFile,append=T,sep=",",col.names=F,row.names=F,quote=F)
write.table(hat.problems, file=outFile,append=T,sep=",",col.names=T,row.names=T, quote=F)
# Cook's distance ?
}
# Influence ?
# PDF plots
# One PDF per CV type and split, written next to the details CSV
# --------------------------------------------------------------
pdf(file=paste(outFile,".",sCV,".","split",iSplit,".pdf",sep=""))
plot(my.datf.train[,1],pred.tr,xlab="Yobs", ylab="Ypred", type="b", main="Train Yobs-Ypred")
plot(my.datf.test[,1], pred.ts,xlab="Yobs", ylab="Ypred", type="b", main="Test Yobs-Ypred")
dotchart(as.matrix(FeatImp$importance),main="Feature Importance")
# Fitted vs Residuals
plot(fitted(enet.fit),residuals(enet.fit),
main="Fitted vs. Residuals for Fitted Model",
xlab="Fitted", ylab="Residuals")
abline(h = 0, lty = 2)
# Leverage plots
if(det.Traind.pls!=0){
plot(hat.fit, type = "h",
main="Leverage for Fitted Model",
xlab="Index", ylab="Hat")
abline(h = thresh.lever, lty = 2, col="red") # leverage thresh
}
dev.off()
# --------------------------------------------------------------
}
return(list(stat.values=my.stats, model=enet.fit)) # return a list with statistics and the full model
}
###############################################################################################
# RRegrs MAIN FUNCTION
###############################################################################################
RRegrs <- function(DataFileName="ds.House.csv",DataFileSep=",",PathDataSet="DataResults",noCores=1,
ResAvgs="RRegsResAvgs.csv",ResBySplits="RRegrsResAllSplits.csv",ResBest="RRegrsResBest.csv",
fDet="T",fFilters="F",fScaling="T",fRemNear0Var="T",fRemCorr="T",
fLM="T",fGLM="T",fPLS="T",fLASSO="T",fSVRM="T",fNN="T",fRF="T",fRFRFE="T",fSVMRFE="T",fENET="T",
RFE_SVM_C="1;5;10;15;20",RFE_SVM_epsilon="0.01;0.1;0.3",
cutoff=0.9,iScaling=1,iScalCol=1,trainFrac=0.75,iSplitTimes=10,noYrand=100,
CVtypes="repeatedcv;LOOCV",NoNAValFile="ds.NoNA.csv",
No0NearVarFile="ds.No0Var.csv",ScaledFile="ds.scaled.csv",NoCorrFile="ds.scaled.NoCorrs.csv",
lmFile="LM.details.csv",glmFile="GLM.details.csv",plsFile="PLS.details.csv",
lassoFile="Lasso.details.csv",svrmFile="SVMRadial.details.csv",
nnFile="NN.details.csv",rfFile="RF.details.csv",rfrfeFile="RFRFE.details.csv",svmrfeFile="SVMRFE.details.csv",
enetFile="ENET.details.csv",fR2rule="T") { # input = file with all parameters
methodCount = 0;
if (fLM=="T") methodCount = methodCount + 1;
if (fGLM=="T") methodCount = methodCount + 1;
if (fPLS=="T") methodCount = methodCount + 1;
if (fLASSO=="T") methodCount = methodCount + 1;
if (fSVRM=="T") methodCount = methodCount + 1;
if (fNN=="T") methodCount = methodCount + 1;
if (fRF=="T") methodCount = methodCount + 1;
if (fRFRFE=="T") methodCount = methodCount + 1;
if (fSVMRFE=="T") methodCount = methodCount + 1;
if (fENET=="T") methodCount = methodCount + 1;
if (methodCount < 2) stop("You must select at least two modelling methods to compare.");
# fRBFdda="T", rbfDDAFile="RBF_DDA.details.csv",negThrStep=0.5
# Minimal use:
# RRegrs() # all default params
# RRegrs(DataFileName="MyDataSet.csv")
# RRegrs(DataFileName="MyDataSet.csv",PathDataSet="MyResultsFolder")
# Default: all methods, no feature selection
ptmTot <- proc.time() # total time
# ----------------------------------
# Parallel support
# ----------------------------------
if (noCores==0 | noCores>1){ # all available CPU cores or specific no of cores (if noCores = 1, no parallel support!)
#noCoresSys=as.numeric(Sys.getenv('NUMBER_OF_PROCESSORS')) # automatically detected no. of CPU cores
#library(parallel)
noCoresSys=detectCores()
if (noCores==0 | noCores>noCoresSys){ # all available CPU cores or the specific cores is greater than the available ones
noCores=noCoresSys # use the available no of cores
}
# parallel for Linux or Mac:
# ------------------------------------------
if ( Sys.info() [['sysname']] == "Linux" | Sys.info() [['sysname']] == "Darwin" ){
#library(doMC)
}
# ------------------------------------------
# parallel for windows:
# ------------------------------------------
if (Sys.info()[['sysname']]=="Windows"){
#library(doSNOW)
#library(foreach)
}
}
#==========================================================================================
# (1) Load dataset and parameters
#==========================================================================================
# (1.1) PARAMETERS
#------------------------------------------
# Write parameter file
#------------------------------------------
dir.create(PathDataSet, showWarnings = FALSE)
ParamFile <- file.path(PathDataSet, "Parameters.csv") # file to output the parameters
# define a data frame with all parameters of the current calculation
Params.df = data.frame(RRegrs.Parameters="DataFileName",Parameter.Value=as.character(DataFileName),Description="Input dataset file (Step 1)") # data frame with used parameters
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="PathDataSet",Parameter.Value=as.character(PathDataSet),Description="Working folder for all input and output files"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="noCores",Parameter.Value=as.character(noCores),Description="No of CPU cores (0=all available; 1=no parallel; >1 = specific no. of cores)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="ResAvgs",Parameter.Value=as.character(ResAvgs),Description="Output file averaged statistics (by splits) for each regression method"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="ResBySplits",Parameter.Value=as.character(ResBySplits),Description="Output file statistics for each splitting and each regression method"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="ResBest",Parameter.Value=as.character(ResBest),Description="Output file statistics for the best model"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fDet",Parameter.Value=as.character(fDet),Description="If calculate and print details for all the functions"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fFilters",Parameter.Value=as.character(fFilters),Description="If run Filters (Step 2)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fScaling",Parameter.Value=as.character(fScaling),Description="If Scaling dataset (Step 3)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fRemNear0Var",Parameter.Value=as.character(fRemNear0Var),Description="If run Removal of near zero variance columns (Step 4)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fRemCorr",Parameter.Value=as.character(fRemCorr),Description="If run Removal of correlated columns (Step 5)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fLM",Parameter.Value=as.character(fLM),Description="If run LM (Step 8.1)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fGLM",Parameter.Value=as.character(fGLM),Description="If run GLM (Step 8.2)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fPLS",Parameter.Value=as.character(fPLS),Description="If run PLS (Step 8.3)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fLASSO",Parameter.Value=as.character(fLASSO),Description="If run LASSO (Step 8.4)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fENET",Parameter.Value=as.character(fENET),Description="If run ENET (Step 8.5)"))
# Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fRBFdda",Parameter.Value=as.character(fRBFdda),Description="If run RBF DDA (Step 8.6)"))
# Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="negThrStep",Parameter.Value=as.character(negThrStep),Description="Negative Threshold step parameter for RBF DDA (Step 8.6)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fSVRM",Parameter.Value=as.character(fSVRM),Description="If run svmRadial.RMSE (Step 8.7)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fNN",Parameter.Value=as.character(fNN),Description="If run Neural Networks (Step 8.8)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fRF",Parameter.Value=as.character(fRF),Description="If run Random Forest (Step 8.9)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fRFRFE",Parameter.Value=as.character(fRFRFE),Description="If run Random Forest RFE (Step 8.10)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fSVMRFE",Parameter.Value=as.character(fSVMRFE),Description="If run Random Forest (Step 8.11)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="RFE_SVM_C",Parameter.Value=as.character(RFE_SVM_C),Description="Values of C for SVM RFE"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="RFE_SVM_epsilon",Parameter.Value=as.character(RFE_SVM_epsilon),Description="Values of epsilon for SVM RFE"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="cutoff",Parameter.Value=as.character(cutoff),Description="Cut-off for correlated features (default = 0.9)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="iScaling",Parameter.Value=as.character(iScaling),Description="Type of scaling: 1 = normalization; 2 = standardization; 3 = other; any other: no scaling"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="iScalCol",Parameter.Value=as.character(iScalCol),Description="Scaling columns: 1 = including dependent variable; 2: only all the features"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="trainFrac",Parameter.Value=as.character(trainFrac),Description="Fraction of training set from the entire dataset; the rest of dataset is the test set"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="iSplitTimes",Parameter.Value=as.character(iSplitTimes),Description="Number of splitting the dataset into train and test (default = 10)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="noYrand",Parameter.Value=as.character(noYrand),Description="Number of Y-Randomization (default = 100)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="CVtypes",Parameter.Value=as.character(CVtypes),Description="Cross-validation types: 10-CV (repeatedcv) and LOOCV"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="NoNAValFile",Parameter.Value=as.character(NoNAValFile),Description="Dataset without NA values (if fDet is True)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="No0NearVarFile",Parameter.Value=as.character(No0NearVarFile),Description="Dataset without zero near features from Step 3 (if fDet is True)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="ScaledFile",Parameter.Value=as.character(ScaledFile),Description="Scaled dataset file from Step 4 (if fDet is True)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="NoCorrFile",Parameter.Value=as.character(NoCorrFile),Description="Dataset after correction removal in Step 5 (if fDet is True)"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="lmFile",Parameter.Value=as.character(lmFile),Description="LM output file with details"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="glmFile",Parameter.Value=as.character(glmFile),Description="GLM output file with details"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="plsFile",Parameter.Value=as.character(plsFile),Description="PLS output file with details"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="lassoFile",Parameter.Value=as.character(lassoFile),Description="Lasso output file with details"))
#Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="rbfDDAFile",Parameter.Value=as.character(rbfDDAFile),Description="RBF DDA output file with details"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="svrmFile",Parameter.Value=as.character(svrmFile),Description="SVM Radial output file with details"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="nnFile",Parameter.Value=as.character(nnFile),Description="NN output file with details"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="rfFile",Parameter.Value=as.character(rfFile),Description="RF output"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="rfrfeFile",Parameter.Value=as.character(rfrfeFile),Description="RF-RFE output"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="svmrfeFile",Parameter.Value=as.character(svmrfeFile),Description="SVM-RFE output"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="enetFile",Parameter.Value=as.character(enetFile),Description="ENET output"))
Params.df = rbind(Params.df,data.frame(RRegrs.Parameters="fR2rule",Parameter.Value=as.character(fR2rule),Description="Best model rule: R2 (default = T) or adjR2 (F)"))
write.csv(Params.df,file=ParamFile,row.names=F,quote=F) # write parameters to a CSV in the working folder
# Get calculation parameters
fDet = as.logical(fDet) # flag to calculate and print details for all the functions
fFilters = as.logical(fFilters) # flag to apply filters (2)
fScaling = as.logical(fScaling) # flag for dataset Scaling (3)
fRemNear0Var = as.logical(fRemNear0Var) # flag for Removal of near zero variance columns (4)
fRemCorr = as.logical(fRemCorr) # flag for Removal of correlated columns (5)
cutoff = as.numeric(as.character(cutoff)) # cut off for correlated features
fLM = as.logical(fLM) # flag to run LM (8.1)
fGLM = as.logical(fGLM) # flag to run GLM (8.2)
fPLS = as.logical(fPLS) # flag to run PLS (8.3)
fLASSO = as.logical(fLASSO) # flag to run LASSO (8.4)
fenet = as.logical(fENET) # flag to run ElasticNet (8.5)
#fRBFdda = as.logical(fRBFdda) # flat to run RBF DDA (8.6)
fSVRM = as.logical(fSVRM) # flat to run svmRadial (8.7)
fNN = as.logical(fNN) # flat to run NN (8.8)
fRF = as.logical(fRF) # flag to run RandomForest (8.9)
fRFRFE = as.logical(fRFRFE) # flag to run RF-RFE (8.10)
fSVMRFE = as.logical(fSVMRFE) # flag to run SVM RFE (8.11)
rfe_SVM_param_c = strsplit(as.character(RFE_SVM_C),";")[[1]] # values of C for SVM RFE
rfe_SVM_param_eps = strsplit(as.character(RFE_SVM_epsilon),";")[[1]] # values of epsilon for SVM RFE
# negThrStep
fR2rule = as.logical(fR2rule) # flag to decide order rule for the best model (True for R2 and False for adjR2, default = True)
# ----------------------------------------------------------------------------------------
trainFrac = as.numeric(as.character(trainFrac)) # the fraction of training set from the entire dataset; trainFrac = the rest of dataset, the test set
CVtypes = strsplit(as.character(CVtypes),";")[[1]] # types of cross-validation methods
CVtypes2 = c("repeatedcv") # for complex methods we run only 10-fold CV even the user is using other parameters!
# Generate path + file name = original dataset
if (file.exists(DataFileName)) { # is it a full path already?
inFile <- DataFileName
} else {
inFile <- file.path(PathDataSet, DataFileName)
}
sDescription=paste("=======================================================================================================",
"RRegrs - R Regression Models",
"Get the best regression models for one dataset using R caret methods", "eNanoMapper.net","AUTHORS:",
"Georgia Tsiliki: ChemEng - NTUA, Greece, g_tsiliki@hotmail.com",
"Cristian R. Munteanu: RNASA-IMEDIR, University of A Coruna, Spain, muntisa@gmail.com",
"Jose A. Seoane: Stanford Cancer Institute, USA, seoane@stanford.edu",
"Carlos Fernandez-Lozano: RNASA-IMEDIR, University of A Coruna, Spain, carlos.fernandez@udc.es",
"Haralambos Sarimveis: ChemEng - NTUA, Greece, hsarimv@central.ntua.gr",
"Egon Willighagen: BiGCaT - Maastricht University, The Netherlands, egon.willighagen@gmail.com",
"=======================================================================================================",sep="\n")
cat(sDescription) # print package header information
# -----------------------------------
# (1.2) Load the ORIGINAL DATASET
# -----------------------------------
cat("\n-> Loading original dataset ...\n") # it can contain errors, correlations, near zero variance columns
cat(" ---> ",inFile,"\n")
ds.dat0 <- read.csv(inFile,header=T,sep=DataFileSep) # original dataset frame
# resolving the text to number errors for future calculations
ds.indx<- colnames(ds.dat0)[2:dim(ds.dat0)[2]] # FEATURE names (no dependent variable)
ds.dat1<- ds.dat0[1:dim(ds.dat0)[1],2:dim(ds.dat0)[2]] # dataset as columns
ds.dat1<- apply(ds.dat1,1,function(x)as.numeric(as.character(x))) # dataset as row vectors to be used with caret!!!
# dependent variable
net.c<- ds.dat0[,1]
net.c<- as.numeric(as.character(net.c)) # values
# full ds frame with training and test
ds<- as.data.frame(cbind(net.c,t(ds.dat1)))
#========================================================
# (2) FILTERS
# (it will be implemented in the future versions)
#========================================================
# 2.1 Outlier removal
# 2.2 Custom filter (percentage threshold)
# 2.3 Processing of missing values - use of preProcess();
# caret employs knnImpute algorithm to impute values from a neighborhood of k
# TO BE IMPLEMENTED
# -----------------------------------------------------------------------
# (2) Remove NA values
# -----------------------------------------------------------------------
if(length(which(is.na(ds)==TRUE))!=0){
cat("-> Removal of NA values ...\n")
outFile <- file.path(PathDataSet,NoNAValFile) # the same folder as input
# get the ds without NA values-- currently use the default function (mean)
ds <- impute.funct(ds)
if (fDet == TRUE){ # write as details the corrected ds file
write.csv(ds, outFile,row.names=F, quote=F)}
}
# -----------------------------------------------------------------------
# (3) Remove near zero variance columns
# -----------------------------------------------------------------------
if (fRemNear0Var==T) {
cat("-> Removal of near zero variance columns ...\n")
outFile <- file.path(PathDataSet,No0NearVarFile) # the same folder as input
# get the ds without near zero cols
ds <- cbind("net.c" = ds[,1],RemNear0VarCols(ds[,2:dim(ds)[2]],fDet,outFile))
# use df without Y (predicted values), reconstruct the ds
# inputs: ds, flag for details, output file
}
# -----------------------------------------------------------------------
# (4) Scaling dataset: normalization (default), standardization, other
# -----------------------------------------------------------------------
if (fScaling==T) {
cat("-> Scaling original dataset ...\n")
outFile <- file.path(PathDataSet,ScaledFile) # the same folder as input
# run fuction for scaling input dataset file
ds <- ScalingDS(ds,iScaling,iScalCol,fDet,outFile)
# use df without Y (predicted values), reconstruct the ds
# inputs: ds, type of scaling, flag for details, starting column, output file
}
# -----------------------------------------------------------------------
# (5) Remove correlated features
# -----------------------------------------------------------------------
if (fRemCorr==T) {
cat("-> Removing correlated features ...\n")
outFile <- file.path(PathDataSet,NoCorrFile) # the same folder as the input
# run function to remove the correlations between the features
ds <- cbind("net.c" = ds[,1],RemCorrs(ds[,2:dim(ds)[2]],fDet,cutoff,outFile))
}
# Check data has at least 5 columns for meaningful analysis
if(dim(ds)[2] < 5 || dim(ds)[1] < 3){
print(c(dim(ds)))
stop(paste("Your corrected data set has dimensions:", paste(as.character(dim(ds)),collapse=', '),". Try repeating analysis without filtering options.",sep=''))
}
# print no of CPU cores used for calculation
# noCoresSys=as.numeric(Sys.getenv('NUMBER_OF_PROCESSORS')) # automatically detected no. of CPU cores
#library(parallel)
noCoresSys=detectCores()
if (noCores==0){ cat(" -> CPU Cores = ",noCoresSys,"(only complex methods)\n") }
else{ cat(" -> CPU Cores = ",noCores, "(only complex methods)\n") }
#=========================================================================================================
# Steps 6 - 8 will be repeated 10 times for reporting each result and average (iSplitTimes = 10, default)
#=========================================================================================================
# Initialize the list with the statistics results; the same HEADER as the function output
dfRes <- list("RegrMeth" = NULL,
"Split No" = NULL,
"CVtype" = NULL,
"NoModelFeats" = NULL,
"ModelFeats" = NULL,
"adjR2.tr" = NULL,
"RMSE.tr" = NULL,
"R2.tr" = NULL,
"RMSEsd.tr" = NULL,
"R2sd.tr" = NULL,
"adjR2.ts"= NULL,
"RMSE.ts" = NULL,
"R2.ts" = NULL,
"corP.ts" = NULL,
"adjR2.both" = NULL,
"RMSE.both" = NULL,
"R2.both" = NULL)
#-------------------------------------------------------------------------------------------------
for (i in 1:iSplitTimes) { # Step splitting number = i
# -----------------------------------------------------------------------
# (6) Dataset split: Training and Test sets
# -----------------------------------------------------------------------
cat("-> Splitting dataset in Training and Test sets ...\n")
cat(paste("--> Split No.",i,"from",iSplitTimes,"\n"))
# Initialize the list with the statistical models for all types of CV; per iSplitTimes
dfMod <- sapply(CVtypes,function(x) NULL)
for(cv in 1:length(CVtypes)){class(dfMod[[cv]])<- 'list'; names(dfMod)[[cv]]<- CVtypes[cv]}
mod.ind<- rep(1,length(CVtypes)) # dummy variable to indicate the index of each new dfMod entry (per CVtype)
iSeed=i # to reapeat the ds splitting, different values of seed will be used
dsList <- DsSplit(ds,trainFrac,fDet,PathDataSet,iSeed) # return a list with 2 datasets = dsList$train, dsList$test
# get train and test from the resulted list
ds.train<- dsList$train
ds.test <- dsList$test
# -----------------------------------------------------------------------
# (7) Feature selection
# -----------------------------------------------------------------------
# TO BE IMPLEMENTED
# -----------------------------------------------------------------------
# (8) REGRESSION METHODS
# -----------------------------------------------------------------------
# ----------------------------------
# Parallel support - registed cores
# ----------------------------------
if (noCores!=1){
# ------------------------------------------
# parallel for Linux or Mac:
# ------------------------------------------
if (Sys.info()[['sysname']]=="Linux" | Sys.info()[['sysname']]=="Darwin"){
registerDoMC(cores = noCores) # CPU cores
}
# ------------------------------------------
# parallel for windows:
# ------------------------------------------
if (Sys.info()[['sysname']]=="Windows"){
cl<-makeCluster(noCores,outfile="")
registerDoSNOW(cl)
}
}
# --------------------------------------------
# 8.1. Basic LM : default
# --------------------------------------------
if (fLM==T) { # if LM was selected, run the method
outFile.LM <- file.path(PathDataSet,lmFile) # the same folder as the input is used for the output
cat("-> LM : Linear Multi-regression ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes)) { # there is no CV but it will be implemented in the future!!!
cat(" -->",CVtypes[cv],"\n")
ptmLM <- proc.time()
lm.model <- LMreg(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.LM) # run GLM for each CV and regr method
print(proc.time() - ptmLM) # print running time
my.stats.LM <- lm.model$stat.values # stat values
my.model.LM <- lm.model$model # model
#-------------------------------------------------------
# Add output from GLM to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.LM, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.LM)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.LM
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
} # end LM
# -----------------------------------------------------------------------------------------
# (8.2) GLM based on AIC regression - Generalized Linear Model with Stepwise Feature Selection
# -----------------------------------------------------------------------------------------
if (fGLM==T) { # if GLM was selected, run the method
outFile.GLM <- file.path(PathDataSet,glmFile) # the same folder as the input is used for the output
cat("-> GLM : Generalized Linear Model stepwise - based on AIC ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes)) {
cat(" -->",CVtypes[cv],"\n")
ptmGLM <- proc.time()
glm.model <- GLMreg(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.GLM) # run GLM for each CV and regr method
print(proc.time() - ptmGLM) # print running time
my.stats.GLM <- glm.model$stat.values # stat values
my.model.GLM <- glm.model$model # model
#my.stats.split <- c(my.stats.dsInfo,my.stats.GLM) # merge the ds info with statistics results for each Cv & reg method
#-------------------------------------------------------
# Add output from GLM to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.GLM, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.GLM)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.GLM
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
} # end GLM
# --------------------------------------------
# 8.3. PLS
# --------------------------------------------
if (fPLS==T) { # if PLS was selected, run the method
outFile.PLS <- file.path(PathDataSet,plsFile) # the same folder as the input is used for the output
cat("-> PLS : Partial Least Squares Regression ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes)) {
cat(" -->",CVtypes[cv],"\n")
ptmPLS <- proc.time()
pls.model <- PLSreg(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.PLS) # run PLS for each CV and regr method
print(proc.time() - ptmPLS) # print running time
my.stats.PLS <- pls.model$stat.values
my.model.PLS <- pls.model$model
#-------------------------------------------------------
# Add output from PLS to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.PLS, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.PLS)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.PLS
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
} # end PLS
#cat("-> PLS Wrapper Feature Selection ...\n")
## For each type of CV do all the statistics
## -----------------------------------------------------
#for (cv in 1:length(CVtypes)) {
# cat(" -->",CVtypes[cv],"\n")
# ptmPLSw <- proc.time()
# pls.model <- PLSregWSel(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.PLS) # run PLSw for each CV and regr method
# print(proc.time() - ptmPLSw) # print running time
# my.stats.PLS <- pls.model$stat.values
# my.model.PLS <- pls.model$model
# #-------------------------------------------------------
# # Add output from PLSw to the list of results
# #-------------------------------------------------------
# # List of results for each splitting, CV type & regression method
# dfRes = mapply(c, my.stats.PLS, dfRes, SIMPLIFY=F)
#} # end CV types
# --------------------------------------------
# 8.4. LASSO regression
# --------------------------------------------
if (fLASSO==T) { # if LASSO was selected, run the method
outFile.LASSO <- file.path(PathDataSet,lassoFile) # the same folder as the input is used for the output
cat("-> Lasso ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes2)) {
cat(" -->",CVtypes2[cv],"\n")
ptmLASSO <- proc.time()
lasso.model <- LASSOreg(ds.train,ds.test,CVtypes2[cv],i,fDet,outFile.LASSO) # run LASSO for each CV and regr method
print(proc.time() - ptmLASSO) # print running time
my.stats.LASSO <- lasso.model$stat.values
my.model.LASSO <- lasso.model$model
#-------------------------------------------------------
# Add output from Lasso to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.LASSO, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.LASSO)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.LASSO
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
} # end Lasso
# --------------------------------------------
# 8.5. Elastic Net regression
# --------------------------------------------
if (fenet==T) { # if ENET was selected, run the method
outFile.ENET <- file.path(PathDataSet,enetFile) # the same folder as the input is used for the output
cat("-> ENET : Elastic Nets ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes)) {
cat(" -->",CVtypes[cv],"\n")
ptmENET <- proc.time()
enet.model <- ENETreg(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.ENET) # run elastic net for each CV and regr method
print(proc.time() - ptmENET) # print running time
my.stats.ENET <- enet.model$stat.values
my.model.ENET <- enet.model$model
#-------------------------------------------------------
# Add output from ENET to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.ENET, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.ENET)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.ENET
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
} # end enet
# # ----------------------------------------------------------------
# # 8.6. RBF network with the DDA algorithm regression (caret)
# # ----------------------------------------------------------------
# if (fRBFdda==T) { # if RBF-DDA was selected, run the method
# outFile.rbfDDA <- file.path(PathDataSet,rbfDDAFile) # the same folder as the input is used for the output
#
# cat("-> RBF-DDA : Radial Basis Functions - Dynamic Decay Adjustment ...\n")
# # For each type of CV do all the statistics
# # -----------------------------------------------------
# for (cv in 1:length(CVtypes2)) {
# cat(" -->",CVtypes2[cv],"\n")
# ptmRBF_DDA <- proc.time()
# rbfDDA.model <- RBF_DDAreg(ds.train,ds.test,CVtypes2[cv],negThrStep,i,fDet,outFile.rbfDDA) # run rbfDDA for each CV and regr method
# print(proc.time() - ptmRBF_DDA) # print running time
#
# my.stats.rbfDDA <- rbfDDA.model$stat.values
# my.model.rbfDDA <- rbfDDA.model$model
# #-------------------------------------------------------
# # Add output from RBF-DDA to the list of results
# #-------------------------------------------------------
# # List of results for each splitting, CV type & regression method
# dfRes = mapply(c, my.stats.rbfDDA, dfRes, SIMPLIFY=F)
#
# # List of models for each splitting, CV type & regression method
# names1 <- strsplit(deparse(quote(my.model.rbfDDA)),'my.model.')[[1]][2]
# dfMod[[cv]]$names1 <- my.model.rbfDDA
# names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
# mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
# } # end CV types
# } # end rbfDDA
# --------------------------------------------
# 8.7. SVM radial regression
# --------------------------------------------
if (fSVRM==T) { # if SVM Radial was selected, run the method
outFile.SVRM <- file.path(PathDataSet,svrmFile) # the same folder as the input is used for the output
cat("-> SVM radial : Support vector machine using radial functions ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes)) {
cat(" -->",CVtypes[cv],"\n")
ptmSVRM <- proc.time()
SVRM.model <- SVRMreg(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.SVRM,rfe_SVM_param_c) # run SVRM Radial for each CV and regr method
print(proc.time() - ptmSVRM) # print running time
my.stats.SVRM <- SVRM.model$stat.values
my.model.SVRM <- SVRM.model$model
#-------------------------------------------------------
# Add output from SVM Radial to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.SVRM, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.SVRM)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.SVRM
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
} # end SVRM
# --------------------------------------------
# 8.8. Neural Networks Regression
# --------------------------------------------
if (fNN==T) { # if NNet was selected, run the method
outFile.NN <- file.path(PathDataSet,nnFile) # the same folder as the input is used for the output
cat("-> NN : Neural Networks ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes)) {
cat(" -->",CVtypes[cv],"\n")
ptmNN <- proc.time()
nn.model <- NNreg(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.NN) # run NNet for each CV and regr method
print(proc.time() - ptmNN) # print running time
my.stats.NN <- nn.model$stat.values
my.model.NN <- nn.model$model
#-------------------------------------------------------
# Add output from NNet to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.NN, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.NN)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.NN
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
} # end NNet
# --------------------------------------------
# 8.9. Random Forest Regression (RF)
# --------------------------------------------
if (fRF==T) { # if RF was selected, run the method
outFile.RF <- file.path(PathDataSet,rfFile) # the same folder as the input is used for the output
cat("-> RF : Random Forest ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes2)) {
cat(" -->",CVtypes2[cv],"\n")
ptmRF <- proc.time()
rf.model <- RFreg(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.RF) # run RF for each CV and regr method
print(proc.time() - ptmRF) # print running time
my.stats.RF <- rf.model$stat.values
my.model.RF <- rf.model$model
#-------------------------------------------------------
# Add output from RF to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.RF, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.RF)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.RF
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
}
# --------------------------------------------
# 8.10. RF-RFE: Random Forest Regression Recursive Feature Elimination
# --------------------------------------------
if (fRFRFE==T) { # if RF-RFE was selected, run the method
outFile.RFRFE <- file.path(PathDataSet,rfrfeFile) # the same folder as the input is used for the output
cat("-> RF-RFE: Random Forest-Recursive Feature Elimination ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes2)) {
cat(" -->",CVtypes2[cv],"\n")
ptmRFRFE <- proc.time()
rfrfe.model <- RFRFEreg(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.RFRFE) # run RF for each CV and regr method
print(proc.time() - ptmRFRFE) # print running time
my.stats.RFRFE <- rfrfe.model$stat.values
my.model.RFRFE <- rfrfe.model$model
#-------------------------------------------------------
# Add output from RF to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.RFRFE, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.RFRFE)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.RFRFE
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
} # end RF-REF
# --------------------------------------------
# 8.11. SVM-RFE
# --------------------------------------------
if (fSVMRFE==T) { # if SVM-RFE was selected, run the method
outFile.SVMRFE <- file.path(PathDataSet,svmrfeFile) # the same folder as the input is used for the output
cat("-> SVM-RFE : Support Vector Machines Recursive Feature Elimination ...\n")
# For each type of CV do all the statistics
# -----------------------------------------------------
for (cv in 1:length(CVtypes2)) {
cat(" -->",CVtypes2[cv],"\n")
ptmSVMRFE <- proc.time()
svmrfe.model <- SVMRFEreg(ds.train,ds.test,CVtypes[cv],i,fDet,outFile.SVMRFE,rfe_SVM_param_c,rfe_SVM_param_eps) # run SVM RFEet for each CV and regr method
print(proc.time() - ptmSVMRFE) # print running time
my.stats.SVMRFE <- svmrfe.model$stat.values
my.model.SVMRFE <- svmrfe.model$model
#-------------------------------------------------------
# Add output from SVM RFE to the list of results
#-------------------------------------------------------
# List of results for each splitting, CV type & regression method
dfRes = mapply(c, my.stats.SVMRFE, dfRes, SIMPLIFY=F)
# List of models for each splitting, CV type & regression method
names1 <- strsplit(deparse(quote(my.model.SVMRFE)),'my.model.')[[1]][2]
dfMod[[cv]]$names1 <- my.model.SVMRFE
names(dfMod[[cv]])[mod.ind[cv]] <- names1[1]
mod.ind[cv] <- mod.ind[cv] +1 # update mod.ind indicator variable
} # end CV types
} # end SVM RFE
# Kill parallel server
# ------------------------
if (noCores!=1){
# ------------------------------------------
# parallel for windows:
# ------------------------------------------
if (Sys.info()[['sysname']]=="Windows"){ # clean the memory!
stopCluster(cl)
}
}
# ------------------------
# END OF REGRESSION METHODS/FUNCTIONS
# -----------------------------------------------------------------------
# (8.final) Produce comparison plots amongst models
# -----------------------------------------------------------------------
for(cv in 1:length(CVtypes)){
dfMod.n<- dfMod[[cv]]# keep only models with the same number of resamples
dfMod.ind<- unlist(lapply(dfMod[[cv]],findResamps.funct))
dfMod.ind.d<- which(duplicated(dfMod.ind)=='TRUE')[1]
dfMod.in<- which(dfMod.ind!=dfMod.ind[dfMod.ind.d])
dfMod.flag<- 0 # flag to indicate that dfMod consists of models with different resamples
if(is.na(dfMod.ind.d)!= TRUE){if(length(dfMod.in)!=0){dfMod.n<- dfMod.n[-dfMod.in]}}
else{dfMod.flag<- 1}
if(CVtypes[cv]!='LOOCV' && length(dfMod.n)>=2 && dfMod.flag!=1){
cat("-> Comparisons plots for multiple regression methods ...\n")
resamps <- resamples(dfMod.n)#,modelNames=names(dfMod[[cv]]))
# calculate their differences in terms of R2 and RMSE values
difValues <- diff(resamps)
#summary(difValues)
#plot different models in terms of R2 adn RMSE values in the training set
pdf(file=paste(PathDataSet,"/ModelsComp.","iSplits.",i,".pdf",sep=""))
print(bwplot(resamps, layout = c(2, 1),main=paste('Resampling results on the training set',' (data split ',i,')',sep='')))
dev.off()
#plot differences of models in terms of R2 adn RMSE values in the training set
pdf(file=paste(PathDataSet,"/DifModels.R2.","iSplits.",i,".pdf",sep=""))
print(dotplot(difValues,metric='Rsquared',main=paste('Models` differences on the training set',' (data split ',i,')',sep='')))
dev.off()
pdf(file=paste(PathDataSet,"/DifModels.RMSE.","iSplits.",i,".pdf",sep=""))
print(dotplot(difValues,metric='RMSE',main=paste('Models` differences on the training set',' (data split ',i,')',sep='')))
dev.off()
}
}
} # END SPLITTING
#------------------------------------------------------------------------------
# 9. Results for all splittings (not ordered)
#-------------------------------------------------------------------------------
cat("-> Results for all splitings ...\n")
df.res <- data.frame(dfRes) # including possible NA or -inf
# print(df.res) # print all results as data frame
# Writing the statistics into output files: one with detailed splits, other with only averages
# File names includin paths for the statistics outputs (only averages and split detailed; +averages [to be implemented])
ResBySplitsF <- file.path(PathDataSet,ResBySplits) # the main output file with statistics for each split
write.csv(df.res, file = ResBySplitsF) # write statistics data frame into a CSV output file
#-------------------------------------------------------------------------------------
# Averaged values of the results by each Regression Method & CV type
#-------------------------------------------------------------------------------------
cat("-> Averaged statistics ...\n")
ResAvgsF <- file.path(PathDataSet,ResAvgs) # the main output file with averaged statistics for each regression method
#library(data.table)
# clear ds for NA and -inf
invisible(lapply(names(df.res),function(.name) set(df.res, which(is.infinite(df.res[[.name]])), j = .name,value =NA))) # -inf
df.res <- na.omit(df.res) # NA
dt.res <- data.table(df.res) # convert data frame into data table (for sorting abd averaging)
# MEANS for each Regression Method & CV type
#--------------------------------------------------------------------------------------------------------------
# means for all CV types, not only 10CV
dt.mean <- dt.res[,list(adjR2.tr.Avg=mean(adjR2.tr),RMSE.tr.Avg=mean(RMSE.tr),R2.tr.Avg=mean(R2.tr),
RMSEsd.tr.Avg=mean(RMSEsd.tr),R2sd.tr.Avg=mean(R2sd.tr),adjR2.ts.Avg=mean(adjR2.ts),
RMSE.ts.Avg=mean(RMSE.ts),R2.ts.Avg=mean(R2.ts),corP.ts.Avg=mean(corP.ts),
adjR2.both.Avg=mean(adjR2.both),RMSE.both.Avg=mean(RMSE.both),
R2.both.Avg=mean(R2.both),NoModelFeats.Avg=round(mean(NoModelFeats),1)),by="RegrMeth,CVtype"]
dt.mean <- dt.mean[dt.mean$CVtype=="repeatedcv",] # keep only the 10CV results to be used to find the best model
if (fR2rule==T) { # R2 rule for the best model (default)
dt.mean.ord <- dt.mean[order(-rank(R2.ts.Avg))] # descendent order the averages by R2.ts.Avg
} else { # adjR2 rule for the best model
dt.mean.ord <- dt.mean[order(-rank(adjR2.ts.Avg))] # descendent order the averages by adjR2.ts.Avg
}
# Write averages descendent ordered by R2.ts.Avg / adjR2.ts.Avg
#-------------------------------------------------------------------------------
write.csv(data.frame(dt.mean.ord), file = ResAvgsF) # write statistics data frame into a CSV output file
#------------------------------------------------------------------------------
# 10. Best model selection - detailed statistics
#-------------------------------------------------------------------------------
cat("-> Best model analysis ...\n")
# Algorithm to verify similar R2 / adjR2 values
# -> from the best ones (+/- 0.05 of R2 / adjR2), chose the one with less variables, after that the one with min RMSE
best.dt <- dt.mean.ord[1] # the best model (R2.ts / adjR2.ts) should be the first value in the descendent ordered results
# best.reg <- paste(best.dt$RegrMeth,collapse="") # best regression method
# Best model rule: R2 or adjR2 for ordering
# +/- 0.05 R2 / adjR2ts --> min(RMSE)
# best.adjR2.ts is used for R2 or adjR2 rule
if (fR2rule==T) { # R2 rule for the best model (default)
best.adjR2.ts <- as.numeric(data.frame(best.dt)[,10]) # best R2.ts avgs
} else { # adjR2 rule for the best model
best.adjR2.ts <- as.numeric(data.frame(best.dt)[,8]) # best adjR2.ts avgs
}
# best model with R2 or adjR2.ts +/- 0.05 and min of RMSE for Avgs
if (fR2rule==T) { # R2 rule for the best model (default)
best.dt <- dt.mean.ord[R2.ts.Avg %between% c(best.adjR2.ts-0.05,best.adjR2.ts+0.05)][which.min(RMSE.ts.Avg)]
} else { # adjR2 rule for the best model
best.dt <- dt.mean.ord[adjR2.ts.Avg %between% c(best.adjR2.ts-0.05,best.adjR2.ts+0.05)][which.min(RMSE.ts.Avg)]
}
best.reg <- paste(best.dt$RegrMeth,collapse="") # best regrression method
cat(" -> Method:",best.reg,"\n")
# best model non-averaged ? no. of features
# -----------------------------------------------
# best.method <- dt.res[CVtype == "repeatedcv"][RegrMeth == best.reg] # best modes corresponding with the avg best values
# best.method.mean <- mean(as.numeric(data.frame(best.method)[,11])) # best adjR2.ts)
# dt.res[CVtype == "repeatedcv"][RegrMeth == best.reg][NoModelFeats == min(NoModelFeats)][RMSE.ts == min(RMSE.ts)]
#----------------------------------------------------------
# 11. Best model detailed statistics
#----------------------------------------------------------
# Write the best model statistics
ResBestF <- file.path(PathDataSet,ResBest)
write.table("Averaged values for all spits: ",file=ResBestF,append=T,sep=",",col.names=F,row.names=F,quote=F)
# write.csv(data.frame(best.dt), file = ResBestF) # write statistics data frame into a CSV output file
write.table(data.frame(best.dt), file=ResBestF,append=T,sep=",",col.names=T,quote=F) # write statistics data frame into a CSV output file
# Use the last split for dataset (ds.train & ds.test) ! (or chose other one?)
# Run the caret function with the method from the best method, for one training-test split only
# and append the details in the best model output file
# ----------------------------------
# Parallel support - registed cores
# ----------------------------------
if (noCores!=1){
# ------------------------------------------
# parallel for Linux or Mac:
# ------------------------------------------
if (Sys.info()[['sysname']]=="Linux" | Sys.info()[['sysname']]=="Darwin"){
registerDoMC(cores = noCores) # CPU cores
}
# ------------------------------------------
# parallel for windows:
# ------------------------------------------
if (Sys.info()[['sysname']]=="Windows"){
cl<-makeCluster(noCores,outfile="")
registerDoSNOW(cl)
}
}
if (best.reg=="lm") {
my.stats.reg <- LMreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF)$stat.values # run GLM for each CV and regr method
}
if (best.reg=="glmStepAIC") {
my.stats.reg <- GLMreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF)$stat.values # run GLM for each CV and regr method
}
if (best.reg=="pls") {
my.stats.reg <- PLSreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF)$stat.values # run PLS for each CV and regr method
}
if (best.reg=="lasso.RMSE") {
my.stats.reg <- LASSOreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF)$stat.values # run LASSO for each CV and regr method
}
if (best.reg=="svmRadial") {
my.stats.reg <- SVRMreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF,rfe_SVM_param_c)$stat.values # run SVRM Radial for each CV and regr method
}
if (best.reg=="nnet") {
my.stats.reg <- NNreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF)$stat.values # run NNet for each CV and regr method
}
if (best.reg=="rf") {
my.stats.reg <- RFreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF)$stat.values # run NNet for each CV and regr method
}
if (best.reg=="svmRFE") {
my.stats.reg <- SVMRFEreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF,rfe_SVM_param_c,rfe_SVM_param_eps)$stat.values # run NNet for each CV and regr method
}
if (best.reg=="glmnet") {
my.stats.reg <- ENETreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF)$stat.values # run ENET for each CV and regr method
}
if (best.reg=="rfRFE") {
my.stats.reg <- RFRFEreg(ds.train,ds.test,"repeatedcv",i,T,ResBestF)$stat.values # run RF RFE for each CV and regr method
}
# if (best.reg=="rbfDDA") {
# my.stats.reg <- RBF_DDAreg(ds.train,ds.test,"repeatedcv",negThrStep,i,T,ResBestF)$stat.values # run rbfDDA for each CV and regr method
# }
#--------------------------------------------------------------------------------------
# 12. Test best model with test dataset + Y randomization
#--------------------------------------------------------------------------------------
# ratios Yrand R2 - Best model R2 / Best model R2
R2Diff.Yrand = NA
if (noYrand > 0) {
R2Diff.Yrand <- Yrandom(
ds,trainFrac,best.reg,my.stats.reg$R2.ts,noYrand,ResBestF,rfe_SVM_param_c,rfe_SVM_param_eps
) # mean value of ratio (deatails are printed to output file)
}
# (ex param: negThrStep)
# Kill parallel server
# ------------------------
if (noCores!=1){
# ------------------------------------------
# parallel for windows:
# ------------------------------------------
if (Sys.info()[['sysname']]=="Windows"){ # clean the memory!
stopCluster(cl)
}
}
# Assessment of Applicability Domain (plot leverage) was included as details in each regression function
# Print total execution time
cat("\nRRegrs total execution time\n")
print(proc.time() - ptmTot) # print running time
#----------------------------
# Indicate main result files
#----------------------------
cat("\nMAIN RESULT FILES\n")
cat("======================\n")
cat("Statistics for all data set splittings/methods/CV types:", ResBySplits,"\n")
cat("Averages by method/CV type:",ResAvgsF,"\n")
cat("Best model statistics:",ResBestF,"\n")
cat("Best model plots:",paste(ResBestF,".repeatedcv.split",i,".pdf",sep=""),"\n")
if (noYrand > 0) cat("Best model Y-randomization plot:",paste(ResBestF,".Yrand.Hist.pdf",sep=""),"\n")
cat("\n* if you choose Details, additional CSV and PDF files will be create for each method.\n")
return(list(BestMethod=best.reg,BestStats=my.stats.reg, Models=dfMod))
# return a list with 3 items: the name of the best method, the statistics for the best model, the list with all the fitted models (including the best one)
}
|
c5e1e973e6c37dc21b7d0deb4bbb56d00cdab6a5 | 6c4614d5dbeab14096cacd28bbd6b45233beb251 | /man/crrstep.Rd | 970c465035a7a890c95df134278f0333c2875f43 | [] | no_license | cran/crrstep | 5991d63547bdc74d1c89c2a8c3fec8fb31051729 | b2cd71cb883fd9fe3c4cb4212531f286e846e428 | refs/heads/master | 2023-08-31T07:29:54.906821 | 2023-08-22T21:40:10 | 2023-08-22T22:30:22 | 17,695,307 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,584 | rd | crrstep.Rd | \name{crrstep}
\alias{crrstep}
\title{Stepwise regression for competing risks regression}
\description{
Performs forward and backward stepwise regression for the Fine & Gray regression model in competing risks. Procedure uses AIC, BIC and BICcr as selection criteria. BICcr has a penalty of k = log(n*), where n* is the number of Type I events.
}
\usage{
crrstep(formula, scope.min = ~1, etype, ..., subset,
data, direction = c("backward", "forward"),
criterion = c("AIC", "BICcr", "BIC"), crr.object = FALSE,
trace = TRUE, steps = 100)
}
\arguments{
\item{formula}{
formula object where LHS is failure time and RHS is linear predictors; intercept `1' should always be included.
}
\item{scope.min}{
formula object denoting final model for backward selection and starting model for forward selection.
}
\item{etype}{
integer variable that denotes type of failure for each person.
}
\item{\dots}{
variables passed to the \code{crr} function; two key arguments are \emph{failcode} and \emph{cencode}; see the \code{Details} section below.
}
\item{subset}{
subset of data to be used for model selection.
}
\item{data}{
data frame containing all the variables. Only complete cases are used in the analysis, i.e., rows of the data frame with missing values in any of the predictors are deleted.
}
\item{direction}{
forward or backward direction for model selection.
}
\item{criterion}{
selection criterion; default is AIC. BIC uses log(n) as penalty, where `n' is total sample size, and BICcr uses log(n*) as the penalty where n* is the number of primary events.
}
\item{crr.object}{
logical variable indicating whether to return final `crr' object.
}
\item{trace}{
logical indicating whether to display stepwise model selection process.
}
\item{steps}{
maximum number of steps in stepwise selection.
}
}
\details{
Based on the existing code of \code{stepAIC} in the MASS package. Variables passed to the \code{crr} function include two key arguments: \emph{failcode} and \emph{cencode}. \emph{failcode} is an integer value that denotes the primary failure event, and \emph{cencode} is an integer denoting the censoring event.
}
\value{
\item{variables}{Variables in the final model}
\item{coefficients}{The estimated coefficients of the variables}
\item{std.errors}{Standard errors of the estimated coefficients}
\item{log.lik}{The partial log-likelihood of the model}
}
\references{
Fine, J. P. and Gray, R. J. (1999). A proportional hazards model for
the subdistribution of a competing risk. Journal of the American Statistical
Association.
Volinsky, C. T. and Raftery, A. E. (2000). Bayesian information criterion
for censored survival models. Biometrics.
Kuk, D. and Varadhan, R. (2013). Model selection in competing risks regression. \emph{Statistics in Medicine}.
}
\author{
Ravi Varadhan & Deborah Kuk.
}
\seealso{
\code{\link{crr}}
}
\examples{
set.seed(123)
n <- 500
ftime <- rexp(n)
fstatus <- sample(0:2,n,replace=TRUE)
cov1 <- matrix(runif(5*n),nrow=n)
x61 <- as.factor(sample(3, size=n, rep=TRUE))
x71 <- as.factor(sample(5, size=n, rep = TRUE))
cov1 <- cbind(cov1, x61, x71)
dimnames(cov1)[[2]] <- c('x1','x2','x3','x4','x5', 'x6', 'x7')
formula1 <- ftime ~ x1 + x2 + x3 + x4 + x5 + as.factor(x6) + as.factor(x7)
crrstep(formula1, , fstatus, data = as.data.frame(cov1), direction = "backward", criterion = "BIC")
ans2 <- crrstep(formula1, , fstatus, data = as.data.frame(cov1), direction = "forward",
failcode=2, criterion = "AIC")
ans2
}
\keyword{stepwise}
\keyword{competing risks}
|
c2949dbf0f4a96aab76fa3ec59464571c914d0b7 | cb010e06c0809347d2d0a46caaef7eae185bd1d1 | /xmsPANDA_v1.1.4_test/R/workflow1/graphical_options_wf1.R | 1190b6d04126408eb54ccc7a8688485609c1aac1 | [] | no_license | chunyuma/xmsPANDA_shinyapp | c9259f3a2253b6a2a217aedb514df4e3f8ba8331 | cc0e02e7262592fdd84505baab3243f10cd64515 | refs/heads/master | 2022-10-25T01:10:39.164309 | 2020-06-18T19:58:27 | 2020-06-18T19:58:27 | 273,328,403 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,805 | r | graphical_options_wf1.R | library(shiny)
# UI fragment for the "graphical options" section of workflow 1 in the xmsPANDA
# Shiny app. Defines a single fluidRow of plot-appearance controls; every input
# id carries the "_wf1" suffix so the server code can address workflow-1 widgets.
graphical_options_wf1<-fluidRow(
tags$div(
id="maindiv",
# Heatmap color palette choice and PCA point size (numeric, constrained to 1-20).
column(width=12,
column(width=6,selectInput(width="350px","heatmap_color_scheme_wf1","Heatmap color palettes/scheme:",c("redblue","yellowblue","redyellowgreen","yellowwhiteblue","redwhiteblue","topo","heat"))),
column(width=6,numericInput(width="350px","pca_cex_val_wf1", "Size of points on PCA plots (1-20 limit):", 4, min = 1, max = 20))
),
# PCA ellipse settings: confidence level (0-1, default 0.95) and whether to draw ellipses.
column(width=12,
column(width=6,numericInput(width="350px","ellipse_conf_level_wf1", "Confidence interval for PCA ellipses (0-1 limit):", 0.95, min = 0, max = 1)),
column(width=6,selectInput(width="350px","pca_ellipse_wf1","Should ellipse be plotted on PCA plots?",c("TRUE","FALSE")))
),
# Boxplot style: rendering engine ("ggplot" or "simple") and optional jittered points.
column(width=12,
column(width=6,selectInput(width="350px","boxplot_type_wf1", "Boxplot type:", c("ggplot","simple"))),
column(width=6,selectInput(width="350px","boxplot_jitter_wf1", "Add jitter to boxplots:", c("TRUE","FALSE")))
),
# Time-series lineplots and alphabetical x-axis class ordering
# (shiny preselects the first listed choice of each selectInput).
column(width=12,
column(width=6,selectInput(width="350px","timeseries_lineplots_wf1", "Plot time series lineplots (for time-series data):",c("FALSE","TRUE"))),
column(width=6,selectInput(width="350px","alphabetical_order_wf1", "Plot classes on the x-axis in alphabetical order:",c("TRUE","FALSE")))
),
# Y-axis label text plus a button that opens the "More arguments" modal below.
column(width=12,
column(width=6,textInput(width="350px","ylabel_text_wf1", "Label for y-axis in boxplots, barplots, and lineplots:","Abundance",placeholder="Default: Abundance")),
column(width=6,style='margin-top:25px;',actionButton("graph_argumentbutton_wf1", "More arguments")),
# Modal dialog wired to the "graph_argumentbutton_wf1" button above.
# NOTE(review): bsModal() comes from the shinyBS package, which is not loaded
# by the library(shiny) call in this file -- presumably attached elsewhere in
# the app; TODO confirm.
bsModal("graph_argument_modal", "More arguments for generating graphs", "graph_argumentbutton_wf1", size = "large",
tags$div(
width=12,
style="height:200px",
# Free-text color themes for each plot family; all default to "journal".
column(width=6,textInput(width="350px","boxplot_color_theme_wf1", "Color theme or options for boxplots:","journal",placeholder="Default: journal")),
column(width=6,textInput(width="350px","barplot_color_theme_wf1", "Color theme or options for barplots:","journal",placeholder="Default: journal")),
column(width=6,textInput(width="350px","sample_color_theme_wf1", "Color theme or options for samples:","journal",placeholder="Default: journal")),
column(width=6,textInput(width="350px","lineplot_color_theme_wf1", "Color theme or options for lineplots:","journal",placeholder="Default: journal")),
column(width=12,tags$br()),
column(width=12,tags$p("Note: the common options for above arguments include heat, rainbow, red, grey57."))
)
)
)
)
)
|
bf7efbfdc04ccc2b778f40c888bf4af803ac58c3 | f6035f5d08ea7bda4bd31c909cde4a79070c4752 | /man/tree.surrogate.Rd | 32807d793ae2f9ecb7b2dd93dd4e3087686b72d3 | [
"MIT"
] | permissive | spark-lin/iml | 5394d54b1e7219d0c0c802fa7acd53126ba304bf | 1b845671011e92915da4245c6d754985c6f3caa8 | refs/heads/master | 2021-04-28T01:08:22.308473 | 2018-02-17T13:14:39 | 2018-02-17T13:14:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,668 | rd | tree.surrogate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree-surrogate.r
\name{tree.surrogate}
\alias{tree.surrogate}
\title{Decision tree surrogate model}
\usage{
tree.surrogate(object, X, sample.size = 100, class = NULL, maxdepth = 2,
tree.args = NULL, ...)
}
\arguments{
\item{object}{The machine learning model. Different types are allowed.
Recommended are mlr WrappedModel and caret train objects. The \code{object} can also be
a function that predicts the outcome given features or anything with an S3 predict function,
like an object from class \code{lm}.}
\item{X}{data.frame with the data for the prediction model}
\item{sample.size}{The number of instances to be sampled from X.}
\item{class}{In case of classification, class specifies the class for which to predict the probability.
By default the multiclass classification is done.}
\item{maxdepth}{The maximum depth of the tree. Default is 2.}
\item{tree.args}{A list with further arguments for \code{ctree}}
\item{...}{Further arguments for the prediction method.}
}
\value{
A TreeSurrogate object (R6). Its methods and variables can be accessed with the \code{$}-operator:
\item{tree}{the fitted tree of class \code{party}. See also \link[partykit]{ctree}.}
\item{maxdepth}{the maximal tree depth set by the user.}
\item{data()}{method to extract the results of the tree.
Returns the sampled feature X together with the leaf node information (columns ..node and ..path)
and the predicted \eqn{\hat{y}} for tree and machine learning model (columns starting with ..y.hat).}
\item{plot()}{method to plot the leaf nodes of the surrogate decision tree. See \link{plot.TreeSurrogate}}
\item{predict()}{method to predict new data with the tree. See also \link{predict.TreeSurrogate}}
\item{\code{run()}}{[internal] method to run the interpretability method. Use \code{obj$run(force = TRUE)} to force a rerun.}
General R6 methods
\item{\code{clone()}}{[internal] method to clone the R6 object.}
\item{\code{initialize()}}{[internal] method to initialize the R6 object.}
}
\description{
tree.surrogate() fits a decision tree on the predictions of a machine learning model to make it interpretable.
}
\details{
A conditional inference tree is fitted on the predicted \eqn{\hat{y}} from the machine learning model and the data \eqn{X}.
The \code{partykit} package and function are used to fit the tree.
By default a tree of maximum depth of 2 is fitted to improve interpretability.
}
\examples{
# Fit a Random Forest on the Boston housing data set
library("randomForest")
data("Boston", package = "MASS")
mod = randomForest(medv ~ ., data = Boston, ntree = 50)
# Fit a decision tree as a surrogate for the whole random forest
dt = tree.surrogate(mod, Boston[-which(names(Boston) == 'medv')], 200)
# Plot the resulting leaf nodes
plot(dt)
# Use the tree to predict new data
predict(dt, Boston[1:10,])
# Extract the results
dat = dt$data()
head(dat)
# It also works for classification
mod = randomForest(Species ~ ., data = iris, ntree = 50)
# Fit a decision tree as a surrogate for the whole random forest
X = iris[-which(names(iris) == 'Species')]
dt = tree.surrogate(mod, X, 200, predict.args = list(type = 'prob'), maxdepth=2, class=3)
# Plot the resulting leaf nodes
plot(dt)
# If you want to visualise the tree directly:
plot(dt$tree)
# Use the tree to predict new data
set.seed(42)
iris.sample = X[sample(1:nrow(X), 10),]
predict(dt, iris.sample)
predict(dt, iris.sample, type = 'class')
# Extract the dataset
dat = dt$data()
head(dat)
}
\seealso{
\link{predict.TreeSurrogate}
\link{plot.TreeSurrogate}
For the tree implementation
\link[partykit]{ctree}
}
|
83d09f998d88e3a6f443a55e29d57a9a2f1ab1f7 | 09e5b42813d3c184aab2b461c75faee5fbfb2c7b | /PublicFunctions/Function02_DESeq2_PreProcessing.R | 2365826716fc4cc3ef4618a47a4b1d5d296cac44 | [
"MIT"
] | permissive | wong-ziyi/Code4RNA-seq | f0df587840d4d82a20da527b51369a4975fcaccb | 61b001e0fccb370bfbc7e8a8f1472c377823bb6a | refs/heads/master | 2021-06-20T13:04:25.999024 | 2021-06-14T07:47:50 | 2021-06-14T07:47:50 | 215,089,793 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,478 | r | Function02_DESeq2_PreProcessing.R | P2_DESeq2_PreProcessing<-function(){
if(!file.exists("00.Resource//Annotation.Rdata")){
stop("no such file: Annotation.Rdata")
}
if(!file.exists("00.Results//Parameters.Rdata")){
stop("no such file: Parameters.Rdata")
}
if(!file.exists("00.Results//SalmonRawData.Rdata")){
stop("no such file: almonRawData.Rdata")
}
#|----DESeq2 linear model Pre-possesing----|####
#|----DESeq2 linear model----|####
load("00.Results//Parameters.Rdata")
load("00.Results//SalmonRawData.Rdata")
load("00.Resource//Annotation.Rdata")
DEGs_dds <- DESeqDataSetFromMatrix(
countData = counts(RUV_set),
colData = pData(RUV_set), #cbind(pData(RUV_set),Load_ExDesign[,Par_FaN])
design = ~ W_1 + RUV_x #W_1 + CellType*Loading*Drug
) #attr(DEGs_dds, "modelMatrixType") View(attr(DEGs_dds, "modelMatrix"))
if(Par_Test=="none"){
DEGs_dds <- DESeq(
DEGs_dds, test = "LRT", reduced = as.formula("~ W_1"), betaPrior=FALSE
#minReplicatesForReplace=Par_Rep
)
}else if(Par_Test=="Wald"){
DEGs_dds <- DESeq(
DEGs_dds, test = "LRT", reduced = as.formula("~ W_1"), betaPrior=FALSE
#minReplicatesForReplace=Par_Rep
)
}else if(Par_Test=="LRT"){
DEGs_dds <- DESeq(
DEGs_dds, test = "LRT", reduced = as.formula("~ W_1"), betaPrior=FALSE
#minReplicatesForReplace=Par_Rep
)
}
#
  #|----Getting Normalized Read Counts (NorCounts)----|####
  # Normalized counts from DESeq2, joined (on gene-ID row names) with the
  # effective gene lengths stored in Txi$length.
  temp<-as.data.frame(counts(DEGs_dds, normalize = TRUE))
  DEGs_NorCounts<-merge(
    temp,
    Txi$length[rownames(Txi$length)%in%rownames(temp),],
    by='row.names', all=TRUE
  )
  # merge() returns the row names as column 1; restore them and drop it.
  rownames(DEGs_NorCounts)<-DEGs_NorCounts[,1]
  DEGs_NorCounts<-DEGs_NorCounts[,-1]
  # First half of the columns = normalized counts, second half = effective
  # lengths; names combine the RUV_x labels with replicate indices 1..Par_Rep.
  colnames(DEGs_NorCounts)<-c(paste("NorCounts", RUV_x, c(1:Par_Rep), sep="_"),
                              paste("EffLength",RUV_x, c(1:Par_Rep), sep="_"))
  #
  #|----Transcript Million Mapped reads (log2TPM)----|####
  # Length-normalize: counts (first half) divided by the matching effective
  # lengths (second half).
  DEGs_log2TPM<-DEGs_NorCounts[,1:(ncol(DEGs_NorCounts)/2)]/
    DEGs_NorCounts[,(ncol(DEGs_NorCounts)/2+1):ncol(DEGs_NorCounts)]
  # Per-sample scaling factors (column sums / 1e6) used as the TPM denominator.
  Par_a<-(colSums(DEGs_log2TPM)/1000000)
  # Row-by-row log2(TPM) on a doSNOW cluster with a Tk progress bar.
  # NOTE(review): the foreach iterator uses 1:nrow(); with a zero-row matrix
  # this would misbehave (seq_len() would be safer) -- confirm upstream
  # guarantees at least one gene.
  Par_cl<-makeCluster(detectCores())
  registerDoSNOW(Par_cl)
  Par_pb <- tkProgressBar("Parallel task", min=1, max=length(rownames(DEGs_log2TPM)))
  progress <- function(n) setTkProgressBar(Par_pb, n)
  Par_opts<-list(progress = progress)
  temp<-foreach(i=1:nrow(DEGs_log2TPM), .combine=rbind,
                .options.snow=Par_opts, .packages = "tcltk") %dopar% {
                  log2(DEGs_log2TPM[i,1:(ncol(DEGs_NorCounts)/2)]/Par_a)
                }
  # Tear down the progress bar and the worker cluster, then keep the result.
  close(Par_pb)
  stopCluster(Par_cl)
  DEGs_log2TPM<-temp
  rm(temp, Par_a, Par_cl, Par_opts, Par_pb)
  #|____File with log2TPM only____|####
  # log2(0) yields -Inf; reset those cells to 0. (The comparison against the
  # *string* "-Inf" works via implicit coercion; numeric -Inf would be cleaner.)
  DEGs_log2TPM[DEGs_log2TPM=="-Inf"]<-0
  colnames(DEGs_log2TPM)<-paste("log2TPM", RUV_x, c(1:Par_Rep), sep="_")
  # Join log2TPM with the normalized counts / effective lengths ...
  DEGs_Out_TPM<-merge(DEGs_log2TPM[rownames(DEGs_log2TPM)%in%rownames(DEGs_NorCounts),],
                      DEGs_NorCounts, by="row.names", all=TRUE)
  rownames(DEGs_Out_TPM)<-DEGs_Out_TPM[,1]
  DEGs_Out_TPM<-DEGs_Out_TPM[,-1]
  # ... then prepend the gene annotation (Anno) columns.
  DEGs_Out_TPM<-merge(Anno[rownames(Anno)%in%rownames(DEGs_Out_TPM),],
                      DEGs_Out_TPM, by="row.names", all=TRUE)
  rownames(DEGs_Out_TPM)<-DEGs_Out_TPM[,1]
  DEGs_Out_TPM<-DEGs_Out_TPM[,-1]
  #
#|----Xlsx output----|####
n1<-length(RUV_x)
n2<-length(levels(RUV_x))
xlsx<-createWorkbook()
addWorksheet(xlsx,"ReadsCount")
writeDataTable(xlsx,"ReadsCount",DEGs_Out_TPM[,1:4],startCol = 1,startRow = 2)
writeData(xlsx,"ReadsCount",DEGs_Out_TPM[,-(1:4)],startCol = 5,startRow = 2)
setColWidths(xlsx, "ReadsCount", cols=1, widths = 20, hidden = rep(FALSE,length(cols)), ignoreMergedCells = FALSE)
setColWidths(xlsx, "ReadsCount", cols=4, widths = 14, hidden = rep(FALSE,length(cols)), ignoreMergedCells = FALSE)
mergeCells(xlsx,"ReadsCount", cols=1:4, rows=1)
writeData(xlsx, "ReadsCount", "Basic Information", startCol=1, startRow=1)
style<-createStyle(fgFill="#ABBEDE", halign="LEFT", border="bottom", textDecoration="Bold")
addStyle(xlsx, "ReadsCount", style=style, cols=1, rows=1)
mergeCells(xlsx,"ReadsCount", cols=5:(4+n1), rows=1)
writeData(xlsx, "ReadsCount", "Log2TPM", startCol=5, startRow=1)
style<-createStyle(fgFill="#C7D3E9", halign="CENTER", border="bottom", textDecoration="Bold")
addStyle(xlsx, "ReadsCount", style=style, cols=5, rows=1)
mergeCells(xlsx,"ReadsCount", cols=(5+n1):(4+2*n1), rows=1)
writeData(xlsx, "ReadsCount", "Normalized Raw Reads Count", startCol=(5+n1), startRow=1)
style<-createStyle(fgFill="#E3E9F4", halign="CENTER", border="bottom", textDecoration="Bold")
addStyle(xlsx, "ReadsCount", style=style, cols=(5+n1), rows=1)
mergeCells(xlsx,"ReadsCount", cols=(5+2*n1):(4+3*n1), rows=1)
writeData(xlsx, "ReadsCount", "Effective gene length from Salmon software", startCol=(5+2*n1), startRow=1)
style<-createStyle(fgFill="#E9DDC7", halign="CENTER", border="bottom", textDecoration="Bold")
addStyle(xlsx, "ReadsCount", style=style, cols=(5+2*n1), rows=1)
freezePane(xlsx, "ReadsCount", firstActiveRow = 3, firstActiveCol = 5, firstRow = FALSE, firstCol = FALSE)
saveWorkbook(
xlsx,
file = file.path(
"02.TableOutput",
"00.DEGs_Out_TPM.xlsx"
),overwrite = TRUE
)
#|----Finishing----|####
save(DEGs_dds,DEGs_log2TPM,DEGs_NorCounts,DEGs_Out_TPM,file = "00.Results\\DESeq2_Processing.Rdata")
} |
a31be4c653d6881733aa4bf196cb3e1d8689e8bb | 888bc7297bce0524768099a664dbb8cdb9ce6795 | /inst/extdata/downloader.R | 6cee9e2bead2e12bef6fa0c9d52a6abad2a3591e | [
"MIT"
] | permissive | chinhungtseng/wtotariffcrawler | b9da6814b5c1f47ff9ed5d5d7b5d06bb41935b86 | db8f4f8a85c897ea4fd69e94f12b7a5eb398ebd3 | refs/heads/master | 2020-12-08T14:28:51.613865 | 2020-01-14T02:50:01 | 2020-01-14T02:50:01 | 233,005,191 | 0 | 0 | NOASSERTION | 2020-01-13T05:16:56 | 2020-01-10T08:49:54 | R | UTF-8 | R | false | false | 187 | r | downloader.R | countries <- country_pool()
# Walk the country table row by row, downloading each importer/exporter
# tariff report with a one-hour pause between requests.
for (row_idx in seq_len(nrow(countries))) {
  importer <- countries$import[row_idx]
  exporter <- countries$export[row_idx]
  tariff_downloader(importer, exporter)
  Sys.sleep(3600)
}
|
1639e00cba1cb7c37249ed7fcf54974e76602ea6 | b64230574442e2118c0bd4277d26e99b6394707b | /man/ars.Rd | 4f57efd9dbd65200f6b5d4976734f2ecb806c43e | [] | no_license | jonathanchen888/ars | 58a373732b0af48e00a6934eef39baefb1426f70 | 65ca8a3d95c483719dedfec9b8be4506d05afe5b | refs/heads/master | 2020-04-10T07:57:48.054754 | 2018-12-11T18:37:41 | 2018-12-11T18:37:41 | 160,893,841 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,779 | rd | ars.Rd | \name{Implementing an Adaptive-Rejection Sampler}
\alias{Documentation}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Implementing an Adaptive-Rejection Sampler
%% Implementing an Adaptive-Rejection Sampler
}
\description{Adaptive-Rejection Sampler
%% Implementing the Adaptive-Rejection Sampler based on Gilks & Wild (1992)
}
\usage{
ars(density, n, lb, ub)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{density}{A probability density function
%% ~~Describe \code{x} here~~
}\item{n}{Number of samples to generate
%% ~~Describe \code{x} here~~
}
\item{lb}{The lower bound of the domain
%% ~~Describe \code{x} here~~
}
\item{ub}{The upper bound of the domain
%% ~~Describe \code{x} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{n samples from the probability density function
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{Adaptive Rejection Sampling for Gibbs Sampling
W. R. Gilks and P. Wild (1992)
%% ~put references to the literature/web site here ~
}
\author{Rachel Hu, Lin Yang, Jonathan Chen
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
g <- function(x){return(dnorm(x,0,1))}
ars(g, 500, -1, 10)
f <- function(x){return(dgamma(x, 2, rate = 1))}
ars(f, 1000, 1, 5)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
12e14a0396cc52fda0c4d463a64ee0ffee0ee9ce | fa9161a9e8da6fbca1bfc55ed5a72e4e849513cc | /man/avg_pay_plt.Rd | a70a0454cdb7f47f8c3c633929a5f4a748cf95f5 | [] | no_license | AlChow514/lab2_ippspkg | d299e6c5a847edbe87185d81a96fefa85bf0c1ce | 8674b3af81a618b92d044253f0dff9ebf0fde8bf | refs/heads/main | 2023-01-18T17:49:56.285026 | 2020-11-02T01:20:59 | 2020-11-02T01:20:59 | 309,219,203 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 501 | rd | avg_pay_plt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/avg_pay_plot.R
\name{avg_pay_plt}
\alias{avg_pay_plt}
\title{Box plot of Average Medicare Payment vs DRUG code}
\usage{
avg_pay_plt(xcode)
}
\arguments{
\item{xcode}{input a 3 digit drug code}
}
\value{
a box plot of Average Medicare Payment vs DRUG code
}
\description{
This function produces a box plot of average medicare payment with a 3 digit drug code
requires ipps data to be loaded
}
\examples{
avg_pay_plt(039)
}
|
ea485f4e31dbecaf3b2a13c56efd367532d451c3 | c656fcc9590e2b118311a7ab9e45eec5d66b1c69 | /man/Tb_grasshopper.Rd | ea5c174fa8e196c851f485210e2ccfa1e3271dc9 | [
"MIT"
] | permissive | Brybrio/TrenchR | db2186eb6f43bb1602ac749fa800799d20bedbe3 | aeadf1c96daee10964b35eca073d3906323fcac4 | refs/heads/master | 2021-08-20T06:34:01.171420 | 2018-10-25T05:34:36 | 2018-10-25T05:34:36 | 144,772,995 | 0 | 0 | null | 2018-08-14T21:17:25 | 2018-08-14T21:17:25 | null | UTF-8 | R | false | true | 1,925 | rd | Tb_grasshopper.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biophysmodel_Grasshopper.R
\name{Tb_grasshopper}
\alias{Tb_grasshopper}
\title{Predicts body temperatures (operative environmental temperature) of a grasshopper in degrees C.}
\usage{
Tb_grasshopper(Ta, Tg, u, rad, kt, psi, L, Acondfact = 0.25, z = 0.001,
abs = 0.7, r_g = 0.3)
}
\arguments{
\item{Ta}{is air temperature in C}
\item{Tg}{is surface temperature in C, Kingsolver (1983) assumes Tg-Ta=8.4}
\item{u}{is wind speed in m/s}
\item{rad}{is total (direct + diffuse) solar radiation flux in W/m^2}
\item{kt}{is the clearness index (dimensionless), which is the ratio of the global solar radiation measured at the surface to the total solar radiation at the top of the atmosphere.}
\item{psi}{is solar zenith angle in degrees}
\item{L}{in grasshopper length in m}
\item{Acondfact}{is the proportion of the grasshopper surface area that is in contact with the ground}
\item{z}{is grasshopper's distance from the ground in m}
\item{abs}{is absorptivity of grasshopper to solar radiation (proportion), See Anderson et al. (1979).}
\item{r_g}{is substrate solar reflectivity (proportion), see Kingsolver (1983)}
}
\description{
Predicts body temperature (operative environmental temperature) of a grasshopper in degrees C. Described in Buckley et al. (2014, Phenotypic clines, energy balances, and ecological responses to climate change. Journal of Animal Ecology 83:41-50.) See also a related model by Anderson et al. (1979, Habitat selection in two species of short-horned grasshoppers. Oecologia 38:359–74.)
}
\details{
Predicts body temperatures (operative environmental temperature) of a grasshopper in degrees C.
}
\examples{
\dontrun{
Tb_grasshopper(Ta=25, Tg=25, u=0.4, rad=400, kt=0.7, psi=30, L=0.02, Acondfact=0.25, z=0.001, abs=0.7, r_g=0.3)
}
}
\keyword{biophysical}
\keyword{body}
\keyword{model}
\keyword{temperature,}
|
3351bbabda02be47be25f712f890de018381e35d | f0e5fa531bb22aebf6fc733e6acf41b855072901 | /Plot 2.R | 18ef89792ba7dd4c2a17de23816a26a10bc06f71 | [] | no_license | Rookie323/Course4Week1Project | 2a356813188f70fe8ec0c40bb6de619379085725 | f83233bbf43f82c6365e69cc0c9624e35003ef95 | refs/heads/master | 2022-09-07T02:33:55.727114 | 2020-05-31T03:40:11 | 2020-05-31T03:40:11 | 268,203,302 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 723 | r | Plot 2.R | library(lubridate)
# Plot 2: global active power over the two analysis days, saved as a
# 480x480 PNG copied from the screen device.

# Read the full household power consumption data set.
dat <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
head(dat)
str(dat)

# Parse the day/month/year date column into Date objects.
dat$Date2 <- as.Date(dat$Date, format="%d/%m/%Y")
head(dat$Date2)
head(dat$Date)

# Keep only the two analysis days. Bug fix: the bounds were built with
# as.POSIXct(), and comparing Date against POSIXct raises an "incompatible
# methods" warning and falls back to comparing days against seconds, which
# selects nothing. Compare Date against Date instead.
dat2 <- dat[dat$Date2 >= as.Date("2007-02-01") & dat$Date2 <= as.Date("2007-02-02"), ]

# Global_active_power was read as factor/character; convert to numeric.
dat2$Global_active_power <- as.numeric(as.character(dat2$Global_active_power))

# Combine date and time into one timestamp column. POSIXct rather than
# POSIXlt: list-based POSIXlt columns misbehave inside data frames.
dat2$Time2 <- as.character(dat2$Time)
dat2$DateTime <- paste0(dat2$Date2, " ", dat2$Time2)
dat2$DateTime2 <- as.POSIXct(dat2$DateTime)

# Line plot of global active power against time on the screen device.
plot(dat2$DateTime2, dat2$Global_active_power, type = "l", main = "", xlab = "", ylab = "Global Active Power (kilowatts)")

# Copy the screen device to the required PNG file and close it.
dev.copy(png, "Plot 2.png", width = 480, height = 480)
dev.off()
|
4807b77e176a9be99eac86c08fa6be40c6bcc8be | 3c452b0b285add7e8d2bc6627258cd410ecb53fa | /R/RcppExports.R | 7cfe8856a40777bc0d2533cf42340bbf34c46e5b | [
"MIT"
] | permissive | Non-Contradiction/JuliaCall | 295ff96fb5e6f1dbf9e79ad10871572a7ff8e1d1 | c05473bea78a0197c639f7e82ab1c6f2e943e1cc | refs/heads/master | 2023-07-22T02:56:15.201964 | 2023-07-13T16:17:34 | 2023-07-13T16:17:34 | 99,278,989 | 267 | 40 | NOASSERTION | 2023-06-14T18:43:16 | 2017-08-03T22:08:52 | R | UTF-8 | R | false | false | 660 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrappers around the compiled entry points of the 'JuliaCall'
# package; each simply forwards its arguments to the native code via .Call().
# (Auto-generated by Rcpp::compileAttributes(); comments here would be
# regenerated away -- edit the C++ attributes instead.)
juliacall_initialize <- function(libpath, julia_bindir, image_path) {
    .Call('_JuliaCall_juliacall_initialize', PACKAGE = 'JuliaCall', libpath, julia_bindir, image_path)
}

# Forward `libpath` to the native juliacall_cmd routine.
juliacall_cmd <- function(libpath) {
    .Call('_JuliaCall_juliacall_cmd', PACKAGE = 'JuliaCall', libpath)
}

# Forward a call object to the native juliacall_docall routine.
juliacall_docall <- function(jcall) {
    .Call('_JuliaCall_juliacall_docall', PACKAGE = 'JuliaCall', jcall)
}

# Invoked for its side effect at exit; the native result is returned invisibly.
juliacall_atexit_hook <- function(status) {
    invisible(.Call('_JuliaCall_juliacall_atexit_hook', PACKAGE = 'JuliaCall', status))
}
|
5c161ed8e8005f01fa7cbfe3ff73790201b85c2f | 8d32ef9051f1aa3a593c1ab9ca70012195571dee | /data-raw/raw_data.R | 21a31de7a0c4550bc14b2b32bcc3f40b81eff9a1 | [] | no_license | ibn-salem/genepair | 42108b6fd5f08da5f05f4d117956d7d57472a1b6 | 74dd693b3139c46d987470f550669d21b4cc7241 | refs/heads/master | 2021-01-11T16:12:13.410209 | 2017-08-06T19:01:44 | 2017-08-06T19:01:44 | 80,034,684 | 2 | 1 | null | 2017-08-06T19:01:45 | 2017-01-25T16:49:07 | R | UTF-8 | R | false | false | 443 | r | raw_data.R | #
#'
#' Prepare raw data from the loop prediction v08 project for the package:
#' load the saved objects, tidy `loopDF`, and store each object under
#' data/ via devtools::use_data().
#'
load("data-raw/loopPred.v08.ancGR.Rdata")
load("data-raw/loopPred.v08.loopDF.tmp.Rdata")
load("data-raw/loopPred.v08.Rad21.w1001.b10.datamat.Rdata")

# Drop rows that repeat an earlier row in the first two columns, then order
# the remaining rows by those same two columns.
dedup_key <- loopDF[, 1:2]
loopDF <- loopDF[!duplicated(dedup_key), ]
loopDF <- loopDF[order(loopDF[, 1], loopDF[, 2]), ]

devtools::use_data(ancGR)
devtools::use_data(loopDF)
devtools::use_data(datamat)
|
579fad7d4d87198f04d9c679208c35b8e9bc2f31 | db184b5e5e81c46490f7e7d15ceb9033fdb59f17 | /man/npv.Rd | 1dbe5c55ccde078201f746113c1dbeb8e0be09d8 | [] | no_license | cran/cban | 05da62dea1e79ba209effde8f751bbd9614a3e89 | 86fa13404e13b14d2b9a3928163441a010f98ec1 | refs/heads/master | 2023-01-29T01:59:36.836223 | 2020-12-07T09:20:05 | 2020-12-07T09:20:05 | 321,291,863 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 827 | rd | npv.Rd | \name{npv}
\alias{npv}
\title{
Net Present Value
}
\description{
It calculates the net present value of a project in financial terms.
}
\usage{
npv(x)
}
\arguments{
\item{x}{
a data frame or list containing of variables: discounted cost and discounted benefit in
that order.
}
}
\details{
The function specifically targets the second list component returned by the function bcr
as its argument but can use other data frame or list of discounted cost and discounted
benefit. Net present value is also a method of cost benefit analysis.
}
\value{
returns a double or integer
}
\author{
Abubakar Akanbi
}
\examples{
tb = data.frame(cost=c(2500, 28000,15800.50), benefit=c(2600.95,30600,16200.50), rate =
c(4,4,4.2), no.of.years=c(1,1,1))
bc = bcr(tb)
bc = bc[2]
pv = npv(bc)
}
|
8d1f5ed328ababfbf783092377fea69a6f813fa6 | 1fba1b4a40c79bf3c62f39434a3f6453977245aa | /week35_lemurs/w35_lemurs.R | dc114df74de56655ef73aa85e064bd4550adeed7 | [] | no_license | kkulbir/tidytuesday | 718b978364961d451c3470f3e06259a2e66ccadc | 0410d955858b1a787c53b5ebfe1d58ba879fc727 | refs/heads/main | 2023-07-27T18:29:03.128279 | 2021-09-19T08:49:20 | 2021-09-19T08:49:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,637 | r | w35_lemurs.R | library(tidyverse)
library(extrafont)
library(networkD3)
library(htmlwidgets)
# TidyTuesday 2021-08-24: Duke Lemur Center data.
lemurs <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-08-24/lemur_data.csv')
#Keep only unique dlc_id, dam_taxon and sire_taxon
lemur_clean <- lemurs %>% distinct(dlc_id, .keep_all = TRUE) %>%
  select(dlc_id, dam_taxon, sire_taxon) %>% na.omit()
#Keep only instances of cross-breeding
lemur_clean <- lemur_clean[which(lemur_clean$dam_taxon != lemur_clean$sire_taxon), ]
# Count animals per (dam, sire) taxon pair via data.table's .N aggregation.
lemur_unique <- data.table::as.data.table(lemur_clean)
lemur_unique <- lemur_unique[, .N, by = c('dam_taxon','sire_taxon')]
#Assign source and target (dam -> Sankey source, sire -> Sankey target)
lemur_unique$source <- lemur_unique$dam_taxon
lemur_unique$target <- lemur_unique$sire_taxon
#Replace with common names
# NOTE(review): str_replace() substitutes the first *substring* match, so a
# taxon code containing another code as a substring could be corrupted.
# The orderings below appear safe for these codes, but an exact-match
# lookup (e.g. dplyr::recode) would be more robust -- confirm before
# changing, since composite codes may rely on partial replacement.
lemur_unique$source <- str_replace(lemur_unique$source, "EUL", "Eulemur hybrid")
lemur_unique$source <- str_replace(lemur_unique$source, "ERUF", "Red-fronted brown lemur")
lemur_unique$source <- str_replace(lemur_unique$source, "EALB", "White-fronted brown lemur")
lemur_unique$source <- str_replace(lemur_unique$source, "EMAC", "Black lemur")
lemur_unique$source <- str_replace(lemur_unique$source, "VAR", "Varecia hybrid")
lemur_unique$source <- str_replace(lemur_unique$source, "ESAN", "Sanford’s brown lemur")
lemur_unique$source <- str_replace(lemur_unique$source, "EFUL", "Common brown lemur")
lemur_unique$source <- str_replace(lemur_unique$source, "ERUB", "Red-bellied lemur")
lemur_unique$target <- str_replace(lemur_unique$target, "EMAC", "Black lemur")
lemur_unique$target <- str_replace(lemur_unique$target, "ECOL", "Collared brown lemur")
lemur_unique$target <- str_replace(lemur_unique$target, "EUL", "Eulemur hybrid")
lemur_unique$target <- str_replace(lemur_unique$target, "EFUL", "Common brown lemur")
lemur_unique$target <- str_replace(lemur_unique$target, "VRUB", "Red ruffed lemur")
lemur_unique$target <- str_replace(lemur_unique$target, "EALB", "White-fronted brown lemur")
lemur_unique$target <- str_replace(lemur_unique$target, "ERUF", "Red-fronted brown lemur")
lemur_unique$target <- str_replace(lemur_unique$target, "ECOR", "Crowned lemur")
lemur_unique$target <- str_replace(lemur_unique$target, "ESAN", "Sanford’s brown lemur")
#Add a blank space to make nodes unique
# (source labels get a trailing space so the same species can appear as two
# distinct nodes, one on each side of the diagram)
lemur_unique$source <- paste0(lemur_unique$source, " ")
# Node table plus 0-based link indices, as networkD3 expects.
nodes <- data.frame(name=c(as.character(lemur_unique$source), as.character(lemur_unique$target)) %>% unique())
lemur_unique$IDsource=match(lemur_unique$source, nodes$name)-1
lemur_unique$IDtarget=match(lemur_unique$target, nodes$name)-1
# d3 ordinal palette consumed by sankeyNetwork() below.
ColourScal ='d3.scaleOrdinal() .range(["#420C14","#D96C06","#BAD9B5","#EFF7CF","#606C38","#F5C396","#FFBC0A","#9BC53D","#A51C30","#A53F2B"])'
# Build the Sankey diagram; link width is the pair count N.
sn <- sankeyNetwork(
  Links = lemur_unique, Nodes = nodes,
  Source = "IDsource", Target = "IDtarget",
  Value = "N", NodeID = "name",
  sinksRight=FALSE,
  colourScale=ColourScal,
  nodeWidth=40, nodePadding=20,
  fontSize=13, fontFamily="Roboto")
sn
# Post-process with JavaScript: reposition and right-anchor the labels of
# source nodes (identified by the trailing space added above).
sn <- onRender(
  sn,
  '
  function(el,x){
    // select all our node text
    d3.select(el)
      .selectAll(".node text")
      .filter(function(d) { return d.name.endsWith(" "); })
      .attr("x", x.options.nodeWidth - 45)
      .attr("text-anchor", "end");
  }
  '
)
# Add "Female" / "Male" titles above the node columns (one label per
# distinct node x-position).
sn <- onRender(sn, '
  function(el) {
    var cols_x = this.sankey.nodes().map(d => d.x).filter((v, i, a) => a.indexOf(v) === i);
    var labels = ["Female", "Male"];
    cols_x.forEach((d, i) => {
      d3.select(el).select("svg")
        .append("text")
        .attr("x", d)
        .attr("y", 12)
        .attr("font-family", "Roboto")
        .text(labels[i]);
    })
  }
')
sn
|
6ac1347d3e41f2f92e10c8b7782978a35b832b2b | 0f9fa909a1a2175302f2c8eb405482791145ee74 | /tests/testthat/test-summary.R | e6b64d50bcc24eefd14790b423f4387133d452aa | [] | no_license | jasenfinch/metaboMisc | 21942aac4a41043b35bfe36cb26f6d79031fc9a6 | 36d6630c151e29fadb687a77f5b946c80293029c | refs/heads/master | 2023-08-08T13:19:36.745124 | 2023-07-21T16:49:37 | 2023-07-21T16:49:37 | 144,775,967 | 0 | 0 | null | 2023-07-21T16:49:38 | 2018-08-14T21:56:31 | R | UTF-8 | R | false | false | 126 | r | test-summary.R | test_that("featureSummary works", {
  # `bd` is a fixture object supplied by the package's test setup (not
  # visible in this file); featureSummary() is expected to return a tibble.
  feature_summary <- featureSummary(bd)
  expect_s3_class(feature_summary,'tbl_df')
})
|
bd25a105a453cda4a271d5c8f234fe6ed87c9d21 | 67588b620e82f3baa1eadb83ce33a0aa73fa1809 | /man/getBuiltinTime.GString.Rd | 167af64d30829aee4f0b86c92d9b9e530f410964 | [] | no_license | HenrikBengtsson/R.utils | 88254b239c2b3df8cbb92f5c066fcde4c7e2378e | 483d663494b1531e048b53f64652b574f4523d5e | refs/heads/develop | 2023-04-07T10:40:23.836673 | 2023-03-18T04:27:44 | 2023-03-18T04:27:44 | 19,393,805 | 62 | 20 | null | 2021-09-24T17:44:50 | 2014-05-03T02:32:57 | R | UTF-8 | R | false | false | 1,003 | rd | getBuiltinTime.GString.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% GString-class.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{GString$getBuiltinTime}
\alias{GString$getBuiltinTime}
\alias{getBuiltinTime.GString}
\alias{GString.getBuiltinTime}
\alias{getBuiltinTime,GString-method}
\title{Gets the current time}
\description{
Gets the current time.
}
\usage{
## Static method (use this):
## GString$getBuiltinTime(format="\%H:\%M:\%S", ...)
## Don't use the below:
\method{getBuiltinTime}{GString}(static, format="\%H:\%M:\%S", ...)
}
\arguments{
\item{format}{A \code{\link[base]{character}} format string.}
\item{...}{Not used.}
}
\value{
Returns a \code{\link[base]{character}} string.
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{GString}}.
}
\keyword{internal}
\keyword{methods}
|
a6a5dd1b4cd03e1ce0df47ccca2a9dcb5fa00fce | 7193be8e52b2095d8b6472f3ef2104843dca339e | /Project1-ExploreVis/DinaMirijanian/codes/diabetes1.R | 1fa2096deda7de2960f8fc9df500a223aaf50bc5 | [] | no_license | vuchau/bootcamp007_project | 4b0f37f49a1163ea6d8ee4143a5dcfdef0d352dd | ffbd0f961b18510fc72fd49770187ec1b4b013ae | refs/heads/master | 2020-03-20T13:19:58.812791 | 2017-06-08T05:13:24 | 2017-06-08T05:13:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,234 | r | diabetes1.R | library(dplyr)
library(ggplot2)
# NOTE(review): absolute setwd() makes the script machine-specific; a
# relative path or here::here() would be more portable.
setwd("/Users/dmirij/Desktop/VIS_PROJECT")
# U.S. Chronic Disease Indicators export.
mydata = read.csv(file = "U.S._Chronic_Disease_Indicators__CDI_.csv")
###diabetes diagnosis in adults - line plot #############
# Successively filter to the national, overall, crude-prevalence series
# for diagnosed diabetes among adults.
dset1 = mydata[mydata$LocationDesc == "United States", ]
dset2 = dset1[dset1$Topic=="Diabetes", ]
dset3 = dset2[dset2$StratificationCategory1=="Overall", ]
dset4 = dset3[dset3$DataValueType=="Crude Prevalence", ]
dset5=dset4[dset4$Question=="Prevalence of diagnosed diabetes among adults aged >= 18 years", ]
# Convert DataValue (read as factor/character) to numeric via character.
dset6 = mutate(dset5,number=as.numeric(as.character(DataValue)))
# Line-plus-points plot of prevalence (%) by year, 2012-2014.
d = ggplot(data=dset6, aes(YearStart, number)) + geom_line(size = 2) + geom_point(size = 6)
dd = d + scale_x_continuous(breaks=c(2012,2013,2014))
ddd = dd + ylab("%") + xlab("year") + labs(title = "diabetes diagnosis among adults")
dddd = ddd + theme(axis.text=element_text(size=18),axis.title=element_text(size=20,face="bold"), title=element_text(size=22,face="bold") )
dddd
#########################################################
###looking for state wise map of diabetes in 2014
library(googleVis)
# Filter to the per-state crude prevalence of diagnosed diabetes among
# adults, overall stratification, year 2014.
diab1 = mydata[mydata$Topic=="Diabetes", ]
diab2 = diab1[diab1$Question=="Prevalence of diagnosed diabetes among adults aged >= 18 years", ]
diab3=diab2[diab2$StratificationCategory1=="Overall", ]
diab4=diab3[diab3$DataValueType=="Crude Prevalence", ]
diab5=diab4[diab4$YearStart==2014, ]
diab6=select(diab5,LocationDesc,DataValue)
# Bug fix: this used to write diab_state_pre.csv *with* row names, and the
# resulting unnamed id column had to be deleted by hand into diab_state.csv
# (the "I'll figure out a better way" step). row.names = FALSE removes the
# manual step; the write/read round trip is kept so DataValue is re-parsed
# with a clean type.
write.csv(diab6, file = "diab_state.csv", row.names = FALSE)
diab_state=read.csv(file="diab_state.csv")
# State-level choropleth, blue (low) through red (high).
state_em <- gvisGeoChart(diab_state, "LocationDesc", "DataValue",
                         options=list(region="US",
                                      displayMode="regions",
                                      resolution="provinces",
                                      colorAxis="{colors: ['blue', 'white', 'red']}"))
plot(state_em)
#########################################################################
##Prevalence of depressive disorders among adults with diagnosed diabetes
##State wise map
# Filter to the per-state crude prevalence series for 2014.
dep1=mydata[mydata$Question=="Prevalence of depressive disorders among adults aged >= 18 years with diagnosed diabetes", ]
dep2 = dep1[dep1$StratificationCategory1=="Overall", ]
dep3 = dep2[dep2$DataValueType=="Crude Prevalence", ]
dep4 = dep3[dep3$YearStart==2014, ]
dep5=select(dep4,LocationDesc,DataValue)
# Bug fix: this used to write depression_state1.csv *with* row names, and
# the unnamed id column had to be deleted by hand into depression_state.csv.
# row.names = FALSE removes the manual step; the write/read round trip is
# kept so DataValue is re-parsed with a clean type.
write.csv(dep5, file = "depression_state.csv", row.names = FALSE)
depression_state=read.csv(file="depression_state.csv")
# State-level choropleth, green (low) through red (high).
state_em <- gvisGeoChart(depression_state, "LocationDesc", "DataValue",
                         options=list(region="US",
                                      displayMode="regions",
                                      resolution="provinces",
                                      colorAxis="{colors: ['green', 'white', 'red']}"))
plot(state_em)
|
269ec78b4d095fc33f8ca748a171e4854f525758 | 30172bc6b24a66712562adc4b19f93569842b75c | /cachematrix.R | 47ce0659677e0d9f055f1bfbe01dd2b22009d402 | [] | no_license | kepler123/ProgrammingAssignment2 | 7151507d4ddfe03288905ac591dcb3c6fdc9ea58 | 600d81a712bfa469cec0ecd1e43ad8e41099f766 | refs/heads/master | 2020-04-06T03:44:11.026106 | 2014-06-21T12:35:37 | 2014-06-21T12:35:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,196 | r | cachematrix.R | ##
## The makeCacheMatrix and cacheSolve functions in this R script
## together provide a way for creating a matrix, lazily computing
## its inverse, and caching the inverse on the matrix so it's not
## recomputed after the first time it is computed. In other words,
## the inverse is cached in order to avoid a potentially expensive
## re-calculation.
##
## Note that makeCacheMatrix creates a matrix capable of holding on
## to the cached inverse value. cacheSolve computes the inverse and
## sets it into the matrix.
##
## A sample usage pattern would be as follows:
##
## m <- matrix(c(2, 4, 3, 1, 5, 7), nrow=3, ncol=3)
## cm <- makeCacheMatrix(m)
## invM <- cacheSolve(cm)
##
## This creates a 3x3 matrix, generates our special "extension"
## capable of caching the computed inverse, then computes the inverse.
## A subsequent call into cacheSolve returns the cached inverse value:
##
## invM2 <- cacheSolve(cm)
##
## The "extended" matrix implementation also allows you to set a new
## matrix in. That clears the cached inverse and it'll be re-computed
## the next time the cacheSolve is invoked.
##
## *** Assumptions ***
## The implementation assumes that the input matrix is always invertible.
## This implies square, invertible matrix, only.
##
## *** Example ***
##
## m <- matrix(c(4, 7, 2, 6), nrow=2, ncol=2)
## > m
## [,1] [,2]
## [1,] 4 2
## [2,] 7 6
##
## cm <- makeCacheMatrix(m)
##
## > inv <- cacheSolve(cm)
## Getting a freshly computed inverse value.
## > inv
## [,1] [,2]
## [1,] 0.6 -0.2
## [2,] -0.7 0.4
##
## > inv <- cacheSolve(cm)
## Getting cached inverse.
## > inv
## [,1] [,2]
## [1,] 0.6 -0.2
## [2,] -0.7 0.4
##
## Author's perspective:
##
## I believe we'd benefit from a more streamlined implementation where
## we'd do away with cacheSolve and the set method within makeCacheMatrix
## and just make the getinverse method compute the inverse then cache it,
## or just return the cached value if already computed.
##
## The reasons for that would be to:
## a) have less code
## b) avoid having to invoke cacheSolve every time that matrix value is reset
## c) encapsulate/protect the calculation of the inverse into the makeCacheMatrix
## logic, in order to avoid a possible frivolous setting of an incorrect
## inverse value via the setinverse call.
##
## However, the assignment calls for the specific implementation as below.
##
## Creates a matrix capable of holding on to a cached matrix inverse value.
## Input: a generic matrix
## Output: an "extended" matrix capable of holding on to a cached inverse
## See also: cacheSolve
## Create a special "matrix" object able to cache its inverse.
##
## Returns a list of four accessor functions that close over the matrix and
## a cached-inverse slot: set/get for the matrix itself, and
## setinverse/getinverse for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  ## Cached inverse of `x`; NULL until cacheSolve() stores a value.
  cached_inverse <- NULL

  list(
    ## Replace the stored matrix and drop any stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    ## Return the stored matrix.
    get = function() x,
    ## Store a computed inverse (normally called by cacheSolve()).
    setinverse = function(inverse) cached_inverse <<- inverse,
    ## Return the cached inverse, or NULL if none has been stored yet.
    getinverse = function() cached_inverse
  )
}
## Computes the inverse for the special "matrix" created by makeCacheMatrix()
## and caches it on the object; later calls return the cached value.
##
## *** Assumes that the matrix supplied is always invertible. ***
## This implies a square, invertible matrix. No sanity checking is
## performed on the input, e.g. to check the matrix dimensions.
##
## Input:  an "extended" matrix capable of caching the inverse value;
##         further arguments in `...` are forwarded to solve().
## Output: a matrix that is the inverse of the matrix held by 'x'.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("Getting cached inverse.")
    return(inv)
  }
  data <- x$get()
  ## Bug fix: `...` was declared but silently dropped; forward it so callers
  ## can pass solve() options such as `tol`.
  inv <- solve(data, ...)
  x$setinverse(inv)
  message("Getting a freshly computed inverse value.")
  inv
}
|
34e365f027c9f24ddce99bdb2a4cacd741199795 | 092cb24bea69ae3bb285b4aabdca65ac86726a2c | /plot3.R | 590ad0136828f764a24396ad0111310c78a3c357 | [] | no_license | lars-von-buchholtz/ExData_Plotting1 | 1cd2b3a31d83a9f507ea935cb0efe5b3cda38489 | cc490828d9cddd45fc7bdaa53cbe8f338d1e9d24 | refs/heads/master | 2021-01-22T04:49:09.466325 | 2017-02-11T21:05:37 | 2017-02-11T21:05:37 | 81,586,403 | 0 | 0 | null | 2017-02-10T16:56:20 | 2017-02-10T16:56:20 | null | UTF-8 | R | false | false | 1,390 | r | plot3.R |
# Plot 3: the three energy sub-metering series over the two analysis days,
# copied from the screen device to a 480x480 PNG.

# Download the zipped raw data (url from the README) and unzip it.
dataUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipfilename <- "hpc.zip"
datafilename <- "household_power_consumption.txt"
# Robustness: skip the download when the extracted file already exists.
if (!file.exists(datafilename)) {
  download.file(dataUrl, zipfilename)
  unzip(zipfilename)
}
# Read in the data; "?" marks missing values in the raw file.
hpc_raw <- read.table(datafilename, header = TRUE, comment.char = "", na.strings = "?", sep = ";",colClasses = c("character","character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
# Keep only the two analysis days (Date is a d/m/Y character column).
hpc_sub <- hpc_raw[with(hpc_raw, Date == "1/2/2007" | Date == "2/2/2007"), ]
# Build a combined datetime column. Bug fix: strptime() returns POSIXlt,
# and list-based POSIXlt columns misbehave inside data frames; convert to
# the compact POSIXct representation instead.
hpc_clean <- within(hpc_sub, datetime <- as.POSIXct(strptime(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S", tz = "")))
# Plot the three sub-metering series on the screen device, matching the
# sample image from the README.
plot(hpc_clean$datetime, hpc_clean$Sub_metering_1, xlab = "", ylab = "Energy sub metering", col = "black", type = "l")
lines(hpc_clean$datetime, hpc_clean$Sub_metering_2, col = "red")
lines(hpc_clean$datetime, hpc_clean$Sub_metering_3, col = "blue")
legend("topright", lty = c(1, 1, 1), cex = 0.75, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the screen device to the PNG file and close the PNG device.
dev.copy(png, "plot3.png", width = 480, height = 480)
dev.off()
|
b2bc64fa4773cca3069a34c7e272889d37e686dd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/treeman/examples/getNdsKids.Rd.R | 96c000e73e8d52290371b8deb3a293a76f15e57b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 203 | r | getNdsKids.Rd.R | library(treeman)
# Extracted example code from the treeman package's getNdsKids() help page.
### Name: getNdsKids
### Title: Get children IDs for multiple nodes
### Aliases: getNdsKids
### ** Examples
library(treeman)
# build a random tree of size 10, then query children for tree['nds']
# (presumably the vector of all node IDs -- see treeman docs to confirm)
tree <- randTree(10)
getNdsKids(tree, id=tree['nds'])
|
3aeeb878630f20baa5cbafa5571e1dc79a3aeeb7 | 4dcb808a0b3ef5656fa2dcdf3e931f90befe19ec | /HouseholdSourcedata.R | 3cb50c9368368f5656f8245da70aeabf0d62c9c3 | [] | no_license | TejovardhanM/ExData_Plotting1 | 7116fb06827d9c4e4f2bc866aa9280c0dfd2180c | 03affcd9b24a0dee6be2763b3e77dab8176eb791 | refs/heads/master | 2022-06-06T05:51:14.106056 | 2022-05-13T18:40:20 | 2022-05-13T18:40:20 | 62,902,675 | 0 | 0 | null | 2016-07-08T16:33:36 | 2016-07-08T16:33:36 | null | UTF-8 | R | false | false | 887 | r | HouseholdSourcedata.R |
# Loads the household power consumption data and keeps only the two target
# days (2007-02-01 and 2007-02-02) in `data.filtered`.
# NOTE(review): setwd() with an absolute Windows path ties this script to one
# machine; a relative path or a parameter would be more portable.
setwd("C:\\Tejo\\Datascience\\ExploratoryDataAnalysis\\ExData_Plotting1\\")
# "?" encodes missing values in this dataset (na.strings)
housepowerconsumption<-read.table("C:\\Tejo\\Datascience\\ExploratoryDataAnalysis\\exdata_data_household_power_consumption\\household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
###head(housepowerconsumption)
###str(housepowerconsumption)
##housepowerconsumption$Date<- as.Date(housepowerconsumption$Date, "%m/%d/%Y")
## filter the data: keep rows for 1 and 2 February 2007 (Date stored d/m/Y)
data.filtered<- housepowerconsumption[housepowerconsumption$Date == "1/2/2007"| housepowerconsumption$Date == "2/2/2007" ,]
data.filtered$Date<-as.Date(data.filtered$Date, format="%d/%m/%Y")
# combine Date + Time into a POSIXlt timestamp column (replaces Time)
data.filtered$Time<-with (data.filtered, strptime(paste(Date,Time), format= "%Y-%m-%d %H:%M:%S"))
##str(housepowerconsumption)
# drop the large raw table once the subset has been taken
rm(housepowerconsumption)
print ("Data loaded into data.filtered...")
|
38713dd9657e2de12762127bc4be7ede01f197d1 | 3c37e20c65d3918fede609648fccc7e997527c47 | /man/crc.Rd | fa37a3f824cb187296ad660de531f97ef844103c | [] | no_license | cran/Rwave | 67cb1e7b8e234c1f49b1ac4cbadef024a04afc21 | 72a5fc18fdb0c4ae4cf6aa9985617268585ffae5 | refs/heads/master | 2022-11-04T20:48:09.750339 | 2022-10-21T22:17:49 | 2022-10-21T22:17:49 | 17,713,902 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,672 | rd | crc.Rd | \name{crc}
\alias{crc}
\title{
Ridge Extraction by Crazy Climbers
}
\description{
Uses the "crazy climber algorithm" to detect ridges in the modulus of
a continuous wavelet or a Gabor transform.
}
\usage{
crc(tfrep, tfspec=numeric(dim(tfrep)[2]), bstep=3, iteration=10000,
rate=0.001, seed=-7, nbclimb=10, flag.int=TRUE, chain=TRUE,
flag.temp=FALSE)
}
\arguments{
\item{tfrep}{
modulus of the (wavelet or Gabor) transform.
}
\item{tfspec}{
numeric vector which gives, for each value of the scale or frequency the
expected size of the noise contribution.
}
\item{bstep}{
stepsize for random walk of the climbers.
}
\item{iteration}{
number of iterations.
}
\item{rate}{
initial value of the temperature.
}
\item{seed}{
initial value of the random number generator.
}
\item{nbclimb}{
number of crazy climbers.
}
\item{flag.int}{
if set to TRUE, the weighted occupation measure is computed.
}
\item{chain}{
if set to TRUE, chaining of the ridges is done.
}
\item{flag.temp}{
if set to TRUE: constant temperature.
}}
\value{
Returns a 2D array called beemap containing the (weighted or unweighted)
occupation measure (integrated with respect to time)
}
%\details{}
\references{
See discussion in text of ``Practical Time-Frequency Analysis''.
}
\seealso{
\code{\link{corona}}, \code{\link{icm}}, \code{\link{coronoid}},
\code{\link{snake}}, \code{\link{snakoid}} for ridge estimation,
\code{\link{cfamily}} for chaining and
\code{\link{crcrec}},\code{\link{gcrcrec}},\code{\link{scrcrec}} for
reconstruction.
}
\examples{
data(HOWAREYOU)
plot.ts(HOWAREYOU)
cgtHOWAREYOU <- cgt(HOWAREYOU,70,0.01,100)
clHOWAREYOU <- crc(Mod(cgtHOWAREYOU),nbclimb=1000)
}
\keyword{ts}
|
5251079291aff8192ff04963282df5929060c70c | 35e451f2286f9afe4fa15f630dc20818b567328a | /ProgAssignment1_2.R | 1949556259a2f59c2faec3b1af8f2450b8dc0ea7 | [] | no_license | ruiduque/datasciencecoursera | 79fca3e7c58b1163e5ee1dc17ba2d204ff5c067a | 5975f05031809c5ca222d41f2eeb25a5704dac8b | refs/heads/master | 2021-03-12T20:38:36.699909 | 2018-12-11T14:57:28 | 2018-12-11T14:57:28 | 157,734,757 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 721 | r | ProgAssignment1_2.R |
#' Count complete observations per monitor file.
#'
#' @param directory Path to a folder of CSV files (one per monitor); files are
#'   addressed by their position in the sorted listing from list.files().
#' @param id Integer vector of monitor ids, used as indices into that listing.
#' @return A data.frame with one row per id and columns `id` and `nobs`,
#'   where `nobs` is the number of fully non-NA rows in that monitor's file.
complete <- function(directory, id = 1:32) {
  filelist <- list.files(directory, full.names = TRUE)
  # vapply keeps the result type stable and avoids growing a data.frame with
  # rbind() inside a loop (quadratic copying) as the original version did.
  nobs <- vapply(
    id,
    function(file_id) sum(complete.cases(read.csv(file = filelist[file_id]))),
    numeric(1)
  )
  data.frame(id = id, nobs = nobs)
}
# --
# -- Trying with lapply
#--
#' Vectorised variant of complete(): complete observations per monitor file.
#'
#' @param directory Path to a folder of CSV files (one per monitor).
#' @param id Integer vector of monitor ids (positions in the sorted listing).
#' @return A data.frame with columns `id` and `nobs`.
complete1 <- function(directory, id = 1:32) {
  files <- list.files(directory, full.names = TRUE)[id]
  # vapply() (not sapply()) guarantees an unnamed numeric vector even for an
  # empty `id`, so the data.frame is always well-formed.
  data.frame(
    id = id,
    nobs = vapply(
      files,
      function(filen) sum(complete.cases(read.csv(file = filen))),
      numeric(1),
      USE.NAMES = FALSE
    ),
    row.names = NULL
  )
}
|
# Return the row indices that each data set in `geno.files[i]` occupies when
# all data sets 1..m are stacked on top of one another, in order.
#
# geno.files: character vector of object names; each named object is looked up
#             with get() (so it must be visible from this function's scope) and
#             its nrow() defines that data set's block size.
# train:      if TRUE, print the name of each selected data set.
# i:          integer positions of the data sets whose row indices are wanted.
#
# Returns an integer vector concatenating the index blocks for every m in i.
det.set.ind <- function(geno.files, train, i)
{
	ind <- NULL
	for (m in i) {
		if (train) cat(geno.files[m], " ")
		# BUG FIX: the offset of data set m must be the TOTAL number of rows of
		# all preceding data sets; the original only added nrow() of data set
		# m-1, which produced overlapping index blocks whenever m > 2.
		curr.ind <- 0
		if (m > 1) {
			for (j in 1:(m - 1)) {
				curr.ind <- curr.ind + nrow(get(geno.files[j]))
			}
		}
		ind <- c(ind, (curr.ind + 1):(curr.ind + nrow(get(geno.files[m]))))
	}
	return(ind)
}
|
34243dd7f66119c6c5ef215de00f20897144c222 | b59cc783d2da2f32737432c1b13cf72c5802f067 | /tests/testthat.R | 38b2c039f32fe1068c40d53a3c306028897c1586 | [] | no_license | jdsimkin04/shinyinla | 9a16007b375975a3f96b6ca29a1284aa6cafb180 | e58da27a2a090557058b2a5ee63717b116216bf7 | refs/heads/master | 2023-06-05T08:34:34.423593 | 2021-06-24T00:27:04 | 2021-06-24T00:27:04 | 330,322,338 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 222 | r | testthat.R | # For development, run with devtools::test() from the rinla/ directory
# Requires the binaries to be locally available via the relevant
# rinla/inst/bin/... directory
# Standard testthat bootstrap: load the package under test, then run every
# test file under tests/testthat/ via test_check().
library(testthat)
library(INLA)
test_check("INLA")
|
83ec69c43b6b083c14e685bff6c46928503382c2 | 61076ee22971ff0979c282ee7f6760e43a19634f | /Machine Learning Algorithms/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/Data_Preprocesing.R | 0a86783a9a8fbb0e7bfcd487b91af5757a7677de | [] | no_license | vaishnavinalawade/Data-Science-Python-Projects | 419618da83fba74b9bd7065b6318a59eb18f49fd | 3f1ae8e3e85e869dde0f168f5928ded8d52b96a5 | refs/heads/master | 2022-11-28T01:41:48.796116 | 2020-08-10T06:12:32 | 2020-08-10T06:12:32 | 166,856,648 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 751 | r | Data_Preprocesing.R | dataset = read.csv('Data.csv')
# Impute missing Age/Salary with the column mean (ave() with a single FUN and
# no grouping returns the overall mean replicated to the column length).
dataset$Age = ifelse(is.na(dataset$Age), ave(dataset$Age, FUN = function(x) mean(x,na.rm = TRUE)),dataset$Age)
dataset$Salary = ifelse(is.na(dataset$Salary), ave(dataset$Salary, FUN = function(x) mean(x,na.rm = TRUE)), dataset$Salary)
# Encode the categorical columns as factors with a fixed level order.
dataset$Country = factor(dataset$Country, levels = c('France','Spain','Germany'), labels = c(1,2,3))
dataset$Purchased = factor(dataset$Purchased, levels = c('Yes', 'No'), labels = c(1,0))
# NOTE(review): install.packages() inside a script re-installs on every run;
# installation is normally left outside the analysis script.
install.packages('caTools')
library(caTools)
# Reproducible 80/20 train/test split keyed on Purchased (sample.split is
# presumed to preserve the class ratio -- verify against caTools docs).
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Standardize columns 2:3 (presumably Age and Salary); note each set is scaled
# on its own statistics rather than with the training-set parameters.
training_set[,2:3] = scale(training_set[,2:3])
test_set[,2:3] = scale(test_set[,2:3])
c0e23e9031b56384c171ff1ebc9e6b997d6bf50e | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/custom-tree-models.Rd | cbe28e00ed848a262f30eb12bcd31e3dc4c39f3f | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 1,890 | rd | custom-tree-models.Rd | \alias{gtkTreeIterGetId}
\alias{gtkTreeIterSetId}
\alias{gtkTreeIterGetStamp}
\alias{gtkTreeIterSetStamp}
\alias{gtkTreeIter}
\name{custom-tree-models}
\title{Custom GtkTreeModel implementations}
\description{Functions that allow one to implement a custom \code{\link{GtkTreeModel}}}
\usage{
gtkTreeIter(id, stamp)
gtkTreeIterGetId(iter)
gtkTreeIterSetId(iter, id)
gtkTreeIterGetStamp(iter)
gtkTreeIterSetStamp(iter, stamp)
}
\arguments{
\item{iter}{The \code{\link{GtkTreeIter}} of a custom model}
\item{id}{The integer code identifying \code{iter}}
\item{stamp}{The integer code for tracking the version of \code{iter}}
}
\details{
These functions allow one to create and access
\code{\link{GtkTreeIter}} structures when implementing a \code{\link{GtkTreeModel}}.
\code{gtkTreeIter} creates an iter from scratch, given an id and stamp.
\code{gtkTreeIterGetId} and \code{gtkTreeIterSetId} access the integer that
identifies the data element referred to by \code{iter}.
\code{gtkTreeIterGetStamp} and \code{gtkTreeIterSetStamp} access the integer
that serves as a version stamp. After the model changes, the model version
should be incremented, so that all existing iters are invalidated, as
evidenced by their stamp.
}
\value{
For \code{gtkTreeIter}, an external pointer to the underlying C structure.
For \code{gtkTreeIterGetId}, the integer code identifying the element
referred to by \code{iter}.
For \code{gtkTreeIterGetStamp}, the integer code identifying the version
of \code{iter}.
}
\note{
These functions are for implementing \code{\link{GtkTreeModel}}s only! Most
of the time, one can use one of the implementations included with GTK+
(\code{\link{GtkListStore}} or \code{\link{GtkTreeStore}}) or
\code{\link{RGtkDataFrame}}.
}
\seealso{gClass}
\author{Michael Lawrence}
\keyword{interface}
\keyword{internal}
|
3c5b0babdc9076d4d36c22a8eebc3a0f9b1d74b3 | 075de50cf5166329502555fa39d863f2619cf27b | /app.R | f47cf824d9f307bdc1475bdc0baa6a2dc7a2f94d | [] | no_license | ardata-fr/municipales-paris | 75841f7d68803d2e8db686b95b37209d7098f6e5 | 851b1c595924b293b680f6c5453fecd9e92a4f8e | refs/heads/master | 2023-04-20T15:30:43.996497 | 2021-05-06T15:13:34 | 2021-05-06T15:13:34 | 278,660,914 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,889 | r | app.R | library(shiny)
library(shinyWidgets)
library(ggplot2)
library(ggiraph)
library(flextable)
# project-local helpers (contents not shown in this file)
source("Rscripts/flextable_tooltip.R")
source("Rscripts/summary_data.R")
# get data ----
# pre-computed objects: Paris sectors (plotted with geom_sf below), one colour
# per political leaning ("nuance"), per-sector scores, summary data and
# per-sector flextables used as map tooltips
paris_arr <- readRDS(file = "data/paris_arr.RDS")
nuances_col <- readRDS(file = "data/nuances_col.RDS")
scores <- readRDS(file = "data/scores.RDS")
summ_detail <- readRDS(file = "data/summ_detail.RDS")
flextables_tour_secteur <- readRDS(file = "data/flextables_tour_secteur.RDS")
# for scales colors ----
# named lookups (fill, label, data_id) keyed by nuance code for the
# interactive fill scale of the summary chart
scale_values <- setNames(nuances_col$fill, nuances_col$nuance)
scale_labels <- setNames(nuances_col$lib_nuance, nuances_col$nuance)
scale_data_id <- setNames(nuances_col$nuance, nuances_col$nuance)
scale_barplot_interactive <- scale_fill_manual_interactive(
  values = scale_values, labels = scale_labels, data_id = scale_data_id)
# base map ----
# bare outline of the sectors; layers of actual data are added per-render
CarteSimple <- ggplot() +
  geom_sf(data = paris_arr, colour = "black", fill="transparent", size = .2) +
  theme_void()
# ui ----
# Top row: round selector ("Tour 1"/"Tour 2") and a slider capping the map's
# colour gradient. Bottom row: interactive summary chart (left) + map (right).
ui <- fluidPage(
  fluidRow(
    column(width = 6,
           radioGroupButtons(
             inputId = "tour", label = "Choose Tour 1 or Tour 2:", selected = "Tour 1",
             choices = c("Tour 1", "Tour 2"))
    ),
    column(width = 6,
           sliderInput(inputId = "gradient", label = "gradient max for the map",
                       min = .15, max = 1, value = .6)
    )
  ),
  fluidRow(
    column(width = 7, girafeOutput(outputId = "summary", height = "500px")),
    column(width = 5, girafeOutput(outputId = "map", height = "500px"))
  )
)
# server ----
server <- function(input, output, session) {
  # Polar bar ("donut") chart of scores per nuance for the selected round;
  # each wedge carries a tooltip and a data_id equal to its nuance code.
  output$summary <- renderGirafe({
    dat <- summ_detail[summ_detail$TOUR %in% tour(),]
    gg <- ggplot(dat, aes(x = x, y = score, fill = nuance, tooltip = tooltip, data_id = nuance)) +
      geom_col_interactive() + coord_polar(theta = "y") +
      scale_barplot_interactive +
      theme_void() +
      # clickable caption linking to the open-data source
      theme(plot.caption = element_text_interactive(
        tooltip = "https://parisdata.opendatasoft.com/explore",
        data_id = "caption_id",
        onclick = "window.open(\"https://parisdata.opendatasoft.com/explore\")",
        hover_css = "fill:magenta;cursor:pointer;"
      )) +
      labs(title = "Scores par nuance", x = "", y = "",
           subtitle = "Municipales Paris 2020",
           caption = "source: parisdata")
    girafe(ggobj = gg, width_svg = 6, height_svg = 6, options = list(
      opts_hover(reactive = TRUE, css = "stroke:black;cursor:pointer;"),
      opts_hover_inv(css = "opacity:.3;"),
      opts_hover_key(css = "cursor:pointer;stroke:black;"),
      opts_selection(type = "none"),
      opts_selection_key(type = "single", css = "stroke:black;stroke-width:3px;"),
      opts_tooltip(offy = -50),
      opts_toolbar(saveaspng = FALSE)
    ))
  })
  # Mirror the hovered wedge onto the legend key via a custom message so that
  # hovering the chart highlights the matching legend entry.
  observe({
    if(isTruthy(input$summary_hovered)){
      session$sendCustomMessage(type = 'summary_key_set', message = input$summary_hovered)
    }
  })
  # Selected round as an integer: 1 for "Tour 1", 2 for "Tour 2".
  tour <- reactive({
    return(which(c("Tour 1", "Tour 2") %in% input$tour))
  })
  # Nuance selected by clicking a legend key; req() blocks downstream
  # renders until a selection exists.
  nuance <- reactive({
    req(input$summary_key_selected)
    input$summary_key_selected
  })
  # Choropleth of the selected nuance's score per sector. Each sector's
  # tooltip is a flextable rendered to HTML, with the selected nuance's row
  # highlighted in yellow.
  output$map <- renderGirafe({
    req(nuance())
    dat <- scores[scores$TOUR %in% tour() & scores$nuance %in% nuance(),]
    # add tooltips ----
    ft_tooltips <- flextables_tour_secteur[flextables_tour_secteur$TOUR %in% tour(),]
    formula_highlight <- as.formula(sprintf("~ nuance %%in%% '%s'", nuance()))
    ft_tooltips$ft <- lapply(ft_tooltips$ft, function(ft, f){
      ft <- color(ft, i = f, color = "yellow")
      as.character(flextable::htmltools_value(ft, ft.shadow = FALSE))
    }, f = formula_highlight)
    dat$tooltip <- ft_tooltips$ft[match( dat$secteur, ft_tooltips$secteur)]
    gmap <- CarteSimple +
      geom_sf_interactive(
        mapping = aes(data_id = secteur, tooltip = tooltip, fill = score_pct),
        data = dat, colour = "white") +
      # upper bound of the gradient is user-controlled via input$gradient
      scale_fill_gradientn(colors = rev(c("#000004FF", "#BB3754FF", "#FCFFA4FF")),
                           limits = c(0, input$gradient), na.value = "#000004FF") +
      labs(title = nuance(), x = "", y = "", subtitle = "Scores en %")
    girafe(ggobj = gmap, width_svg = 6, height_svg = 6, options = list(
      opts_hover(css = "opacity:.3;stroke:wheat;cursor:pointer;"),
      opts_tooltip(offx = 20, offy = -50),
      opts_selection(type = "none")
    ))
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
33b8e6cb599e9df13dad566db0e268ed692b4b9f | 0e10a3642916ab4bbe1d0162e7774d83bfb9fa74 | /R/ClusterVariablePractice.R | ff345e4e8cbe384d1e9869fd0925ebc01938ebba | [] | no_license | ryamada22/atom | 5f82300643351ba41f6f818205bc1d75cbb11f8f | 45f0ceda958d89982e487965ae6e4788bb2c1801 | refs/heads/master | 2022-06-10T10:23:01.922811 | 2022-03-20T23:53:20 | 2022-03-20T23:53:20 | 163,775,778 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,822 | r | ClusterVariablePractice.R |
library(rgl)
# 正四面体の4頂点座標を計算する関数
# Coordinates of the d vertices of a regular (d-1)-simplex centred at the
# origin, one unit-length vertex per row (d = 4 gives a regular tetrahedron).
# Returns a d x (d-1) matrix.
CategoryVector <- function(d = 3) {
  n_dims <- d - 1
  idx <- seq_len(d)
  # diagonal entries and the shared off-diagonal value of each column
  diag_vals <- sqrt(d / n_dims) * sqrt((d - idx) / (d - idx + 1))
  off_vals <- -diag_vals / (d - idx)
  # every row starts as the off-diagonal pattern ...
  vertices <- matrix(off_vals, nrow = d, ncol = d, byrow = TRUE)
  # ... then the diagonal is set and the upper triangle zeroed
  diag(vertices) <- diag_vals
  vertices[upper.tri(vertices)] <- 0
  # drop the last (all-zero) coordinate
  as.matrix(vertices[, 1:n_dims])
}
# regular tetrahedron
Tetra <- CategoryVector(4)
plot3d(Tetra)
# the four vertex coordinates of the tetrahedron
Va <- Tetra[1,]
Vb <- Tetra[2,]
Vc <- Tetra[3,]
Vd <- Tetra[4,]
# ...then overwrite them with an explicit parametrisation where `theta`
# tilts vertex D out of the z = 0 plane
theta <- 0.2
Va <- c(0,1/2,0)
Vb <- c(-sqrt(3)/2,0,0)
Vc <- c(0,-1/2,0)
# BUG FIX: was cos(thea) -- `thea` is undefined, which made this line error
# with "object 'thea' not found"; `theta` (defined above) is clearly intended.
Vd <- c(sqrt(3)/2 * cos(theta),0,sqrt(3)/2 * sin(theta))
# take the six edge vectors of the tetrahedron, each with an arbitrary
# (but fixed) orientation
AB <- Vb - Va
AC <- Vc - Va
AD <- Vd - Va
BC <- Vc - Vb
CD <- Vd - Vc
BD <- Vd - Vb
# scalar identity check, printed when run interactively
# (the line is duplicated in the original script; kept as-is)
sum(AB * CD) + sum(BC * (-AD)) - sum(AC * (-BD))
sum(AB * CD) + sum(BC * (-AD)) - sum(AC * (-BD))
# Cross product / generalized vector outer product.
# V x U = (v2u3-v3u2, v3u1-v1u3, v1u2-v2u1)
# BUG FIX: the next line was stray pasted prose that made the whole script
# fail to parse; it is kept here as a comment.
# Here is a generalized cross product:
#
# Given N-1 vectors of length N, return the length-N vector orthogonal to all
# of them (for N = 3 this is the usual cross product above).
xprod <- function(...) {
  args <- list(...)
  # Check for valid arguments
  if (length(args) == 0) {
    stop("No data supplied")
  }
  # all inputs must share a single length N, and there must be exactly N-1
  len <- unique(vapply(args, length, integer(1)))
  if (length(len) > 1) {
    stop("All vectors must be the same length")
  }
  if (len != length(args) + 1) {
    stop("Must supply N-1 vectors of length N")
  }
  # Component i is the cofactor of column i: the determinant of the stacked
  # matrix with column i deleted, with alternating sign.
  m <- do.call(rbind, args)
  vapply(seq_len(len), function(i) {
    det(m[, -i, drop = FALSE]) * (-1)^(i + 1)
  }, numeric(1))
}
# Back to quiver mutation.
# Suppose triangles ABC and ACD share the edge AC;
# AC then looks like a diagonal of the quadrilateral ABCD.
# The quiver mutation replaces this AC with DB.
# The mutation is known to induce the following change of variables,
# where X(PQ) is the variable attached to edge PQ:
#   X(newAC = DB) -> (X(AB) * X(CD) + X(BC) * X(DA)) / X(AC)
# Move the denominator of the right-hand side over to the left
# (unclear what algebra this lives in, but...):
#   X(DB) * X(AC) -> (X(AB) * X(CD) + X(BC) * X(DA))
# If the two sides of "->" can be shown equal in some sense, something
# interesting may become visible.
# Try interpreting X(AB) * X(CD) as the cross product of edge vectors AB and
# CD; then both sides of "->" are 3-D vectors.
XABxXCD <- xprod(AB,-CD)
XBCxXDA <- xprod(BC,AD)
Uhen <- XABxXCD + XBCxXDA   # "Uhen" = right-hand side
# left-hand side as a cross-product matrix for BD; the 1e-8 diagonal keeps
# the otherwise singular skew-symmetric matrix invertible
Sahen.mat <- matrix(c(10^(-8),-BD[3],BD[2],BD[3],10^(-8),-BD[1],-BD[2],BD[1],10^(-8)),byrow=TRUE,3,3)
Sahen.mat.inv <- solve(Sahen.mat)
Sahen.mat.inv
Sahen.mat.inv %*% matrix(Uhen,ncol=1)
Sahen <- xprod(-BD,AC)      # "Sahen" = left-hand side
Uhen
Sahen
# BUG FIX: the original line referenced undefined names ABxCD / BCxDA;
# the objects actually created above are XABxXCD / XBCxXDA (their sum is Uhen).
xprod((XABxXCD + XBCxXDA),-AC)
BD
|
e96867067b64209eb0ce9b5408ac95fe3d4b9719 | 79b935ef556d5b9748b69690275d929503a90cf6 | /man/Triplets.Rd | e292e275ebcc678aa04559a6c2de80768ad500a1 | [] | no_license | spatstat/spatstat.core | d0b94ed4f86a10fb0c9893b2d6d497183ece5708 | 6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70 | refs/heads/master | 2022-06-26T21:58:46.194519 | 2022-05-24T05:37:16 | 2022-05-24T05:37:16 | 77,811,657 | 6 | 10 | null | 2022-03-09T02:53:21 | 2017-01-02T04:54:22 | R | UTF-8 | R | false | false | 3,266 | rd | Triplets.Rd | \name{Triplets}
\alias{Triplets}
\title{The Triplet Point Process Model}
\description{
Creates an instance of Geyer's triplet interaction point process model
which can then be fitted to point pattern data.
}
\usage{
Triplets(r)
}
\arguments{
\item{r}{The interaction radius of the Triplets process}
}
\value{
An object of class \code{"interact"}
describing the interpoint interaction
structure of the Triplets process with interaction radius \eqn{r}.
}
\details{
The (stationary) Geyer triplet process (Geyer, 1999)
with interaction radius \eqn{r} and
parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}
is the point process
in which each point contributes a factor \eqn{\beta}{beta} to the
probability density of the point pattern, and each triplet of close points
contributes a factor \eqn{\gamma}{gamma} to the density.
A triplet of close points is a group of 3 points,
each pair of which is closer than \eqn{r} units
apart.
Thus the probability density is
\deqn{
f(x_1,\ldots,x_n) =
\alpha \beta^{n(x)} \gamma^{s(x)}
}{
f(x_1,\ldots,x_n) =
alpha . beta^n(x) gamma^s(x)
}
where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the
points of the pattern, \eqn{n(x)} is the number of points in the
pattern, \eqn{s(x)} is the number of unordered triples of
points that are closer than \eqn{r} units apart,
and \eqn{\alpha}{alpha} is the normalising constant.
The interaction parameter \eqn{\gamma}{gamma} must be less than
or equal to \eqn{1}
so that this model describes an ``ordered'' or ``inhibitive'' pattern.
The nonstationary Triplets process is similar except that
the contribution of each individual point \eqn{x_i}{x[i]}
is a function \eqn{\beta(x_i)}{beta(x[i])}
of location, rather than a constant beta.
The function \code{\link{ppm}()}, which fits point process models to
point pattern data, requires an argument
of class \code{"interact"} describing the interpoint interaction
structure of the model to be fitted.
The appropriate description of the Triplets process pairwise interaction is
yielded by the function \code{Triplets()}. See the examples below.
Note the only argument is the interaction radius \code{r}.
When \code{r} is fixed, the model becomes an exponential family.
The canonical parameters \eqn{\log(\beta)}{log(beta)}
and \eqn{\log(\gamma)}{log(gamma)}
are estimated by \code{\link{ppm}()}, not fixed in
\code{Triplets()}.
}
\seealso{
\code{\link{ppm}},
\code{\link{triplet.family}},
\code{\link{ppm.object}}
}
\references{
Geyer, C.J. (1999)
Likelihood Inference for Spatial Point Processes.
Chapter 3 in
O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds)
\emph{Stochastic Geometry: Likelihood and Computation},
Chapman and Hall / CRC,
Monographs on Statistics and Applied Probability, number 80.
Pages 79--140.
}
\examples{
Triplets(r=0.1)
# prints a sensible description of itself
ppm(cells ~1, Triplets(r=0.2))
# fit the stationary Triplets process to `cells'
# ppm(cells ~polynom(x,y,3), Triplets(r=0.2))
# fit a nonstationary Triplets process with log-cubic polynomial trend
}
\author{\adrian
and \rolf
}
\keyword{spatial}
\keyword{models}
|
6fea1b963c79f56a70d3cab0d84f1ac0a1d69589 | 9085705aae9740c3a27ff08900d8e98d2cf8f277 | /MFFP_PEN_analysis.R | 81fb29e0ce86c5981d9960f01cf392d0db3b2e7f | [] | no_license | VFugere/FisHab | ce3bda8a0de0fd3ca263d07a12575a940db91b05 | e65484d0f1a17673ada08c892d6899e395c67130 | refs/heads/master | 2022-03-10T12:25:13.478930 | 2022-02-17T22:35:28 | 2022-02-17T22:35:28 | 226,241,957 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,510 | r | MFFP_PEN_analysis.R | # analyse de données PEN du MFFP
# Vincent Fugère 2021+
# cleanup ----
# NOTE(review): rm(list=ls()) wipes the caller's whole workspace -- acceptable
# for a standalone analysis script, hostile if this file is ever source()d.
rm(list=ls())
# libs ----
library(readxl)
library(itsadug)
library(scales)
library(sp)
library(raster)
library(rworldmap)
library(RColorBrewer)
library(devtools)
library(party)
library(skimr)
library(tidyverse)
library(mgcv)
# helper functions ----
# Wrap each element of `x` in italic() plotmath markup (for plot labels).
make.italic <- function(x) as.expression(lapply(x, function(y) bquote(italic(.(y)))))
# Negated %in%: TRUE where an element of x is absent from y.
'%!in%' <- function(x, y) !(x %in% y)
# mean()/sum() that drop NAs. TRUE is spelled out (the original used `T`,
# which is a reassignable binding and therefore unsafe).
mean.nona <- function(x) mean(x, na.rm = TRUE)
sum.nona <- function(x) sum(x, na.rm = TRUE)
# utils ----
#colors <- wesanderson::wes_palette('FantasticFox1',3)
#map <- getMap(resolution='low')
# load data ----
# NOTE(review): absolute local paths; this script only runs on the author's
# machine as written.
sites <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/GIS output/MFFP_PENsites_BasicInfo.xlsx')
hlakes <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/GIS output/MFFP_PENsites_HydroLakes.xlsx') %>%
  filter(`in_QGIS_LCE_falls_within_HL_polygon` == 'yes')
yves <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/Yves Prairie/QC_all_fish_sites_lakes_GSW_w_attributes.xlsx')
land.use <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/GIS/MFFP_lulc/clean_MELCC_UT_LacsPEN.xlsx', sheet='merge')
lu.code <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/GIS/MFFP_lulc/clean_MELCC_UT_LacsPEN.xlsx', sheet='codes')
connect <- read.csv('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/Alex connectivity/MFFP_LakeCentrality.csv') %>% select(-X) %>% filter(cent.length != 0)
# sheet 1 = CPUE per lake/year/survey/species; sheet 2 = community (presence) records
cpue <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/MFFP/deuxieme_envoi/PÊCHES EXPÉRIMENTALES 1988-2019 v20mars_SG (3).xlsx', sheet=1)
com <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/MFFP/deuxieme_envoi/PÊCHES EXPÉRIMENTALES 1988-2019 v20mars_SG (3).xlsx', sheet=2)
fish.codes <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/LP/fish_output/taxonomic_codes.xlsx', sheet='QC-MFFP')
geo <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/SIGEOM/phys-chem_SIGEOM(2021-03-17).xlsx')
habitat <- read_xlsx('~/Desktop/FisHab_Habitat PEN.xlsx', sheet = 'Valeurs ponctuelles')
profiles <- read_xlsx('~/Desktop/FisHab_Habitat PEN.xlsx', sheet = 'Profil')
#### format fish data ####
# cpue: keep LCE (lake id), year, survey type, species and CPUE columns
cpue <- select(cpue, 3, 6:7, 10, 12)
colnames(cpue) <- c('LCE','year','survey','species','CPUE')
cpue <- arrange(cpue, LCE, desc(year))
#filter(cpue, survey == 'PENOC') %>% with(.,table(species,LCE))
# sometimes SAAL, sometimes SAOQ, never both in the same lake;
# rather than recoding the species, we can simply filter by survey type
# when a lake has two consecutive years, we average (only applies to PENDJ)
# otherwise we take the last year
# Reduce to ONE CPUE row per lake x survey type:
cpue.clean <- cpue[0,]
for(survey in c('PENOF','PENT','PENDJ','PENOC')){
  surv.dat <- cpue[cpue$survey == survey,]
  for(lac in unique(surv.dat$LCE)){
    sub <- surv.dat[surv.dat$LCE == lac,]
    if(nrow(sub) == 1){
      cpue.clean <- rbind(cpue.clean,sub)
    }else{
      # rows are sorted by descending year, so a diff of -1 flags a pair of
      # consecutive survey years
      if(-1 %in% diff(sub$year)){
        # NOTE(review): which.min() on a logical returns the FIRST FALSE; the
        # first TRUE (i.e. the first consecutive pair) looks intended --
        # which(diff(sub$year) == -1)[1] -- verify against the data.
        idx <- which.min(diff(sub$year)==-1)
        sub <- sub[c(idx,idx+1),]
        # NOTE(review): `mean` shadows base::mean locally (still works since
        # function lookup skips non-functions, but a different name is safer)
        mean <- mean(x = sub$CPUE, na.rm=T)
        yr <- max(sub$year)
        sub <- sub[1,]
        sub$CPUE <- mean
        sub$year <- yr
        cpue.clean <- rbind(cpue.clean,sub)
      }else{
        # no consecutive pair: keep the most recent year only
        sub <- sub[1,]
        cpue.clean <- rbind(cpue.clean,sub)
      }
    }
  }
}
cpue.clean <- filter(cpue.clean, survey != 'PENOC')
# com: community (species occurrence) records ----
com <- select(com, 3, 5:7, 10)
colnames(com) <- c('LCE','surv.number','year','survey','species')
# drop non-taxonomic placeholder codes
bad.sp.codes <- c('RIEN','-','POIS','NI','AU')
com <- filter(com, species %!in% bad.sp.codes)
genera <- fish.codes %>% filter(!is.na(genus))
speciesid <- fish.codes %>% filter(!is.na(species))
taxon.list <- com %>% distinct(species) %>% pull(species)
# drop records identified only at genus level or at a hybrid/group level
id.at.other.levels <- c('COSP','CYSP','CRSP','STSP','SFXN','PRSP')
com <- filter(com, species %!in% id.at.other.levels)
# replacing older codes with newer ones
com$species[com$species == 'STVI'] <- 'SAVI'
com$species[com$species == 'NOCO'] <- 'LUCO'
com$species[com$species == 'STCA'] <- 'SACA'
com$species[com$species == 'SEMA'] <- 'MAMA'
com$species[com$species == 'ICNE'] <- 'AMNE'
com$species[com$species == 'SASV'] <- 'SAAL'
# SAOQ to SAAL
com$species[com$species == 'SAOQ'] <- 'SAAL'
# remove duplicates (same lake/year/survey/species)
com <- com %>% distinct(LCE,year,survey,species,.keep_all = T)
# cleanup
rm(genera,speciesid,bad.sp.codes,id.at.other.levels)
taxon.list <- com %>% distinct(species) %>% pull(species)
# # how many time series are available?
# ts <- cpue %>% add_count(LCE) %>% group_by(LCE) %>% mutate('yr.range' = (max(year) - min(year))) %>% rename(yrs.of.data = n) %>% distinct(LCE, .keep_all = T) %>% ungroup
# ts <- filter(ts, yrs.of.data > 1)
# hist(ts$yrs.of.data, breaks=30)
# hist(ts$yr.range, breaks=30)
# plot(yr.range~yrs.of.data,ts)
#### format env data ####
# rounded "lat,long" strings are used as a join key across data sources
sites$coords <- paste(round(sites$lce_lat,2),round(sites$lce_long,2),sep=',')
env <- select(sites, LCE, lce_lat, lce_long, coords)
yves$coords <- paste(round(yves$lce_lat,2),round(yves$lce_long,2),sep=',')
env <- yves %>% select(coords, Zmax:q) %>% right_join(env)
# HydroLakes attributes, limited to lakes whose LCE point falls in a polygon
hlakes <- hlakes %>% filter(in_QGIS_LCE_falls_within_HL_polygon == 'yes') %>%
  select(LCE,HL_Lake_area,HL_Shore_dev,HL_Depth_avg,HL_Wshd_area,HL_Elevation,HL_Res_time) %>%
  filter(LCE %in% env$LCE)
env <- right_join(hlakes, env, by='LCE')
rm(yves,hlakes)
# watershed area in km2 = row sum of the land-use columns (given in m2)
watershed.area.km2 <- data.frame('LCE' = land.use$LCE, 'watershed.area.km2' = apply(land.use[,2:ncol(land.use)], 1, sum)/1000000, stringsAsFactors = F)
env <- left_join(env, watershed.area.km2, by='LCE')
#### add habitat data ####
# average repeated point measurements per lake (No_lac = LCE)
hab.clean <- habitat %>% select(No_lac,Transparence:conductivité) %>% group_by(No_lac) %>% summarize_all(~mean(.,na.rm=T))
env <- left_join(env,hab.clean, by = c('LCE' = 'No_lac'))
rm(hab.clean)
#### add world clim data ####
# mean across the layers of each WorldClim raster brick
tmax <- raster('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/GIS/WorldClim/tmaxBrick.tif') %>%
  mean
tmin <- raster('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/GIS/WorldClim/tminBrick.tif') %>%
  mean
# library(rgdal)
# writeRaster(tmax, '~/Desktop/worldclim_max_mean.tif', format='GTiff')
# sample each climate raster at the lake coordinates
pensites <- SpatialPoints(coords = env[,c('lce_long','lce_lat')])
env$tmin <- raster::extract(tmax, pensites) # names inverted in Marco's worldclim files (so tmax raster -> tmin column, deliberately)
env$tmax <- raster::extract(tmin, pensites)
# library(viridis)
# cols <- viridis(50)
# plot(lce_lat ~ lce_long, data=env,pch=16,col=cols[cut(env$tmax,50)])
rm(tmax,tmin,pensites)
#### format land use data ####
# wide (one UT_* column per land-use code, in m2) -> long
lu.long <- land.use %>% pivot_longer(cols=UT_2050:UT_6141, names_to = 'CODE_UT', values_to = 'm.sq')
codes <- lu.code %>% select(CODE_UT, DESC_CAT)
codes$CODE_UT <- paste0('UT_',codes$CODE_UT)
#unique(lu.long$CODE_UT) %in% codes$CODE_UT
# relabel codes with their category description and re-aggregate per category
lu.long <- lu.long %>% left_join(codes, by = 'CODE_UT')
lu.long <- lu.long %>% select(-CODE_UT) %>% group_by(LCE, DESC_CAT) %>% summarize(m.sq = sum(m.sq))
land.use <- pivot_wider(lu.long, id_cols = LCE, names_from = DESC_CAT, values_from = m.sq)
land.use <- select(land.use, -Aquatique)
# convert areas to per-row proportions ('total' standardization -- presumably
# fraction of each watershed's area; see vegan::decostand)
land.use[,2:8] <- vegan::decostand(land.use[,2:8], method = 'total')
rm(codes,lu.code,lu.long)
env <- left_join(env, land.use)
# # plot of histograms
# plots <- lapply(c(2:6,8),
#                 function(col) ggplot2::qplot(land.use[[col]],
#                                              geom='histogram',
#                                              binwidth=0.05,
#                                              xlab = names(land.use)[[col]]))
# cowplot::plot_grid(plotlist=plots,ncol=2)
#### formatting SIGEOM data ####
# geo is re-loaded from scratch here (overwrites the copy read above)
geo <- read_xlsx('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/SIGEOM/phys-chem_SIGEOM(2021-03-17).xlsx')
geo$coords <- paste(round(geo$lce_lat,2),round(geo$lce_long,2),sep=',')
geo$year <- as.numeric(substr(geo$DATE_ECHN,1,4))
# filter very old obs and only keep data from the same period as fish data
geo <- filter(geo, year > 1987)
# pH stored with comma decimal separator; '0' is treated as missing
geo$PH <- str_replace(geo$PH, ',', '.')
geo$PH[geo$PH == '0'] <- NA
geo$PH <- as.numeric(geo$PH)
# one averaged chemistry profile per coordinate
geo <- geo %>% group_by(coords) %>% summarize_at(vars(PH,SIO2:TR2O3), mean.nona)
geo <- geo[-331,] # aberrant outlier (NOTE(review): hard-coded row index; fragile if the input changes)
# PCA (unconstrained rda) of log1p-transformed, non-negative chemistry values
ordi <- geo[,3:93]
ordi <- as.matrix(ordi)
ordi[ordi < 0] <- 0
ordi <- log1p(ordi)
p <- vegan::rda(ordi ~ 1)
#s <- summary(p)
g <-as.data.frame(vegan::scores(p)$sites[,1:2])
# xlims <- range(g[,1])
# ylims <- range(g[,2])
# par(mfrow=c(1,1),mar=c(2,2,2,1),oma=c(3,3,1,1))
# plot(g$PC2 ~ g$PC1, type = 'p',pch=16,col=alpha('black',0.5),ann=F,xlim=xlims,ylim=ylims,cex=1.2,lwd=1)
# mtext(paste("PC1 (", round(s$cont$importance[2,1]*100, 1), "%)", sep = ""),side=1,outer=T,line=1.2)
# mtext(paste("PC2 (", round(s$cont$importance[2,2]*100, 1), "%)", sep = ""),side=2,outer=T,line=1.2)
# attach the first two PC scores and merge them into env via coords
geo <- bind_cols(geo, g[,1:2])
env <- geo %>% select(coords, PC1, PC2) %>% right_join(env)
rm(p,g,ordi)
#### a few filters ####
#remove lat > 55 (keep only southern lakes that overlap the fish surveys)
env <- env %>% filter(lce_lat < 55)
#HL is better for altitude than lce, yves is better for depth than HL, and simon better for watershed area
env <- env %>% select(-HL_Depth_avg, - HL_Wshd_area)
#removing bad columns: some rare LU (land-use) categories
env <- env %>% select(-(`Non classifié`:`Sol nu et lande`))
#removing useless column
env <- env %>% select(-q, -coords)
env$HL_Elevation <- as.numeric(env$HL_Elevation)
#renaming columns
# NOTE(review): positional renaming -- this silently breaks if the column
# order upstream ever changes; verify the order matches.
names(env) <- c('Sigeom PC1','Sigeom PC2','LCE','lake.area','ShoreDev','masl','residence_time','Zmax','Zmean','lat','long',
'watershed.area','transparency','pH','conductivity','Tmin','Tmax','LU.%ag','LU.%Anthro','LU.%Cut','LU.%Forest','LU.%Wetlands')
# #remove vars with a lot of NAs?
# skimr::skim(env)
# env <- drop_na(env)
#rearrange: put the ID and coordinate columns first.
# NOTE(review): mixing positive selections with -`Sigeom PC1`/-`Sigeom PC2`
# and everything() -- the PC columns appear to be retained (they are dropped
# explicitly again in the walleye section below); confirm intended.
env <- env %>% select(LCE, lat, long, -`Sigeom PC1`, -`Sigeom PC2`, everything()) %>%
arrange(LCE)
# M <- cor(env[,6:22], use='pairwise.complete.obs')
# library(corrplot)
# corrplot(M, type = "upper",tl.col=1,diag=F)
# corrplot(M, add = TRUE, type = "lower", method = "number",
#          diag = FALSE, tl.pos = "n", cl.pos = "n")
#connect: keep only lake ID, species, and the connectivity metric.
connect <- connect %>% select(1:3)
names(connect) <- c('LCE','species','connectivity')
#cols: color palettes reused by all figures below.
cols <- RColorBrewer::brewer.pal(4,'Dark2')
cols2 <- viridis::viridis(50)
# #checking if genetic diversity correlates with connectivity
# gd <- read_csv('/Users/vincentfugere/Google Drive/Recherche/FisHab_LakePulse/data/Ferchaud gen div/ferchaud_joined.csv')
# gd <- gd %>% filter(ID_RHS %in% sites$RHS, distance < 1000)
# gd$LCE <- sites$LCE[match(gd$ID_RHS,sites$RHS)]
# gd <- connect %>% filter(LCE %in% gd$LCE) %>%
# pivot_wider(id_cols = LCE, names_from = species, values_from = connectivity) %>%
# left_join(gd)
# gd.sub <- gd %>% select(`Brook trout (high)`,MeanFst,`Average Pi`,POLYMORPHIC_PROP)
# plot(gd.sub)
#### random forest for walleye
# Assemble the modelling table: walleye CPUE from the PENDJ survey joined
# to the environmental covariates (Sigeom PCs dropped here) and the
# high-connectivity metric; CPUE is log10-ed and predictors are scaled.
dat <- filter(cpue.clean, survey == 'PENDJ') %>%
select(LCE, year, CPUE) %>%
inner_join(select(env,-`Sigeom PC1`,-`Sigeom PC2`))
# NOTE(review): str_detect(species, '(high)') is a regex -- the parentheses
# form a group, so this matches the substring "high" anywhere, not the
# literal "(high)"; use fixed("(high)") if a literal match is intended.
dat <- connect %>% filter(str_detect(species, 'Walleye'), LCE %in% dat$LCE, str_detect(species, '(high)')) %>%
select(-species) %>% inner_join(dat) %>% select(-LCE) %>% select(CPUE, year, everything()) %>%
mutate_at(vars(CPUE), log10) %>% mutate_at(vars(year), as.numeric) %>%
mutate_at(vars(year:`LU.%Wetlands`), scale)
dat <- filter(dat, is.finite(CPUE), !is.na(CPUE))
# 100 random 75/25 train/test splits of a conditional random forest
# (party::cforest); collect test-set r2 and normalized variable importance.
r2.vec <- numeric(0)
imp.vec <- data.frame()
for(i in 1:100){
smpl <- sample(nrow(dat), round(nrow(dat)*.75))
training.set <- dat[smpl,]
test.set <- dat[-smpl,]
mod <-cforest(CPUE ~ ., training.set, controls = cforest_unbiased(mtry=6))
test.results <- predict(mod, newdata = test.set)
r2.vec <- c(r2.vec,round(caret::postResample(pred = test.results, obs = test.set$CPUE)[2],4))
# Clamp negative importances to zero and rescale to percentages.
vars <- varimp(mod, conditional = F)
vars[vars < 0] <- 0
vars <- (vars/sum(vars))*100
imp.vec <- bind_rows(imp.vec, vars)
}
# Median r2 and median importance across the 100 replicates.
r2 <- round(median(r2.vec),4)
r2 <- bquote(italic(r)^2~'(pred vs. obs)'==.(r2))
vars <- apply(imp.vec, 2, median)
vars <- sort(vars)
# Horizontal barplot of variable importance, saved to the Desktop.
pdf('~/Desktop/walleye1.pdf',width = 8, height = 5, pointsize = 12)
par(mar=c(4,12,1,1))
barplot(vars,col=cols[1],border=0,horiz=T,las=1)
title(xlab='relative influence (%)')
legend('bottomright',bty='n',legend=r2)
dev.off()
# dat2 <- filter(cpue.clean, survey == 'PENDJ') %>%
# select(LCE, CPUE, year) %>% mutate_at(vars(year), as.factor)
# env2 <- select(env, LCE, lat, long, Tmax, masl, Zmean)
# dat2 <- inner_join(dat2, env2) %>% select(-LCE) %>%
# mutate_at(vars(CPUE), log10) %>% mutate_at(vars(lat:masl), scale)
# dat2 <- filter(dat2, masl > -1.5)
#
# mod <- gam(CPUE ~ te(lat,long,bs='gp') + ti(Tmax, k=5) + ti(masl, k=5) + ti(Tmax,masl,k=5) + s(year,bs='re',k=2), data=dat2, correlation = corSpher(form = ~ lat + long))
# gam.check(mod)
# summary(mod)
# pdf('~/Desktop/walleye2.pdf',width = 6, height = 5, pointsize = 12)
# par(mar=c(4,4,2,2))
# fvisgam(mod, view=c('Tmax','masl'),color=cols2,hide.label=T,plot.type='persp',theta=45,main=NULL,zlab = 'log10 CPUE',xlab='Tmax',ylab='masl')
# #fvisgam(mod, view=c('Tmax','masl'),dec=1,color=cols2,hide.label=T,xlab='log10 Tmax',ylab='log10 masl',main = 'log10 CPUE')
# dev.off()
#
# pdf('~/Desktop/walleye3.pdf',width = 6, height = 5, pointsize = 12)
# plot_smooth(mod, view = 'Tmax',col=cols[1],rug=F,print.summary=F,rm.ranef = T,xlab = 'altitude',ylab = 'log10 CPUE', hide.label = T,bty='o',h0 = NULL)
# pval <- round(summary(mod)$s.table[3,4],4)
# r2 <- round(summary(mod)$r.sq,4)
# legend('bottomright',legend=c(paste0('p = ',pval),paste0('r2 = ',r2)),bty='n')
# dev.off()
#### random forest for brook char
# Same random-forest workflow as the walleye section, for brook trout
# (PENOF survey). NOTE(review): here the full env table is joined (Sigeom
# PCs included), unlike the walleye section -- confirm this is intended.
dat <- filter(cpue.clean, survey == 'PENOF') %>%
select(LCE, CPUE, year) %>%
inner_join(env)
# NOTE(review): '(high)' is treated as a regex group matching "high"
# anywhere (see walleye section).
dat <- connect %>% filter(str_detect(species, 'Brook trout'), LCE %in% dat$LCE, str_detect(species, '(high)')) %>%
select(-species) %>% inner_join(dat) %>% select(-LCE) %>% select(CPUE, year, everything()) %>%
mutate_at(vars(CPUE), log10) %>% mutate_at(vars(year), as.numeric) %>%
mutate_at(vars(year:`LU.%Wetlands`), scale)
dat <- filter(dat, is.finite(CPUE), !is.na(CPUE))
# 100 random 75/25 splits; collect test-set r2 and importances.
r2.vec <- numeric(0)
imp.vec <- data.frame()
for(i in 1:100){
smpl <- sample(nrow(dat), round(nrow(dat)*.75))
training.set <- dat[smpl,]
test.set <- dat[-smpl,]
mod <-cforest(CPUE ~ ., training.set, controls = cforest_unbiased(mtry=6))
test.results <- predict(mod, newdata = test.set)
r2.vec <- c(r2.vec,round(caret::postResample(pred = test.results, obs = test.set$CPUE)[2],4))
vars <- varimp(mod, conditional = F)
vars[vars < 0] <- 0
vars <- (vars/sum(vars))*100
imp.vec <- bind_rows(imp.vec, vars)
}
r2 <- round(median(r2.vec),4)
r2 <- bquote(italic(r)^2~'(pred vs. obs)'==.(r2))
vars <- apply(imp.vec, 2, median)
vars <- sort(vars)
# Variable-importance barplot for brook trout.
pdf('~/Desktop/brookTrout1.pdf',width = 8, height = 5, pointsize = 12)
par(mar=c(4,12,1,1))
barplot(vars,col=cols[3],border=0,horiz=T,las=1)
title(xlab='relative influence (%)')
legend('bottomright',bty='n',legend=r2)
dev.off()
# dat2 <- filter(cpue.clean, survey == 'PENOF') %>%
# select(LCE, CPUE, year)
# env2 <- select(env, LCE, lat, long, Tmax, Zmean)
# dat2 <- inner_join(dat2, env2) %>% select(-LCE) %>%
# mutate_at(vars(CPUE), log10) %>% mutate_at(vars(year), as.factor)
#
# mod <- gam(CPUE ~ te(lat,long,bs='gp') + s(Tmax, k=8) + s(year, bs='re',k=2), data=dat2, correlation = corSpher(form = ~ lat + long))
# gam.check(mod)
# summary(mod)
# pdf('~/Desktop/brookTrout2.pdf',width = 6, height = 5, pointsize = 12)
# par(mar=c(4,4,2,2))
# plot_smooth(mod, view = 'Tmax',col=cols2[3],rug=F,rm.ranef = T,print.summary=F,xlab = 'Tmax',ylab = 'log10 CPUE', hide.label = T,bty='o',h0 = NULL,xlim=c(21.5,25))
# pval <- round(summary(mod)$s.table[2,4],4)
# r2 <- round(summary(mod)$r.sq,4)
# legend('bottomright',legend=c(paste0('p = ',pval),paste0('r2 = ',r2)),bty='n')
# dev.off()
#### random forest for lake trout ####
# Same workflow again for lake trout (PENT survey); this species uses the
# LOW-connectivity metric, unlike walleye/brook trout.
dat <- filter(cpue.clean, survey == 'PENT') %>%
select(LCE, CPUE, year) %>%
inner_join(env)
# NOTE(review): '(low)' is a regex group matching "low" anywhere (see
# walleye section for the fixed() alternative).
dat <- connect %>% filter(str_detect(species, 'Lake trout'), LCE %in% dat$LCE, str_detect(species, '(low)')) %>%
select(-species) %>% inner_join(dat) %>% select(-LCE) %>% select(CPUE, year, everything()) %>%
mutate_at(vars(CPUE), log10) %>% mutate_at(vars(year), as.numeric) %>%
mutate_at(vars(year:`LU.%Wetlands`), scale)
dat <- filter(dat, is.finite(CPUE), !is.na(CPUE))
# 100 random 75/25 splits; collect test-set r2 and importances.
r2.vec <- numeric(0)
imp.vec <- data.frame()
for(i in 1:100){
smpl <- sample(nrow(dat), round(nrow(dat)*.75))
training.set <- dat[smpl,]
test.set <- dat[-smpl,]
mod <-cforest(CPUE ~ ., training.set, controls = cforest_unbiased(mtry=6))
test.results <- predict(mod, newdata = test.set)
r2.vec <- c(r2.vec,round(caret::postResample(pred = test.results, obs = test.set$CPUE)[2],4))
vars <- varimp(mod, conditional = F)
vars[vars < 0] <- 0
vars <- (vars/sum(vars))*100
imp.vec <- bind_rows(imp.vec, vars)
}
r2 <- round(median(r2.vec),4)
r2 <- bquote(italic(r)^2~'(pred vs. obs)'==.(r2))
vars <- apply(imp.vec, 2, median)
vars <- sort(vars)
# Variable-importance barplot for lake trout ("touladi").
pdf('~/Desktop/touladi1.pdf',width = 8, height = 5, pointsize = 12)
par(mar=c(4,12,1,1))
barplot(vars,col=cols[4],border=0,horiz=T,las=1)
title(xlab='relative influence (%)')
legend('bottomright',bty='n',legend=r2)
dev.off()
#### random forest for diversity
# Species richness per lake/survey, with sampling-effort correction:
# richness is first regressed on the number of inventories (GAM below) and
# the residuals are used as the effort-corrected response.
dat <- com %>% group_by(LCE,survey) %>%
summarise('richness' = n_distinct(species), 'nb_inv' = sum(surv.number)) %>%
left_join(env, by = 'LCE') %>% arrange(LCE) %>% drop_na
# Average high-connectivity across species for each lake, then join.
dat <- connect %>% filter(LCE %in% dat$LCE, str_detect(species, '(high)')) %>%
group_by(LCE) %>% summarize(connectivity = mean(connectivity)) %>%
inner_join(dat) %>% select(-LCE) %>%
mutate_at(vars(richness,nb_inv), log10) %>% mutate_at(vars(survey), as.factor)
# Diagnostic plots: richness vs sampling effort (log and raw scales).
par(mfrow=c(1,2))
plot(richness~nb_inv,dat,ylab='log10 richness',xlab="log10 Nb. d'inventaire",pch=16)
plot(y=(10^dat$richness),x=(10^dat$nb_inv),ylab='richesse spécifique',xlab="Nb. d'inventaire",pch=16)
# Effort correction: replace richness by the residuals of richness ~ effort.
mod <- gam(richness ~ s(nb_inv,k=10),data=dat)
plot_smooth(mod, view = 'nb_inv',col=1,add=T,rug=F)
dat$richness <- resid(mod)
dat <- select(dat, -nb_inv)
# 100 random 75/25 splits; collect test-set r2 and importances.
r2.vec <- numeric(0)
imp.vec <- data.frame()
for(i in 1:100){
smpl <- sample(nrow(dat), round(nrow(dat)*.75))
training.set <- dat[smpl,]
test.set <- dat[-smpl,]
mod <-cforest(richness ~ ., training.set, controls = cforest_unbiased(mtry=6))
test.results <- predict(mod, newdata = test.set)
r2.vec <- c(r2.vec,round(caret::postResample(pred = test.results, obs = test.set$richness)[2],4))
vars <- varimp(mod, conditional = F)
vars[vars < 0] <- 0
vars <- (vars/sum(vars))*100
imp.vec <- bind_rows(imp.vec, vars)
}
r2 <- round(median(r2.vec),4)
r2 <- bquote(italic(r)^2~'(pred vs. obs)'==.(r2))
vars <- apply(imp.vec, 2, median)
vars <- sort(vars)
# Variable-importance barplot for effort-corrected richness.
pdf('~/Desktop/richness.pdf',width = 8, height = 5, pointsize = 12)
par(mar=c(4,12,1,1))
barplot(vars,col=cols[2],border=0,horiz=T,las=1)
title(xlab='relative influence (%)')
legend('bottomright',bty='n',legend=r2)
dev.off()
## map of effort-corrected richness, plus a spatial GAM on Tmax
library(rnaturalearth)
library(rnaturalearthdata)
library(ggspatial)
library(viridis)
theme_set(theme_bw())
# Basemap and plotting window padded around the sampled lakes.
world <- ne_countries(scale = "medium", returnclass = "sf")
xlims <- range(dat$long)+c(-1,1)
ylims <- range(dat$lat)+c(-2,0.5)
# Map: one point per lake, colored by richness residual, shaped by survey.
ggplot(data = world) +
geom_sf() +
coord_sf(xlim = xlims, ylim = ylims) +
#annotation_scale(location = "bl", width_hint = 0.35) +
annotation_north_arrow(location = "tr", which_north = "true",
pad_x = unit(0.2, "in"), pad_y = unit(0.2, "in"),
style = north_arrow_fancy_orienteering) +
geom_point(data=dat,aes(x=long,y=lat,col=richness,pch=survey)) + scale_color_viridis() + scale_shape_manual(values=c(15,16,17,18))
# Boxplot of normalized richness by survey.
bp <- ggplot(dat, aes(x=survey, y=richness, fill=survey)) +
geom_boxplot()+
labs(x="Inventaire", y = "Richesse normalisée")
bp + theme_classic()
# GAM: spatial smooth (Gaussian-process tensor on lat/long) + Tmax smooth
# + survey as a random effect; spatial correlation structure on residuals.
dat2 <- dat %>%
select(richness, survey, lat, long, Tmax)
mod <- gam(richness ~ te(lat,long,bs='gp') + s(Tmax, k=15) + s(survey, bs='re',k=3), data=dat2, correlation = corSpher(form = ~ lat + long))
gam.check(mod)
summary(mod)
# Partial-effect plot of Tmax with the random effect removed.
pdf('~/Desktop/richness2.pdf',width = 6, height = 5, pointsize = 12)
par(mar=c(4,4,2,2))
plot_smooth(mod, view = 'Tmax',xlim=c(22,26),col=cols[2],rug=F,rm.ranef = T,print.summary=F,xlab = 'Tmax',ylab = 'richness (resid.)', hide.label = T,bty='o',h0 = NULL)
pval <- round(summary(mod)$s.table[2,4],4)
r2 <- round(summary(mod)$r.sq,4)
legend('bottomright',legend=c(paste0('p = ',pval),paste0('r2 = ',r2)),bty='n')
dev.off()
|
00246d2273397bf8fb1b89488cfd08a8a755f11b | e04b360ce5307d44c775cde976a39ce71e93e89f | /man/pad_ws.Rd | caf92b0658c3dc5e5672dd15a4ae1e8a958d832e | [] | no_license | haukelicht/AnnotationModelsR | efe73983e0f0171c7cf75aa1688f1994aa6626fd | b507e0d5f581adcaea93fc214cbb05cd665afa21 | refs/heads/master | 2020-09-14T11:32:10.682643 | 2020-07-07T07:57:47 | 2020-07-07T07:57:47 | 223,117,022 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 400 | rd | pad_ws.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{pad_ws}
\alias{pad_ws}
\title{Pad white space}
\usage{
pad_ws(x, n, left = TRUE)
}
\arguments{
\item{x}{character vector}
\item{n}{total number of characters in the padded result}
\item{left}{padding to the left (default: \code{TRUE}). otherwise padding to the right}
}
\value{
a character vector
}
\description{
Pad white space
}
|
ebffb7f1103659dc89edf5e1f900db2dc6194679 | 9389d9795b92792d565dc8417b212485710725e7 | /conc.fun.R | 42339a7ab5f6ea848a38bf3bdf645a1a4164bb03 | [] | no_license | SoilWaterLab/Septic-GHGs | 3b445154331cf60941aaf063ce264856fb76ddf8 | 89842ecb2e7f6954416b7b4891b6962fc8ad0842 | refs/heads/master | 2021-01-10T22:06:28.538616 | 2015-10-01T21:09:08 | 2015-10-01T21:09:08 | 42,467,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 755 | r | conc.fun.R | conc.fun <-function(sample_data, standard_coeffs, chamber_volumes, directory, gas, print=FALSE){
#Note: edited on 8/28/15 to include a fixed standard coefficient slope and intercept
collar_surface_area <- 0.064 #meters
std_curve_nums <- unique(standard_coeffs$Std_Curve_Num)
for(j in 1:nrow(sample_data)) {
sample_data$Concentration[j] <- sapply(sample_data$Area[j], function(x) x*standard_coeffs$Slope + standard_coeffs$Intercept)
if (is.na(sample_data$Concentration[j])){
sample_data$Concentration[j]=NA
}else if(sample_data$Concentration[j]<0){
sample_data$Concentration[j]=0
}
}
sample_data$Time <- as.numeric(sample_data$Time)
sample_data$Time_min <- sapply(sample_data$Time, function(x) x/60)
conc_data <- sample_data
} |
e0f2d249ac3cafce8fe18d9ec43941c635286215 | 1042861ed128d3208977181b5838955a86406b43 | /.init.R | c54804aea3256572300029e1dd12c1e9605a2c55 | [] | no_license | pseabour/https-github.com-hyginn-R-Intro | fd12c0d87d15adf1fc8303efcd84185178cccde8 | 33585b1f9a3300126f011a87beec599b131a8046 | refs/heads/master | 2020-03-30T05:12:26.757151 | 2018-05-15T19:19:41 | 2018-05-15T19:19:41 | 150,786,721 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,972 | r | .init.R | # .init.R
# Functions to initialize this session
# Boris Steipe
# ==============================================================================
cat("Initializing ...\n")
# Source local functions
cat(" sourcing local functions from \"./R\" directory ...\n")
for (script in list.files(path = "./R",
pattern = "\\.R$",
full.names = TRUE)) {
source(script)
cat(sprintf(" ... %s\n", script))
}
cat("\n")
# Functions for making local, editable copies of scripts
# Stop with an informative error if a required file is missing.
#
# FN: path of the file that must exist.
# Returns NULL invisibly when the file exists; otherwise raises an error
# whose message names the missing file.
checkFileExists <- function(FN) {
  if (! file.exists(FN)) {
    # Bug fix: the original format string contained a stray "\ " escape,
    # which is an unrecognized escape sequence and a parse error in R.
    stop(sprintf("PANIC: expected file \"%s\" not found. %s",
                 FN,
                 "Aborting initialization. Contact your instructor.\n\n"),
         call. = FALSE)
  }
  invisible(NULL)
}
# Make an editable local copy of script file FN, unless one already exists.
# The copy is named <prefix>FN with the first character after the prefix
# capitalized (e.g. "tmp.R" -> "myTmp.R"), or exactly outFile when that
# argument is supplied. Lines 1 and 5 of the copy are rewritten as a header
# inviting the student to edit the file. Aborts if FN itself is missing.
writeMyCopy <- function(FN, prefix = "my", outFile) {
  if (missing(outFile)) {
    # Derive the target name from the prefix and the source file name.
    outFile <- sprintf("%s%s", prefix, FN)
    pos <- nchar(prefix) + 1
    substr(outFile, pos, pos) <- toupper(substr(outFile, pos, pos))
  }
  checkFileExists(FN)               # abort if the template is missing
  if (file.exists(outFile)) {
    return(invisible(NULL))         # never clobber an existing copy
  }
  cat(sprintf(" creating local script file: \"%s\" ... ", outFile))
  contents <- readLines(FN)
  contents[1] <- sprintf("# %s", outFile)
  contents[5] <- sprintf("# | %s%s |",
                         "Edit this file with your notes,",
                         " experiments, and comments.")
  writeLines(contents, outFile)
  cat(" oK.\n")
}
# Create a local copy of all core .R modules if those copies don't exist yet.
writeMyCopy("tmp.R", outFile = "myIntroNotes.R")
writeMyCopy("sequenceAnalysis.R")
writeMyCopy("dataIntegration.R")
writeMyCopy("numericData.R")
# Clean up: remove the helper functions so they do not clutter the
# student's workspace after initialization.
rm(checkFileExists)
rm(writeMyCopy)
cat("... done.\n")
# Open main document in the script pane (RStudio-specific side effect).
file.edit("R-Intro.R")
# [End]
4ba126a8aef17d21d9f074a56005f855a1056f3b | 5f712d7949490943b913c1558a940187ab883015 | /R/Proba_pH.R | 62e83848f66c01fdfe1fb707405637b6ddbee60c | [] | no_license | Subhasishbasak/predictive-microbiology | fdfa9e752f7ceb5447bea2e58ec992fdd9eb6bd1 | a8af1d12f7685703f4343cd4f3cc0610401e8295 | refs/heads/master | 2022-04-20T00:07:25.311176 | 2020-04-16T16:07:27 | 2020-04-16T16:07:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,026 | r | Proba_pH.R | #' This function describes the evolution of the single-cell growth probability with pH. For this, we used the model proposed by Jean Christophe Augustin and Aurelia Czarnecka-Kwasiborski in 2012, to describe the increase of the probability from 0 to 1 with increasing values of pH.
#'
#' @param pH pH value # a number
#' @param pHinf The cellular minimal pH for growth # a number
#' @param pHsup The cellular maximal pH for growth # a number
#'
#' @return proba_pH_Aug The single-cell growth probability # a number
#' @export
#'
#' @examples
#' Proba_pH(5, pHinf = -4.34, pHsup = 5.93) # pHinf & pHsup values taken from Augustin's 2012 review
#' Proba_pH(4,pHinf=-4.34,pHsup=5.93)
#' Proba_pH(7,pHinf=-4.34,pHsup=5.93)
Proba_pH <- function(pH, pHinf, pHsup) {
  # Single-cell growth probability as a function of pH (Augustin &
  # Czarnecka-Kwasiborski, 2012): 0 at or below pHinf, 1 at or above pHsup,
  # and an exponential interpolation in between. Vectorized over pH.
  #
  # Bug fix: in the original the for-loop body was empty (the closing brace
  # came immediately after the loop header), so the if/else chain ran only
  # once, with i frozen at length(pH).
  proba_pH_Aug <- numeric(length(pH))   # preallocate the result vector
  for (i in seq_along(pH)) {
    if (pH[i] <= pHinf) {
      proba_pH_Aug[i] <- 0
    } else if (pH[i] >= pHsup) {
      proba_pH_Aug[i] <- 1
    } else {
      proba_pH_Aug[i] <- (exp(-pH[i]) - exp(-pHinf)) / (exp(-pHsup) - exp(-pHinf))
    }
  }
  return(proba_pH_Aug)
}
|
e726b2c1b8092b58a966c5c9eabcea2f73ea5183 | e0ef9d469d33b1d04a10e7fd44e3a491767cc439 | /MovieLens.R | c1000fc4c07ea42b0e8cc194fdf8b365dcd5b28e | [] | no_license | klim-hb/capstone | 95b9713e7618215a5e9a60c425be71f8ba1b487f | 8fdbed849cb9ce0a09a1306847dc1da030574080 | refs/heads/master | 2022-07-07T06:19:43.002643 | 2020-05-16T07:35:40 | 2020-05-16T07:35:40 | 264,367,851 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,869 | r | MovieLens.R | # It is easier to follow the flow in the RMD script.
# Loading libraries
library(dplyr) #data wrangling
library(tidyverse)
library(kableExtra) #very useful package to change the style of output tables
library(knitr)
library(tidyr)
library(stringr)
library(ggplot2)
library(recosystem) #package for recommendation system; used as second method in this report
library(tinytex) #to enable LaTeX
#library(readr) - on early stage of the project, to export data from MovieLens to csv files
################################
# Create edx set, validation set
################################
# Course-provided boilerplate: downloads MovieLens 10M and builds the
# edx (90%) / validation (10%) partition. Do not modify the logic.
# Note: this process could take a couple of minutes
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat is "::"-delimited; convert the delimiter to tabs for fread.
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
## the following line was changed from
## movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId)[movieID],
## in order to ensure no NAs are in movie ID and genres for this dataset.
## we randomly checked several records and it appears that data was not compromised.
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
# Free the intermediate objects.
rm(dl, ratings, movies, test_index, temp, movielens, removed)
# UPDATE: the below is no longer appicable after moving to GCP:
# The original EDX code for forming the datasets EDX and VALIDATION was changed due to limitation of computing resources
# (the R Studio and entire PC was hanging for days hence it was not possible to perform the analysis). Instead,
# the process was divided into 2 parts: forming the datasets as per EDX instructrions and the analysis itself.
# The files for datasets were saved separetely as csv documents and imported back to perform the analysis
#edx <- read_csv("edx.csv", col_types = cols(movieId = col_integer(),
# timestamp = col_integer(), userId = col_integer()), trim_ws = FALSE)
#validation <- read_csv("validation.csv", col_types = cols(movieId = col_integer(),
# timestamp = col_integer(), userId = col_integer()), trim_ws = FALSE)
#installation of TinyTeX in R (was required on the first attemp to create PDF: tinytex::install_tinytex()
## RMSE
# The RMSE function in R code:
# Root-mean-square error between observed and predicted ratings.
# Both arguments are numeric vectors of equal length; returns one number.
RMSE <- function(true_ratings = NULL, predicted_ratings = NULL) {
  errs <- true_ratings - predicted_ratings
  sqrt(mean(errs^2))
}
## Regularization
# Fit the regularized movie+user-effect linear model for a given penalty
# lambda on trainset, predict on testset, and return the resulting RMSE.
# Relies on dplyr verbs and on the RMSE() helper defined above.
#
# lambda    regularization penalty (shrinks effects of rarely-rated
#           movies/users toward zero)
# trainset  data frame with userId, movieId, rating
# testset   data frame with the same columns, used for evaluation
regularization <- function(lambda, trainset, testset){
# Mean (global average rating, the model baseline)
mu <- mean(trainset$rating)
# Movie effect (bi): regularized per-movie deviation from mu
b_i <- trainset %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+lambda))
# User effect (bu): regularized per-user deviation after removing b_i
b_u <- trainset %>%
left_join(b_i, by="movieId") %>%
filter(!is.na(b_i)) %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+lambda))
# Prediction: mu + bi + bu
predicted_ratings <- testset %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
filter(!is.na(b_i), !is.na(b_u)) %>%
mutate(pred = mu + b_i + b_u) %>%
pull(pred)
# NOTE(review): rows with unseen movies/users are filtered out of
# predicted_ratings but not out of testset$rating, so the two vectors can
# differ in length and R will silently recycle -- verify lengths match.
return(RMSE(predicted_ratings, testset$rating))
}
## Recosystem
# There are different utilities and libraries available in R packages for testing and training the recommendation systems. In particular, recommenderlab and recosystem proved to be most efficient.
#
# In this project we will utilize recosystem, as it is intuitively more clear and uses simple syntax.
#
# recosystem is an R wrapper of the [LIBMF library](http://www.csie.ntu.edu.tw/~cjlin/libmf/) developed by [Yu-Chin Juan, Yong Zhuang, Wei-Sheng Chin and Chih-Jen Lin](http://www.csie.ntu.edu.tw/~cjlin/libmf/), an open source library for recommender system using marix factorization.
#
# We will use this library as our second method for the project.
## Steps of this project
# The project will be performed in the following steps:
#
# 1. We will analyze the provided data
# 2. We will preprocess the provided data and split edx into two parts
# 3. We will build two methods for our task: Linear Model (via RMSE and regularization) & Factorization Model (via recosystem)
# 4. We will test Linear Model against Final Validation (as a requirement of the project, the validation set could only be used once; since the aim of the project is to practice Linear Model - we will only utilize it with the LM itself.)
# 5. We will record and analyze the results
# 6. We will conclude the discussion on both methods used with observed limitations and recommendations for future research.
#
# Exploratory Data Analysis
## Initial Datasets
#this commented step was removed as the problem with big data processing was solved:
#exporting initial data in case R studio crashes again
#this code was commented in the final version
#write.csv(edx, "edxInitial.csv", row.names=F)
#write.csv(validation, "validationInitial.csv", row.names=F)
#Utilizing kable and kableExtra packages to format the tables for our pdf report as described here: https://cran.r-project.org/web/packages/kableExtra/vignettes/awesome_table_in_html.html
#Unfortunately, in the final pdf it does not look as neat :(
#create a dataframe to display the basic info about both sets
# Summary table comparing the edx and validation partitions (row/column
# counts, distinct users/movies/genre combinations).
text_tbl <- data.frame(
Feature = c("Number of Rows", "Number of Columns", "Unique Users","Unique Movies","Variety of Genres"),
EDX = c(nrow(edx), ncol(edx),n_distinct(edx$userId),n_distinct(edx$movieId),n_distinct(edx$genres)),
Validation = c(nrow(validation), ncol(validation),n_distinct(validation$userId),n_distinct(validation$movieId),n_distinct(validation$genres)))
#display the dataframe with kable package, making the first row background red, font color - white and bold;
#only first column to be also bold in values for display purposes.
kable(text_tbl) %>%
kable_styling(full_width = F) %>%
column_spec(1, bold = T) %>%
column_spec(2, bold=F) %>%
column_spec(3, bold=F) %>% row_spec(0, bold = T, color = "white", background = "#D7261E")
# The datasets have the following columns:
#Same as previous text_tbl - make the output neat.
# Data dictionary for the six dataset columns, rendered with kable.
# NOTE(review): the Comment entries are matched to names(edx) by position.
text_tbl1 <- data.frame(
Name = c(names(edx)),
Comment = c("Unique identification number for each user in the dataset","Unique identification number for each movie in the dataset","A range of marks(rating) given to a movie by specific user","A record of specific time when the rating was given by the user to a particular movie","Title of the movie with Release date","Genre(s) of the movie"))
kable(text_tbl1) %>%
kable_styling(full_width = F) %>%
column_spec(1, bold =T) %>%
column_spec(2, bold=F) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
# Preview the first rows of edx in a compact styled table.
edx %>% head() %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 7,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
# We can see the following observations:
#
# * timestamp - the format currently displays the number of second since Jan 1, 1970 (EPOCH) and is hard to understand
# * title - Movies' titles have the year of their release, this data might be helpful in our analysis
# * genres - column consists of a variety of genres divided with |-sign; it might be useful to segregate the genres for our analysis
#
# Hence, prior to starting any analysis of the data, the dataset must be put in order.
# Methods
## Preprocessing of the data
#Preprocessing
#Please refer to the textbook section for a detailed explanation of colSds and nearZeroVar()
#no point to use because we need all columns and they were pre-selected by EDX team
#Also described here: https://topepo.github.io/caret/pre-processing.html - the type of data is really not appropriate for our purpose, we shall skipp this step and preprocess "manually"
# Convert timestamp of each rating to a proper format in EDX dataset
# (seconds since the 1970-01-01 epoch -> POSIXct date-time).
edx$date <- as.POSIXct(edx$timestamp, origin="1970-01-01")
# As discussed on EDX forum - we cannot change validation dataset until last RMSE function is done. This is actually a great news, since we want to keep the Global Enviroment clean
# Convert timestamp of each rating to a proper format in validation dataset
#validation$date <- as.POSIXct(validation$timestamp, origin="1970-01-01")
# Separate the date of each rating in EDX dataset and change the format of displayed year/month/day. We want to split it because we want to dig deep into patterns between the user behavior
edx$yearR <- format(edx$date,"%Y")
edx$monthR <- format(edx$date,"%m")
edx$dayR <- format(edx$date,"%d")
# Ensure the timedate data recorded as numeric in EDX dataset
# (format() returns character; the plots below need numeric axes).
edx$yearR <- as.numeric(edx$yearR)
edx$monthR <- as.numeric(edx$monthR)
edx$dayR <- as.numeric(edx$dayR)
# Extracting the release year for each movie in edx dataset:
# the regex captures a trailing "(YYYY)" or "(YYYY-YYYY)" from the title;
# for year ranges, only the first year is kept as the release year.
edx <- edx %>%
mutate(title = str_trim(title)) %>%
extract(title,
c("titleTemp", "release"),
regex = "^(.*) \\(([0-9 \\-]*)\\)$",
remove = F) %>%
mutate(release = if_else(str_length(release) > 4,
as.integer(str_split(release, "-",
simplify = T)[1]),
as.integer(release))
) %>%
mutate(title = if_else(is.na(titleTemp),
title,
titleTemp)
) %>%
select(-titleTemp)
# An alternative solution to the above is to consider genre variability as an extension of the movie specification and
# treat each particular combination of genres as a unique category.
# As reported by several peers in the EDX forum, the genre specificity might have a low impact on the total RMSE,
# and hence at this stage of the project, in order to not overwhelm the environment, we will consider the alternative solution. Once the calculations for RMSE models are made, we might reconsider this step, if the RMSE values will not meet the required low values.
# only keeping what we need for EDX without any temp columns
edx <- edx %>% select(userId, movieId, rating, title, genres, release, yearR, monthR, dayR)
# After applying the above changes, the datasets have the following structure:
edx %>% head() %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 7,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
## Arrange edx as training and test datasets as 90/10%
# Split edx 90/10 into trainSet/testSet, mirroring the edx/validation
# construction so model tuning never touches the final validation set.
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = edx$rating, p=0.1, list = FALSE)
trainSet <- edx[-test_index,]
temp <- edx[test_index,]
# Make sure userId and movieId in the test set are also in the train set
testSet <- temp %>%
semi_join(trainSet, by = "movieId") %>%
semi_join(trainSet, by = "userId")
# Add rows removed from the test set back into the train set
removed <- anti_join(temp, testSet)
trainSet <- rbind(trainSet, removed)
rm(test_index, temp, removed)
# Display Results: basic counts for train/test/validation side by side.
text_tbl2 <- data.frame(
Feature = c("Number of Rows", "Number of Columns", "Unique Users","Unique Movies","Variety of Genres"),
Train = c(nrow(trainSet), ncol(trainSet),n_distinct(trainSet$userId),n_distinct(trainSet$movieId),n_distinct(trainSet$genres)),
Test = c(nrow(testSet), ncol(testSet),n_distinct(testSet$userId),n_distinct(testSet$movieId),n_distinct(testSet$genres)),
Validation = c(nrow(validation), ncol(validation),n_distinct(validation$userId),n_distinct(validation$movieId),n_distinct(validation$genres)))
kable(text_tbl2) %>%
kable_styling(full_width = F) %>%
column_spec(1, bold = T) %>%
column_spec(2, bold=F) %>%
column_spec(3, bold=F) %>%
column_spec(4, bold=F) %>% row_spec(0, bold = T, color = "white", background = "#D7261E")
## Dataset analysis
### Ratings Distribution
# Heatmap for User vs Movies sample of 100
# Sparsity heatmap: 100 random users x 100 random movies; a filled cell
# means that user rated that movie.
usersHM <- sample(unique(trainSet$userId), 100)
trainSet %>% filter(userId %in% usersHM) %>%
select(userId, movieId, rating) %>%
mutate(rating = 1) %>%
spread(movieId, rating) %>%
select(sample(ncol(.), 100)) %>%
as.matrix() %>% t(.) %>%
image(1:100, 1:100,. , xlab="Movies", ylab="Users")
abline(h=0:100+0.5, v=0:100+0.5, col = "grey")
title("User and Movie Matrix")
# create plot to show rating distribution across values(0.5-5)
trainSet %>%
group_by(rating) %>%
summarize(count = n()/1000) %>%
ggplot(aes(x = rating, y = count)) +
geom_line() + xlab("Rating 0.5-5") + ylab("# Ratings, thousands") +ggtitle("Distribution of Ratings")
# The distribution of ratings varies from user to user, movie to movie, year to year.
# let's clean the global enviroment, as we can see some smoke coming from the computer - partly solved with gooogle server, but let's still keep the enviroment clean
# NOTE(review): chkEdx is not created anywhere in the visible code, so this
# rm() call will emit a warning -- verify or drop that name.
rm(text_tbl, text_tbl1, text_tbl2, chkEdx)
# count how many whole star ratings in the dataset
# a = share of whole-star ratings, b = overall mean rating (cited in text).
a<-sum(trainSet$rating %% 1 == 0)/length(trainSet$rating)
b<-mean(trainSet$rating)
# In general, half star ratings are less common than whole star ratings.
# In the *trainSet* we can count over 79% of whole star ratings across all users.
# At the same time, the avarage ratings across the dataset is 3.5.
# show rating distribution over years; Im assigning to ratdistY because otherwise shows warning for geom_path
# NOTE(review): the result is not actually assigned to ratdistY here.
trainSet %>%
group_by(yearR,rating) %>%
summarize(count = n()/1000) %>%
ggplot(aes(x = rating, y = count)) +
geom_line() + xlab("Rating scale") + ylab("# Ratings, thousands") +
ggtitle("Distribution of Ratings over Years")+facet_wrap(~yearR, nrow=3) +
theme(axis.text.x=element_text(size=7, vjust=0.5))
#ReleaseRating Plot: mean rating as a function of a movie's release year.
trainSet %>% group_by(release) %>%
summarize(rating = mean(rating)) %>%
ggplot(aes(release, rating)) +
geom_point() +
geom_smooth()
# show rating distribution over months over years
trainSet %>%
group_by(yearR,monthR) %>%
summarize(count = n()/1000) %>%
ggplot(aes(x = monthR, y = count)) +
geom_point() +
scale_x_continuous(name="Months",breaks=seq(1,12,1),
labels=c("Jan","Feb","Mar","Apr","May","Jun",
"Jul","Aug","Sep","Oct","Nov","Dec"))+
ylab("# Ratings, thousands") +ggtitle("Distribution of Ratings over Months") +
facet_wrap(~yearR, nrow=3) + theme(axis.text.x=element_text(angle=90, size=7, vjust=0.5))
# show rating distribution over days
trainSet %>%
group_by(dayR, monthR) %>% #filter(yearR==1996) %>%
summarize(count = n()/1000) %>%
ggplot(aes(x = dayR, y = count)) +
geom_point() +
scale_x_continuous(name="Days",breaks=seq(1,31,1))+
theme(axis.text.x = element_text(size=7, angle=45))+
ylab("# Ratings, thousands") +ggtitle("Distribution of Ratings over Days for all Years") +
facet_wrap(~monthR, nrow=3) + theme(axis.text.x=element_text(angle=90, size=7, vjust=1))
# show rating distribution over release years
trainSet %>%
group_by(release,rating) %>%
summarize(count = n()/1000) %>%
ggplot(aes(x = release, y = count)) +
geom_line() + xlab("Year of Release") + ylab("# Ratings, thousands") +
ggtitle("Distribution of Ratings over Release Years")+facet_wrap(~rating, nrow=2) +
theme(axis.text.x=element_text(size=7, vjust=0.5))
# show rating distribution over movies over years
trainSet %>%
group_by(movieId, yearR) %>%
summarize(count = n()/1000) %>%
ggplot(aes(x = movieId, y = count)) +
geom_line() + xlab("Movie ID") + ylab("# Ratings, thousands") +
ggtitle("Distribution of Ratings over Movies over Years")+facet_wrap(~yearR, nrow=3) +
theme(axis.text.x=element_text(size=7, vjust=0.5))
# show rating distribution over movies over years
trainSet %>%
group_by(movieId, release) %>%
summarize(count = n()/1000) %>%
ggplot(aes(x = movieId, y = count)) +
geom_line() + xlab("Movie ID") + ylab("# Ratings, thousands") +
ggtitle("Distribution of Ratings over Movies over Release Years")+
facet_wrap(~release, nrow=7) + theme(axis.text.x=element_text(size=0, vjust=0.5))
# To understand the data origin, we need to examine how users rated movies in the datasets.
trainSet %>% group_by(userId) %>%
summarise(count=n()) %>%
ggplot(aes(count)) +
geom_histogram(color = "white") +
scale_x_log10() +
ggtitle("Distribution of Users") +
xlab("Number of Ratings") +
ylab("Number of Users")
# Genres popularity dataset from trainset
genres_popularity <- trainSet %>%
na.omit() %>% # omit missing values
select(movieId, yearR, genres) %>% # select columns we are interested in
mutate(genres = as.factor(genres)) %>% # turn genres in factors
group_by(yearR, genres) %>% # group data by year and genre
summarise(number = n())
# Selective genres vs year of rating
genres_popularity %>%
filter(yearR > 1996) %>%
filter(genres %in% c("Action","Adventure","Animation","Children","Comedy","Fantasy","Drama","Thriller", "Romance","War", "Sci-Fi", "Western")) %>%
ggplot(aes(x = yearR, y = number)) +
geom_line(aes(color=genres)) +
scale_fill_brewer(palette = "Paired")
# Results
## Linear Model
### Mean distribution model
# Mean of observed values
mu <- mean(trainSet$rating)
mu
# calculate RMSE
naive_rmse <- RMSE(testSet$rating,mu)
# show results table with the findings from the first model
results <- tibble(Method = "Mean", RMSE = naive_rmse, Tested = "testSet")
results %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 9,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
### Movie-centered model (Adding Movie Effects)
# Define movie effect b_i
bi <- trainSet %>%
group_by(movieId) %>%
summarize(b_i = mean(rating - mu))
# Rating with mean + b_i
y_hat_bi <- mu + testSet %>%
left_join(bi, by = "movieId") %>%
.$b_i
# Calculate the RMSE
results <- bind_rows(results, tibble(Method = "Mean + bi", RMSE = RMSE(testSet$rating, y_hat_bi), Tested = "testSet"))
# Show the RMSE improvement
results %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 9,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
### Movie + User model (Adding User Effects)
#Plot for user effect distribution
trainSet %>%
group_by(userId) %>%
summarize(b_u = mean(rating)) %>%
filter(n()>=100) %>%
ggplot(aes(b_u)) +
geom_histogram(color = "black") +
ggtitle("User Effect Distribution") +
xlab("User Bias") +
ylab("Count")
#Apply the model
# User effect (bu)
bu <- trainSet %>%
left_join(bi, by = 'movieId') %>%
group_by(userId) %>%
summarize(b_u = mean(rating - mu - b_i))
# Prediction
y_hat_bi_bu <- testSet %>%
left_join(bi, by='movieId') %>%
left_join(bu, by='userId') %>%
mutate(pred = mu + b_i + b_u) %>%
.$pred
# Calculate the RMSE
results <- bind_rows(results, tibble(Method = "Mean + bi + bu", RMSE = RMSE(testSet$rating, y_hat_bi_bu), Tested = "testSet"))
# Show the RMSE improvement
results %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 9,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
### Movie + User + Genre model
# it is assumed that adding genre to the model will not make a significant impact
# on the RMSE performance, due to the fact that genres are now not separated into their groups,
# are repetitive by nature and consume many computing resources to be correctly calculated.
# We will not include this model unless absolutely necessary (if RMSE will not reach the desired value).
### Regularization
# regularization must be performed in order to improve the RMSE results.
# Define a set of lambdas to tune
lambdas <- seq(0, 10, 0.25)
# Tune lambda
rmses <- sapply(lambdas,
regularization,
trainset = trainSet,
testset = testSet)
# We can construct a plot to find out.
# Plot the lambda vs RMSE
tibble(Lambda = lambdas, RMSE = rmses) %>%
ggplot(aes(x = Lambda, y = RMSE)) +
geom_point() +
ggtitle("Regularization")
# We pick the lambda that returns the lowest RMSE.
lambda <- lambdas[which.min(rmses)]
lambda
# We will apply this parameter to our model:
# Then, we calculate the predicted rating using the best parameters
# achieved from regularization.
mu <- mean(trainSet$rating)
# Movie effect (bi)
b_i <- trainSet %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+lambda))
# User effect (bu)
b_u <- trainSet %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+lambda))
# Prediction
y_hat_reg <- testSet %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
pull(pred)
# Update the result table
results <- bind_rows(results,
tibble(Method = "Regularized bi and bu",
RMSE = RMSE(testSet$rating, y_hat_reg),
Tested = "testSet"))
# Show the RMSE improvement
results %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 9,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
# We can now see significant improvement and potentially meeting the requirement for the project.
# We should still use this model on the final validation set to approve the result.
# ## Factorization Model
# ### Recosystem
# We followed the steps described in the package documentation, and it took approximately an hour for the script to be
# executed. However, the results were worth waiting:
set.seed(123, sample.kind = "Rounding") # This is a randomized algorithm
# Convert the train and test sets into recosystem input format
train_data <- with(trainSet, data_memory(user_index = userId,
item_index = movieId,
rating = rating))
test_data <- with(testSet, data_memory(user_index = userId,
item_index = movieId,
rating = rating))
# Create the model object
r <- recosystem::Reco()
# Select the best tuning parameters
opts <- r$tune(train_data, opts = list(dim = c(10, 20, 30),
lrate = c(0.1, 0.2),
costp_l2 = c(0.01, 0.1),
costq_l2 = c(0.01, 0.1),
nthread = 4, niter = 10))
# Train the algorithm
r$train(train_data, opts = c(opts$min, nthread = 4, niter = 20))
### Results
# Calculate the predicted values
y_hat_reco <- r$predict(test_data, out_memory())
head(y_hat_reco, 10)
# # Update the result table
results <- bind_rows(results,
tibble(Method = "Recosystem",
RMSE = RMSE(testSet$rating, y_hat_reco),
Tested = "testSet"))
# Show the RMSE improvement
results %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 9,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
# Validation and Final Results
## Validation
# Validation was performed on the *validation* set using the LM prediction after regularising the data:
mu_edx <- mean(edx$rating)
# Movie effect (bi)
b_i_edx <- edx %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu_edx)/(n()+lambda))
# User effect (bu)
b_u_edx <- edx %>%
left_join(b_i_edx, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu_edx)/(n()+lambda))
# Prediction
y_hat_edx <- validation %>%
left_join(b_i_edx, by = "movieId") %>%
left_join(b_u_edx, by = "userId") %>%
mutate(pred = mu_edx + b_i + b_u) %>%
pull(pred)
# Update the result table
results <- bind_rows(results,
tibble(Method = "Regularized EDX's bi and bu on Validation Set",
RMSE = RMSE(validation$rating, y_hat_edx),
Tested = "validation"))
# Show the RMSE improvement < 0.86490
results %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 9,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
## Final Results Table
# The below table summarizes the final results for the Models:
# Show the final Results table RMSE improvement < 0.86490
results %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 9,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
# Conclusion
## Results
# Show the final Results table RMSE improvement < 0.86490
results %>% kable() %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive"),
position = "center",
font_size = 9,
full_width = FALSE) %>%
row_spec(0, bold = T, color = "white", background = "#D7261E")
# Session Info
sessionInfo()
```
|
6c701c3f6344a4007c77fd71c1c8a641e3b027db | ca9321fa12c8baaaf1fcf357c510cd2cac052f50 | /lab_r/rml02_knn.R | 550596cf6c43e6ea80346862ae477f2f585a12ef | [] | no_license | serener91/ITW | e5fb7a7fbaf9142c6f14afcda2b04fb75ff3513e | 1eac674657d38158d720cb1bfd7a289a4dce64ef | refs/heads/main | 2023-08-07T21:21:55.589767 | 2021-09-17T05:22:21 | 2021-09-17T05:22:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,490 | r | rml02_knn.R | # wisc_bc_data.csv: 위스콘신 대학 유방암 데이터 프레임
# Predict presence/absence of cancer using the kNN algorithm.
# Load required packages
library(tidyverse) # data preprocessing, manipulation, visualization, ...
library(class) # classification: knn() function
library(gmodels) # CrossTable() function - confusion matrix
search()
# Prepare the data
file_path <- 'https://github.com/JakeOh/202105_itw_bd26/raw/main/datasets/wisc_bc_data.csv'
wisc_bc_data <- read.csv(file_path)
head(wisc_bc_data)
tail(wisc_bc_data)
str(wisc_bc_data)
summary(wisc_bc_data) # summary of descriptive statistics
# Values present in the diagnosis variable
table(wisc_bc_data$diagnosis)
#> B(Benign): benign tumor (not cancer)
#> M(Malignant): malignant tumor (cancer)
# Convert the diagnosis variable to a factor
wisc_bc_data$diagnosis <- factor(wisc_bc_data$diagnosis,
                                 levels = c('B', 'M'),
                                 labels = c('Benign', 'Malignant'))
summary(wisc_bc_data)
# The as.xyz() family of functions converts data types:
# as.character(), as.integer(), as.numeric(), as.data.frame(), as.factor()
# Split the data set into features and target
# id (patient id) must not be used in ML, so it is removed
head(wisc_bc_data)
features <- wisc_bc_data[, 3:32] # all rows, columns 3-32
target <- wisc_bc_data[, 2] # all rows, column 2
# Use 80% of the data set as the training set, 20% as the test set.
# Because the target is not in sorted order,
# it is acceptable to split sequentially
# instead of using random sampling.
tr_size <- round(569 * 0.8) # number of observations in the training set
# Training/test features
train_set <- features[1:tr_size, ] # rows 1 ~ 455
test_set <- features[(tr_size + 1):569, ] # rows 456 ~ 569
# Training/test labels (target)
train_target <- target[1:tr_size]
test_target <- target[(tr_size + 1):569]
# Check that the training/test labels are not biased and are randomly mixed
table(train_target)
prop.table(table(train_target))
table(test_target)
prop.table(table(test_target))
# Apply the knn algorithm
test_predictions <- knn(train = train_set,
                        cl = train_target,
                        test = test_set,
                        k = 1)
# Evaluate the knn algorithm
mean(test_predictions == test_target) # 94.7%
CrossTable(x = test_target,
           y = test_predictions,
           prop.chisq = FALSE)
# Compare with results for other values of k
test_predictions <- knn(train = train_set,
                        cl = train_target,
                        test = test_set,
                        k = 3)
mean(test_predictions == test_target) # 95.6%
CrossTable(x = test_target,
           y = test_predictions,
           prop.chisq = FALSE)
test_predictions <- knn(train = train_set,
                        cl = train_target,
                        test = test_set,
                        k = 11)
mean(test_predictions == test_target) # 93.8%
CrossTable(x = test_target,
           y = test_predictions,
           prop.chisq = FALSE)
# Apply the knn algorithm after standardizing all features
# Standardize a numeric vector to z-scores (mean 0, standard deviation 1).
#
# Args:
#   x:     a numeric vector.
#   na.rm: remove NA values when computing the mean and standard deviation?
#          Defaults to FALSE, matching the original behavior (any NA in `x`
#          makes the whole result NA).
#
# Returns: a numeric vector the same length as `x`.
standardize <- function(x, na.rm = FALSE) {
  stopifnot(is.numeric(x))
  # Avoid shadowing the base functions mean()/sd() with local variables.
  center <- mean(x, na.rm = na.rm)
  spread <- sd(x, na.rm = na.rm)
  (x - center) / spread
}
# Standardize every feature column, keeping the data.frame structure.
features_standardized <- data.frame(lapply(features, standardize))
summary(features_standardized)
# Compare k=3 and k=11 results using the standardized features
# Standardized training set
train_set_std <- features_standardized[1:tr_size, ]
# Standardized test set
test_set_std <- features_standardized[(tr_size + 1):569, ]
test_predictions <- knn(train = train_set_std,
                        cl = train_target,
                        test = test_set_std,
                        k = 3)
mean(test_predictions == test_target) # 96.5%
CrossTable(x = test_target, y = test_predictions,
           prop.chisq = FALSE)
test_predictions <- knn(train = train_set_std,
                        cl = train_target,
                        test = test_set_std,
                        k = 11)
mean(test_predictions == test_target) # 95.6%
CrossTable(x = test_target, y = test_predictions,
           prop.chisq = FALSE)
|
359ccd125b72af93ee6696889e9078755c2dc1bb | 92df0c74fa341c50753c95a593d297e30f87bc94 | /man/soft_full.Rd | ec9f346a766f4271448095667eb651964bf29c8b | [] | no_license | cran/LinearDetect | 24ed8e86d17eddef3a2fa0d147b997d999de216e | f108bc9158618d94f387db56319a8430532a6456 | refs/heads/master | 2023-03-24T07:19:37.677204 | 2021-03-21T23:20:02 | 2021-03-21T23:20:02 | 349,128,088 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 334 | rd | soft_full.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{soft_full}
\alias{soft_full}
\title{soft threshold function}
\usage{
soft_full(L, lambda)
}
\arguments{
\item{L}{input matrix}
\item{lambda}{threshold parameter}
}
\value{
thresholded matrix L
}
\description{
soft threshold function
}
|
f07452ae936417d3018d10146920f5fe4e9d3e6c | 1822ffd8e109a953dccecade22c8c7878249fafb | /src/03_variables/varibale_naming.R | 8dc23e7904a1d10f9c1708725b6f806dc3e58f58 | [
"MIT"
] | permissive | hygull/rscript | d8e70d52fcbfe091be12484aa7850ad6e656f79b | 1cac28b845ee31fde6a62ee96f4ce94312eb1680 | refs/heads/master | 2021-01-20T08:29:56.300907 | 2017-05-12T06:16:53 | 2017-05-12T06:16:53 | 90,153,158 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 479 | r | varibale_naming.R | # {
# 'created_on' : '5 May 2017',
# 'aim_of_rscript' : 'Using proper and variable names in R',
# 'coded_by' : 'Rishikesh Agrawani',
# }
arr = c(12, 45, 67)
my_fav_fruits3 = c("Apple", "Pine apple", "Grape")
.fruits = array( c("Apple", "Grape", "Orange") )
integers.vector = c(1, 5, 7, 9)
print(arr)
print(my_fav_fruits3)
print(.fruits)
print(integers.vector)
# [1] 12 45 67
# [1] "Apple" "Pine apple" "Grape"
# [1] "Apple" "Grape" "Orange"
# [1] 1 5 7 9 |
6d78a74eef3a68bf2d268228995b19fb15fdd42f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/steemr/examples/phour.Rd.R | d03e0933e60bbe0b9392eea275765bcde77c77a2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 394 | r | phour.Rd.R | library(steemr)
### Name: phour
### Title: Hour rose plot. Copied and modified from the openair package.
### 'phour()' plotted a 24-hour clock, indicating the active hours on the
### basis of a time column in a data frame. It is highly cumstomized on
### the basis of the 'openair::windrose()' function'.
### Aliases: phour
### ** Examples
## Not run:
##D phour()
## End(Not run)
|
e37076c5cf30ee0b3e01da4a4fced8c1975bdd08 | 34af8beb0b82a115ba359f41f2a38e1c7c357b0c | /R Scripts/cvcharts.R | 313da657fb245cb47aba282d5787e8b4d62496b3 | [] | no_license | srikantv03/Coronavirus-WebScrapers-and-Data-Vis | 5e1ce2a76c37e1abe9a40bb7ed2bbdb5d27f0363 | 1a8d3bf68f04b68b116647487abfa1422cf0e263 | refs/heads/master | 2022-07-29T02:18:43.399713 | 2020-04-21T10:32:04 | 2020-04-21T10:32:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,050 | r | cvcharts.R | setwd("C:/Users/Srikant/Desktop/Coronavirus Data Stuff")
data <- read.csv(url("https://covidtracking.com/api/states/daily.csv"))
library(tidyverse)
library(gridExtra)
library(maps)
library(socviz)
library(tibble)
source("./myfunctionsaug.R")
source("./theme_map.R")
head(data)
names(data)
data <- data[c(1:4, 7, 8, 11:17)]
names(data)
us_states <- map_data("state")
view(us_states)
dim(data)
top_daily <- data[c(1:56),c(1:13)]
#add mergable values to socviz
top_daily <- add_column(top_daily, region = c(
"alaska", "alabama", "arkansas", "am.somoa",
"arizona", "california", "colorado", "connecticut",
"district of columbia", "delaware", "florida", "georgia", "guam",
"hawaii", "iowa", "idaho", "illinois", "indiana",
"kansas", "kentucky", "louisiana", "massachusetts", "maryland",
"maine", "michigan", "minnesota", "missouri", "mp", "mississippi",
"montana", "north carolina", "north dakota", "nebraska", "new hampshire",
"new jersey", "new mexico", "nevada", "new york", "ohio", "oklahoma",
"oregon", "pennsylvania", "pr", "rhode island", "south carolina", "south dakota",
"tennessee", "texas", "utah", "virginia", "vi", "vermont", "washington",
"wisconsin", "west virginia", "wyoming"
), .after = 13)
top_daily[c(1:56), c(2, 14)]
names(top_daily)
mergedCD <- left_join(us_states, top_daily)
mergedCD
totalIncrease <-
ggplot(data=mergedCD,
mapping=aes(x=long, y=lat, group=group ,fill=positiveIncrease)) +
geom_polygon(color="white", size=0.1) +
coord_map(projection="albers", lat0=30, lat1=45) +
scale_fill_gradient(low="#22369c", high="red") +
labs(title="1-Day Increase in Coronavirus Cases Per State in Mainland US") +
theme_map()
hist(top_daily$positiveIncrease, col="red",breaks=seq(0,8000,250),
main="Histogram of The Increases in Coronavirus Cases Within the Last Day",
xlab="Total Increase in Cases (within the last day)")
view(mergedCD)
print("Total Number of Cases in the United States")
names(top_daily)
totalIncrease
top_daily[, c(3)]
|
bd2b6b47d570dafd707584e58266127c111d11d1 | b75a06043870136a4ecb4fa4be57c080855771ea | /demo/fitmodel3.r | 78b8c38e09fce1a18fe4263bc36492761c1adf0f | [] | no_license | cran/mme | 7415a547a67109bf1ce6deb959f376a94c773462 | 3acfdec03fccdace46d3a0c68abe1e89569b6b5d | refs/heads/master | 2021-06-01T22:07:25.024544 | 2019-01-27T14:40:13 | 2019-01-27T14:40:13 | 17,697,603 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,526 | r | fitmodel3.r | # THIS IS A COMPLETE EXAMPLE WITH MODEL 3
# USE SIMULATED DATA FOR 50 AREAS AND 6 PERIODS OF TIME
######################
# Model 3
# Model with two independent random effects in each category of the response variable:
# one domain random effect and another correlated time and domain random effect
######################
library(mme)
#THIS FUNCTION SIMULATE THE DATA
# Simulate multinomial area-level panel data for mme's Model 3
# (two independent random effects per response category: a domain effect u1
# and a domain-time effect u2 correlated over time via omega()).
#
# Arguments:
#   d: number of domains (areas).
#   t: number of time periods per domain.
#   k: number of categories of the multinomial response.
#
# Returns a matrix with d*t rows and columns: area index, time index,
# sample size (nu), population size (nuu), k sample counts (y),
# the two auxiliary variables x1/x2, and k non-sampled counts (yr).
#
# NOTE(review): variances (phi1, phi2), correlations (rho), intercepts,
# slopes (beta) and the RNG seed (12345678) are hard-coded, so the output
# is deterministic. Only k = 3 is effectively supported: columns 1 and 2 of
# the (k-1)-column random-effect matrices are filled explicitly.
# Relies on omega() from mme and mvrnorm() (MASS); assumes both are on the
# search path via library(mme) — TODO confirm.
simulation<-function(d,t,k){
# Total number of domain-by-time cells.
D=d*t
# NOTE(review): u is created but never used below.
u=matrix(0,d,t)
x1=matrix(0,d,t)
x2=matrix(0,d,t)
u1=matrix(0,d,t)
u2=matrix(0,d,t)
# Deterministic auxiliary variables built from smooth domain/time trends.
for (i in 1:d){
for (j in 1:t){
u1[i,j]=((i-d)/d+1/2+j/t)/3
u2[i,j]=((i-d)/d+2/2+j/t)/3
x1[i,j]=1+u1[i,j]
# The 0 coefficient means x2 depends only on u2 (correlation term disabled).
x2[i,j]=1+sqrt(2)*(0*u1[i,j]+sqrt(1-(0*0))*u2[i,j])
}}
# Variances of the domain effects (phi1) and domain-time effects (phi2),
# one per response category.
phi1=c(1,2)
phi2=c(0.25,0.50)
# u1 is reused: now the d x (k-1) matrix of domain random effects.
u1=matrix(0,d,k-1)
# Fixed seed so the simulated dataset is reproducible.
s = 12345678
set.seed(s)
u1[,1]=rnorm(d,mean=0,sd=sqrt(phi1[1]))
u1[,2]=rnorm(d,mean=0,sd=sqrt(phi1[2]))
# u2 is reused: now the D x (k-1) matrix of domain-time random effects.
u2=matrix(0,D,k-1)
# AR-type correlation over time for each category; omega() (from mme)
# builds the t x t correlation structure.
rho=c(0.50,0.75)
a=omega(t,k,rho,phi2)
ceros=matrix(rep(0,t),t,1)
# Draw d correlated length-t vectors per category and stack them into D rows.
datos=mvrnorm(d,ceros,((phi2[1])*(a[[1]][[1]])))
u2[,1]=matrix(t(datos),D,1)
datos=mvrnorm(d,ceros,((phi2[2])*(a[[1]][[2]])))
u2[,2]=matrix(t(datos),D,1)
# Expand the domain effects u1 so each domain's row is repeated t times.
u11=matrix(0,D,k-1)
jj=1
for (i in 1:d){
for(j in 1:t){
u11[jj,]=u1[i,]
jj=jj+1}}
# Flatten the auxiliary variables to D x 1 column vectors (time-major order).
x1=matrix(t(x1),d*t,1)
x2=matrix(t(x2),d*t,1,byrow=TRUE)
# Category-specific intercepts and slopes of the linear predictor.
ind=matrix(rep(1.3,D),D,1)
ind2=matrix(rep(-1.6,D),D,1)
beta=c(-1,1)
pr=matrix(0,D,k-1)
theta=matrix(0,D,k-1)
# Linear predictor per non-reference category: intercept + covariate effect
# + domain effect + domain-time effect.
for (j in 1:(k-1)){
if (j==1) {theta[,j]=ind+x1*beta[j]+u11[,j]+u2[,j]}
if (j==2) {theta[,j]=ind2+x2*beta[j]+u11[,j]+u2[,j]}
}
# Multinomial-logit transform: a holds the reference-category probability.
suma=rowSums(exp(theta))
a=1/(1+suma)
for (i in 1:(k-1)){
pr[,i]=a*exp(theta[,i])}
# Sample sizes nu: 5, 10, 15, ... per domain (constant within a domain).
aa=list()
j=5
for ( i in 1:d){
aa[[i]]=matrix(rep(j,t),t,1)
j=j+5}
nu=do.call(rbind,aa)
# Population sizes nuu: 200, 300, 400, ... per domain.
aa=list()
j=200
for ( i in 1:d){
aa[[i]]=matrix(rep(j,t),t,1)
j=j+100}
nuu=do.call(rbind,aa)
# Draw sample counts (y) and non-sampled counts (yr) for every cell.
y=matrix(0,D,(k))
yr=matrix(0,D,(k))
for (i in 1:D){
y[i,]=t(rmultinom(1,nu[i],c(pr[i,1],pr[i,2],a[i])))
yr[i,]=t(rmultinom(1,nuu[i]-nu[i],c(pr[i,1],pr[i,2],a[i])))}
# Area and time index columns.
a=list()
for ( i in 1:d){
a[[i]]=matrix(rep(i,t),t,1)}
area=do.call(rbind,a)
time=rep(seq(1:t),d)
# Assemble the final simulated dataset.
output=cbind(area,time,nu,nuu,y,cbind(x1,x2),yr)
return(output)}
#DATA
data=simulation(50,10,3)
colnames(data)=c("area","time","sample","population","y1","y2","y3","x1","x2","y11","y22","y33")
data=as.data.frame(data)
names(data)
data=subset(data,data$time>4)
k=3 #number of categories of the response variable
pp=c(1,1) #vector with the number of auxiliary variables in each category #data
mod=3 #Model 3
#Needed matrix and initial values
datar=data.mme(data[,1:9],k,pp, mod)
#Model fit
result=model(datar$d,datar$t,pp,datar$Xk,datar$X,datar$Z,datar$initial,datar$y[,1:(k-1)],datar$n,datar$N, mod)
result
#Fixed effects
result$beta.Stddev.p.value
#Random effects
result$phi.Stddev.p.value
#Direct estimators
dir1=data$y11
dir2=data$y22
#Plot direct estimator in front of model estimator
dos.ver<-matrix(1:2,1,2)
layout(dos.ver)
plot(dir1,result$mean[,1],main="Small area estimator Y1",xlab="Direct estimate", ylab="model estimate",font.main=2,cex.main=1.5,cex.lab=1.3)
abline(a=0,b=1)
plot(dir2,result$mean[,2],main="Small area estimator Y2",xlab="Direct estimate", ylab="model estimate",font.main=2,cex.main=1.5,cex.lab=1.3)
abline(a=0,b=1)
#Model estimator
data$yest1=result$mean[,1]
data$yest2=result$mean[,2]
#Plot direct estimator and model estimator ordered by sample size for time=10
dos.ver<-matrix(1:2,1,2)
layout(dos.ver)
a=subset(data,data[,2]==10)
a=a[order(a[,3]),]
g_range <- range(0,45)
plot(a$y11/1000,type="b", col="blue",axes=FALSE, ann=FALSE)
lines(a$yest1/1000,type="b",pch=4, lty=2, col="red")
title(xlab="Sample size")
axis(1,at=c(1,10,20,30,40,50),lab=c(a$sample[1],a$sample[10],a$sample[20],a$sample[30],a$sample[40],a$sample[50]))
axis(2, las=1, at=1*0:g_range[2])
legend("topleft", c("Direct","Model"), cex=1, col=c("blue","red"),
lty=1:2,pch=c(1,4), bty="n")
title(main="Small area estimator Y1", font.main=1.2,cex.main=1)
plot(a$y22/1000,type="b",col="blue",axes=FALSE, ann=FALSE)
lines(a$yest2/1000,type="b",pch=4, lty=2, col="red")
title(xlab="Sample size")
axis(1,at=c(1,10,20,30,40,50),lab=c(a$sample[1],a$sample[10],a$sample[20],a$sample[30],a$sample[40],a$sample[50]))
axis(2, las=1, at=1*0:g_range[2])
legend("topleft", c("Direct","Model"), cex=1, col=c("blue","red"),
lty=1:2,pch=c(1,4), bty="n")
title(main="Small area estimator Y2", font.main=1.2,cex.main=1)
##Bootstrap parametric BIAS and MSE
B=100 #Bootstrap iterations
ss=12345 #SEED
set.seed(ss)
mse.pboot=mseb(pp,datar$Xk,datar$X,datar$Z,datar$n,datar$N,result,B,mod)
#RMSE
data$rmse1=mse.pboot[[3]][,1]
data$rmse2=mse.pboot[[3]][,2]
#PLOT THE RMSE ORDERED BY SAMPLE SIZE FOR TIME=10
a=subset(data,data[,2]==10)
a=a[order(a[,3]),]
dos.ver<-matrix(1:2,1,2)
layout(dos.ver)
g_range <- range(0,45)
plot(a$rmse1,type="b", col="blue",axes=FALSE, ann=FALSE)
title(xlab="Sample size")
axis(1,at=c(1,10,20,30,40,50),lab=c(a$sample[1],a$sample[10],a$sample[20],a$sample[30],a$sample[40],a$sample[50]))
axis(2, las=1, at=10*0:g_range[2])
title(main="RMSE for the estimator of Y1", font.main=1.2,cex.main=1)
g_range <- range(0,45)
plot(a$rmse2,type="b",col="blue",axes=FALSE, ann=FALSE)
title(xlab="Sample size")
axis(1,at=c(1,10,20,30,40,50),lab=c(a$sample[1],a$sample[10],a$sample[20],a$sample[30],a$sample[40],a$sample[50]))
axis(2, las=1, at=10*0:g_range[2])
title(main="RMSE for the estimator of Y2", font.main=1.2,cex.main=1)
|
ee0c9532078a3ac43c0d15a8f556a9a749d70b37 | fc88a834f1914bc26bf9bff03209f5ae4ee47105 | /man/example.Rd | 46298098bc190c4bbd6834e9c424d4c1ed98f576 | [] | no_license | SoniaTC/NOISeq | c2181897f2492cc5f05e5f59fd2b1761ef3a53ca | 84b8811a7e9095f361290d9388f64f601c87b93f | refs/heads/master | 2022-11-19T03:31:25.432278 | 2020-07-20T09:58:33 | 2020-07-20T09:58:33 | 274,364,908 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,007 | rd | example.Rd | \name{example}
\docType{data}
\alias{mydata}
\alias{mynoiseq}
\title{Example of objects used and created by the NOISeq package}
\description{
This is a quick view of the objects generated by the
package. To take a look, see the usage information. These objects have
been created from Marioni's reduce dataset (only chromosomes I to IV).
}
\usage{
# To load the object myCounts generated by the readData() function from R objects containing expression data, the factors describing the experimental conditions to be studied, the feature length, the feature biotypes, the chromosome and the position:
data(myCounts)
# To load the object generated after running the noiseq() function to compute differential expression:
data(noiseq)
}
\references{
Marioni, J.C. and Mason, C.E. and Mane, S.M. and Stephens, M. and Gilad, Y. (2008) RNA-seq: an assessment of
technical reproducibility and comparison with gene expression arrays. \emph{Genome Research}, \bold{18}:
1509--1517.
}
\keyword{datasets}
|
65ccf1b76c757d31e1d731b2460f43136e78c947 | f65fb425744cb0e367438b2e69019b6645048cdb | /R/deleteOne.R | 01daf14f9f396efed970fe548865b3c26db4fe70 | [] | no_license | cran/smacof | 2dc3946ee615eef6846212c3e0fbb6bbd3d94e9a | c89277ac6d174f1c8d191994fc2fe0ee849f6a25 | refs/heads/master | 2022-05-27T05:52:02.937125 | 2022-05-06T06:04:47 | 2022-05-06T06:04:47 | 17,699,709 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 303 | r | deleteOne.R | smacofDeleteOne <- function (delta, ndim, type) {
n <- nrow (delta)
x <- array (0, c (n, ndim, n))
for (i in 1:n) {
xi <- smacofSym(delta[-i, -i], ndim = ndim, type = type)$conf
x[((1 : n)[-i]), (1 : ndim), i] <- xi
x[i, (1 : ndim), i] <- 0
}
return (x)
} |
a53545bcb26698194e8d6aef94ad8905ee6fd1e2 | 276eac36e10f083d9f7d62a6b0d5989a3ccec106 | /scripts/figure_generation/make_figure4.R | 12498f0401e6f4700f790f240c18e751d71ca8de | [
"BSD-3-Clause"
] | permissive | greenelab/deconvolution_pilot | 5caec028804243ba0ae3a4568310cbe9eed20ec6 | b62d8e77c0f128480af27f9ab61b6ab45305ab0f | refs/heads/main | 2023-07-27T22:58:53.136158 | 2023-07-27T22:58:47 | 2023-07-27T22:58:47 | 492,989,959 | 1 | 1 | BSD-3-Clause | 2023-07-27T22:58:48 | 2022-05-16T20:28:46 | R | UTF-8 | R | false | false | 4,913 | r | make_figure4.R | suppressPackageStartupMessages({
library(DropletUtils)
library(data.table)
library(scater)
library(miQC)
library(ggplot2)
library(dplyr)
library(pheatmap)
library(RColorBrewer)
library(patchwork)
library(ggpubr)
library(yaml)
library(DESeq2)
library(rtracklayer)
})
params <- read_yaml("../../config.yml")
data_path <- params$data_path
local_data_path <- params$local_data_path
figure_path <- params$figure_path
samples <- params$samples
source("figure_utils.R")
# Load DESeq2 object
deseq_path <- paste(local_data_path, "deseq2_output", sep = "/")
dds <- readRDS(paste(deseq_path, "ribo_vs_polyA_data.rds", sep = "/"))
# Rlog transformation to adjust for heteroskedasticity
rld <- rlog(dds, blind = FALSE)
# PCA plot
pcaData <- plotPCA(rld, intgroup = c("condition", "sample"), returnData = TRUE)
percentVar <- round(100 * attr(pcaData, "percentVar"))
pA <- ggplot(pcaData, aes(PC1, PC2, color = sample, shape = condition)) +
geom_point(size = 3) +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5)) +
xlab(paste0("PC1: ", percentVar[1], "% variance")) +
ylab(paste0("PC2: ", percentVar[2], "% variance")) +
labs(color = "Sample", shape = "Library prep") +
coord_fixed() +
scale_color_manual(values = colors_samples)
# Get DESeq2 results
res <- results(dds)
# Filter down to protein coding genes
genefile <- paste(data_path,"index/refdata-gex-GRCh38-2020-A/genes/genes.gtf",sep = "/")
gff <- readGFF(genefile)
protein_coding <- subset(gff, gff$gene_type=="protein_coding")
res <- subset(res, rownames(res) %in% protein_coding$gene_name)
# Set genes with p value of 0 to 300
res_df <- as.data.frame(res)
res_df[res_df$padj==0,]$padj <- 1e-300
res_df$group <- "Other"
# Get poly-A negative genes from Yang et al 2011
polyA_neg <- fread(paste(local_data_path, "miscellaneous",
"polyA_negative_genes.tsv", sep = "/"),
header = FALSE)
polyA_neg <- polyA_neg$V1
res_df[rownames(res_df) %in% polyA_neg,]$group <- "Other polyA(-)"
# Split out histone genes
hist_genes <- grep("HIST", rownames(dds), value = T)
res_df[rownames(res_df) %in% hist_genes,]$group <- "Histones"
# Get mitochondrial genes
mt_genes <- grep("MT-", rownames(dds), value = T)
res_df[rownames(res_df) %in% mt_genes,]$group <- "MT Genes"
# Rearrange so interesting genes are plotted on top
res_df$group <- factor(res_df$group, levels = c("Other","Histones", "Other polyA(-)", "MT Genes"))
res_df <- res_df[order(res_df$group), ]
# Make volcano plot
pB <- ggplot(res_df, mapping = aes(x = log2FoldChange,
y = -log10(padj),
color = group)) +
geom_point() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5)) +
geom_hline(yintercept = -log10(0.05), linetype = "dashed") +
geom_vline(xintercept = c(log2(0.5), log2(2)), linetype = "dashed") +
scale_color_manual(name = "Gene set", values = colors_genesets,
limits = c("Histones", "Other polyA(-)", "MT Genes", "Other")) +
xlab("log2 fold change") + ylab("-log10 adjusted p-value") +
annotate("text", x= -4.7, y= 275, label = "rRNA- Dissociated", size = 6) +
annotate("text", x= 4.2, y= 275, label="polyA+ Dissociated", size = 6) #+
# Get sums of all histone genes
hist_expr <- as.data.frame(colSums(assay(dds[hist_genes,])))
colnames(hist_expr) <- "counts"
hist_expr$id <- colData(dds)$id
hist_expr$sample <- gsub("_.*","", hist_expr$id)
hist_expr$condition <- gsub(".*_", "", hist_expr$id)
pC <- ggplot(hist_expr, aes(x=condition, y=counts, group=sample, color=sample)) +
geom_point() +
scale_y_log10() +
geom_line() +
theme(axis.text.x = element_text(angle=0, hjust = 0.5, vjust = 0.5),
plot.title = element_text(hjust = 0.5)) +
labs(x = "Status", y = "Normalized read counts", color = "Sample") +
ggtitle("Histone genes") +
scale_color_manual(values = colors_samples) +
annotation_logticks(sides = "l")
# Get sums of all mitochondrial genes
mito_expr <- as.data.frame(colSums(assay(dds[mt_genes,])))
colnames(mito_expr) <- "counts"
mito_expr$id <- colData(dds)$id
mito_expr$sample <- gsub("_.*","", mito_expr$id)
mito_expr$condition <- gsub(".*_", "", mito_expr$id)
pD <- ggplot(mito_expr, aes(x=condition, y=counts, group=sample, color=sample)) +
geom_point() +
scale_y_log10() +
geom_line() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5, vjust = 0.5),
plot.title = element_text(hjust = 0.5)) +
labs(x = "Status", y = "Normalized read counts", color = "Sample") +
ggtitle("Mitochondrial genes") +
scale_color_manual(values = colors_samples) +
annotation_logticks(sides = "l")
pdf(paste(figure_path, "figure4.pdf", sep = "/"), width = 16, height = 10.67, family = "sans")
pA + pB + pC + pD +
plot_layout(nrow = 2, heights = c(1,1)) +
plot_annotation(tag_levels = "A")
dev.off()
|
9aa165cfeab44e52aee1652d3d8287288dd75a3b | 462ef36e8d5c00293bb93899bde36e7a61597047 | /man/biscuiteerDataListDates.Rd | c4b058672a5113a610d5e108b5435b9d7d1a4746 | [] | no_license | trichelab/biscuiteerData | 9bdfde69e0f079cbd70623ef366595e416764c78 | f5366976a4422f4cc549e8913a2cc33922008e09 | refs/heads/master | 2023-08-03T11:04:26.493002 | 2023-07-05T14:40:08 | 2023-07-05T14:40:08 | 210,435,170 | 0 | 1 | null | 2019-10-17T14:32:02 | 2019-09-23T19:25:39 | R | UTF-8 | R | false | true | 375 | rd | biscuiteerDataListDates.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{biscuiteerDataListDates}
\alias{biscuiteerDataListDates}
\title{List all versions of biscuiteer data}
\usage{
biscuiteerDataListDates()
}
\value{
Sorted unique dates in biscuiteer data
}
\description{
List all versions of biscuiteer data
}
\examples{
biscuiteerDataListDates()
}
|
55912663d1e34d8a996dda5ee0e29c6ec671d3ca | 4f46f3b0377a558f488bd2c035b2f5f09cce1e31 | /ui-vegindex-selection-plots.R | 3a0349087d088fa8767a16c8e9f743c0ee47bd8b | [
"MIT"
] | permissive | alkc/specalyzer | d34ee95fc37188a0caeafc865d504adf239248bb | 2b7278d8020f2c4193ff9ce2f5ded66e7857f5be | refs/heads/master | 2021-07-09T21:27:37.300561 | 2021-04-15T21:07:44 | 2021-04-15T21:07:44 | 123,193,976 | 3 | 3 | null | null | null | null | UTF-8 | R | false | false | 694 | r | ui-vegindex-selection-plots.R | get_vegindex_selection_plot_ui <- function() {
fluidPage(
titlePanel("Selection of vegetation indices"),
sidebarLayout(
sidebarPanel(
selectInput(
"viselection_attr_select",
label = "Select attribute of interest:",
choices = "",
multiple = FALSE,
selected = NULL
),
selectInput(
"viselection_attr_type",
label = "Select attribute:",
choices = c("Continuous" = "continuous", "Categorical" = "categorical"),
multiple = FALSE,
selected = NULL
)
),
mainPanel(
plotlyOutput("viselection_plot", height = "700px")
)
)
)
} |
99e29d4db647d5e2da513e282b8d97e57c38b9d2 | 61d2414c11b7ea89f5485ea0b4a63e2983d954b1 | /plot1.R | e2cb08fcd4ecf8fc6ff6193de5cea1030c2bec65 | [] | no_license | briankw1108/Ex_Data_Analysis_Plotting1 | ab56e6f139e64ac523bd6fe880353da5764f990f | 155ddb5d6c89714bdbc7b17b59c4998ef4016ecc | refs/heads/master | 2016-08-11T19:42:02.700173 | 2015-10-11T18:22:56 | 2015-10-11T18:22:56 | 44,056,324 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,567 | r | plot1.R | #Load libraries
library(lubridate); library(dplyr)
#Set up correct working directory
setwd("C://Users/u213493/Desktop/Data Science/Exploratory Data Analysis")
#unzip the file
#unzip("exdata_data_household_power_consumption.zip")
#Load the file
data = read.table("household_power_consumption.txt", header = T, sep = ";", na.strings = "?", stringsAsFactors = F)
#Convert Data variable from charactor to Date
data$Date = as.Date(data$Date, format = "%d/%m/%Y")
#Subset the data between 2007-02-01 and 2007-02-02
newData = filter(data, data$Date >= "2007-02-01" & data$Date <= "2007-02-02")
#Create a new variable with Date/Time
DateTimeCombine = paste(newData$Date, newData$Time)
DateTime = strptime(DateTimeCombine, format = "%Y-%m-%d %H:%M:%S")
newData$Date_Time = DateTime
##Or the action above can be done as following
#DateTime = paste(newData$Date, newData$Time)
#newData$Date_Time = as.POSIXct(DateTime)
#Plot histogram of Global_active_power as plot1
par(mar = c(5.1, 4.1, 4.1, 2.1)) #Setup margins
#Save the histogram as png file named "plot1.png"
png(filename = "plot1.png", height = 480, width = 480)
with(newData, hist(newData$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red"))
dev.off()
##Or to save the plot, the action above can also be done as following
#hist(newData$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red")
#dev.copy(png, file = "plot1.png", height = 480, width = 480)
#dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.