blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9fb4ae1a811d12618b522810e956b355126ebef9 | 900afaf1006963fe57bfe412b06f830e66fab76b | /lost-found/R/65.R | 29dab156fa00a90777933c22d7e64b34f2783394 | [] | no_license | AndrissP/LabsOnR | f91478fca3570d3eeaf2dd9978111b1e404675a7 | 774efb5b9a699731895cab5deb24d18c7f62f9b0 | refs/heads/master | 2020-07-03T02:18:10.058717 | 2019-08-13T08:58:18 | 2019-08-13T08:58:18 | 201,754,657 | 0 | 0 | null | null | null | null | ISO-8859-13 | R | false | false | 963 | r | 65.R | png("7_attels.png", width=800, height=400,pointsize=20)
# Figure 7: semi-log attenuation plot, ln(n - n_f) versus absorber thickness
# l (cm), for four absorber materials, each with its fitted regression line
# and +/- error bars.  NOTE(review): assumes the png() device was opened just
# before this block and that the l_*, ln_*, svari_* vectors and fit_* model
# objects already exist in the workspace -- confirm against the full script.
par(mar = c(4.5, 4, 1, 2))
# Lead (Pb): open circles, solid fit line, error bars of half-width svari_Pb
plot(l_Pb,ln_Pb,xlim=c(0,2),ylim=c(-2,1.4),xlab="l,cm", ylab=expression("ln(n-n"[f]*")"))
lines(l_Pb,fit_Pb$fitted.values)
# code=3 with angle=90 draws flat caps at both ends of each error bar
arrows(l_Pb,ln_Pb-svari_Pb,l_Pb,ln_Pb+svari_Pb,code=3,length=0.02,angle=90)
# Steel ("terauds"): blue triangles, dashed fit line
points(l_Te,ln_Te,col='blue',pch=2)
lines(l_Te,fit_Te$fitted.values,col='blue',lty=2)
arrows(l_Te,ln_Te-svari_Te,l_Te,ln_Te+svari_Te,col='blue',code=3,length=0.02,angle=90)
# Aluminium: red diamonds, dotted fit line
points(l_Al,ln_Al,col='red',pch=5)
lines(l_Al,fit_Al$fitted.values,col='red',lty=3)
arrows(l_Al,ln_Al-svari_Al,l_Al,ln_Al+svari_Al,col='red',code=3,length=0.02,angle=90)
# Plastic: green crosses, dot-dash fit line
points(l_Pl,ln_Pl,col='green',pch=4)
lines(l_Pl,fit_Pl$fitted.values,col='green',lty=4)
arrows(l_Pl,ln_Pl-svari_Pl,l_Pl,ln_Pl+svari_Pl,col='green',code=3,length=0.02,angle=90)
# Legend entries (Latvian labels) match the plotting order and styles above
legend("topright",legend=c("svins", "tērauds","alumīnijs","plastmasa"),lty=c(1,2,3,4),pch=c(1,2,5,4),col=c('black',"blue","red",'green'),ncol=2)
# Close the png device so the image file is flushed to disk
dev.off()
|
87f377776afea5dba4ef3ee263cef57ea6c0edca | e494ed1da922ddf0beb9fab29e85300fb9f6007e | /man/extinction_simulation.Rd | 7cf50a74de9acf52f52772246242b7dfcb026517 | [] | no_license | adsteen/funfunfun | 6fa543907544621e2e84f6af94bdbf7cc243db5e | 8e5c275097dd13843b6ee2268e237aca84720760 | refs/heads/main | 2023-03-06T13:40:25.889406 | 2021-02-09T13:49:55 | 2021-02-09T13:49:55 | 336,002,863 | 1 | 2 | null | 2021-02-04T19:57:23 | 2021-02-04T15:49:07 | R | UTF-8 | R | false | true | 1,762 | rd | extinction_simulation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func_redun_sim.R
\name{extinction_simulation}
\alias{extinction_simulation}
\title{Results of functional redundancy simulations}
\usage{
extinction_simulation(
range.express = c(1:100),
range.nonexpress = c(0:100),
q = 0.5,
a.range = seq(0, 0.4, by = 0.01),
loss.thres = 0.99,
range.trait = seq(1, 1000, by = 1),
n.community = 100,
n.extinct.sim = 10
)
}
\arguments{
\item{range.express}{The number of taxa contributing to a community-aggregated parameter--vector}
\item{range.nonexpress}{The number of taxa not contributing to a community-aggregated parameter--vector}
\item{q}{The diversity order to use when calculate functional redundancy--real number}
\item{a.range}{A parameter modulating how even a lognormal abundance distribution is--vector}
\item{loss.thres}{A parameter specifying the total loss trait in a community necessary to stop a simulation--number between 0 and 1}
\item{range.trait}{Possible range of trait values to be assigned to taxa--a vector}
\item{n.community}{Number of artificial communities to generate--integer greater than 0}
\item{n.extinct.sim}{Number of extinction simulations to perform on every artificial community--integer greater than 0}
}
\value{
Returns a list of fr (functional redundancy), loss.frac (average fraction of a community required to go extinct prior to trait loss), and express.level (the fraction of a community required to go extinct)
}
\description{
Performs extinction simulations and calculates functional redundancy using the Royalty method.
}
\details{
ANYTHING THAT NEEDS TO GO INTO THE DETAILS SECTION
}
\examples{
# We should include examples, but I'm kind of confused about how examples work
}
|
909789f8b596e66efced07f2fecfa310a884e9c1 | 303cec757865d4187456554b6c8fff032e6ada19 | /R/faux_Skx.R | 264409fe993b46a28edc1e314caf223ca1b19efa | [
"MIT"
] | permissive | shamindras/ars | 591363b88d56ff2540996b7a4ba9c8d311baa146 | d76b9d0f60743212beba2377729c25548c3f9d52 | refs/heads/master | 2020-12-26T04:16:03.406484 | 2015-12-17T19:55:02 | 2015-12-17T19:55:02 | 47,591,568 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,497 | r | faux_Skx.R | #' Helper function to create piecewise function \eqn{s_{k}(x)}{sk(x)}
#' @param inp_uintervallist A list of intervals between \eqn{z} values, as in the
#' output from uInterval.
#' @param inp_ufunlist A list of functions, the output from uFun.
#' @return A list of functions.
#' The length of the list is equal to the length of the inputs
#' \code{inp_uintervallist} and \code{inp_ufunlist}.
#' Each element of the list is one piece of the piecewise function
#' \eqn{s_{k}(x)}{sk(x)}.
#' @export
faux_Skx <- function(inp_uintervallist, inp_ufunlist) {
# Builds the normalized piecewise envelope density s_k(x): each input
# function u_i is exponentiated and divided by the total integral of all
# exp(u_i) over their matching intervals.
# check that inp_uintervallist is the same length as inp_ufunlist
if(!(length(inp_ufunlist)==length(inp_uintervallist))){
stop("inp_uintervallist and inp_ufunlist must have the same length")
}
# check that inp_uintervallist is a list
if(!(class(inp_uintervallist))=="list"){
stop("inp_uintervallist must be a list")
}
# check that inp_ufunlist is a list
if(!(class(inp_ufunlist))=="list"){
stop("inp_ufunlist must be a list")
}
# check that each element of inp_uintervallist is a 2-dim vector
# (sum of element lengths must equal 2 * number of intervals)
if(!(do.call(sum,lapply(inp_uintervallist,length)))==
2*length(inp_uintervallist)){
stop("Intervals in inp_uintervallist must be two dimensional vectors")
}
# check that every element of inp_ufunlist is a function
# NOTE(review): all.equal() returns a character vector (not FALSE) on
# mismatch, so `!all.equal(...)` would error rather than reach stop();
# isTRUE(all.equal(...)) is the usual pattern -- confirm before relying
# on this check for invalid input.
if(!all.equal(lapply(inp_ufunlist,class),
as.list(rep("function",length(inp_ufunlist))))){
stop("inp_ufunlist must be a list of functions")
}
# function to take the exp() of every function in inp_ufunlist
# NOTE(review): rebuilding the body via deparse()+eval(parse()) is fragile --
# deparse() can return multiple strings for long bodies, and the new
# function silently captures `str` by closure.  A direct closure such as
# function(x) exp(f(x)) would avoid text round-tripping.
addexp <- function(i){
str <- deparse(body(inp_ufunlist[[i]]))
h <- function(x) eval(parse(text = paste0("exp(", str, ")")))
return(h)
}
exps <- sapply(seq(1:length(inp_ufunlist)),addexp) # exponentiate each
# function in the list
# function to take the integral of each element in exps
# ([[1]] extracts the $value component of integrate()'s result)
int <- function(i) integrate(exps[[i]], inp_uintervallist[[i]][1]
, inp_uintervallist[[i]][2])[[1]]
constant <- sum(sapply(seq(1:length(inp_ufunlist)),int)) #normalizing constant
# function which exponentiates each function in inp_ufunlist and divides
# each by the normalizing constant
addconst <- function(i){
str <- deparse(body(inp_ufunlist[[i]]))
h <- function(x) eval(parse(text = paste0("exp(", str, ")/",constant)))
return(h)
}
# final list of sk(x) functions (sapply over functions returns a list here)
faux_Skx_out <- sapply(seq(1:length(inp_ufunlist)),addconst)
return(faux_Skx_out)
} |
b2b3511e50e594ef37fb04d6b8c7f0230e404ea6 | b8f60b0cc802d613ff252ebf5f2aec9ac005b3b7 | /ScrapeDavis/biosci.R | bf5e628f6b4944bdd42ab756535fa4d121184a8d | [] | no_license | dsidavis/ResearchProfiles | e66675406195ab552dd888e5db65ca1e003d8e2a | e04ea0d2c712993485a6e19b0d38a18b506d13eb | refs/heads/master | 2020-06-11T18:59:27.871947 | 2018-02-23T17:01:15 | 2018-02-23T17:01:15 | 38,620,699 | 1 | 2 | null | 2018-02-18T10:04:49 | 2015-07-06T13:29:13 | HTML | UTF-8 | R | false | false | 234 | r | biosci.R | source("funcs.R")
# Scrape the UC Davis Biological Sciences active-faculty listing and save the
# extracted names.  NOTE(review): htmlParse()/getNodeSet() come from the XML
# package and getNames() from the funcs.R sourced on the line above -- this
# block only works after that setup and with network access.
u = "http://biosci3.ucdavis.edu/Faculty/Profile/ActiveFaculty"
doc = htmlParse(u)
# Anchor tags whose href points at an individual faculty profile page
nodes = getNodeSet(doc, "//a[contains(@href, 'Faculty/Profile/View')]")
biosci = getNames(nodes)
# Persist the scraped names for later aggregation
save(biosci, file = "biosci.rda")
|
1a723e24eb6676b57c054b7211f618bf96d4f21f | cd3aa68b2dce3f43a4a941a1c79476acac84702c | /hold/markov_chain_lesson.R | 5c8573069bb7096c5528cbbbda1be62ef4af09ca | [] | no_license | HTrammel/Capstone-Project | 2ba8ea18239a799bb09428ad954b269f3cdfe03b | 2d1d38935c740aeea5d58ca78fa26d2c951a9f94 | refs/heads/master | 2021-01-10T17:42:38.988050 | 2016-01-14T23:00:58 | 2016-01-14T23:00:58 | 47,585,136 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,391 | r | markov_chain_lesson.R | # MARKOV CHAIN LESSON
# Snakes and Ladders
# Build the one-turn Markov transition matrix M for a 100-square board.
# States are squares 0..100 (state 0 = before the first square), so M ends
# up (n+1) x (n+1) after trimming the temporary overflow columns.
n <- 100
# Temporary matrix with 6 extra columns for die rolls that overshoot 100.
M <- matrix(0,n+1,n+1+6)
rownames(M) <- 0:n
colnames(M) <- 0:(n+6)
# A fair die: from square j you reach j+1 .. j+6, each with probability 1/6.
for (i in 1:6) {
diag(M[,(i+1):(i+1+n)]) <- 1/6
}
# Fold the overflow columns (squares 100..106) back into square 100, i.e.
# any roll landing at or past the final square counts as square 100.
M[,n+1] <- apply(M[, (n+1):(n+1+6)], 1, sum)
M <- M[,1:(n+1)]
# Snake/ladder mouths and exits: landing on starting[i] moves to ending[i].
starting <- c(4,9,17,20,28,40,51,54,62,64,63,71,93,95,92)
ending <- c(14,31,7,38,84,59,67,34,19,60,81,91,73,75,78)
# Redirect all probability mass entering each mouth column to its exit column
# (columns are offset by 1 because column 1 is square 0).
for(i in 1:length(starting)){
v <- M[,starting[i]+1]
ind <- which(v>0)
M[ind,starting[i]+1] <- 0
M[ind,ending[i]+1] <- M[ind,ending[i]+1]+v[ind]
}
# Raise a square (transition) matrix P to the h-th power by repeated
# matrix multiplication.
#
# P: numeric square matrix.
# h: number of turns; for h <= 1 the matrix itself is returned unchanged
#    (h is expected to be a positive whole number).
powermat <- function(P, h) {
  result <- P
  step <- 1
  while (step < h) {
    result <- result %*% P
    step <- step + 1
  }
  result
}
# Initial state: the token starts on square 0 with probability 1.
initial <- c(1, rep(0, n))
# Colour ramp used by position() to shade squares by probability mass.
COLOR <- rev(heat.colors(101))
# Geometry of the sqrt(n) x sqrt(n) boustrophedon board.
u <- 1:sqrt(n)
# BUG FIX: the original used `<-` inside the data.frame() call, which
# (a) created stray global variables `index`, `ord`, `abs`, (b) produced
# mangled, deparsed column names that only worked through `$` partial
# matching, and (c) passed `each <- sqrt(n)` POSITIONALLY to rep() as its
# `times` argument, so the row column cycled 1..10 instead of repeating
# each row number sqrt(n) times -- squares were drawn in the wrong cells.
boxes <- data.frame(
  index = 1:n,                            # square number 1..n
  ord   = rep(u, each = sqrt(n)),         # board row of each square
  abs   = rep(c(u, rev(u)), sqrt(n) / 2)  # column, alternating direction
)
# Draw the board and shade each square by the probability of occupying it
# after h turns.  Relies on the globals M, initial, n, COLOR and boxes
# defined above.
position<- function(h = 1){
# Row vector of square-occupancy probabilities after h turns.
D <- initial%*%powermat(M,h)
# NOTE(review): the `<-` tokens in the calls below are assignment
# expressions evaluated in place and passed POSITIONALLY, not named
# arguments (e.g. "white" becomes plot's `type`, FALSE its `xlim`).
# `col=`, `axes=`, `xlab=`, `ylab=`, `main=` (and `border=`, `cex=`)
# were almost certainly intended -- confirm this function actually runs.
plot(0:10,
0:10,
col<- "white",
axes<- FALSE,
xlab<- "",
ylab<- "",
main<- paste("Position after", h, "turns"))
# Draw the 10 x 10 grid lines.
segments(0:10, rep(0,11), 0:10, rep(10,11))
segments(rep(0,11), 0:10, rep(10,11), 0:10)
# Shade each square; D[i+1] is square i because D[1] is square 0.
# 500*D is clamped into the 1..101 colour index range.
for(i in 1:n){
polygon(boxes$abs[i]-c(0,0,1,1),
boxes$ord[i]-c(0,1,1,0),
col<- COLOR[min(1+trunc(500*D[i+1]),101)],
border<- NA)
}
# Label each square with its number at the cell centre.
text(boxes$abs-.5,
boxes$ord-.5,
boxes$index,
cex<- .7)
# Re-draw the outer board border.
segments(c(0,10),rep(0,2),c(0,10),rep(10,2))
segments(rep(0,2),c(0,10),rep(10,2),c(0,10))
} |
44f490c4f109887dccb62af33c84a20dd8bf72fe | 977743b1f39c76b566a5514fe8bb8e8108a7e17c | /man/api_coveragedb.Rd | 15bd897be2d1c0099126b50d62ff8045cc495484 | [
"MIT"
] | permissive | cimentadaj/scrapex | 9eaa7aa7016e46edfe0e49d8b312fa82e3c60f93 | 5fa0adbc7e8249a97c7f8740c33a224eb772e9d1 | refs/heads/master | 2023-01-05T23:21:02.758972 | 2022-12-23T16:44:24 | 2022-12-23T16:44:24 | 219,947,980 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,173 | rd | api_coveragedb.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api_coveragedb.R
\name{api_coveragedb}
\alias{api_coveragedb}
\title{REST API for the COVerAGE-DB database}
\usage{
api_coveragedb(port = NULL)
}
\arguments{
\item{port}{a numeric value used as a port}
}
\value{
callr result of the individual R process
}
\description{
COVerAGE-DB is an open-access database that includes cumulative counts of confirmed COVID-19 cases, deaths, tests, and vaccines by age and sex. The main goal of COVerAGE-DB is to provide a centralized, standardized, age-harmonized, and fully reproducible database of COVID-19 data. For more information, visit the COVerAGE-DB project website.
}
\details{
This API wraps a custom download performed locally of the database and filters only 'California', 'Utah' and 'New York State' for both sexes (`m` and `f` in the database).
The API has two parameters which can be filtered:
* region: 'California', 'Utah' and 'New York State'
* sex: `m` and `f`
This function launches a plumber API in a new R process using `callr` and return the `callr` process.
}
\examples{
\dontrun{
live_api <- api_coveragedb()
live_api$kill()
}
}
\author{
Jorge Cimentada
}
|
5a652cb3a5a8e9b2401ca163b88583a30af7e164 | d1bc382da458eece07e01459a903302b8d129919 | /webiste/shiny.R | 6d52e12a8620467b7aaf3fb7d2e0d013e6eac576 | [] | no_license | 123saaa/Hello | 00f09762363b1ff81c56d0863e393d5d6cfe6e85 | d451bb093691ad4dfaed5283960fa4e5728b54e7 | refs/heads/master | 2022-12-28T10:59:08.458103 | 2020-10-02T03:12:24 | 2020-10-02T03:12:24 | 281,630,664 | 0 | 0 | null | 2020-08-18T06:01:46 | 2020-07-22T09:14:42 | null | UTF-8 | R | false | false | 775 | r | shiny.R | library(shiny)
# Minimal "Heart Disease Prediction" input form (shiny is attached on the
# line above).  Only built and launched in an interactive session.  The app
# collects a few fields and lets the user upload a CSV file, which is
# echoed back as a table.
if (interactive()) {
  ui <- shinyUI(fluidPage(
    h1("Heart Disease Prediction", align = "center"), # titlePanel
    br(),
    numericInput("age", "Age:", 1),
    textInput("sex", "Sex:"),
    textInput("lymph", "Lymph:"),
    verbatimTextOutput("number"),
    fileInput("file1", "Choose CSV File", accept = ".csv"),
    submitButton("Submit"),
    verbatimTextOutput("value"),
    # BUG FIX: the server renders output$contents but the UI had no matching
    # render target, so the uploaded CSV was never shown.  The original
    # argument list also ended with a trailing comma, which makes
    # fluidPage() fail with an "argument is empty" error.
    tableOutput("contents")
  ))

  server <- function(input, output) {
    # Echo the uploaded CSV back as a table once a file has been chosen.
    output$contents <- renderTable({
      file <- input$file1
      req(file)  # do nothing until a file has been uploaded
      # BUG FIX: this line was commented out, leaving `ext` undefined in the
      # validate() call below (runtime error on every upload).
      ext <- tools::file_ext(file$datapath)
      validate(need(ext == "csv", "Please upload a csv file"))
      # BUG FIX: the UI has no "header" input, so input$header was NULL and
      # read.csv() failed; assume the uploaded CSV has a header row.
      read.csv(file$datapath, header = TRUE)
    })
  }

  app <- shinyApp(ui, server)
  runApp(app, port = getOption("shiny.port", 8080),
         host = getOption("shiny.host", "127.0.0.1"))
}
|
6c5df8334cd590d9dbf6923b7d8569c1b6819ea0 | b56ad2e238af61a08368b52a06b86649724d57e7 | /junk_files/old_code.R | abb30cc8afbc7c8b7d1e7748106c9dfe34e5ac3e | [
"BSD-3-Clause"
] | permissive | ryscott5/eparTextTools | 113b835df4df2f97be55a32a41f8d7778ad304c6 | 7849d9bcaabb8001a3b04d35aea48369014f265c | refs/heads/master | 2021-05-01T04:44:54.727927 | 2017-10-02T19:13:43 | 2017-10-02T19:13:43 | 63,177,507 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 12,567 | r | old_code.R | #Template for text analysis in R
#--------------------------------------------------#
#set colors for Graphs
EPAR_Colors_1 <- c("#ffffff", "#e5f0ec", "#8ebfad", "#417361", "#e8e3ee", "#9179af", "#3d2f4f")
EPAR_Colors_2 <- c("#9179af", "#8ebfad")
#SECTION 2: Most frequent terms overall
#calculate column totals (number of occurences per term across all documents)
totals <- colSums(df[,-1], na.rm = TRUE) #[,-1] specifies that we want all rows and all columns except column 1, na.rm=TRUE means we omit the missing variables
#save as a dataframe for graphing, re-sort highest to lowest frequency
colSums(tdm)
names(tdm)
#Template for text analysis in R
#Input: term-document matrix .csv file from Python
#Output: text analysis graphs
#clear workspace
rm(list = ls())
#--------------------------------------------------#
#SECTION 1: Set-up
#install packages (uncomment if you haven't installed them on your computer yet)
#install.packages("reshape2")
#install.packages("ggplot2")
#load packages (do not comment out)
library("reshape2")
library("ggplot2")
#set colors for Graphs
EPAR_Colors_1 <- c("#ffffff", "#e5f0ec", "#8ebfad", "#417361", "#e8e3ee", "#9179af", "#3d2f4f")
EPAR_Colors_2 <- c("#9179af", "#8ebfad")
#import .csv file
df <- read.csv("R:/Project/EPAR/EPAR Templates and Guidelines/Text Analysis Procedures and Templates/Project_Template_Python/Code/frequencyMatrix - Copy.csv")
#Replace NA values with zeros
df[is.na(df)] <- 0
#--------------------------------------------------#
#SECTION 2: Most frequent terms overall
#calculate column totals (number of occurences per term across all documents)
totals <- colSums(df[,-1], na.rm = TRUE) #[,-1] specifies that we want all rows and all columns except column 1, na.rm=TRUE means we omit the missing variables
#save as a dataframe for graphing, re-sort highest to lowest frequency
totals <- as.data.frame(sort(totals, decreasing = TRUE))
#Rename the column name as "Frequency"
names(totals)[1]<-"Frequency"
#add the rownames as a real column
totals <- cbind(Word=rownames(totals), totals)
#make it a factor so the order is maintained in a graph - ordering the Words based on their Frequency
totals$Word <- factor(totals$Word, levels=totals[order(totals$Frequency, decreasing=TRUE), "Word"])
#Graph the top 15 terms in a bar plot
graph_1 <- ggplot(totals[1:15,], aes(Word, Frequency)) +
geom_bar(fill="#8ebfad", position = "dodge", stat="identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Frequency")+
ggtitle("Most Common Words Across All Grants")
graph_1
#--------------------------------------------------#
#SECTION 3: Creating a sub-list with terms of interest
#Creating a dataframe with only gendered words (binding selected columns from the larger dataframe)
df.gender <- df[, c("filename", "woman", "girl", "wife", "female", "men", "boy", "husband", "male")]
#total frequencies per word across all documents:
totals.gender <- colSums(df.gender[, -1], na.rm = TRUE)
df.gender <- cbind(df.gender, totals.gender)
#add word names to the gender totals dataframe
totals.gender <- as.data.frame(totals.gender)
totals.gender <- cbind(word=rownames(totals.gender), totals.gender)
#make it a factor so the order is maintained in a graph - ordering the words based on their frequency
totals.gender$word <- factor(totals.gender$word, levels=totals.gender[order(totals.gender$totals.gender, decreasing=TRUE), "word"])
#Graph the overall frequencies
graph_2 <- ggplot(totals.gender, aes(word, totals.gender)) +
geom_bar(fill="#8ebfad", position = "dodge", stat="identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Frequency")+
ggtitle("Gender Word Frequencies Across all Documents")
graph_2
#Alternative graph: color-coding by gender
#add a column to the totals.gender data frame that specifies the gender of the word
totals.gender <- cbind(totals.gender, gender = c("female", "female", "female", "female", "male", "male", "male", "male"))
#graph - color fill is dependent on the "gender" variable
graph_3 <- ggplot(totals.gender, aes(word, totals.gender, fill=gender)) +
geom_bar(stat="identity")+
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=14),panel.background=element_blank())+
xlab("")+
ylab("Word count, all proposals")+
guides(fill=FALSE)+
scale_fill_manual(values=c("#9179af", "#8ebfad"))
graph_3
#--------------------------------------------------#
#SECTION 4: Comparing the selected words across documents
#Making a graph that shows the relative frequency of female versus male terms between grant documents
#New dataframe
count.df <- as.data.frame(df$filename)
#rename the columns
colnames(count.df) <- "filename"
#Calculate the sum of frequencies of all female words in a given grant
count.df$total.female <- df$woman + df$girl + df$female + df$wife
#Calculate the sum of frequencies of all male words in a given grant
count.df$total.male <- df$boy + df$men + df$male + df$husband
#Calculate total number of words per grant
count.df$total.words <- rowSums(df[-1])
#Calcuate relative frequency of female words per grant (total female words / total words)
count.df$freq.female <- count.df$total.female/count.df$total.words
#Calcuate relative frequency of male words per grant (total male words / total words)
count.df$freq.male <- count.df$total.male/count.df$total.words
#new data frame for frequency
frequency.df <- count.df[, c("filename", "freq.female", "freq.male")]
#melt the data
frequency.df.melt <- melt(frequency.df, id.vars="filename")
#Graph relative frequencies by grant document
graph_4 <-ggplot(frequency.df.melt, aes(x=filename, y=value)) +
geom_bar(aes(fill=variable), position = "dodge", stat = "identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Relative Frequency of Document Words")+
scale_fill_manual(values=c("#9179af", "#8ebfad"))
graph_4
#Same as above, but this time graph the total counts of words per grant
count.df <- count.df[, c("filename", "total.female", "total.male")]
#melt the data
count.df.melt <- melt(count.df, id.vars="filename")
#Graph counts by grant document
graph_4 <-ggplot(count.df.melt, aes(x=filename, y=value)) +
geom_bar(aes(fill=variable), position = "dodge", stat = "identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Count of Document Words")+
scale_fill_manual(values=c("#9179af", "#8ebfad"))
graph_4
#Same as above, but only for the Asian grants
count.asia.melt <- melt(count.df[c(2, 9, 12:13, 17:20),], id.vars="filename") #selecting only the rows of Asian grants
graph_5 <-ggplot(count.asia.melt, aes(x=filename, y=value)) +
geom_bar(aes(fill=variable), position = "dodge", stat = "identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Count of Document Words")+
scale_fill_manual(values=c("#9179af", "#8ebfad"))
graph_5
totals <- as.data.frame(sort(totals, decreasing = TRUE))
#Rename the column name as "Frequency"
names(totals)[1]<-"Frequency"
#add the rownames as a real column
totals <- cbind(Word=rownames(totals), totals)
#make it a factor so the order is maintained in a graph - ordering the Words based on their Frequency
totals$Word <- factor(totals$Word, levels=totals[order(totals$Frequency, decreasing=TRUE), "Word"])
#Graph the top 15 terms in a bar plot
graph_1 <- ggplot(totals[1:15,], aes(Word, Frequency)) +
geom_bar(fill="#8ebfad", position = "dodge", stat="identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Frequency")+
ggtitle("Most Common Words Across All Grants")
graph_1
#--------------------------------------------------#
#SECTION 3: Creating a sub-list with terms of interest
#Creating a dataframe with only gendered words (binding selected columns from the larger dataframe)
df.gender <- df[, c("filename", "woman", "girl", "wife", "female", "men", "boy", "husband", "male")]
#total frequencies per word across all documents:
totals.gender <- colSums(df.gender[, -1], na.rm = TRUE)
df.gender <- cbind(df.gender, totals.gender)
#add word names to the gender totals dataframe
totals.gender <- as.data.frame(totals.gender)
totals.gender <- cbind(word=rownames(totals.gender), totals.gender)
#make it a factor so the order is maintained in a graph - ordering the words based on their frequency
totals.gender$word <- factor(totals.gender$word, levels=totals.gender[order(totals.gender$totals.gender, decreasing=TRUE), "word"])
#Graph the overall frequencies
graph_2 <- ggplot(totals.gender, aes(word, totals.gender)) +
geom_bar(fill="#8ebfad", position = "dodge", stat="identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Frequency")+
ggtitle("Gender Word Frequencies Across all Documents")
graph_2
#Alternative graph: color-coding by gender
#add a column to the totals.gender data frame that specifies the gender of the word
totals.gender <- cbind(totals.gender, gender = c("female", "female", "female", "female", "male", "male", "male", "male"))
#graph - color fill is dependent on the "gender" variable
graph_3 <- ggplot(totals.gender, aes(word, totals.gender, fill=gender)) +
geom_bar(stat="identity")+
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=14),panel.background=element_blank())+
xlab("")+
ylab("Word count, all proposals")+
guides(fill=FALSE)+
scale_fill_manual(values=c("#9179af", "#8ebfad"))
graph_3
#--------------------------------------------------#
#SECTION 4: Comparing the selected words across documents
#Making a graph that shows the relative frequency of female versus male terms between grant documents
#New dataframe
count.df <- as.data.frame(df$filename)
#rename the columns
colnames(count.df) <- "filename"
#Calculate the sum of frequencies of all female words in a given grant
count.df$total.female <- df$woman + df$girl + df$female + df$wife
#Calculate the sum of frequencies of all male words in a given grant
count.df$total.male <- df$boy + df$men + df$male + df$husband
#Calculate total number of words per grant
count.df$total.words <- rowSums(df[-1])
#Calcuate relative frequency of female words per grant (total female words / total words)
count.df$freq.female <- count.df$total.female/count.df$total.words
#Calcuate relative frequency of male words per grant (total male words / total words)
count.df$freq.male <- count.df$total.male/count.df$total.words
#new data frame for frequency
frequency.df <- count.df[, c("filename", "freq.female", "freq.male")]
#melt the data
frequency.df.melt <- melt(frequency.df, id.vars="filename")
#Graph relative frequencies by grant document
graph_4 <-ggplot(frequency.df.melt, aes(x=filename, y=value)) +
geom_bar(aes(fill=variable), position = "dodge", stat = "identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Relative Frequency of Document Words")+
scale_fill_manual(values=c("#9179af", "#8ebfad"))
graph_4
#Same as above, but this time graph the total counts of words per grant
count.df <- count.df[, c("filename", "total.female", "total.male")]
#melt the data
count.df.melt <- melt(count.df, id.vars="filename")
#Graph counts by grant document
graph_4 <-ggplot(count.df.melt, aes(x=filename, y=value)) +
geom_bar(aes(fill=variable), position = "dodge", stat = "identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Count of Document Words")+
scale_fill_manual(values=c("#9179af", "#8ebfad"))
graph_4
#Same as above, but only for the Asian grants
count.asia.melt <- melt(count.df[c(2, 9, 12:13, 17:20),], id.vars="filename") #selecting only the rows of Asian grants
graph_5 <-ggplot(count.asia.melt, aes(x=filename, y=value)) +
geom_bar(aes(fill=variable), position = "dodge", stat = "identity") +
theme(axis.text.x=element_text(color="#000000",angle=50, hjust=1, size=12),panel.background=element_blank())+
xlab("")+
ylab("Count of Document Words")+
scale_fill_manual(values=c("#9179af", "#8ebfad"))
graph_5
|
691d69e55a0c4861d65f888c3e724c6532080dc6 | bffd2afc5e5717528138b497b923c0ba6f65ef58 | /man/ex10.08.Rd | 92dd5a6fc5937b005b7187b1f657bd7037bbce90 | [] | no_license | dmbates/Devore6 | 850565e62b68e9c01aac8af39ff4275c28b4ce68 | b29580f67971317b4c2a5e8852f8218ecf61d95a | refs/heads/master | 2016-09-10T21:47:13.150798 | 2012-05-31T19:32:53 | 2012-05-31T19:32:53 | 4,512,058 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 493 | rd | ex10.08.Rd | \name{ex10.08}
\alias{ex10.08}
\docType{data}
\title{data from exercise 10.8}
\description{
The \code{ex10.08} data frame has 35 rows and 2 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{stiffnss}{
a numeric vector
}
\item{length}{
a numeric vector
}
}
}
\source{
Devore, J. L. (2003) \emph{Probability and Statistics for Engineering and the Sciences (6th ed)}, Duxbury
}
\examples{
str(ex10.08)
}
\keyword{datasets}
|
50971511287e1af26e3355e0137926b3b47e3a33 | 842ae20ba0e9ae9b50f54be9ebcf6d0fc0aaa597 | /R/genOr.R | d7faf009d575981facc23f6ef420ba457af39b37 | [] | no_license | jsieker/MultiMod | ad0bc90515c4d733e2839633ea5cd2f44a9b73ae | 341d91facd69fb608bcf7147b18f3555c92319b8 | refs/heads/master | 2021-01-16T18:25:32.393780 | 2018-01-09T05:27:35 | 2018-01-09T05:27:35 | 100,074,961 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 873 | r | genOr.R |
# Assign each gene (row of kME) to every module whose module-eigengene
# correlation passes `threshold`, then count module memberships per gene.
# Returns a two-element (unnamed) list: the membership table and a tabled
# distribution of membership counts.
genOr <- function(threshold, kME, cutVal){
# Default: trim the output columns down to the observed membership range.
if(missing(cutVal)){ cutVal = TRUE }
premergeKME <- kME
# One slot per module for every gene; 0 means "not assigned".
receiving <- data.frame(matrix(0, nrow=nrow(premergeKME), ncol=ncol(premergeKME)))
for(k in 1:nrow(premergeKME)) {
# NOTE(review): `-sort(x)` NEGATES the sorted values; for a positive
# threshold the comparison below can then never succeed when kME rows are
# plain numeric vectors.  `sort(x, decreasing = TRUE)` looks like the
# intent -- verify against the expected kME input type (matrix vs
# data.frame) before relying on this function.
shelf <- data.frame(-sort(premergeKME[k,]))
s <- sum(shelf[1,]>threshold)
if(s>=1){
# Names of the s top-ranked modules for gene k.
col <-colnames(shelf[1:s])
receiving[k, 1:length(col)] <- col
}
}
#count number of modules each gene is assigned to
rownames(premergeKME) -> rownames(receiving)
Out <- receiving
count <- data.frame(matrix(0, (nrow(Out)), 1))
for(k in 1:nrow(Out)){
count[k,] <- sum(Out[k,] != 0)
}
rownames(count) <- rownames(Out)
colnames(count) <- c("Module Memberships")
# Record the threshold used alongside the per-gene counts.
count$threshold <- threshold
Mod <- cbind(count, Out)
# Distribution of membership counts across all genes.
tab <- table(Mod$"Module Memberships")
# Optionally drop surplus empty assignment columns.
if(cutVal == TRUE) { Mod <- Mod[,1:(length(tab)+1)]}
# NOTE(review): `<-` inside list() performs local assignments and returns
# an UNNAMED two-element list; `=` would be needed for the component names
# GenOrOutput / Tabled_Membership to actually be attached.
list(GenOrOutput <- Mod, Tabled_Membership <- tab)
}
|
f122cf36635f141016a947baf77dfcecb7ec73f9 | 5ebf58c2c5cdf592d2aec47ab230c983c4bc0765 | /man/boxCoxEncode.Rd | ac1b2ec76e19ccf6f6048ad1117711d5f66fb406 | [] | no_license | AnotherSamWilson/pipelineTools | a90c968c50d36be0071c66ecb3e7c8e626c2c641 | 4917ec3630bbdfe58596e44293828fd47f9517b8 | refs/heads/master | 2022-11-01T07:51:47.322047 | 2020-06-17T14:22:45 | 2020-06-17T14:22:45 | 272,734,088 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,849 | rd | boxCoxEncode.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boxCoxEncode.R
\name{boxCoxEncode}
\alias{boxCoxEncode}
\title{boxCoxEncode}
\usage{
boxCoxEncode(
dt,
vars,
lambda = NULL,
minNormalize = 0.05,
capNegPredOutliers = 0
)
}
\arguments{
\item{dt}{Dataset to create object on.}
\item{vars}{variables you want to include in the encoding.}
\item{lambda}{You can pass custom lambdas if you want. Not recommended.}
\item{minNormalize}{Box-Cox is a _risky_ transformation because it will fail if
it encounters a number <= 0. You can reduce this _riskyness_ by adding a certain amount of
'space' between your expected range and 0. \code{minNormalize} represents the number of
standard deviations you want between 0 and the minimum number (lower bound) in the distribution.
This is set higher to ensure the variable never experiences a future number <= 0. Usually
safe being set pretty low if you have lots of data. If you have done some engineering
yourself to ensure this never happens, it can be set to 0. All variables are automatically re-scaled.
Can either be a scalar or a named list of values, with names equal to vars.}
\item{capNegPredOutliers}{If you weren't careful enough with minNormalize and some
negative values end up coming through, do you want to cap them before they hit boxCox, or throw an error?
Safer to throw an error, so it's set to 0 by default. A value of 0 results in \code{applyEncoding}
trying to perform boxCox on 0, which will fail. If not 0, this number represents the number
of standard deviations above 0 that the numbers will be (min) capped at. Should be lower than minNormalize,
or the results will no longer be in the same order, since negative values will now be greater than the
minimum sample this encoding was created on.}
}
\value{
BoxCox Encoded Object
}
\description{
boxCoxEncode
}
|
50dbac7fbadd15e04e649a2bb80c3493b98f0b42 | 3baf52f8f7e343079ca01b75eeafa0d985cddcab | /supertable1.R | 09e3f81f4605f622ff429fc5176c1f0fac191aae | [] | no_license | nanhung/httk.ra | 85cb6ea33fb8fa17e0f085ce232b9d8b25c60741 | 2e04126c2bbc799e23d110dd8a421854a1e6e3ee | refs/heads/master | 2021-01-20T01:13:39.941213 | 2018-02-13T20:37:24 | 2018-02-13T20:37:24 | 89,240,762 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,779 | r | supertable1.R | library(httk) # for toxcast AC50
# Generate the main data frame
# Assemble the master chemical table: flags for presence in the ToxCast,
# Tox21, ExpoCast and NHANES datasets, plus ToxCast AC50 summaries.
# (httk is attached on the line above; is.nhanes()/is.tox21()/... and the
# tc.dt data.table come from that package -- confirm against httk version.)
Chem.df<-read.csv("ChemData.csv", row.names = NULL)
# Empty placeholder columns, overwritten with 1 where the chemical appears.
Chem.df[,"ToxCast"]<-""
Chem.df[,"Tox21"]<-""
Chem.df[,"ExpoCast"]<-""
Chem.df[,"NHANES"]<-""
no.Chem <- length(Chem.df[,1]) # The number of chemicals
# Double check the excel table and httk (*some information are different*)
for (this.cas in Chem.df$CAS[1:no.Chem])
{
this.index <- Chem.df$CAS==this.cas
if (is.nhanes(this.cas)) Chem.df[this.index,"NHANES"] <- 1 # 1 = yes
if (is.tox21(this.cas)) Chem.df[this.index,"Tox21"] <- 1
if (is.toxcast(this.cas)) Chem.df[this.index,"ToxCast"] <- 1
if (is.expocast(this.cas)) Chem.df[this.index,"ExpoCast"] <- 1
}
#View(Chem.df)
# Generate Toxicity Table w/ AC50
# Keep only "Active" ToxCast calls and the columns of interest.
tc.dt.sub <- tc.dt[`Activity Call`=="Active",
.(`Chemical Name`, CASRN, `Assay Endpoint`, `Activity Call`, `AC 50`)]
# Subset to the chemicals in Chem.df (CAS numbers live in column 3).
# NOTE(review): rbind inside a loop is O(n^2); a single
# tc.dt.sub[CASRN %in% Chem.df$CAS] subset would do the same in one step.
Chem.tc.dt <- tc.dt.sub[tc.dt.sub$CASRN %in% Chem.df[1,3],]
for(i in 2:no.Chem){
Chem.tc.dt <- rbind(Chem.tc.dt, tc.dt.sub[tc.dt.sub$CASRN %in% Chem.df[i,3],])
}
#View(Chem.tc.dt)
# Summarize the no. of toxicity data, min-AC50, and max-AC50 in the dataset
M<-as.matrix(Chem.tc.dt[,2])
Chem.df[,"No.ToxData"]<-""
Chem.df[,"Min.AC50"]<-""
Chem.df[,"Max.AC50"]<-""
for(i in 1:no.Chem){
tmp<-subset(Chem.tc.dt, CASRN==Chem.df$CAS[i])
Chem.df[i,"No.ToxData"]<-length(which(M==Chem.df$CAS[i]))
# min()/max() of an empty selection give +/-Inf (with a warning);
# those sentinel values are replaced by NA below.
Chem.df[i,"Min.AC50"]<-min(tmp$`AC 50`)
Chem.df[i,"Max.AC50"]<-max(tmp$`AC 50`)
}
# Columns 13:14 (Min/Max AC50) were filled as character above; convert to
# numeric and turn the +/-Inf from chemicals with no active assays into NA.
tmp<-cbind(as.numeric(Chem.df[,13]),as.numeric(Chem.df[,14]))
tmp[!is.finite(tmp)] <- NA
Chem.df[, 13:14] <- tmp
View(Chem.df)
# Find Tox21 data in Chem.tc.dt
# Subset the active-assay table to Tox21 assay endpoints.
Chem.t21.dt <- Chem.tc.dt[grep("Tox21", Chem.tc.dt$`Assay Endpoint`)]
View(Chem.t21.dt)
# Curl ExpoCast data
# Screen-scrape the EPA CompTox dashboard HTML for each chemical's ExpoCast
# exposure predictions (total-population median and 95th percentile).
# NOTE(review): the fixed substr() offsets (44, 51) depend on the exact HTML
# layout of the dashboard page and will break silently if the site changes.
Chem.df[,"Expo.Total_median"]<-"" # median for total population
Chem.df[,"Expo.Total_95perc"]<-"" # 95% for total population
# CAUTION! This step will take long time ~30 min
for(i in 1:no.Chem){
CAS<-Chem.df[i,3]
tmp<-readLines(paste("https://comptox.epa.gov/dashboard/dsstoxdb/results?utf8=%E2%9C%93&search=", CAS, sep = ""))
Chem.df[i,"Expo.Total_median"]<-substr(tmp[grep('>Total<',tmp)+1][1], 44, 51)
Chem.df[i,"Expo.Total_95perc"]<-substr(tmp[grep('>Total<',tmp)+2][1], 44, 51)
}
# Rows where the scrape captured HTML markup (contain ">") are re-fetched
# using the second ">Total<" occurrence on the page instead of the first.
# NOTE(review): tmp.df[,1] is used as row indices into Chem.df, i.e. the
# first column is assumed to hold the row order -- confirm.
tmp.df <- Chem.df[grep(">", Chem.df$Expo.Total_median), ] # detect the wrong data
for(i in tmp.df[,1]){ # revise the correct value
CAS<-Chem.df[i,3]
tmp<-readLines(paste("https://comptox.epa.gov/dashboard/dsstoxdb/results?utf8=%E2%9C%93&search=", CAS, sep = ""))
Chem.df[i,"Expo.Total_median"]<-substr(tmp[grep('>Total<',tmp)+1][2], 44, 51)
Chem.df[i,"Expo.Total_95perc"]<-substr(tmp[grep('>Total<',tmp)+2][2], 44, 51)
}
write.csv(Chem.df, file = "ChemTox.csv")
# 0525 new master list----
# Re-run of the whole pipeline above (dataset flags, AC50 summary, Tox21
# subset, ExpoCast scrape) for the updated 2017-05-24 master list.
# NOTE(review): this is a near-verbatim duplicate of the section above and
# would be better expressed as a reusable function.
Chem.df<-read.csv("ChemData0524.csv")
Chem.df[,"ToxCast"]<-""
Chem.df[,"Tox21"]<-""
Chem.df[,"ExpoCast"]<-""
Chem.df[,"NHANES"]<-""
no.Chem <- length(Chem.df$Order) # The number of chemicals
# Double check the excel table and httk (*some information are different*)
for (this.cas in Chem.df$CAS[1:no.Chem])
{
this.index <- Chem.df$CAS==this.cas
if (is.nhanes(this.cas)) Chem.df[this.index,"NHANES"] <- 1 # 1 = yes
if (is.tox21(this.cas)) Chem.df[this.index,"Tox21"] <- 1
if (is.toxcast(this.cas)) Chem.df[this.index,"ToxCast"] <- 1
if (is.expocast(this.cas)) Chem.df[this.index,"ExpoCast"] <- 1
}
# Generate Toxicity Table w/ AC50
tc.dt.sub <- tc.dt[`Activity Call`=="Active",
.(`Chemical Name`, CASRN, `Assay Endpoint`, `Activity Call`, `AC 50`)]
Chem.tc.dt <- tc.dt.sub[tc.dt.sub$CASRN %in% Chem.df[1,3],]
for(i in 2:no.Chem){
Chem.tc.dt <- rbind(Chem.tc.dt, tc.dt.sub[tc.dt.sub$CASRN %in% Chem.df[i,3],])
}
# Per-chemical hit count and min/max AC50 (empty subsets give Inf, cleaned
# to NA below).
M<-as.matrix(Chem.tc.dt[,2])
Chem.df[,"No.ToxData"]<-""
Chem.df[,"Min.AC50"]<-""
Chem.df[,"Max.AC50"]<-""
for(i in 1:no.Chem){
tmp<-subset(Chem.tc.dt, CASRN==Chem.df$CAS[i])
Chem.df[i,"No.ToxData"]<-length(which(M==Chem.df$CAS[i]))
Chem.df[i,"Min.AC50"]<-min(tmp$`AC 50`)
Chem.df[i,"Max.AC50"]<-max(tmp$`AC 50`)
}
# NOTE(review): hard-coded column positions 13:14 assumed to be
# Min.AC50/Max.AC50 -- confirm.
tmp<-cbind(as.numeric(Chem.df[,13]),as.numeric(Chem.df[,14]))
tmp[!is.finite(tmp)] <- NA
Chem.df[, 13:14] <- tmp
View(Chem.df)
# Find Tox21 data in Chem.tc.dt
Chem.t21.dt <- Chem.tc.dt[grep("Tox21", Chem.tc.dt$`Assay Endpoint`)]
View(Chem.t21.dt)
# Curl ExpoCast data
# Scrape CompTox dashboard exposure predictions (see notes on the scrape in
# the first pipeline run; same fixed-offset substr() fragility applies).
Chem.df[,"Expo.Total_median"]<-"" # median for total population
Chem.df[,"Expo.Total_95perc"]<-"" # 95% for total population
# CAUTION! This step will take long time ~30 min
for(i in 1:no.Chem){
CAS<-Chem.df[i,3]
tmp<-readLines(paste("https://comptox.epa.gov/dashboard/dsstoxdb/results?utf8=%E2%9C%93&search=", CAS, sep = ""))
Chem.df[i,"Expo.Total_median"]<-substr(tmp[grep('>Total<',tmp)+1][1], 44, 51)
Chem.df[i,"Expo.Total_95perc"]<-substr(tmp[grep('>Total<',tmp)+2][1], 44, 51)
}
tmp.df <- Chem.df[grep(">", Chem.df$Expo.Total_median), ] # detect the wrong data
for(i in tmp.df[,1]){ # revise the correct value
CAS<-Chem.df[i,3]
tmp<-readLines(paste("https://comptox.epa.gov/dashboard/dsstoxdb/results?utf8=%E2%9C%93&search=", CAS, sep = ""))
Chem.df[i,"Expo.Total_median"]<-substr(tmp[grep('>Total<',tmp)+1][2], 44, 51)
Chem.df[i,"Expo.Total_95perc"]<-substr(tmp[grep('>Total<',tmp)+2][2], 44, 51)
}
# Estimate CSS
# Estimate steady-state plasma concentrations (Css) with httk's
# 3-compartment steady-state model, scaled by the ExpoCast exposure
# estimates (median and 95th percentile daily dose).
Chem.df<-read.csv("ChemTox_v2.csv", header = T)
Chem.df[,"httk"]<-""
Chem.df[,"Css.med_medRTK.plasma.uM"]<-"" # median for total population
Chem.df[,"Css.med_95RTK.plasma.uM"]<-"" # median for total population
Chem.df[,"Css.95perc_medRTK.plasma.uM"]<-"" # 95% for total population
Chem.df[,"Css.95perc_95RTK.plasma.uM"]<-"" # 95% for total population
# Double check the excel table and httk (*some information are different*)
# NOTE(review): no.Chem still holds the count from the previous section while
# Chem.df has just been re-read from ChemTox_v2.csv -- confirm the row counts
# match, otherwise the CAS subset below is wrong.
for (this.cas in Chem.df$CAS[1:no.Chem])
{
this.index <- Chem.df$CAS==this.cas
if (is.httk(this.cas)) Chem.df[this.index,"httk"] <- 1
}
# NOTE(review): tmp.df[,1] is used as row indices into Chem.df -- assumes the
# first column holds the row order.
tmp.df <- Chem.df[grep("1", Chem.df$httk), ] # detect the httk
tmp.df[,1]
# For each httk-covered chemical: ratio = 95th/50th percentile Monte Carlo
# Css (inter-individual toxicokinetic variability), applied to the analytic
# Css at the median and 95th-percentile exposure doses.
for (i in tmp.df[,1]){
cas<-Chem.df$CAS_trimmed[i]
md<-Chem.df$Expo.Total_median[i]
u95<-Chem.df$Expo.Total_95perc[i]
a<-calc_mc_css(chem.cas=cas, which.quantile=.5, output.units='uM', model='3compartmentss', httkpop=FALSE)
b<-calc_mc_css(chem.cas=cas, which.quantile=.95, output.units='uM', model='3compartmentss', httkpop=FALSE)
ratio<-b/a
Chem.df[i,"Css.med_medRTK.plasma.uM"] <- calc_analytic_css(chem.cas=cas, output.units='uM', model='3compartmentss', daily.dose=md)
Chem.df[i,"Css.med_95RTK.plasma.uM"] <- Chem.df[i,"Css.med_medRTK.plasma.uM"] * ratio
Chem.df[i,"Css.95perc_medRTK.plasma.uM"] <- calc_analytic_css(chem.cas=cas, output.units='uM', model='3compartmentss', daily.dose=u95)
Chem.df[i,"Css.95perc_95RTK.plasma.uM"] <- Chem.df[i,"Css.95perc_medRTK.plasma.uM"] * ratio
}
#write.csv(Chem.df, file = "ChemTox.csv")
# Scratch check of the Monte-Carlo ratio for the first chemical.
cas<-Chem.df$CAS_trimmed[1]
md<-Chem.df$Expo.Total_median[1]
u95<-Chem.df$Expo.Total_95perc[1]
a<-calc_mc_css(chem.cas=cas, which.quantile=.5, output.units='uM', model='3compartmentss', httkpop=FALSE)
b<-calc_mc_css(chem.cas=cas, which.quantile=.95, output.units='uM', model='3compartmentss', httkpop=FALSE)
b/a
#
# Scrape boiling-point values (experimental and predicted averages, medians,
# ranges, and unit) for each chemical from the CompTox dashboard HTML.
Chem.df<-read.csv("ChemTox_v2.csv", header = T)
no.Chem <- length(Chem.df[,1]) # The number of chemicals
# NOTE(review): after this subset, Chem.df[i,3] below refers to the 3rd of
# the *retained* columns 4:6 of the original file -- confirm it is still the
# CAS column.
Chem.df<-Chem.df[c(4:6)]
# NOTE(review): the columns initialised here ("Boiling Point Ave.exp", ...)
# are never written to -- the loop below assigns to "Ave.exp", "Ave.prd",
# etc., creating NEW columns and leaving these empty. One of the two naming
# schemes is a mistake; confirm which was intended before the CSV is used.
Chem.df[,"Boiling Point Ave.exp"]<-""
Chem.df[,"Boiling Point Ave.prd"]<-""
Chem.df[,"Boiling Point Med.exp"]<-""
Chem.df[,"Boiling Point Med.prd"]<-""
Chem.df[,"Boiling Point Rng.exp"]<-""
Chem.df[,"Boiling Point Rng.prd"]<-""
Chem.df[,"Boiling Point Deg"]<-""
# Fixed line offsets (+3, +8, ...) and substr() positions depend on the exact
# dashboard HTML layout; rows that captured markup are re-scraped below with
# the alternate offsets used for chemicals lacking experimental values.
for(i in 1:no.Chem){
CAS<-Chem.df[i,3]
tmp<-readLines(paste("https://comptox.epa.gov/dashboard/dsstoxdb/results?utf8=%E2%9C%93&search=", CAS, sep = ""))
Chem.df[i,"Ave.exp"]<-substr(tmp[grep('>Boiling Point<',tmp)+3][1], 31, 37)
Chem.df[i,"Ave.prd"]<-substr(tmp[grep('>Boiling Point<',tmp)+8][1], 31, 37)
Chem.df[i,"Med.exp"]<-substr(tmp[grep('>Boiling Point<',tmp)+13][1], 31, 37)
Chem.df[i,"Med.prd"]<-substr(tmp[grep('>Boiling Point<',tmp)+18][1], 31, 37)
Chem.df[i,"Rng.exp"]<-substr(tmp[grep('>Boiling Point<',tmp)+23][1], 35, 44)
Chem.df[i,"Rng.prd"]<-substr(tmp[grep('>Boiling Point<',tmp)+28][1], 35, 44)
Chem.df[i,"Deg"]<-substr(tmp[grep('>Boiling Point<',tmp)+33][1], 36, 37)
}
tmp.df <- Chem.df[grep("class=", Chem.df$Ave.exp), ] # detect the wrong data
for(i in tmp.df[,1]){
CAS<-Chem.df[i,3]
tmp<-readLines(paste("https://comptox.epa.gov/dashboard/dsstoxdb/results?utf8=%E2%9C%93&search=", CAS, sep = ""))
Chem.df[i,"Ave.exp"]<-""
Chem.df[i,"Ave.prd"]<-substr(tmp[grep('>Boiling Point<',tmp)+3][1], 56, 58)
Chem.df[i,"Med.exp"]<-""
Chem.df[i,"Med.prd"]<-substr(tmp[grep('>Boiling Point<',tmp)+9][1], 56, 58)
Chem.df[i,"Rng.prd"]<-substr(tmp[grep('>Boiling Point<',tmp)+15][1], 60, 69)
Chem.df[i,"Deg"]<-substr(tmp[grep('>Boiling Point<',tmp)+20][1], 61, 61)
}
write.csv(Chem.df, file = "ChemBoil.csv")
|
d788098cd4c258cf5544c61011d1aff8e97be515 | 0bfbfffdd6a9fbf8d59a83725de4169ca3e33d1a | /src/analysis_forpaper/Fig_heightandpcloud.R | 4fd933dae1e19167207781ceed2d67f7f64dc5b7 | [
"Apache-2.0"
] | permissive | komazsofi/PhDPaper3_wetlandstr | 9f903fdd99f5b3752a64b9c44ad76b66af5615bf | 8977d30705ae5e617ba188a0a110371360efe6d3 | refs/heads/master | 2023-07-15T10:15:58.975301 | 2021-08-27T12:11:57 | 2021-08-27T12:11:57 | 257,857,347 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,033 | r | Fig_heightandpcloud.R | library(ggplot2)
library(gridExtra)
library(dplyr)
library(tidyr)
library(stargazer)
library(lidR)
#workdir="C:/Koma/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/Analysis9/"
# Working directory and input data for the height/point-cloud figure.
# NOTE(review): setwd() with an absolute machine-specific path makes the
# script non-portable; consider here::here() or relative paths.
workdir="D:/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/Analysis9/"
setwd(workdir)
####################################### Plot
# Plot databases at 0.5 m and 5 m radius; total.weight rescaled from
# cm^2 units (presumably -- confirm the /10000 factor against the source).
plot_data05=read.csv(paste("Plot_db_",0.5,".csv",sep=""))
plot_data5=read.csv(paste("Plot_db_",5,".csv",sep=""))
plot_data5$total.weight=plot_data5$total.weight/10000
#las_bal <- readLAS("C:/Koma/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/pcloud/balaton_25mrad_reclass/Balaton_OBJNAME204_25mrad_reclass.laz")
#las_fert <- readLAS("C:/Koma/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/pcloud/ferto_25mrad_reclass/Ferto_OBJNAME321_25mrad_reclass.laz")
#las_tisza <- readLAS("C:/Koma/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/pcloud/tisza_25mrad_leafon_reclass/Tisza_OBJNAME186_25mrad_reclass.laz")
#las_tisza <- readLAS("C:/Koma/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/pcloud/tisza_25mrad_reclass/Tisza_OBJNAME186_25mrad_reclass.laz")
# 25 m-radius reclassified point clouds for one example plot per study site
# (Balaton OBJNAME 204, Ferto 321, Tisza 186; leaf-off Tisza flight active).
las_bal <- readLAS("D:/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/pcloud/balaton_25mrad_reclass/Balaton_OBJNAME204_25mrad_reclass.laz")
las_fert <- readLAS("D:/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/pcloud/ferto_25mrad_reclass/Ferto_OBJNAME321_25mrad_reclass.laz")
#las_tisza <- readLAS("D:/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/pcloud/tisza_25mrad_leafon_reclass/Tisza_OBJNAME186_25mrad_reclass.laz")
las_tisza <- readLAS("D:/Sync/_Amsterdam/_PhD/Chapter2_habitat_str_lidar/3_Dataprocessing/pcloud/tisza_25mrad_reclass/Tisza_OBJNAME186_25mrad_reclass.laz")
##### Pcloud visualization
# One-row metadata for each example plot (plot centre coordinates etc.).
bal=plot_data05[plot_data05$OBJNAME==204,]
fert=plot_data05[plot_data05$OBJNAME==321,]
tisza=plot_data05[plot_data05$OBJNAME==186,]
plot_crossection <- function(las,
                             p1,
                             p2,
                             bal,
                             width = 2, colour_by = NULL)
{
  # Plot a vertical cross-section (X vs Z) of a lidR point cloud along the
  # transect p1 -> p2, with dashed guide lines at the plot centre
  # (bal$coords.x1, red) and at +/-0.5 m (blue) and +/-2.5 m (black) around it.
  #
  # las:       LAS object to clip and plot.
  # p1, p2:    numeric c(x, y) transect endpoints.
  # bal:       one-row data frame holding the plot centre in coords.x1.
  # width:     transect width passed to clip_transect() (default 2).
  # colour_by: unquoted point attribute to colour by, e.g.
  #            factor(Classification); NULL (default) for no colouring.
  #
  # Returns the ggplot object.
  colour_by <- enquo(colour_by)
  data_clip <- clip_transect(las, p1, p2, width)
  p <- ggplot(data_clip@data, aes(X,Z)) + geom_point(size = 5) + coord_equal() + theme_minimal(base_size=30)+
    geom_vline(xintercept=bal$coords.x1, linetype="dashed", color = "red")+
    geom_vline(xintercept=bal$coords.x1-0.5, linetype="dashed", color = "blue")+
    geom_vline(xintercept=bal$coords.x1+0.5, linetype="dashed", color = "blue")+
    geom_vline(xintercept=bal$coords.x1-2.5, linetype="dashed", color = "black")+
    geom_vline(xintercept=bal$coords.x1+2.5, linetype="dashed", color = "black")+
    # "labels" spelled out (was "label", relying on partial argument matching).
    scale_colour_manual(values=c("1"="darkgreen", "2"="deeppink"),labels=c("Non-ground points","Ground points"),name="Classification")+
    theme(axis.text.x = element_text(size=30),axis.text.y = element_text(size=30))
  # Bug fix: enquo() always returns a quosure, so the previous
  # `!is.null(colour_by)` test was always TRUE and the colour aesthetic was
  # added even for colour_by = NULL. quo_is_null() tests the captured
  # expression itself (rlang is installed as a ggplot2/dplyr dependency).
  if (!rlang::quo_is_null(colour_by))
    p <- p + aes(color = !!colour_by) + labs(color = "")
  return(p)
}
# Build a 10 m-long cross-section through each example plot centre and save
# each figure to PNG (22 x 12 in).
p1=plot_crossection(las_bal,p1 = c(bal$coords.x1-5, bal$coords.x2),p2 = c(bal$coords.x1+5, bal$coords.x2),bal,colour_by = factor(Classification))
p2=plot_crossection(las_fert,p1 = c(fert$coords.x1[1]+5, fert$coords.x2[1]),p2 = c(fert$coords.x1[1]-5, fert$coords.x2[1]),fert,colour_by = factor(Classification))
p3=plot_crossection(las_tisza,p1 = c(tisza$coords.x1[1]+5, tisza$coords.x2[1]),p2 = c(tisza$coords.x1[1]-5, tisza$coords.x2[1]),tisza,colour_by = factor(Classification))
ggsave("Figcross_bal.png",plot = p1,width = 22, height = 12)
ggsave("Figcross_fer.png",plot = p2,width = 22, height = 12)
ggsave("Figcross_tiszaon.png",plot = p3,width = 22, height = 12)
#ggsave("Figcross_tiszaoff.png",plot = p3,width = 22, height = 12)
# Interactive 3D views; the Tisza cloud is clipped to 110-135 m elevation
# first (presumably to drop noise returns -- confirm the thresholds).
las_tisza@data=las_tisza@data[(las_tisza@data$Z<135 & las_tisza@data$Z>110),]
plot(las_bal,size=4,axis=FALSE,bg = "white")
plot(las_fert,size=4,axis=FALSE,bg = "white")
plot(las_tisza,size=4,axis=FALSE,bg = "white")
8b76a1ba0d25c134ff0d1867ff54d4af7cf32552 | cfb444f0995fce5f55e784d1e832852a55d8f744 | /man/faux_options.Rd | f9b6fa165c03179cea311ef3522f047d7ecc29ee | [
"MIT"
] | permissive | debruine/faux | 3a9dfc44da66e245a7b807220dd7e7d4ecfa1317 | f2be305bdc6e68658207b4ad1cdcd2d4baa1abb4 | refs/heads/master | 2023-07-19T18:28:54.258681 | 2023-07-07T16:59:24 | 2023-07-07T16:59:24 | 163,506,566 | 87 | 15 | NOASSERTION | 2023-01-30T10:09:37 | 2018-12-29T11:43:04 | R | UTF-8 | R | false | true | 933 | rd | faux_options.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/faux_options.R
\name{faux_options}
\alias{faux_options}
\title{Set/get global faux options}
\usage{
faux_options(...)
}
\arguments{
\item{...}{One of four: (1) nothing, then returns all options as a list; (2) a name of an option element, then returns its value; (3) a name-value pair which sets the corresponding option to the new value (and returns nothing), (4) a list with option-value pairs which sets all the corresponding arguments.}
}
\value{
a list of options, values of an option, or nothing
}
\description{
Global faux options are used, for example, to set the default separator for cell names.
}
\examples{
faux_options() # see all options
faux_options("sep") # see value of faux.sep
\dontrun{
# changes cell separator (e.g., A1.B2)
faux_options(sep = ".")
# changes cell separator back to default (e.g., A1_B2)
faux_options(sep = "_")
}
}
|
8457ff9e959821188bc9582eaf7b2c1a2e1f75db | 6bf88bdec264ae2d587955dfbd4f7e3848a21e2f | /WGCNA_lungca.R | 8c3ab1f3f6feb073ecaa4a3109c7ad870a5bce8a | [] | no_license | mdsgroup/riskstratmodel | ea02091c09009af89180cf0337327cec9c5b236f | 63717b73a0e8d9bafd878aa28e3867fdf4781f7c | refs/heads/master | 2020-05-30T01:28:13.856879 | 2017-03-03T05:07:00 | 2017-03-03T05:07:00 | 82,623,152 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,233 | r | WGCNA_lungca.R | ###############
#Preprocessing#
##############
# Dataset: Using RMA normalized array data of lung adenocarcinoma
load("./Rdata/final_normdata.Rdata")
load("./Rdata/final_adc subset.Rdata")
load("./Rdata/final_survival data.RData")
library(WGCNA)
library(ggplot2)
# sample name correction
# Truncate array column names to the bare GEO sample accession (GSMxxxxxxx);
# GSE50081/GSE37745 accessions are 10 characters, the rest 9.
colnames(GSE50081.rma) <- substr(colnames(GSE50081.rma), 1, 10)
colnames(GSE50081.rma)
colnames(GSE19188.rma) <- substr(colnames(GSE19188.rma), 1, 9)
colnames(GSE19188.rma)
colnames(GSE31546.rma) <- substr(colnames(GSE31546.rma), 1, 9)
colnames(GSE31546.rma)
colnames(GSE31210.rma) <- substr(colnames(GSE31210.rma), 1, 9)
colnames(GSE31210.rma)
colnames(GSE37745.rma) <- substr(colnames(GSE37745.rma), 1, 10)
colnames(GSE37745.rma)
colnames(GSE10245.rma) = substr(colnames(GSE10245.rma), 1, 9)
colnames(GSE10245.rma)
colnames(GSE33532.rma) = substr(colnames(GSE33532.rma), 1, 9)
colnames(GSE33532.rma)
colnames(GSE28571.rma) = substr(colnames(GSE28571.rma), 1, 9)
colnames(GSE28571.rma)
colnames(GSE27716.rma) = substr(colnames(GSE27716.rma), 1, 9)
colnames(GSE27716.rma)
colnames(GSE12667.rma) = substr(colnames(GSE12667.rma), 1, 9)
colnames(GSE12667.rma)
# geneFilter
# featureFilter() keeps one representative probe set per gene; each dataset
# is restricted to its adenocarcinoma samples (GSExxxxx.adc index vectors).
library(genefilter)
library(hgu133plus2.db)
GSE50081.exp.adc = exprs(featureFilter(GSE50081.rma[,GSE50081.adc]))
GSE19188.exp.adc = exprs(featureFilter(GSE19188.rma[,GSE19188.adc]))
GSE31546.exp.adc = exprs(featureFilter(GSE31546.rma[,GSE31546.adc]))
GSE37745.exp.adc = exprs(featureFilter(GSE37745.rma[,GSE37745.adc]))
GSE10245.exp.adc = exprs(featureFilter(GSE10245.rma[,GSE10245.adc]))
GSE33532.exp.adc = exprs(featureFilter(GSE33532.rma[,GSE33532.adc]))
GSE28571.exp.adc = exprs(featureFilter(GSE28571.rma[,GSE28571.adc]))
GSE27716.exp.adc = exprs(featureFilter(GSE27716.rma[,GSE27716.adc]))
GSE12667.exp.adc = exprs(featureFilter(GSE12667.rma[,GSE12667.adc]))
# Map the remaining probe-set IDs to HGNC gene symbols (hgu133plus2
# annotation package).
rownames(GSE50081.exp.adc) = unlist(mget(rownames(GSE50081.exp.adc), env = hgu133plus2SYMBOL))
rownames(GSE19188.exp.adc) = unlist(mget(rownames(GSE19188.exp.adc), env = hgu133plus2SYMBOL))
rownames(GSE31546.exp.adc) = unlist(mget(rownames(GSE31546.exp.adc), env = hgu133plus2SYMBOL))
rownames(GSE37745.exp.adc) = unlist(mget(rownames(GSE37745.exp.adc), env = hgu133plus2SYMBOL))
rownames(GSE10245.exp.adc) = unlist(mget(rownames(GSE10245.exp.adc), env = hgu133plus2SYMBOL))
rownames(GSE33532.exp.adc) = unlist(mget(rownames(GSE33532.exp.adc), env = hgu133plus2SYMBOL))
rownames(GSE28571.exp.adc) = unlist(mget(rownames(GSE28571.exp.adc), env = hgu133plus2SYMBOL))
rownames(GSE27716.exp.adc) = unlist(mget(rownames(GSE27716.exp.adc), env = hgu133plus2SYMBOL))
rownames(GSE12667.exp.adc) = unlist(mget(rownames(GSE12667.exp.adc), env = hgu133plus2SYMBOL))
# Expression filter: keep genes expressed (> log2(100)) in at least 25% of
# samples of a dataset.
f1 <- pOverA(0.25, log2(100))
#f2 <- function(x) {IQR(x) > 0.5}
ff <- filterfun(f1)
# apply filterfunction to each expression matrix
GSE50081.exp.adc <- GSE50081.exp.adc[genefilter(GSE50081.exp.adc, ff),]
GSE19188.exp.adc <- GSE19188.exp.adc[genefilter(GSE19188.exp.adc, ff),]
GSE31546.exp.adc <- GSE31546.exp.adc[genefilter(GSE31546.exp.adc, ff),]
GSE37745.exp.adc <- GSE37745.exp.adc[genefilter(GSE37745.exp.adc, ff),]
GSE10245.exp.adc <- GSE10245.exp.adc[genefilter(GSE10245.exp.adc, ff),]
GSE33532.exp.adc <- GSE33532.exp.adc[genefilter(GSE33532.exp.adc, ff),]
GSE28571.exp.adc <- GSE28571.exp.adc[genefilter(GSE28571.exp.adc, ff),]
GSE27716.exp.adc <- GSE27716.exp.adc[genefilter(GSE27716.exp.adc, ff),]
GSE12667.exp.adc <- GSE12667.exp.adc[genefilter(GSE12667.exp.adc, ff),]
# intersect probeset
# Running intersection of the gene sets surviving the filter in every
# dataset (int3 was skipped in the numbering; int2 feeds int4 directly).
int1 <- intersect(rownames(GSE50081.exp.adc), rownames(GSE19188.exp.adc))
int2 <- intersect(int1, rownames(GSE31546.exp.adc))
int4 <- intersect(int2, rownames(GSE37745.exp.adc))
int5 <- intersect(int4, rownames(GSE10245.exp.adc))
int6 <- intersect(int5, rownames(GSE33532.exp.adc))
int7 <- intersect(int6, rownames(GSE28571.exp.adc))
int8 <- intersect(int7, rownames(GSE27716.exp.adc))
int9 <- intersect(int8, rownames(GSE12667.exp.adc))
# select genes
GSE50081.exp.adc <- GSE50081.exp.adc[int9,]
GSE19188.exp.adc <- GSE19188.exp.adc[int9,]
GSE31546.exp.adc <- GSE31546.exp.adc[int9,]
GSE37745.exp.adc <- GSE37745.exp.adc[int9,]
GSE10245.exp.adc <- GSE10245.exp.adc[int9,]
GSE33532.exp.adc <- GSE33532.exp.adc[int9,]
GSE28571.exp.adc <- GSE28571.exp.adc[int9,]
GSE27716.exp.adc <- GSE27716.exp.adc[int9,]
GSE12667.exp.adc <- GSE12667.exp.adc[int9,]
# merge expression matrix (samples with or without survival data)
GSE.exp0 = cbind(GSE50081.exp.adc, GSE19188.exp.adc, GSE31546.exp.adc,
GSE37745.exp.adc, GSE10245.exp.adc, GSE33532.exp.adc, GSE28571.exp.adc,
GSE27716.exp.adc, GSE12667.exp.adc)
dim(GSE.exp0)
gene_all = rownames(GSE.exp0)
# Remove batch effect using combat
# One batch label per dataset, in cbind order above.
# NOTE(review): batch label 4 is skipped -- presumably a dataset was dropped
# from an earlier version; harmless to ComBat but confirm the intent.
library(sva)
batch = c(rep(1, ncol(GSE50081.exp.adc)), rep(2, ncol(GSE19188.exp.adc)),
rep(3, ncol(GSE31546.exp.adc)),
rep(5, ncol(GSE37745.exp.adc)), rep(6, ncol(GSE10245.exp.adc)),
rep(7, ncol(GSE33532.exp.adc)), rep(8, ncol(GSE28571.exp.adc)),
rep(9, ncol(GSE27716.exp.adc)), rep(10, ncol(GSE12667.exp.adc)))
GSE.combat = ComBat(dat = GSE.exp0, batch = batch, par.prior = T, prior.plots = F)
rownames(GSE.combat) = gene_all # change probe set ids to gene names
# IAC filtering
# Outlier-sample removal based on Inter-Array Correlation (IAC): samples
# whose mean correlation with all other samples falls more than 2 SD below
# the overall mean are dropped.
sizeGrWindow(5,10)
par(mfrow = c(1,2))
IAC <- cor(GSE.combat, use = "p") # cauclating IACs for all pairs of samples
hist(IAC, sub = paste("Mean =", format(mean(IAC[upper.tri(IAC)]), digits = 3)))
meanIAC <- apply(IAC, 2, mean)
sdCorr <- sd(meanIAC)
numbersd <- (meanIAC - mean(meanIAC)) / sdCorr
plot(numbersd)
abline(h = -2, col = "red", lwd = 1)
sdout <- -2
outliers <- colnames(GSE.combat)[numbersd < sdout]
show(outliers)
GSE.filt <- GSE.combat[,numbersd > sdout]
dim(GSE.filt)
# Re-check the IAC distribution after removing the outliers.
IAC2 <- cor(GSE.filt, use="p")
hist(IAC2, sub = paste("Mean =", format(mean(IAC2[upper.tri(IAC2)]), digits = 3)))
meanIAC2 <- apply(IAC2, 2, mean)
sdCorr2 <- sd(meanIAC2)
numbersd2 <- (meanIAC2 - mean(meanIAC2)) / sdCorr2
plot(numbersd2)
#########
# WGCNA #
#########
# Check data for excessive missing values and flag outlier genes/samples.
# Bug fix: the original line ended in stray tokens ("...verbose = 3)ddbs" and
# a bare "ghd" on the next line), which is a syntax error; removed.
# NOTE(review): goodSamplesGenes() expects samples in rows / genes in
# columns, but GSE.filt is only transposed below -- confirm the intended
# orientation of this check.
gsg <- goodSamplesGenes(GSE.filt, verbose = 3)
gsg$allOK # if TRUE, all genes have passed the cuts
# Transpose so rows = samples and columns = genes, the orientation required
# by the WGCNA network-construction functions.
GSE.filt<- t(GSE.filt)
## Construction of gene network and identification of modules
# Candidate soft-thresholding powers to evaluate.
powers <- c(c(1:10), seq(from = 12, to = 20, by = 2))
# call the network topology analysis function
sft <- pickSoftThreshold(GSE.filt, powerVector = powers, verbose = 5)
# Plot scale-free-fit R^2 and mean connectivity against candidate power.
sizeGrWindow(9, 5)
par(mfrow = c(1,2))
cex1 = 0.9
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
xlab = "Soft Threshold (power)", ylab = "Scale Free Topology Model Fit, signed R^2", type = "n",
main = paste("Scale independence"))
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
labels = powers, cex = cex1, col = "red")
# this line corresponds to using an R^2 cut-off of h = 0.95
abline(h = 0.95, col = "red")
# Mean connectivity as a function of the soft-thresholding power
plot(sft$fitIndices[,1], sft$fitIndices[,5],
xlab = "Soft Threshold (power)", ylab = "Mean Connectivity", type = "n",
main = paste("Mean Connectivity"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels = powers, cex = cex1, col = "red")
### Two step
# Two-step WGCNA: adjacency -> topological overlap matrix (TOM) -> average-
# linkage clustering -> dynamic tree cut into modules.
softPower <- 6
adjacency <- adjacency(GSE.filt, power = softPower)
## turn adjacency into topological overlap
TOM <- TOMsimilarity(adjacency)
dissTOM <- 1-TOM
## clustering using TOM
geneTree <- hclust(as.dist(dissTOM), method = "average")
sizeGrWindow(12, 9)
plot(geneTree, xlab = "", sub = "", main = "Gene clustering on TOM-based dissimilarity",
labels = F, hang = 0.04)
minModuleSize <- 30
# module identification using dynamic tree cut
dynamicMods <- cutreeDynamic(dendro = geneTree, distM = dissTOM,
deepSplit = 2, pamRespectsDendro = F,
minClusterSize = minModuleSize)
table(dynamicMods)
# convert numeric labels into colors
dynamicColors <- labels2colors(dynamicMods)
table(dynamicColors)
# plot the dendrogram and colors underneath
sizeGrWindow(8,6)
plotDendroAndColors(geneTree, dynamicColors, "Dynamic Tree Cut",
dendroLabels = F, hang = 0.03,
addGuide = T, guideHang = 0.05,
main = "Gene dendrogram and module colors")
## Merging of modules whose expression profiles are very similar
#calculate eigengenes
MEList <- moduleEigengenes(GSE.filt, colors = dynamicColors)
MEs <- MEList$eigengenes
#Calculate Principal component coefficients
# Store, per module, the PC1 rotation and singular value from an SVD of the
# scaled module expression, so eigengene scores can be reproduced on
# external validation datasets with the SAME loadings (see the GSE31210 and
# GSE30219 sections below).
module_gene_pc=list()
module_gene_sqrlatent=list()
module_gene_score=list()
for (i in colnames(MEs))
{
module_gene = GSE.filt[, dynamicColors==gsub("ME","",i)]
module_gene_svd = svd(t(scale(module_gene)))
module_gene_pc[[i]] = module_gene_svd$u[,1] # for pc1 rotation
module_gene_sqrlatent[[i]] = module_gene_svd$d[1] #for pc1 latent, square root
module_gene_score[[i]] = scale(module_gene) %*% module_gene_pc[[i]] / module_gene_sqrlatent[[i]]
}
# calculate dissimilarity of module eigengenes
MEDiss <- 1-cor(MEs)
# cluster module eigengenes
METree <- hclust(as.dist(MEDiss), method = "average")
# plot the result
sizeGrWindow(7, 6)
plot(METree, main = "Clustering of module eigengenes", xlab = "", sub = "")
moduleLabels <- dynamicMods
moduleColors <- dynamicColors
## Visualize the network of eigengenes
sizeGrWindow(5, 7.5)
par(cex = 0.9)
plotEigengeneNetworks(MEs, "", marDendro = c(0, 4, 1, 2),
marHeatmap = c(3, 4, 1, 2), cex.lab = 0.8, xLabelsAngle = 90)
########################
#  Survival analysis   #
########################
library(survival)
# Merge survival data for the four training cohorts that have follow-up, and
# restrict it to samples that survived the IAC outlier filter.
GSE.pheno.v2 = rbind(GSE50081.surv, GSE19188.surv, GSE31546.surv, GSE37745.surv)
index = rownames(GSE.pheno.v2) %in% rownames(GSE.filt)
index = which(index)
GSE.pheno.v2 = GSE.pheno.v2[index,] # indexing d/t outlier removal
GSE.pheno.v2$surv = Surv(GSE.pheno.v2$time, GSE.pheno.v2$status == "dead") # build surv object
# Subset module eigengenes to the samples with survival data.
# NOTE(review): coxph() below pairs MEs rows with GSE.pheno.v2 rows by
# position; this assumes both objects list the samples in the same order --
# confirm, or reorder MEs by rownames(GSE.pheno.v2).
rownames(MEs) = rownames(GSE.filt)
index.ME = rownames(MEs) %in% rownames(GSE.pheno.v2)
index.ME = which(index.ME)
MEs = MEs[index.ME,] # MEs with survival data
GSE.filt.surv = GSE.filt[index.ME,] # expression matrix with survival data
# Univariate Cox regression per module eigengene: keep the Wald p-value and
# the concordance index (C-index) of each model.
res.cox.p<-vector()
res.cox.ci<-vector()
for (i in seq_len(ncol(MEs)))
{
res.cox1<-coxph(GSE.pheno.v2$surv ~ MEs[,i])
res.cox1.sum<-summary(res.cox1)
res.cox.p[i]<-as.numeric(res.cox1.sum$coefficients[,5])
# Bug fix: "$concondance" was a typo -- summary.coxph has no such element,
# as.numeric(NULL) is length 0, and assigning it to res.cox.ci[i] errors
# with "replacement has length zero". "$concordance" matches the two
# validation sections below.
res.cox.ci[i]<-as.numeric(res.cox1.sum$concordance[1])
}
names(res.cox.p)=colnames(MEs)
# Plotting modules and p-values of univariate cox model.
# Dashed red line marks p = 0.05; cox.index = survival-associated modules.
MEcolors = gsub("ME","",colnames(MEs))
sizeGrWindow(10,10)
plot(-log10(res.cox.p), xlab="Modules", ylab="-log10(p-value)", col=MEcolors,pch=16, cex=1.5, xaxt="n") #cex: size, pch: no of shape
text(-log10(res.cox.p), MEcolors,cex=0.7, pos=4, col=1)
axis(1, labels=MEcolors, at=1:ncol(MEs), cex.axis=1, las=2)
cox.index<-which(res.cox.p<0.05) #Significant modules...
abline(h = -log(0.05, base = 10), lwd = 1, lty = 3, col = "red")
### Validation: GSE31210 ######
# Apply the training-set module definitions (gene membership, PC1 loadings
# and singular values from module_gene_pc / module_gene_sqrlatent) to the
# independent GSE31210 cohort and re-test each module eigengene in a
# univariate Cox model.
GSE31210.exp.adc = exprs(featureFilter(GSE31210.rma))
rownames(GSE31210.exp.adc) = unlist(mget(rownames(GSE31210.exp.adc), env = hgu133plus2SYMBOL))
GSE31210.exp = GSE31210.exp.adc[gene_all,]
GSE31210.exp= t(GSE31210.exp)
#Extract module eigengenes (modules extracted by multiple GEO dataset) <- from ME coefficient of train set
MEs_val=matrix(0,nrow(GSE31210.exp),length(module_gene_pc))
colnames(MEs_val)=names(module_gene_pc)
for (i in names(module_gene_pc))
{
module_gene=GSE31210.exp[,dynamicColors==gsub("ME","",i)]
module_gene_score=scale(module_gene) %*% module_gene_pc[[i]] / module_gene_sqrlatent[[i]]
MEs_val[,i]=module_gene_score
}
# Align the validation survival data with the expression samples.
# NOTE(review): as in the training section, coxph() pairs rows of MEs_val
# and GSE31210.surv by position -- confirm the sample orders match.
inds= rownames(GSE31210.surv) %in% rownames(GSE31210.exp)
GSE31210.surv = GSE31210.surv[inds,]
GSE31210.surv$surv <- Surv(GSE31210.surv$time, GSE31210.surv$status == "dead")
rownames(MEs_val) = rownames(GSE31210.exp)
index.ME = rownames(MEs_val) %in% rownames(GSE31210.surv)
index.ME = which(index.ME == T)
MEs_val = MEs_val[index.ME,] # MEs with survival data
# Cox, univariate
resval.cox.p<-vector()
resval.cox.ci<-vector()
for (i in 1:ncol(MEs_val))
{
resval.cox1<-coxph(GSE31210.surv$surv ~ MEs_val[,i])
resval.cox1.sum<-summary(resval.cox1)
resval.cox.p[i]<-as.numeric(resval.cox1.sum$coefficients[,5])
resval.cox.ci[i]<-as.numeric(resval.cox1.sum$concordance[1])
}
names(resval.cox.p)=colnames(MEs)
#Plotting modules and p-values of univariate cox model.
#Original Training & Validation
# Bar plot of validation -log10(p) for the modules that were significant in
# training (cox.index); dashed line marks p = 0.05.
tmp=data.frame(MEcolors[cox.index],-log10(resval.cox.p)[cox.index])
colnames(tmp)=c("Modules","p")
p1=ggplot(data=tmp, aes(x=reorder(Modules,p),y=p))+
geom_bar(stat="identity", fill=tmp$Modules[order(tmp$p)], alpha = 0.8)+
geom_hline(yintercept=-log10(0.05) ,color="gray20", linetype=3)+
labs(x="Modules", y= "-log10(p-value)")+
theme_bw()+
theme(axis.text.x = element_text(size=8, angle=45, vjust=0.5), axis.ticks = element_blank(),
panel.grid.major = element_line(colour = "grey80"),
panel.grid.minor = element_blank())
p1
####GSE30219####
# Second independent validation cohort (GSE30219); same procedure as the
# GSE31210 section above using the frozen training-set module loadings.
colnames(GSE30219.rma) <- substr(colnames(GSE30219.rma), 1, 9)
GSE30219.exp.adc = exprs(featureFilter(GSE30219.rma))
rownames(GSE30219.exp.adc) = unlist(mget(rownames(GSE30219.exp.adc), env = hgu133plus2SYMBOL))
GSE30219.exp = GSE30219.exp.adc[gene_all,]
GSE30219.exp= t(GSE30219.exp)
#Extract module eigengenes (modules extracted by multiple GEO dataset) <- from ME coefficient of train set
MEs_val2=matrix(0,nrow(GSE30219.exp),length(module_gene_pc))
colnames(MEs_val2)=names(module_gene_pc)
for (i in names(module_gene_pc))
{
module_gene=GSE30219.exp[,dynamicColors==gsub("ME","",i)]
module_gene_score=scale(module_gene) %*% module_gene_pc[[i]] / module_gene_sqrlatent[[i]]
MEs_val2[,i]=module_gene_score
}
# Align survival data with the expression samples (positional pairing in
# coxph below -- see the NOTE in the training section).
inds= rownames(GSE30219.surv) %in% rownames(GSE30219.exp)
GSE30219.surv = GSE30219.surv[inds,]
GSE30219.surv$surv <- Surv(GSE30219.surv$time, GSE30219.surv$status == "dead")
rownames(MEs_val2) = rownames(GSE30219.exp)
index.ME2 = rownames(MEs_val2) %in% rownames(GSE30219.surv)
index.ME2 = which(index.ME2 == T)
MEs_val2 = MEs_val2[index.ME2,] # MEs with survival data
# Cox, univariate
resval2.cox.p<-vector()
resval2.cox.ci<-vector()
for (i in 1:ncol(MEs_val2))
{
resval2.cox1<-coxph(GSE30219.surv$surv ~ MEs_val2[,i])
resval2.cox1.sum<-summary(resval2.cox1)
resval2.cox.p[i]<-as.numeric(resval2.cox1.sum$coefficients[,5])
resval2.cox.ci[i]<-as.numeric(resval2.cox1.sum$concordance[1])
}
names(resval2.cox.p)=colnames(MEs)
#Plotting modules and p-values of univariate cox model.
# Bar plot of validation -log10(p) for the training-significant modules.
tmp=data.frame(MEcolors[cox.index],-log10(resval2.cox.p)[cox.index])
colnames(tmp)=c("Modules","p")
p2=ggplot(data=tmp, aes(x=reorder(Modules,p),y=p))+
geom_bar(stat="identity", fill=tmp$Modules[order(tmp$p)], alpha = 0.8)+
geom_hline(yintercept=-log10(0.05) ,color="gray20", linetype=3)+
labs(x="Modules", y= "-log10(p-value)")+
theme_bw()+
theme(axis.text.x = element_text(size=8, angle=45, vjust=0.5), axis.ticks = element_blank(),
panel.grid.major = element_line(colour = "grey80"),
panel.grid.minor = element_blank())
p2
##########################################
#Processing for Deep learning modeling #
##########################################
###Gene module membership
geneModuleMembership = as.data.frame(cor(GSE.filt.surv, MEs, use = "p"))
MMPvalue = as.data.frame(corPvalueStudent(as.matrix(geneModuleMembership), nrow(GSE.filt.surv)))
gMM.module=list()
gMM.order=list()
MM.cox.p=list()
MM.GSE=list()
for (which.ME in names(cox.index))
{
which.module=gsub("ME","",which.ME)
gMM.module[[which.module]]=geneModuleMembership[moduleColors==which.module,which.ME]
names(gMM.module[[which.module]])=gene_all[moduleColors==which.module]
gMM.order[[which.module]]=order(abs(gMM.module[[which.module]]),decreasing=T)
MM.cox.p[[which.module]]<-vector()
MM.GSE[[which.module]] <- GSE.filt.surv[,moduleColors==which.module]
for (i in 1:ncol(MM.GSE[[which.module]]))
{
res.cox1<-coxph(GSE.pheno.v2$surv ~ MM.GSE[[which.module]][,i])
res.cox1.sum<-summary(res.cox1)
MM.cox.p[[which.module]][i]<-as.numeric(res.cox1.sum$coefficients[,5])
}
names(MM.cox.p[[which.module]])=gene_all[moduleColors==which.module]
}
#Plot survival-related modules
sizeGrWindow(5,15)
par(mfrow = c(2,3))
for (which.ME in names(cox.index))
{
which.module=gsub("ME","",which.ME)
plot(abs(gMM.module[[which.module]]), -log10(MM.cox.p[[which.module]]), xlab="Gene Module Membership", ylab="-log10(p-value)", cex=0.7, pch=16, col=which.module) #,xlab="Gene Module Membership", ylab="-log10(p-value)"
text(abs(gMM.module[[which.module]][gMM.order[[which.module]][1:10]]), -log10(MM.cox.p[[which.module]][gMM.order[[which.module]][1:10]]),
names(MM.cox.p[[which.module]])[gMM.order[[which.module]][1:10]],cex=0.5, pos=1, col=1, offset=0.1)
mtext( paste("r=",toString(round(cor(abs(gMM.module[[which.module]]), -log10(MM.cox.p[[which.module]])),digits=2)),
"\np=", toString(cor.test(abs(gMM.module[[which.module]]), -log10(MM.cox.p[[which.module]]))$p.value)),
cex=0.5)
}
###Export Top genes for significant modules
topno=10 # no. of genes per module
GSE.sig=list()
dir.create("./ModuleGenes", showWarnings = FALSE)
for (which.ME in names(cox.index))
{
which.module=gsub("ME","",which.ME)
GSE.sig[[which.module]]=MM.GSE[[which.module]][,gMM.order[[which.module]][1:topno]]
write.table(GSE.sig[[which.module]], file = paste("./ModuleGenes/GSE",which.module, ".csv",sep=""), sep = ",", quote = TRUE, row.names = FALSE)
}
write.table(GSE.pheno.v2, file="./ModuleGenes/GSEpheno.csv", sep=",", quote=TRUE, row.names=TRUE)
save(GSE.sig, file = "./Rdata/SignificantModules.RData")
#--> To python code..
#Validation set : GSE31210
GSE31210.sig=list()
valindx= rownames(GSE31210.exp) %in% rownames(GSE31210.surv)
GSE31210.exp.val=GSE31210.exp[valindx,]
dir.create("./ModuleGenes_validation", showWarnings = FALSE)
for (which.module in names(GSE.sig))
{
GSE31210.sig[[which.module]] = GSE31210.exp.val[, colnames(GSE.sig[[which.module]])]
write.table(GSE31210.sig[[which.module]], file=paste("./ModuleGenes_validation/GSE",which.module,".csv",sep=""),sep = ",", quote = TRUE, row.names = FALSE)
}
write.table(GSE31210.surv, file="./ModuleGenes_validation/GSEpheno.csv", sep=",", quote=TRUE, row.names=TRUE)
#--> TO python testset.
#Validation set : GSE30219
# Second validation cohort; identical export logic to the GSE31210 block.
# NOTE(review): rows are filtered with %in% but not reordered to match
# GSE30219.surv -- verify both tables share the same sample order.
GSE30219.sig=list()
valindx= rownames(GSE30219.exp) %in% rownames(GSE30219.surv)
GSE30219.exp.val=GSE30219.exp[valindx,]
dir.create("./ModuleGenes_validation2", showWarnings = FALSE)
for (which.module in names(GSE.sig))
{
  # same gene columns as the training export for this module
  GSE30219.sig[[which.module]] = GSE30219.exp.val[, colnames(GSE.sig[[which.module]])]
  write.table(GSE30219.sig[[which.module]], file=paste("./ModuleGenes_validation2/GSE",which.module,".csv",sep=""),sep = ",", quote = TRUE, row.names = FALSE)
}
write.table(GSE30219.surv, file="./ModuleGenes_validation2/GSEpheno.csv", sep=",", quote=TRUE, row.names=TRUE)
#--> TO python testset.
|
1dbe125621a1590f8ec15ae7f180f4e5ee5ac477 | 9d6bcf01b24542dedd0b75cebcb4b468595addf0 | /R/IsDoublet.R | 348d3bad9d516f2a41c10c49af1ce34702f1ccb2 | [] | no_license | lyc-1995/DoubletDeconSeurat | ec2995e71caa9e4842b2e3baa0638135090ea827 | 34c63801ffad93bcddf49536dfae35035b0f2b15 | refs/heads/master | 2020-09-22T03:59:38.608483 | 2020-03-06T16:34:19 | 2020-03-06T16:34:19 | 225,041,620 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,107 | r | IsDoublet.R | #' Is A Doublet
#'
#' This function uses deconvolution analysis (DeconRNASeq) to evaluate each cell for equal contribution from blacklisted clusters.
#'
#' @param data Processed data from CleanUpInput (or RemoveCellCycle).
#' @param newMedoids New combined medoids from BlacklistGroups.
#' @param groups Processed groups file from CleanUpInput.
#' @param synthProfiles Average profiles of synthetic doublets from SyntheticDoublets.
#' @param log_file_name used for saving run notes to log file
#'
#' @return isADoublet - data.frame with each cell as a row and whether it is called a doublet by deconvolution analysis.
#' @return resultsreadable - data.frame with results of deconvolution analysis (cell by cluster) in percentages.
#'
#' @keywords doublet deconvolution decon
#'
#' @export
#'
IsDoublet <- function(
  data,
  newMedoids,
  groups,
  synthProfiles,
  log_file_name
) {
  # Flags each cell as a putative doublet by deconvolving its expression
  # against the combined medoids (DeconRNASeq) and asking whether its
  # best-matching reference profile is one of the synthetic doublet profiles
  # rather than an original cluster profile.
  # NOTE(review): the first row and first column of `data` are skipped
  # throughout, i.e. they are assumed to hold labels, not expression values.
  #create data frame to store doublets table
  # one row per cell (data column), 4 result columns filled in below
  isADoublet <- data.frame(matrix(ncol = 4, nrow = (ncol(x = data) - 1)))
  rownames(x = isADoublet) <- colnames(x = data)[2:ncol(x = data)]
  rownames(x = newMedoids) <- rownames(x = data)[2:nrow(x = data)]
  #run DeconRNASeq with new medoids and data
  # estimated per-cell mixing proportions over the medoid profiles
  results <- DeconRNASeq(data[2:nrow(x = data), 2:ncol(x = data)], newMedoids)
  resultsreadable <- round(results$out.all*100, 2)   # proportions as percentages
  rownames(x = resultsreadable) <- rownames(x = isADoublet) #make an easily readable results table
  #get average profiles for cell clusters
  averagesReal <- as.data.frame(matrix(ncol = ncol(x = resultsreadable), nrow = length(x = unique(groups[, 2]))))
  colnames(x = averagesReal) <- colnames(x = resultsreadable)
  # mean deconvolution profile of each original cluster
  # NOTE(review): the loop count comes from unique(groups[, 2]) but the
  # subset tests groups[, 1] == clust -- assumes column 1 holds numeric
  # cluster ids 1..K matching column 2 one-to-one; confirm against
  # CleanUpInput's output format.
  for (clust in 1:length(x = unique(groups[, 2]))) {
    cells <- row.names(x = subset(groups, groups[, 1] == clust))
    subsetResults <- resultsreadable[row.names(x = resultsreadable) %in% cells, , drop = FALSE]
    averagesReal[clust, ] <- apply(subsetResults, 2, mean)
  }
  #create a table with average profiles of cell clusters and synthetic combinations
  # reference profiles: original clusters first, synthetic doublets after
  allProfiles <- rbind(averagesReal, synthProfiles)
  #this section determines the profile with the highest correlation to the given cell and determines if it is one of the doublet profiles
  for (cell in 1:nrow(x = isADoublet)) {
    if (ncol(x = resultsreadable) == 2) { #If there are only 2 groups, correlation won't work, so I use minimum euclidean distance instead
      # append this cell's profile, take the full distance matrix, and read
      # off the last row: distances to every reference profile (self excluded)
      a <- rbind(allProfiles, resultsreadable[cell, ])
      b <- as.matrix(dist(a))
      c <- b[nrow(x = b), 1:(ncol(x = b) - 1)]
      # nearest reference profile; with exact ties this keeps all of them
      chosenCorrelation <- c[c %in% min(x = c)]
      isADoublet[cell, 1] <- 100 - chosenCorrelation #100-euclidean distance
      isADoublet[cell, 2] <- names(chosenCorrelation)
      if (names(chosenCorrelation) %in% unique(groups[, 2])) { #it is an original cluster
        isADoublet[cell, 3] <- FALSE
      } else {
        isADoublet[cell, 3] <- TRUE
      }
    } else {
      #correlations=apply(allProfiles, 1, cor, resultsreadable[cell,])
      # Pearson correlation of the cell's deconvolution profile with every
      # reference profile
      correlations <- apply(allProfiles, 1, cor, resultsreadable[cell, ])
      sortCorrelations <- sort(correlations, decreasing = TRUE)[1:2]
      maxCorrelation1 <- which(correlations == sortCorrelations[1])
      # NOTE(review): maxCorrelation2 is computed but never used; with exact
      # ties maxCorrelation1 can also have length > 1 -- verify intent.
      maxCorrelation2 <- which(correlations == sortCorrelations[2])
      chosenCorrelation <- maxCorrelation1
      isADoublet[cell, 1] <- correlations[chosenCorrelation]
      correlatedCluster <- row.names(x = allProfiles)[chosenCorrelation]
      isADoublet[cell, 2] <- correlatedCluster
      # rows past the original-cluster count in allProfiles are synthetic
      # doublet profiles, so an index beyond that count flags a doublet
      if (chosenCorrelation > length(x = unique(groups[, 2]))) {
        isADoublet[cell, 3] <- TRUE
      } else {
        isADoublet[cell, 3] <- FALSE
      }
    }
  }
  isADoublet[, 4] <- groups[, 2]   # carry along each cell's original cluster label
  colnames(x = isADoublet) <- c('Distance','Cell_Types', 'isADoublet', 'Group_Cluster')
  # NOTE(review): the message says "removed", but this function only flags
  # doublets; actual removal happens in the caller.
  message(paste0(length(which(isADoublet$isADoublet == TRUE)), '/', nrow(x = isADoublet), ' possible doublets removed'))
  cat(paste0(length(which(isADoublet$isADoublet == TRUE)), '/', nrow(x = isADoublet), ' possible doublets removed'), file = log_file_name, append = TRUE, sep = '\n')
  return(list(isADoublet = isADoublet, resultsreadable = resultsreadable))
}
|
52c11fdb50ad5930df7b9c52599db84f2b19eb11 | 775c56dc8fadc1e6f793d7e7c565886947d18523 | /man/store.sql.Rd | 793c17aa4fdd72a299a8a32582f5d110e59ea602 | [] | no_license | ndesmo/rquery | b91200adca65fa82c43c69039f3e10379685d291 | 50a1819595c092610798da8c37ffdfae80573a3e | refs/heads/master | 2021-07-13T01:22:56.356672 | 2017-10-11T09:08:30 | 2017-10-11T09:08:30 | 105,581,171 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 465 | rd | store.sql.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/store.sql.R
\name{store.sql}
\alias{store.sql}
\title{Store SQL}
\usage{
store.sql(name, sql = NULL, sql.dir = "sql")
}
\arguments{
\item{name}{Name of the query}
\item{sql}{SQL query to run}
\item{sql.dir}{SQL query directory. Stores all the SQL used. If subs were provided, it saves the query before the subs were inserted.}
}
\description{
Store the SQL query to the filesystem.
}
|
cef24e56c22cb8a5adfcea811a5fb3f255991333 | d09157aa6b0827caacc7b61aacc4af8dd20b8f85 | /man/d2r.Rd | daba61797ad31018762b097b9b2ae36690904d0f | [
"MIT"
] | permissive | tunelipt/wutils | 81147b837dc8851153fbfceade349f8238e597c7 | dadc24991e2be3b54651c0c359bb066384c1b844 | refs/heads/master | 2020-07-25T13:33:46.697935 | 2019-09-13T17:35:35 | 2019-09-13T17:35:35 | 208,308,145 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 611 | rd | d2r.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trig.R
\name{d2r}
\alias{d2r}
\alias{r2d}
\alias{r2d}
\title{Conversion between degrees and radians.}
\usage{
d2r(x)
r2d(x)
}
\arguments{
\item{x}{Numeric vector containing angle in degrees (\code{d2r}) or radians (\code{r2d}).}
}
\value{
Angle in radians (\code{d2r}) or in degrees (\code{r2d})
}
\description{
Converts angles from degrees to radians and from radians to
to degrees.
}
\details{
\code{d2r} Degrees -> Radians.
\code{r2d} Radians -> Degrees.
}
\examples{
ad <- 45
ar <- d2r(ad)
print(ar)
ad2 <- r2d(ar)
print(ad2)
}
|
dd69dfb752307fce6c3f652d28a69e09e895e2e8 | 2e731f06724220b65c2357d6ce825cf8648fdd30 | /BayesMRA/inst/testfiles/rmvn_arma_scalar/AFL_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1615925935-test.R | 21595c9d29f6342034d79dc596e1e839b5c2dfce | [] | no_license | akhikolla/updatedatatype-list1 | 6bdca217d940327d3ad42144b964d0aa7b7f5d25 | 3c69a987b90f1adb52899c37b23e43ae82f9856a | refs/heads/master | 2023-03-19T11:41:13.361220 | 2021-03-20T15:40:18 | 2021-03-20T15:40:18 | 349,763,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | 1615925935-test.R | testlist <- list(a = 2.22799651300306e+297, b = -2.82893518951238e-60)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result) |
73ac5b69ba8e2810127061b692dccb4bddcd7ce3 | 97cb06c66e7b81712206be2ae9dbe5407955e5d0 | /man/tracks.AsspDataObj.Rd | 93b1ddf6cbdc1f3e2c81d86e0dc260ea6bc01544 | [] | no_license | IPS-LMU/wrassp | b25b0324222827ac5963b83960ed436d31150dba | 462f246a0f7e40fbe7690f594889542babcca679 | refs/heads/master | 2023-04-07T04:00:15.362384 | 2023-04-04T14:55:24 | 2023-04-04T14:55:24 | 10,401,604 | 23 | 7 | null | null | null | null | UTF-8 | R | false | true | 556 | rd | tracks.AsspDataObj.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AsspDataObj.R
\name{tracks.AsspDataObj}
\alias{tracks.AsspDataObj}
\title{tracks.AsspDataObj}
\usage{
tracks.AsspDataObj(x)
}
\arguments{
\item{x}{an object of class AsspDataObj}
}
\value{
a character vector containing the names of the tracks
}
\description{
List the tracks of an AsspDataObj
}
\details{
AsspDataObj contain tracks (at least one). This function lists the names
of these tracks. This function is equivalent to calling \code{names(x)}.
}
\author{
Lasse Bombien
}
|
6c9a0dff99444d8de0d785f00dc2dddac03aac2f | 7705dfc1f7b74694e0e0b84389d1efe882971628 | /Simulation studies/Simulation5.R | 3b2bb6c1d8d6ea0fd616b847e0e6846b47995801 | [] | no_license | vissermachiel/More-with-LESS | 1bf74a417b4c21b6f5de1e7a8ad6fb9289541c48 | e69714382e0b9110807b861ee5a43702cf2562e0 | refs/heads/master | 2020-09-23T02:35:45.280941 | 2020-04-28T16:07:16 | 2020-04-28T16:07:16 | 225,380,483 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 93,096 | r | Simulation5.R | rm(list = ls())
set.seed(0952702)   # global seed; each replication reseeds with 0952702 + k inside the loop
#### General information ####
# n1 = n2 = 50,
# V1 = meaningful variable
# V2-V1000 = not meaningful (noise) variables
# y = response variable with labels {-1, +1}
# Load packages
library(Matrix)
library(foreach)
library(parallel)
library(iterators)
library(doParallel)
library(glmnet)
library(lpSolve)
library(ggplot2)
library(pROC)
library(LiblineaR)
library(telegram)
library(latex2exp)
library(scales)
# project helpers: LESS classifier + scaling variants, and Telegram notification hooks
source("Scripts/lesstwoc_mv.R")
source("Scripts/Telegram.R")
# General data info
n1 <- 50 # number of observations of class 1
n2 <- 50 # number of observations of class 2
N <- n1 + n2 # total number of observations
mu1 <- -0.5 # mean of class 1 on dimension 1
mu2 <- 0.5 # mean of class 2 on dimension 1
P.log <- round(10^seq(0, 3, length.out = 16)) # number of variables (logspace)
K <- 100 # number of simulations to average over
ncores <- 50 # number of parallel cpu cores
nfolds <- 10 # Number of folds for cross-validation
# Create empty data objects
# Train: N rows x max(P.log) predictor columns (filled anew per replication)
# plus the fixed class label y in {-1, +1}; test set is 100x larger per class.
train <- cbind(as.data.frame(matrix(NA, nrow = N, ncol = max(P.log))),
               y = factor(c(rep(-1, n1), rep(1, n2)), levels = c("-1", "1")))
test <- cbind(as.data.frame(matrix(NA, nrow = 100 * N, ncol = max(P.log))),
              y = factor(c(rep(-1, 100 * n1), rep(1, 100 * n2)), levels = c("-1", "1")))
# Pre-allocate the results container.  For every classifier (less, lrl1,
# lasso, svml1) crossed with every scaling variant (none, std, less, lessstd,
# lessstd2) it holds: a coefficient matrix (one row per dimensionality step,
# one column per variable), the cross-validated cost C, test-set AUC,
# test-set accuracy, and the number of non-zero coefficients; plus a slot for
# the total run time.  Built programmatically instead of spelling out all
# 101 elements; names, order and values match the original literal exactly.
sim.list <- local({
  clf.names <- c("less", "lrl1", "lasso", "svml1")
  scale.names <- c("none", "std", "less", "lessstd", "lessstd2")
  # classifier-major order: less.none, less.std, ..., svml1.lessstd2
  combo.names <- paste(rep(clf.names, each = length(scale.names)),
                       rep(scale.names, times = length(clf.names)),
                       sep = ".")
  beta.template <- matrix(NA, nrow = length(P.log), ncol = max(P.log))
  per.group <- function(prefix, value) {
    setNames(rep(list(value), length(combo.names)),
             paste(prefix, combo.names, sep = "."))
  }
  c(per.group("betas", beta.template),
    per.group("cv", numeric(length(P.log))),
    per.group("auc", numeric(length(P.log))),
    per.group("accuracy", numeric(length(P.log))),
    per.group("numbetas", integer(length(P.log))),
    list(total.time = numeric(1)))
})
# Go parallel
# Register `ncores` workers for the %dopar% foreach below.
# NOTE(review): ncores = 50 -- confirm the host actually provides that many cores.
registerDoParallel(cores = ncores)
sim.list <- foreach(k = 1:K) %dopar% {
  # Progress
  print(paste("----------", "Dataset:", k, "----------"))
  start.time <- Sys.time()
  # replication-specific seed so each of the K datasets is reproducible
  set.seed(0952702 + k)
  # balanced random assignment of the N training rows to `nfolds` CV folds
  fold.id <- sample(rep(seq(nfolds), length = N))
  # Load packages
  # re-loaded inside the worker: each %dopar% worker session needs the
  # packages and helper sources itself (required for non-fork backends)
  library(Matrix)
  library(foreach)
  library(parallel)
  library(iterators)
  library(doParallel)
  library(glmnet)
  library(lpSolve)
  library(ggplot2)
  library(pROC)
  library(LiblineaR)
  library(telegram)
  library(latex2exp)
  library(scales)
  source("Scripts/lesstwoc_mv.R")
  source("Scripts/Telegram.R")
  # Generate data
  # V1 is the only informative variable (two Gaussian classes whose means
  # differ by mu2 - mu1 = 1); V2..V1000 are heavy-tailed noise drawn from a
  # t distribution with df = 1 (i.e. Cauchy).
  train[1:n1, 1] <- rnorm(n1, mu1, 1)
  train[n1 + 1:n2, 1] <- rnorm(n2, mu2, 1)
  train[, 2:max(P.log)] <- rt((max(P.log) - 1) * N, df = 1)
  # independent test set, 100x the training size, same distributions
  test[1:(100 * n1), 1] <- rnorm(100 * n1, mu1, 1)
  test[100 * n1 + 1:(100 * n2), 1] <- rnorm(100 * n2, mu2, 1)
  test[, 2:max(P.log)] <- rt(100 * (max(P.log) - 1) * N, df = 1)
  ## Data scaling based on train set
  # Map data for standardisation
  # NB: `2:ncol(train) - 1` parses as (2:ncol(train)) - 1 == 1:(ncol(train)-1),
  # i.e. every predictor column, skipping the label column y.
  # The `sd(...) ^ as.logical(sd(...))` term divides by the sd unless the sd
  # is 0, in which case sd^FALSE == 1 guards against division by zero.
  # Test data are scaled with the *training* means and sds (no leakage).
  train.std <- train
  for (j in 2:ncol(train) - 1) {
    train.std[, j] <- (train[, j] - mean(train[, j])) / sd(train[, j]) ^ as.logical(sd(train[, j]))
  }
  test.std <- test
  for (j in 2:ncol(train) - 1) {
    test.std[, j] <- (test[, j] - mean(train[, j])) / sd(train[, j]) ^ as.logical(sd(train[, j]))
  }
  # Map data for less scaling
  # M.train: per-class training means of each variable (row 1 = class "-1",
  # row 2 = class "+1"); mapmeans() is defined in Scripts/lesstwoc_mv.R
  M.train <- matrix(0.0, nrow = length(levels(train$y)), ncol = max(P.log))
  M.train[1, ] <- colMeans(train[train$y == levels(train$y)[1], 1:max(P.log)])
  M.train[2, ] <- colMeans(train[train$y == levels(train$y)[2], 1:max(P.log)])
  train.map <- train
  train.map[, 1:max(P.log)] <- mapmeans(DF = train[, -ncol(train)], M = M.train)
  test.map <- test
  test.map[, 1:max(P.log)] <- mapmeans(DF = test[, -ncol(test)], M = M.train)
  # Map data for lessstd scaling
  # S.train: per-class training variances of each variable, passed alongside
  # the class means to mapmeansstd() (Scripts/lesstwoc_mv.R)
  S.train <- matrix(0.0, nrow = length(levels(train$y)), ncol = max(P.log))
  S.train[1, ] <- apply(train[train$y == levels(train$y)[1], 1:max(P.log)], 2, var)
  S.train[2, ] <- apply(train[train$y == levels(train$y)[2], 1:max(P.log)], 2, var)
  train.map.std <- train
  train.map.std[, 1:max(P.log)] <- mapmeansstd(DF = train[, -ncol(train)], M = M.train, S = S.train)
  test.map.std <- test
  test.map.std[, 1:max(P.log)] <- mapmeansstd(DF = test[, -ncol(test)], M = M.train, S = S.train)
# Map data for lessstd2 scaling
S.train2 <- matrix(apply(rbind(train[train$y == levels(train$y)[1], 1:max(P.log)] -
matrix(rep(M.train[1, ], times = n1), nrow = n1, byrow = TRUE),
train[train$y == levels(train$y)[2], 1:max(P.log)] -
matrix(rep(M.train[2, ], times = n1), nrow = n1, byrow = TRUE)),
2, var),
nrow = 1, ncol = max(P.log))
train.map.std2 <- train
train.map.std2[, 1:max(P.log)] <- mapmeansstd2(DF = train[, -ncol(train)], M = M.train, S = S.train2)
test.map.std2 <- test
test.map.std2[, 1:max(P.log)] <- mapmeansstd2(DF = test[, -ncol(test)], M = M.train, S = S.train2)
for (p in 2:length(P.log)) {
# Progress
print(paste("Dimensionality:", P.log[p]))
#### LESS ##################################################################
### LESS + no scaling ###
# 10-fold cross-validation for C
C.hyper.less.none <- 10^seq(-3, 2, length.out = 51)
score.fold.less.none <- numeric(N) # score probabilities of fold
auc.cv.less.none <- numeric(length(C.hyper.less.none)) # cross-validation results
for (c in C.hyper.less.none) {
for (fold in 1:nfolds) {
model.fold.less.none <- lesstwoc_none(DF = train[fold.id != fold, c(1:P.log[p], ncol(train))],
C = c)
score.fold.less.none[fold.id == fold] <- predict.less_none(MODEL = model.fold.less.none,
NEWDATA = train[fold.id == fold, 1:P.log[p]])$score
}
auc.cv.less.none[which(c == C.hyper.less.none)] <- pROC::roc(response = train$y, predictor = score.fold.less.none)$auc
}
sim.list$cv.less.none[p] <- C.hyper.less.none[
which(auc.cv.less.none == max(auc.cv.less.none))[
floor(median(1:length(which(auc.cv.less.none == max(auc.cv.less.none)))))]]
# Train model
model.less.none <- lesstwoc_none(DF = train[, c(1:P.log[p], ncol(train))],
C = sim.list$cv.less.none[p])
sim.list$betas.less.none[p, 1:P.log[p]] <- model.less.none$model$beta
# Test model
preds.less.none <- predict.less_none(MODEL = model.less.none,
NEWDATA = test[, 1:P.log[p]])$prediction
score.less.none <- predict.less_none(MODEL = model.less.none,
NEWDATA = test[, 1:P.log[p]])$score
sim.list$auc.less.none[p] <- pROC::roc(response = test$y, predictor = as.numeric(score.less.none))$auc
sim.list$accuracy.less.none[p] <- mean(factor(preds.less.none, levels = c("-1", "1")) == test$y) * 100
# print("Finished LESS + none")
### LESS + standardisation ###
# 10-fold cross-validation for C
C.hyper.less.std <- 10^seq(-3, 2, length.out = 51)
score.fold.less.std <- numeric(N) # score probabilities of fold
auc.cv.less.std <- numeric(length(C.hyper.less.std)) # cross-validation results
for (c in C.hyper.less.std) {
for (fold in 1:nfolds) {
model.fold.less.std <- lesstwoc_std(DF = train[fold.id != fold, c(1:P.log[p], ncol(train))],
C = c)
score.fold.less.std[fold.id == fold] <- predict.less_std(MODEL = model.fold.less.std,
NEWDATA = train[fold.id == fold, 1:P.log[p]])$score
}
auc.cv.less.std[which(c == C.hyper.less.std)] <- pROC::roc(response = train$y, predictor = score.fold.less.std)$auc
}
sim.list$cv.less.std[p] <- C.hyper.less.std[
which(auc.cv.less.std == max(auc.cv.less.std))[
floor(median(1:length(which(auc.cv.less.std == max(auc.cv.less.std)))))]]
# Train model
model.less.std <- lesstwoc_std(DF = train[, c(1:P.log[p], ncol(train))],
C = sim.list$cv.less.std[p])
sim.list$betas.less.std[p, 1:P.log[p]] <- model.less.std$model$beta
# Test model
preds.less.std <- predict.less_std(MODEL = model.less.std,
NEWDATA = test[, 1:P.log[p]])$prediction
score.less.std <- predict.less_std(MODEL = model.less.std,
NEWDATA = test[, 1:P.log[p]])$score
sim.list$auc.less.std[p] <- pROC::roc(response = test$y, predictor = as.numeric(score.less.std))$auc
sim.list$accuracy.less.std[p] <- mean(factor(preds.less.std, levels = c("-1", "1")) == test$y) * 100
# print("Finished LESS + std")
### LESS + LESS scaling ###
# 10-fold cross-validation for C
C.hyper.less.less <- 10^seq(-3, 2, length.out = 51)
score.fold.less.less <- numeric(N) # score probabilities of fold
auc.cv.less.less <- numeric(length(C.hyper.less.less)) # cross-validation results
for (c in C.hyper.less.less) {
for (fold in 1:nfolds) {
model.fold.less.less <- lesstwoc(DF = train[fold.id != fold, c(1:P.log[p], ncol(train))],
C = c)
score.fold.less.less[fold.id == fold] <- predict.less(MODEL = model.fold.less.less,
NEWDATA = train[fold.id == fold, 1:P.log[p]])$score
}
auc.cv.less.less[which(c == C.hyper.less.less)] <- pROC::roc(response = train$y, predictor = score.fold.less.less)$auc
}
sim.list$cv.less.less[p] <- C.hyper.less.less[
which(auc.cv.less.less == max(auc.cv.less.less))[
floor(median(1:length(which(auc.cv.less.less == max(auc.cv.less.less)))))]]
# Train model
model.less.less <- lesstwoc(DF = train[, c(1:P.log[p], ncol(train))],
C = sim.list$cv.less.less[p])
sim.list$betas.less.less[p, 1:P.log[p]] <- model.less.less$model$beta
# Test model
preds.less.less <- predict.less(MODEL = model.less.less,
NEWDATA = test[, 1:P.log[p]])$prediction
score.less.less <- predict.less(MODEL = model.less.less,
NEWDATA = test[, 1:P.log[p]])$score
sim.list$auc.less.less[p] <- pROC::roc(response = test$y, predictor = as.numeric(score.less.less))$auc
sim.list$accuracy.less.less[p] <- mean(factor(preds.less.less, levels = c("-1", "1")) == test$y) * 100
# print("Finished LESS + less")
### LESS + LESSstd scaling ###
# 10-fold cross-validation for C
C.hyper.less.lessstd <- 10^seq(-3, 2, length.out = 51)
score.fold.less.lessstd <- numeric(N) # score probabilities of fold
auc.cv.less.lessstd <- numeric(length(C.hyper.less.lessstd)) # cross-validation results
for (c in C.hyper.less.lessstd) {
for (fold in 1:nfolds) {
model.fold.less.lessstd <- lesstwoc_lessstd(DF = train[fold.id != fold, c(1:P.log[p], ncol(train))],
C = c)
score.fold.less.lessstd[fold.id == fold] <- predict.less_lessstd(MODEL = model.fold.less.lessstd,
NEWDATA = train[fold.id == fold, 1:P.log[p]])$score
}
auc.cv.less.lessstd[which(c == C.hyper.less.lessstd)] <- pROC::roc(response = train$y, predictor = score.fold.less.lessstd)$auc
}
sim.list$cv.less.lessstd[p] <- C.hyper.less.lessstd[
which(auc.cv.less.lessstd == max(auc.cv.less.lessstd))[
floor(median(1:length(which(auc.cv.less.lessstd == max(auc.cv.less.lessstd)))))]]
# Train model
model.less.lessstd <- lesstwoc_lessstd(DF = train[, c(1:P.log[p], ncol(train))],
C = sim.list$cv.less.lessstd[p])
sim.list$betas.less.lessstd[p, 1:P.log[p]] <- model.less.lessstd$model$beta
# Test model
preds.less.lessstd <- predict.less_lessstd(MODEL = model.less.lessstd,
NEWDATA = test[, 1:P.log[p]])$prediction
score.less.lessstd <- predict.less_lessstd(MODEL = model.less.lessstd,
NEWDATA = test[, 1:P.log[p]])$score
sim.list$auc.less.lessstd[p] <- pROC::roc(response = test$y, predictor = as.numeric(score.less.lessstd))$auc
sim.list$accuracy.less.lessstd[p] <- mean(factor(preds.less.lessstd, levels = c("-1", "1")) == test$y) * 100
# print("Finished LESS + lessstd")
### LESS + LESSstd2 scaling ###
# 10-fold cross-validation for C
C.hyper.less.lessstd2 <- 10^seq(-3, 2, length.out = 51)
score.fold.less.lessstd2 <- numeric(N) # score probabilities of fold
auc.cv.less.lessstd2 <- numeric(length(C.hyper.less.lessstd2)) # cross-validation results
for (c in C.hyper.less.lessstd2) {
for (fold in 1:nfolds) {
model.fold.less.lessstd2 <- lesstwoc_lessstd2(DF = train[fold.id != fold, c(1:P.log[p], ncol(train))],
C = c)
score.fold.less.lessstd2[fold.id == fold] <- predict.less_lessstd2(MODEL = model.fold.less.lessstd2,
NEWDATA = train[fold.id == fold, 1:P.log[p]])$score
}
auc.cv.less.lessstd2[which(c == C.hyper.less.lessstd2)] <- pROC::roc(response = train$y, predictor = score.fold.less.lessstd2)$auc
}
sim.list$cv.less.lessstd2[p] <- C.hyper.less.lessstd2[
which(auc.cv.less.lessstd2 == max(auc.cv.less.lessstd2))[
floor(median(1:length(which(auc.cv.less.lessstd2 == max(auc.cv.less.lessstd2)))))]]
# Train model
model.less.lessstd2 <- lesstwoc_lessstd2(DF = train[, c(1:P.log[p], ncol(train))],
C = sim.list$cv.less.lessstd2[p])
sim.list$betas.less.lessstd2[p, 1:P.log[p]] <- model.less.lessstd2$model$beta
# Test model
preds.less.lessstd2 <- predict.less_lessstd2(MODEL = model.less.lessstd2,
NEWDATA = test[, 1:P.log[p]])$prediction
score.less.lessstd2 <- predict.less_lessstd2(MODEL = model.less.lessstd2,
NEWDATA = test[, 1:P.log[p]])$score
sim.list$auc.less.lessstd2[p] <- pROC::roc(response = test$y, predictor = as.numeric(score.less.lessstd2))$auc
sim.list$accuracy.less.lessstd2[p] <- mean(factor(preds.less.lessstd2, levels = c("-1", "1")) == test$y) * 100
# print("Finished LESS + lessstd2")
#### Support Vector Machine with L1 regularisation #########################
### SVML1 + no scaling ###
# 10 fold cross-validation for penalisation parameter C
C.hyper.svml1.none <- 10^seq(-3, 2, length.out = 51)
score.fold.svml1.none <- numeric(N) # score probabilities of fold
auc.cv.svml1.none <- numeric(length(C.hyper.svml1.none)) # cross-validation results
for (c in C.hyper.svml1.none) {
for (fold in 1:nfolds) {
model.fold.svml1.none <- LiblineaR(data = train[fold.id != fold, c(1:P.log[p])],
target = train[fold.id != fold, ncol(train)],
type = 5, # L1-regularized L2-loss support vector classification
cost = c,
epsilon = 1e-7,
bias = 1,
wi = NULL,
cross = 0,
verbose = FALSE,
findC = FALSE,
useInitC = FALSE)
score.fold.svml1.none[fold.id == fold] <- predict(model.fold.svml1.none,
train[fold.id == fold, c(1:P.log[p])],
decisionValues = TRUE)$decisionValues[, 1]
}
auc.cv.svml1.none[which(c == C.hyper.svml1.none)] <- pROC::roc(response = train$y, predictor = score.fold.svml1.none)$auc
}
sim.list$cv.svml1.none[p] <- C.hyper.svml1.none[
which(auc.cv.svml1.none == max(auc.cv.svml1.none))[
floor(median(1:length(which(auc.cv.svml1.none == max(auc.cv.svml1.none)))))]]
# Train model
model.svml1.none <- LiblineaR(data = as.matrix(train[, c(1:P.log[p])]),
target = train[, ncol(train)],
type = 5, # L1-regularized L2-loss support vector classification
cost = sim.list$cv.svml1.none[p],
epsilon = 1e-7,
bias = 1,
wi = NULL,
cross = 0,
verbose = FALSE,
findC = FALSE,
useInitC = FALSE)
sim.list$betas.svml1.none[p, 1:P.log[p]] <- model.svml1.none$W[-length(model.svml1.none$W)]
# Test model
preds.svml1.none <- predict(model.svml1.none,
test[, 1:P.log[p]],
decisionValues = TRUE)$predictions
score.svml1.none <- predict(model.svml1.none,
test[, 1:P.log[p]],
decisionValues = TRUE)$decisionValues[, 1]
sim.list$auc.svml1.none[p] <- pROC::roc(response = test$y, predictor = as.numeric(score.svml1.none))$auc
sim.list$accuracy.svml1.none[p] <- mean(factor(preds.svml1.none, levels = c("-1", "1")) == test$y) * 100
# print("Finished SVML1 + none")
### SVML1 + standardisation ###
# 10 fold cross-validation for penalisation parameter C
C.hyper.svml1.std <- 10^seq(-3, 2, length.out = 51)
score.fold.svml1.std <- numeric(N) # score probabilities of fold
auc.cv.svml1.std <- numeric(length(C.hyper.svml1.std)) # cross-validation results
for (c in C.hyper.svml1.std) {
for (fold in 1:nfolds) {
model.fold.svml1.std <- LiblineaR(data = train.std[fold.id != fold, c(1:P.log[p])],
target = train.std[fold.id != fold, ncol(train.std)],
type = 5, # L1-regularized L2-loss support vector classification
cost = c,
epsilon = 1e-7,
bias = 1,
wi = NULL,
cross = 0,
verbose = FALSE,
findC = FALSE,
useInitC = FALSE)
score.fold.svml1.std[fold.id == fold] <- predict(model.fold.svml1.std,
train.std[fold.id == fold, c(1:P.log[p])],
decisionValues = TRUE)$decisionValues[, 1]
}
auc.cv.svml1.std[which(c == C.hyper.svml1.std)] <- pROC::roc(response = train.std$y, predictor = score.fold.svml1.std)$auc
}
sim.list$cv.svml1.std[p] <- C.hyper.svml1.std[
which(auc.cv.svml1.std == max(auc.cv.svml1.std))[
floor(median(1:length(which(auc.cv.svml1.std == max(auc.cv.svml1.std)))))]]
# Train model
model.svml1.std <- LiblineaR(data = as.matrix(train.std[, c(1:P.log[p])]),
target = train.std[, ncol(train.std)],
type = 5, # L1-regularized L2-loss support vector classification
cost = sim.list$cv.svml1.std[p],
epsilon = 1e-7,
bias = 1,
wi = NULL,
cross = 0,
verbose = FALSE,
findC = FALSE,
useInitC = FALSE)
sim.list$betas.svml1.std[p, 1:P.log[p]] <- model.svml1.std$W[-length(model.svml1.std$W)]
# Test model
preds.svml1.std <- predict(model.svml1.std,
test.std[, 1:P.log[p]],
decisionValues = TRUE)$predictions
score.svml1.std <- predict(model.svml1.std,
test.std[, 1:P.log[p]],
decisionValues = TRUE)$decisionValues[, 1]
sim.list$auc.svml1.std[p] <- pROC::roc(response = test.std$y, predictor = as.numeric(score.svml1.std))$auc
sim.list$accuracy.svml1.std[p] <- mean(factor(preds.svml1.std, levels = c("-1", "1")) == test.std$y) * 100
# print("Finished SVML1 + std")
### SVML1 + LESS scaling ###
# 10 fold cross-validation for penalisation parameter C
C.hyper.svml1.less <- 10^seq(-3, 2, length.out = 51)
score.fold.svml1.less <- numeric(N) # score probabilities of fold
auc.cv.svml1.less <- numeric(length(C.hyper.svml1.less)) # cross-validation results
for (c in C.hyper.svml1.less) {
for (fold in 1:nfolds) {
model.fold.svml1.less <- LiblineaR(data = train.map[fold.id != fold, c(1:P.log[p])],
target = train.map[fold.id != fold, ncol(train)],
type = 5, # L1-regularized L2-loss support vector classification
cost = c,
epsilon = 1e-7,
bias = 1,
wi = NULL,
cross = 0,
verbose = FALSE,
findC = FALSE,
useInitC = FALSE)
score.fold.svml1.less[fold.id == fold] <- predict(model.fold.svml1.less,
train.map[fold.id == fold, c(1:P.log[p])],
decisionValues = TRUE)$decisionValues[, 1]
}
auc.cv.svml1.less[which(c == C.hyper.svml1.less)] <- pROC::roc(response = train.map$y, predictor = score.fold.svml1.less)$auc
}
sim.list$cv.svml1.less[p] <- C.hyper.svml1.less[
which(auc.cv.svml1.less == max(auc.cv.svml1.less))[
floor(median(1:length(which(auc.cv.svml1.less == max(auc.cv.svml1.less)))))]]
# Train model
model.svml1.less <- LiblineaR(data = as.matrix(train.map[, c(1:P.log[p])]),
target = train.map[, ncol(train)],
type = 5, # L1-regularized L2-loss support vector classification
cost = sim.list$cv.svml1.less[p],
epsilon = 1e-7,
bias = 1,
wi = NULL,
cross = 0,
verbose = FALSE,
findC = FALSE,
useInitC = FALSE)
sim.list$betas.svml1.less[p, 1:P.log[p]] <- model.svml1.less$W[-length(model.svml1.less$W)]
# Test model
preds.svml1.less <- predict(model.svml1.less,
test.map[, 1:P.log[p]],
decisionValues = TRUE)$predictions
score.svml1.less <- predict(model.svml1.less,
test.map[, 1:P.log[p]],
decisionValues = TRUE)$decisionValues[, 1]
sim.list$auc.svml1.less[p] <- pROC::roc(response = test.map$y, predictor = as.numeric(score.svml1.less))$auc
sim.list$accuracy.svml1.less[p] <- mean(factor(preds.svml1.less, levels = c("-1", "1")) == test.map$y) * 100
# print("Finished SVML1 + less")
### SVML1 + LESSstd scaling ###
# 10 fold cross-validation for penalisation parameter C
C.hyper.svml1.lessstd <- 10^seq(-3, 2, length.out = 51)  # log-spaced cost grid
score.fold.svml1.lessstd <- numeric(N) # score probabilities of fold
auc.cv.svml1.lessstd <- numeric(length(C.hyper.svml1.lessstd)) # cross-validation results
# NOTE(review): the loop variable `c` shadows base::c as a value; calls of the
# form c(...) below still resolve to base::c (R skips non-function bindings
# when a name is used in call position), but a different name would be clearer.
for (c in C.hyper.svml1.lessstd) {
  for (fold in 1:nfolds) {
    model.fold.svml1.lessstd <- LiblineaR(data = train.map.std[fold.id != fold, c(1:P.log[p])],
                                          target = train.map.std[fold.id != fold, ncol(train)],
                                          type = 5, # L1-regularized L2-loss support vector classification
                                          cost = c,
                                          epsilon = 1e-7,
                                          bias = 1,
                                          wi = NULL,
                                          cross = 0,
                                          verbose = FALSE,
                                          findC = FALSE,
                                          useInitC = FALSE)
    # Out-of-fold decision values for the held-out observations of this fold
    score.fold.svml1.lessstd[fold.id == fold] <- predict(model.fold.svml1.lessstd,
                                                         train.map.std[fold.id == fold, c(1:P.log[p])],
                                                         decisionValues = TRUE)$decisionValues[, 1]
  }
  # CV AUC over the full out-of-fold score vector for this cost value
  auc.cv.svml1.lessstd[which(c == C.hyper.svml1.lessstd)] <- pROC::roc(response = train.map.std$y, predictor = score.fold.svml1.lessstd)$auc
}
# Select the cost with maximal CV AUC; ties broken by the middle tied index
sim.list$cv.svml1.lessstd[p] <- C.hyper.svml1.lessstd[
  which(auc.cv.svml1.lessstd == max(auc.cv.svml1.lessstd))[
    floor(median(1:length(which(auc.cv.svml1.lessstd == max(auc.cv.svml1.lessstd)))))]]
# Train model on the full LESSstd-scaled training data with the selected cost
model.svml1.lessstd <- LiblineaR(data = as.matrix(train.map.std[, c(1:P.log[p])]),
                                 target = train.map.std[, ncol(train)],
                                 type = 5, # L1-regularized L2-loss support vector classification
                                 cost = sim.list$cv.svml1.lessstd[p],
                                 epsilon = 1e-7,
                                 bias = 1,
                                 wi = NULL,
                                 cross = 0,
                                 verbose = FALSE,
                                 findC = FALSE,
                                 useInitC = FALSE)
# Store fitted weights (the trailing entry of W is the bias term -> dropped)
sim.list$betas.svml1.lessstd[p, 1:P.log[p]] <- model.svml1.lessstd$W[-length(model.svml1.lessstd$W)]
# Test model: hard class labels for accuracy, decision values for AUC
preds.svml1.lessstd <- predict(model.svml1.lessstd,
                               test.map.std[, 1:P.log[p]],
                               decisionValues = TRUE)$predictions
score.svml1.lessstd <- predict(model.svml1.lessstd,
                               test.map.std[, 1:P.log[p]],
                               decisionValues = TRUE)$decisionValues[, 1]
sim.list$auc.svml1.lessstd[p] <- pROC::roc(response = test.map.std$y, predictor = as.numeric(score.svml1.lessstd))$auc
sim.list$accuracy.svml1.lessstd[p] <- mean(factor(preds.svml1.lessstd, levels = c("-1", "1")) == test.map.std$y) * 100
# print("Finished SVML1 + lessstd")
### SVML1 + LESSstd2 scaling ###
# Same pipeline as the LESSstd block above, on the std2-scaled data.
# 10 fold cross-validation for penalisation parameter C
C.hyper.svml1.lessstd2 <- 10^seq(-3, 2, length.out = 51)
score.fold.svml1.lessstd2 <- numeric(N) # score probabilities of fold
auc.cv.svml1.lessstd2 <- numeric(length(C.hyper.svml1.lessstd2)) # cross-validation results
for (c in C.hyper.svml1.lessstd2) {
  for (fold in 1:nfolds) {
    model.fold.svml1.lessstd2 <- LiblineaR(data = train.map.std2[fold.id != fold, c(1:P.log[p])],
                                           target = train.map.std2[fold.id != fold, ncol(train)],
                                           type = 5, # L1-regularized L2-loss support vector classification
                                           cost = c,
                                           epsilon = 1e-7,
                                           bias = 1,
                                           wi = NULL,
                                           cross = 0,
                                           verbose = FALSE,
                                           findC = FALSE,
                                           useInitC = FALSE)
    score.fold.svml1.lessstd2[fold.id == fold] <- predict(model.fold.svml1.lessstd2,
                                                          train.map.std2[fold.id == fold, c(1:P.log[p])],
                                                          decisionValues = TRUE)$decisionValues[, 1]
  }
  auc.cv.svml1.lessstd2[which(c == C.hyper.svml1.lessstd2)] <- pROC::roc(response = train.map.std2$y, predictor = score.fold.svml1.lessstd2)$auc
}
# Select cost (middle of tied maxima), refit, store weights, evaluate
sim.list$cv.svml1.lessstd2[p] <- C.hyper.svml1.lessstd2[
  which(auc.cv.svml1.lessstd2 == max(auc.cv.svml1.lessstd2))[
    floor(median(1:length(which(auc.cv.svml1.lessstd2 == max(auc.cv.svml1.lessstd2)))))]]
# Train model
model.svml1.lessstd2 <- LiblineaR(data = as.matrix(train.map.std2[, c(1:P.log[p])]),
                                  target = train.map.std2[, ncol(train)],
                                  type = 5, # L1-regularized L2-loss support vector classification
                                  cost = sim.list$cv.svml1.lessstd2[p],
                                  epsilon = 1e-7,
                                  bias = 1,
                                  wi = NULL,
                                  cross = 0,
                                  verbose = FALSE,
                                  findC = FALSE,
                                  useInitC = FALSE)
sim.list$betas.svml1.lessstd2[p, 1:P.log[p]] <- model.svml1.lessstd2$W[-length(model.svml1.lessstd2$W)]
# Test model
preds.svml1.lessstd2 <- predict(model.svml1.lessstd2,
                                test.map.std2[, 1:P.log[p]],
                                decisionValues = TRUE)$predictions
score.svml1.lessstd2 <- predict(model.svml1.lessstd2,
                                test.map.std2[, 1:P.log[p]],
                                decisionValues = TRUE)$decisionValues[, 1]
sim.list$auc.svml1.lessstd2[p] <- pROC::roc(response = test.map.std2$y, predictor = as.numeric(score.svml1.lessstd2))$auc
sim.list$accuracy.svml1.lessstd2[p] <- mean(factor(preds.svml1.lessstd2, levels = c("-1", "1")) == test.map.std2$y) * 100
# print("Finished SVML1 + lessstd2")
#### Logistic Regression with L1 penalisation ##############################
### LRL1 + no scaling
# 10-fold cross-validation for the L1 penalty (glmnet chooses its own lambda
# path; the folds are fixed via fold.id so all methods share the same splits)
cv.model.lrl1.none <- cv.glmnet(x = as.matrix(train[, 1:P.log[p]]),
                                y = train$y,
                                family = "binomial",
                                alpha = 1,  # alpha = 1 -> pure L1 (lasso) penalty
                                foldid = fold.id,
                                type.measure = "auc")
# 1-SE rule: most regularised lambda within one SE of the CV optimum
sim.list$cv.lrl1.none[p] <- cv.model.lrl1.none$lambda.1se
# Train model (standardize = FALSE: scaling is applied externally per variant)
model.lrl1.none <- glmnet(x = as.matrix(train[, 1:P.log[p]]),
                          y = train$y,
                          intercept = TRUE,
                          standardize = FALSE,
                          family = "binomial",
                          alpha = 1,
                          lambda = sim.list$cv.lrl1.none[p])
sim.list$betas.lrl1.none[p, 1:P.log[p]] <- coef(model.lrl1.none)[-1]  # drop intercept
# Test model
# NOTE(review): predict.glmnet() is called with type = "class" but the result
# is then used as a continuous score for roc() and thresholded at 0 below --
# this relies on it returning link-scale values; confirm for the glmnet
# version in use.
preds.lrl1.none <- predict.glmnet(object = model.lrl1.none,
                                  newx = as.matrix(test[, 1:P.log[p]]),
                                  s = sim.list$cv.lrl1.none[p],
                                  type = "class")
sim.list$auc.lrl1.none[p] <- pROC::roc(response = test$y, predictor = as.numeric(preds.lrl1.none))$auc
# Threshold the linear predictor at 0 to recover the -1/1 labels
preds.lrl1.none <- factor(ifelse(preds.lrl1.none < 0, -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lrl1.none[p] <- mean(preds.lrl1.none == test$y) * 100
# print("Finished LRL1 + none")
### LRL1 + standardisation
# Same pipeline on the standardised data
# 10-fold cross-validation for L1
cv.model.lrl1.std <- cv.glmnet(x = as.matrix(train.std[, 1:P.log[p]]),
                               y = train.std$y,
                               family = "binomial",
                               alpha = 1,
                               foldid = fold.id,
                               type.measure = "auc")
sim.list$cv.lrl1.std[p] <- cv.model.lrl1.std$lambda.1se
# Train model
model.lrl1.std <- glmnet(x = as.matrix(train.std[, 1:P.log[p]]),
                         y = train.std$y,
                         intercept = TRUE,
                         standardize = FALSE,
                         family = "binomial",
                         alpha = 1,
                         lambda = sim.list$cv.lrl1.std[p])
sim.list$betas.lrl1.std[p, 1:P.log[p]] <- coef(model.lrl1.std)[-1]
# Test model
preds.lrl1.std <- predict.glmnet(object = model.lrl1.std,
                                 newx = as.matrix(test.std[, 1:P.log[p]]),
                                 s = sim.list$cv.lrl1.std[p],
                                 type = "class")
sim.list$auc.lrl1.std[p] <- pROC::roc(response = test.std$y, predictor = as.numeric(preds.lrl1.std))$auc
preds.lrl1.std <- factor(ifelse(preds.lrl1.std < 0, -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lrl1.std[p] <- mean(preds.lrl1.std == test.std$y) * 100
# print("Finished LRL1 + std")
### LRL1 + LESS scaling
# Same pipeline on the LESS-mapped data
# 10-fold cross-validation for L1
cv.model.lrl1.less <- cv.glmnet(x = as.matrix(train.map[, 1:P.log[p]]),
                                y = train.map$y,
                                family = "binomial",
                                alpha = 1,
                                foldid = fold.id,
                                type.measure = "auc")
sim.list$cv.lrl1.less[p] <- cv.model.lrl1.less$lambda.1se
# Train model
model.lrl1.less <- glmnet(x = as.matrix(train.map[, 1:P.log[p]]),
                          y = train.map$y,
                          intercept = TRUE,
                          standardize = FALSE,
                          family = "binomial",
                          alpha = 1,
                          lambda = sim.list$cv.lrl1.less[p])
sim.list$betas.lrl1.less[p, 1:P.log[p]] <- coef(model.lrl1.less)[-1]
# Test model
preds.lrl1.less <- predict.glmnet(object = model.lrl1.less,
                                  newx = as.matrix(test.map[, 1:P.log[p]]),
                                  s = sim.list$cv.lrl1.less[p],
                                  type = "class")
sim.list$auc.lrl1.less[p] <- pROC::roc(response = test.map$y, predictor = as.numeric(preds.lrl1.less))$auc
preds.lrl1.less <- factor(ifelse(preds.lrl1.less < 0, -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lrl1.less[p] <- mean(preds.lrl1.less == test.map$y) * 100
# print("Finished LRL1 + less")
### LRL1 + LESSstd scaling
# Same pipeline on the LESSstd-scaled data
# 10-fold cross-validation for L1
cv.model.lrl1.lessstd <- cv.glmnet(x = as.matrix(train.map.std[, 1:P.log[p]]),
                                   y = train.map.std$y,
                                   family = "binomial",
                                   alpha = 1,
                                   foldid = fold.id,
                                   type.measure = "auc")
sim.list$cv.lrl1.lessstd[p] <- cv.model.lrl1.lessstd$lambda.1se
# Train model
model.lrl1.lessstd <- glmnet(x = as.matrix(train.map.std[, 1:P.log[p]]),
                             y = train.map.std$y,
                             intercept = TRUE,
                             standardize = FALSE,
                             family = "binomial",
                             alpha = 1,
                             lambda = sim.list$cv.lrl1.lessstd[p])
sim.list$betas.lrl1.lessstd[p, 1:P.log[p]] <- coef(model.lrl1.lessstd)[-1]
# Test model
preds.lrl1.lessstd <- predict.glmnet(object = model.lrl1.lessstd,
                                     newx = as.matrix(test.map.std[, 1:P.log[p]]),
                                     s = sim.list$cv.lrl1.lessstd[p],
                                     type = "class")
sim.list$auc.lrl1.lessstd[p] <- pROC::roc(response = test.map.std$y, predictor = as.numeric(preds.lrl1.lessstd))$auc
preds.lrl1.lessstd <- factor(ifelse(preds.lrl1.lessstd < 0, -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lrl1.lessstd[p] <- mean(preds.lrl1.lessstd == test.map.std$y) * 100
# print("Finished LRL1 + lessstd")
### LRL1 + LESSstd2 scaling
# Same pipeline on the LESSstd2-scaled data
# 10-fold cross-validation for L1
cv.model.lrl1.lessstd2 <- cv.glmnet(x = as.matrix(train.map.std2[, 1:P.log[p]]),
                                    y = train.map.std2$y,
                                    family = "binomial",
                                    alpha = 1,
                                    foldid = fold.id,
                                    type.measure = "auc")
sim.list$cv.lrl1.lessstd2[p] <- cv.model.lrl1.lessstd2$lambda.1se
# Train model
model.lrl1.lessstd2 <- glmnet(x = as.matrix(train.map.std2[, 1:P.log[p]]),
                              y = train.map.std2$y,
                              intercept = TRUE,
                              standardize = FALSE,
                              family = "binomial",
                              alpha = 1,
                              lambda = sim.list$cv.lrl1.lessstd2[p])
sim.list$betas.lrl1.lessstd2[p, 1:P.log[p]] <- coef(model.lrl1.lessstd2)[-1]
# Test model
preds.lrl1.lessstd2 <- predict.glmnet(object = model.lrl1.lessstd2,
                                      newx = as.matrix(test.map.std2[, 1:P.log[p]]),
                                      s = sim.list$cv.lrl1.lessstd2[p],
                                      type = "class")
sim.list$auc.lrl1.lessstd2[p] <- pROC::roc(response = test.map.std2$y, predictor = as.numeric(preds.lrl1.lessstd2))$auc
preds.lrl1.lessstd2 <- factor(ifelse(preds.lrl1.lessstd2 < 0, -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lrl1.lessstd2[p] <- mean(preds.lrl1.lessstd2 == test.map.std2$y) * 100
# print("Finished LRL1 + lessstd2")
#### LASSO Regression ######################################################
# Classification treated as least-squares regression on the numeric class
# codes (family = "gaussian", MSE-based CV); continuous predictions are
# later thresholded at the midpoint of the two class codes.
### LASSO + no scaling
# 10-fold cross-validation for L1
cv.model.lasso.none <- cv.glmnet(x = as.matrix(train[, 1:P.log[p]]),
                                 y = as.numeric(train$y),
                                 family = "gaussian",
                                 alpha = 1,
                                 foldid = fold.id,
                                 type.measure = "mse")
# 1-SE rule lambda
sim.list$cv.lasso.none[p] <- cv.model.lasso.none$lambda.1se
# Train model
model.lasso.none <- glmnet(x = as.matrix(train[, 1:P.log[p]]),
                           y = as.numeric(train$y),
                           intercept = TRUE,
                           standardize = FALSE,
                           family = "gaussian",
                           alpha = 1,
                           lambda = sim.list$cv.lasso.none[p])
sim.list$betas.lasso.none[p, 1:P.log[p]] <- coef(model.lasso.none)[-1]  # drop intercept
# Test model: continuous fitted values (type = "link")
preds.lasso.none <- predict.glmnet(object = model.lasso.none,
                                   newx = as.matrix(test[, 1:P.log[p]]),
                                   s = sim.list$cv.lasso.none[p],
                                   type = "link")
sim.list$auc.lasso.none[p] <- pROC::roc(response = test$y, predictor = as.numeric(preds.lasso.none))$auc
# Threshold at the midpoint between the two numeric class codes
preds.lasso.none <- factor(ifelse(preds.lasso.none < mean(unique(as.numeric(train$y))), -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lasso.none[p] <- mean(preds.lasso.none == test$y) * 100
# print("Finished LASSO + none")
### LASSO + standardisation
# Same pipeline on the standardised data
# 10-fold cross-validation for L1
cv.model.lasso.std <- cv.glmnet(x = as.matrix(train.std[, 1:P.log[p]]),
                                y = as.numeric(train.std$y),
                                family = "gaussian",
                                alpha = 1,
                                foldid = fold.id,
                                type.measure = "mse")
sim.list$cv.lasso.std[p] <- cv.model.lasso.std$lambda.1se
# Train model
model.lasso.std <- glmnet(x = as.matrix(train.std[, 1:P.log[p]]),
                          y = as.numeric(train.std$y),
                          intercept = TRUE,
                          standardize = FALSE,
                          family = "gaussian",
                          alpha = 1,
                          lambda = sim.list$cv.lasso.std[p])
sim.list$betas.lasso.std[p, 1:P.log[p]] <- coef(model.lasso.std)[-1]
# Test model
preds.lasso.std <- predict.glmnet(object = model.lasso.std,
                                  newx = as.matrix(test.std[, 1:P.log[p]]),
                                  s = sim.list$cv.lasso.std[p],
                                  type = "link")
sim.list$auc.lasso.std[p] <- pROC::roc(response = test.std$y, predictor = as.numeric(preds.lasso.std))$auc
preds.lasso.std <- factor(ifelse(preds.lasso.std < mean(unique(as.numeric(train.std$y))), -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lasso.std[p] <- mean(preds.lasso.std == test.std$y) * 100
# print("Finished LASSO + std")
### LASSO + LESS scaling
# Same pipeline on the LESS-mapped data
# 10-fold cross-validation for L1
cv.model.lasso.less <- cv.glmnet(x = as.matrix(train.map[, 1:P.log[p]]),
                                 y = as.numeric(train.map$y),
                                 family = "gaussian",
                                 alpha = 1,
                                 foldid = fold.id,
                                 type.measure = "mse")
sim.list$cv.lasso.less[p] <- cv.model.lasso.less$lambda.1se
# Train model
model.lasso.less <- glmnet(x = as.matrix(train.map[, 1:P.log[p]]),
                           y = as.numeric(train.map$y),
                           intercept = TRUE,
                           standardize = FALSE,
                           family = "gaussian",
                           alpha = 1,
                           lambda = sim.list$cv.lasso.less[p])
sim.list$betas.lasso.less[p, 1:P.log[p]] <- coef(model.lasso.less)[-1]
# Test model
preds.lasso.less <- predict.glmnet(object = model.lasso.less,
                                   newx = as.matrix(test.map[, 1:P.log[p]]),
                                   s = sim.list$cv.lasso.less[p],
                                   type = "link")
sim.list$auc.lasso.less[p] <- pROC::roc(response = test.map$y, predictor = as.numeric(preds.lasso.less))$auc
preds.lasso.less <- factor(ifelse(preds.lasso.less < mean(unique(as.numeric(train.map$y))), -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lasso.less[p] <- mean(preds.lasso.less == test.map$y) * 100
# print("Finished LASSO + less")
### LASSO + LESSstd scaling
# Same pipeline on the LESSstd-scaled data
# 10-fold cross-validation for L1
cv.model.lasso.lessstd <- cv.glmnet(x = as.matrix(train.map.std[, 1:P.log[p]]),
                                    y = as.numeric(train.map.std$y),
                                    family = "gaussian",
                                    alpha = 1,
                                    foldid = fold.id,
                                    type.measure = "mse")
sim.list$cv.lasso.lessstd[p] <- cv.model.lasso.lessstd$lambda.1se
# Train model
model.lasso.lessstd <- glmnet(x = as.matrix(train.map.std[, 1:P.log[p]]),
                              y = as.numeric(train.map.std$y),
                              intercept = TRUE,
                              standardize = FALSE,
                              family = "gaussian",
                              alpha = 1,
                              lambda = sim.list$cv.lasso.lessstd[p])
sim.list$betas.lasso.lessstd[p, 1:P.log[p]] <- coef(model.lasso.lessstd)[-1]
# Test model
preds.lasso.lessstd <- predict.glmnet(object = model.lasso.lessstd,
                                      newx = as.matrix(test.map.std[, 1:P.log[p]]),
                                      s = sim.list$cv.lasso.lessstd[p],
                                      type = "link")
sim.list$auc.lasso.lessstd[p] <- pROC::roc(response = test.map.std$y, predictor = as.numeric(preds.lasso.lessstd))$auc
preds.lasso.lessstd <- factor(ifelse(preds.lasso.lessstd < mean(unique(as.numeric(train.map.std$y))), -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lasso.lessstd[p] <- mean(preds.lasso.lessstd == test.map.std$y) * 100
# print("Finished LASSO + lessstd")
### LASSO + LESSstd2 scaling
# Same pipeline on the LESSstd2-scaled data
# 10-fold cross-validation for L1
cv.model.lasso.lessstd2 <- cv.glmnet(x = as.matrix(train.map.std2[, 1:P.log[p]]),
                                     y = as.numeric(train.map.std2$y),
                                     family = "gaussian",
                                     alpha = 1,
                                     foldid = fold.id,
                                     type.measure = "mse")
sim.list$cv.lasso.lessstd2[p] <- cv.model.lasso.lessstd2$lambda.1se
# Train model
model.lasso.lessstd2 <- glmnet(x = as.matrix(train.map.std2[, 1:P.log[p]]),
                               y = as.numeric(train.map.std2$y),
                               intercept = TRUE,
                               standardize = FALSE,
                               family = "gaussian",
                               alpha = 1,
                               lambda = sim.list$cv.lasso.lessstd2[p])
sim.list$betas.lasso.lessstd2[p, 1:P.log[p]] <- coef(model.lasso.lessstd2)[-1]
# Test model
preds.lasso.lessstd2 <- predict.glmnet(object = model.lasso.lessstd2,
                                       newx = as.matrix(test.map.std2[, 1:P.log[p]]),
                                       s = sim.list$cv.lasso.lessstd2[p],
                                       type = "link")
sim.list$auc.lasso.lessstd2[p] <- pROC::roc(response = test.map.std2$y, predictor = as.numeric(preds.lasso.lessstd2))$auc
preds.lasso.lessstd2 <- factor(ifelse(preds.lasso.lessstd2 < mean(unique(as.numeric(train.map.std2$y))), -1, 1), levels = c("-1", "1"))
sim.list$accuracy.lasso.lessstd2[p] <- mean(preds.lasso.lessstd2 == test.map.std2$y) * 100
# print("Finished LASSO + lessstd2")
}
# Model sparseness per dimension step: for each row of the coefficient matrix,
# count coefficients whose magnitude exceeds 1e-6 (effectively non-zero).
# The previous condition `x != 0 & abs(x) > 1e-6` was redundant:
# abs(x) > 1e-6 already implies x != 0 (and both yield NA for NA inputs,
# which na.rm = TRUE discards either way).
sim.list$numbetas.less.none <- apply(sim.list$betas.less.none, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.less.std <- apply(sim.list$betas.less.std, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.less.less <- apply(sim.list$betas.less.less, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.less.lessstd <- apply(sim.list$betas.less.lessstd, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.less.lessstd2 <- apply(sim.list$betas.less.lessstd2, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.svml1.none <- apply(sim.list$betas.svml1.none, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.svml1.std <- apply(sim.list$betas.svml1.std, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.svml1.less <- apply(sim.list$betas.svml1.less, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.svml1.lessstd <- apply(sim.list$betas.svml1.lessstd, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.svml1.lessstd2 <- apply(sim.list$betas.svml1.lessstd2, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lrl1.none <- apply(sim.list$betas.lrl1.none, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lrl1.std <- apply(sim.list$betas.lrl1.std, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lrl1.less <- apply(sim.list$betas.lrl1.less, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lrl1.lessstd <- apply(sim.list$betas.lrl1.lessstd, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lrl1.lessstd2 <- apply(sim.list$betas.lrl1.lessstd2, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lasso.none <- apply(sim.list$betas.lasso.none, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lasso.std <- apply(sim.list$betas.lasso.std, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lasso.less <- apply(sim.list$betas.lasso.less, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lasso.lessstd <- apply(sim.list$betas.lasso.lessstd, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
sim.list$numbetas.lasso.lessstd2 <- apply(sim.list$betas.lasso.lessstd2, 1, function(x) {sum(abs(x) > 1e-6, na.rm = TRUE)})
# Wall-clock duration of this simulation replicate, in seconds
end.time <- Sys.time()
sim.list$total.time <- as.numeric(difftime(end.time, start.time, units = "sec"))
# Last expression: the per-replicate result list returned to foreach
sim.list
}
stopImplicitCluster()
# Helper: pull one named element from every replicate's result list in
# sim.list and average it element-wise (per dimension step) across the K
# replicates. Replaces the previous `unlist(lapply("<one string>", ...))`
# wrapper, which iterated over a single string and added nothing.
sim_mean_over_reps <- function(key) {
  rowMeans(sapply(sim.list, "[[", key))
}
# Total time across all replicates (seconds)
total.time <- sum(sapply(sim.list, "[[", "total.time"))
# Mean Model Sparseness over all K simulations
sim.mean.numbetas.less.none <- sim_mean_over_reps("numbetas.less.none")
sim.mean.numbetas.less.std <- sim_mean_over_reps("numbetas.less.std")
sim.mean.numbetas.less.less <- sim_mean_over_reps("numbetas.less.less")
sim.mean.numbetas.less.lessstd <- sim_mean_over_reps("numbetas.less.lessstd")
sim.mean.numbetas.less.lessstd2 <- sim_mean_over_reps("numbetas.less.lessstd2")
sim.mean.numbetas.svml1.none <- sim_mean_over_reps("numbetas.svml1.none")
sim.mean.numbetas.svml1.std <- sim_mean_over_reps("numbetas.svml1.std")
sim.mean.numbetas.svml1.less <- sim_mean_over_reps("numbetas.svml1.less")
sim.mean.numbetas.svml1.lessstd <- sim_mean_over_reps("numbetas.svml1.lessstd")
sim.mean.numbetas.svml1.lessstd2 <- sim_mean_over_reps("numbetas.svml1.lessstd2")
sim.mean.numbetas.lrl1.none <- sim_mean_over_reps("numbetas.lrl1.none")
sim.mean.numbetas.lrl1.std <- sim_mean_over_reps("numbetas.lrl1.std")
sim.mean.numbetas.lrl1.less <- sim_mean_over_reps("numbetas.lrl1.less")
sim.mean.numbetas.lrl1.lessstd <- sim_mean_over_reps("numbetas.lrl1.lessstd")
sim.mean.numbetas.lrl1.lessstd2 <- sim_mean_over_reps("numbetas.lrl1.lessstd2")
sim.mean.numbetas.lasso.none <- sim_mean_over_reps("numbetas.lasso.none")
sim.mean.numbetas.lasso.std <- sim_mean_over_reps("numbetas.lasso.std")
sim.mean.numbetas.lasso.less <- sim_mean_over_reps("numbetas.lasso.less")
sim.mean.numbetas.lasso.lessstd <- sim_mean_over_reps("numbetas.lasso.lessstd")
sim.mean.numbetas.lasso.lessstd2 <- sim_mean_over_reps("numbetas.lasso.lessstd2")
# Mean Test AUC over all K simulations
sim.mean.auc.less.none <- sim_mean_over_reps("auc.less.none")
sim.mean.auc.less.std <- sim_mean_over_reps("auc.less.std")
sim.mean.auc.less.less <- sim_mean_over_reps("auc.less.less")
sim.mean.auc.less.lessstd <- sim_mean_over_reps("auc.less.lessstd")
sim.mean.auc.less.lessstd2 <- sim_mean_over_reps("auc.less.lessstd2")
sim.mean.auc.svml1.none <- sim_mean_over_reps("auc.svml1.none")
sim.mean.auc.svml1.std <- sim_mean_over_reps("auc.svml1.std")
sim.mean.auc.svml1.less <- sim_mean_over_reps("auc.svml1.less")
sim.mean.auc.svml1.lessstd <- sim_mean_over_reps("auc.svml1.lessstd")
sim.mean.auc.svml1.lessstd2 <- sim_mean_over_reps("auc.svml1.lessstd2")
sim.mean.auc.lrl1.none <- sim_mean_over_reps("auc.lrl1.none")
sim.mean.auc.lrl1.std <- sim_mean_over_reps("auc.lrl1.std")
sim.mean.auc.lrl1.less <- sim_mean_over_reps("auc.lrl1.less")
sim.mean.auc.lrl1.lessstd <- sim_mean_over_reps("auc.lrl1.lessstd")
sim.mean.auc.lrl1.lessstd2 <- sim_mean_over_reps("auc.lrl1.lessstd2")
sim.mean.auc.lasso.none <- sim_mean_over_reps("auc.lasso.none")
sim.mean.auc.lasso.std <- sim_mean_over_reps("auc.lasso.std")
sim.mean.auc.lasso.less <- sim_mean_over_reps("auc.lasso.less")
sim.mean.auc.lasso.lessstd <- sim_mean_over_reps("auc.lasso.lessstd")
sim.mean.auc.lasso.lessstd2 <- sim_mean_over_reps("auc.lasso.lessstd2")
# Mean Test Accuracy over all K simulations
sim.mean.accuracy.less.none <- sim_mean_over_reps("accuracy.less.none")
sim.mean.accuracy.less.std <- sim_mean_over_reps("accuracy.less.std")
sim.mean.accuracy.less.less <- sim_mean_over_reps("accuracy.less.less")
sim.mean.accuracy.less.lessstd <- sim_mean_over_reps("accuracy.less.lessstd")
sim.mean.accuracy.less.lessstd2 <- sim_mean_over_reps("accuracy.less.lessstd2")
sim.mean.accuracy.svml1.none <- sim_mean_over_reps("accuracy.svml1.none")
sim.mean.accuracy.svml1.std <- sim_mean_over_reps("accuracy.svml1.std")
sim.mean.accuracy.svml1.less <- sim_mean_over_reps("accuracy.svml1.less")
sim.mean.accuracy.svml1.lessstd <- sim_mean_over_reps("accuracy.svml1.lessstd")
sim.mean.accuracy.svml1.lessstd2 <- sim_mean_over_reps("accuracy.svml1.lessstd2")
sim.mean.accuracy.lrl1.none <- sim_mean_over_reps("accuracy.lrl1.none")
sim.mean.accuracy.lrl1.std <- sim_mean_over_reps("accuracy.lrl1.std")
sim.mean.accuracy.lrl1.less <- sim_mean_over_reps("accuracy.lrl1.less")
sim.mean.accuracy.lrl1.lessstd <- sim_mean_over_reps("accuracy.lrl1.lessstd")
sim.mean.accuracy.lrl1.lessstd2 <- sim_mean_over_reps("accuracy.lrl1.lessstd2")
sim.mean.accuracy.lasso.none <- sim_mean_over_reps("accuracy.lasso.none")
sim.mean.accuracy.lasso.std <- sim_mean_over_reps("accuracy.lasso.std")
sim.mean.accuracy.lasso.less <- sim_mean_over_reps("accuracy.lasso.less")
sim.mean.accuracy.lasso.lessstd <- sim_mean_over_reps("accuracy.lasso.lessstd")
sim.mean.accuracy.lasso.lessstd2 <- sim_mean_over_reps("accuracy.lasso.lessstd2")
# Assemble one long-format results frame: 20 = 4 methods x 5 scalings blocks
# of (length(P.log) - 1) dimension steps each. The stacking order of the
# c(...) vectors below MUST match the Method / Scaling label construction at
# the bottom (method order LESS, SVM, LogReg, LASSO; scaling order none,
# std, less, lessstd, lessstd2 within each method).
# [-1] drops the first P.log entry from every series
# (NOTE(review): presumably the smallest dimension is excluded from the
# plots -- confirm).
sim.mean.df <- data.frame(Dimensions = rep(P.log[-1], 20),
                          mean.numbetas = c(sim.mean.numbetas.less.none[-1],
                                            sim.mean.numbetas.less.std[-1],
                                            sim.mean.numbetas.less.less[-1],
                                            sim.mean.numbetas.less.lessstd[-1],
                                            sim.mean.numbetas.less.lessstd2[-1],
                                            sim.mean.numbetas.svml1.none[-1],
                                            sim.mean.numbetas.svml1.std[-1],
                                            sim.mean.numbetas.svml1.less[-1],
                                            sim.mean.numbetas.svml1.lessstd[-1],
                                            sim.mean.numbetas.svml1.lessstd2[-1],
                                            sim.mean.numbetas.lrl1.none[-1],
                                            sim.mean.numbetas.lrl1.std[-1],
                                            sim.mean.numbetas.lrl1.less[-1],
                                            sim.mean.numbetas.lrl1.lessstd[-1],
                                            sim.mean.numbetas.lrl1.lessstd2[-1],
                                            sim.mean.numbetas.lasso.none[-1],
                                            sim.mean.numbetas.lasso.std[-1],
                                            sim.mean.numbetas.lasso.less[-1],
                                            sim.mean.numbetas.lasso.lessstd[-1],
                                            sim.mean.numbetas.lasso.lessstd2[-1]),
                          mean.auc = c(sim.mean.auc.less.none[-1],
                                       sim.mean.auc.less.std[-1],
                                       sim.mean.auc.less.less[-1],
                                       sim.mean.auc.less.lessstd[-1],
                                       sim.mean.auc.less.lessstd2[-1],
                                       sim.mean.auc.svml1.none[-1],
                                       sim.mean.auc.svml1.std[-1],
                                       sim.mean.auc.svml1.less[-1],
                                       sim.mean.auc.svml1.lessstd[-1],
                                       sim.mean.auc.svml1.lessstd2[-1],
                                       sim.mean.auc.lrl1.none[-1],
                                       sim.mean.auc.lrl1.std[-1],
                                       sim.mean.auc.lrl1.less[-1],
                                       sim.mean.auc.lrl1.lessstd[-1],
                                       sim.mean.auc.lrl1.lessstd2[-1],
                                       sim.mean.auc.lasso.none[-1],
                                       sim.mean.auc.lasso.std[-1],
                                       sim.mean.auc.lasso.less[-1],
                                       sim.mean.auc.lasso.lessstd[-1],
                                       sim.mean.auc.lasso.lessstd2[-1]),
                          mean.accuracy = c(sim.mean.accuracy.less.none[-1],
                                            sim.mean.accuracy.less.std[-1],
                                            sim.mean.accuracy.less.less[-1],
                                            sim.mean.accuracy.less.lessstd[-1],
                                            sim.mean.accuracy.less.lessstd2[-1],
                                            sim.mean.accuracy.svml1.none[-1],
                                            sim.mean.accuracy.svml1.std[-1],
                                            sim.mean.accuracy.svml1.less[-1],
                                            sim.mean.accuracy.svml1.lessstd[-1],
                                            sim.mean.accuracy.svml1.lessstd2[-1],
                                            sim.mean.accuracy.lrl1.none[-1],
                                            sim.mean.accuracy.lrl1.std[-1],
                                            sim.mean.accuracy.lrl1.less[-1],
                                            sim.mean.accuracy.lrl1.lessstd[-1],
                                            sim.mean.accuracy.lrl1.lessstd2[-1],
                                            sim.mean.accuracy.lasso.none[-1],
                                            sim.mean.accuracy.lasso.std[-1],
                                            sim.mean.accuracy.lasso.less[-1],
                                            sim.mean.accuracy.lasso.lessstd[-1],
                                            sim.mean.accuracy.lasso.lessstd2[-1]),
                          # Labels: order mirrors the value stacking above
                          Method = c(rep("LESS", (length(P.log) - 1) * 5),
                                     rep("SVM", (length(P.log) - 1) * 5),
                                     rep("LogReg", (length(P.log) - 1) * 5),
                                     rep("LASSO", (length(P.log) - 1) * 5)),
                          Scaling = rep(c(rep("none", (length(P.log) - 1)),
                                          rep("std", (length(P.log) - 1)),
                                          rep("less", (length(P.log) - 1)),
                                          rep("lessstd", (length(P.log) - 1)),
                                          rep("lessstd2", (length(P.log) - 1))), 4))
# Persist raw per-replicate results, aggregated frame, and timing
save(sim.list, sim.mean.df, total.time, file = "Simulation5.RData")
#### Plot results ####
# # Colors
groups <- 4  # one colour per classification method
# Evenly spaced hues (ggplot2-style default palette)
cols <- hcl(h = seq(15, 375, length = groups + 1), l = 65, c = 100)[1:groups]
# plot(1:groups, pch = 16, cex = 7, col = cols)
# plot(c(2:6, 9:13, 17:21, 24:28), pch = 16, cex = 7, col = cols[c(2:6, 9:13, 17:21, 24:28)])
# cols
# Relevel factors for correct order in plots (legend / colour order)
sim.mean.df$Method <- factor(sim.mean.df$Method, levels = c("LASSO", "LogReg", "SVM", "LESS"))
sim.mean.df$Scaling <- factor(sim.mean.df$Scaling, levels = c("none", "std", "less", "lessstd", "lessstd2"))
# Remove LESS_none and LESS_std (combinations not reported in the plots)
sim.mean.df <- subset(sim.mean.df,
                      !((sim.mean.df$Method == "LESS" & sim.mean.df$Scaling == "none") |
                          (sim.mean.df$Method == "LESS" & sim.mean.df$Scaling == "std")))
# Plot mean sparseness (log scale)
# Colour order cols[c(2, 3, 4, 1)] follows the releveled Method factor
# (LASSO, LogReg, SVM, LESS); linetype labels are TeX maths for the five
# scaling variants (raw x, standardised z, and the three LESS mappings).
ggsave("Simulation5_Sparseness_loglines.png",
       ggplot(sim.mean.df, aes(x = Dimensions)) +
         geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
         scale_x_log10() +
         annotation_logticks(sides = "b") +
         scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
         scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
                               labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
                                                     "$\\textit{\\mu_k \\sigma^2_k}$",
                                                     "$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
         # ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
         xlab("Number of variables") +
         ylab("Number of selected variables") +
         guides(color = guide_legend(order = 1, keywidth = 2.5),
                linetype = guide_legend(order = 2, keywidth = 2.5)) +
         theme_bw(),
       width = 150, height = 150, units = "mm")
# Plot test AUC (log scale)
ggsave("Simulation5_AUC_loglines.png",
       ggplot(sim.mean.df, aes(x = Dimensions)) +
         geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
         scale_x_log10() +
         annotation_logticks(sides = "b") +
         scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
         scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
                               labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
                                                     "$\\textit{\\mu_k \\sigma^2_k}$",
                                                     "$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
         # ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
         xlab("Number of variables") +
         ylab("AUC") +
         guides(color = guide_legend(order = 1, keywidth = 2.5),
                linetype = guide_legend(order = 2, keywidth = 2.5)) +
         theme_bw(),
       width = 150, height = 150, units = "mm")
# Plot test accuracy (log scale)
ggsave("Simulation5_Accuracy_loglines.png",
       ggplot(sim.mean.df, aes(x = Dimensions)) +
         geom_line(aes(y = mean.accuracy, colour = Method, linetype = Scaling), size = 1) +
         scale_x_log10() +
         annotation_logticks(sides = "b") +
         scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
         scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
                               labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
                                                     "$\\textit{\\mu_k \\sigma^2_k}$",
                                                     "$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
         # ggtitle(paste0("Test Accuracy (mean over ", K, " simulations)")) +
         xlab("Number of variables") +
         ylab("Accuracy") +
         guides(color = guide_legend(order = 1, keywidth = 2.5),
                linetype = guide_legend(order = 2, keywidth = 2.5)) +
         theme_bw(),
       width = 150, height = 150, units = "mm")
### Sparseness plots per classification method #################################
# One sparseness plot per method; the single colour index matches each
# method's colour in the combined plots above (LESS = cols[1], SVM = cols[4],
# LogReg = cols[3], LASSO = cols[2]).
ggsave("sim5_method/Simulation5_Sparseness_loglines_LESS.png",
       ggplot(sim.mean.df[sim.mean.df$Method == "LESS", ],
              aes(x = Dimensions)) +
         geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
         scale_x_log10() +
         annotation_logticks(sides = "b") +
         scale_colour_manual(values = cols[1]) +
         scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
                               labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
                                                     "$\\textit{\\mu_k \\sigma^2_k}$",
                                                     "$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
         # ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
         xlab("Number of variables") +
         ylab("Number of selected variables") +
         guides(color = guide_legend(order = 1, keywidth = 2.5),
                linetype = guide_legend(order = 2, keywidth = 2.5)) +
         theme_bw(),
       width = 150, height = 150, units = "mm")
ggsave("sim5_method/Simulation5_Sparseness_loglines_SVM.png",
       ggplot(sim.mean.df[sim.mean.df$Method == "SVM", ],
              aes(x = Dimensions)) +
         geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
         scale_x_log10() +
         annotation_logticks(sides = "b") +
         scale_colour_manual(values = cols[4]) +
         scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
                               labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
                                                     "$\\textit{\\mu_k \\sigma^2_k}$",
                                                     "$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
         # ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
         xlab("Number of variables") +
         ylab("Number of selected variables") +
         guides(color = guide_legend(order = 1, keywidth = 2.5),
                linetype = guide_legend(order = 2, keywidth = 2.5)) +
         theme_bw(),
       width = 150, height = 150, units = "mm")
ggsave("sim5_method/Simulation5_Sparseness_loglines_LogReg.png",
       ggplot(sim.mean.df[sim.mean.df$Method == "LogReg", ],
              aes(x = Dimensions)) +
         geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
         scale_x_log10() +
         annotation_logticks(sides = "b") +
         scale_colour_manual(values = cols[3]) +
         scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
                               labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
                                                     "$\\textit{\\mu_k \\sigma^2_k}$",
                                                     "$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
         # ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
         xlab("Number of variables") +
         ylab("Number of selected variables") +
         guides(color = guide_legend(order = 1, keywidth = 2.5),
                linetype = guide_legend(order = 2, keywidth = 2.5)) +
         theme_bw(),
       width = 150, height = 150, units = "mm")
ggsave("sim5_method/Simulation5_Sparseness_loglines_LASSO.png",
ggplot(sim.mean.df[sim.mean.df$Method == "LASSO", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[2]) +
scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
"$\\textit{\\mu_k \\sigma^2_k}$",
"$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
# ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("Number of selected variables") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
### AUC plots per classification method ########################################
ggsave("sim5_method/Simulation5_AUC_loglines_LESS.png",
ggplot(sim.mean.df[sim.mean.df$Method == "LESS", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[1]) +
scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
"$\\textit{\\mu_k \\sigma^2_k}$",
"$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
# ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("AUC") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_method/Simulation5_AUC_loglines_SVM.png",
ggplot(sim.mean.df[sim.mean.df$Method == "SVM", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[4]) +
scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
"$\\textit{\\mu_k \\sigma^2_k}$",
"$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
# ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("AUC") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_method/Simulation5_AUC_loglines_LogReg.png",
ggplot(sim.mean.df[sim.mean.df$Method == "LogReg", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[3]) +
scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
"$\\textit{\\mu_k \\sigma^2_k}$",
"$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
# ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("AUC") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_method/Simulation5_AUC_loglines_LASSO.png",
ggplot(sim.mean.df[sim.mean.df$Method == "LASSO", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[2]) +
scale_linetype_manual(values = c("dotted", "solid", "dashed", "longdash", "twodash"),
labels = unname(TeX(c("$\\textit{x}$", "$\\textit{z}$", "$\\textit{\\mu_k}$",
"$\\textit{\\mu_k \\sigma^2_k}$",
"$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
# ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("AUC") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
### Sparseness plots per scaling method ########################################
ggsave("sim5_scaling/Simulation5_Sparseness_loglines_none.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "none", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("dotted"),
labels = unname(TeX(c("$\\textit{x}$")))) +
# ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("Number of selected variables") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_scaling/Simulation5_Sparseness_loglines_std.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "std", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("solid"),
labels = unname(TeX(c("$\\textit{z}$")))) +
# ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("Number of selected variables") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_scaling/Simulation5_Sparseness_loglines_les.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "less", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("dashed"),
labels = unname(TeX(c("$\\textit{\\mu_k}$")))) +
# ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("Number of selected variables") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_scaling/Simulation5_Sparseness_loglines_lessstd.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "lessstd", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("longdash"),
labels = unname(TeX(c("$\\textit{\\mu_k \\sigma^2_k}$")))) +
# ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("Number of selected variables") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_scaling/Simulation5_Sparseness_loglines_lessstd2.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "lessstd2", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.numbetas, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("twodash"),
labels = unname(TeX(c("$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
# ggtitle(paste0("Model Sparseness (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("Number of selected variables") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
### AUC plots per scaling method ###############################################
ggsave("sim5_scaling/Simulation5_AUC_loglines_none.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "none", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("dotted"),
labels = unname(TeX(c("$\\textit{x}$")))) +
# ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("AUC") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_scaling/Simulation5_AUC_loglines_std.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "std", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("solid"),
labels = unname(TeX(c("$\\textit{z}$")))) +
# ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("AUC") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_scaling/Simulation5_AUC_loglines_les.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "less", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("dashed"),
labels = unname(TeX(c("$\\textit{\\mu_k}$")))) +
# ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("AUC") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_scaling/Simulation5_AUC_loglines_lessstd.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "lessstd", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("longdash"),
labels = unname(TeX(c("$\\textit{\\mu_k \\sigma^2_k}$")))) +
# ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("AUC") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
ggsave("sim5_scaling/Simulation5_AUC_loglines_lessstd2.png",
ggplot(sim.mean.df[sim.mean.df$Scaling == "lessstd2", ],
aes(x = Dimensions)) +
geom_line(aes(y = mean.auc, colour = Method, linetype = Scaling), size = 1) +
scale_x_log10() +
annotation_logticks(sides = "b") +
scale_colour_manual(values = cols[c(2, 3, 4, 1)]) +
scale_linetype_manual(values = c("twodash"),
labels = unname(TeX(c("$\\textit{\\mu_k \\bar{\\sigma}^2}$")))) +
# ggtitle(paste0("Test AUC (mean over ", K, " simulations)")) +
xlab("Number of variables") +
ylab("AUC") +
guides(color = guide_legend(order = 1, keywidth = 2.5),
linetype = guide_legend(order = 2, keywidth = 2.5)) +
theme_bw(),
width = 150, height = 150, units = "mm")
# Push a completion notification to Telegram.
# SECURITY: a live bot token was hard-coded here; it should be treated as
# compromised and rotated.  Credentials are now read from the environment,
# falling back to the original values so existing behaviour is unchanged.
send_telegram_message(text = "Simulation 5 is finished!",
                      chat_id = Sys.getenv("TELEGRAM_CHAT_ID", "441084295"),
                      bot_token = Sys.getenv("TELEGRAM_BOT_TOKEN",
                                             "880903665:AAE_f0i_bQRXBXJ4IR5TEuTt5C05vvaTJ5w"))
#### END #### |
095f542feed31de54178f144341a2c10657322d6 | 445dace456883329cc5c13d3b80140c5a3e0089c | /tests/testthat.R | ad1bc951999aa8f3ddf709dc9fd0658ecc461788 | [
"MIT"
] | permissive | prologr/paxor | a95b0873a569acac341e162d1d5da7a794061119 | 78f444077ee021d45a2e20bd869faad7b1b7eb7d | refs/heads/master | 2023-04-03T15:37:16.466277 | 2021-04-14T17:52:49 | 2021-04-14T17:52:49 | 355,420,362 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 54 | r | testthat.R | library(testthat)
library(paxor)
test_check("paxor")
|
cee7d23679893bbb248f7125f1ef248c4f4a45e5 | 1a612185b259689884472bf68336a2345d690eef | /analysis/rnaSeq/rcpp_src/testRcpp.R | 76424a8c0e23538385fc595ab72870ca0e7c45bb | [] | no_license | adamwespiser/encode-manager | 70648d750c83b0f21de5d23c130a6a63a53703f3 | 3b73d831ba0d237d33d50d654a87234d685d6a1e | refs/heads/master | 2021-01-15T21:20:46.376234 | 2015-08-03T07:04:27 | 2015-08-03T07:04:27 | 8,618,288 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,953 | r | testRcpp.R | rm(list=unlist(ls()))
library(Rcpp)
library(inline)
library(RcppArmadillo)
# p 26
# C++ source: discrete convolution of two numeric vectors a and b, i.e. the
# coefficients of the product of the polynomials with coefficients a and b
# (xab[k] = sum over i + j == k of xa[i] * xb[j]).
# NOTE(review): xab is allocated with length n_xa + n_xb + 1, but the largest
# index ever written is (n_xa - 1) + (n_xb - 1) = n_xa + n_xb - 2, so the
# last two entries always remain 0.
src1 <- '
Rcpp::NumericVector xa(a);
Rcpp::NumericVector xb(b);
int n_xa = xa.size(), n_xb = xb.size();
Rcpp::NumericVector xab(n_xa + n_xb + 1);
for (int i = 0; i < n_xa; i++)
for (int j = 0; j < n_xb; j++)
xab[i + j] += xa[i] * xb[j];
return xab;
'
# Compile the snippet into an R function fun1(a, b) with inline + Rcpp.
fun1 = cxxfunction(signature(a="numeric", b="numeric"), src1, plugin="Rcpp")
# Smoke test: convolve 1:4 with 2:5.
fun1(1:4,2:5)
# armadillo example:
# http://arma.sourceforge.net/docs.html
# C++ source: builds sim, the ELEMENT-WISE reciprocal of t(X) %*% X
# (Armadillo's scalar / matrix division is element-wise), then computes its
# symmetric eigendecomposition with eig_sym.  The `false` flag in the arma
# constructors wraps R's memory without copying.
# NOTE(review): only the eigenvalues are returned, under the (misleading)
# list name "vector"; eigenVectors and the ys argument are never used.
src2 <- '
Rcpp::NumericMatrix Xr(Xs);
Rcpp::NumericVector yr(ys);
int n = Xr.nrow(), k = Xr.ncol();
arma::mat X(Xr.begin(), n, k, false);
arma::colvec y(yr.begin(), yr.size(), false);
arma::mat sim = 1 / (trans(X)*X);
arma::mat eigenVectors;
arma::colvec eigenValues;
eig_sym(eigenValues, eigenVectors, sim);
return Rcpp::List::create(Rcpp::Named("vector") = eigenValues );
'
f2 = cxxfunction(signature(Xs="numeric", ys="numeric"), src2, plugin="RcppArmadillo")
# Benchmark-sized call kept commented out: a 10000 x 10000 eigenproblem.
#k = 10000;f2(Xs=matrix(runif(k*k),k,k),runif(k))
# C++ source: power iteration for the dominant eigenvector of
# sim = element-wise 1/(X %*% t(X)).  Starting from ys, it repeats
#   y <- sim %*% y;  y <- y / ||y||_2
# until consecutive iterates differ by less than 1e-10 in L2 norm or 2000
# iterations are reached, and returns the vector plus convergence
# diagnostics (last unnormalised iterate, previous iterate, last norm,
# final difference, iteration count).
# NOTE(review): if the dominant eigenvalue were negative the iterates would
# flip sign each step and the L2 stopping rule would never fire; with
# positive inputs every entry of sim is positive, so the dominant eigenvalue
# is positive (Perron-Frobenius) and the loop converges — confirm inputs.
src4 <- '
Rcpp::NumericMatrix Xr(Xs);
Rcpp::NumericVector yr(ys);
int n = Xr.nrow(), k = Xr.ncol();
arma::mat X(Xr.begin(), n, k, false);
arma::colvec y(yr.begin(), yr.size(), false);
arma::colvec y_prev = y;
arma::colvec y_temp = y;
arma::mat sim = (X*trans(X));
sim = arma::pow(sim,-1);
int i = 0;
double diff = 10000000.0;
double y_norm = 0;
while(diff > 1e-10 && i < 2000){
i++;
y_prev = y;
y_temp = sim * y;
y_norm = norm(y_temp,2);
y = y_temp / y_norm ;
//diff = sum(abs(y_prev - y)) ;
diff = norm(y_prev - y, 2);
}
return Rcpp::List::create(Rcpp::Named("y") = y,
                          Rcpp::Named("ytemp") = y_temp,
                          Rcpp::Named("yprev") = y_prev,
                          Rcpp::Named("ynorm") = y_norm,
                          Rcpp::Named("converge") = abs(sum(y_prev - y)),
                          Rcpp::Named("iters") = i,
                          Rcpp::Named("diff") = diff);
' # end of src4....
f4 = cxxfunction(signature(Xs="numeric", ys="numeric"), src4, plugin="RcppArmadillo")
# Smoke test on a random 500 x 40 design matrix.
a= f4(Xs = matrix(runif(500*40),500), ys = runif(500))
# Cross-check f4's power iteration against base R's eigen() on the SAME
# matrix.  The original built the eigen() reference from X while feeding
# f4 the shifted matrix X + 1; align the two so the comparison is valid.
X <- as.matrix(iris[, 1:4]) + 1
y <- runif(nrow(X))
ic <- f4(Xs = X, ys = y)
yp <- as.matrix(ic$y, 1)

S <- 1 / (X %*% t(X))    # element-wise reciprocal of the Gram matrix
e <- eigen(S)
ye <- e$vector[, 1]      # dominant eigenvector (eigen() sorts decreasing)

# Eigenvectors are defined only up to sign, so require |cor| ~ 1 instead of
# the original sign-dependent condition cor(yp, ye) < 1e-7, which only
# "passed" when the two solvers happened to return opposite signs.
if (abs(abs(cor(yp, ye)) - 1) < 0.0000001) {
  print("TEST PASSED")
} else {
  print("TEST FAILED")
}
# C++ source: self-contained variant — eigendecomposition (divide-and-conquer
# "dc" driver) of the element-wise reciprocal of a random 100 x 30 Gram
# matrix, returning a single eigenvector.
# NOTE(review): eig_sym stores eigenvectors as COLUMNS, ordered by ascending
# eigenvalue, so `eigenVectors.row(1)` looks wrong on two counts: a column
# (not a row) should be taken, and the dominant eigenvector is the LAST
# column, not index 1.  Assigning a row to an arma::colvec may also abort at
# run time.  Left untouched here; confirm intent before relying on f3().
src3 = '
int n = 100;
int k = 30;
arma::mat X = arma::randu<arma::mat>(100,30);
// X.randu(n,k);
arma::mat sim = (X*trans(X));
sim = arma::pow(sim,-1);
arma::mat eigenVectors;
arma::colvec eigenValues;
eig_sym(eigenValues, eigenVectors, sim, "dc");
//eigenValues.max(index);
arma::colvec output = eigenVectors.row(1);
return Rcpp::List::create(Rcpp::Named("eigenVector") = output );
';f3 = cxxfunction(signature(), src3, plugin="RcppArmadillo");f3()
|
fa1e5a7167f01d40d480ef27686d4122c58cd5f1 | 162ad14e40fb0ffba7a8b52c83c3a3406d60adc2 | /man/modify.cor.matrix.Rd | b4535e44bca422cd357d105328f93732b67cf735 | [] | no_license | guillaumeevin/GWEX | c09c1f53a7c54eebc209b1f4aa5b8484fb59faf6 | b1cae5f753a625d5963507b619af34efa2459280 | refs/heads/master | 2023-01-21T10:01:28.873553 | 2023-01-16T11:10:16 | 2023-01-16T11:10:16 | 172,738,929 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 839 | rd | modify.cor.matrix.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GWexPrec_lib.r
\name{modify.cor.matrix}
\alias{modify.cor.matrix}
\title{modify.cor.matrix}
\usage{
modify.cor.matrix(cor.matrix)
}
\arguments{
\item{cor.matrix}{possibly non-positive definite correlation matrix}
}
\value{
positive definite correlation matrix
}
\description{
Modify a non-positive definite correlation matrix in order to have a positive
definite matrix
}
\references{
Rousseeuw, P. J. and G. Molenberghs. 1993. Transformation of non positive semidefinite
correlation matrices. Communications in Statistics: Theory and Methods 22(4):965-984.
Rebonato, R., & Jackel, P. (2000). The most general methodology to create a valid
correlation matrix for risk management and option pricing purposes. J. Risk, 2(2), 17-26.
}
\author{
Guillaume Evin
}
|
7e2a1c0131faf88942a00501a1dc86a228a7ebe7 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/gcKrig/man/mlegc.Rd | 879817a3ba3c31f1486299c3d2cc769e4024aa35 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,779 | rd | mlegc.Rd |
\name{mlegc}
\alias{mlegc}
\title{Maximum Likelihood Estimation in Gaussian Copula Models for Geostatistical Count Data}
\usage{
mlegc(y, x = NULL, locs, marginal, corr, effort = 1, longlat = FALSE,
distscale = 1, method = "GHK", corrpar0 = NULL, ghkoptions = list(nrep
= c(100, 1000), reorder = FALSE, seed = 12345))
}
\arguments{
\item{y}{a non-negative integer vector of response with its length equals to the number of sampling locations.}
\item{x}{a numeric matrix or data frame of covariates,
with its number of rows equals to the number of sampling locations.
If no covariates then \code{x = NULL}.}
\item{locs}{a numeric matrix or data frame of \emph{n-D} points with row denoting points.
The first column is \emph{x} or longitude, the second column is \emph{y} or latitude.
The number of locations is equal to the number of rows.}
\item{marginal}{an object of class \code{\link{marginal.gc}} specifying the marginal distribution.}
\item{corr}{an object of class \code{\link{corr.gc}} specifying the correlation function.}
\item{effort}{the sampling effort. For binomial marginal it is the size parameter (number of trials).
See details.}
\item{longlat}{if FALSE, use Euclidean distance, if TRUE use great circle distance. The default is FALSE.}
\item{distscale}{a numeric scaling factor for computing distance. If original distance is in kilometers, then
\code{distscale = 1000} will convert it to meters.}
\item{method}{two methods are implemented. If
\code{method = 'GHK'} then the maximum simulated likelihood estimates are computed, if
\code{method = 'GQT'} then the maximum surrogate likelihood estimates are computed.}
\item{corrpar0}{the starting value of correlation parameter in the optimization procedure.
If \code{corrpar0 = NULL} then
initial range is set to be half of the median distance in distance matrix and initial nugget
(if \code{nugget = TRUE}) is 0.2.}
\item{ghkoptions}{a list of three elements that only need to be specified if \code{method = 'GHK'}.
\code{nrep} is the Monte Carlo size of the importance sampling algorithm for likelihood approximation.
It can be a vector with increasing positive integers so that the model is
fitted with a sequence of different Monte Carlo sizes, and the starting
values for optimization are taken from the previous fitting.
The default value is 100 for the first optimization and 1000 for the second and definitive optimization.
\code{reorder} indicates whether the integral will be reordered every iteration in computation
according to the algorithm in Gibson, etal (1994), default is FALSE.
\code{seed} is the seed of the pseudorandom generator used in Monte Carlo simulation.}
}
\value{
A list of class "mlegc" with the following elements:
\item{MLE}{the maximum likelihood estimate.}
\item{x}{the design matrix.}
\item{nug}{1 if \code{nugget = TRUE}, 0 if \code{nugget = FALSE}.}
\item{nreg}{number of regression parameters.}
\item{log.lik}{the value of the maximum log-likelihood.}
\item{AIC}{the Akaike information criterion.}
\item{AICc}{the AICc information criterion; essentially AIC with a greater penalty for extra parameters.}
\item{BIC}{ the Bayesian information criterion.}
\item{kmarg}{number of marginal parameters.}
\item{par.df}{number of parameters.}
\item{N}{number of observations.}
\item{D}{the distance matrix.}
\item{optlb}{lower bound in optimization.}
\item{optub}{upper bound in optimization.}
\item{hessian}{the hessian matrix evaluated at the final estimates.}
\item{args}{arguments passed in function evaluation.}
}
\description{
Computes the maximum likelihood estimates. Two methods are implemented. If
\code{method = 'GHK'} then the maximum simulated likelihood estimates are computed, if
\code{method = 'GQT'} then the maximum surrogate likelihood estimates are computed.
}
\details{
This program implements a simulated likelihood method via sequential importance
sampling (see Masarotto and Varin 2012), which is the same as the method implemented in package
\code{gcmr} (Masarotto and Varin 2016) except that an antithetic variable is used. It also implements
a surrogate likelihood method via the distributional transform (see Kazianka and Pilz 2010), which is
generally faster.
The argument \code{effort} is the (known) sampling effort. It can be used to account for heterogeneity
of the measurement time or area across locations.
The default is 1 for all locations. See Han and De Oliveira (2016) for more details.
}
\examples{
\dontrun{
## Fit a Simulated Dataset with 100 locations
grid <- seq(0.05, 0.95, by = 0.1)
xloc <- expand.grid(x = grid, y = grid)[,1]
yloc <- expand.grid(x = grid, y = grid)[,2]
set.seed(123)
simData1 <- simgc(locs = cbind(xloc,yloc), sim.n = 1,
marginal = negbin.gc(mu = exp(1+xloc), od = 1),
corr = matern.gc(range = 0.4, kappa = 0.5, nugget = 0))
simFit1 <- mlegc(y = simData1$data, x = xloc, locs = cbind(xloc,yloc),
marginal = negbin.gc(link = 'log'),
corr = matern.gc(kappa = 0.5, nugget = FALSE), method = 'GHK')
simFit2 <- mlegc(y = simData1$data, x = xloc, locs = cbind(xloc,yloc),
marginal = negbin.gc(link = 'log'),
corr = matern.gc(kappa = 0.5, nugget = FALSE), method = 'GQT')
#summary(simFit1);summary(simFit2)
#plot(simFit1);plot(simFit2)
## Time consuming examples
## Fit a real dataset with 70 sampling locations.
data(Weed95)
weedobs <- Weed95[Weed95$dummy==1, ]
weedpred <- Weed95[Weed95$dummy==0, ]
Weedfit1 <- mlegc(y = weedobs$weedcount, x = weedobs[,4:5], locs = weedobs[,1:2],
marginal = poisson.gc(link='log'),
corr = matern.gc(kappa = 0.5, nugget = TRUE),
method = 'GHK')
summary(Weedfit1)
plot(Weedfit1)
## Fit a real dataset with 256 locations
data(LansingTrees)
Treefit1 <- mlegc(y = LansingTrees[,3], x = LansingTrees[,4], locs = LansingTrees[,1:2],
marginal = negbin.gc(link = 'log'),
corr = matern.gc(kappa = 0.5, nugget = FALSE), method = 'GHK')
summary(Treefit1)
plot(Treefit1)
# Try to use GQT method
Treefit2<- mlegc(y = LansingTrees[,3], x = LansingTrees[,4],
locs = LansingTrees[,1:2], marginal = poisson.gc(link='log'),
corr = matern.gc(kappa = 0.5, nugget = TRUE), method = 'GQT')
summary(Treefit2)
plot(Treefit2)
## Fit a real dataset with randomized locations
data(AtlanticFish)
Fitfish <- mlegc(y = AtlanticFish[,3], x = AtlanticFish[,4:6], locs = AtlanticFish[,1:2],
longlat = TRUE, marginal = negbin.gc(link='log'),
corr = matern.gc(kappa = 0.5, nugget = TRUE), method = 'GHK')
summary(Fitfish)
## Fit a real dataset with binomial counts; see Masarotto and Varin (2016).
library(gcmr)
data(malaria)
malariax <- data.frame(netuse = malaria$netuse,
green = malaria$green/100,
phc = malaria$phc)
Fitmalaria <- mlegc(y = malaria$cases, x = malariax, locs = malaria[,1:2],
marginal = binomial.gc(link='logit'), corrpar0 = 1.5,
corr = matern.gc(kappa = 0.5, nugget = FALSE),
distscale = 0.001, effort = malaria$size, method = 'GHK')
summary(Fitmalaria)
## Fit a real spatial binary dataset with 333 locations using probit link
data(OilWell)
Oilest1 <- mlegc(y = OilWell[,3], x = NULL, locs = OilWell[,1:2],
marginal = binomial.gc(link = 'probit'),
corr = matern.gc(nugget = TRUE), method = 'GHK')
summary(Oilest1)
plot(Oilest1, col = 2)
}
}
\author{
Zifei Han \email{hanzifei1@gmail.com}
}
\references{
Han, Z. and De Oliveira, V. (2016) On the correlation structure of Gaussian
copula models for geostatistical count data.
\emph{Australian and New Zealand Journal of Statistics, 58:47-69}.
Kazianka, H. and Pilz, J. (2010)
Copula-based geostatistical modeling of continuous and discrete data including covariates.
\emph{Stoch Environ Res Risk Assess 24:661-673}.
Masarotto, G. and Varin, C. (2012)
Gaussian copula marginal regression.
\emph{Electronic Journal of Statistics 6:1517-1549}.
\url{https://projecteuclid.org/euclid.ejs/1346421603}.
Masarotto, G. and Varin C. (2017). Gaussian Copula Regression in R. \emph{Journal of Statistical Software}, \bold{77}(8), 1--26. \doi{10.18637/jss.v077.i08}.
Han, Z. and De Oliveira, V. (2018) gcKrig: An R Package for the Analysis of Geostatistical Count Data Using Gaussian Copulas.
\emph{Journal of Statistical Software}, \bold{87}(13), 1--32. \doi{10.18637/jss.v087.i13}.
}
\seealso{
\code{\link[gcmr]{gcmr}}
}
\keyword{Estimation}
|
a326a1ab23e60a2e682c4ebfda6995f6fe758a43 | b2360322fc78847770c990a55a2e0859f328c520 | /projects/Prediction/exersice1.R | 7e6e6a2f667019553e179ea44dd36be9f1844ff5 | [] | no_license | senthil-lab/RWork | 6739546116a67cd8a6e95d5760d230f3f7432643 | 3754a2c4f50c39b9be189a48c7ee2087ce2e248c | refs/heads/master | 2020-12-18T18:19:09.299030 | 2020-10-04T18:28:28 | 2020-10-04T18:28:28 | 235,481,973 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,675 | r | exersice1.R | library(HistData)
library(dplyr)
library(caret)
# Reproducibility: sample.kind = "Rounding" restores the pre-R-3.6 sampler.
set.seed(1, sample.kind="Rounding")
n <- 100
# Covariance of a bivariate normal with sd = 3 in both coordinates and
# correlation 0.95 (off-diagonal 9 * 0.95 = 8.55).
Sigma <- 9*matrix(c(1.0, 0.95, 0.95, 1.0), 2, 2)
# Simulate 100 (x, y) pairs centred at (69, 69).
# NOTE(review): the sample size is hard-coded as 100 here instead of using n.
dat <- MASS::mvrnorm(n = 100, c(69, 69), Sigma) %>%
  data.frame() %>% setNames(c("x", "y"))
Sigma          # print the covariance matrix
dat %>% head() # peek at the simulated data
# We will build 100 linear models using the data above
#and calculate the mean and standard deviation of the combined models.
# First, set the seed to 1 again (make sure to use sample.kind="Rounding" if your R is version 3.6 or later).
# Then, within a replicate() loop,
# (1) partition the dataset into test and training sets with p=0.5 and using dat$y to generate your indices,
# (2) train a linear model predicting y from x,
# (3) generate predictions on the test set, and
# (4) calculate the RMSE of that model.
# Then, report the mean and standard deviation (SD) of the RMSEs from all 100 models.
# Repeat the split/fit/score experiment n times and summarise the RMSEs.
# Each draw: random 50/50 partition of dat, fit y ~ x on one half, then
# compute the root-mean-squared prediction error on the held-out half.
set.seed(1, sample.kind = "Rounding")
RMSE <- replicate(n, {
  held_out <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
  fitted_model <- lm(y ~ x, data = slice(dat, -held_out))
  eval_half <- slice(dat, held_out)
  prediction <- predict(fitted_model, eval_half)
  sqrt(mean((prediction - eval_half$y)^2))
})
length(RMSE)  # number of repetitions
mean(RMSE)    # average test RMSE
sd(RMSE)      # spread of the test RMSE across splits
# Simulate n bivariate-normal observations (mean 69/69, rho = 0.5, variance 9),
# then fit 100 train/test linear models y ~ x on random 50/50 splits.
# Returns c(mean, sd) of the 100 test-set RMSEs.
# NOTE: the sequence of RNG-consuming calls matches the original exactly so
# that results are reproducible under the same seed.
func <- function(n) {
  cov_mat <- 9 * matrix(c(1.0, 0.5, 0.5, 1.0), 2, 2)
  sim <- setNames(data.frame(MASS::mvrnorm(n, c(69, 69), cov_mat)), c("x", "y"))
  rmse_vals <- replicate(100, {
    idx <- createDataPartition(sim$y, times = 1, p = 0.5, list = FALSE)
    training <- slice(sim, -idx)
    testing <- slice(sim, idx)
    model <- lm(y ~ x, data = training)
    preds <- predict(model, testing)
    sqrt(mean((preds - testing$y)^2))
  })
  c(mean(rmse_vals), sd(rmse_vals))
}
# Repeat the simulation/model pipeline (func) for increasing sample sizes.
set.seed(1, sample.kind="Rounding")
n <- c(100, 500, 1000, 5000, 10000)
# Each column of `result` is c(mean RMSE, sd RMSE) for one sample size.
# NOTE(review): sapply() is fine interactively; vapply() would pin the
# return type in non-interactive code.
result <- sapply(n, func)
result
# Exercise with two correlated predictors (x_1, x_2) of y.
set.seed(1, sample.kind="Rounding")
n <- 100
Sigma <- matrix(c(1.0, 0.75, 0.75, 0.75, 1.0, 0.95, 0.75, 0.95, 1.0), 3, 3)
dat <- MASS::mvrnorm(n = 100, c(0, 0, 0), Sigma) %>%
data.frame() %>% setNames(c("y", "x_1", "x_2"))
Sigma
dat %>% head()
cor(dat)
# One fixed split, then compare test RMSE of x_1 alone, x_2 alone, and both.
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(dat$y, times = 1, p = 0.5, list = FALSE)
train_set <- dat %>% slice(-test_index)
test_set <- dat %>% slice(test_index)
fit <- lm(y ~ x_1, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat-test_set$y)^2))
fit <- lm(y ~ x_2, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat-test_set$y)^2))
fit <- lm(y ~ x_1 + x_2, data = train_set)
y_hat <- predict(fit, newdata = test_set)
sqrt(mean((y_hat-test_set$y)^2))
|
258abea2f1b7af92a2d0f55f272b866d31012aee | 21818aeceda73fc35827ef8e79a56bb715305eb6 | /Evaluation/hyperparameters_plot.R | aa50558120ad52379e6b90b1680928289ffbfbd6 | [
"MIT"
] | permissive | JiahuaQu/Cell_BLAST | 25ab0c5072a05faa49cd2fcc4b5c743ae5d3b125 | 45b14bbd3385b8a7be0b48ef5ab42bc946f3558f | refs/heads/master | 2023-07-17T02:21:18.868383 | 2021-09-01T03:08:36 | 2021-09-01T03:08:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,703 | r | hyperparameters_plot.R | #!/usr/bin/env Rscript
source("../.Rprofile", chdir = TRUE)
suppressPackageStartupMessages({
library(ggplot2)
library(ggsci)
library(reshape2)
library(dplyr)
library(ggsci)
library(ggpubr)
library(extrafont)
})
source("../Utilities/utils.R")
# Read data
# Benchmark results: one row per (dataset, hyperparameter combination, run).
df <- read.csv(snakemake@input[["data"]], check.names = FALSE, stringsAsFactors = FALSE)
# Order dataset levels by cell count (smallest first).
df$dataset <- factor(df$dataset, levels = df %>%
select(dataset, n_cell) %>%
arrange(n_cell) %>%
distinct() %>%
pull(dataset)
) # This determines dataset order
# The hyperparameters being swept; each becomes one facet in the plot.
facets <- c(
"dimensionality", "hidden_layer", "depth",
"cluster", "lambda_prior", "prob_module"
)
df_list <- list()
# For each hyperparameter, keep only rows where every OTHER hyperparameter
# sits at its configured default, then melt that hyperparameter's column
# into (variable, value) long format.
for (facet in facets) {
mask <- Reduce(`&`, lapply(
setdiff(facets, facet),
function(item) df[[item]] == snakemake@config[[item]][["default"]]
))
df_list[[facet]] <- df[mask, c(facet, setdiff(colnames(df), facets))]
df_list[[facet]] <- melt(df_list[[facet]], measure.vars = facet)
}
df <- Reduce(rbind, df_list)
# Order hyperparameter values: numeric values in numeric order first,
# then non-numeric values alphabetically after them.
df_val_levels <- unique(df$value)
df_val_rank <- integer(length(df_val_levels))
suppressWarnings(mask <- !is.na(as.numeric(df_val_levels)))
df_val_rank[mask] <- order(as.numeric(df_val_levels[mask]))
df_val_rank[!mask] <- order(df_val_levels[!mask]) + sum(mask)
df$value <- factor(df$value, levels = df_val_levels[df_val_rank])
# One color per dataset.
color_mapping <- pal_d3("category10")(nlevels(df$dataset))
# Line plot of mean average precision (error bars = +/- 1 SD across runs)
# against each hyperparameter value, one panel per hyperparameter.
gp <- ggplot(data = df %>% group_by(dataset, variable, value) %>% summarise(
sd = sd(mean_average_precision),
mean_average_precision = mean(mean_average_precision)
), mapping = aes(
x = value, y = mean_average_precision,
ymin = mean_average_precision - sd,
ymax = mean_average_precision + sd,
group = dataset, col = dataset
)) + geom_line() + geom_errorbar(
width = 0.1
) + facet_wrap(
~variable, scales = "free_x"
) + scale_x_discrete(
name = "Hyperparameter value"
) + scale_y_continuous(
name = "Mean average precision"
) + scale_color_manual(
name = "Dataset", values = color_mapping
)
ggsave(snakemake@output[["map"]], mod_style(gp), width = 7.5, height = 5)
# Re-order datasets per the pipeline config, then append each dataset's
# sequencing platform (from the dataset metadata CSVs) to its facet label.
df$dataset <- factor(df$dataset, levels = snakemake@config[["dataset"]])
dataset_meta <- rbind(
read.csv(
"../Datasets/ACA_datasets.csv", row.names = 1, comment = "#",
check.names = FALSE, stringsAsFactors = FALSE
)[, "platform", drop = FALSE],
read.csv(
"../Datasets/additional_datasets.csv", row.names = 1, comment = "#",
check.names = FALSE, stringsAsFactors = FALSE
)[, "platform", drop = FALSE]
)
levels(df$dataset) <- sapply(levels(df$dataset), function(x) {
sprintf("%s\n(%s)", x, dataset_meta[x, "platform"])
})
# Boxplots comparing the two generative distributions (prob_module),
# with a Wilcoxon rank-sum p-value per dataset panel.
prob_df <- df %>% filter(variable == "prob_module")
prob_df_blank <- prob_df %>% group_by(dataset, value) %>% group_modify(function(.x, .y) {
.x$mean_average_precision <- .x$mean_average_precision + 0.5 * sd(.x$mean_average_precision)
.x
}) %>% ungroup() # Slightly increase the gap between significance labels and boxes
gp <- ggplot(data = prob_df, mapping = aes(
x = value, y = mean_average_precision,
col = value, fill = value
)) + geom_boxplot(alpha = 0.5, width = 0.5) + facet_wrap(
~dataset, scales = "free_y", ncol = 3
) + stat_compare_means(
mapping = aes(label = paste0(..p.signif.., " (p = ", ..p.format.., ")")),
method = "wilcox.test", label.x.npc = 0.3, size = 3.0
) + geom_blank(data = prob_df_blank) + scale_x_discrete(
name = "Generative distribution"
) + scale_y_continuous(
name = "Mean average precision"
) + scale_fill_d3() + scale_color_d3() + guides(fill = FALSE, color = FALSE)
ggsave(snakemake@output[["probmap"]], mod_style(gp), width = 7, height = 8)
|
5212a740906fde96d76564d14f06a9f7e85c934c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/spatstat/examples/Finhom.Rd.R | 9cb4b974966b96c62079fbc3cd3f7d44d2964258 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 297 | r | Finhom.Rd.R | library(spatstat)
### Name: Finhom
### Title: Inhomogeneous Empty Space Function
### Aliases: Finhom
### Keywords: spatial nonparametric
### ** Examples
# Auto-generated example script extracted from the Finhom help page;
# the "Not run" block uses a data-driven bandwidth and is kept commented out.
## Not run:
##D plot(Finhom(swedishpines, sigma=bw.diggle, adjust=2))
##D
## End(Not run)
# Inhomogeneous F-function with a fixed smoothing bandwidth (sigma = 10).
plot(Finhom(swedishpines, sigma=10))
|
2734e147799241c654380e29ccc5b7755b72f562 | 41329af4cb486a2d26931085312645aa3ddf80e6 | /man/prune2data.Rd | 8aabc402249bea095ffeaa630a9541208e9fd892 | [] | no_license | cran/windex | 2ef5f63ecbe60a421c85e501366e6c2ecbe5138b | c7eff56deb0cdee331d899dda446f85c196bf627 | refs/heads/master | 2023-09-01T07:28:12.027450 | 2023-08-24T13:30:02 | 2023-08-24T15:30:42 | 25,276,782 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 794 | rd | prune2data.Rd | \name{prune2data}
\alias{prune2data}
\title{
Prunes a phylogenetic tree to match a vector (e.g. of species names in a dataset)
}
\description{
Takes a 'phylo' object and a vector of names to be matched against tip labels, and returns a pruned phylogeny containing only the tips whose labels match those in the vector.
}
\usage{
prune2data(tree, species)
}
\arguments{
\item{tree}{
Phylogenetic tree of class 'phylo'.
}
\item{species}{
Vector of names to be matched against tip labels of the tree.
}
}
\value{
Returns a phylogenetic tree of the class 'phylo' containing only tips whose labels match the input vector (species)
}
\author{
Kevin Arbuckle
}
\examples{
data(sample.data)
data(sample.tree)
tree<-prune2data(sample.tree,sample.data$species[1:10])
plot(tree)
}
|
00d325aa63aec4d40ca4362c71076c610aee99a5 | 8591c35b0ed4035aee8b174fb003965dc0709031 | /wave/1-calc_fetch.R | 539c8a2342c302762b9ee964281b27efd5f05a39 | [] | no_license | ultimatemegs/msec | ad713d47caad0a712448c410b5eac9597e9006ff | 576c3c842327198538c3e6cf8b82ec5cda555174 | refs/heads/master | 2020-03-23T14:13:48.590647 | 2017-05-09T13:53:16 | 2017-05-09T13:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,943 | r | 1-calc_fetch.R | # Calculate fetch for all grid cells points within 50km from a coastline
library(rgdal)
library(rgeos)
library(raster)
library(waver)
source("utils.R")
gshhs_dir <- "{{Insert path to GSHHS shapefiles}}"
# Combine shorelines from GSHHS L1 (World except Antartica) and L5 (Antarctica sea-ice border)
gshhs1 <- readOGR(file.path(gshhs_dir, "f"), "GSHHS_f_L1")
gshhs5 <- readOGR(file.path(gshhs_dir, "h"), "GSHHS_h_L5")
gshhs_full <- rbind(gshhs1, gshhs5, makeUniqueIDs = TRUE)
# Drop attribute data; only the polygon geometry is needed for fetch.
gshhs_full <- as(gshhs_full, "SpatialPolygons")
rm(gshhs1, gshhs5)
# Get grid points to calculate fetch at
# 55km land buffer from land area calculation
land_buf55 <- raster("reeflandarea/buffers/land_buf55_resamp.grd")
# Remove land cells
land_mask <- raster("reeflandarea/land_final.grd")
land_buf55 <- mask(land_buf55, land_mask, maskvalue = 1)
# Keep only cells flagged 1 (inside the 55 km coastal buffer, not land).
grid_pts <- rasterToPoints(land_buf55, fun = function(x) {x == 1})
grid_pts <- as.data.frame(grid_pts)
grid_pts$layer <- NULL
# Need to rotate coordinates to (-180, 180) longitude range
grid_pts$x[grid_pts$x > 180] <- grid_pts$x[grid_pts$x > 180] - 360
# Convert to SpatialPointsDataFrame
coordinates(grid_pts) <- ~x + y
proj4string(grid_pts) <- CRS(proj4string(gshhs_full))
# Parameters for fetch calculation
bearings <- seq(0, 337.5, 22.5)
spread <- seq(-10, 10, 2.5)
dmax <- 50000 # Only calculate fetch up to 50km
# Find bounding box intersections between 50km rectangle around points and
# shoreline polygons. (To speed up later calculation)
# NOTE(review): 1:length(x) yields c(1, 0) when x is empty;
# seq_len(length(grid_pts)) would be the safer idiom here.
rects <- do.call(rbind, c(lapply(1:length(grid_pts),
function(i) get_clip_rect(grid_pts[i], dmax, FALSE)
), makeUniqueIDs = TRUE))
btree <- gBinarySTRtreeQuery(gshhs_full, rects)
rm(rects)
# Compute fetch lengths for the grid point at index "i", restricting the
# shoreline layer to the polygons pre-selected in btree[[i]] so each call
# stays fast.  Returns a numeric vector named by bearing; on error the
# bearings are filled with NA, and when no shoreline is within range the
# maximum fetch (dmax) is reported for every bearing.
fetch_i <- function(i) {
  nearby_polys <- btree[[i]]
  if (is.null(nearby_polys)) {
    # No shoreline polygons around: report the maximum fetch everywhere.
    return(setNames(rep(dmax, length(bearings)), bearings))
  }
  tryCatch(
    fetch_len(grid_pts[i], bearings, gshhs_full[nearby_polys], dmax, spread),
    error = function(e) {
      print(paste("Error at", i, ":", e))
      setNames(rep(NA, length(bearings)), bearings)
    }
  )
}
# NOTE: This calculation was parallelized on a HPC cluster
# seq_len() rather than 1:length(): identical here, but safe if grid_pts
# were ever empty (1:0 would iterate over c(1, 0)).
fetch_res <- lapply(seq_len(length(grid_pts)), fetch_i)
fetch_res <- do.call(rbind, fetch_res) # Forms n_points x n_bearings matrix
# Merge coordinates and fetch data; keep only points where at least one
# bearing has fetch below dmax (50 km), i.e. drop points that hit the cap
# in every direction.  (An earlier comment said 15 km, but the code has
# always used 50000 m, matching dmax.)
fetch_res <- SpatialPointsDataFrame(grid_pts, fetch_res)
fetch_res <- fetch_res[which(apply(fetch_res@data, 1, function(x) any(x < 50000))), ]
# Rotate coordinates back to the (0, 360) longitude range, and save
fetch_res <- rotate_pts(fetch_res)
saveRDS(fetch_res, "wave/fetch_res.RData")
|
3b76d3ca165cf376bdd82e430f5408a3d4eaafe4 | d59430497b1fab82c62f09e7fc01c49cec73644b | /man/listQTL.Rd | 54b3bd7fa1538c05f55ad0a3eb4647afe1a69196 | [] | no_license | liuyufong/AnimalGene2QTL | 5b4734e177cab8fcd1a1351b5d575dba0af6df0a | cfaf0205e21ec4ab3aa27e94aabda485f433e7a1 | refs/heads/master | 2021-01-01T10:41:03.593993 | 2017-08-17T13:43:28 | 2017-08-17T13:43:28 | 97,571,834 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 262 | rd | listQTL.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listQTL.r
\name{listQTL}
\alias{listQTL}
\title{List of QTL databases}
\usage{
listQTL()
}
\value{
result
}
\description{
list of QTL database
}
\examples{
listQTL()
}
|
8177d464a00b9304083529b8515f017c44970f41 | 221ef8c34d40387e09ff18cfc16e322b05c6e29c | /man/Row.Rd | 6c042a170f51122d65607a0fb42798cfd0331a0b | [] | no_license | sita-aghasoy33/xlsx | ae88c22dfabe8b86876a4b953359cc4f9b596016 | 3bebd39606169232b7bc74d3749fba3d2e8c98cf | refs/heads/master | 2020-12-04T14:33:49.947169 | 2019-12-25T20:05:02 | 2019-12-25T20:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,851 | rd | Row.Rd | \name{Row}
\alias{Row}
\alias{createRow}
\alias{getRows}
\alias{removeRow}
\alias{setRowHeight}
\title{Functions to manipulate rows of a worksheet.}
\description{
Functions to manipulate rows of a worksheet.
}
\usage{
createRow(sheet, rowIndex=1:5)
getRows(sheet, rowIndex=NULL)
removeRow(sheet, rows=NULL)
setRowHeight(rows, inPoints, multiplier=NULL)
}
\arguments{
\item{sheet}{a worksheet object as returned by \code{createSheet} or
by subsetting \code{getSheets}.}
\item{rowIndex}{a numeric vector specifying the index of rows to
create. For \code{getRows}, a \code{NULL} value will return all
non empty rows.}
\item{rows}{a list of \code{Row} objects.}
\item{inPoints}{a numeric value to specify the height of the row in
points.}
\item{multiplier}{a numeric value to specify the multiple of default
row height in points. If this value is set, it takes precedence
over the \code{inPoints} argument.}
}
\details{
\code{removeRow} is just a convenience wrapper to remove the rows from
the sheet (before saving). Internally it calls \code{lapply}.
}
\value{
  For \code{getRows}, a list of Java object references, each pointing to
  a row.  The list is named with the row numbers.
}
\author{Adrian Dragulescu}
\seealso{To extract the cells from a given row, see \code{\link{Cell}}.}
\examples{
file <- system.file("tests", "test_import.xlsx", package = "xlsx")
wb <- loadWorkbook(file)
sheets <- getSheets(wb)
sheet <- sheets[[2]]
rows <- getRows(sheet) # get all the rows
# see all the available java methods that you can call
#.jmethods(rows[[1]])
# for example
rows[[1]]$getRowNum() # zero based index in Java
removeRow(sheet, rows) # remove them all
# create some row
rows <- createRow(sheet, rowIndex=1:5)
setRowHeight( rows, multiplier=3) # 3 times bigger rows than the default
}
|
179c0f2889113ade28b3514207b5fba072a30dde | 398a4623c105f1395485ea117916a22065d7bf9d | /R/bedr.subtract.region.R | 76e82f2b4b0e7fc4b8020f44ca2de81c0f0f918c | [] | no_license | cran/bedr | 9d0d23bacab3f67edc672682d8f68c3d76543430 | 579f88449820e6c191d05464d94d079360aab3c0 | refs/heads/master | 2021-01-10T13:17:47.043201 | 2019-04-01T17:50:02 | 2019-04-01T17:50:02 | 48,077,102 | 0 | 5 | null | null | null | null | UTF-8 | R | false | false | 1,891 | r | bedr.subtract.region.R | # The bedr package is copyright (c) 2014 Ontario Institute for Cancer Research (OICR)
# This package and its accompanying libraries is free software; you can redistribute it and/or modify it under the terms of the GPL
# (either version 1, or at your option, any later version) or the Artistic License 2.0. Refer to LICENSE for the full license text.
# OICR makes no representations whatsoever as to the SOFTWARE contained herein. It is experimental in nature and is provided WITHOUT
# WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE OR ANY OTHER WARRANTY, EXPRESS OR IMPLIED. OICR MAKES NO REPRESENTATION
# OR WARRANTY THAT THE USE OF THIS SOFTWARE WILL NOT INFRINGE ANY PATENT OR OTHER PROPRIETARY RIGHT.
# By downloading this SOFTWARE, your Institution hereby indemnifies OICR against any loss, claim, damage or liability, of whatsoever kind or
# nature, which may arise from your Institution's respective use, handling or storage of the SOFTWARE.
# If publications result from research using this SOFTWARE, we ask that the Ontario Institute for Cancer Research be acknowledged and/or
# credit be given to OICR scientists, as scientifically appropriate.
# bedr.subtract.region: remove regions in `y` from `x` via "bedtools subtract".
#
# Args:
#   x, y: interval inputs forwarded to bedr() as a = x, b = y.
#   fraction.overlap: minimum overlap fraction (-f) required before a region
#     of `y` is subtracted; the default 1/1e9 is treated as "use the bedtools
#     default", so no -f flag is emitted in that case.
#   remove.whole.feature: if TRUE, pass -A so any overlap removes the entire
#     feature in `x` rather than only the overlapping portion.
#   check.zero.based, check.chr, check.valid, check.sort, check.merge,
#   verbose: validation/verbosity toggles forwarded unchanged to bedr().
# Returns: the subtracted regions, as returned by bedr().
bedr.subtract.region <- function(x, y, fraction.overlap = 1/1e9, remove.whole.feature = TRUE, check.zero.based = TRUE, check.chr = TRUE, check.valid = TRUE, check.sort = TRUE, check.merge = TRUE, verbose = TRUE) {
	catv("SUBTRACTING\n");
	# Idiom fix: ifelse() is vectorized and meant for vectors; plain if/else is
	# the correct scalar form for building these flag strings.  The equality
	# test against the literal default 1/1e9 is an exact sentinel comparison,
	# so == is intentional here.
	fraction.overlap <- if (fraction.overlap == 1/1e9) "" else paste("-f ", fraction.overlap);
	remove.whole.feature <- if (remove.whole.feature) " -A " else "";
	xy <- bedr(engine = "bedtools", input = list(a = x, b = y), method = "subtract", params = paste(fraction.overlap, remove.whole.feature), check.zero.based = check.zero.based, check.chr = check.chr, check.valid = check.valid, check.sort = check.sort, check.merge = check.merge, verbose = verbose);
	return(xy);
	}
|
091d7c857371cec6982d186e4c8be7c6b58a85ff | abdd6c0ff16c23e47571084a901d32422e6eb5b7 | /BST215_Quantitative_Research_Methods/r_code/history1/history1.R | 28eace189188d1402ac7c0151cbcbe21a61e5b34 | [] | no_license | sn0wfree/Study_backup | f1071878d048140a47e5d511835da18601f5005a | dd11b57e00ef2b76fbf504b71d41e413a9ba6338 | refs/heads/master | 2020-06-19T18:05:18.216034 | 2017-02-28T04:46:57 | 2017-02-28T04:46:57 | 74,840,836 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,160 | r | history1.R | # history1.R 6 Oct 2016
# using Wages.xls
read.table("clipboard",header=TRUE)
bug=read.table("clipboard",header=TRUE)
bug
head(bug)
bug$Age
mean(bug$Age)
sd(bug$Age)
median(bug$Age)
Age
bug$Age
attach(bug)
Age
boxplot(Age)
boxplot(Wage)
plot(Age,Wage)
plot(lowess(Age,Wage))
plot(lowess(Age,Wage,f=1/3))
plot(lowess(Age,Wage,f=1/3),pch=19)
plot(lowess(Age,Wage,f=1/3),pch=19,cex=2)
plot(lowess(Age,Wage,f=1/3),pch=19,cex=2,col="red")
head(bug)
colnames(bug)
plot(lowess(Age[Sex="Female"],Wage[Sex="Female"],f=1/3),pch=19,cex=2,col="red")
plot(lowess(Age[Sex=="Female"],Wage[Sex=="Female"],f=1/3),pch=19,cex=2,col="red")
points(lowess(Age[Sex=="Male"],Wage[Sex=="Male"],f=1/3),pch=19,cex=2,col="blue")
plot(lowess(Age[Sex=="Male"],Wage[Sex=="Male"],f=1/3),pch=19,cex=2,col="blue")
plot(c(15,65),c(0,12))
plot(c(15,65),c(0,12),col="white")
ppoints(lowess(Age[Sex=="Male"],Wage[Sex=="Male"],f=1/3),pch=19,cex=2,col="blue")
points(lowess(Age[Sex=="Male"],Wage[Sex=="Male"],f=1/3),pch=19,cex=2,col="blue")
points(lowess(Age[Sex=="Female"],Wage[Sex=="Female"],f=1/3),pch=19,cex=2,col="red")
history()
history(100)
history(50)
|
7312acc9d0c83fba26209233b7af1ab47275e8c9 | ef9d57949bbc3a23f660bf5b897f4b42edc9f7f1 | /R/zzz.R | bb8b8430678b4acece076c8db90242de066ba234 | [] | no_license | crushing05/BayesCorrOcc | 32678f5935a51b9c699347ac375ada8c4fd84011 | 7e1ed713f634f6462d3b3e9d642855cc333c5232 | refs/heads/master | 2021-09-06T23:54:14.721637 | 2018-02-13T17:49:00 | 2018-02-13T17:49:00 | 100,308,672 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,901 | r | zzz.R | .onLoad <- function(libname="BBSclim", pkgname="BBSclim"){
options(digits=4)
library(ggplot2)
# Custom ggplot2 theme installed as the default at package load: white
# background, no grid lines, grey axis lines/ticks, enlarged axis text and
# titles, blank legend title.
# NOTE(review): theme_crushing is a LOCAL binding inside the enclosing
# .onLoad; it only exists to feed theme_set() below and is not exported.
theme_crushing <- function(base_size = 12, base_family = "") {
half_line <- base_size/2
theme(
# Elements in this first block aren't used directly, but are inherited
# by others
line = element_line(size = 0.5, linetype = 1, colour = "black",
lineend = "butt"),
rect = element_rect(fill = "white", colour = "black", size = 0.5, linetype = 1),
text = element_text(family = base_family, face = "plain", colour = "black",
size = base_size,
hjust = 0.5, vjust = 0.5, angle = 0, lineheight = 0.9, margin = margin(),
debug = FALSE),
axis.text = element_text(colour = "grey40"),
axis.title = element_text(colour = "grey20", vjust = 0.35),
strip.text = element_text(size = rel(0.8)),
axis.line = element_line(),
axis.line.x = element_line(size=.7, color = "grey60"),
axis.line.y = element_line(size=.7, color = "grey60"),
axis.text.x = element_text(size = base_size*1.1, lineheight = 0.9,
margin = margin(t = 0.8 * half_line/2), vjust = 1),
axis.text.y = element_text(size = base_size*1.1, lineheight = 0.9,
margin = margin(r = 0.8 * half_line/2), vjust = 0.5),
axis.ticks = element_line(colour = "grey60", size = 0.2),
axis.title.x = element_text(size = base_size*1.4, vjust = 0.3,
margin = margin(t = 10, b = 0.8 * half_line/2)),
axis.title.y = element_text(size = base_size*1.4, angle = 90, vjust = 1,
margin = margin(r = 10, l = 0.8 * half_line/2)),
axis.ticks.length = grid::unit(0.3, "lines"),
legend.background = element_rect(colour = NA),
legend.margin = grid::unit(0.2, "cm"),
legend.key = element_rect(colour = "grey80"),
legend.key.size = grid::unit(1.2, "lines"),
legend.key.height = NULL,
legend.key.width = NULL,
legend.text = element_text(size = base_size * 0.8),
legend.text.align = NULL,
legend.title = element_blank(),
legend.title.align = NULL,
legend.position = "right",
legend.direction = NULL,
legend.justification = "center",
legend.box = NULL,
panel.background = element_rect(fill = "white", colour = NA),
panel.border = element_rect(fill = NA, color = NA,size=.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.margin = grid::unit(half_line, "pt"),
panel.margin.x = NULL,
panel.margin.y = NULL,
panel.ontop = FALSE,
strip.background = element_rect(fill = NA, colour = NA),
strip.text.x = element_text(size = base_size, margin = margin(t = half_line, b = half_line)),
strip.text.y = element_text(size = base_size, angle = -90, margin = margin(l = half_line, r = half_line)),
# NOTE(review): these two use unqualified unit(); the rest of this function
# uses grid::unit() -- works only if grid is attached.  TODO confirm.
strip.switch.pad.grid = unit(0.1, "cm"),
strip.switch.pad.wrap = unit(0.1, "cm"),
plot.background = element_rect(colour = NA),
plot.title = element_text(size = base_size * 1.7, face="bold",vjust=2, margin = margin(b = half_line * 1.2)),
plot.margin = grid::unit(c(1, 1.5, 0.8, 0.8), "lines"),
complete = TRUE
)
}
# Install the custom theme and tweaked geom defaults at package load.
theme_set(theme_crushing())
# NOTE(review): this assigns scale_colour_discrete into the enclosing
# function's local environment only; ggplot2 will not find it when building
# plots, so this line most likely has no effect -- confirm intent.
scale_colour_discrete <- function(...) ggthemes::scale_color_solarized()
update_geom_defaults("point", list(size = 3))
update_geom_defaults("line", list(size = 0.8))
}
|
6772abc93fc761b99151361abd5aa8c2c4ab4aa8 | 0a36d7506471b1fb339eab56498ea9a4b50fa3fd | /BASIC BAG DATA RUN.R | 0bc6d9bac6e42988c4d768936edf03bd1b7e161a | [] | no_license | djrobillard/SCSD | 89db29783cf3c9bc49ce76a90d347ffea5e74a93 | d5b9d403bacfb6997eed2d403dc4eb04b0d97752 | refs/heads/master | 2021-08-20T09:36:59.867736 | 2017-11-28T20:53:28 | 2017-11-28T20:53:28 | 112,387,167 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,164 | r | BASIC BAG DATA RUN.R | library(stringi)
library(stringr)
library(tidyverse)
#read files
# Raw extracts (CSV) for the student "BAG" report: behavior incidents,
# attendance, enrollment/demographics, STAR assessment screens,
# McKinney-Vento status, and course marks.  strip.white trims stray
# whitespace in character fields on the files that need it.
ehout2018<-read.csv("ehout2018.csv")
attendancetotab<-read.csv("attendtotab.csv",strip.white = TRUE)
enrollment<-read.csv("attendance2018.csv",strip.white = TRUE)
SEL_cleaned<-read.csv("SEL_cleaned.csv")
SM_cleaned<-read.csv("SM_cleaned.csv")
SR_cleaned<-read.csv("SR_cleaned.csv")
McKinneyVento<-read.csv("MKV.csv",strip.white = TRUE)
MarkReporting<-read.csv("MarkReporting2018.csv",strip.white = TRUE)
#ehout
# One row per student: counts of referrals and classroom incidents, plus
# suspension (OSS) totals, aggregated from the incident-level extract.
EhoutClean<-ehout2018%>%
rename(student_id=OffenderID,OSSDays=OSS.Days)%>%
mutate(RefCount=ifelse(IncidentType=="referral",1,0))%>%
mutate(ClassCount=ifelse(IncidentType=="classroom",1,0))%>%
group_by(student_id)%>%
summarise(OSS=sum(OSS),Referrals=sum(RefCount),ClassroomIncidents=sum(ClassCount),
OSSDays=sum(OSSDays))%>%
select(student_id,Referrals,ClassroomIncidents,OSS,OSSDays)
#attendance
# Keep the attendance snapshot for the reporting window ending 11/08/2017.
# NOTE(review): rename(student_id=student_id) is a no-op; presumably left
# over from an earlier column name.
attendance2018<-attendancetotab %>%
filter(End=="11/08/2017") %>%
select(Attendance.Percentage,student_id,DaysEnrolled,DailyAbsences)%>%
rename(student_id=student_id)
#cleaning STAR information
# Keep only the student ID and the fall screening category from each STAR
# assessment extract, renaming to the human-readable column names used in
# the final report.
SEL<-SEL_cleaned%>%
select(Student.ID,F17EL_ScreeningCategoryGroupAdjustment)%>%
rename("Fall Early Literacy"=F17EL_ScreeningCategoryGroupAdjustment,student_id=Student.ID)
SR<-SR_cleaned%>%
select(Student.ID,F17Read_ScreeningCategoryGroupAdjustment)%>%
rename("Fall STAR Reading"=F17Read_ScreeningCategoryGroupAdjustment,student_id=Student.ID)
SM<-SM_cleaned%>%
select(Student.ID,F17Math_ScreeningCategoryGroupAdjustment)%>%
rename("Fall STAR Math"=F17Math_ScreeningCategoryGroupAdjustment,student_id=Student.ID)
# Full outer joins so students with any one assessment are retained.
StarClean<-merge(SEL,SM,by="student_id",all=TRUE)
StarClean<-merge(StarClean,SR,by="student_id",all=TRUE)
# BUG FIX: `NoScore` was a bare, undefined symbol, which fails at run time
# with "object 'NoScore' not found".  The missing-score placeholder must be
# a string literal.
StarClean[is.na(StarClean)]<-"NoScore"
#enrollment and demographics
# Active students only (Current.Status == "A"); recode demographics to the
# one-letter / 0-1 codes used in the report, build a "Last, First" display
# name, normalize building names (merging K8 campuses, Twilight programs,
# etc.), and map ungraded levels (U1..U8, UK) onto standard grades.
enrollmentclean<-enrollment%>%
filter(Current.Status=="A")%>%
rename(student_id=Student.Id) %>%
rename(Grade=Curr.Grade.Lvl) %>%
rename(Building=Attendance.Location.Name) %>%
mutate(Ethnicity=str_replace_all(Rptg.Race.Ethnicity.Desc,c("Black or African American"="B","Asian"="A",
"White"="W","Multiracial"="M","Hispanic"="H",
"American Indian or Alaska native"="I",
"Native Hawaiian / Other Pacific Islander"="P"))) %>%
# ENL: any listed proficiency label becomes "1" (English language learner
# indicator).  NOTE(review): the final '"b"="1"' replaces the letter "b"
# anywhere in the remaining text -- confirm this is intentional.
mutate(ENL=str_replace_all(Eng.Proficiency,c("Proficient"="1","Advanced"="1","Beginner"="1","Intermediate"="1",
"Transitioning"="1","Commanding"="1","Entering"="1","Emerging"="1","Expanding"="1",
"b"="1")))%>%
mutate(IEP=str_replace_all(Has.Iep,c("Y"="1","N"="0")))%>%
mutate(StudentName=paste(Student.Last.Nm,Student.First.Nm,sep=", "))%>%
mutate(Gender=str_replace_all(Student.Gender,c("Female"="F","Male"="M")))%>%
select(student_id,StudentName,Building,Grade,Ethnicity,IEP,ENL,Gender) %>%
mutate(Building=str_replace_all(Building,c("P-Tech"="Institute of Technology at Syracuse Central",
"Hurlbut W. Smith Elementary School"="HW Smith K8",
"Hurlbut W. Smith Middle School"="HW Smith K8",
"Frazer Middle School"="Frazer K8",
"Frazer Elementary School"="Frazer K8",
"Hughes Elementary School"="Syracuse Latin - Hughes",
"Syracuse Latin School"="Syracuse Latin - Hughes",
"Montessori School @ Lemoyne"="Lemoyne Elementary School",
"GPS Elementary Program"="MSAP - CORE",
"Huntington Middle School"="Huntington K8",
"Huntington Elementary School"="Huntington K8",
"Delaware Academy"="Delaware Academy - Primary",
"Delaware Primary School"="Delaware Academy - Primary",
"Twilight Academy @ Corcoran"="Corcoran High School",
"Twilight Academy @ Nottingham" = "Nottingham High School",
"Twilight Academy @ PSLA" = "Public Service Leadership Academy at Fowler",
"Twilight Academy @ Henninger" = "Henninger High School",
"Roberts Elementary School" = "Roberts K8",
"Roberts Middle School"="Roberts K8",
"Edward Smith Elementary School"="Ed Smith K8",
"Edward Smith Middle"="Ed Smith K8",
"JVC EPIC Program"="Johnson Center",
"Johnson Center Transition Program"="Johnson Center")))%>%
mutate(Grade=str_replace_all(Grade,c("U8"="8","U1"="1","U2"="2","U3"="3","U4"="4","U5"="5","U6"="6",
"U7"="7","UK"="KF")))%>%
# Drop duplicate enrollment rows (e.g. multiple enrollment records).
unique()
#McKinney-Vento
# Flag students with an active McKinney-Vento (homelessness) record.
# NOTE(review): as.numeric() on End.Date (read as a factor) yields the
# factor's level code, so filter(End.Date==1) keeps rows whose End.Date is
# the first level (presumably blank = still active) -- confirm intent.
MKV<-McKinneyVento %>%
mutate(McKinneyVento=1)%>%
mutate(End.Date=as.numeric(End.Date))%>%
filter(End.Date==1)%>%
select(Student.Id,McKinneyVento)%>%
rename(student_id=Student.Id)%>%
unique()
#####################################
#####markreporting
# Count failing courses (class average below 65) per student.
# NOTE(review): if Class.Average was read as a factor, as.numeric() returns
# level codes rather than the averages -- verify the CSV import.
MarkReportingClean <- MarkReporting %>%
mutate(Class.Average=as.numeric(Class.Average))%>%
mutate(Failing=ifelse(Class.Average<65,1,0))%>%
group_by(student_id)%>%
summarise(NumberofFailingCourses=sum(Failing))%>%
select(student_id,NumberofFailingCourses)
#merging all
# Enrollment is the spine; all other tables attach by student_id.
final<-left_join(enrollmentclean,EhoutClean,by="student_id")
final<-left_join(final,MarkReportingClean,by="student_id")
final<-left_join(final,StarClean,by="student_id")
final<-left_join(final,MKV,by="student_id")
final<-left_join(final,attendance2018,by="student_id")
# Students absent from a joined table get 0 for its columns.
# NOTE(review): this also writes 0 into character columns (e.g. the STAR
# category columns) for students without scores -- confirm that is desired.
final[is.na(final)]<-0
#organizing table
# Build the four risk flags, total them, order the report columns, and
# format attendance as a percentage string.
final1<-final%>%
# Behavior: flagged at >= 3 referrals OR >= 1 classroom incident.
mutate(BehaviorFlag=ifelse(Referrals>=3,1,ifelse(ClassroomIncidents>=1,1,0)))%>%
mutate(AttendanceFlag=ifelse(Attendance.Percentage<=.9,1,0))%>%
mutate(SecondaryGradeFlag=ifelse(NumberofFailingCourses>0,1,0))%>%
# BUG FIX: the original compared the quoted literals 'Fall Star Reading' /
# 'Fall Early Literacy' against "Urgent Intervention" -- a comparison of two
# string constants that is always FALSE, so ReadingLevelFlag was always 0.
# Non-syntactic column names must be referenced with backticks, and the
# reading column is spelled "Fall STAR Reading".
mutate(ReadingLevelFlag=ifelse(`Fall STAR Reading`=="Urgent Intervention",1,ifelse(`Fall Early Literacy`=="Urgent Intervention",1,0)))%>%
mutate(TotalFlags=BehaviorFlag+AttendanceFlag+SecondaryGradeFlag+ReadingLevelFlag)%>%
select(StudentName,Grade,IEP,ENL,Ethnicity,Gender,
McKinneyVento,TotalFlags,BehaviorFlag,AttendanceFlag,SecondaryGradeFlag,
ReadingLevelFlag,Referrals, ClassroomIncidents,OSS,OSSDays,DaysEnrolled,
DailyAbsences,Attendance.Percentage,NumberofFailingCourses,
`Fall STAR Reading`, `Fall STAR Math`,`Fall Early Literacy`,student_id,Building)%>%
# e.g. 0.925 -> "92.5%"
mutate(Attendance.Percentage=paste(round((Attendance.Percentage)*100,digits=1),"%",sep=""))
# Truncate building names to 31 characters (presumably a downstream Excel
# sheet-name limit -- confirm).
final1$Building<-strtrim(final1$Building, 31)
#exporting to csv
write.csv(final1, "C:/Users/drobil66/Desktop/RFiles/R Reports/BAGS1030.csv")
27b623ec956d266eacdb8c803ba3ca7895762c88 | 3f74fd357884b9d64f6800fc2fa3dfc7885bce07 | /R/crosstable_ext.R | 0caad56c572f1bb4a1ee86e7490947353475bfb3 | [] | no_license | lemonlinn/lemonutil | d2d8cd06d684d167d15199025ece59b26835cb3d | 49b834e0c97de04cd0689cd0904464853517f9f5 | refs/heads/master | 2022-12-22T00:21:44.812391 | 2020-09-22T00:01:13 | 2020-09-22T00:01:13 | 286,166,321 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,273 | r | crosstable_ext.R | #' Generates a table for cross tabulations with percentages
#'
#' This function generates a flextable for a cross tabulation
#' between two categorical (nominal or ordinal), numeric variables within a dataframe.
#' Each cell value contains the total percentage, column percentage, row percentage, and count.
#' There is an option to make the column names of the table reflect response options
#' instead of the default, where the column names are the actual numeric values.
#'
#' @param data Object holding the dataframe
#' @param x String of the first variable name
#' @param y String of the second variable name
#' @param x_names Vector of response options. Default value is FALSE.
#' @param y_names Vector of response options. Default value is FALSE.
#' @param percents String representing what percentages to calculate. Accepts any of c("TRC", "RC", "TC", "TR", "C", "R", "T", "N"). Default is "TRC".
#' @param row_t Boolean determining if row totals should be included. Default value is TRUE.
#' @param col_t Boolean determining if column totals should be included. Default value is TRUE.
#' @return A flextable object
#' @examples
#' mat <- as.data.frame(matrix(1:20, 5, 4, dimnames = list(NULL, LETTERS[1:4])))
#' crosstable_ext(mat, "A", "B")
#' crosstable_ext(mat, "A", "B", x_names = c("one", "two", "three", "four", "five"), y_names = c("six", "seven", "eight", "nine", "ten"))
#' @export
crosstable_ext <- function(data, x, y, x_names = FALSE, y_names = FALSE, percents = "TRC", row_t = TRUE, col_t = TRUE){
  # Cross-tabulate y (rows) against x (columns); cells start out as raw counts.
  tab = xtabs(formula = ~unlist(data[y])+unlist(data[x]))
  tabDF = as.data.frame.matrix(tab)
  oldrownames = rownames(tabDF)
  oldcolnames = colnames(tabDF)
  # N = number of complete cases over both variables; C/R = column/row count
  # totals. These are computed from the raw counts BEFORE the cells below are
  # rewritten as formatted strings, so the totals stay numeric.
  N = nrow(na.omit(data[c(x,y)]))
  C = colSums(tabDF, na.rm = T)
  R = rowSums(tabDF, na.rm = T)
  # Unless percents == "N" (counts only), replace every cell with a formatted
  # multi-line string containing the requested percentages plus the count.
  if (percents != "N"){
    for (i in 1:ncol(tabDF)){
      for (j in 1:nrow(tabDF)){
        # a: the raw count for this cell (cells are still numeric at this point).
        a = strtoi(tabDF[j,i])
        if (percents == "TRC"){
          # total %, column %, row %, and count
          tabDF[j,i] <- paste0(toString(round((a/N)*100, digits=2)), "%T", "\n",
                               toString(round((a/C[i])*100,digits=2)), "%C", "\n",
                               toString(round((a/R[j])*100,digits=2)), "%R", "\n",
                               "(n=", toString(a), ")")
        } else if (percents == "RC"){
          tabDF[j,i] <- paste0(toString(round((a/C[i])*100,digits=2)), "%C", "\n",
                               toString(round((a/R[j])*100,digits=2)), "%R", "\n",
                               "(n=", toString(a), ")")
        } else if (percents == "TR"){
          tabDF[j,i] <- paste0(toString(round((a/N)*100, digits=2)), "%T", "\n",
                               toString(round((a/R[j])*100,digits=2)), "%R", "\n",
                               "(n=", toString(a), ")")
        } else if (percents == "TC"){
          tabDF[j,i] <- paste0(toString(round((a/N)*100, digits=2)), "%T", "\n",
                               toString(round((a/C[i])*100,digits=2)), "%C", "\n",
                               "(n=", toString(a), ")")
        } else if (percents == "T"){
          tabDF[j,i] <- paste0(toString(round((a/N)*100, digits=2)), "%T", "\n",
                               "(n=", toString(a), ")")
        } else if (percents == "R"){
          tabDF[j,i] <- paste0(toString(round((a/R[j])*100,digits=2)), "%R", "\n",
                               "(n=", toString(a), ")")
        } else if (percents == "C"){
          tabDF[j,i] <- paste0(toString(round((a/C[i])*100,digits=2)), "%C", "\n",
                               "(n=", toString(a), ")")
        }
      }
    }
  }
  # Four branches for the row-total / column-total combinations. Each one:
  #  (1) appends the requested total row/column,
  #  (2) sets row names from y_names when supplied (falls back to the levels),
  #  (3) moves the row names into a leading column named after y,
  #  (4) sets column names from x_names when supplied.
  if (row_t & col_t){
    tabDF <- cbind(tabDF, data.frame("Row Totals" = R))
    # grand total N goes in the bottom-right corner cell
    colDF <- data.frame(t(c(C, paste0("N=", toString(N)))))
    colnames(colDF) <- colnames(tabDF)
    tabDF <- rbind(tabDF, colDF)
    if (!isFALSE(y_names)){
      rownames(tabDF) <- c(y_names, "Column Totals")
    } else {
      rownames(tabDF) <- c(oldrownames, "Column Totals")
    }
    tabDF <- cbind(x = rownames(tabDF), tabDF)
    if (!isFALSE(x_names)){
      colnames(tabDF) <- c(y, x_names, "Row Totals")
    } else {
      colnames(tabDF) <- c(y, oldcolnames, "Row Totals")
    }
  }
  if (row_t & !col_t){
    tabDF <- cbind(tabDF, data.frame("Row Totals" = R))
    if (!isFALSE(y_names)){
      rownames(tabDF) <- c(y_names)
    } else {
      rownames(tabDF) <- c(oldrownames)
    }
    tabDF <- cbind(x = rownames(tabDF), tabDF)
    if (!isFALSE(x_names)){
      colnames(tabDF) <- c(y, x_names, "Row Totals")
    } else {
      colnames(tabDF) <- c(y, oldcolnames, "Row Totals")
    }
  }
  if (!row_t & col_t){
    colDF <- data.frame(t(C))
    colnames(colDF) <- colnames(tabDF)
    tabDF <- rbind(tabDF, colDF)
    if (!isFALSE(y_names)){
      rownames(tabDF) <- c(y_names, "Column Totals")
    } else {
      rownames(tabDF) <- c(oldrownames, "Column Totals")
    }
    tabDF <- cbind(x = rownames(tabDF), tabDF)
    if (!isFALSE(x_names)){
      colnames(tabDF) <- c(y, x_names)
    } else {
      colnames(tabDF) <- c(y, oldcolnames)
    }
  }
  if (!row_t & !col_t){
    if (!isFALSE(y_names)){
      rownames(tabDF) <- c(y_names)
    } else {
      rownames(tabDF) <- c(oldrownames)
    }
    tabDF <- cbind(x = rownames(tabDF), tabDF)
    if (!isFALSE(x_names)){
      colnames(tabDF) <- c(y, x_names)
    } else {
      colnames(tabDF) <- c(y, oldcolnames)
    }
  }
  # Chi-square test of independence on the raw count table, reported in the
  # footer; the title/footer strings are assembled here.
  chitest = summary(tab)
  title = paste0("Cross Tabulation of ", x, " and ", y)
  foot = paste0("(N=", toString(N), ")")
  chi = paste0("Chi-Square=", round(chitest$statistic,2), " (p-val=", round(chitest$p.value,4), ")")
  maxval = ncol(tabDF)
  # Build the flextable: title header, x-variable header spanning the data
  # columns, then the chi-square footer. When both totals are shown, N already
  # appears in the corner cell, so the "(N=...)" footer is replaced by a blank.
  ft <- flextable::flextable(tabDF)
  ft <- flextable::add_header_row(ft, top = TRUE, values = c(NA, x), colwidths = c(1, maxval-1))
  ft <- flextable::add_header_row(ft, top = TRUE, values = title, colwidths = maxval)
  ft <- flextable::align(ft, i = c(1:2), align = "center", part = "header")
  if (col_t & row_t){
    ft <- flextable::add_footer_row(ft, top = FALSE, values = c(chi, " "), colwidths = c(maxval-1, 1))
  } else {
    ft <- flextable::add_footer_row(ft, top = FALSE, values = c(chi, foot), colwidths = c(maxval-1, 1))
  }
  ft <- flextable::align(ft, i = 1, j = maxval, align = "right", part = "footer")
  ft <- flextable::bold(ft, part = "header", i = 1)
  return(ft)
}
|
05d30aaaafdb92b1e08b93572025b1faea045f9d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/cOde/examples/odeC.Rd.R | 65907376c509c9969c634d702a235ef03e38977c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,712 | r | odeC.Rd.R | library(cOde)
### Name: odeC
### Title: Interface to ode()
### Aliases: odeC
### ** Examples
## Not run:
##D
##D ######################################################################
##D ## Ozone formation and decay, modified by external forcings
##D ######################################################################
##D
##D library(deSolve)
##D data(forcData)
##D forcData$value <- forcData$value + 1
##D
##D # O2 + O <-> O3
##D f <- c(
##D O3 = " (build_O3 + u_build) * O2 * O - (decay_O3 + u_degrade) * O3",
##D O2 = "-(build_O3 + u_build) * O2 * O + (decay_O3 + u_degrade) * O3",
##D O = "-(build_O3 + u_build) * O2 * O + (decay_O3 + u_degrade) * O3"
##D )
##D
##D # Generate ODE function
##D forcings <- c("u_build", "u_degrade")
##D func <- funC(f, forcings = forcings, modelname = "test",
##D fcontrol = "nospline", nGridpoints = 10)
##D
##D # Initialize times, states, parameters and forcings
##D times <- seq(0, 8, by = .1)
##D yini <- c(O3 = 0, O2 = 3, O = 2)
##D pars <- c(build_O3 = 1/6, decay_O3 = 1)
##D
##D forc <- setForcings(func, forcData)
##D
##D # Solve ODE
##D out <- odeC(y = yini, times = times, func = func, parms = pars,
##D forcings = forc)
##D
##D # Plot solution
##D
##D par(mfcol=c(1,2))
##D t1 <- unique(forcData[,2])
##D M1 <- matrix(forcData[,3], ncol=2)
##D t2 <- out[,1]
##D M2 <- out[,2:4]
##D M3 <- out[,5:6]
##D
##D matplot(t1, M1, type="l", lty=1, col=1:2, xlab="time", ylab="value",
##D main="forcings", ylim=c(0, 4))
##D matplot(t2, M3, type="l", lty=2, col=1:2, xlab="time", ylab="value",
##D main="forcings", add=TRUE)
##D
##D legend("topleft", legend = c("u_build", "u_degrade"), lty=1, col=1:2)
##D matplot(t2, M2, type="l", lty=1, col=1:3, xlab="time", ylab="value",
##D main="response")
##D legend("topright", legend = c("O3", "O2", "O"), lty=1, col=1:3)
##D
##D
##D
##D ######################################################################
##D ## Ozone formation and decay, modified by events
##D ######################################################################
##D
##D
##D f <- c(
##D O3 = " (build_O3 + u_build) * O2 * O - (decay_O3 + u_degrade) * O3",
##D O2 = "-(build_O3 + u_build) * O2 * O + (decay_O3 + u_degrade) * O3",
##D O = "-(build_O3 + u_build) * O2 * O + (decay_O3 + u_degrade) * O3",
##D u_build = "0", # piecewise constant
##D u_degrade = "0" # piecewise constant
##D )
##D
##D # Define parametric events
##D events.pars <- data.frame(
##D var = c("u_degrade", "u_degrade", "u_build"),
##D time = c("t_on", "t_off", "2"),
##D value = c("plus", "minus", "2"),
##D method = "replace"
##D )
##D
##D # Declar parameteric events when generating funC object
##D func <- funC(f, forcings = NULL, events = events.pars, modelname = "test",
##D fcontrol = "nospline", nGridpoints = 10)
##D
##D # Set Parameters
##D yini <- c(O3 = 0, O2 = 3, O = 2, u_build = 1, u_degrade = 1)
##D times <- seq(0, 8, by = .1)
##D pars <- c(build_O3 = 1/6, decay_O3 = 1, t_on = exp(rnorm(1, 0)), t_off = 6, plus = 3, minus = 1)
##D
##D # Solve ODE with additional fixed-value events
##D out <- odeC(y = yini, times = times, func = func, parms = pars)
##D
##D
##D # Plot solution
##D
##D par(mfcol=c(1,2))
##D t2 <- out[,1]
##D M2 <- out[,2:4]
##D M3 <- out[,5:6]
##D
##D
##D matplot(t2, M3, type="l", lty=2, col=1:2, xlab="time", ylab="value",
##D main="events")
##D legend("topleft", legend = c("u_build", "u_degrade"), lty=1, col=1:2)
##D matplot(t2, M2, type="l", lty=1, col=1:3, xlab="time", ylab="value",
##D main="response")
##D legend("topright", legend = c("O3", "O2", "O"), lty=1, col=1:3)
##D
##D
##D
##D
## End(Not run)
|
3fd52e9f423776db3331f8555340541bea280ca8 | 7cafae71ad56d1040a702b6c97e36cd3845e4227 | /R/graficoP1ordenamiento.R | c2ae4d615117ff71e9cbeffa4eb90c73a298246b | [] | no_license | gabrielaelisa/Tarea1_logaritmos | 3435fd5bcbd106cfe6020ff7e17b6b2bbea579d9 | fa0ee1453813f778cff4fff80bb1bfb0b7a554ae | refs/heads/master | 2020-04-02T01:23:06.174725 | 2018-11-06T15:06:52 | 2018-11-06T15:06:52 | 153,852,772 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,413 | r | graficoP1ordenamiento.R | library(readr)
library(scales)
library(ggplot2)
## Extract the overall p-value of a fitted linear model: the p-value of the
## F statistic that summary.lm() prints but does not store.
##
## @param modelobject A fitted model of class "lm".
## @return A single unnamed numeric p-value.
lmp <- function (modelobject) {
  # inherits() is the robust class test: `class(x) != "lm"` yields a
  # length-> 1 logical for multi-class objects, which `if` rejects in modern R.
  if (!inherits(modelobject, "lm")) stop("Not an object of class 'lm' ")
  f <- summary(modelobject)$fstatistic
  # fstatistic is NULL for models with no F test (e.g. an intercept-only fit);
  # the original code silently returned numeric(0) in that case.
  if (is.null(f)) stop("Model has no F statistic")
  p <- pf(f[1], f[2], f[3], lower.tail = FALSE)
  # drop the "value" name inherited from the fstatistic vector
  attributes(p) <- NULL
  return(p)
}
# read data: sorting benchmark, x = number of elements, y = time in nanoseconds
datos_ord <- read_csv("datosp1cordenamiento.csv")
## linear regression on the log-log scale: the slope estimates the exponent of
## the running-time growth (e.g. ~1 for O(n), ~2 for O(n^2))
fit = lm(log(y)~log(x), data=datos_ord)
slope <- round(fit$coefficients["log(x)"],3)
error <- round(summary(fit)$coef[2,2],4)  # standard error of the slope
r_squared = summary(fit)$r.squared
p_value = lmp(fit)  # overall F-test p-value, via lmp() defined above
## plot: log-log scatter with per-x boxplots, a fitted lm line, and annotations
## reporting R^2, the p-value, and the slope with its error (labels in Spanish
## because they are part of the rendered figure)
p <-ggplot(datos_ord,aes(x, y))
p + geom_point(size=2) +
  ggtitle("Gráfico Log Log de Número de Elementos v/s Tiempo de Ordenación") +
  scale_x_continuous(trans='log10', name="Número de Elementos",labels = comma,breaks = c(0,10,100,1000,10000,100000,1000000,10000000)) +
  scale_y_continuous(trans='log10',name = "Tiempo de Ejecución (nanosegundos)",labels = comma) +
  annotation_logticks() +
  theme_bw() +
  # one boxplot per distinct element count
  geom_boxplot(aes(group = x)) +
  geom_smooth(method="lm") +
  annotate("text",x=10000/2,y = 10000000*3, hjust=0,label = paste('R² = ', round(r_squared,4))) +
  annotate("text",x=10000/2,y = 1000000*5*3, hjust=0,label = paste('Valor p = ',formatC(p_value, format = "e", digits = 2))) +
  annotate("text",x=10000/2,y = 1000000*2.5*3, hjust=0,label = paste('Pendiente = ',slope, " ± ",error))
|
4d5088b6e82ca698662479d78ba2c19d081fc3b7 | 9941cbed1d465caad2940bd8439f7d318360db22 | /Temp/Simu_CI_test.R | 5b38f8ff07f9971176cef280e2c439807fcc5643 | [] | no_license | dangdang001/Simultaneous_CI- | 25c4c6b8880944f9f69dddab9bbde93a75a04e13 | 7e3cdc307d08e6e03be21b87c454c65987502aea | refs/heads/master | 2020-03-20T12:07:25.145437 | 2018-08-24T15:01:07 | 2018-08-24T15:01:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,463 | r | Simu_CI_test.R | ##################################################################
##################################################################
# HPC launch commands (Slurm allocation + Open MPI):
# salloc -n 40 --time=0-2 /bin/bash -i
# mpirun -n 1 --oversubscribe R --quiet --no-save
# One-time package installs:
# install.packages("R2Cuba")
# install.packages("Rmpi")
# NOTE(review): rm(list = ls()) plus a hard-coded setwd() make this script
# non-portable and destructive when sourced interactively; consider removing
# them and using relative paths / a project root.
rm(list = ls())
setwd("C:/Users/Donglei.Yin")
library(R2Cuba)  # cuhre(): deterministic multidimensional cubature
library(Rmpi)    # MPI parallelism (re-attached again before spawning slaves)
# Questions:
# 1. set random seeds?
# 2. upper bound for uniroot?
# 3. if condition?
# Part 1: Parameter Initialization
# True means of reference product 1, reference product 2, and the test product.
mu.R1 <- 0
mu.R2 <- 2
mu.T <- 1
# Ratios of the R2 / T standard deviations to the R1 standard deviation.
rsd21 <- 1.25 #single 0.5 2 1.25 0.8 # 0.8 0.8 1.25 1.25 # 0.8 0.8 1.25 1.25
rsdT1 <- 2 #single 1.25 0.8 0.5 2 # 1.25 0.8 0.8 1.25 # 0.5 2 0.5 2
n <- 10            # sample size per arm
alpha <- 0.05
p.level <- 1 - 2*alpha  # target simultaneous confidence level (0.90)
NDIM <- 3          # dimensionality of the mean integral passed to cuhre()
NCOMP <- 1         # number of integrand components for cuhre()
# BUG FIX: this line previously read `p.int <- 0.999 ?`. The stray `?` is R's
# low-precedence help operator, so the parser joined this line with the next
# one into a single help-operator expression and neither p.int nor p.int1 was
# reliably assigned (the script failed at/after this point). The `?` was a
# leftover reviewer question, now kept as a comment.
p.int <- 0.999   # coverage used to truncate the integration region — confirm?
p.int1 <- 0.005
sd.all <- c((max(mu.R1,mu.R2,mu.T)-min(mu.R1,mu.R2,mu.T))/1.5,seq(2,12,2)) # candidate sd
ksce <- length(sd.all) # number of sd tested
repet <- 1000 # number of repetitions
# results for 1000 repetitions: one row per repetition, scenario blocks side by side
Arcd.data <- matrix(NA,nrow=repet,ncol=n*5*ksce)
Arcd.mean <- matrix(NA,nrow=repet,ncol=3*ksce)
Arcd.std <- matrix(NA,nrow=repet,ncol=(3+2)*ksce) # 3 sample sds + 2 reference sds per scenario
Arcd.sig <- matrix(NA,nrow=repet,ncol=3*ksce)
Arcd.simuCI1 <- matrix(NA,nrow=repet,ncol=9*ksce)
Arcd.simuCI2 <- matrix(NA,nrow=repet,ncol=9*ksce)
Arcd.simuCI3 <- matrix(NA,nrow=repet,ncol=9*ksce)
# final results summarizing from repetitions: one row per scenario
summ.sig <- matrix(NA,nrow=ksce,ncol=5)
summ.simuCI1 <- matrix(NA,nrow=ksce,ncol=18)
summ.simuCI2 <- matrix(NA,nrow=ksce,ncol=18)
summ.simuCI3 <- matrix(NA,nrow=ksce,ncol=18)
# Part 2: Main calculation — loop over the candidate reference sds
for(k in 1:ksce){
  # for each candidate population sd k: derive the three arm sds and the true
  # standardized mean differences (scaled by sd.R1, except true2T.2 by sd.R2)
  sd.R1 <- sd.all[k]
  sd.R2 <- sd.R1*rsd21
  sd.T <- sd.R1*rsdT1
  true12 <- abs(mu.R1-mu.R2)/sd.R1
  true1T <- abs(mu.R1-mu.T)/sd.R1
  true2T <- abs(mu.T-mu.R2)/sd.R1
  true2T.2 <- abs(mu.T-mu.R2)/sd.R2
  # per-scenario result matrices (filled below from the MPI output)
  rcd.data <- matrix(NA,nrow=repet,ncol=n*5)
  rcd.mean <- matrix(NA,nrow=repet,ncol=3)
  rcd.std <- matrix(NA,nrow=repet,ncol=3+2)
  rcd.sig <- matrix(NA,nrow=repet,ncol=3)
  rcd.simuCI1 <- matrix(NA,nrow=repet,ncol=9)
  rcd.simuCI2 <- matrix(NA,nrow=repet,ncol=9)
  rcd.simuCI3 <- matrix(NA,nrow=repet,ncol=9)
  # One Monte-Carlo repetition for scenario k: simulate the three products
  # (references R1, R2 and test T), run the pairwise equivalence tests, and
  # compute the fiducial simultaneous-confidence quantities of section 2.1.
  # Returns a single numeric vector (one repetition = one column of the
  # mpi.parSapply output).
  # NOTE(review): this signature does NOT match the mpi.parSapply() call later
  # in the file, which passes rsd21/rsdT1/sd.all instead of sd.R1/sd.R2/sd.T —
  # one of the two must be updated before this script can run.
  simu.one.PAR.mpi <- function(i=1,k=k,mu.R1=mu.R1,mu.R2=mu.R2,mu.T=mu.T,sd.R1=sd.R1,sd.R2=sd.R2,sd.T=sd.T,n=n){
    # function to generate n random samples ~ N(mu, std^2)
    sample.1product <- function(n,mu,std){
      return(rnorm(n,mean=mu,sd=std))
    }
    # pairwise TOST-style equivalence test with margin 1.5*refstd; returns 1
    # when the test is significant (equivalence/biosimilarity concluded), else 0
    biosim.test <- function(n1,mu1,n2,mu2,refstd,alpha){
      mar <- 1.5*refstd
      Za <- qnorm(1-alpha)
      sig.low <- ( (mu2-mu1)+Za*refstd*sqrt(1/n1+1/n2) < mar )
      sig.up <- ( (mu2-mu1)-Za*refstd*sqrt(1/n1+1/n2) > (-mar) )
      sig <- sig.low*sig.up
      return(sig)
    }
    data.R1refv <- sample.1product(n,mu.R1,sd.R1) # data.R1refv and data.R2refv were used to estimate population variance
    data.R2refv <- sample.1product(n,mu.R2,sd.R2)
    data.R1 <- sample.1product(n,mu.R1,sd.R1)
    data.R2 <- sample.1product(n,mu.R2,sd.R2)
    data.T <- sample.1product(n,mu.T,sd.T)
    # store the original data
    rcd.data.tmp <- c(data.R1, data.R2, data.T, data.R1refv, data.R2refv)
    mean.data <- c(mean(data.R1),mean(data.R2),mean(data.T)) # sample means for the 3 populations
    sd.data <- c(sd(data.R1),sd(data.R2),sd(data.T)) # sample standard deviations for the 3 populations
    sd.ref <- sd(data.R1refv)   # estimated reference sd (from the R1 variance sample)
    sd.ref2 <- sd(data.R2refv)  # estimated reference sd (from the R2 variance sample)
    rcd.mean.tmp <- mean.data # 1*3
    rcd.std.tmp <- c(sd(data.R1),sd(data.R2),sd(data.T), sd.ref, sd.ref2) # 1*5
    ########################### 1. Pairwise equivalence test ###################################
    rcd.sig.tmp <- c(biosim.test(n,mean.data[1],n,mean.data[2],sd.ref,alpha),
                     biosim.test(n,mean.data[1],n,mean.data[3],sd.ref,alpha),
                     biosim.test(n,mean.data[2],n,mean.data[3],sd.ref2,alpha))
    ########################## 2. Simultanuous CI using fiducial inference ########################
    # 2.1 With the assumption of equal variances (use sd.ref to calculate similarity margin: refstd=1.5*sd.ref)
    # 2.1.1 Original version, sd.ref as true population sd
    # Integrand: joint fiducial density of the three means times the indicator
    # that all pairwise differences fall within the similarity margin.
    fun.eFP1 <- function(x){
      refstd <- 1.5*sd.ref
      indt <- ( abs(x[1]-x[2])<=refstd )*( abs(x[1]-x[3])<=refstd )*( abs(x[2]-x[3])<=refstd )
      fxyz <- dnorm(x[1], mean = mean.data[1], sd = sd.ref/sqrt(n))*
        dnorm(x[2], mean = mean.data[2], sd = sd.ref/sqrt(n))*
        dnorm(x[3], mean = mean.data[3], sd = sd.ref/sqrt(n))*indt # fiducal inference, mu~N(mean(x), sd.ref/sqrt(n))
      return(fxyz)
    }
    # integral limits: intmar.l and intmar.u: 1*3 (truncate each fiducial
    # normal to its central p.int mass around the sample mean)
    # shouldn't it be [-1.5*sd.ref, 1.5*sd.ref]?
    # intmar.l <- rep(-1.5*sd.ref,3)
    # intmar.u <- rep(1.5*sd.ref,3)
    intmar.l <- mean.data - qnorm(p.int)*sd.ref/sqrt(n) # ?p.int
    intmar.u <- mean.data + qnorm(p.int)*sd.ref/sqrt(n)
    eFP1.res <- cuhre(NDIM, NCOMP, fun.eFP1, lower=intmar.l, upper=intmar.u, flags= list(verbose=0, final=0))
    rcd.simuCI1.tmp1 <- eFP1.res$value
    # two types of restricted CI (only solvable when coverage exceeds p.level):
    rcd.simuCI1.tmp2 <- NA
    rcd.simuCI1.tmp3 <- NA
    # The if() guards uniroot(): a sign change on [0, upper] only exists when
    # the full-margin coverage already exceeds p.level.
    if(eFP1.res$value>p.level){
      # coverage as a function of the 1T margin delta (other margins fixed)
      eFP1.int1 <- function(delta){
        fun.eFP1t1 <- function(x){
          refstd <- 1.5*sd.ref
          indt <- ( abs(x[1]-x[2])<=refstd )*( abs(x[1]-x[3])<=delta )*( abs(x[2]-x[3])<=refstd )
          fxyz <- dnorm(x[1], mean = mean.data[1], sd = sd.ref/sqrt(n))*
            dnorm(x[2], mean = mean.data[2], sd = sd.ref/sqrt(n))*
            dnorm(x[3], mean = mean.data[3], sd = sd.ref/sqrt(n))*indt
          return(fxyz)
        }
        return(cuhre(NDIM, NCOMP, fun.eFP1t1, lower=intmar.l, upper=intmar.u, flags= list(verbose=0, final=0))$value)
      }
      rcd.simuCI1.tmp2 <- uniroot(function(delta) eFP1.int1(delta)-p.level, lower = 0, upper = 1.5*sd.ref)$root
      # coverage as a function of a common margin delta on all three pairs
      eFP1.int2 <- function(delta){
        fun.eFP1t2 <- function(x){
          indt <- ( abs(x[1]-x[2])<=delta )*( abs(x[1]-x[3])<=delta )*( abs(x[2]-x[3])<=delta )
          fxyz <- dnorm(x[1], mean = mean.data[1], sd = sd.ref/sqrt(n))*dnorm(x[2], mean = mean.data[2], sd = sd.ref/sqrt(n))*dnorm(x[3], mean = mean.data[3], sd = sd.ref/sqrt(n))*indt
          return(fxyz)
        }
        return(cuhre(NDIM, NCOMP, fun.eFP1t2, lower=intmar.l, upper=intmar.u, flags= list(verbose=0, final=0))$value)
      }
      rcd.simuCI1.tmp3 <- uniroot(function(delta) eFP1.int2(delta)-p.level, lower = 0, upper = 20*sd.ref)$root
    }
    # 2.1.2 Integrated version, suppose population sd unknown and follow an inversed chisq distribution
    intmarv <- sqrt((n-1)*sd.ref^2/qchisq(1-p.int,df=n-1)) # p.int=0.999, upper bound of the inversed chisq, UB for intergral interval of population sd
    tmp1 <- sqrt((n-1)*sd.ref^2/qchisq(p.int1,df=n-1))
    intmar.l.int <- mean.data - qnorm(p.int)*tmp1/sqrt(n) # tmp1 seems not correct?
    intmar.u.int <- mean.data + qnorm(p.int)*tmp1/sqrt(n)
    # 4-dimensional integrand: x[1..3] are the means, x[4] the population sd,
    # with the sd weighted by its fiducial (inverse chi-square based) density.
    fun.eFP1.int <- function(x){
      refstd <- 1.5*x[4]
      indt <- ( abs(x[1]-x[2])<=refstd )*( abs(x[1]-x[3])<=refstd )*( abs(x[2]-x[3])<=refstd )
      fxyzu <- dnorm(x[1], mean = mean.data[1], sd = x[4]/sqrt(n))*
        dnorm(x[2], mean = mean.data[2], sd = x[4]/sqrt(n))*
        dnorm(x[3], mean = mean.data[3], sd = x[4]/sqrt(n))*indt*
        (2*x[4]^(-3)*(n-1)*sd.ref^2)*dchisq((n-1)*sd.ref^2/(x[4]^2), df=n-1) # pdf of population sd through fiducial inference?
      return(fxyzu)
    }
    eFP1.res.mod <- cuhre(NDIM+1, NCOMP, fun.eFP1.int, lower=c(intmar.l.int,0), upper=c(intmar.u.int,intmarv), flags= list(verbose=0, final=0))
    rcd.simuCI1.tmp4 <- eFP1.res.mod$value
    rcd.simuCI1.tmp5 <- NA
    # NOTE(review): rcd.simuCI1.tmp6 is only assigned inside this if() branch
    # but is referenced unconditionally in the results vector below; when the
    # branch is skipped the c() call errors. It should be initialized to NA
    # here like tmp5.
    if(eFP1.res.mod$value>p.level){
      eFP1.int1.mod <- function(delta){
        fun.eFP1t1.mod <- function(x){
          refstd <- 1.5*x[4]
          indt <- ( abs(x[1]-x[2])<=refstd )*( abs(x[1]-x[3])<=delta*refstd )*( abs(x[2]-x[3])<=refstd )
          fxyzu <- dnorm(x[1], mean = mean.data[1], sd = x[4]/sqrt(n))*dnorm(x[2], mean = mean.data[2], sd = x[4]/sqrt(n))*dnorm(x[3], mean = mean.data[3], sd = x[4]/sqrt(n))*indt* (2*x[4]^(-3)*(n-1)*sd.ref^2)*dchisq((n-1)*sd.ref^2/(x[4]^2), df=n-1)
          return(fxyzu)
        }
        return(cuhre(NDIM+1, NCOMP, fun.eFP1t1.mod, lower=c(intmar.l.int,0), upper=c(intmar.u.int,intmarv), flags= list(verbose=0, final=0))$value)
      }
      rcd.simuCI1.tmp5 <- uniroot(function(delta) eFP1.int1.mod(delta)-p.level, lower = 0, upper = 1)$root
      eFP1.int2.mod <- function(delta){
        fun.eFP1t2.mod <- function(x){
          refstd <- 1.5*x[4]
          indt <- ( abs(x[1]-x[2])<=delta*refstd )*( abs(x[1]-x[3])<=delta*refstd )*( abs(x[2]-x[3])<=delta*refstd )
          fxyzu <- dnorm(x[1], mean = mean.data[1], sd = x[4]/sqrt(n))*dnorm(x[2], mean = mean.data[2], sd = x[4]/sqrt(n))*dnorm(x[3], mean = mean.data[3], sd = x[4]/sqrt(n))*indt* (2*x[4]^(-3)*(n-1)*sd.ref^2)*dchisq((n-1)*sd.ref^2/(x[4]^2), df=n-1)
          return(fxyzu)
        }
        return(cuhre(NDIM+1, NCOMP, fun.eFP1t2.mod, lower=c(intmar.l.int,0), upper=c(intmar.u.int,intmarv), flags= list(verbose=0, final=0))$value)
      }
      rcd.simuCI1.tmp6 <- uniroot(function(delta) eFP1.int2.mod(delta)-p.level, lower = 0, upper = 20)$root
    }
    # 2.1.3 Least favorable version, suppose population sd take the lower bound of the inversed chisq distribution
    sd.ref.lf <- sqrt((n-1)*sd.ref^2/qchisq(1 - alpha,df=n-1)) # lower bound of the inversed chisq
    intmar.l.lf <- mean.data - qnorm(p.int)*sd.ref.lf/sqrt(n)
    intmar.u.lf <- mean.data + qnorm(p.int)*sd.ref.lf/sqrt(n)
    fun.eFP1.lf <- function(x){
      refstd <- 1.5*sd.ref.lf
      indt <- ( abs(x[1]-x[2])<=refstd )*( abs(x[1]-x[3])<=refstd )*( abs(x[2]-x[3])<=refstd )
      fxyz <- dnorm(x[1], mean = mean.data[1], sd = sd.ref.lf/sqrt(n))*
        dnorm(x[2], mean = mean.data[2], sd = sd.ref.lf/sqrt(n))*
        dnorm(x[3], mean = mean.data[3], sd = sd.ref.lf/sqrt(n))*indt
      return(fxyz)
    }
    eFP1.res.lf <- cuhre(NDIM, NCOMP, fun.eFP1.lf, lower=intmar.l.lf, upper=intmar.u.lf, flags= list(verbose=0, final=0))
    rcd.simuCI1.tmp7 <- eFP1.res.lf$value
    rcd.simuCI1.tmp8 <- NA
    rcd.simuCI1.tmp9 <- NA
    if(eFP1.res.lf$value>p.level){
      eFP1.int1.lf <- function(delta){
        fun.eFP1t1.lf <- function(x){
          refstd <- 1.5*sd.ref.lf
          indt <- ( abs(x[1]-x[2])<=refstd )*( abs(x[1]-x[3])<=delta )*( abs(x[2]-x[3])<=refstd )
          fxyz <- dnorm(x[1], mean = mean.data[1], sd = sd.ref.lf/sqrt(n))*dnorm(x[2], mean = mean.data[2], sd = sd.ref.lf/sqrt(n))*dnorm(x[3], mean = mean.data[3], sd = sd.ref.lf/sqrt(n))*indt
          return(fxyz)
        }
        return(cuhre(NDIM, NCOMP, fun.eFP1t1.lf, lower=intmar.l.lf, upper=intmar.u.lf, flags= list(verbose=0, final=0))$value)
      }
      rcd.simuCI1.tmp8 <- uniroot(function(delta) eFP1.int1.lf(delta)-p.level, lower = 0, upper = 1.5*sd.ref.lf)$root
      eFP1.int2.lf <- function(delta){
        fun.eFP1t2.lf <- function(x){
          indt <- ( abs(x[1]-x[2])<=delta )*( abs(x[1]-x[3])<=delta )*( abs(x[2]-x[3])<=delta )
          fxyz <- dnorm(x[1], mean = mean.data[1], sd = sd.ref.lf/sqrt(n))*dnorm(x[2], mean = mean.data[2], sd = sd.ref.lf/sqrt(n))*dnorm(x[3], mean = mean.data[3], sd = sd.ref.lf/sqrt(n))*indt
          return(fxyz)
        }
        return(cuhre(NDIM, NCOMP, fun.eFP1t2.lf, lower=intmar.l.lf, upper=intmar.u.lf, flags= list(verbose=0, final=0))$value)
      }
      rcd.simuCI1.tmp9 <- uniroot(function(delta) eFP1.int2.lf(delta)-p.level, lower = 0, upper = 20*sd.ref.lf)$root
    }
    # ---------------- combine data and results: n*5, 3+5+3, 9+9+9 ---------------
    # NOTE(review): rcd.simuCI2.tmp1..9 and rcd.simuCI3.tmp1..9 are referenced
    # below but never assigned anywhere in this function — the code for the
    # corresponding sections (2.2 / 2.3) appears to be missing from this file,
    # so this c() will fail with "object not found". Restore those sections
    # (or initialize the placeholders to NA) before running.
    results <- c(rcd.data.tmp,
                 rcd.mean.tmp,
                 rcd.std.tmp,
                 rcd.sig.tmp,
                 rcd.simuCI1.tmp1,
                 rcd.simuCI1.tmp2,
                 rcd.simuCI1.tmp3,
                 rcd.simuCI1.tmp4,
                 rcd.simuCI1.tmp5,
                 rcd.simuCI1.tmp6,
                 rcd.simuCI1.tmp7,
                 rcd.simuCI1.tmp8,
                 rcd.simuCI1.tmp9,
                 rcd.simuCI2.tmp1,
                 rcd.simuCI2.tmp2,
                 rcd.simuCI2.tmp3,
                 rcd.simuCI2.tmp4,
                 rcd.simuCI2.tmp5,
                 rcd.simuCI2.tmp6,
                 rcd.simuCI2.tmp7,
                 rcd.simuCI2.tmp8,
                 rcd.simuCI2.tmp9,
                 rcd.simuCI3.tmp1,
                 rcd.simuCI3.tmp2,
                 rcd.simuCI3.tmp3,
                 rcd.simuCI3.tmp4,
                 rcd.simuCI3.tmp5,
                 rcd.simuCI3.tmp6,
                 rcd.simuCI3.tmp7,
                 rcd.simuCI3.tmp8,
                 rcd.simuCI3.tmp9
    )
    return(results)
  }
  library(Rmpi)   # already attached at the top of the file; re-attach is a no-op
  mpi.spawn.Rslaves()
  # Run all repetitions in parallel; each repetition returns one column.
  # NOTE(review): the extra arguments passed here (rsd21, rsdT1, sd.all) do not
  # match the formals of simu.one.PAR.mpi (which expects sd.R1/sd.R2/sd.T), so
  # this call errors with "unused arguments" as written — see the note on the
  # function definition above.
  system.time( out.tmp.cont <- mpi.parSapply(1:repet, simu.one.PAR.mpi, k=k,rsd21=rsd21,rsdT1=rsdT1,mu.R1=mu.R1,mu.R2=mu.R2,mu.T=mu.T,sd.all=sd.all,n=n) )
  mpi.close.Rslaves()
  # Transpose to one row per repetition, then slice the concatenated result
  # vector back into its named pieces (widths: n*5, 3, 5, 3, 9, 9, 9).
  restmp <- t(out.tmp.cont)
  rcd.data <- restmp[,1:(n*5)]
  rcd.mean <- restmp[,(n*5+1):(n*5+3)]
  rcd.std <- restmp[,(n*5+3+1):(n*5+3+5)]
  rcd.sig <- restmp[,(n*5+3+5+1):(n*5+3+5+3)]
  rcd.simuCI1 <- restmp[,(n*5+3+5+3+1):(n*5+3+5+3+9)]
  rcd.simuCI2 <- restmp[,(n*5+3+5+3+9+1):(n*5+3+5+3+9+9)]
  rcd.simuCI3 <- restmp[,(n*5+3+5+3+9+9+1):(n*5+3+5+3+9+9+9)]
  # Store this scenario's block of columns in the all-scenario matrices.
  Arcd.data[,(n*5*(k-1)+1):(n*5*k)] <- rcd.data
  Arcd.mean[,(3*(k-1)+1):(3*k)] <- rcd.mean
  Arcd.std[,((3+2)*(k-1)+1):((3+2)*k)] <- rcd.std
  Arcd.sig[,(3*(k-1)+1):(3*k)] <- rcd.sig
  Arcd.simuCI1[,(9*(k-1)+1):(9*k)] <- rcd.simuCI1
  Arcd.simuCI2[,(9*(k-1)+1):(9*k)] <- rcd.simuCI2
  Arcd.simuCI3[,(9*(k-1)+1):(9*k)] <- rcd.simuCI3
  # Scenario summary of the pairwise tests: the three marginal pass rates, the
  # joint pass rate of the two T comparisons, and the joint rate of all three.
  summ.sig[k,] <- c(apply(rcd.sig,2,mean), sum(apply(rcd.sig[,2:3],1,prod))/repet, sum(apply(rcd.sig,1,prod))/repet)
  # For each CI variant: pairs of (mean value, attainment rate) over the 9
  # recorded quantities; margins are rescaled (by rcd.std[,4] or *1.5) so they
  # are comparable to the true standardized differences true1T/true2T/true12.
  summ.simuCI1[k,] <- c(mean(rcd.simuCI1[,1]),
                        mean(rcd.simuCI1[,1]>=p.level),
                        mean(rcd.simuCI1[,2]/rcd.std[,4], na.rm=T),
                        mean(rcd.simuCI1[,2]/rcd.std[,4]>=true1T, na.rm=T),
                        mean(rcd.simuCI1[,3]/rcd.std[,4]),
                        mean(rcd.simuCI1[,3]/rcd.std[,4]>=max(true1T,true2T,true12)),
                        mean(rcd.simuCI1[,4]),
                        mean(rcd.simuCI1[,4]>=p.level),
                        mean(rcd.simuCI1[,5]*1.5, na.rm=T),
                        mean(rcd.simuCI1[,5]*1.5>=true1T, na.rm=T),
                        mean(rcd.simuCI1[,6]*1.5),
                        mean(rcd.simuCI1[,6]*1.5>=max(true1T,true2T,true12)),
                        mean(rcd.simuCI1[,7]),
                        mean(rcd.simuCI1[,7]>=p.level),
                        mean(rcd.simuCI1[,8]/rcd.std[,4], na.rm=T),
                        mean(rcd.simuCI1[,8]/rcd.std[,4]>=true1T, na.rm=T),
                        mean(rcd.simuCI1[,9]/rcd.std[,4]),
                        mean(rcd.simuCI1[,9]/rcd.std[,4]>=max(true1T,true2T,true12))
  )
  summ.simuCI2[k,] <- c(mean(rcd.simuCI2[,1]),
                        mean(rcd.simuCI2[,1]>=p.level),
                        mean(rcd.simuCI2[,2]/rcd.std[,4], na.rm=T),
                        mean(rcd.simuCI2[,2]/rcd.std[,4]>=true1T, na.rm=T),
                        mean(rcd.simuCI2[,3]/rcd.std[,4]),
                        mean(rcd.simuCI2[,3]/rcd.std[,4]>=max(true1T,true2T,true12)),
                        mean(rcd.simuCI2[,4]),
                        mean(rcd.simuCI2[,4]>=p.level),
                        mean(rcd.simuCI2[,5]*1.5, na.rm=T),
                        mean(rcd.simuCI2[,5]*1.5>=true1T, na.rm=T),
                        mean(rcd.simuCI2[,6]*1.5),
                        mean(rcd.simuCI2[,6]*1.5>=max(true1T,true2T,true12)),
                        mean(rcd.simuCI2[,7]),
                        mean(rcd.simuCI2[,7]>=p.level),
                        mean(rcd.simuCI2[,8]/rcd.std[,4], na.rm=T),
                        mean(rcd.simuCI2[,8]/rcd.std[,4]>=true1T, na.rm=T),
                        mean(rcd.simuCI2[,9]/rcd.std[,4]),
                        mean(rcd.simuCI2[,9]/rcd.std[,4]>=max(true1T,true2T,true12))
  )
  # Variant 3 compares against true2T.2 (the T-vs-R2 difference scaled by sd.R2).
  summ.simuCI3[k,] <- c(mean(rcd.simuCI3[,1]),
                        mean(rcd.simuCI3[,1]>=p.level),
                        mean(rcd.simuCI3[,2]/rcd.std[,4], na.rm=T),
                        mean(rcd.simuCI3[,2]/rcd.std[,4]>=true1T, na.rm=T),
                        mean(rcd.simuCI3[,3]*1.5),
                        mean(rcd.simuCI3[,3]*1.5>=max(true1T,true2T.2,true12)),
                        mean(rcd.simuCI3[,4]),
                        mean(rcd.simuCI3[,4]>=p.level),
                        mean(rcd.simuCI3[,5]*1.5, na.rm=T),
                        mean(rcd.simuCI3[,5]*1.5>=true1T, na.rm=T),
                        mean(rcd.simuCI3[,6]*1.5),
                        mean(rcd.simuCI3[,6]*1.5>=max(true1T,true2T.2,true12)),
                        mean(rcd.simuCI3[,7]),
                        mean(rcd.simuCI3[,7]>=p.level),
                        mean(rcd.simuCI3[,8]/rcd.std[,4], na.rm=T),
                        mean(rcd.simuCI3[,8]/rcd.std[,4]>=true1T, na.rm=T),
                        mean(rcd.simuCI3[,9]*1.5),
                        mean(rcd.simuCI3[,9]*1.5>=max(true1T,true2T.2,true12))
  )
}
# Assemble the per-repetition record matrix and the rounded summary table, then
# export both as CSVs whose names encode the simulation settings and run date.
Arcd <- cbind(Arcd.data, Arcd.mean, Arcd.std, Arcd.sig, Arcd.simuCI1, Arcd.simuCI2, Arcd.simuCI3)
summ <- round(cbind(summ.sig, summ.simuCI1, summ.simuCI2, summ.simuCI3), 3)
rundate <- "0407"
setting_tag <- paste0(
  "_n", n, "_", mu.R1, "_", mu.R2, "_", mu.T, "_r_", rsd21, "_", rsdT1,
  "_sdR1_", round(sd.all[1], 2), "_", round(sd.all[ksce], 2),
  "_rp", repet, "_", rundate, "_v2.csv"
)
write.csv(Arcd, paste0("Arcd", setting_tag))
write.csv(summ, paste0("summ", setting_tag))
|
0849ff9454212e2bb90e04d6b96194345fc99d56 | 1d80ea56e9759f87ef9819ed92a76526691a5c3b | /R/interpret_cfa_fit.R | 863ea6d728855431f4bfc88b0aba66cd744449f5 | [] | no_license | cran/effectsize | 5ab4be6e6b9c7f56d74667e52162c2ca65976516 | e8baef181cc221dae96f60b638ed49d116384041 | refs/heads/master | 2023-08-16T21:23:58.750452 | 2023-08-09T18:40:02 | 2023-08-09T19:30:51 | 236,590,396 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,982 | r | interpret_cfa_fit.R | #' Interpret of CFA / SEM Indices of Goodness of Fit
#'
#' Interpretation of indices of fit found in confirmatory analysis or structural
#' equation modelling, such as RMSEA, CFI, NFI, IFI, etc.
#'
#' @param x vector of values, or an object of class `lavaan`.
#' @param rules Can be the name of a set of rules (see below) or custom set of
#' [rules()].
#' @inheritParams interpret
#'
#' @inherit performance::model_performance.lavaan details
#' @inherit performance::model_performance.lavaan references
#'
#' @details
#' ## Indices of fit
#' - **Chisq**: The model Chi-squared assesses overall fit and the discrepancy
#' between the sample and fitted covariance matrices. Its p-value should be >
#' .05 (i.e., the hypothesis of a perfect fit cannot be rejected). However, it
#' is quite sensitive to sample size.
#'
#' - **GFI/AGFI**: The (Adjusted) Goodness of Fit is the proportion of variance
#' accounted for by the estimated population covariance. Analogous to R2. The
#' GFI and the AGFI should be > .95 and > .90, respectively (Byrne, 1994;
#' `"byrne1994"`).
#'
#' - **NFI/NNFI/TLI**: The (Non) Normed Fit Index. An NFI of 0.95, indicates the
#' model of interest improves the fit by 95\% relative to the null model. The
#' NNFI (also called the Tucker Lewis index; TLI) is preferable for smaller
#' samples. They should be > .90 (Byrne, 1994; `"byrne1994"`) or > .95
#' (Schumacker & Lomax, 2004; `"schumacker2004"`).
#'
#' - **CFI**: The Comparative Fit Index is a revised form of NFI. Not very
#' sensitive to sample size (Fan, Thompson, & Wang, 1999). Compares the fit of a
#' target model to the fit of an independent, or null, model. It should be > .96
#' (Hu & Bentler, 1999; `"hu&bentler1999"`) or .90 (Byrne, 1994; `"byrne1994"`).
#'
#' - **RFI**: the Relative Fit Index, also known as RHO1, is not guaranteed to
#' vary from 0 to 1. However, RFI close to 1 indicates a good fit.
#'
#' - **IFI**: the Incremental Fit Index (IFI) adjusts the Normed Fit Index (NFI)
#' for sample size and degrees of freedom (Bollen's, 1989). Over 0.90 is a good
#' fit, but the index can exceed 1.
#'
#' - **PNFI**: the Parsimony-Adjusted Measures Index. There is no commonly
#' agreed-upon cutoff value for an acceptable model for this index. Should be >
#' 0.50.
#'
#' - **RMSEA**: The Root Mean Square Error of Approximation is a
#' parsimony-adjusted index. Values closer to 0 represent a good fit. It should
#' be < .08 (Awang, 2012; `"awang2012"`) or < .05 (Byrne, 1994; `"byrne1994"`).
#' The p-value printed with it tests the hypothesis that RMSEA is less than or
#' equal to .05 (a cutoff sometimes used for good fit), and thus should be not
#' significant.
#'
#' - **RMR/SRMR**: the (Standardized) Root Mean Square Residual represents the
#' square-root of the difference between the residuals of the sample covariance
#' matrix and the hypothesized model. As the RMR can be sometimes hard to
#' interpret, better to use SRMR. Should be < .08 (Byrne, 1994; `"byrne1994"`).
#'
#' See the documentation for \code{\link[lavaan:fitmeasures]{fitmeasures()}}.
#'
#'
#' ## What to report
#' For structural equation models (SEM), Kline (2015) suggests that at a minimum
#' the following indices should be reported: The model **chi-square**, the
#' **RMSEA**, the **CFI** and the **SRMR**.
#'
#' @note When possible, it is recommended to report dynamic cutoffs of fit
#' indices. See https://dynamicfit.app/cfa/.
#'
#'
#' @examples
#' interpret_gfi(c(.5, .99))
#' interpret_agfi(c(.5, .99))
#' interpret_nfi(c(.5, .99))
#' interpret_nnfi(c(.5, .99))
#' interpret_cfi(c(.5, .99))
#' interpret_rmsea(c(.07, .04))
#' interpret_srmr(c(.5, .99))
#' interpret_rfi(c(.5, .99))
#' interpret_ifi(c(.5, .99))
#' interpret_pnfi(c(.5, .99))
#'
#' @examplesIf require("lavaan") && interactive()
#' # Structural Equation Models (SEM)
#' structure <- " ind60 =~ x1 + x2 + x3
#' dem60 =~ y1 + y2 + y3
#' dem60 ~ ind60 "
#'
#' model <- lavaan::sem(structure, data = lavaan::PoliticalDemocracy)
#'
#' interpret(model)
#'
#' @references
#' - Awang, Z. (2012). A handbook on SEM. Structural equation modeling.
#'
#' - Byrne, B. M. (1994). Structural equation modeling with EQS and EQS/Windows.
#' Thousand Oaks, CA: Sage Publications.
#'
#' - Fan, X., B. Thompson, and L. Wang (1999). Effects of sample size,
#' estimation method, and model specification on structural equation modeling
#' fit indexes. Structural Equation Modeling, 6, 56-83.
#'
#' - Hu, L. T., & Bentler, P. M. (1999). Cutoff criteria for fit indexes in
#' covariance structure analysis: Conventional criteria versus new
#' alternatives. Structural equation modeling: a multidisciplinary journal,
#' 6(1), 1-55.
#'
#' - Kline, R. B. (2015). Principles and practice of structural equation
#' modeling. Guilford publications.
#'
#' - Schumacker, R. E., and Lomax, R. G. (2004). A beginner's guide to
#' structural equation modeling, Second edition. Mahwah, NJ: Lawrence Erlbaum
#' Associates.
#'
#' - Tucker, L. R., and Lewis, C. (1973). The reliability coefficient for
#' maximum likelihood factor analysis. Psychometrika, 38, 1-10.
#'
#'
#' @keywords interpreters
#' @export
interpret_gfi <- function(x, rules = "byrne1994") {
  # Goodness-of-Fit Index: >= .95 is "satisfactory" (Byrne, 1994).
  # Bins are left-closed (right = FALSE).
  known <- list(
    byrne1994 = rules(0.95, c("poor", "satisfactory"),
                      name = "byrne1994", right = FALSE)
  )
  interpret(x, .match.rules(rules, known))
}
#' @rdname interpret_gfi
#' @export
interpret_agfi <- function(x, rules = "byrne1994") {
  # Adjusted Goodness-of-Fit Index: >= .90 is "satisfactory" (Byrne, 1994).
  # Bins are left-closed (right = FALSE).
  known <- list(
    byrne1994 = rules(0.90, c("poor", "satisfactory"),
                      name = "byrne1994", right = FALSE)
  )
  interpret(x, .match.rules(rules, known))
}
#' @rdname interpret_gfi
#' @export
interpret_nfi <- function(x, rules = "byrne1994") {
  # Normed Fit Index: "satisfactory" at >= .90 (Byrne, 1994; the default)
  # or at >= .95 (Schumacker & Lomax, 2004). Left-closed bins.
  known <- list(
    byrne1994 = rules(0.90, c("poor", "satisfactory"),
                      name = "byrne1994", right = FALSE),
    schumacker2004 = rules(0.95, c("poor", "satisfactory"),
                           name = "schumacker2004", right = FALSE)
  )
  interpret(x, .match.rules(rules, known))
}
#' @rdname interpret_gfi
#' @export
interpret_nnfi <- interpret_nfi
#' @rdname interpret_gfi
#' @export
interpret_cfi <- function(x, rules = "byrne1994") {
  # Comparative Fit Index: "satisfactory" at >= .96 (Hu & Bentler, 1999)
  # or >= .90 (Byrne, 1994; the default). Left-closed bins.
  known <- list(
    "hu&bentler1999" = rules(0.96, c("poor", "satisfactory"),
                             name = "hu&bentler1999", right = FALSE),
    "byrne1994" = rules(0.90, c("poor", "satisfactory"),
                        name = "byrne1994", right = FALSE)
  )
  interpret(x, .match.rules(rules, known))
}
#' @rdname interpret_gfi
#' @export
interpret_rfi <- function(x, rules = "default") {
  # Relative Fit Index: >= .90 is "satisfactory". Left-closed bins.
  known <- list(
    default = rules(0.90, c("poor", "satisfactory"),
                    name = "default", right = FALSE)
  )
  interpret(x, .match.rules(rules, known))
}
#' @rdname interpret_gfi
#' @export
interpret_ifi <- function(x, rules = "default") {
  # Incremental Fit Index: >= .90 is "satisfactory". Left-closed bins.
  known <- list(
    default = rules(0.90, c("poor", "satisfactory"),
                    name = "default", right = FALSE)
  )
  interpret(x, .match.rules(rules, known))
}
#' @rdname interpret_gfi
#' @export
interpret_pnfi <- function(x, rules = "default") {
  # Parsimony-adjusted NFI: >= .50 is "satisfactory".
  # Note: unlike the indices above, this rule set keeps the default
  # (right-closed) bin boundary, matching the original definition.
  known <- list(
    default = rules(0.50, c("poor", "satisfactory"), name = "default")
  )
  interpret(x, .match.rules(rules, known))
}
#' @rdname interpret_gfi
#' @export
interpret_rmsea <- function(x, rules = "byrne1994") {
  # RMSEA runs the opposite way from the indices above: smaller is better,
  # so labels go from "satisfactory"/"good" up to "poor".
  known <- list(
    byrne1994 = rules(0.05, c("satisfactory", "poor"), name = "byrne1994"),
    awang2012 = rules(c(0.05, 0.08), c("good", "satisfactory", "poor"),
                      name = "awang2012")
  )
  interpret(x, .match.rules(rules, known))
}
#' @rdname interpret_gfi
#' @export
interpret_srmr <- function(x, rules = "byrne1994") {
  # SRMR: smaller is better; < .08 is "satisfactory" (Byrne, 1994).
  known <- list(
    byrne1994 = rules(0.08, c("satisfactory", "poor"), name = "byrne1994")
  )
  interpret(x, .match.rules(rules, known))
}
# lavaan ------------------------------------------------------------------
#' @rdname interpret_gfi
#' @export
interpret.lavaan <- function(x, ...) {
  # S3 method for lavaan model objects: compute the fit indices with
  # performance::model_performance() (namespaced call; 'performance' is not
  # attached), then interpret that result. `...` is forwarded to both calls.
  interpret(performance::model_performance(x, ...), ...)
}
#' @rdname interpret_gfi
#' @export
interpret.performance_lavaan <- function(x, ...) {
  # S3 method for a table of lavaan fit indices: build one interpretation
  # row per recognized index.
  #
  # @param x An object with named fit-index entries (e.g. GFI, CFI, RMSEA).
  # @param ... Unused; kept for S3-method compatibility.
  # @return A data.frame with columns Name, Value, Threshold, Interpretation
  #   (one row per index found in `x`).
  #
  # Fit indices we know how to interpret, in display order.
  mfits <- c(
    "GFI", "AGFI", "NFI", "NNFI",
    "CFI", "RMSEA", "SRMR", "RFI",
    "IFI", "PNFI"
  )
  # Keep only the indices actually present in `x`.
  mfits <- intersect(names(x), mfits)
  table <- lapply(mfits, function(ind_name) {
    # Look up the matching interpreter (e.g. "GFI" -> interpret_gfi).
    # match.fun() resolves the function by name without parsing code from
    # text, replacing the previous eval(parse(text = ...)) construct.
    .interpret_ind <- match.fun(paste0("interpret_", tolower(ind_name)))
    interp <- .interpret_ind(x[[ind_name]])
    # The interpreter attaches the rule set it applied; report its cutoffs.
    rules <- attr(interp, "rules")
    data.frame(
      Name = ind_name,
      Value = x[[ind_name]],
      Threshold = rules$values,
      Interpretation = interp,
      stringsAsFactors = FALSE
    )
  })
  # Stack the per-index rows into a single data.frame (NULL when no match).
  do.call(rbind, table)
}
|
6141ad9f1feba52e2da2b5439a9a533d360d1f9c | 5069e68c2dc09710b5f40e7bd86d9f591ce606d9 | /BasetableModelCode.R | 9e092fd6b6886929a1fd2b1e18b013b270613d43 | [] | no_license | kmkepler/DefectionProject | 1f6fbb85e6101140e31565e57685ef343d5177bf | cce38bbb9f9f7132d4472446bdcfff459085622f | refs/heads/master | 2020-04-10T10:11:50.978670 | 2016-09-26T17:07:48 | 2016-09-26T17:07:48 | 68,229,868 | 0 | 1 | null | 2016-09-14T18:47:15 | 2016-09-14T17:54:56 | R | UTF-8 | R | false | false | 5,595 | r | BasetableModelCode.R | # INSTALL PACKAGES
# INSTALL PACKAGES
# requireNamespace() checks availability without attaching; library() then
# attaches plyr and errors loudly if the load fails. (require() is
# discouraged for loading because it returns FALSE instead of erroring.)
if (!requireNamespace("plyr", quietly = TRUE)) install.packages("plyr")
library(plyr)
# SET DATE FORMAT
# Register an "fDate" S4 class so read.csv(colClasses = "fDate") parses
# day/month/year strings straight into Date columns.
f <- "%d/%m/%Y";setClass("fDate");setAs(from="character",to="fDate",def=function(from) as.Date(from,format=f))
# LOADS DATA
# Six relational tables from the course site; the custom "fDate" colClass
# (registered above) parses d/m/Y strings into Date columns on read.
customers<-read.csv("http://ballings.co/hidden/aCRM/data/chapter6/customers.txt",sep=";",header=TRUE,colClasses=c("character","factor","fDate","factor","factor","character"))
formula<-read.csv("http://ballings.co/hidden/aCRM/data/chapter6/formula.txt",sep=";",header=TRUE,colClasses=c("character","factor","factor","numeric"))
subscriptions<-read.csv("http://ballings.co/hidden/aCRM/data/chapter6/subscriptions.txt",sep=";",header=TRUE,colClasses=c("character","character","factor","factor","fDate","fDate","integer","integer","fDate","factor","factor","fDate","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
delivery<-read.csv("http://ballings.co/hidden/aCRM/data/chapter6/delivery.txt",sep=";",header=TRUE,colClasses=c("character","character","factor","factor","factor","fDate","fDate"))
complaints<-read.csv("http://ballings.co/hidden/aCRM/data/chapter6/complaints.txt",sep=";",header=TRUE,colClasses=c("character","character","factor","fDate","factor","factor","factor"))
credit<-read.csv("http://ballings.co/hidden/aCRM/data/chapter6/credit.txt",sep=";",header=TRUE,colClasses=c("character","character","factor","fDate","factor","numeric","integer"))
# DATES
# Analysis windows anchored on subscription start dates:
# [t1, t2] is the independent (predictor) period; (t3, t4] covers the
# one-year dependent (churn) period.
t1<-min(subscriptions$StartDate)
t4<-max(subscriptions$StartDate) - 365
t3<-t4-365 # dependent period of 1 yr
t2<-t3-1 # operational period of 1 to 3 days
# CALC: NUMB COMPLAINTS BY CUSTOMER
# Restrict complaints to the independent period [t1, t2], then count per customer.
complaints <- complaints[which(complaints$ComplaintDate>=t1 & complaints$ComplaintDate<=t2),]
c1<-ddply(complaints,~CustomerID,summarize,num.complaints=length(ComplaintID))
# CALC: NUM.SUBSCRIPTIONS, SUM.NBRNEWSPAPERS, SUM.TOTALDISCOUNT, SUM.TOTALPRICE, SUM.TOTALCREDIT, MAX.RENEWAL
# Aggregate subscription volume/monetary features per customer, using only
# complete subscription rows that ended before the dependent period starts.
c2 <- subscriptions
c2 <- c2[which(c2$EndDate<=t3),]
c2 <- c2[which(complete.cases(c2)),]
c2<-ddply(c2,~CustomerID,summarize,
          num.subscriptions=length(unique(ProductID)),
          sum.newspapers=sum(NbrNewspapers),
          sum.totaldiscount=sum(TotalDiscount),
          sum.totalprice=sum(TotalPrice),
          sum.credit=sum(TotalCredit),
          num.products=length(unique(ProductID)))
# NOTE(review): num.subscriptions and num.products are computed identically
# (both count unique ProductID) — confirm whether num.subscriptions was
# meant to count SubscriptionID instead.
# CALC: COUNT RENEWALS
# subscriptions$renewed <- ifelse(is.na(subscriptions$RenewalDate),0,1) # if renew 1, else 0
# c3<-ddply(subscriptions,~CustomerID,summarize,num.renew=sum(renewed)) # sum number renewals by customer
# CALC: COUNT CREDITS
# Flag subscriptions that appear in the (period-restricted) credit table and
# count those flags per customer.
credit <- credit[which(credit$ProcessingDate>=t1 & credit$ProcessingDate<=t2),]
c4 <- subscriptions
c4 <- c4[which(c4$EndDate<=t3),]
c4 <- c4[which(complete.cases(c4)),]
c4$in.credit <- ifelse(c4$SubscriptionID%in%credit$SubscriptionID,1,0) # if subid in credit subid then 1, else 0
c4<-ddply(c4,~CustomerID,summarize,num.credit=sum(in.credit)) # sum num credits by customer id
# CALC: NUMB PRODUCTS
# c5<-ddply(subscriptions,~CustomerID,summarize,prod.id=unique(ProductID)) # unique productID by customerID (n=1607)
# c5<-ddply(c5,~CustomerID,summarize,num.products=length(prod.id)) # count unique productID by customerID
# CALC: TIME AS CUSTOMER
# Keep subscriptions still active at/after t3 (note: this overwrites the
# `subscriptions` table in place), then summarize each customer's first
# start and last end dates; days.cust is the span between them in days.
subscriptions <- subscriptions[which(subscriptions$EndDate>=t3),]
c6<-ddply(subscriptions,~CustomerID,summarize,max.end=max(EndDate),max.start=max(StartDate),min.end=min(EndDate),min.start=min(StartDate))
c6$days.cust<-as.integer(c6$max.end)-as.integer(c6$min.start)
## MERGE TO GET BASETABLE
# Left-join each feature table onto the customer base; customers with no
# complaint rows get num.complaints imputed to 0. The c2 merge is an inner
# join, so customers without qualifying subscriptions drop NA columns later.
base<-merge(customers[,c(1,2,3,4)],c1,by="CustomerID",all.x=TRUE)
base$num.complaints[is.na(base$num.complaints)] <- 0
base<-merge(base,c2,by="CustomerID")
#base<-merge(base,c3,by="CustomerID",all.x=TRUE)
#base$did.renew <- ifelse(base$num.renew==0,0,1) # ever renewed 1, else 0
base<-merge(base,c4,by="CustomerID",all.x=TRUE)
base$did.credit <- ifelse(base$num.credit==0,0,1) # if cust ever had a credit 1, else 0
# base<-merge(base,c5,by="CustomerID",all.x=TRUE)
base<-merge(base,c6,by="CustomerID",all.x=TRUE)
# change data types
# base$num.renew<-as.integer(base$num.renew)
# base$did.renew<-as.integer(base$did.renew)
base$num.credit<-as.integer(base$num.credit)
base$did.credit<-as.integer(base$did.credit)
# deal with NAs
# max.renewal <- base$max.renewal
# base$max.renewal <- NULL
#removing 0 subscriptions
# Drop customers with no qualifying subscriptions, then any remaining
# incomplete rows.
base <- base[which(!is.na(base$num.subscriptions)),]
base <- base[complete.cases(base),]
# base$max.renewal <- max.renewal
## COMPUTE DV
# CALC: DV 1 if churn, else 2
# NOTE(review): the second ifelse() operates on the factor base$DV and so
# yields its underlying integer codes (1, 2) rather than labels; here codes
# and labels coincide ("1", "2"), so the result is unchanged — confirm this
# stays true if the factor levels ever differ.
base$DV = as.factor(ifelse(base$max.start > t4,2,1))
base$DV = as.factor(ifelse(base$max.end > t4,base$DV,1))
#load the package randomForest
if (!require("randomForest")) {
  install.packages('randomForest',
                   repos="https://cran.rstudio.com/",
                   quiet=TRUE)
  require('randomForest')
}
#randomize order of indicators
# 50/50 train/test split by row index.
# NOTE(review): no set.seed() call, so the split (and the forest) is not
# reproducible between runs — confirm whether that is intended.
ind <- 1:nrow(base)
indTRAIN <- sample(ind,round(0.5*length(ind)))
indTEST <- ind[-indTRAIN]
# Separate the response, then drop identifier/date columns (and days.cust)
# from the predictor set. The commented lines are earlier feature-selection
# experiments left for reference.
DV <-base$DV
base$DV <- NULL
base$CustomerID <- NULL
base$max.end <- NULL
base$max.start <- NULL
base$min.end <- NULL
#BasetableTRAIN$sum.totalprice <- NULL
#BasetableTRAIN$min.start <- NULL
#BasetableTRAIN$num.complaints <- NULL
base$days.cust <- NULL
#BasetableTRAIN$num.subscriptions <- NULL
#BasetableTRAIN$sum.totalprice <- NULL
# Fit a 1000-tree random forest on the training rows, then score the test
# rows with the predicted probability of the second class.
rFmodel <- randomForest(x=(base[indTRAIN,]),
                        y=DV[indTRAIN],
                        ntree=1000)
predrF <- predict(rFmodel,base[indTEST,],type="prob")[,2]
#assess final performance
# AUC and top-decile lift on the held-out rows; then variable importance.
# NOTE(review): roc() is called unqualified but the AUC package is never
# attached — this only works if AUC is already on the search path; consider
# AUC::roc() for robustness.
AUC::auc(roc(predrF,DV[indTEST]))
library('lift')
TopDecileLift(predrF,DV[indTEST])
varImpPlot(rFmodel)
|
00f04ab1b65caa042fedd689c9af9d03ec3ea1c2 | ae4176be620b27f3828de57c0abb38e8a5779775 | /plot1.R | 2097000140ff3046a13ceb289da181010e5ecf1e | [] | no_license | merlinjm/ExData_Plotting1 | 495bdca168aa28db02fc90fe40375b4a96340573 | 9e46ec17081f412e4902c9558ae1002a879c1c9b | refs/heads/master | 2021-01-14T11:53:25.476391 | 2015-11-08T20:54:04 | 2015-11-08T20:54:04 | 45,786,920 | 0 | 0 | null | 2015-11-08T15:47:52 | 2015-11-08T15:47:49 | null | UTF-8 | R | false | false | 497 | r | plot1.R | library(lubridate)
# Plot 1: histogram of household global active power for 2007-02-01 and
# 2007-02-02, written to plot1.png (480 x 480 px).
png(filename = "plot1.png", width = 480, height = 480)
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                    stringsAsFactors = FALSE, na.strings = "?")
power$Date <- as.Date(power$Date, "%d/%m/%Y")
# Keep only the two target days (strictly between Jan 31 and Feb 3, 2007).
feb_days <- subset(power, Date > as.Date("2007-01-31") & Date < as.Date("2007-02-03"))
# Combine date and time into a single timestamp (lubridate::ymd_hms).
feb_days$Time <- ymd_hms(paste(feb_days$Date, feb_days$Time))
par(cex = 0.75)
hist(feb_days$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
983bebff8ef29438f749c2896f5652c40dbbb5a5 | 9ca35958aee8e1d16e78b64b03a4cbd3ae1dc586 | /man/getCountsByRegions.Rd | fdc644822a0e07f17eb6dbcdaefb3dbc84edea40 | [] | no_license | mdeber/BRGenomics | df68e7f6cf01e36db2a5dc1003abe8bf8f21c9f2 | b89c4fd9fff3fd3e795be5d382617473a2358d05 | refs/heads/master | 2023-04-28T17:29:07.075368 | 2023-04-25T15:16:35 | 2023-04-25T15:16:35 | 228,493,638 | 8 | 3 | null | null | null | null | UTF-8 | R | false | true | 5,431 | rd | getCountsByRegions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signal_counting.R
\name{getCountsByRegions}
\alias{getCountsByRegions}
\title{Get signal counts in regions of interest}
\usage{
getCountsByRegions(
dataset.gr,
regions.gr,
field = "score",
NF = NULL,
blacklist = NULL,
melt = FALSE,
region_names = NULL,
expand_ranges = FALSE,
ncores = getOption("mc.cores", 2L)
)
}
\arguments{
\item{dataset.gr}{A GRanges object in which signal is contained in metadata
(typically in the "score" field), or a named list of such GRanges objects.
If a list is given, a dataframe is returned containing the counts in each
region for each dataset.}
\item{regions.gr}{A GRanges object containing regions of interest.}
\item{field}{The metadata field of \code{dataset.gr} to be counted. If
\code{length(field) > 1}, a dataframe is returned containing the counts for
each region in each field. If \code{field} not found in
\code{names(mcols(dataset.gr))}, will default to using all fields found in
\code{dataset.gr}.}
\item{NF}{An optional normalization factor by which to multiply the counts.
If given, \code{length(NF)} must be equal to \code{length(field)}.}
\item{blacklist}{An optional GRanges object containing regions that should be
excluded from signal counting.}
\item{melt}{If \code{melt = TRUE}, a dataframe is returned containing a
column for regions and another column for signal. If multiple datasets are
given (if \code{dataset.gr} is a list or if \code{length(field) > 1}), the
output dataframe is melted to contain a third column indicating the sample
names. (See section on return values below).}
\item{region_names}{If \code{melt = TRUE}, an optional vector of names for
the regions in \code{regions.gr}. If left as \code{NULL}, indices of
\code{regions.gr} are used instead.}
\item{expand_ranges}{Logical indicating if ranges in \code{dataset.gr} should
be treated as descriptions of single molecules (\code{FALSE}), or if ranges
should be treated as representing multiple adjacent positions with the same
signal (\code{TRUE}). If the ranges in \code{dataset.gr} do not all have a
width of 1, this option has a substantial effect on the results
returned. (See details).}
\item{ncores}{Multiple cores will only be used if \code{dataset.gr} is a list
of multiple datasets, or if \code{length(field) > 1}.}
}
\value{
An atomic vector the same length as \code{regions.gr} containing the
sum of the signal overlapping each range of \code{regions.gr}. If
\code{dataset.gr} is a list of multiple GRanges, or if \code{length(field)
> 1}, a dataframe is returned. If \code{melt = FALSE} (the default),
dataframes have a column for each dataset and a row for each region. If
\code{melt = TRUE}, dataframes contain one column to indicate regions
(either by their indices, or by \code{region_names}, if given), another
column to indicate signal, and a third column containing the sample name
(unless \code{dataset.gr} is a single GRanges object).
}
\description{
Get the sum of the signal in \code{dataset.gr} that overlaps each range in
\code{regions.gr}. If \code{expand_regions = FALSE},
\code{getCountsByRegions} is written to calculate \emph{readcounts}
overlapping each region, while \code{expand_regions = TRUE} will calculate
"coverage signal" (see details below).
}
\section{\code{expand_ranges = FALSE}}{
In this configuration,
\code{getCountsByRegions} is designed to work with data in which each range
represents one type of molecule, whether it's a single base (e.g. the 5'
ends, 3' ends, or centers of reads) or entire reads (i.e. paired 5' and 3'
ends of reads).
This is in contrast to standard run-length compressed GRanges object, as
imported using \code{\link[rtracklayer:import.bw]{rtracklayer::import.bw}},
in which a single range can represent multiple contiguous positions that
share the same signal information.
As an example, a range of covering 10 bp with a score of 2 is treated as 2
reads (each spanning the same 10 bases), not 20 reads.
}
\section{\code{expand_ranges = TRUE}}{
In this configuration, this function
assumes that ranges in \code{dataset.gr} that cover multiple bases are
compressed representations of multiple adjacent positions that contain the
same signal. This type of representation is typical of "coverage" objects,
including bedGraphs and bigWigs generated by many command line utilities,
but \emph{not} bigWigs as they are imported by
\code{\link[BRGenomics:import-functions]{BRGenomics::import_bigWig}}.
As an example, a range covering 10 bp with a score of 2 is treated as
representing 20 signal counts, i.e. there are 10 adjacent positions that
each contain a signal of 2.
If the data truly represents basepair-resolution coverage, the "coverage
signal" is equivalent to readcounts. However, users should consider how
they interpret results from whole-read coverage, as the "coverage signal"
is determined by both the read counts as well as read lengths.
}
\examples{
data("PROseq") # load included PROseq data
data("txs_dm6_chr4") # load included transcripts
counts <- getCountsByRegions(PROseq, txs_dm6_chr4)
length(txs_dm6_chr4)
length(counts)
head(counts)
# Assign as metadata to the transcript GRanges
txs_dm6_chr4$PROseq <- counts
txs_dm6_chr4[1:6]
}
\seealso{
\code{\link[BRGenomics:getCountsByPositions]{getCountsByPositions}}
}
\author{
Mike DeBerardine
}
|
9fa837c9c437029e7b63f91ee45f9775ecdf1107 | a0c7365198a3bb2ce26e18819a490b239c921c31 | /other analysis/4-ans correlations.R | 66de556e450f2ce1e751ac5644daf7e9cfec631b | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | langcog/mentalabacus | 60327bee7d4c28d12996e6d7bb61aa265f9f07d4 | 725a17873ae38cdfdd1d1f6ae6f6694278434c5f | refs/heads/master | 2021-01-13T01:36:13.497195 | 2017-05-08T13:51:17 | 2017-05-08T13:51:17 | 15,406,355 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,121 | r | 4-ans correlations.R | ## notebook to look at ANS correlations
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is discouraged
source("helper/useful.R")
d <- read.csv("data/zenith all data complete cases.csv")
library(stringr)
library(Hmisc)
# For each study year, print Spearman correlations (Hmisc::rcorr) and their
# p-values among the ANS measure and the three achievement scores.
for (y in 0:3) {
  this.year <- data.frame(ans=d$ans[d$year==y],
                          arith=d$arith[d$year==y],
                          wiat=d$wiat[d$year==y],
                          woodcock=d$woodcock[d$year==y])
  # quartz()
  # splom(this.year,pch=20)
  corrs <- rcorr(as.matrix(this.year),type="spearman")
  print(paste("**** year",as.character(y),"****"))
  print(round(corrs$r,digits=2))
  print(round(corrs$P,digits=2))
}
## add MCMC
# Merge in Weber-fraction (W) estimates from a local MCMC output file.
# W.ML values above 1 are treated as missing; subnum/year are unpacked from
# the fractional "subject" code (integer part = subject number, four decimal
# digits minus 2010 = study year).
mc <- read.table("~/Projects/India Abacus/ZENITH/zenith full analysis/data/mcmc.txt",
                 header=TRUE)
mc$W.ML[mc$W.ML > 1] <- NA
mc$subnum <- floor(mc$subject)
mc$year <- round(((mc$subject - floor(mc$subject)) * 10000)) - 2010
dplus <- merge(d,mc,by.x=c("subnum","year"), by.y=c("subnum","year"))
library(ggplot2)
# Diagnostic plots comparing ML vs. MCMC Weber-fraction estimates by year.
# NOTE(review): quartz() opens a macOS-only graphics device; non-portable.
qplot(W.ML, ans,facets=~ year,
      data=dplus)
quartz()
qplot(ans, W.mcmc,facets=~ year,
      data=dplus) +
  geom_linerange(aes(ymin=W.lower,ymax=W.upper),alpha=.25) +
  geom_abline(aes(slope=1),colour="red") +
  xlab("ML estimate (Mike)") +
  ylab("MCMC estimate (Steve)") + theme_bw()
quartz()
qplot(W.mcmc,arith,facets=~ year,colour=factor(abacus),
      data=dplus) +
  geom_linerange(aes(ymin=W.lower,ymax=W.upper),alpha=.25) + theme_bw()
## weighted regression
# For each year, compare an OLS fit of arithmetic on the ANS measure with a
# fit on the MCMC Weber fraction; the latter is weighted by the inverse
# variance (1 / W.sd^2) of each subject's W estimate.
for (y in 0:3) {
  print(paste("*** year ",y,"***"))
  print(summary(lm (arith ~ ans, data=subset(dplus,year==y))))
  print(summary(lm (arith ~ W.mcmc, data=subset(dplus,year==y),
                    weights=(1/dplus[dplus$year==y,]$W.sd^2))))
}
# Re-run the year-by-year Spearman correlation tables, substituting the MCMC
# Weber fraction (W.mcmc) for the raw ANS measure in the "ans" column.
for (y in 0:3) {
  this.year <- data.frame(ans=dplus$W.mcmc[dplus$year==y],
                          arith=dplus$arith[dplus$year==y],
                          wiat=dplus$wiat[dplus$year==y],
                          woodcock=dplus$woodcock[dplus$year==y])
  corrs <- rcorr(as.matrix(this.year),type="spearman")
  print(paste("**** year",as.character(y),"****"))
  print(round(corrs$r,digits=2))
  print(round(corrs$P,digits=2))
}
10a90013cad0da08adca7775a283608ecd20106f | 56d38fc637ae50fafacf29bc0285e8bf1d3dd819 | /man/utf8_substr.Rd | c89d2101d3786d76446a76dee65aa7341277f101 | [
"MIT"
] | permissive | isabella232/cli-12 | 8d0201e0344089739c24e59adc20f539952a20b9 | b34ae2ceac0716a28df1a6f7b9fc1b24f577d701 | refs/heads/master | 2023-08-02T15:25:46.101332 | 2021-09-07T14:03:37 | 2021-09-07T14:03:37 | 404,305,822 | 0 | 0 | NOASSERTION | 2021-09-08T12:43:13 | 2021-09-08T10:31:27 | null | UTF-8 | R | false | true | 1,031 | rd | utf8_substr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utf8.R
\name{utf8_substr}
\alias{utf8_substr}
\title{Substring of an UTF-8 string}
\usage{
utf8_substr(x, start, stop)
}
\arguments{
\item{x}{Character vector.}
\item{start}{Starting index or indices, recycled to match the length
of \code{x}.}
\item{stop}{Ending index or indices, recycled to match the length of
\code{x}.}
}
\value{
Character vector of the same length as \code{x}, containing
the requested substrings.
}
\description{
This function uses grapheme clusters instaed of Unicode code points in
UTF-8 strings.
}
\examples{
# Five grapheme clusters, select the middle three
str <- paste0(
"\U0001f477\U0001f3ff\u200d\u2640\ufe0f",
"\U0001f477\U0001f3ff",
"\U0001f477\u200d\u2640\ufe0f",
"\U0001f477\U0001f3fb",
"\U0001f477\U0001f3ff")
cat(str)
str24 <- utf8_substr(str, 2, 4)
cat(str24)
}
\seealso{
Other UTF-8 string manipulation:
\code{\link{utf8_graphemes}()},
\code{\link{utf8_nchar}()}
}
\concept{UTF-8 string manipulation}
|
15ffe91d98c31b3006c8073bb47ce1e72fa8ec59 | a50e8e51cc49dc6624fc5f9c35ecedc46a7ac2ed | /R/funds_table1_201314.R | 75fe51cea812df7ce260fac364af9ac1c0e66108 | [] | no_license | HughParsonage/taxstats | 7a98779d3ec2b2c97b3a78b0a56bcfeb0389661e | 330e0df1e6242c2836afa4dc497b8454939ef53e | refs/heads/master | 2020-04-16T23:38:05.842037 | 2019-11-15T10:38:09 | 2019-11-15T10:38:09 | 51,502,209 | 4 | 1 | null | 2019-06-14T11:49:49 | 2016-02-11T08:24:44 | R | UTF-8 | R | false | false | 798 | r | funds_table1_201314.R | #' Super funds time series data
#'
#' @source \url{https://data.gov.au/dataset/taxation-statistics-2013-14/resource/15981dd2-ed4a-44e4-8def-0ccfc0ef8090?inner_span=True}
#'
#' @description This is a long form of the data relating to super funds on data.gov.au.
#'
#' @format A data table with 2652 rows and 5 columns.
#' \describe{
#' \item{Superheading}{The group of the \code{Selected_items}. (Mostly equates to the boldface cells of the original Excel file.)}
#' \item{Selected_items}{The variable, often called Selected items in the sheet.}
#' \item{fy_year}{The financial year.}
#' \item{Count}{The number (of individuals etc) with nonzero values. (Corresponds to no. in original.)}
#' \item{Sum}{The total value (in dollars). (Corresponds to $ in original.)}
#' }
#'
"funds_table1_201314" |
0e4674f2b0280935a6c27a894b2be54cfdf2aec3 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Kontchakov/SUBMITTED/Core1108_tbm_02.tex.moduleQ3.2S.000056/Core1108_tbm_02.tex.moduleQ3.2S.000056.R | 9c3ca957eb6ca9c663a83cf1bf0a923eaf655941 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,353 | r | Core1108_tbm_02.tex.moduleQ3.2S.000056.R | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 4973
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4721
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4721
c
c Input Parameter (command line, file):
c input filename QBFLIB/Kontchakov/SUBMITTED/Core1108_tbm_02.tex.moduleQ3.2S.000056.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1621
c no.of clauses 4973
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 4721
c
c QBFLIB/Kontchakov/SUBMITTED/Core1108_tbm_02.tex.moduleQ3.2S.000056.qdimacs 1621 4973 E1 [705 706 707 708 709 710 711 757 758 759 760 761 762 763 809 810 811 812 813 814 815 861 862 863 864 865 866 867 913 914 915 916 917 918 919 965 966 967 968 969 970 971 1017 1018 1019 1020 1021 1022 1023 1069 1070 1071 1072 1073 1074 1075 1121 1122 1123 1124 1125 1126 1127 1173 1174 1175 1176 1177 1178 1179 1225 1226 1227 1228 1229 1230 1231 1277 1278 1279 1280 1281 1282 1283 1329 1330 1331 1332 1333 1334 1335 1381 1382 1383 1384 1385 1386 1387 1433 1434 1435 1436 1437 1438 1439 1485 1486 1487 1488 1489 1490 1491 1537 1538 1539 1540 1541 1542 1543 1589 1590 1591 1592 1593 1594 1595] 0 145 1300 4721 RED
|
736fdb1572065c7598bb2ed8045d937b29fc7c37 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/downscaledl/man/rcpparmabasic.Rd | 808e4141ac3ce1234e20b640973eebac08a69844 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,251 | rd | rcpparmabasic.Rd | \name{RcppArmadillo-Functions}
\alias{rcpparmabasic_test}
\alias{rcpparmabasic_outerproduct}
\alias{rcpparmabasic_innerproduct}
\alias{rcpparmabasic_bothproducts}
\title{Set of functions in RcppArmadillo package}
\description{
  These four functions are created when
  \code{RcppArmadillo.package.skeleton()} is invoked to create a
  skeleton package.
}
\usage{
rcpparmabasic_test()
rcpparmabasic_outerproduct(x)
rcpparmabasic_innerproduct(x)
rcpparmabasic_bothproducts(x)
}
\arguments{
\item{x}{a numeric vector}
}
\value{
\code{rcpparmabasic_test()} does not return a value, but displays a
message to the console.
\code{rcpparmabasic_outerproduct()} returns a numeric matrix computed as the
outer (vector) product of \code{x}.
  \code{rcpparmabasic_innerproduct()} returns a double computed as the inner
  (vector) product of \code{x}.
\code{rcpparmabasic_bothproducts()} returns a list with both the outer and
inner products.
}
\details{
These are example functions which should be largely
self-explanatory.
}
\references{
See the documentation for Armadillo, and RcppArmadillo, for more details.
}
\examples{
x <- sqrt(1:4)
rcpparmabasic_innerproduct(x)
rcpparmabasic_outerproduct(x)
}
\author{Lianfa Li} |
76f9007c0bdf21afd8de07c199dee2783d90ff4d | c555092c911699a657b961a007636208ddfa7b1b | /man/geom_quantile.Rd | 49ebc87b6f1e4977bda8cc081f874e601d70d197 | [] | no_license | cran/ggplot2 | e724eda7c05dc8e0dc6bb1a8af7346a25908965c | e1b29e4025de863b86ae136594f51041b3b8ec0b | refs/heads/master | 2023-08-30T12:24:48.220095 | 2023-08-14T11:20:02 | 2023-08-14T12:45:10 | 17,696,391 | 3 | 3 | null | null | null | null | UTF-8 | R | false | true | 5,056 | rd | geom_quantile.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom-quantile.R, R/stat-quantilemethods.R
\name{geom_quantile}
\alias{geom_quantile}
\alias{stat_quantile}
\title{Quantile regression}
\usage{
geom_quantile(
mapping = NULL,
data = NULL,
stat = "quantile",
position = "identity",
...,
lineend = "butt",
linejoin = "round",
linemitre = 10,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE
)
stat_quantile(
mapping = NULL,
data = NULL,
geom = "quantile",
position = "identity",
...,
quantiles = c(0.25, 0.5, 0.75),
formula = NULL,
method = "rq",
method.args = list(),
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE
)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[=aes]{aes()}}. If specified and
\code{inherit.aes = TRUE} (the default), it is combined with the default mapping
at the top level of the plot. You must supply \code{mapping} if there is no plot
mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[=ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[=fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data. A \code{function} can be created
from a \code{formula} (e.g. \code{~ head(.x, 10)}).}
\item{position}{Position adjustment, either as a string naming the adjustment
(e.g. \code{"jitter"} to use \code{position_jitter}), or the result of a call to a
position adjustment function. Use the latter if you need to change the
settings of the adjustment.}
\item{...}{Other arguments passed on to \code{\link[=layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{lineend}{Line end style (round, butt, square).}
\item{linejoin}{Line join style (round, mitre, bevel).}
\item{linemitre}{Line mitre limit (number greater than 1).}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[=borders]{borders()}}.}
\item{geom, stat}{Use to override the default connection between
\code{geom_quantile()} and \code{stat_quantile()}.}
\item{quantiles}{conditional quantiles of y to calculate and display}
\item{formula}{formula relating y variables to x variables}
\item{method}{Quantile regression method to use. Available options are \code{"rq"} (for
\code{\link[quantreg:rq]{quantreg::rq()}}) and \code{"rqss"} (for \code{\link[quantreg:rqss]{quantreg::rqss()}}).}
\item{method.args}{List of additional arguments passed on to the modelling
function defined by \code{method}.}
}
\description{
This fits a quantile regression to the data and draws the fitted quantiles
with lines. This is a continuous analogue to \code{\link[=geom_boxplot]{geom_boxplot()}}.
}
\section{Aesthetics}{
\code{geom_quantile()} understands the following aesthetics (required aesthetics are in bold):
\itemize{
\item \strong{\code{x}}
\item \strong{\code{y}}
\item \code{alpha}
\item \code{colour}
\item \code{group}
\item \code{linetype}
\item \code{linewidth}
\item \code{weight}
}
Learn more about setting these aesthetics in \code{vignette("ggplot2-specs")}.
}
\section{Computed variables}{
These are calculated by the 'stat' part of layers and can be accessed with \link[=aes_eval]{delayed evaluation}.
\itemize{
\item \code{after_stat(quantile)}\cr Quantile of distribution.
}
}
\examples{
m <-
ggplot(mpg, aes(displ, 1 / hwy)) +
geom_point()
m + geom_quantile()
m + geom_quantile(quantiles = 0.5)
q10 <- seq(0.05, 0.95, by = 0.05)
m + geom_quantile(quantiles = q10)
# You can also use rqss to fit smooth quantiles
m + geom_quantile(method = "rqss")
# Note that rqss doesn't pick a smoothing constant automatically, so
# you'll need to tweak lambda yourself
m + geom_quantile(method = "rqss", lambda = 0.1)
# Set aesthetics to fixed value
m + geom_quantile(colour = "red", linewidth = 2, alpha = 0.5)
}
|
1c25d772fbf92b90977fb43b9fc8dd2d691320c2 | 5be281be40d95acde42ff2780c01c26d6d343e8b | /flcode.R | dab23faac96f2d6ebda8ddb0ae5ea450b46258cc | [] | no_license | derekt5-1620677/INFO_201_Final_Project-Twitter | 1805787bded8b08d991ac558189088735d699bfe | fe475562ec02e325507bd23d5bb31686ca259727 | refs/heads/master | 2021-01-25T13:58:25.979672 | 2018-03-09T07:48:40 | 2018-03-09T07:48:40 | 123,632,304 | 0 | 0 | null | 2018-03-08T08:37:21 | 2018-03-02T21:23:10 | R | UTF-8 | R | false | false | 2,373 | r | flcode.R | library(shiny)
library('dplyr')
# creates the UI for the app, with a widget on the side and a tab containing data table
# UI definition: a sidebar with a free-text country-name input and a main
# panel reporting how many cities in that country have trending-tweet data.
# Output ids ("fl.countrytext", "fl.countrytable") are rendered by fl.server.
fl.ui <- fluidPage(
  sidebarPanel(
    # The server lower-cases the data before matching, hence the hint below.
    textInput("city.name", label = "Enter a Country Name"),
    helpText("lower case (i.e. canada)")
  ),
  mainPanel(
    tabsetPanel(type = "tabs", 
                tabPanel("Location Summary", textOutput("fl.countrytext"), tableOutput("fl.countrytable")),
                h2("Interpretation"), helpText("This table shows the worldwide locations of trending tweets. 
                                               The data is dependent on the number of tweets in a certain 
                                               city. We want this information because it tells us where news 
                                               travels to and what type of people care (i.e. people in the 
                                               city vs people in the suburbs)."))
  )
)
# Server logic for the trending-locations app. Reads the list of available
# Twitter trend locations and, for the country name typed by the user, renders:
#   * fl.countrytext  -- a sentence giving the number of matching cities
#   * fl.countrytable -- the cities in that country with trending data
fl.server <- function(input, output) {
  # Read the location data once per session
  fl.trends.data <- read.csv("./available_locations.csv")
  # Reactive: number of cities in the requested country, as a plain integer
  # (pull() extracts the scalar instead of returning a 1x1 data frame)
  fl.trends.count <- reactive({
    fl.trends.data %>%
      filter(name != "Worldwide") %>%
      mutate(country = tolower(country)) %>%
      filter(country == input$city.name) %>%
      summarize(total = n()) %>%
      pull(total)
  })
  # Choose the singular or plural form of "city" based on the count
  ChooseWord <- function(number) {
    if (number != 1) {
      "cities"
    } else {
      "city"  # fixed: previously returned the typo "grcity"
    }
  }
  # Sentence describing the table below
  country.text <- reactive({
    paste("This table shows the", fl.trends.count(), ChooseWord(fl.trends.count()),
          "where the most popular tweets come from in", input$city.name, ".")
  })
  output$fl.countrytext <- renderText(country.text())
  # Filter the dataset so only the cities in the specified country show
  fl.trends.country <- reactive({
    fl.trends.data %>%
      filter(name != "Worldwide") %>%
      mutate(country = tolower(country)) %>%
      select(country, name) %>%
      filter(country == input$city.name)
  })
  # Render the table of matching cities
  output$fl.countrytable <- renderTable(fl.trends.country())
}
shinyApp(ui = fl.ui, server = fl.server)
|
884cb14ade07c7d8ce8ad1640b75771da4ae1ba8 | 9b34b2250d39c1b05a9d44392d7fed4711d26d30 | /man/univ_quant.Rd | b07986b3c3361a9ab1436db2831ffad4a7367885 | [] | no_license | lbraglia/lbstat | 11bbd806dfb74e46ce332cac23c33da726541205 | f8dc128b507bc1b1cb2741af49c171971abe658c | refs/heads/master | 2023-05-11T00:24:32.746694 | 2023-04-28T12:18:40 | 2023-04-28T12:18:40 | 51,751,382 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,222 | rd | univ_quant.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tables_univariate.R
\name{univ_quant}
\alias{univ_quant}
\title{Univariate table for quantitative data.}
\usage{
univ_quant(
x,
latex = TRUE,
latex_placement = "ht",
label = NULL,
caption = NULL,
use_comments = TRUE,
wb = NULL,
sheets = NULL
)
}
\arguments{
\item{x}{a quantitative variable, a data.frame or a list}
\item{latex}{output the table using \code{xtable::xtable}}
\item{latex_placement}{table placement for latex printing}
\item{label}{latex label}
\item{caption}{latex caption}
\item{use_comments}{use comments for row (variable) names, if available}
\item{wb}{an openxlsx Workbook; if not NULL the table will be saved
in the workbook too, in addition to being printed}
\item{sheets}{optional sheet names (same length as the number of tables)}
}
\description{
Univariate table for quantitative data.
}
\examples{
wb = openxlsx::createWorkbook()
univ_quant(x = airquality$Ozone, sheet = 'ozone', wb = wb)
univ_quant(x = airquality[, c('Ozone')], wb = wb)
univ_quant(x = airquality[, c('Ozone', 'Temp')], wb = wb)
univ_quant(list('a' = 1:10, 'b' = 2:20), wb = wb)
lbmisc::wb_to_xl(wb = wb, file = '/tmp/univ_quant.xlsx')
}
|
3113f1e4cbd149d0978e791ba52f628d4e5d3111 | 36b3c6dc2e95371eb1fb7ed39e15e6bb03d9e570 | /painel_SIAPE_remuneracao.R | ed01c52426a09e489fd55cb2c74c8903a13eec0c | [] | no_license | GRC-UnB/Painel_Unificado | f4034a4110e200939980c1268f49f4082381f189 | 042890d4324cb1557ebe1b4f65c678b259c089a2 | refs/heads/master | 2022-12-29T03:10:19.702632 | 2020-10-13T16:36:06 | 2020-10-13T16:36:06 | 266,450,554 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,845 | r | painel_SIAPE_remuneracao.R | library(openxlsx)
library(tidyverse)
library(readr)
library(data.table)
# Initial parameters: clear the workspace and load project paths/helpers
# NOTE(review): rm(list=ls()) wipes the global environment -- presumably
# intentional for batch runs, but destructive if sourced interactively.
rm(list=ls())
source("parametros.R")
source(funcoes)
# Parameters - year/month range for the processing loop
ano_inicial = 2013
ano_final = 2019
meses = c(paste0(0,1:9),10:12)
# Loop over years
for(ano in ano_inicial:ano_final){
  # Loop over months
  for(mes in meses){
    # Input/output file paths for this year/month
    caminho_remuneracao = paste0(pasta_siape,ano,"_",mes,"_servidores/",ano,mes,"_Remuneracao.xlsx")
    caminho_cadastro_RDS = paste0(pasta_siape,ano,"_",mes,"_servidores/",ano,mes,"_Cadastro.RDS")
    caminho_remuneracao_rds = paste0(pasta_siape,ano,"_",mes,"_servidores/",ano,mes,"_Cadastro_Remunera.RDS")
    # Process remuneration only when both input files exist
    if(file.exists(caminho_cadastro_RDS)){
      if(file.exists(caminho_remuneracao)){
      # Read the registry (cadastro) file
      cadastro = read_rds(caminho_cadastro_RDS)
      # Rename the PERIODO column to "data" (date)
      names(cadastro) = gsub(pattern = "PERIODO",replacement = "data",x = names(cadastro))
      # Keep only records with ATIVIDADE == 1
      cadastro = cadastro %>% 
        filter(ATIVIDADE == 1)
      # Unique cases per (Id_SERVIDOR_PORTAL, id) pair
      unicos = cadastro %>% 
        distinct(Id_SERVIDOR_PORTAL,id,.keep_all = T)
      # Free memory before loading the spreadsheet
      rm(cadastro)
      #### REMUNERATION PROCESSING
      remuneracao = openxlsx::read.xlsx(xlsxFile = caminho_remuneracao)
      # Convert the monetary columns to numeric; converter() is a helper
      # defined in the sourced project files (parametros.R / funcoes)
      remuneracao = remuneracao %>% 
        mutate(remuneracao_basica_bruta = converter(`REMUNERAÇÃO.BÁSICA.BRUTA`),
               remuneracao_basica_liquida = converter(`REMUNERAÇÃO.APÓS.DEDUÇÕES.OBRIGATÓRIAS`)) %>% 
        select(-c(`REMUNERAÇÃO.BÁSICA.BRUTA`,`REMUNERAÇÃO.APÓS.DEDUÇÕES.OBRIGATÓRIAS`))
      # Join registry and remuneration into the unified data.frame
      final = merge.data.frame(x = unicos,y=remuneracao,by.x = "Id_SERVIDOR_PORTAL")
      # Free memory
      rm(remuneracao,unicos)
      write_rds(x = final,path = caminho_remuneracao_rds,compress = "xz")
      ## Aggregation: mean/sd of gross and net pay grouped by id and data
      if(!exists("final_agregado")){
        final_agregado = final %>% 
          group_by(id,data) %>% 
          summarise(med_remunera_bruta = mean(remuneracao_basica_bruta,na.rm=T),
                    sd_remunera_bruta = sd(remuneracao_basica_bruta,na.rm = T),
                    med_remunera_liquida = mean(remuneracao_basica_liquida,na.rm=T),
                    sd_remunera_liquida = sd(remuneracao_basica_liquida,na.rm = T))
      }
      else{
        temp_agregado = final %>% 
          group_by(id,data) %>% 
          summarise(med_remunera_bruta = mean(remuneracao_basica_bruta,na.rm=T),
                    sd_remunera_bruta = sd(remuneracao_basica_bruta,na.rm = T),
                    med_remunera_liquida = mean(remuneracao_basica_liquida,na.rm=T),
                    sd_remunera_liquida = sd(remuneracao_basica_liquida,na.rm = T))
        # Append this month's aggregate to the running total
        final_agregado = rbindlist(list(final_agregado, temp_agregado))
        rm(temp_agregado)
      }
      # Drop the unified data.frame before the next iteration
      rm(final)
      log_mensagem = paste0("Arquivos ",caminho_cadastro_RDS," e ",caminho_remuneracao," lidos com sucesso.")
    }
    else{
      log_mensagem = paste0("Não foi encontrado o arquivo: ",caminho_remuneracao)
    }
    }
    else{
      log_mensagem = paste0("Não foi encontrado o arquivo: ",caminho_cadastro_RDS)
    }
    # Append a timestamped line to the run log
    dir.create(path = pasta_logs,showWarnings = F)
    cat("\n\n OS ARQUIVOS ",mes,"-",ano," foram lidos")
    cat(paste0(date()," - ",log_mensagem,"\n"),file = paste0(pasta_logs,"siape.log"),append = T)
    rm(log_mensagem)
  }
}
# Write the aggregated panel to Excel
openxlsx::write.xlsx(final_agregado,file = painel_siape_remuneracao,asTable = T)
|
7881ec68c197341a48866a6ca7ed3763144aceb3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/fGarch/examples/methods-show.Rd.R | 9bc8cf935ab9527d2b058c15d04aeaff6795a01e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 385 | r | methods-show.Rd.R | library(fGarch)
### Name: show-methods
### Title: GARCH Modelling Show Methods
### Aliases: show-methods show,ANY-method show,fGARCH-method
### show,fGARCHSPEC-method
### Keywords: models
### ** Examples
## garchSpec -
# build a default GARCH specification and display it
spec = garchSpec()
print(spec)
## garchSim -
# simulate 500 observations from that specification
x = garchSim(spec, n = 500)
## garchFit -
# fit a GARCH(1, 1) model to the simulated series and display the fit
fit = garchFit(~ garch(1, 1), data = x)
print(fit)
|
e895bfa92a45c80a16b03608dfc0d5c5aba65f08 | 8a9e81debd4336a33523240df3540ea4dee10869 | /server.R | 23e9a0f73606df59005db3bca927ff664a09315f | [] | no_license | gnetsanet/scViz | 029d031663ea2e131098d19ac7515786451f0ba5 | 8489c03595068450492f6758f2867b9d2dd5717b | refs/heads/master | 2020-08-05T06:11:23.481305 | 2019-10-06T23:37:06 | 2019-10-06T23:37:06 | 212,425,314 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,452 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
options(shiny.maxRequestSize=100*1024^2)
# Server: lets the user upload a delimited text file (ui inputs "file",
# "sep", "header") and renders three tabs -- file metadata, a preview of
# the parsed data, and a per-column summary -- once a file is uploaded.
shinyServer(
  function(input, output) {
    # Parsed contents of the uploaded file; NULL until an upload happens.
    uploaded <- reactive({
      upload_info <- input$file
      if (is.null(upload_info)) {
        return()
      }
      read.table(file = upload_info$datapath, sep = input$sep, header = input$header)
    })
    # Metadata about the uploaded file (name, size, type, temp path).
    output$filedf <- renderTable({
      if (is.null(uploaded())) {
        return()
      }
      input$file
    })
    # Per-column summary statistics.
    output$sum <- renderTable({
      if (is.null(uploaded())) {
        return()
      }
      summary(uploaded())
    })
    # First rows of the parsed data.
    output$table <- renderTable({
      if (is.null(uploaded())) {
        return()
      }
      head(uploaded())
    })
    # Tab layout, shown only after a successful upload.
    output$tb <- renderUI({
      if (is.null(uploaded())) {
        return()
      }
      tabsetPanel(
        tabPanel("About file", tableOutput("filedf")),
        tabPanel("Data", tableOutput("table")),
        tabPanel("Summary", tableOutput("sum"))
      )
    })
  }
)
797f785bd32e28402a9b1b8ce5836ac0839d5789 | 4dd0758e06c649fcc0a70dc248e5fb5ba1613dd5 | /regression.R | bbf1b997150e7e2a6372702d40dfc2bdd4d1e878 | [] | no_license | ppiyush28/R_code_Session | f0637b07e60fe6ab371a7948c3f6fcca615add9a | 7c8ba6ce59324000bbf56bfb21b2722aacb373c3 | refs/heads/master | 2021-10-08T03:15:10.536985 | 2018-12-07T07:34:28 | 2018-12-07T07:34:28 | 100,985,448 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,197 | r | regression.R |
# Plot the child and parent height distributions from Galton's data;
# melt() reshapes the two columns into long form so ggplot can facet
# one histogram per variable (requires reshape2/ggplot2 and galton loaded)
long <- melt(galton)
g <- ggplot(long, aes(x = value, fill = variable)) 
g <- g + geom_histogram( colour = 'black', binwidth = 1)
g <- g + facet_grid(.~variable)
g
# Find the mu that minimizes the sum of squared distances between datapoints
# and mu (explored interactively with manipulate below)
# NOTE(review): install.packages() inside a script reinstalls on every run
install.packages("manipulate")
library(manipulate)
# Histogram of Galton child heights with a candidate centre `mu` overlaid
# as a vertical line. The title reports the mean squared error of `mu` as
# a constant predictor, so the manipulate() slider demo can show that the
# MSE is minimised at the sample mean. Returns the ggplot object.
myHist <- function(mu){
  squared_error <- mean((galton$child - mu)^2)
  plt <- ggplot(galton, aes(x = child)) +
    geom_histogram(fill = "salmon", colour = "black", binwidth = 1) +
    geom_vline(xintercept = mu, size = 3) +
    ggtitle(paste0("mu = ", mu, ", MSE = ", round(squared_error, 2)))
  plt
}
# Interactive slider: vary mu and watch the MSE change in the title
manipulate(myHist(mu), mu = slider(62, 74, step = 0.5))
# The least squares estimate is the empirical mean (shown as a vertical line)
g <- ggplot(galton, aes(x=child)) + geom_histogram(fill = "salmon", 
                                                   colour = "black",
                                                   binwidth = 1)
g <- g + geom_vline(xintercept = mean(galton$child), size = 3)
g
# Compare children's heights against their parents' heights
ggplot(galton, aes(x = parent, y = child)) + geom_point()
# Same comparison with point size proportional to the frequency of each
# (parent, child) height pair (cex = 0.1 * count)
freqData <- as.data.frame(table(galton$parent, galton$child))
names(freqData) <- c("child","parent","freq")
par(mfrow = c(1,1))
plot(as.numeric(as.vector(freqData$parent)), as.numeric(as.vector(freqData$child)),
     pch = 21, col = "black", bg = "lightblue", cex = 0.1*freqData$freq, xlab = "parent",
     ylab = "child")
# Find the best fit line interactively
install.packages("shiny")
library(shiny)
library(dplyr)
# Scatter of mean-centred parent vs child heights, point area proportional
# to the frequency of each height pair, with a candidate regression line
# through the origin of slope `beta`. The title reports the mean squared
# error of that slope, for use with manipulate()'s slider.
myPlot <- function(beta){
  child_c <- galton$child - mean(galton$child)
  parent_c <- galton$parent - mean(galton$parent)
  freq_tbl <- as.data.frame(table(parent_c, child_c))
  # NB: column-name order deliberately matches table(parent_c, child_c)
  names(freq_tbl) <- c("child", "parent", "freq")
  plot(
    as.numeric(as.vector(freq_tbl$parent)),
    as.numeric(as.vector(freq_tbl$child)),
    pch = 21, col = "black", bg = "lightblue",
    cex = .15 * freq_tbl$freq,
    xlab = "parent",
    ylab = "child"
  )
  # candidate fit through the origin, plus the origin itself
  abline(0, beta, lwd = 3)
  points(0, 0, cex = 2, pch = 19)
  # mean squared error of slope `beta` on the centred data
  sq_err <- mean( (child_c - beta * parent_c)^2 )
  title(paste("beta = ", beta, "mse = ", round(sq_err, 3)))
}
# Interactive slider over the through-the-origin slope beta
manipulate(myPlot(beta), beta = slider(0.6, 1.2, step = 0.02))
# The solution using glm: centred child ~ centred parent, no intercept
glm(I(galton$child-mean(galton$child)) ~ I(galton$parent-mean(galton$parent))-1)
glm(I(child - mean(child))~ I(parent - mean(parent)) - 1, data = galton)
# Visualizing the best fit line
freqData <- as.data.frame(table(galton$parent, galton$child))
names(freqData) <- c("child","parent","freq")
plot( as.numeric(as.vector(freqData$parent)), 
      as.numeric(as.vector(freqData$child)),
      pch = 21, col = "black", bg = "lightblue",
      cex = .15 * freqData$freq, 
      xlab = "parent", 
      ylab = "child"
      )
# Fit child ~ parent and overlay the fitted values as a red line
lm1 <- glm(galton$child~galton$parent)
lines(galton$parent, lm1$fitted.values, col="red", lwd=3)
|
edb7375c90c22a5ef7f8aa315b1f4901564ddb3e | 14c2f47364f72cec737aed9a6294d2e6954ecb3e | /man/contrastSampleIndices-EdgeResult-character-method.Rd | 2efa5a49c45dafd36b42b8081db78ecaf8a9b4ec | [] | no_license | bedapub/ribiosNGS | ae7bac0e30eb0662c511cfe791e6d10b167969b0 | a6e1b12a91068f4774a125c539ea2d5ae04b6d7d | refs/heads/master | 2023-08-31T08:22:17.503110 | 2023-08-29T15:26:02 | 2023-08-29T15:26:02 | 253,536,346 | 2 | 3 | null | 2022-04-11T09:36:23 | 2020-04-06T15:18:41 | R | UTF-8 | R | false | true | 494 | rd | contrastSampleIndices-EdgeResult-character-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllMethods.R
\name{contrastSampleIndices,EdgeResult,character-method}
\alias{contrastSampleIndices,EdgeResult,character-method}
\title{Extract contrast sample indices}
\usage{
\S4method{contrastSampleIndices}{EdgeResult,character}(object, contrast)
}
\arguments{
\item{object}{An EdgeResult object.}
\item{contrast}{Character, indicating the contrast of interest.}
}
\description{
Extract contrast sample indices
}
|
99327a34ae318962fdad43538e6b8a60564f26dc | c39e466c2b6fdffbc410f24669f214e13fb87781 | /R/EJERCICIOS/R-ex-2/COMPIS/013_G1-Manuel Jesus Fernandez Ibañez_720765_assignsubmission_file_/Ejercicio graficas.R | c9190f9b45e793c07633108eb254adc73ac87046 | [] | no_license | enanibus/biopython | 3a58efbcc92f1ce60285a115c620de9295b7d281 | 613d334a5c0502059930d9381a9464ef533cca1c | refs/heads/master | 2021-01-12T17:27:39.516793 | 2017-01-02T18:30:09 | 2017-01-02T18:30:09 | 71,573,732 | 0 | 1 | null | null | null | null | WINDOWS-1252 | R | false | false | 2,885 | r | Ejercicio graficas.R | #EJERCICIO 1
read.table("leukemia.data.txt", header= TRUE)
# Read the expression data (no header) as a data frame
leuk.dat<-read.table("leukemia.data.txt", header= FALSE)
leuk.dat
# Convert to a numeric matrix for row/column indexing
leuk.dat.m<-data.matrix(leuk.dat)
leuk.dat.m
scan("leukemia.class.txt", what = "")
# Patient class labels read from file, as a factor
leuk.class<-factor(c(scan("leukemia.class.txt", what = "")))
# Alternating Male/Female labels (19 repetitions of the pair)
sex<-factor(rep(c("Male", "Female"), times=19))
sex
# EXERCISE 2
## Exercise 2.1: boxplot of PTEN expression (row 2124) by patient group
boxplot(leuk.dat.m[2124,2:39]~leuk.class, 
        ylab="Gene expression(mRNA)", col = c("orange", "lightblue"), 
        main="a) Boxplot of PTEN by patient group")
## Exercise 2.2: HK-1 vs PTEN scatter, symbols by sex, colours by class
plot(leuk.dat.m[2124,2:39],leuk.dat.m[1,2:39], xlab="HK-1", ylab= "PTEN",
     main="b) HK???1 vs. PTEN; symbol size proportional to gene 2600", 
     pch = c(21, 24)[sex],
     col = c("blue", "purple")[leuk.class])
# Overlay the least-squares fit as a dashed line
lm1<-lm(leuk.dat.m[1,2:39]~leuk.dat.m[2124,2:39])
abline(lm1, lty=2)
# Build a class x sex legend
lclass <- rep(levels(leuk.class), rep(2, 2))
lsex <- rep(levels(sex), 2)
text.legend <- paste(lclass, lsex, sep = ", ")
legend(-1, 1, c(text.legend),pch = c(24, 21)[sex],col = c("blue","blue",
                                                          "purple","purple"))
# Exercise 3
## 3.1: conditioning plot (by sex) with a smooth panel
coplot(leuk.dat.m[1, 2:39] ~ leuk.dat.m[2124, 2:39]|sex, xlab="PTEN",
       ylab="HK-1", main="Given:sex", panel=panel.smooth)
## 3.2: lattice version with a loess smoother per panel
x <- leuk.dat.m[2124, 2:39]
y <- leuk.dat.m[1, 2:39]
library(lattice)
xyplot(leuk.dat.m[1, 2:39] ~ leuk.dat.m[2124, 2:39]|sex, xlab="PTEN",
       ylab="HK-1", main="Given:sex", panel=function(x,y) {panel.xyplot(x,y)
         panel.loess(x,y)})
## 3.3: same but with a least-squares line per panel
xyplot(leuk.dat.m[1, 2:39] ~ leuk.dat.m[2124, 2:39]|sex, xlab="PTEN",
       ylab="HK-1", main="Given:sex", panel=function(x,y) {panel.xyplot(x,y)
         panel.lmline(x,y)})
## 3.4: ggplot2 version, facetted by sex, with loess and lm smoothers
library(ggplot2)
dgg <- data.frame(PTEN= leuk.dat.m[2124, 2:39], HK=leuk.dat.m[1, 2:39], 
                  Sex= sex)
ggplot(data=dgg, aes(PTEN,HK))+ facet_wrap(~Sex) + geom_point() + 
  geom_smooth(method="loess") + geom_smooth(se= FALSE, method= "lm", 
                                            colour="grey") + labs(y="HK-1")
# Exercise 4: p-values of t-tests on pure noise should be ~uniform
randomdata <- matrix(rnorm(38 * 1000), ncol = 38)
class <- factor(c(rep("ALL",27 ), rep("AML", 11)))
pvalues <- apply(randomdata, 1, function(x) t.test(x ~ leuk.class)$p.value)
hist(pvalues, main="P???values from t???test", ylab="Density")
tmp <- t.test(randomdata[1, ] ~leuk.class )
tmp
## Exercise 5
# 5.1 (placeholder string stands in for the numeric vector to test)
tmp<- wilcox.test(x="vectornumericodelquequeremoshacereltest",...)
# 5.2 (placeholder strings stand in for the two p-value vectors)
plot(x="pvalorestestdelatdemanolito",y="pvalorestestwilcox",
     type = "n", axes = FALSE, ann = FALSE)
# 5.3
# (author's note, translated) It was unclear what the first part of the
# question asks. In rug() we can choose the side where the marks are drawn:
# the 3rd argument, 1 (bottom) or 3 (top).
# 4.4
# rug() is useful because it shows whether the distribution of the test
# values is continuous or discrete.
# 4.5
points(cex=0.9)
|
c0446fa5f49a3c99a98a56dd1b6fab2d368226a7 | 66a2afd9c0dab1d55e6d236f3d85bc1b61a11a66 | /man-roxygen/api_type.R | 219c7a5da8e89156142ce69675b12cd6f17de42b | [
"MIT"
] | permissive | StevenMMortimer/salesforcer | 833b09465925fb3f1be8da3179e648d4009c69a9 | a1e1e9cd0aa4e4fe99c7acd3fcde566076dac732 | refs/heads/main | 2023-07-23T16:39:15.632082 | 2022-03-02T15:52:59 | 2022-03-02T15:52:59 | 94,126,513 | 91 | 19 | NOASSERTION | 2023-07-14T05:19:53 | 2017-06-12T18:14:00 | R | UTF-8 | R | false | false | 175 | r | api_type.R | #' @param api_type \code{character}; one of \code{"REST"}, \code{"SOAP"},
#' \code{"Bulk 1.0"}, or \code{"Bulk 2.0"} indicating which API to use when
#' making the request.
|
# Plot paired positive/negative ionization-mode MS1 extracted-ion
# chromatograms (EICs) for each compound, so an adduct of the same lipid
# can be compared across modes (one stacked two-panel page per compound).
#
# XCMSnExp_pos, XCMSnExp_neg: positive- and negative-mode XCMSnExp objects.
# peakdata_pos: LOBset or data.frame with columns 'peakgroup_rt',
#   'LOBdbase_mz' and 'compound_name'; if NULL, a one-row table is built
#   from 'mz' and 'rt'.
# adduct_offset: m/z offset added to the positive-mode m/z to locate the
#   negative-mode adduct.
# mz, rt: target m/z and retention time, used only when peakdata_pos is NULL.
# rtspan: half-width (in rt units) of the green retention-time guide lines.
# ppm: m/z tolerance used when extracting each EIC.
# file: length-2 character vector -- one sample name from XCMSnExp_pos and
#   one from XCMSnExp_neg, in that order.
# window: multiplier on rtspan for the plotted retention-time axis (>= 1).
LOB_plotPosNeg <- function(XCMSnExp_pos, XCMSnExp_neg, peakdata_pos = NULL, adduct_offset = NULL, mz = NULL, rt = NULL, rtspan = 175,
                           ppm = 2.5, file = NULL, window = 1) {

  # check window size
  if (window < 1) {
    stop("Window can not be less than 1 (Full rt window searched for scans).")
  }

  if (is.null(file) || length(file) != 2) {
    stop("Please supply a vector with two filenames to plot from XCMSnExp_pos and XCMSnExp_neg respectively.")
  }

  if (is.null(adduct_offset)) {
    stop("Please supply an 'adduct_offset' used to locate the negative mode adduct.")
  }

  # check that each file name exists in its respective experiment
  # (fixed: the previous check built its error message from an undefined
  # object 'XCMSnExp', so the error path itself failed)
  missing_files <- c(
    file[1][!file[1] %in% MSnbase::sampleNames(XCMSnExp_pos)],
    file[2][!file[2] %in% MSnbase::sampleNames(XCMSnExp_neg)]
  )
  if (length(missing_files) > 0) {
    stop("File(s) '", paste(missing_files, collapse = ", "), "' not found in both XCMSnExp.
         Check MSnbase::sampleNames(XCMSnExp_pos) and MSnbase::sampleNames(XCMSnExp_neg) to see files in both objects.")
  }

  # normalize 'peakdata_pos' to a data.frame with the three required columns
  if (!is.null(peakdata_pos)) {
    if (inherits(peakdata_pos, "LOBset")) {
      # a LOBset carries its peakdata table inside; extract it
      peakdata_pos <- LOBSTAHS::peakdata(peakdata_pos)
    } else if (!inherits(peakdata_pos, "data.frame")) {
      stop("Input 'peakdata' must be of class 'data.frame'.")
    } else if (!all(c("peakgroup_rt", "LOBdbase_mz", "compound_name") %in% colnames(peakdata_pos))) {
      stop("The input 'peakdata' must have columns 'peakgroup_rt', 'LOBdbase_mz', and 'compound_name'.")
    }
  }

  if (!is.null(peakdata_pos) && (!is.null(mz) || !is.null(rt))) {
    # peakdata overrides the mz and rt arguments
    warning("You have provided 'peakdata' as well as 'mz' and/or 'rt' values. 'mz' and 'rt' inputs will be ignored and will be read from 'peakdata'.")
    mz <- NULL
    rt <- NULL
  }

  if (is.null(peakdata_pos)) {
    # user supplied only mz/rt: build a one-row table, labelled by the m/z
    peakdata_pos <- data.frame(LOBdbase_mz = mz, peakgroup_rt = rt, compound_name = as.character(mz))
  }

  # symmetric m/z window of +/- ppm around a target mass
  range_calc <- function(x) {
    delta <- x * (0.000001 * ppm)
    c(x - delta, x + delta)
  }

  # plot the paired MS1 chromatograms, one page per compound
  # (seq_len handles an empty peakdata table cleanly)
  for (i in seq_len(nrow(peakdata_pos))) {
    cat("\n") # console progress feedback
    flush.console()
    cat("Plotting spectra", i, "of", nrow(peakdata_pos), "...")

    # target m/z and retention time for this compound
    mz <- peakdata_pos[i, "LOBdbase_mz"]
    rt <- peakdata_pos[i, "peakgroup_rt"]

    # m/z windows for each mode (negative mode shifted by the adduct offset)
    pos_range <- range_calc(mz)
    neg_range <- range_calc(mz + adduct_offset)

    # restrict each experiment to the requested file, m/z window and MS1
    plot_pos <- xcms::filterMsLevel(
      xcms::filterMz(
        xcms::filterFile(XCMSnExp_pos,
          file = file[1]
        ),
        mz = pos_range
      ),
      msLevel = 1
    )
    plot_neg <- xcms::filterMsLevel(
      xcms::filterMz(
        xcms::filterFile(XCMSnExp_neg,
          file = file[2]
        ),
        mz = neg_range
      ),
      msLevel = 1
    )

    # extract a chromatogram from the filtered XCMSnExp objects
    df_pos <- xcms::chromatogram(plot_pos)
    df_neg <- xcms::chromatogram(plot_neg)

    # set non-detected (NA) intensities to 0 for plotting
    df_pos[[1]]@intensity[which(is.na(df_pos[[1]]@intensity))] <- 0
    df_neg[[1]]@intensity[which(is.na(df_neg[[1]]@intensity))] <- 0

    # stack the two panels (positive on top, negative below) on one page
    plot(rbind(
      ggplot2::ggplotGrob(ggplot() +
        geom_line(aes(
          x = df_pos[[1]]@rtime,
          y = df_pos[[1]]@intensity
        )) +
        xlab("Retention Time") +
        ylab("Intensity") +
        xlim(rt - rtspan * window, rt + rtspan * window) +
        geom_vline(aes(xintercept = c(rt + rtspan, rt - rtspan)), color = "green", alpha = 0.75) +
        ggtitle(as.character(paste("Lipid Name =", peakdata_pos[i, "compound_name"], " Mode = Positive")),
          subtitle = paste(" M/Z = ", mz, " File = ", file[1], " PPM =", ppm)
        )),
      ggplot2::ggplotGrob(ggplot() +
        geom_line(aes(
          x = df_neg[[1]]@rtime,
          y = df_neg[[1]]@intensity
        )) +
        xlab("Retention Time") +
        ylab("Intensity") +
        xlim(rt - rtspan * window, rt + rtspan * window) +
        geom_vline(aes(xintercept = c(rt + rtspan, rt - rtspan)), color = "green", alpha = 0.75) +
        ggtitle(as.character(paste("Lipid Name =", peakdata_pos[i, "compound_name"], " Mode = Negative")),
          subtitle = paste(" M/Z = ", mz, " File = ", file[2], " PPM =", ppm)
        ))
    ))
  }
}
|
54fb473a89a834dfa9ae1895fd4436e98726432c | 816acc0a1d8f3fd9fc09cab9daa16edfffc1f2e2 | /cachematrix.R | 96f2da6436fa87c5e49083d97537c866de99e0c9 | [] | no_license | raeed20/ProgrammingAssignment2 | 5b0bee4d9bf7c55383c87c01a91c47e0cd292557 | 49b1ca5705af13fd431d9b8bb5c5207d081e59d9 | refs/heads/master | 2020-12-28T08:31:23.273826 | 2014-09-20T20:36:08 | 2014-09-20T20:36:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,092 | r | cachematrix.R | ## the first function creates a litst contatining a function to:
## set the matrix entries, get them, set the inverse of a matrix, and get the it
## the second function returns the inverse of a matrix that was created using the first function
## if the inverse was calculated in prior to the call, it returns the cached inverted matrix
## if not, it calculates it and sets it in the cache
# Build a matrix wrapper that can cache its inverse.
# Returns a list of four accessor closures sharing one environment:
#   set / get             -- replace or retrieve the matrix
#   setinverse / getinverse -- store or retrieve the cached inverse
# Replacing the matrix via set() clears any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix invalidates the cache
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# Return the inverse of a matrix wrapped by makeCacheMatrix().
# On the first call the inverse is computed with solve(data, ...) and
# stored in the wrapper; subsequent calls emit a message and return the
# cached copy without recomputing.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
95fcfd2a4014333d5f742967dcd5fa9002ed9a6e | 81efd5832169ef36880d1b84f390ef8e8f7f23d0 | /Chapter06.R | de0b0f3de3b791f289da777da5e13a69b3df773e | [] | no_license | joechong88/RForEveryone | 0d49ccbf7b8117da77f6f161c48919816c6e5e51 | 0abf6355a1ea37b34dc5734d688cb9680ffc1100 | refs/heads/master | 2021-01-20T09:41:47.456462 | 2014-04-05T15:15:38 | 2014-04-05T15:15:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,187 | r | Chapter06.R | ##############################################################################
# Chapter 06 - Reading Data into R
#
##############################################################################
##############################################################################
# 6.1 Reading CSVs
# The best way is read.table; the result is a data.frame object.
# read.table arguments:
#   1st - full path of the file to be loaded
#
##############################################################################
theURL <- "http://www.jaredlander.com/data/Tomato%20First.csv"
tomato <- read.table(file=theURL, header=TRUE, sep=",") # other possible separator values are "\t", ";"
head(tomato)
# To handle CSV files that have "," within a cell, use read.csv2 or read.delim2 instead of read.table
# stringsAsFactors=FALSE prevents character columns from being converted to factor columns.
# This saves computation time and keeps the columns as character data, which are easier to work with
x <- 10:1
y <- -4:5
q <- c("Hockey", "Football", "Baseball", "Curling", "Rugby", "Lacrosse", "Basketball", "Tennis", "Cricket", "Soccer")
theDF <- data.frame(First=x, Second=y, Sport=q, stringsAsFactors=FALSE)
##############################################################################
# 6.2 Excel Data
# R has issues reading Excel files directly; it is better to convert an Excel
# file to CSV first and use the read.table command
##############################################################################
##############################################################################
# 6.3 Reading from databases
# require(RODBC) - needed to connect to databases via ODBC
##############################################################################
db <- odbcConnect("QV Training")
# simple SELECT * query from one table
ordersTable <- sqlQuery(db, "SELECT * FROM Orders", stringsAsFactors=FALSE)
detailsTable <- sqlQuery(db, "SELECT * FROM [Order Details]", stringsAsFactors=FALSE)
# do a join between the two tables
longQuery <- "SELECT * FROM Orders, [Order Details] WHERE Orders.OrderID = [Order Details].OrderID"
detailsJoin <- sqlQuery(db, longQuery, stringsAsFactors=FALSE)
# check the results
head(ordersTable)
head(detailsTable)
head(detailsJoin)
##############################################################################
# 6.4 Data from Other Statistical Tools
# Functions exist to read from commonly used statistical tools - SPSS, Stata,
# SAS, Octave, Minitab, Systat:
#   read.spss, read.dta, read.ssd, read.octave, read.mtp, read.systat
# All of them return the data as a data.frame, but do not always succeed.
# SAS data is normally protected by requiring a valid SAS license to read. This can be sidestepped using
# Revolution R from Revolution Analytics with the function RxSasData in the RevoScaleR package
##############################################################################
##############################################################################
# 6.5 R Binary Files
##############################################################################
# save the tomato data.frame to disk
save(tomato, file="tomato.rdata")
# remove tomato from memory
rm(tomato)
# check that it no longer exists (this call errors, since it was removed)
head(tomato)
# read it back from the rdata file
load("tomato.rdata")
head(tomato)
# try adding a few objects to store in a single RData file, remove and load again
n <- 20
r <- 1:10
w <- data.frame(n, r)
save(n, r, w, file="multiple.rdata")
rm(n, r, w)
load("multiple.rdata")
##############################################################################
# 6.6 Data included with R
##############################################################################
require(ggplot2)
data(diamonds)
head(diamonds)
##############################################################################
# 6.7 Extract Data from Web Sites
##############################################################################
# 6.7.1. Simple HTML tables - use readHTMLTable
require(XML)
theURL <- "http://www.jaredlander.com/2012/02/another-kind-of-super-bowl-pool"
bowlPool <- readHTMLTable(theURL, which=1, header=FALSE, stringsAsFactors=FALSE)
bowlPool
6cc634edf907384c0d03741679c5db46d5a1fd70 | 7f01d5e67558c1f45f0ce286e453298e559c8000 | /tests/testthat/test_vec.R | 8ad12781585a719642c4b8651c0ca171e925e0fa | [] | no_license | CreRecombinase/EigenH5 | 07da760022aaace56fe9cfcf5e012a362dfb2433 | 50ca6ab8e80ce3fb93418dd2e0b88ad17b841ed5 | refs/heads/master | 2021-01-19T17:49:04.992997 | 2020-03-05T16:14:18 | 2020-03-05T16:14:18 | 101,085,422 | 4 | 0 | null | 2019-11-07T15:43:45 | 2017-08-22T16:43:20 | C++ | UTF-8 | R | false | false | 9,104 | r | test_vec.R | context("vectors")
# Writing to a datapath that already exists replaces its contents.
test_that("I can overwrite a vector", {
  h5_file <- tempfile()
  # an empty, unfiltered, unchunked dataset can also be created
  EigenH5::write_vector_h5(integer(0), h5_file, "empty_test", filter = "none", chunksizes = integer())
  first_vec <- runif(3)
  write_vector_h5(first_vec, h5_file, "test")
  expect_equal(read_vector_h5(h5_file, "test"), first_vec)
  second_vec <- runif(3)
  write_vector_h5(second_vec, h5_file, "test")
  expect_equal(read_vector_h5(h5_file, "test"), second_vec)
})
# A factor survives an HDF5 round trip with values and levels preserved.
testthat::test_that("I can write a factor", {
  h5_file <- tempfile()
  fac <- gl(n = 15, k = 4)
  EigenH5::write_vector_h5(fac, h5_file, "factor")
  round_trip <- EigenH5::read_vector_h5(h5_file, "factor")
  expect_equal(fac, round_trip)
})
# Appending to an extensible dataset (max_dims = NA) grows it in place.
test_that("I can append a vector", {
  tf <- tempfile()
  tv <- runif(3)
  # fixed: the call previously passed 'datapath = , "test"', leaving
  # datapath as an empty named argument; name it explicitly instead
  write_vector_h5(data = tv, filename = tf, datapath = "test", max_dims = c(NA_integer_))
  expect_equal(read_vector_h5(tf, "test"), tv)
  ntv <- runif(3)
  write_vector_h5(filename = tf, datapath = "test", data = ntv, append = TRUE)
  expect_equal(read_vector_h5(tf, "test"), c(tv, ntv))
})
# Long strings round-trip intact: a 254-character string, and one built
# from bytes 1..126 (written without compression).
test_that("can write and read long strings", {
  tvec <- paste0(sample(letters, 254, replace = TRUE), collapse = "")
  tempf <- tempfile()
  write_vector_h5(tvec, tempf, "testw")
  rvec <- read_vector_h5(filename = tempf, "testw")
  expect_equal(rvec, tvec)
  # bytes 1..126 repeated three times, stored unfiltered
  tvec <- paste0(rep(rawToChar(as.raw(1:126)), 3), collapse = "")
  tempf <- tempfile()
  write_vector_h5(tvec, tempf, "testw", filter = "none")
  res_vec <- read_vector_h5(filename = tempf, "testw")
  expect_equal(res_vec, tvec)
})
# A dataset's fixed string size follows min_string_size (+1, per the
# 27 -> 28 and 31 -> 32 expectations below); appending a string longer
# than that fixed size must error.
test_that("can write short strings then long strings", {
  tvec <- paste0(sample(letters, 25, replace = TRUE), collapse = "")
  tempf <- tempfile()
  write_vector_h5(tvec, tempf, "testw", max_dims = NA_integer_, min_string_size = 27L)
  expect_equal(ArrayTypeSize(tempf, "testw"), 28L)
  tvec2 <- paste0(sample(letters, 30, replace = TRUE), collapse = "")
  expect_error(write_vector_h5(tvec2, tempf, "testw", append = TRUE), "string will not fit in dataset")
  # with a large enough minimum size, the longer string can be appended
  write_vector_h5(tvec, tempf, "testw2", max_dims = NA_integer_, min_string_size = 31)
  expect_equal(ArrayTypeSize(tempf, "testw2"), 32)
  tvec2 <- paste0(sample(letters, 30, replace = TRUE), collapse = "")
  write_vector_h5(tvec2, tempf, "testw2", append = TRUE)
  rvec <- read_vector_h5(filename = tempf, "testw2", subset = 2)
  expect_equal(rvec, tvec2)
})
# Round-trip a character vector, checking group/dataset typing, dims, and
# head/offset/subset reads along the way.
test_that("can write string vector", {
  tvec <- c("allb", "allc", "alld")
  tempf <- tempfile()
  # write_vector_h5() should return TRUE on success.
  testthat::expect_true(EigenH5::write_vector_h5(filename = tempf, datapath = "grp/dat", data = tvec))
  expect_equal(typeof_h5(filename = tempf, "grp"), "list")
  expect_equal(typeof_h5(filename = tempf, "grp/dat"), "character")
  expect_equal(dim_h5(filename = tempf, "grp/dat"), length(tvec))
  # NOTE(review): duplicated expectation (identical to the line above).
  expect_equal(dim_h5(filename = tempf, "grp/dat"), length(tvec))
  rd <- read_vector_h5(filename = tempf, datapath = "grp/dat")
  expect_equal(rd, tvec)
  # Partial read: first `datasize` elements only.
  trd <- read_vector_h5(filename = tempf, datapath = "grp/dat", datasize = 2)
  expect_equal(head(tvec, 2), trd)
  # A leading "/" in the datapath should be accepted.
  write_vector_h5(filename = tempf, datapath = "/grp/dat2", data = tvec)
  trd <- read_vector_h5(filename = tempf, "grp/dat2")
  expect_equal(trd, tvec)
  tvec <- c("allb", "allc", "alld")
  # NOTE(review): two unnamed positional arguments here ("/grp2/grp3", "dat2")
  # look suspicious and the dataset written by this call is never read back;
  # confirm the intended signature of write_vector_h5().
  write_vector_h5(filename = tempf, "/grp2/grp3", "dat2", data = tvec)
  trd <- read_vector_h5(filename = tempf, "grp/dat", subset = 2:3)
  expect_equal(tail(tvec, 2), trd)
  trd <- read_vector_h5(filename = tempf, "grp/dat", subset = c(1, 3))
  expect_equal(tvec[c(1, 3)], trd)
})
# typeof_h5()/dim_h5() should report R-level types and lengths for character,
# numeric and integer datasets.
test_that("can check type of vectors", {
  tvec <- c("allb", "allc", "alld")
  tempf <- tempfile()
  write_vector_h5(filename = tempf,datapath = "grp/dat", tvec)
  expect_equal(typeof_h5(filename = tempf, "grp"), "list")
  expect_equal(typeof_h5(filename = tempf, "grp/dat"), "character")
  tvec <- runif(3)
  tempf <- tempfile()
  # NOTE(review): this numeric write is never asserted against.
  write_vector_h5(filename = tempf, datapath = "grp/grp2/dat", tvec)
  tvec <- sample(1:10)
  tempf <- tempfile()
  write_vector_h5(filename = tempf,datapath = "dat", tvec)
  expect_equal(typeof_h5(filename = tempf, datapath = "dat"), "integer")
  expect_equal(dim_h5(filename = tempf, datapath = "dat"), 10)
})
# Writing with an out-of-order `subset` index: the full read-back should equal
# tvec[ind]. NOTE(review): this implies `subset` reorders the data being
# written rather than addressing target slots -- confirm the intended
# semantics of write_vector_h5().
test_that("can write a vector out of order", {
  tvec <- c(1.0, 2.0, 3.0)
  tempf <- tempfile()
  ind <- c(3, 1, 2)
  write_vector_h5(filename = tempf, datapath="grp/dat", tvec, subset = ind)
  trd <- read_vector_h5(filename = tempf, datapath="grp/dat")
  expect_equal(trd, tvec[ind])
})
# Reading with an out-of-order `subset` index returns elements in that order.
test_that("can read a vector out of order", {
  tvec <- 1:3
  tempf <- tempfile()
  write_vector_h5(filename = tempf, datapath="grp/dat", tvec)
  trd <- read_vector_h5(filename = tempf, datapath="grp/dat", subset = c(3, 1, 2))
  expect_equal(trd, tvec[c(3, 1, 2)])
})
# test_that("can read an empty subset", {
# tvec <- runif(3)
# tempf <- tempfile()
# write_vector_h5(filename = tempf, datapath="grp/dat", tvec)
# rd <- read_vector_h5(filename = tempf, datapath="grp/dat", subset = integer())
# expect_equal(rd,integer())
# })
# Round-trip a double vector, plus datasize- and subset-based partial reads.
test_that("can write REAL vector", {
  tvec <- runif(3)
  tempf <- tempfile()
  write_vector_h5(filename = tempf, datapath="grp/dat", tvec)
  expect_equal(dim_h5(filename = tempf, "grp/dat"), length(tvec))
  rd <- read_vector_h5(filename = tempf, datapath="grp/dat")
  expect_equal(rd, tvec)
  trd <- read_vector_h5(filename = tempf, datapath="grp/dat", datasize = 2)
  expect_equal(head(tvec, 2), trd)
  trd <- read_vector_h5(filename = tempf, datapath="grp/dat", subset = 2:3)
  expect_equal(tail(tvec, 2), trd)
  trd <- read_vector_h5(filename = tempf, datapath="grp/dat", subset = c(1, 3))
  expect_equal(tvec[c(1, 3)], trd)
})
# Same for strings, including reversed and non-monotone subsets.
test_that("we can read subsets out of order", {
  tvec <- c("allb", "allc", "alld")
  tempf <- tempfile()
  write_vector_h5(filename = tempf, datapath = "grp/dat", data = tvec)
  strd <- read_vector_h5(filename = tempf, datapath = "grp/dat")
  expect_equal(strd, tvec)
  trd <- read_vector_h5(filename = tempf, datapath = "grp/dat", subset = c(2, 1))
  expect_equal(tvec[c(2, 1)], trd)
  trd <- read_vector_h5(filename = tempf, datapath = "grp/dat", subset = c(3, 1))
  expect_equal(tvec[c(3, 1)], trd)
})
# Basic character round-trip.
test_that("can read string vector", {
  tvec <- c("allb", "allc", "alld")
  tempf <- tempfile()
  write_vector_h5(filename = tempf, datapath="grp/dat", tvec)
  rd <- read_vector_h5(filename = tempf, datapath="grp/dat")
  expect_equal(rd, tvec)
})
# A pre-created fixed-size character dataset reads as empty strings until
# written to.
test_that("can create a vector and then write to it", {
  tvec <- c("allb", "allc", "alld")
  tempf <- tempfile()
  create_vector_h5(filename = tempf, datapath="grp/dat", character(), dim = 3L)
  rd <- read_vector_h5(filename = tempf, datapath="grp/dat")
  expect_equal(rd, c("", "", ""))
  write_vector_h5(filename = tempf, datapath="grp/dat", tvec)
  rd <- read_vector_h5(filename = tempf, datapath="grp/dat")
  expect_equal(rd, tvec)
})
# Numeric round-trip.
test_that("can read/write numeric vector", {
  tvec <- runif(100)
  tempf <- tempfile()
  write_vector_h5(filename = tempf, datapath="grp/dat", tvec)
  rd <- read_vector_h5(filename = tempf, datapath="grp/dat")
  expect_equal(rd, tvec)
})
# Chunked write plus indexed read through read_vector_h5v().
test_that("can read/write numeric vector using offset/datasize", {
  # NOTE(review): library(EigenH5) inside a test is redundant -- the rest of
  # this file calls the package functions without it.
  library(EigenH5)
  tvec <- runif(100)
  tempf <- tempfile()
  write_vector_h5(filename = tempf, datapath="grp/dat", tvec,chunksize=10)
  expect_equal(dataset_chunks(tempf,"grp/dat"),10)
  rd <- read_vector_h5v(filename = tempf,"grp/dat",i = 15L:100L)
  expect_equal(rd, tvec[15:100])
})
# Integer round-trip.
test_that("can read/write integer vector", {
  tvec <- sample(1:100)
  tempf <- tempfile()
  write_vector_h5(filename = tempf, datapath="grp/dat", tvec)
  rd <- read_vector_h5(filename = tempf, datapath="grp/dat")
  expect_equal(rd, tvec)
})
# NOTE(review): duplicate test name -- "can read string vector" already exists
# above; consider renaming this one (e.g. "multi-line write call").
test_that("can read string vector", {
  tvec <- c("allb", "allc", "alld")
  tempf <- tempfile()
  write_vector_h5(filename = tempf,
                  datapath="grp/tdat",
                  data = tvec
  )
  # otvec <- tvec
  # otvec[2] <- NA_character_
  # write_vector_h5(filename = tempf,
  # datapath="grp/otdat",
  # data = otvec
  # )
  #
  # ord <- read_vector_h5(filename = tempf, datapath="grp/otdat")
  # expect_equal(ord,otvec,na.rm=T)
  rd <- read_vector_h5(filename = tempf, datapath="grp/tdat")
  expect_equal(rd, tvec)
})
|
f15e3607ab19e083a19e7c5f8106719b7d0d842b | 5c0f37d8908d2fbd234a0cd0dddb371f4c0f2f77 | /rFreight/man/model_vehicle_tourpattern.Rd | 4367f5669d85acfe1d3e17dccded9cb49ec78328 | [] | no_license | CMAP-REPOS/cmap_freight_model | e5a1515eaf0e1861eab6ec94ea797b95e97af456 | 580f3bda1df885b1c3e169642eb483c2d92d7e3d | refs/heads/master | 2023-05-01T10:26:36.170941 | 2021-02-10T18:24:57 | 2021-02-10T18:24:57 | 73,124,375 | 5 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,258 | rd | model_vehicle_tourpattern.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/model_vehicle_tourpattern.R
\docType{data}
\name{model_vehicle_tourpattern}
\alias{model_vehicle_tourpattern}
\title{Vehicle and Tour Pattern Model Variables and Coefficients}
\format{A dataframe with 5 variables
\describe{
\item{CHID}{Vehicle and tour pattern model choice ID}
\item{CHDESC}{Vehicle and tour pattern model choice description}
\item{VAR}{Explanatory variable}
\item{TYPE}{Type of the explanatory variable}
\item{COEFF}{Coefficient of the variable}
}}
\source{
The vehicle and tour pattern model was estimated using the Texas Commercial Vehicle Survey (RSG (2012) Tour-based and Supply Chain Freight Forecasting Framework Final Report Framework, developed for the Federal Highway Administration with University of Illinois at Chicago and John Bowman BAA DTFH61-10-R-00013.)
}
\usage{
model_vehicle_tourpattern
}
\description{
This file shows the vehicle and tour pattern model and coefficients. A multinomial logit (MNL) model was estimated for the joint choice of vehicle type and tour pattern.
}
\details{
This table is used in the vehicle and tour pattern component of the truck-touring model.
}
\keyword{datasets}
|
551fba783cf38f9008f692951abd5e56f4f490e3 | 574247a0807ce89f92474075daac98cb0b668825 | /programs/departmentTrendAnalysis.R | 3612e3fc01413a1361c72e44e4cda84ffdca5463 | [] | no_license | jhok2013/MATH335 | 09b677255dfac7d85e3471282bd889cb97160fcf | 62025087b5cc8c33e4d8cbf0cc773d6cae0a9ade | refs/heads/master | 2020-12-13T17:19:11.298151 | 2020-01-20T21:44:58 | 2020-01-20T21:44:58 | 234,482,082 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,902 | r | departmentTrendAnalysis.R | #=============================================================================
#James Hough, MATH 335
#Summary:
# Improve the department_data.xlsx data to accurately show the growth over
# time by department of RC&W attendance.
# Steps include;
# Data cleaning/prep and validation
# Graphic generation
#Question:
# What is the growth over time tend by department of RC&W attendance?
#=============================================================================
# Cleaning and Prep
#=============================================================================
#Load necessary libraries
library(tidyverse)
library(ggplot2)
library(readxl)
#Create path for excel file
path <- "C:\\MATH335\\data\\department_data.xlsx"
#Specify sheet name
sheetName <- "RCW_data_long"
#Load file to R
departmentData <- read_xlsx(
path = path,
sheet = sheetName,
col_names = TRUE,
progress = readxl_progress()
)
# Add column of composite semester/year abbreviations ("WI18", "FA19", ...)
# ------------------------------------------------------------------------
# The original repeated an identical switcher/ifelse pattern once per
# semester; a single vectorised pass over a name -> prefix lookup table does
# the same job. Rows whose Semester is not Winter/Fall/Spring keep the NA
# placed by add_column(), exactly as before.
departmentData <- add_column(
  departmentData,
  Semester_Abb = NA
)
semesterPrefix <- c(Winter = "WI", Fall = "FA", Spring = "SP")
knownSemester <- departmentData$Semester %in% names(semesterPrefix)
# Prefix + last two digits of the year, e.g. "Winter" + 2018 -> "WI18".
departmentData$Semester_Abb[knownSemester] <- paste0(
  semesterPrefix[as.character(departmentData$Semester[knownSemester])],
  substr(departmentData$Year[knownSemester], 3, 4)
)
# Switch NA in Count to zeros so missing attendance plots as 0 instead of
# being dropped (replaces the redundant `is.na(Count) == TRUE` ifelse pass).
departmentData$Count[is.na(departmentData$Count)] <- 0
# Filter the data set down to the three columns the graphic needs.
departmentData <- data.frame(
  Department = c(departmentData$Department),
  SemesterAbb = c(departmentData$Semester_Abb),
  Attendance = c(departmentData$Count)
)
# Make string case uniform so e.g. "Math" and "MATH" merge into one series.
# NOTE(review): under R < 4.0 data.frame() would have made Department a
# factor; toupper() then coerces it to character either way.
departmentData$Department <- toupper(departmentData$Department)
#=================================================================
# Graphic generation: one line (+ points) per department across semesters.
#=================================================================
departmentData %>%
  ggplot(aes(x = SemesterAbb, y = Attendance, colour = Department, group = Department)) +
  geom_line() +
  geom_point() +
  ggtitle("RC&W Attendance by Department by Semester") +
  labs(y = "Attendance", x = "Semester")
|
7f067c08442a8b50f32afba1ad1969f672b26bd2 | 842c8c151fb231ca39a9db46b502cd2d4c89b13a | /architectures/R-ML-Scoring/R/train_forecasting_models.R | 7768a4a97830d2b1f1dbd6d677ae2e4c5be6fa42 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | tikyau/AIArchitecturesAndPractices | cb6a3bd6f45a163fab82c44c7419c8e87f6bc222 | 6d23f6bf10aa71e4620f56d2c37655dce2cbf50f | refs/heads/master | 2020-07-26T14:55:06.041838 | 2019-09-13T23:57:25 | 2019-09-13T23:57:25 | 208,682,869 | 1 | 0 | MIT | 2019-09-16T01:17:14 | 2019-09-16T01:17:14 | null | UTF-8 | R | false | false | 3,357 | r | train_forecasting_models.R |
# 03_(optional)_train_forecasting_models.R
#
# This script trains GBM forecasting models for the 13 time steps in the
# forecast horizon and 5 quantiles. Trained models will be saved directly
# to the File Share, overwriting any models that already exist there.
#
# Run time ~30 minutes on a 5 node cluster
# NOTE(review): setwd() driven by the RStudio API only works when this file is
# open in an interactive RStudio session; in the docker/non-interactive path
# the working directory is presumably already the project root -- confirm.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
library(dotenv)
library(jsonlite)
library(doAzureParallel)
library(AzureStor)
source("R/utilities.R")
source("R/options.R")
source("R/create_credentials_json.R")
source("R/create_cluster_json.R")
source("R/create_features.R")
# Register batch pool and options for the job ----------------------------------
# If running from script, within docker container, recreate config files from
# environment variables.
if (!interactive()) {
  print("Creating config files")
  create_credentials_json()
  create_cluster_json()
}
# Authenticate against Azure Batch using the (re)generated credentials file.
setCredentials("azure/credentials.json")
# Set the cluster if already exists, otherwise create it
clust <- makeCluster("azure/cluster.json")
# Register the cluster as the doAzureParallel backend
registerDoAzureParallel(clust)
print(paste("Cluster has", getDoParWorkers(), "nodes"))
# Job options: merge results in the cloud; keep the finished job around
# (autoDeleteJob = FALSE) for debugging.
azure_options <- list(
  enableCloudCombine = TRUE,
  autoDeleteJob = FALSE
)
# Packages attached on every worker before running the %dopar% body.
pkgs_to_load <- c("dplyr", "gbm", "AzureStor")
# Load training data
dat <- read.csv(file.path("data", "history", "product1.csv"))
# Get reference to blob storage
cont <- blob_container(
  get_env("BLOB_CONTAINER_URL"),
  key = get_env("STORAGE_ACCOUNT_KEY")
)
# Train a single model per time step and quantile for steps 1 to 6. Then train
# one model per quantile for all subsequent time steps (without lagged features).
required_models <- list_required_models(
  lagged_feature_steps = 6,
  quantiles = QUANTILES
)
# Train models: one quantile-loss GBM per (step, quantile) combination,
# fitted in parallel on the Azure pool and uploaded straight to blob storage.
result <- foreach(
  idx=1:length(required_models),
  .options.azure = azure_options,
  .packages = pkgs_to_load
) %dopar% {
  step <- required_models[[idx]]$step
  # NOTE(review): this local `quantile` masks stats::quantile inside the
  # worker body (harmless here since stats::quantile is not called).
  quantile <- required_models[[idx]]$quantile
  # Features are rebuilt per model because the lagged features depend on the
  # forecast step.
  dat <- create_features(dat, step = step, remove_target = FALSE)
  if (step <= 6) {
    # Near-horizon models include lag/rolling-month features.
    form <- as.formula(
      paste("sales ~ sku + deal + feat + level +",
            "month_mean + month_max + month_min + lag1 +",
            paste(paste0("price", 1:11), collapse = " + ")
      )
    )
  } else {
    # Far-horizon models: no lagged features available.
    form <- as.formula(
      paste("sales ~ sku + deal + feat + level +",
            paste(paste0("price", 1:11), collapse = " + ")
      )
    )
  }
  # Hyperparameters (N.TREES etc.) come from R/options.R.
  model <- gbm(
    form,
    distribution = list(name = "quantile", alpha = quantile),
    data = dat,
    n.trees = N.TREES,
    interaction.depth = INTERACTION.DEPTH,
    n.minobsinnode = N.MINOBSINNODE,
    shrinkage = SHRINKAGE,
    keep.data = FALSE
  )
  # Drop residual training data to keep the serialized model small.
  model$data <- NULL
  # e.g. "gbm_t3_q50" for step 3 at the 0.5 quantile.
  name <- paste0("gbm_t", as.character(step), "_q",
                 as.character(quantile * 100))
  tmpfile <- tempfile()
  saveRDS(model, file = tmpfile)
  upload_blob(cont, src = tmpfile, dest = paste0("models/", name))
  # Return arbitrary result
  TRUE
}
# Overwrite model files locally
multidownload_blob(
  cont,
  src = "models/*",
  dest = "models",
  overwrite = TRUE
)
# Delete the cluster
delete_cluster(clust)
|
b9fa1548ce8ee8f8f0ff858d34a53f620af27318 | 64735a293878f4e26898ed74aa295272a3b4d206 | /Plot4.R | 74b2a15cf45d663a117ef7cffd76a346cfbdfb62 | [] | no_license | YangZhaoCICAMS/Exploratory-Data-Analysis | 1167b32567b3e3112956e56af1f1f23bda56d100 | a5af2f2d65bf41d99c0b2408fdaf267196eda0a4 | refs/heads/master | 2021-01-10T19:54:26.774237 | 2015-03-06T09:45:18 | 2015-03-06T09:45:18 | 31,761,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,570 | r | Plot4.R | ##Reading the full dataset
# Read the full dataset: semicolon-separated text, "?" marks missing values.
data <- read.table("J:/Coursera/Exploratory Data Analysis/exdata-data-household_power_consumption/household_power_consumption.txt",
                   header = TRUE,
                   sep = ';',
                   na.strings = "?",
                   nrows = 2075259,
                   check.names = FALSE,
                   stringsAsFactors = FALSE,
                   quote = '\"')
# Display the internal structure of the dataset.
str(data)
# Keep only the two days of interest (dates are d/m/Y strings).
plot4.data <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine Date and Time into timestamps for the x axis.
# (The original attach()/detach() pair has been replaced by explicit
# plot4.data$ references: attach() can silently mask other objects on the
# search path and leaves state behind if the script aborts before detach().)
datetime <- strptime(paste(plot4.data$Date, plot4.data$Time, sep = " "),
                     "%d/%m/%Y %H:%M:%S")
# 2 x 2 panel of plots.
par(mfrow = c(2, 2))
# Top-left: global active power over time.
plot(datetime,
     plot4.data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power",
     cex = 0.2)
# Top-right: voltage over time.
plot(datetime,
     plot4.data$Voltage,
     type = "l",
     xlab = "datetime",
     ylab = "Voltage")
# Bottom-left: the three sub-metering series on one panel.
plot(datetime,
     plot4.data$Sub_metering_1,
     type = "l",
     xlab = "",
     ylab = "Energy Submetering")
lines(datetime,
      plot4.data$Sub_metering_2,
      type = "l",
      col = "red")
lines(datetime,
      plot4.data$Sub_metering_3,
      type = "l",
      col = "blue")
legend("topright",
       c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       lty = 1, lwd = 2.5,
       col = c("black","red","blue"),
       bty = "o")
# Bottom-right: global reactive power over time.
plot(datetime,
     plot4.data$Global_reactive_power,
     type = "l",
     xlab = "datetime",
     ylab = "Global Reactive Power")
# Copy the screen device to a 480x480 PNG and close the PNG device.
dev.copy(png,
         file = "J:/Coursera/Exploratory Data Analysis/plot4.png",
         height = 480,
         width = 480)
dev.off()
|
f19aba1aa8c69d7e5982684e579d03619e210cf6 | 43292259e34c3738d1775d5d55bc0b83f027607c | /material original/TemasMATIII_1819/07_Contrastes/TransformR.R | a0de570582d75cd71faceff0943145aa7857cebc | [] | no_license | fenixin15/curso-estadistica-inferencial | 12543ddac6fe43b41e713753b677d17088a84d2d | f655b56aff2f1ef69cd46301f202c05b258ee93e | refs/heads/master | 2020-08-21T18:42:07.514819 | 2019-10-19T10:58:30 | 2019-10-19T10:58:30 | 216,220,287 | 1 | 0 | null | 2019-10-19T14:36:36 | 2019-10-19T14:36:36 | null | UTF-8 | R | false | false | 256 | r | TransformR.R | library(readr)
# Post-process an R Markdown fragment: wrap every "## Ejemplo" section in an
# HTML <div class="example"> block and normalise whitespace, writing kkk.Rmd.
# (Assignments switched from `=` to the conventional `<-`; the regexes and
# replacement strings are unchanged.)
texte <- read_file("kkk.txt")
# Wrap each "## Ejemplo" section (its body = consecutive non-'#' lines) in an
# example div with a bold "**Ejemplo**" marker, keeping the body as \1.
texte <- gsub("## Ejemplo\n(((([^#]*)\n))*)","## Ejemplo\n<div class=\"example\">\n**Ejemplo**\n\\1\n</div>\n\n",texte)
# Collapse runs of 2+ spaces into a single space.
texte <- gsub("[ ]{2,}"," ",texte)
# Collapse runs of 3+ newlines into a single blank line.
texte <- gsub("[\n]{3,}","\n\n",texte)
cat(texte,file="kkk.Rmd")
|
0056a230cd187482f468c6dbd593461de469aaa3 | 3c5d358e0d0f5d5509584bea2038ca74d5ff4d27 | /Clean header files and match GPS.R | ea3398891a4d0308d289bee72db83238dc961235 | [] | no_license | jejoenje/SWTBAT1314 | 4f07437d6c5b9e3938619264b7a17363eb158b26 | 049d479cb7f3ba6811c9362201c6a1c026f6750f | refs/heads/master | 2016-09-06T18:13:17.962348 | 2015-03-30T15:38:45 | 2015-03-30T15:38:45 | 22,647,568 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,446 | r | Clean header files and match GPS.R | library(gdata)
library(sp)
library(rgdal)
# Set root folder with data:
rootf <- '../2014/Bats'
# List folders in data folder - should ONLY be individual sites (check!)
sfolders <- list.files(rootf)
# Count no. folders (== no. of sites)
nfolders <- length(sfolders)
# Start with an empty data frame for all site data:
alldat <- as.data.frame(NULL)
alldat_inc_noise <- as.data.frame(NULL)
# Loop through each site i:
# Loop through each site i. NOTE(review): 1:nfolders iterates over c(1, 0)
# when the data folder is empty; seq_len(nfolders) would be the safe form
# (likewise for the inner 1:length(...) loops).
for (i in 1:nfolders) {
  # Within site i, list folders - should ONLY be individual dates (check!)
  sdates <- list.files(paste(rootf,sfolders[i],sep='/'))
  # Loop through each date j in site i:
  for (j in 1:length(sdates)) {
    # List all files within date j in site i:
    flist <- list.files(paste(rootf, sfolders[i], sdates[j], sep='/'))
    # Read Anabat header file for date j in site i:
    headerfile <- read.csv(paste(rootf, sfolders[i], sdates[j], 'header.txt', sep='/'),header=T, sep='\t')
    # Create a copy of the headerfile data:
    nwheaderfile <- headerfile
    # 'Clean' individual Anabat file names (rows in header file).
    # Remove extensions. NOTE(review): sub() treats the pattern as a regex,
    # so '.' here matches any character; fixed = TRUE would make it literal.
    nwheaderfile$Name <- sub('_000.00#','',nwheaderfile$Name)
    # Remove subsequent _, _N_, or _0_ so that file names can be cleanly split:
    nwheaderfile$Name <- sub('___0_', '___', nwheaderfile$Name)
    nwheaderfile$Name <- sub('_N_0_', '_N_', nwheaderfile$Name)
    # Find all files with gps.txt extension:
    gpsfilenames <- paste(rootf, sfolders[i], sdates[j],
                          flist[grep('gps.txt', flist)],sep='/')
    # Check if there is more than one:
    if (length(gpsfilenames)>1) {
      # If there is, start with an empty dataframe
      gpsfile <- as.data.frame(NULL)
      # Loop through each gps file n:
      for (n in 1:length(gpsfilenames)) {
        # Append its data to gpsfile dataframe
        gpsfile <- rbind(gpsfile, read.csv(gpsfilenames[n],sep='\t',header=T))
      }
    } else {
      # If not, read gps file:
      gpsfile <- read.csv(gpsfilenames,sep='\t',header=T)
    }
    # Vectorize and trim LATITUDE and LONGITUDE columns in gps data:
    gpsfile$LATITUDE <- as.vector(gpsfile$LATITUDE)
    gpsfile$LONGITUDE <- as.vector(gpsfile$LONGITUDE)
    gpsfile$LATITUDE <- trim(gpsfile$LATITUDE)
    gpsfile$LONGITUDE <- trim(gpsfile$LONGITUDE)
    # Take off N suffix from LAT:
    gpsfile$LATITUDE <- sub(' N','',gpsfile$LATITUDE)
    # Check which LON is suffixed 'W' (i.e. western hemisphere):
    minus <- grep('W', gpsfile$LONGITUDE)
    # Remove LON suffixes:
    gpsfile$LONGITUDE <- sub(' W','',gpsfile$LONGITUDE)
    gpsfile$LONGITUDE <- sub(' E','',gpsfile$LONGITUDE)
    # Add '-' to those LONs marked W:
    gpsfile$LONGITUDE[minus] <- paste('-',gpsfile$LONGITUDE[minus],sep='')
    # Project coordinates in WGS84, then transform to British National Grid
    # (EPSG:27700). Note coordinates()<- turns gpsfile into a
    # SpatialPointsDataFrame; $-assignment on it still works below.
    gpsfile$LATITUDE <- as.numeric(gpsfile$LATITUDE)
    gpsfile$LONGITUDE <- as.numeric(gpsfile$LONGITUDE)
    coordinates(gpsfile) <- c('LONGITUDE','LATITUDE')
    proj4string(gpsfile) <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84") ### WGS84
    gpsfile_bng <- spTransform(gpsfile, CRS("+init=epsg:27700"))
    gpsfile_bng <- as.data.frame(gpsfile_bng)
    gpsfile$BNG_x <- gpsfile_bng$LONGITUDE
    gpsfile$BNG_y <- gpsfile_bng$LATITUDE
    # Vectorise gps file name (rows) and remove extension so to match names with header file names:
    gpsfile$NAME <- as.vector(gpsfile$NAME)
    gpsfile$NAME <- sub('.wav', '',gpsfile$NAME)
    # Match LAT and LON columns from gps file to header file data, on both Name columns:
    nwheaderfile$LAT <- gpsfile$LATITUDE[match(nwheaderfile$Name, gpsfile$NAME)]
    nwheaderfile$LON <- gpsfile$LONGITUDE[match(nwheaderfile$Name, gpsfile$NAME)]
    nwheaderfile$BNG_y <- gpsfile$BNG_y[match(nwheaderfile$Name, gpsfile$NAME)]
    nwheaderfile$BNG_x <- gpsfile$BNG_x[match(nwheaderfile$Name, gpsfile$NAME)]
    # Sort species column.
    # First vectorise and trim the column.
    nwheaderfile$Species <- as.vector(nwheaderfile$Species)
    nwheaderfile$Species <- trim(nwheaderfile$Species)
    # Find those values where more than one species was labelled (value split by commas):
    no_mult <- grep(',',nwheaderfile$Species)
    # Find species for those values with more than one:
    spp_mult <- nwheaderfile[no_mult,'Species']
    # Split resulting values by commas:
    spp_mult_split <- strsplit(spp_mult, ',')
    # Count the number of species occurrences in each of these:
    spp_mult_count <- unlist(lapply(spp_mult_split, length))
    # Now repeat original header data X times for each value with X species based on values extracted above:
    headersection <- nwheaderfile[rep(no_mult, spp_mult_count),]
    # Make a list of the species values for each multi-spp occurrence (should be same length as new header
    # section above), and change species column in new header section to these.
    # End result should be a species column with a single species per occurrence.
    headersection$Species <- unlist(spp_mult_split)
    # Remove the multi-species occurrences from original header file:
    if(nrow(nwheaderfile[-no_mult,])!=0) {
      nwheaderfile <- nwheaderfile[-no_mult,]
      # Add new header section (with multi-ssp occurrences now as repeated rows) to new header data:
      nwheaderfile <- rbind(nwheaderfile, headersection)
      # Re-order new header data in order of occurrence:
      nwheaderfile <- nwheaderfile[order(as.numeric(row.names(nwheaderfile))),]
    }
    # Add column with site/survey id
    nwheaderfile$Loc <- trim(as.vector(nwheaderfile$Loc))
    nwheaderfile$SURVEYID <- paste(nwheaderfile$Loc, j, sep='-')
    # Write the new header file data with coords to the site/date folder.
    # quote = which(...) quotes only the LAT/LON columns (by index).
    write.csv(nwheaderfile, paste(rootf, sfolders[i], sdates[j],
                                  'header_coord.csv', sep='/'),
              quote=which(names(nwheaderfile)=='LAT'|names(nwheaderfile)=='LON'))
    # Print site/date name to show progress:
    print(paste(sfolders[i], sdates[j], sep='/'))
    # Merge all bat fixes names with all noise fix names, and add coordinates, as reference.
    noisefiles <- list.files(paste(rootf, sfolders[i], sdates[j], 'NOISE',sep='/'))
    noisefiles <- sub('_000.00#', '', noisefiles)
    noisefiles <- sub('___0_', '___', noisefiles)
    allfiles <- c(nwheaderfile$Name, noisefiles)
    allfiles <- data.frame(Name=allfiles)
    allfiles$LAT <- gpsfile$LATITUDE[match(allfiles$Name, gpsfile$NAME)]
    allfiles$LON <- gpsfile$LONGITUDE[match(allfiles$Name, gpsfile$NAME)]
    allfiles$BNG_y <- gpsfile$BNG_y[match(allfiles$Name, gpsfile$NAME)]
    allfiles$BNG_x <- gpsfile$BNG_x[match(allfiles$Name, gpsfile$NAME)]
    allfiles$SURVEYID <- paste(sfolders[i], j, sep='-')
    write.csv(allfiles, paste(rootf, sfolders[i], sdates[j],
                              'bats and noise fixes.csv', sep='/'),
              quote=which(names(allfiles)=='LAT'|names(allfiles)=='LON'))
    # Add current site/date data to 'all data' output:
    alldat <- rbind(alldat, nwheaderfile)
    alldat_inc_noise <- rbind(alldat_inc_noise, allfiles)
  }
} # Repeat above for all sites i and dates j.
# Write all site/date file to output folder.
write.csv(alldat, 'data/SWT 2014 all bat fixes with coords.csv', row.names=T)
write.csv(alldat_inc_noise, 'data/SWT 2014 all fixes INC NOISE with coords.csv', row.names=T)
|
497f68ac7352b9b0dd6fcc171a3b1ccd68dcc812 | efcda1097e024f543e0359b68aa349187ccedd46 | /Brainwaves-Societe-Generale/ma_weight_cor_km.R | 58433c45ecc6c65580bdd391325b44524d919f33 | [] | no_license | neelnj/Data-Science-Competitions | c473c875c0724de6833a02242288bb9cb86cc055 | 0c926347957162ff2c344330eee854f9bd4a59c2 | refs/heads/master | 2020-07-04T03:41:01.341726 | 2017-03-23T09:52:47 | 2017-03-23T09:52:47 | 74,213,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 524 | r | ma_weight_cor_km.R | E=read.csv("train.csv")
# Moving-average / correlation / k-means pipeline.
# Reads train+test, smooths each series with a linearly-weighted centred
# moving average, codes each point as +1/-1 depending on whether the smoothed
# value is >= the raw value, then clusters the column-correlation matrix.
cl <- E[, ncol(E)]
cl <- as.factor(cl)  # class labels from train.csv (kept for reference)
E <- E[, -ncol(E)]
# Renamed from `F`: shadowing the built-in FALSE shorthand invites subtle bugs.
testSet <- read.csv("test.csv")
E <- rbind(E, testSet)
E <- E[, -1]
E <- E[1:200, ]
# Centred moving average with linearly increasing weights 2*i/(n*(n+1)).
# NOTE(review): this is stats::filter; attaching dplyr would mask it.
ma <- function(x, n) {
  filter(x, 2 * c(1:n) / (n * (n + 1)), sides = 2)
}
n <- 20
ME <- ma(E, n)
#ME=ME[-(1:n),]
#TE=E[-(1:n),]
# Drop rows where the filter produced NA at the series edges (checked on the
# first column only, as in the original).
rem <- which(is.na(ME[, 1]))
# Guard: x[-integer(0), ] would drop EVERY row, so only subset when there is
# actually something to remove.
if (length(rem) > 0) {
  ME <- ME[-(rem), ]
  TE <- E[-(rem), ]
} else {
  TE <- E
}
# Sign matrix: +1 where the smoothed value is >= the raw value, else -1.
# (Vectorised replacement for the original nested for-loops; same values.)
NE <- matrix(-1, ncol = ncol(ME), nrow = nrow(ME))
NE[as.matrix(ME) >= as.matrix(TE)] <- 1
# Cluster the column-correlation structure into 7 groups.
C <- cor(NE)
km <- kmeans(C, 7)
pred <- km$cluster
9c141e90295a10821ec40ef78066fdbf7fcb0e76 | 7bfc268b89b2538ac6a827c5a69bd281d8702c17 | /01_inst2/ui_inst2.R | 8c0c4f82c103db941b3fde9c1a3e463e9799d64e | [] | no_license | TakuyaK0625/KAKEN.App | 93d4b7776819ae7c5b34f6f1011865c3e7914453 | 003890e1b782848758a50ebfe0ba74e1dbf8743e | refs/heads/master | 2021-02-17T02:53:49.243113 | 2020-04-28T14:42:54 | 2020-04-28T14:42:54 | 245,065,045 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,037 | r | ui_inst2.R | tabItem_inst2 <- tabItem(tabName = "institution2", sidebarLayout(
# サイドバー
sidebarPanel(
# フィルター適用ボタン
actionButton("filter_inst2", (strong("Apply Filter")),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
br(),
br(),
# 研究期間/比較機関
fluidRow(
column(6, selectInput("group_inst2", "グループ", choices = c("---", names(Group)))),
column(6, textInput("inst_inst2", "追加機関", value = "信州")),
column(12, sliderInput("year_inst2", "対象年度", min = 2018, max = 2020, value = c(2018, 2019)))
),
# 審査区分チェックボックス
p(strong("審査区分")),
shinyTree("area_inst2", checkbox = TRUE),
br(),
# 研究種目チェックボックス
checkboxGroupInput("type_inst2", "研究種目", type),
actionLink("selectall_inst2", "Select All")
),
# メインパネル
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("時系列",
# 折れ線グラフ
h1(strong("Line plot")),
fluidRow(
column(3, selectInput("line_yaxis2", "Y軸の値", choices = list("件数", "総額", "平均額", "総額シェア")))
),
plotlyOutput("line_inst2"),
h1(strong("Summary Data")),
dataTableOutput("table_line_inst2"),
downloadButton("downloadData_line2", "Download")
),
tabPanel("備考",
br(),
p("このページでは研究代表者の所属機関単位で各種の集計/可視化が行えるようになっています。特定のグループ内で
複数機関を比較することを想定していますが、そこに任意の機関を加えることも可能です。なお、注意点は以下の通りです。"),
br(),
p(strong("【所属機関について】")),
p("・転職等により所属機関が複数にまたがる場合には、最も古い所属機関を用いて集計しています。"),
p("・所属機関はあらかじめ法人種別や「〜大学」を削除しています。任意の機関を分析に加える場合にも、法人種別や「〜大学」は
削除するようにしてください(例:「国立大学法人信州大学」⇨「信州」)。"),
br(),
p(strong("【研究機関グループについて】")),
p("研究機関グループは以下の文献、サイトを参考にしています。"),
br(),
p(strong("◯旧帝大")),
p("https://ja.wikipedia.org/wiki/旧帝大"),
p(strong("◯旧六医大")),
p("https://ja.wikipedia.org/wiki/旧六医大"),
p(strong("◯新八医大")),
p("https://ja.wikipedia.org/wiki/新八医大"),
p(strong("◯NISTEP_G1~G3")),
p("NISTEPによる2009年〜2013年の論文シェアに基づく大学グループ分類。以下の文献を参考。"),
p("村上 昭義、伊神 正貫 「科学研究のベンチマーキング 2019」,NISTEP RESEARCH MATERIAL, No.284,
文部科学省科学技術・学術政策研究所. DOI: http://doi.org/10.15108/rm284"),
p(strong("◯国立財務_A~H")),
p("https://www.mext.go.jp/b_menu/shingi/kokuritu/sonota/06030714.htm")
)
)
)
)
)
|
541baa1f04a68ed20a09af5977112028e323366a | d78249151946420855a859580ec016e072251f42 | /tests/testthat/test-draw-dm.R | 711b69d9226197bb5b2885cd3f3e97e218113542 | [
"MIT"
] | permissive | philipp-baumann/dm | 4658c63765b4ebedba6612cea7f28f494f04195b | 0a221f3e597c8a7a3ebcb8dd031b68cb4576dbb7 | refs/heads/master | 2020-09-09T09:47:11.733314 | 2019-11-11T06:32:25 | 2019-11-11T06:32:25 | 221,415,191 | 1 | 0 | NOASSERTION | 2019-11-13T08:57:47 | 2019-11-13T08:57:46 | null | UTF-8 | R | false | false | 909 | r | test-draw-dm.R | test_that("API", {
expect_identical(
color_quos_to_display(
flights = "blue",
airlines = ,
airports = "orange",
planes = "green_nb"
) %>%
nest(data = -new_display) %>%
deframe() %>%
map(pull),
list(accent1 = "flights", accent2 = c("airlines", "airports"), accent4nb = "planes")
)
})
# A *trailing* empty named argument must raise a classed "last_col_missing"
# error (a non-trailing one means "same color as the following table", as
# exercised by the "API" test above).
test_that("last", {
  expect_cdm_error(
    color_quos_to_display(
      flights = "blue",
      airlines =
    ),
    class = "last_col_missing"
  )
})
# Unknown color names are rejected with a classed "wrong_color" error.
test_that("bad color", {
  expect_cdm_error(
    color_quos_to_display(
      flights = "mauve"
    ),
    class = "wrong_color"
  )
})
# cdm_get_colors() reports the default table colors of the nycflights13 dm.
test_that("getter", {
  expect_equal(
    cdm_get_colors(cdm_nycflights13()),
    tibble::tribble(
      ~table, ~color,
      "airlines", "orange",
      "airports", "orange",
      "flights", "blue",
      "planes", "orange",
      "weather", "green"
    )
  )
})
|
a2cfb45f41478c1c83b56d16e490978ab8caf217 | 7c5f7d788abc5c96a5620aa1322691dd48d55c0c | /farmacja_v1.R | 6a576aa8fe4e2e19088de1b831139cba310a2b7c | [] | no_license | mzareba/IntOb | cf86ed463546d6ed192d0b73f3e8d5cb28c10324 | 2b7f8bfdece41357056d74bde62e6312bc3c48a0 | refs/heads/master | 2021-09-07T08:30:59.151141 | 2018-02-20T09:51:13 | 2018-02-20T09:51:13 | 113,911,308 | 0 | 0 | null | 2018-02-20T09:51:14 | 2017-12-11T21:38:35 | R | UTF-8 | R | false | false | 15,484 | r | farmacja_v1.R | ####################### libraries
require(compiler)
require(stringr)
library(GA)
library(foreach)
source("parameters.R")
####################### functions
## Function used by GA, definition from source.
# Custom selection operator for GA::ga(): unbiased k-tournament selection.
# `object` is the GA S4 state (only slots @popSize, @fitness and @population
# are used). Returns the list(population, fitness) shape a GA selection
# operator must produce.
SELECTION <- function(object, k = 3, ...) {
  # (unbiased) Tournament selection
  sel <- rep(NA, object@popSize)
  # For each slot of the next generation, draw k distinct candidates and keep
  # the one with the highest fitness.
  for(i in 1:object@popSize)
  { s <- sample(1:object@popSize, size = k)
    sel[i] <- s[which.max(object@fitness[s])]
  }
  out <- list(population = object@population[sel,,drop=FALSE],
              fitness = object@fitness[sel])
  return(out)
}
# Custom crossover operator for GA::ga(): blend (BLX-alpha) crossover with a
# fixed alpha of 0.5. For every decision variable, both children are drawn
# uniformly from the parents' interval extended by a*(width) on each side and
# clipped to the search bounds (object@min / object@max).
CROSSOVER <- function(object, parents, ...) {
  # Blend crossover
  parents <- object@population[parents,,drop = FALSE]
  n <- ncol(parents)
  a <- 0.5
  # a <- exp(-pi*iter/max(iter)) # annealing factor
  children <- matrix(as.double(NA), nrow = 2, ncol = n)
  for(i in 1:n)
  { x <- sort(parents[,i])
    # Extend the parent interval by a*(x2-x1) on both sides, but never beyond
    # the variable's bounds.
    xl <- max(x[1] - a*(x[2]-x[1]), object@min[i])
    xu <- min(x[2] + a*(x[2]-x[1]), object@max[i])
    children[,i] <- runif(2, xl, xu)
  }
  # Children's fitness is left NA, to be evaluated by the caller.
  out <- list(children = children, fitness = rep(NA,2))
  return(out)
}
# Custom mutation operator for GA::ga(): shifts the whole parent vector by
# +/- 67% of the per-variable search range, damped linearly over the run
# (1 - @iter/@maxiter), so mutations shrink as the GA converges. Any
# coordinate pushed outside its bounds is resampled uniformly within them.
MUTATION <- function(object, parent, ...) {
  # Random mutation around the solution
  mutate <- parent <- as.vector(object@population[parent,])
  # ~1 at the start of the run, 0 at @maxiter. (Sic: "dempening" = dampening.)
  dempeningFactor <- 1 - object@iter/object@maxiter
  direction <- sample(c(-1,1),1)
  value <- (object@max - object@min)*0.67
  mutate <- parent + direction*value*dempeningFactor
  outside <- (mutate < object@min | mutate > object@max)
  # Resample out-of-bounds coordinates uniformly inside their bounds.
  for(j in which(outside))
  { mutate[j] <- runif(1, object@min[j], object@max[j]) }
  return(mutate)
}
## RMSE function
# Root-mean-square error of the model string `equat` evaluated on `matrix`:
# input columns 1..k-1 are exposed as In1..In(k-1), the last column is the
# observed output, and `parameters` is exposed as C. Any evaluation error is
# silenced by try() and the function returns Inf.
# NOTE(review): DEAD CODE -- this definition is immediately shadowed by a
# second, equivalent RMSE1 defined just below; one of the two should be
# deleted.
RMSE1 <- function(matrix, parameters, equat) {
  res <- Inf
  C <- parameters
  try (for (i in 1:(dim(matrix)[2] - 1)) {
    assign(paste("In", i, sep = ""), as.double(matrix[, i]))
    out_RMSE <- as.double(matrix[, dim(matrix)[2]])
  }, TRUE)
  try (y <- eval(parse(text = equat)), TRUE)
  try (res <- sqrt(mean((y - out_RMSE) ^ 2)), TRUE)
  return (res)
}
## RMSE function
# Root-mean-square error of the model string `equat` evaluated on `matrix`.
#
# matrix     numeric matrix; columns 1..(k-1) are inputs, exposed to `equat`
#            as In1..In(k-1); the last column holds the observed output.
# parameters numeric coefficient vector, exposed to `equat` as C.
# equat      string containing an R expression in terms of C and In1..Inn.
#
# Returns the RMSE, or Inf when the expression cannot be evaluated (the
# try(..., TRUE) wrappers deliberately silence any error).
RMSE1 <- function(matrix, parameters, equat) {
  res <- Inf
  C <- parameters
  # Expose each input column as In1, In2, ... in this frame. seq_len() keeps
  # the loop safe for a degenerate single-column matrix, where the original
  # 1:(k-1) would have iterated over c(1, 0).
  try (for (i in seq_len(dim(matrix)[2] - 1)) {
    assign(paste("In", i, sep = ""), as.double(matrix[, i]))
  }, TRUE)
  # Observed output is the last column (hoisted out of the loop above, where
  # it was redundantly reassigned on every iteration).
  try (out_RMSE <- as.double(matrix[, dim(matrix)[2]]), TRUE)
  try (y <- eval(parse(text = equat)), TRUE)
  try (res <- sqrt(mean((y - out_RMSE) ^ 2)), TRUE)
  return (res)
}
## Function for minimization
# Mean squared error of the model expression `equat` (a string evaluated with
# coefficients C = parameters; the predictor vectors In1, In2, ... must be
# visible from the surrounding environment) against the global observed
# vector `out`. An NA/NaN result is mapped to Inf so the optimizer rejects
# invalid parameter sets.
fitness <- function(parameters, equat) {
  C <- parameters
  predicted <- as.numeric(eval(parse(text = equat)))
  mse <- mean((predicted - out) ^ 2)
  if (is.na(mse)) {
    mse <- Inf
  }
  return(mse)
}
## Negative reflection of function for minimization (fitness)
# GA maximizes while fitness() is an error to be minimized, so the sign is
# flipped; the result is then clamped to the band
# [minusInfForGaFitness, plusInfForGaFitness] (globals from parameters.R) to
# keep the GA's fitness values finite.
fitnessReflection <- function(x, y) {
  negated <- -fitness(x, y)
  clamped <- pmax(negated, minusInfForGaFitness)
  return(pmin(clamped, plusInfForGaFitness))
}
## T - res function
# Build an (Observed, Predicted) comparison matrix for the model string
# `equat` evaluated on `matrix` with coefficients `parameters` (exposed as
# C). Column layout of `matrix` is the same as in RMSE1: inputs In1..In(k-1)
# in the first k-1 columns, observed output in the last column.
tRes1 <- function(matrix, parameters, equat) {
  C <- parameters
  # seq_len() keeps the loop safe for a single-column matrix (1:(k-1) would
  # have iterated over c(1, 0)).
  try (for (i in seq_len(dim(matrix)[2] - 1)) {
    assign(paste("In", i, sep = ""), as.double(matrix[, i]))
  }, TRUE)
  # Observed values come from the last column (hoisted out of the loop, where
  # the original reassigned them on every iteration).
  try (observed <- as.double(matrix[, dim(matrix)[2]]), TRUE)
  try (predicted <- eval(parse(text = equat)), TRUE)
  try (res <- cbind(observed, predicted), TRUE)
  colnames(res) <- c("Observed", "Predicted")
  return (res)
}
# Agent "meeting" phase: agents are paired at random and, in each pair, the
# agent with the LARGER fitness value (i.e. the larger model error, since
# fitness is minimized) pays ENERGY_EXCHANGE units of energy to the other;
# ties are paid by the second agent. A payer whose remaining energy is
# <= ENERGY_EXCHANGE hands over everything it has left and is removed.
#
# population: matrix, one row per agent; columns 1..n_params = parameters,
#             column n_params+1 = energy, column n_params+2 = fitness.
# Relies on the global ENERGY_EXCHANGE (loaded from parameters.R).
# Returns the post-meeting population matrix.
#
# BUG FIX: the original let `population[-indexes, ]` collapse to a vector,
# which (a) made `is.integer(nrow(population))` TRUE for any remaining
# matrix, so the loop exited after a SINGLE meeting, and (b) crashed with
# "argument is of length zero" for odd populations of 3+ agents. Using
# drop = FALSE keeps the invariant that `population` is always a matrix.
MEETING <- function(population, n_params) {
  fitnessIndex <- n_params + 2
  energyIndex <- n_params + 1
  out <- matrix(ncol = n_params + 2, nrow = 0)
  while (nrow(population) >= 2) {
    indexes <- sample(seq_len(nrow(population)), 2)
    object1 <- population[indexes[1], ]
    object2 <- population[indexes[2], ]
    population <- population[-indexes, , drop = FALSE]
    if (object1[fitnessIndex] > object2[fitnessIndex]) {
      # object1 has the larger fitness (larger error) and pays object2.
      if (object1[energyIndex] <= ENERGY_EXCHANGE) {
        # Not enough energy to pay the full toll: object2 takes what is left
        # and object1 dies (it is not added back).
        object2[energyIndex] <- object2[energyIndex] + object1[energyIndex]
        out <- rbind(out, object2)
      } else {
        object2[energyIndex] <- object2[energyIndex] + ENERGY_EXCHANGE
        object1[energyIndex] <- object1[energyIndex] - ENERGY_EXCHANGE
        out <- rbind(out, object1, object2)
      }
    } else {
      # object2 pays; fitness ties are resolved in object1's favour.
      if (object2[energyIndex] <= ENERGY_EXCHANGE) {
        object1[energyIndex] <- object1[energyIndex] + object2[energyIndex]
        out <- rbind(out, object1)
      } else {
        object1[energyIndex] <- object1[energyIndex] + ENERGY_EXCHANGE
        object2[energyIndex] <- object2[energyIndex] - ENERGY_EXCHANGE
        out <- rbind(out, object1, object2)
      }
    }
  }
  # An odd population leaves one agent without a partner; it passes through
  # unchanged (this is the intent of the original in-loop comment).
  if (nrow(population) == 1) {
    out <- rbind(out, population)
  }
  return(out)
}
## One "breeding" round of the EMAS: agents with more than ENERGY_BREEDING
## energy become parents; consecutive parent pairs produce (with probability
## PROBABILITY_BREEDING) one child by blend crossover (a = 0.1), each parent
## donating half of its energy to the child. The fitness column of the
## returned population is refreshed via RECALCULATE_FITNESS.
## Depends on globals ENERGY_BREEDING and PROBABILITY_BREEDING.
BREEDING <- function(population, n_params) {
  fitnessIndex = n_params + 2
  energyIndex = n_params + 1
  parents_population = matrix(ncol = n_params + 2, nrow = 0)
  rest_population = matrix(ncol = n_params + 2, nrow = 0)
  ## Split the population (in random order) into breeders and the rest.
  while(nrow(population) > 0) {
    index = sample(1:nrow(population), 1)
    parent = population[index,]
    population = population[-index,,drop=FALSE]
    if(parent[energyIndex] > ENERGY_BREEDING) {
      parents_population = rbind(parents_population, parent)
    } else {
      rest_population = rbind(rest_population, parent)
    }
  }
  iter = 1
  children = matrix(nrow = 0, ncol = n_params + 2)
  while(iter < nrow(parents_population)) {
    index1 = iter
    index2 = iter + 1
    iter = iter + 2
    if (runif(1) > PROBABILITY_BREEDING)
      next
    ## BUG FIX: the first parent was selected with the stale loop variable
    ## `index` (left over from the split loop above) instead of `index1`.
    parents = rbind(parents_population[index1,], parents_population[index2,])
    child <- matrix(as.double(NA), nrow = 1, ncol = n_params + 2)
    a = 0.1
    ## Blend crossover: sample each child parameter uniformly from the
    ## parents' interval extended by a = 10% on each side.
    for(i in 1:n_params) {
      x <- sort(parents[,i])
      xl <- x[1] - a*(x[2]-x[1])
      xu <- x[2] + a*(x[2]-x[1])
      child[,i] <- runif(1, xl, xu)
    }
    energy_parent1 = as.integer(parents_population[index1,energyIndex]/2)
    parents_population[index1, energyIndex] = parents_population[index1, energyIndex] - energy_parent1
    energy_parent2 = as.integer(parents_population[index2,energyIndex]/2)
    ## BUG FIX: parent 2 must give up its own donation (energy_parent2),
    ## not parent 1's, otherwise total energy is not conserved.
    parents_population[index2, energyIndex] = parents_population[index2, energyIndex] - energy_parent2
    child[1,energyIndex] = energy_parent1 + energy_parent2
    children = rbind(children, child)
  }
  population = rbind(rest_population, parents_population, children)
  population = RECALCULATE_FITNESS(population, n_params)
  return(population)
}
## Re-evaluate the fitness (last) column of every agent against the global
## `equation`, using the sibling fitness() objective. The first n_params
## columns hold the parameter vector; column n_params + 1 is energy.
RECALCULATE_FITNESS <- function(population, n_params) {
  ## seq_len() iterates zero times for an empty population; the original
  ## 1:nrow(population) would evaluate rows 1 and 0 and crash.
  for (i in seq_len(nrow(population))) {
    population[i, n_params + 2] <- fitness(parameters = head(population[i, ], -2), equat = equation)
  }
  return(population)
}
##Optimize functions
## Byte-compile the hot-path functions (compiler::cmpfun) for speed.
## NOTE(review): RMSE1 is defined elsewhere in this file/project - verify.
RMSE <- cmpfun(RMSE1)
funct <- cmpfun(fitnessReflection)
tRes <- cmpfun(tRes1)
####################### reading equations
## Read the raw candidate equations; `fileName` is expected to be set
## earlier in the script.
con <- file(fileName, open = "r")
equationsRaw <- readLines(con)
close(con)
####################### preprocessing
## Base names of the per-fold training (skel_plik) and test (skel_plik1)
## files; the fold number and ".txt" are appended inside the main loop.
skel_plik<-paste("./25in_IKr_iv_padelall_pIC_no")
skel_plik1<-paste("./t-25in_IKr_iv_padelall_pIC_no")
# equationBuf <- gsub(" ", "", equationsRaw[2])
# equation <- gsub("ln", "log", equationBuf)
## Normalize the equation: strip spaces and translate "ln" into R's log().
equaionToOptim<-readLines("equation.txt")
equationBuf<-gsub(" ", "", equaionToOptim)
equation<-gsub("ln", "log", equationBuf)
print("Equation for optimization")
print(equation)
## Number of free parameters = occurrences of "C" in the equation text.
## NOTE(review): this also counts a "C" inside other identifiers - verify.
N_params <- str_count(equation, "C")
## Prepare parameters and variables
## Per-fold results of the current restart and of the best restart so far.
RMSE_val <- vector(length = max_loop)
all_params <- matrix(data = NA, nrow = max_loop, ncol = N_params)
RMSE_ind <- vector(length = max_loop)
best_all_params <-matrix(data = NA, nrow = max_loop, ncol = N_params)
best_RMSE_val <- vector(length = max_loop)
best_RMSE_ind <- vector(length = max_loop)
best_RMSE_total <- 1000000000000
###############################
########## FIRST ONE ##########
############ START ############
###############################
# ####################### reading inputs
# trainMatrix <-
# read.csv(inputTrain,
# header = FALSE,
# sep = "\t",
# colClasses = "numeric")
# testMatrix <-
# read.csv(inputTest,
# header = FALSE,
# sep = "\t",
# colClasses = "numeric")
# for (i in 1:(dim(trainMatrix)[2] - 1)) {
# assign(paste("In", i, sep = ""), as.double(trainMatrix[, i]))
# out <- as.double(trainMatrix[, dim(trainMatrix)[2]])
# }
# population <- 100
# prmsList <- matrix(data = NA, nrow = population, ncol = N_params)
# for (i in 1:population) {
# prmsList[i, ] <- rnorm(N_params) / 10
# }
# fitness <- function(prms) {
# return (funct(prms, equation))
# }
# populationFunction <- function() {
# return (prmsList)
# }
# #initialFit<-funct(params, equation) #fitness?
# #cat("Initial fitness = ",initialFit,"\n")
# GA <-
# ga(
# fitness = fitness,
# popSize = population,
# type = "real-valued",
# min = c(-1,-1),
# max = c(1, 1)
# )
# summary(GA)
###############################
########## FIRST ONE ##########
############# END #############
###############################
##Supra optimization loop
## Random-restart wrapper: repeat the whole k-fold optimization
## max_supra_loop times and keep the run with the lowest mean test RMSE.
for (lk_supra_loop in 1: max_supra_loop) {
RMSE_total<-0
##Main optimization function
## One pass per cross-validation fold (max_loop folds in total).
for(lk_loop in 1:max_loop) {
##Read learn, test files
## plik / matryca are Polish for "file" / "matrix".
plik <- paste(skel_plik,lk_loop,".txt",sep="")
plik1 <- paste(skel_plik1,lk_loop,".txt",sep="")
# outfile<-paste(skel_outfile,"_",lk_loop,".RData",sep="")
cat("Iteration no = ",lk_loop,"\n")
cat("Training file = ",plik,"\n")
cat("Test file = ",plik1,"\n")
# cat("Output file = ",outfile,"\n")
matryca <- read.csv(plik,header=FALSE,sep="\t", colClasses="numeric")
matryca1 <- read.csv(plik1,header=FALSE,sep="\t", colClasses="numeric")
## Expose the training predictors as vectors In1, In2, ... and the observed
## response as `out` - this is what fitness() evaluates the equation
## against. (`out` is reassigned to the same value on every pass.)
for(i in 1:(dim(matryca)[2]-1)) {
assign(paste("In", i, sep=""), as.double(matryca[,i]))
out<-as.double(matryca[,dim(matryca)[2]])
}
#paramFunct <-vector(length=N_params, "numeric")
## Random starting point for the parameter vector.
paramFunct<-rnorm(N_params)/10
print("paramFunct")
print(paramFunct)
best_error<-100000000
cat("Iteration no = ",lk_loop,"\n")
cat("best_error INIT = ",best_error,"\n")
para1.backup<-paramFunct
X<-matryca
print("Check init values")
preliminary_output<-funct(paramFunct, equation)
cat("Preliminary output = ",preliminary_output,"\n")
## Symmetric GA search box: +/- 100x the largest |initial parameter|.
for_domain <- matrix(data=NA,nrow=length(paramFunct),ncol=2)
for(i in 1:length(paramFunct)) {
for_domain[i,1]<--100*max(abs(paramFunct))
for_domain[i,2]<-100*max(abs(paramFunct))
}
## EMAS initial population: parameter columns + energy (50) + fitness.
N_ROWS <- 50
initialPopulation <- matrix(nrow = N_ROWS, ncol = N_params + 2)
for (i in 1:N_ROWS) {
initialPopulation[i,] <- c(rnorm(N_params), 50, 0) #energy, fitness
initialPopulation[i,N_params+2] <- fitness(parameters = head(initialPopulation[i,], -2), equat = equation)
}
########################################
## Global optimization stage: either the evolutionary multi-agent system
## (alternating MEETING/BREEDING rounds) or the GA package, selected by
## the global flag `use_emas`.
population <- initialPopulation
if (use_emas){
for (i in 1:1000) {
population <- MEETING(population, N_params)
population <- BREEDING(population, N_params)
}
print(population)
## NOTE(review): takes the FIRST agent, not the best-fitness one - verify.
paramFunct <- head(population[1,], -2)
} else {
require(GA)
print("Running GA")
## NOTE(review): ga()'s `suggestions` is documented as a matrix of starting
## solutions - passing a bare vector may not seed as intended; confirm.
fit0 <- ga( suggestions = paramFunct,
fitness = function(x) funct(x, equation),
type = "real-valued",
maxiter = maxit_ga,
maxFitness = maxFitnessToStopGA,
selection = SELECTION,
crossover = CROSSOVER,
mutation = MUTATION,
min = for_domain[,1],
max = for_domain[,2]
)
paramFunct<-fit0@solution
print("FINAL RESULTS GA")
print(paramFunct)
}
########################################
## Optim with optim(BFGS)
## Local refinement of the global-search result; the extra `equat=`
## argument is forwarded by optim() to funct().
print("params on start BFGS")
print(paramFunct)
par_optim_NM<-paramFunct
try(fit1 <- optim(
paramFunct,
equat=equation,
fn=funct,
method="BFGS",
control=list(trace=opti_trace,maxit=maxit_optimx)
),TRUE)
print("FINAL RESULTS OPTIM(BFGS)")
try(print(fit1$par),TRUE)
print("WHOLE object")
try(print(fit1),TRUE)
try(par_optim_NM<-fit1$par,TRUE)
## Training ("ucz" = Polish "learn") and test errors; Inf until computed.
RMSE_ucz_NM<-Inf
RMSE_test_NM<-Inf
print("learn_error")
try(RMSE_ucz_NM<-RMSE(matryca,par_optim_NM, equation))
print(RMSE_ucz_NM)
print("test_error")
try(RMSE_test_NM<-RMSE(matryca1,par_optim_NM, equation))
print(RMSE_test_NM)
cat("Iteration no = ",lk_loop,"\n")
print("Final params")
try(print(par_optim_NM),TRUE)
try(all_params[lk_loop,]<-par_optim_NM,TRUE)
print(" ")
##This conditions should help with situation where test RMSE is NA what couse problems and stop current job.
if(is.na(RMSE_test_NM)){
RMSE_test_NM<-Inf
}
rmserror<-RMSE_test_NM
RMSE_ind[lk_loop]<-rmserror
cat("Iteration no = ",lk_loop,"\n")
cat("RMSE_test = ",rmserror,"\n")
# try(save(fit1,file=outfile),TRUE)
print("-------------------------------------")
}
##End of optimization function
##----------------------------------------------------
## End of loop loop lk_loop
print(" ")
print("SUMMARY")
print(" ")
## Mean test RMSE over the folds of this restart.
for(lk_loop in 1:max_loop) {
cat("RMSE no",lk_loop," = ",RMSE_ind[lk_loop],"\n")
RMSE_total<-RMSE_total+RMSE_ind[lk_loop]
}
RMSE_total<-RMSE_total/max_loop
print("-------------------------------------")
cat("avRMSE = ",RMSE_total,"\n")
print(" ")
print("All parameters :")
print(all_params)
print("-------------------------------------")
print("<<<<<<<<<< END OF LOOP >>>>>>>>>>>>")
print("-------------------------------------")
## Keep this restart if it beats the best mean test RMSE so far.
## NOTE(review): RMSE_val is never filled inside the loop - verify.
if (RMSE_total<best_RMSE_total){
best_all_params<-all_params
best_RMSE_val<-RMSE_val
best_RMSE_ind<-RMSE_ind
best_RMSE_total<-RMSE_total
}
}
#end of supra_optimization_loop
## Promote the best restart's results to the working variables.
all_params<-best_all_params
RMSE_val<-best_RMSE_val
RMSE_ind<-best_RMSE_ind
RMSE_total<-best_RMSE_total
print(" ")
print("OVERALL SUMMARY")
print(" ")
## Per-fold test RMSE of the winning restart.
for(lk_loop in 1:max_loop){
cat("RMSE no",lk_loop," = ",RMSE_ind[lk_loop],"\n")
}
print("-------------------------------------")
cat("avRMSE = ",RMSE_total,"\n")
print(" ")
print("All parameters :")
print(all_params)
print("-------------------------------------")
print("<<<<<<<<<< END OF LOOP >>>>>>>>>>>>")
print("-------------------------------------")
##Save equation and params into text files
## Redirect console output to optimizedEquation.txt while the report is
## written, then restore the console with sink() below.
sink(paste("optimizedEquation.txt", sep=""))
cat("Orig equation\n\n")
cat(equaionToOptim, sep=" ")
cat("\n\n")
cat("Equation with C \n\n")
cat(equation, "\n\n")
cat("Parameters for equation after optimization \n\n")
for(i in seq_len(dim(all_params)[1])) {
  cat(i, ": ", sep="")
  cat(all_params[i,], sep=";")
  cat("\n")
}
cat("\n")
cat("RMSE for each data sets in k-cross validataion method\n\n")
for(i in seq_len(max_loop)) {
  cat("RMSE_", i, ": ")
  cat(RMSE_ind[i], sep="; ")
  cat("\n")
}
cat("\n")
cat("Mean_RMSE: ", RMSE_total, "\n", sep="")
## BUG FIX: the sink was never closed, so everything printed after this
## point silently kept going to optimizedEquation.txt.
sink()
##Make Observed and Predicted table
obsPredFileName<-"Results.txt"
cat(paste("Observed\tPredicted\n", sep=""), file=obsPredFileName, append=FALSE)
for(i in seq_len(max_loop)) {
  ## Loop-local file name; the original reused the global `fileName` (the
  ## equations file read at the top of the script) and clobbered it.
  test_file <- paste(skel_plik1,i,".txt",sep="")
  testData <- read.csv(test_file,header=FALSE,sep="\t", colClasses="numeric")
  predObsMat<-tRes(
    matrix=testData,
    parameters=all_params[i,],
    equat=equation)
  write.table(predObsMat, sep="\t", file=obsPredFileName, col.names=FALSE, row.names=FALSE, append=TRUE)
}
|
66318ee92fd06925a81a25744000c1f48cfe6f8a | 4ce3760acea66abe3018e05fe44144409f37a705 | /cm107_shiny/ui.r | 567afadeedd3e447aa1d8b7be20cb0e04a3a3c30 | [] | no_license | ilgan/R_Projects | 2c2eaf0c01c48792e823209875afbe83c1aa24cf | b1ccc66a2c7c19124a415d33d7e6483612480ebb | refs/heads/master | 2021-08-24T02:59:00.291768 | 2017-12-07T19:15:43 | 2017-12-07T19:15:43 | 104,269,020 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 534 | r | ui.r | ui <- fluidPage(
  # Application title
  titlePanel("My liquor webpage"),
  # First side panel: a label plus a decorative image.
  sidebarPanel("This is my sidebar",
               img(src = "kitten.jpg", width = "100%")),
  # Second side panel: price range filter (CAD), bound to input$priceIn.
  sidebarPanel("Side Bar",
               sliderInput("priceIn", "Price of booze",
                           min = 0, max = 300, value = c(10,20), pre = "CAD")),
  # Beverage-type filter, bound to input$typeIn.
  # NOTE(review): radioButtons/mainPanel are passed straight to fluidPage
  # rather than wrapped in a sidebarLayout - confirm the intended layout.
  radioButtons("typeIn", "What kind of booze?",
               choices = c("BEER", "SPIRITS", "WINE"), selected = "WINE"),
  # Main output area: histogram, data preview table and a second plot,
  # rendered by the matching output IDs in server.R.
  mainPanel(plotOutput("Hist_AlcCont"),
            br(),br(),
            tableOutput("table_head"),
            plotOutput("Geom_P")
            )
)
777757d6875e2796422ed98005d34b6b953b2030 | 403d886e60b2654c01aa4fb1c384af578cf794d2 | /Lecture/ClassExercises.R | acc1046b067b7ac73c055a4d9f2c9ccb5e45dbb2 | [] | no_license | XiaoruiZhu/Forecasting-and-Time-Series-Methods | 5d51dfbee191b2ff0666face6bd5c833334e0666 | 91017a679e6ef9cd2847ed3e423829ec0a872fd5 | refs/heads/master | 2022-09-16T07:15:17.217862 | 2022-08-31T04:52:28 | 2022-08-31T04:52:28 | 244,001,677 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,488 | r | ClassExercises.R | library(forecast)
# Case 1 ------------------------------------------------------------------
case1=as.ts(scan("case1.txt"))
case1
par(mfrow=c(1,3))
plot(case1)
acf(case1)
pacf(case1)
fit <- arima(case1,order=c(1,0,0))
fit
par(mfrow=c(1,2))
plot(forecast(fit,h=30))
fitauto <- auto.arima(case1)
fitauto
plot(forecast(fitauto,h=30))
par(mfrow=c(1,1))
plot.ts(case1)
points(fitted(fit),pch=20,col="grey")
points(fitted(fit),type="l",col="grey")
points(fitted(fitauto),pch=20,col="red")
points(fitted(fitauto),type="l",col="red")
# Case 2 ------------------------------------------------------------------
case2=as.ts(scan("case2.txt"))
case2
par(mfrow=c(1,3))
plot(case2)
acf(case2)
pacf(case2)
fit1 <- arima(case2, order = c(1, 0, 0))
fit2 <- arima(case2, order = c(1, 0, 5))
fit3 <- arima(case2, order = c(1, 0, 6))
fit3
fit4 <- arima(case2, order = c(1, 0, 2))
fit4
fit5 <- arima(case2, order = c(1, 0, 1))
fit6 <- arima(case2, order = c(1, 0, 2))
fit_auto <- auto.arima(case2)
fit_auto
# Case 3 ------------------------------------------------------------------
case3=as.ts(scan("case3.txt"))
case3
par(mfrow=c(1,3))
plot(case3)
acf(case3)
pacf(case3)
fit1 <- arima(case3, order = c(2, 0, 0))
fit1
fit2 <- arima(case3, order = c(2, 0, 6))
fit2
fit3 <- arima(case3, order = c(2, 1, 3))
fit3
fit4 <- arima(case3, order = c(1, 1, 2))
fit4
BIC(fit4)
fit_auto <- auto.arima(case3)
fit_auto
par(mfrow=(c(1,2)))
plot(case3)
plot(diff(case3))
fit_auto <- auto.arima(case3)
fit_auto
|
c56bc49622e315f45e7e79dcc92e6d71cf8a5cb0 | 189e1fe2ab5a97830317bcdffb0584d12a1a5111 | /manuscripts/Lin2017/2.8.3-Removal-of-redundant-scaffolds/checkPtremuloidesRedundancy.R | 8af98bd4cd563f92c3227fad43d7b7fa61397c7d | [
"MIT"
] | permissive | UPSCb/UPSCb | 851c7b0a13602b8c3d524a90db7e22fcd9f3170f | 1f16689c078a90f48a3080ad7fe6fc3e87e30b8b | refs/heads/master | 2022-05-01T08:43:44.555339 | 2022-04-20T10:27:03 | 2022-04-20T10:27:03 | 74,640,960 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 14,752 | r | checkPtremuloidesRedundancy.R | #' ---
#' title: "P. tremuloides genome self-self Blast results parsing"
#' author: "Nicolas Delhomme"
#' date: "`r Sys.Date()`"
#' output:
#'   html_document:
#'     toc: true
#'     number_sections: true
#' ---
#' # Setup
#' Set the working dir
setwd("/mnt/picea/projects/aspseq/tremula_tremuloides_comp/BLAST")
#' ```{r set up, echo=FALSE}
#' knitr::opts_knit$set(root.dir="/mnt/picea/projects/aspseq/tremula_tremuloides_comp/BLAST")
#' ```
# #' Load libraries
## Biostrings/IRanges for sequence ranges, LSD for heatscatter plots,
## RColorBrewer for palettes, VennDiagram for the overlap figure.
suppressPackageStartupMessages(library(Biostrings))
suppressPackageStartupMessages(library(LSD))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(VennDiagram))
#' Source helpers
## Provides readBlast() and plot.multidensity() used below.
source("~/Git/UPSCb/src/R/blastUtilities.R")
source('~/Git/UPSCb/src/R/plot.multidensity.R')
#' Create a palette
pal <- brewer.pal(5,"Dark2")
# #' # Initial attempt
# #' To enhance the speed, I have split the original file in subset of
# #' 10000 lines. The issue is though, that some scaffold HSPs will be
# #' in separate files and need to be recalculated afterwards
# PotraBLAST <- mclapply(dir(".",pattern=".*\\.blt"),
# readBlast,ignoreSelf = TRUE,
# format=c("query.id",
# "subject.id",
# "percent.identity",
# "alignment.length",
# "mismatches",
# "gap.opening",
# "query.start",
# "query.end",
# "subject.start",
# "subject.end",
# "e.value",
# "bit.score",
# "query.length",
# "subject.length"),
# plot = FALSE,mc.cores=4L)
#
# #' # Process all the chunks
# #' And re-calculate the scaffold values which occured when the original
# #' file was split
# dat <- do.call(rbind,mclapply((1:length(PotraBLAST)),function(i,bl){
#
# ## get the scaffolds in the previous chunk
# if(i==1){
# p.scfs = ""
# } else {
# p.scfs <- unique(bl[[i-1]][["df"]]$query.id)
# }
#
# ## get the scaffolds in the chunk
# scfs <- unique(bl[[i]][["df"]]$query.id)
#
# ## get the scaffolds in the next chunk
# if(i==length(bl)){
# n.scfs <- ""
# } else {
# n.scfs <- unique(bl[[i+1]][["df"]]$query.id)
# }
#
# ## keep the scf we want
# scf <- c(setdiff(scfs,p.scfs),intersect(scfs,n.scfs))
#
# ## get the data
# if(i==length(bl)){
# dat <- bl[[i]][["df"]][bl[[i]][["df"]]$query.id %in% scf,]
# } else {
# dat <- rbind(bl[[i]][["df"]][bl[[i]][["df"]]$query.id %in% scf,],
# bl[[i+1]][["df"]][bl[[i+1]][["df"]]$query.id %in% scf,])
# }
#
# ## identify the doublon
# pos <- which(duplicated(dat[,c("query.id","subject.id")]))
# if(length(pos)){
# q <- dat$query.id[pos]
# s <- dat$subject.id[pos]
# dat <- dat[- which(dat$query.id== q & dat$subject.id==s), ]
#
# ## recalculate the cumulative coverage
# hsp <- rbind(bl[[i]][["blf"]],bl[[i+1]][["blf"]])
# hsp <- hsp[hsp$query.id==q & hsp$subject.id==s,]
#
# dat <- rbind(dat,data.frame(query.id=q,
# subject.id=s,
# query.cum.cov=sum(width(reduce(
# IRanges(
# start=ifelse(hsp$query.start>hsp$query.end,hsp$query.end,hsp$query.start),
# end=ifelse(hsp$query.start<hsp$query.end,hsp$query.end,hsp$query.start))
# )))/hsp$query.length[1],
# subject.cum.cov=sum(width(reduce(
# IRanges(
# start=ifelse(hsp$subject.start>hsp$subject.end,hsp$subject.end,hsp$subject.start),
# end=ifelse(hsp$subject.start<hsp$subject.end,hsp$subject.end,hsp$subject.start))
# )))/hsp$subject.length[1],stringsAsFactors=FALSE))
# }
#
# return(dat)
# },PotraBLAST,mc.cores=4))
#
# #' The obtained object contains all reported hits per scaffold (its cumulative
# #' coverage).
# #' We sort it first by by query.coverage - this minimally matters as hit are
# #' probably duplicated, e.g. it is likely that
# #' scf1, scf2, 1, 0.5 will also appear as scf2, scf1, 0.5, 1 if scaffolds are
# #' very similar.
# s.dat <- dat[order(dat$query.cum.cov,decreasing=TRUE),]
# f.dat <- s.dat[match(unique(s.dat$query.id),s.dat$query.id),]
#
# #' However, to avoid loosing information if some scaffolds are not the best hit
# #' or are not reported, we check the assumption above.
# seq.annot <- read.delim(
# "/mnt/picea/storage/reference/Populus-tremula/v1.0/fasta/Potra01-genome.fa.fai",
# header=FALSE)[,1:2]
# colnames(seq.annot) <- c("scf","len")
#
# #' What are the combination of subject-query?
# sprintf("Out of %s unique scaffolds, %s are present as either query or subject",
# length(union(s.dat$query.id,s.dat$subject.id)),
# length(intersect(s.dat$query.id,s.dat$subject.id)))
#
# qs <- paste(s.dat$query.id,s.dat$subject.id,sep="-")
# sq <- paste(s.dat$subject.id,s.dat$query.id,sep="-")
#
# #' Plotting takes a long time and the results are symmetric
# #' anyway
# sprintf("There are %s common query-subject and %s unique to either set",
# sum(qs %in% sq),
# sum(!qs %in% sq))
#
# #' Get them all, re-order, and keep the best hit.
# #' We need to create a tmp object for reverting the data.frame as
# #' rbind on data.frames uses the column names to do the binding, not their
# #' position, unlike applying rbind to a matrix.
# r.dat <- s.dat[!sq %in% qs,c(2,1,4,3)]
# colnames(r.dat) <- colnames(s.dat)
# f.dat <- rbind(s.dat[qs %in% sq,],
# s.dat[!qs %in% sq,],
# r.dat)
#
# f.dat <- f.dat[order(f.dat$query.cum.cov,decreasing=TRUE),]
# f.dat <- f.dat[match(unique(f.dat$query.id),f.dat$query.id),]
#
# #' Now have a look at the query cumulative coverage distribution
# plot(density(f.dat$query.cum.cov),col=pal[1],lwd=2,
# main="Query Cumulative coverage")
#
# comparisonplot(f.dat$query.cum.cov,f.dat$subject.cum.cov,
# xlab="query cum. cov.",ylab="subject cum. cov.",
# main="")
#
# heatscatter(f.dat$query.cum.cov,f.dat$subject.cum.cov,
# xlab="query cum. cov.",ylab="subject cum. cov.",
# main="")
#
# abline(h=0.97,v=0.97,col=2,lty=2,lwd=2)
#
# #' This is more as expected, a lot of small sequences are almost fully covered
# #' in big sequences, but there's otherwise a wide distribution.
# sel <- f.dat$query.cum.cov == 1 & f.dat$subject.cum.cov == 1
# sprintf("There are %s scaffolds that appear to be fully percent redundant",
# sum(sel))
#
# #' Let's look at these in a pairwise alignment fashion. Subset the necessary data
# red <- f.dat[sel,]
#
# #' Then load the sequences
# seq <- readDNAStringSet("/mnt/picea/storage/reference/Populus-tremula/v1.0/fasta/Potra01-genome.fa")
#
# #' And perform a set if pairwise alignments. This shows the flaw of this approach
# #' which has ignored the percentage of identity of the HSPs. It nevertheless
# #' revealed that a lot of sequences are contained within other sequence with
# #' an high level of identity (>80%). This is a good sign that the assembly
# #' managed to integrate a reasonable number of somewhat repetitive elements.
# pA <- pairwiseAlignment(reverseComplement(seq[[red[1,2]]]),seq[[red[1,1]]])
# length(pattern(pA))
# as.character(pattern(pA))
# as.character(subject(pA))
# nchar(pA)
# length(mismatch(subject(pA))[[1]])
# length(mismatch(pattern(pA))[[1]])
# indel(pA)
#' # Analysis
#' We filter the HSPs based on their percentage of
#' identity and then construct the cumulative coverage.
blt <- readBlast("Potrs_self.txt",ignoreSelf = TRUE,
format=c("query.id",
"subject.id",
"percent.identity",
"alignment.length",
"mismatches",
"gap.opening",
"query.start",
"query.end",
"subject.start",
"subject.end",
"e.value",
"bit.score",
"query.length",
"subject.length"),
plot = FALSE)$blf
#' Reading it in the chunks would be faster but would require
#' to integrate the percent identity filtering in the
#' blastUtility.R helper for it to be efficient.
#' ```{r devnull, echo=FALSE, eval=FALSE}
#' ```
#' The overall density of the percentage identity of all the HSPs is as
#' follows. It is really interesting to observe these very defined peaks
#' towards the right end of the graph. We have a number of perfect hits, some
#' more hits around 97% identity (possible haplotypes), a tinier peak at 95%
#' and a shoulder around 93%. The bulge of the remaining hits centers around
#' 88%. The peak intervals are surprisingly constant and agrees well with the
#' estimated heterozygosity rate. Concerning the peaks lower than 97%, this could
#' lead to wild hypotheses :-). The plot is highly similar to that of
#' P. tremula, which is great! The broad peak at 88% is much lower in comparison,
#' which tallies well with the quality of both genomes (P. tremula raw reads
#' quality was worse than P. tremuloides').
plot(density(blt$percent.identity),main="HSPs percentage identity",
col=pal[1],lwd=2)
abline(v=c(95:100),lty=2,col="grey")
#' Next we define a function (which should be integrated in the blastUtility,R)
#' that filters HSPs based on percent identity and calculate and sort the
#' obtained cumulative coverage
## For HSPs with at least `perc.ident` percent identity, compute - per
## scaffold pair - the fraction of the query and of the subject covered by
## the merged HSPs.
## `blt`: per-HSP data frame (query/subject id, start/end, length, identity).
## Returns a data.frame(query.id, subject.id, query.cum.cov, subject.cum.cov)
## sorted by decreasing query then subject coverage.
getCumulativeCoverage <- function(blt,perc.ident=95){
  blf <- blt[blt$percent.identity>=perc.ident,]
  ## One key per scaffold pair; split() below groups HSPs by this key, and
  ## because split() orders groups by factor level, the group order matches
  ## sort(unique(ids)) == suids.
  ids <- paste(blf$query.id,blf$subject.id,sep="+")
  suids <- sort(unique(ids))
  ## The ifelse() calls orient every HSP so that start <= end (minus-strand
  ## hits come with swapped coordinates); IRanges::reduce() then merges the
  ## overlapping intervals per pair before the covered widths are summed
  ## and divided by the scaffold length.
  df <- data.frame(query.id=sub("\\+.*","",suids),
                   subject.id=sub(".*\\+","",suids),
                   query.cum.cov=sum(width(reduce(split(
                     IRanges(
                       start=ifelse(blf$query.start>blf$query.end,blf$query.end,blf$query.start),
                       end=ifelse(blf$query.start<blf$query.end,blf$query.end,blf$query.start)),
                     ids))))/blf$query.length[match(sub("\\+.*","",suids),blf$query.id)],
                   subject.cum.cov=sum(width(reduce(split(
                     IRanges(
                       start=ifelse(blf$subject.start>blf$subject.end,blf$subject.end,blf$subject.start),
                       end=ifelse(blf$subject.start<blf$subject.end,blf$subject.end,blf$subject.start)),
                     ids))))/blf$subject.length[match(sub(".*\\+","",suids),blf$subject.id)],
                   stringsAsFactors=FALSE)
  return(df[order(df$query.cum.cov,df$subject.cum.cov,decreasing=TRUE),])
}
#' Now iteratively get the cumulative coverage
## One coverage table per identity threshold, 95% through 100%.
res <- mclapply(seq(95,100,1),function(p,blt){
  return(getCumulativeCoverage(blt,p))
},blt,mc.cores=6L)
names(res) <- paste("Perc","Ident",seq(95,100,1),sep=".")
#' And have a look at the number of scaffold pairs linked by HSPs
barplot(sapply(res,nrow),main="# of linked scaffold pairs")
#' Have a look at the relationship query - subject coverage
## Side-effect-only loop (plots); the return value is discarded.
dev.null <- sapply(1:length(res),function(i,res){
  re <- res[[i]]
  comparisonplot(re$query.cum.cov,
                 re$subject.cum.cov,
                 xlab="query cumulative coverage",
                 ylab="subject cumulative coverage",
                 main=names(res)[i])
},res)
#' Have a look at number of unique scaffolds involved
scfs <- lapply(1:length(res),function(i,res){
  re <- res[[i]]
  unique(sort(c(re$query.id,re$subject.id)))
},res)
names(scfs) <- names(res)
#' And how do they overlap?
#' As expected the lower percent identity contains all the others and the amount
#' of scaffolds decreases with increasing identity. Nevertheless the vast
#' majority of scaffolds is present at a 100% identity.
## venn.diagram() supports at most 5 sets, hence the 96-100% subset.
plot.new()
grid.draw(venn.diagram(scfs[2:6],
                       filename=NULL,
                       col=pal[1:5],
                       category.names=names(scfs)[2:6])
)
#' Let us use the subset of 100% identity to identify redundant and artefactual
#' scaffolds; i.e. those having a 100% query cumulative coverage (redundant). If
#' the subject cumulative coverage is also a 100%, then there are considered
#' artifacts.
sel <- res[["Perc.Ident.100"]]$query.cum.cov == 1
sprintf("There are %s scaffolds that are redundant",sum(sel))
#' Most of them are contained
plot(density(res[["Perc.Ident.100"]]$subject.cum.cov[sel]),
     main="subject coverage of the redundant scaffolds")
#' And none are artefactual
sel <- sel & res[["Perc.Ident.100"]]$subject.cum.cov == 1
sprintf("%s of which are artefactual",sum(sel))
#' Extend the annotation
## Drop any pre-existing redundancy columns before re-annotating.
annot <- read.delim("/mnt/picea/storage/reference/Populus-tremuloides/v1.0/annotation/Potrs01-meta-matrix.tsv")
annot <- annot[,- grep("redund",colnames(annot))]
annot$redundant <- annot$ID %in%
  res[["Perc.Ident.100"]]$query.id[res[["Perc.Ident.100"]]$query.cum.cov == 1]
#' Use a value of 97% identity and 97% coverage
#' Note that some scaffolds will be duplicated - i.e. have several hits.
#' We only report the best one here.
## match() keeps only the first (best, since the table is sorted) hit.
dat <- res[["Perc.Ident.97"]]
sel <- match(annot$ID,dat$query.id)
annot$haplotype.ID <- dat$subject.id[sel]
annot$haplotype.query.cum.cov <- dat$query.cum.cov[sel]
annot$haplotype.subject.cum.cov <- dat$subject.cum.cov[sel]
annot$putative.haplotype <- annot$haplotype.query.cum.cov >= 0.97
## Putative haplotypes: covered >= 97% but not already flagged redundant.
sel <- annot$putative.haplotype & ! is.na(annot$putative.haplotype) & ! annot$redundant
plot.multidensity(list(all=na.omit(annot$haplotype.subject.cum.cov),
                       hap=annot$haplotype.subject.cum.cov[sel]),
                  main="subject coverage of the putative haplotype scaffolds",lwd=2,
                  xlab="subject coverage")
plot.multidensity(list(all=na.omit(annot$haplotype.query.cum.cov),
                       hap=annot$haplotype.query.cum.cov[sel]),
                  main="query coverage of the putative haplotype scaffolds",lwd=2,
                  xlab="query coverage")
sprintf("There are %s putative haplotype scaffolds",sum(sel))
#' # Save annotation
write.table(annot,"row.names"=FALSE,quote=FALSE,sep="\t",
            file="/mnt/picea/storage/reference/Populus-tremuloides/v1.0/annotation/Potrs01-meta-matrix.tsv")
|
715892644b1e55f4beda59117e9dc12ef497e095 | c40a739957c4e0ea5c3f25f7aeca7d39f1fc1191 | /videos-scripts/wk2_functions.R | a48ae4acea0d8a6a20fff03507baa944705a9f28 | [] | no_license | Jintram/workshop_R_aamw | 401a1c4172929bff0e2ce8b04e4aca07fc24d2ef | ceed6d44ce21708dc925be1c905ebbaab6ceab50 | refs/heads/master | 2021-07-11T16:11:06.598680 | 2021-03-25T14:11:39 | 2021-03-25T14:11:39 | 241,334,351 | 0 | 0 | null | 2020-04-15T12:54:22 | 2020-02-18T10:35:30 | R | UTF-8 | R | false | false | 1,494 | r | wk2_functions.R |
# what is a function? --> pre-defined code that performs certain task
# why use a function? --> to perform repetitive tasks
# --> create "lego blocks" to build more complicated code
# e.g. load and normalize sample data
# in fact, we're constantly using functions
# e.g. print, sqrt, mean, sum, etc
# and there are many function written already
# that allow you to perform all kinds of actions
# most of the R code will revolve around calling functions
# let's see how to write our own function
# we've briefly talked about functions before,
sqrt(5)
# how to make our own?
# simplest function
example_function <- function() {
print("hello")
}
# input and output
example_function_2 <- function(a) {
b = a^2
c = a+a
return(b)
}
# note that a and b are "made" inside the function, and are also "forgotten"
# once the function is done
# function with multiple arguments (defaults), more complex code,
# and more complex return arguments (e.g. list)
yet_another_function <- function(a=2, b=4, c=1, some_other_parameter=200, blabla='hallo') {
z = a+b+c+some_other_parameter^2
print(blabla)
return(z)
}
# Note that core of course is not necessarily the lectures,
# but perhaps even more the exercises.
# Some of those might be quite hard; so it might take quite
# a while to solve.
# Maybe you get stuck after only two exercises -- for that
# purpose we have the (online) meetings.
|
b0d1e0c6c64ac9f9d56f398401b97fe2f309d9eb | db8d5421d2f4bdde84eff4f816a27d931dd27b1a | /dREG_paper_analyses/train_svm/run_svm/scan_gm12878.R | 5cba9c45de64c4710ebcf2ebb57a0fa375fd7939 | [] | no_license | omsai/dREG | 2c6ac5552e99751634884ea86aa8a736c26b5de0 | ab6dc2f383772deb67f0c445c80e650cc054e762 | refs/heads/master | 2023-08-16T04:24:40.797588 | 2021-10-11T10:36:45 | 2021-10-11T10:36:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 788 | r | scan_gm12878.R | require(dREG)
## Read PRO-seq data.
## Plus/minus strand read coverage in bigWig format.
gs_plus_path <- "groseq_plus.bigWig"
gs_minus_path <- "groseq_minus.bigWig"
## Loads the pre-trained dREG SVM (presumably an object named `asvm`,
## used below) - TODO confirm the object name stored in the RData file.
load("asvm.intersDNase.getTrainSet.RData")#"asvm.RData")
## Now scan all positions in the genome ...
inf_positions <- get_informative_positions(gs_plus_path, gs_minus_path, depth= 0, step=50, use_ANDOR=TRUE, use_OR=FALSE) ## Get informative positions.
## Multi-scale feature windows for the genomic data model.
gdm <- genomic_data_model(window_sizes= c(10, 25, 50, 500, 5000), half_nWindows= c(10, 10, 30, 20, 20))
pred_val<- eval_reg_svm(gdm, asvm, inf_positions, gs_plus_path, gs_minus_path, batch_size= 10000, ncores=60)
final_data <- data.frame(inf_positions, pred_val)
## Avoid scientific notation in the bedGraph coordinates.
options("scipen"=100, "digits"=4)
write.table(final_data, file="gm12878.predictions.bedGraph", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
|
18f585528f8ee09009d2179c15b8141a62137a05 | 749645450793e77852f7954aaa399cb1c9df1146 | /plot2.R | 30a0c630b3766cfe03fce681c4e6667dd089798c | [] | no_license | PozhitkovaKristina/ExData_Plotting1 | 6a1b17671b61051bd3ce192220baf7e2602a1cd9 | e64cc29c6feb0db4fa631454c46eba15335a83ff | refs/heads/master | 2020-12-03T03:33:18.446216 | 2017-01-26T11:20:51 | 2017-01-26T11:20:51 | 45,738,489 | 1 | 0 | null | 2015-11-07T13:56:06 | 2015-11-07T13:56:06 | null | UTF-8 | R | false | false | 498 | r | plot2.R |
db <- read.csv2("household_power_consumption.txt", sep = ";", dec = "." , header = TRUE, na.strings = "?")
db <- db[grep("^[1,2]/2/2007", db$Date), ]
db$DateTime <- strptime(paste(db$Date, db$Time), "%d/%m/%Y %H:%M:%S")
db$DateTime
Sys.setlocale("LC_TIME", "US")
png (file = "plot2.png", width = 480, height = 480)
plot(db$DateTime, db$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
#Sys.setlocale("LC_TIME", "Russian")
|
b1ae7b19e5758a407afb38dad80d3bb1f7827266 | def01777d98026b7b71f798f02573b919eda71c0 | /final/nfl_half/EDA/model/predFunctions.R | dc2d597da82986777b3fe8b616bf77be78a23a72 | [] | no_license | lcolladotor/lcollado753 | 1f2cec6ba1b8ee931852d532dd527972b28d8fd7 | 41ceb96f7bd8ac9d1a50ccd9a75e9810d638dba8 | refs/heads/master | 2016-09-05T11:46:59.534503 | 2013-03-24T01:15:24 | 2013-03-24T01:15:24 | 8,037,076 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,467 | r | predFunctions.R | ## Gets the paired predictions
## Paired win probabilities from a model fitted on the logit scale.
##
## `newdata` must hold consecutive A/B pairs of rows (team A on the odd
## rows, team B on the even rows).  For each pair the two linear predictors
## are differenced and mapped through the inverse logit.
##
## f       -- fitted model accepted by predict()
## newdata -- data frame with an even number of rows (A/B pairs)
## average -- if TRUE, additionally computes a league-average linear
##            predictor from the column means (experimental)
## Returns a numeric vector of length nrow(newdata):
##   c(P(A1 beats B1), P(B1 beats A1), P(A2 beats B2), ...)
getPred <- function(f, newdata, average=FALSE) {
  idx <- rep(c(TRUE, FALSE), nrow(newdata)/2)   # TRUE marks the "team A" rows
  if(average){
    ## Hm... maybe later I'll check if I can use this to improve the preds
    cnames <- colnames(newdata)
    cnames <- cnames[!cnames %in% c("teamA", "teamB", "win", "local", "resumes", "date")]
    toavg <- newdata[, cnames]
    leagueavg <- colMeans(toavg)
    leaguedf <- data.frame(matrix(leagueavg, nrow=1))
    colnames(leaguedf) <- names(leagueavg)
    leaguedf$local <- FALSE
    leaguedf$resumes <- FALSE
    # NOTE(review): logitAvg is computed but not used below -- confirm intent.
    logitAvg <- predict(f, newdata=leaguedf)
  }
  res <- sapply(which(idx), function(i) {
    # BUG FIX: the original predicted on the global `info2012` instead of
    # the `newdata` argument, so the function silently ignored its input.
    logitA <- predict(f, newdata=newdata[i, ])
    logitB <- predict(f, newdata=newdata[i+1, ])
    p1 <- ilogit(logitA - logitB)   # P(team A beats team B)
    c(p1, 1-p1)
  })
  as.vector(res)
}
## This function evaluates a prediction according to a given number of breaks (bin)
##
## Calibration check: bins the predicted probabilities into `bin` equal-width
## intervals and compares each interval's midpoint against the observed win
## rate (`truth$win`) inside it.  A well-calibrated model lies on y = x.
##
## pred  -- numeric vector of predicted probabilities
## bin   -- number of equal-width bins passed to cut()
## truth -- data frame with a 0/1 column `win`, aligned with `pred`
## plot  -- if TRUE, draw observed rate vs. midpoint plus the y = x line
## Returns a list: `centers` (interval midpoints) and `real` (observed rates).
evalPred <- function(pred, bin, truth, plot=TRUE) {
  groups <- cut(pred, bin)
  real <- tapply(truth$win, groups, mean)
  # Recover the numeric interval endpoints from cut()'s "(a,b]" labels.
  endpoints <- cbind(lower = as.numeric( sub("\\((.+),.*", "\\1", names(real)) ), upper = as.numeric( sub("[^,]*,([^]]*)\\]", "\\1", names(real)) ))
  intervalMean <- rowMeans(endpoints)
  if(plot) {
    plot(intervalMean, real)
    abline(a=0, b=1, col="red")   # perfect-calibration reference line
  }
  list(centers=intervalMean, real=real)
}
|
b98723bc6e316586954039f371bc64aa899f5173 | 7e6e77d30cad820887785e1396c00d9d2ac80f8b | /R/createInputs.R | 8593fc56f15e04cf03cc8635ca2fa6ebc974c524 | [] | no_license | GregorDeCillia/shinyLikert | 14a2d28f3c431619310cc16b3784a692d9158dce | 6386691ac15c97640f23b4ede8b8da443320322d | refs/heads/master | 2021-01-10T17:41:27.557758 | 2016-05-13T16:19:17 | 2016-05-13T16:19:17 | 50,327,074 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,524 | r | createInputs.R | createInputs = function( id,
dropdown_factors,
split_factors,
row_factors,
column_factors,
currentFactors,
getInput,
height,
group
)
{
reactive({
# force reactivity with respect to dropdown choices
x = currentFactors()
# create dropdown menus
out = create_dropdown_selector( id, dropdown_factors,
row_factors, column_factors,
currentFactors()
)
# in case split_factors are given, create a multidropdown
if( ! is.null( split_factors ) ){
selection = getInput( ".split_factors", split_factors )
out$mulipanel = selectInput(
inputId = paste0( id, ".split_factors" ),
label = "split factors",
# make all factors available, that are not used by dropdowns already
choices = setdiff(
union(
names( row_factors),
names( column_factors )
),
dropdown_factors
),
selected = selection,
multiple = TRUE )
}
if( !is.null( group ) ){
selection = getInput( ".group", group )
out$mulipanel = selectInput(
paste0( id, ".group" ),
"grouping factor",
setdiff(
names( row_factors ),
dropdown_factors
),
selection
)
}
# return as list
return( out )
})
}
|
76793e47b7bed7f141c0140fdead7adb57287eda | f7853150ada1913fdc6fe8632b37465a9d19f920 | /script/weather.R | 0136bbbcdaab816625c71d774944aab86cde73a5 | [] | no_license | AhnMonet/2nd_project_R | a5fa8bc26d3b2cc78f29fa8c0ac3396fa6fa30bb | 204a850d2490a06bd6696d3b389fff15cf3fc137 | refs/heads/master | 2020-04-14T20:04:03.200252 | 2019-01-04T08:37:37 | 2019-01-04T08:37:37 | 164,080,423 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,821 | r | weather.R | library(ggplot2)
library(gridExtra)
library(dplyr)
hot <- read.csv("Data/weather/hot_data.csv", header = T)
rain <- read.csv("Data/weather/rain.csv", header = T)
dust <- read.csv("Data/weather/yellowDust.csv", header = T)
avg <- read.csv("Data/weather/average30Years.csv", header = T)
temp90 <- read.csv("Data/weather/temp~1990.csv", header = T)
temp00 <- read.csv("Data/weather/temp~2000.csv", header = T)
temp10 <- read.csv("Data/weather/temp~2010.csv", header = T)
rainVar <- read.csv("Data/weather/rainVar.csv", header = T)
hot <- rename(hot,
"hot" = "tot")
rain <- rename(rain,
"rain" = "tot")
dust <- rename(dust,
"dust" = "tot")
weather <- as.data.frame(c(rain[1], hot[14], rain[14], dust[14]))
windows()
par(mfrow = c(2, 2))
# 같이
plot(x = weather$year, y = weather$rain, type = "l", col = "skyblue", ylim = c(0, 130),
xlab = "", ylab = "", lwd = 2)
lines(x = weather$year, y = weather$dust, type = "l", col = "orange", lwd = 2)
lines(x = weather$year, y = weather$hot, type = "l", col = "red", lwd = 2)
title(main="1980 ~ 2017년 강수 / 황사 / 폭염 일수" , col.main="darkgreen",font.main=4)
title(xlab="YEAR", col.lab="black")
title(ylab="DAY",col.lab="black")
# 폭염
plot(x = weather$year, y = weather$hot, type = "l", col = "red",
xlab = "", ylab = "", lwd = 2)
title(main="1980 ~ 2017년 폭염일수" , col.main="darkgreen",font.main=4)
title(xlab="YEAR", col.lab="black")
title(ylab="DAY",col.lab="black")
# 강수
plot(x = weather$year, y = weather$rain, type = "l", col = "skyblue",
xlab = "", ylab = "", lwd = 2)
title(main="1980 ~ 2017년 강수일수" , col.main="darkgreen",font.main=4)
title(xlab="YEAR", col.lab="black")
title(ylab="DAY",col.lab="black")
# 황사
plot(x = weather$year, y = weather$dust, type = "l", col = "orange",
xlab = "", ylab = "", lwd = 2)
title(main="1980 ~ 2017년 황사일수" , col.main="darkgreen",font.main=4)
title(xlab="YEAR", col.lab="black")
title(ylab="DAY",col.lab="black")
###
# Top-10 wettest and driest regions, each compared against the national mean
# precipitation (dashed line).
wettest <- avg %>%
  arrange(desc(rain)) %>%
  head(10)
driest <- avg %>%
  arrange(rain) %>%
  head(10)

windows()

plot_wettest <- ggplot(wettest, aes(region, rain, fill = region)) +
  geom_col() +
  labs(title = "강수량이 높은 지역 TOP 10",
       y = "강수량") +
  geom_hline(yintercept = mean(avg$rain), linetype = "dashed") +  # national mean
  theme(legend.position = "left",
        plot.title = element_text(hjust = 0.5),
        axis.title.x = element_blank())

plot_driest <- ggplot(driest, aes(region, rain, fill = region)) +
  geom_col() +
  labs(title = "강수량이 낮은 지역 TOP 10",
       y = "강수량") +
  geom_hline(yintercept = mean(avg$rain), linetype = "dashed") +
  theme(plot.title = element_text(hjust = 0.5),
        axis.title.x = element_blank())

grid.arrange(plot_wettest, plot_driest, ncol = 2)
##
# High/low temperature normals for the 7 major cities over the three
# 30-year windows.  `high.row` distinguishes the high vs. low series.
major_cities <- c("서울", "부산", "인천", "대구", "대전", "광주", "울산")
temp90 <- temp90 %>%
  group_by(high.row) %>%
  filter(region %in% major_cities)
temp00 <- temp00 %>%
  group_by(high.row) %>%
  filter(region %in% major_cities)
temp10 <- temp10 %>%
  group_by(high.row) %>%
  filter(region %in% major_cities)

# BUG FIX: the original drew geom_hline(yintercept = mean(temp$temp), ...),
# but no object `temp` is defined anywhere in this script, so the plots
# errored.  Use the overall mean across the three normal periods instead.
overall_mean_temp <- mean(c(temp90$temp, temp00$temp, temp10$temp))

windows()
p1 <- ggplot(temp90, aes(x = region, y = temp, fill = high.row)) +
  geom_col() +
  labs(title = "1961 ~ 1990",
       y = "최고/최저 기온( ℃ )") +
  geom_hline(yintercept = overall_mean_temp, linetype = "dashed") +
  geom_hline(yintercept = 30, linetype = "dashed", col = "red") +   # hot reference
  geom_hline(yintercept = 10, linetype = "dashed", col = "blue") +  # cold reference
  theme(plot.title = element_text(hjust = 0.5, face = "bold"),
        axis.title.x = element_blank(),
        legend.position = "none")
p2 <- ggplot(temp00, aes(x = region, y = temp, fill = high.row)) +
  geom_col() +
  labs(title = "1971 ~ 2000",
       y = "최고/최저 기온( ℃ )") +
  geom_hline(yintercept = overall_mean_temp, linetype = "dashed") +
  geom_hline(yintercept = 30, linetype = "dashed", col = "red") +
  geom_hline(yintercept = 10, linetype = "dashed", col = "blue") +
  theme(plot.title = element_text(hjust = 0.5, face = "bold"),
        axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        legend.position = "none")
p3 <- ggplot(temp10, aes(x = region, y = temp, fill = high.row)) +
  geom_col() +
  labs(title = "1981 ~ 2010",
       y = "최고/최저 기온( ℃ )") +
  geom_hline(yintercept = overall_mean_temp, linetype = "dashed") +
  geom_hline(yintercept = 30, linetype = "dashed", col = "red") +
  geom_hline(yintercept = 10, linetype = "dashed", col = "blue") +
  theme(plot.title = element_text(hjust = 0.5, face = "bold"),
        axis.title.y = element_blank(),
        axis.title.x = element_blank())
grid.arrange(p1, p2, p3, ncol=3)
|
f562fd79f16970259e61cc23e4bd4126a809a4af | de83a2d0fef79a480bde5d607937f0d002aa879e | /P2C2M.SNAPP/R/create_xml2.R | cad2f257ebae8f8459e2cffa6d0eb72ba1f1c298 | [] | no_license | P2C2M/P2C2M_SNAPP | 0565abc0ea93195c9622dc5d4e693ccde17bebc7 | 94cd62285419a79f5d03666ec2ea3e818803d0db | refs/heads/master | 2020-05-07T18:54:40.440682 | 2020-01-10T15:59:45 | 2020-01-10T15:59:45 | 180,788,408 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,557 | r | create_xml2.R | ### Function for creating SNAPP xml files from simulated data files ###
# Build SNAPP xml input files from Arlequin (.arp) simulation output found in
# the subdirectories of the current working directory.
#
# xml_file    -- xml template/name handed through to write_xml()
# populations -- population labels handed through to write_xml()
# pops_n      -- integer vector: number of individuals per population
# snps        -- number of SNP loci (determines how many header lines to skip)
# delete_sims -- if TRUE, remove the simulation directories and helper files
#                (*.par, seed.txt) after the xml files are written
#
# Side effects only: writes xml via write_xml() and optionally deletes files.
create_xml <- function(xml_file, populations, pops_n, snps, delete_sims){
  print("Writing xml files")
  wd <- getwd()
  dirs <- list.dirs(full.names = FALSE, recursive = FALSE) # get directories with simulations
  dirs <- lapply(dirs, function(x) paste(wd, x, sep = "/")) # format path
  sim_files <- lapply(dirs, function(x) list.files(x, pattern = "*.arp", full.names = TRUE, recursive = FALSE)) # get simulation file paths
  for (arp in sim_files){ # for each arp file
    arp_file <- file(unlist(arp), "r") # open connection to arp file
    # Discard the fixed file header; its length depends on the SNP count.
    readLines(arp_file, n = (16 + 2 * snps)) # read in header lines
    pop_lines <- list()
    for (p in 1:length(pops_n)){ # for each population
      readLines(arp_file, n = 5) # skip header lines
      plines <- readLines(arp_file, n = pops_n[p]) # read in snp lines
      pop_lines[[p]] <- plines # add snp lines to list
    }
    close(arp_file) # close file connection
    # Lines alternate between the two alleles; the genotype string is the
    # 2nd space-separated token of the 3rd tab-separated field of each line.
    a1 <- lapply(pop_lines, function(x) lapply(x[c(TRUE, FALSE)], function(x) strsplit(strsplit(x, "\t")[[1]][3], " ")[[1]][2])) # get sequences for allele 1
    a2 <- lapply(pop_lines, function(x) lapply(x[c(FALSE, TRUE)], function(x) strsplit(strsplit(x, "\t")[[1]][3], " ")[[1]][2])) # get sequences for allele 2
    write_xml(wd, arp, xml_file, pops_n, populations, snps, a1, a2)
  }
  if (delete_sims == TRUE){
    print("Deleting intermediate files")
    unlink(unlist(dirs), recursive = TRUE)
    unlink("*.par")
    unlink("seed.txt")
  }
}
|
bf3ca067819f10f1466577576f026335f2bc6724 | 6429e5df8a751bf3fa24bfe31efcadfac7feb390 | /functions/tinker/thresh_mean.R | cc8cf3e730a9013e5a995a79cea7cc6fe1cf720b | [] | no_license | joshhjacobson/masters-thesis | be3e6c882a27ad399f6656988f6c38bbcef568ba | aeafb3aac6e5faa5ac67e4436a1e99e630ed8c9b | refs/heads/master | 2022-06-28T02:03:39.444131 | 2020-05-08T17:37:59 | 2020-05-08T17:37:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 795 | r | thresh_mean.R |
require(RandomFields)
source("build_ensemble.R")
## Function to calculate mean squared difference between
## observation and ensemble threshold means.
##
## For each threshold, a fresh ensemble is simulated via build_ensemble()
## (sourced from build_ensemble.R); column 1 is the observation, the
## remaining 11 columns are ensemble members.  The squared difference of
## the observed and mean-ensemble exceedance frequencies is averaged
## over all thresholds.
##
## xi     -- numeric value forwarded to build_ensemble()
## thresh -- thresholds at which exceedance frequencies are compared
##           (optional; the default reproduces the original 0..4 grid)
## Returns the mean squared difference (a single numeric value).
thresh_mean <- function(xi, thresh = seq(0, 4, 0.5)) {
  rows <- vector("list", length(thresh))
  for (j in seq_along(thresh)) {
    t <- thresh[j]
    ens <- build_ensemble(xi = xi)            # obs. w/ 11 ensemble members
    exceed <- colMeans(ens > t)               # per-column exceedance frequency
    obs <- round(exceed[1], 5)                # observation
    ensemble <- round(mean(exceed[-1]), 5)    # mean of ensemble frequencies
    rows[[j]] <- c(t, obs, ensemble, (obs - ensemble)^2)
  }
  # Preallocated rows instead of growing a data frame with rbind();
  # "threshhold" typo fixed (internal name only -- return value unchanged).
  thresh_stats <- as.data.frame(do.call(rbind, rows))
  colnames(thresh_stats) <- c("threshold", "observation", "ensemble_mean", "sqrd_diff")
  mean(thresh_stats$sqrd_diff)
}
e88f69092adf0a819558c998ac5933700bef6726 | 712c71892a6edd61227e2c0c58bbc1e9b43893e4 | /man/dependency-class.Rd | 823f67ccfcc1f503d0e6ecd748af571f4c269c32 | [] | no_license | gelfondjal/adapr | 130a6f665d85cdfae7730196ee57ba0a3aab9c22 | b85114afea2ba5b70201eef955e33ca9ac2f9258 | refs/heads/master | 2021-01-24T10:20:14.982698 | 2020-01-28T22:56:18 | 2020-01-28T22:56:18 | 50,005,270 | 33 | 3 | null | 2018-10-18T16:09:57 | 2016-01-20T04:48:49 | R | UTF-8 | R | false | true | 365 | rd | dependency-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dependency_class.R
\docType{class}
\name{dependency-class}
\alias{dependency-class}
\alias{dependency}
\title{Dependency class}
\description{
Dependency class
}
\section{Methods}{
\describe{
\item{\code{update(df.update)}}{Updates the dependency object with a read in or write out}
}}
|
692d5242d241dd681408030f5685beabe5c99e3b | dbd38ce158841d9d94984629a70651d813cbdef8 | /R/RVenn.R | c6fc68731fa92da93ab65ef103b347544347a2c2 | [] | no_license | gaospecial/RVenn | ebf49636f11aa8ab804a62966f66f622f6cf8cd2 | 13841159034d84a58a8eecfbb12c9778ce0de3ef | refs/heads/master | 2022-01-16T18:54:46.219924 | 2019-07-18T20:40:02 | 2019-07-18T20:40:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,435 | r | RVenn.R | #' \code{RVenn}: A package for set operations for many sets.
#'
#' Set operations for many sets. The base functions for set operations in R can
#' be used for only two sets. This package uses 'purrr' to find the union,
#' intersection and difference of three or more sets. This package also provides
#' functions for pairwise set operations among several sets. Further, based on
#' 'ggplot2' and 'ggforce', a Venn diagram can be drawn for two or three sets.
#' For bigger data sets, a clustered heatmap showing presence/absence of the
#' elements of the sets can be drawn based on the 'pheatmap' package. Finally,
#' enrichment test can be applied to two sets whether an overlap is
#' statistically significant or not.
#'
#' @docType package
#' @name RVenn
NULL
#' Build a \code{Venn} object.
#'
#' \code{Venn} builds a \code{Venn} object from a list.
#'
#' @param sets (Required) A list containing vectors in the same class. If a
#' vector contains duplicates they will be discarded. If the list doesn't have
#' names the sets will be named as "Set_1", "Set_2", "Set_3" and so on.
#' @return A \code{Venn} object.
#' @examples
#' venn = Venn(list(letters[1:10], letters[3:12], letters[6:15]))
#' print(venn)
#' @name Venn
NULL
#' Intersection of many sets.
#'
#' \code{overlap} returns the same elements of the sets in a \code{Venn} object.
#'
#' @param venn (Required) A \code{Venn} object.
#' @param slice (Optional) The names or the indices of sets of interest. Default
#' is "all", meaning the intersection will be calculated for all the sets.
#' @return A vector showing the intersection of the sets.
#' @examples
#' venn = Venn(list(letters[1:10], letters[3:12], letters[6:15]))
#' overlap(venn)
#' overlap(venn, slice = c(1, 2))
#' @name overlap
NULL
#' Pairwise intersections of many sets.
#'
#' \code{overlap_pairs} returns the pairwise intersections of the sets in a
#' \code{Venn} object.
#'
#' @param venn (Required) A \code{Venn} object.
#' @param slice (Optional) The names or the indices of sets of interest. Default
#' is "all", meaning the pairwise intersections will be calculated for all the
#' sets.
#' @return A list showing the pairwise intersections of the sets.
#' @examples
#' venn = Venn(list(letters[1:10], letters[3:12],
#' letters[6:15], letters[9:18]))
#' overlap_pairs(venn)
#' overlap_pairs(venn, slice = 1:3)
#' @name overlap_pairs
NULL
#' Union of many sets.
#'
#' \code{unite} returns the union of the sets in a \code{Venn} object.
#'
#' @param venn (Required) A \code{Venn} object.
#' @param slice (Optional) The names or the indices of sets of interest. Default
#' is "all", meaning the union will be calculated for all the sets.
#' @return A vector showing the union of the sets.
#' @examples
#' venn = Venn(list(letters[1:10], letters[3:12], letters[6:15]))
#' unite(venn)
#' unite(venn, slice = c(1, 2))
#' @name unite
NULL
#' Pairwise unions of many sets.
#'
#' \code{unite_pairs} returns the pairwise unions of the sets in a \code{Venn}
#' object.
#'
#' @param venn (Required) A \code{Venn} object.
#' @param slice (Optional) The names or the indices of sets of interest. Default
#' is "all", meaning the pairwise intersections will be calculated for all the
#' sets.
#' @return A list showing the pairwise unions of the sets.
#' @examples
#' venn = Venn(list(letters[1:10], letters[3:12],
#' letters[6:15], letters[9:18]))
#' unite_pairs(venn)
#' unite_pairs(venn, slice = 1:3)
#' @name unite_pairs
NULL
#' Set difference.
#'
#' \code{discern} returns the difference between two group of sets selected from
#' a \code{Venn} object. If multiple sets are chosen for the slices, union of
#' those sets will be used.
#'
#' @param venn (Required) A \code{Venn} object.
#' @param slice1 (Required) The name or the index of the set of interest.
#' Multiple sets can be selected.
#' @param slice2 (Optional) The name or the index of the set of interest.
#' Multiple sets can be selected. Default is all the sets except the sets of
#' slice1.
#' @return A vector showing the difference between slice1 and slice2.
#' @examples
#' venn = Venn(list(letters[1:10], letters[3:12], letters[6:15]))
#' discern(venn, slice1 = 1)
#' discern(venn, slice1 = c(1, 2), slice2 = 3)
#' @name discern
NULL
#' Pairwise difference of many sets.
#'
#' \code{discern_pairs} returns the pairwise differences of the sets in a
#' \code{Venn} object.
#'
#' @param venn (Required) A \code{Venn} object.
#' @param slice (Optional) The names or the indices of sets of interest. Default
#' is "all", meaning the pairwise differences will be calculated for all the
#' sets.
#' @return A list showing the pairwise differences of the sets.
#' @examples
#' venn = Venn(list(letters[1:10], letters[3:12],
#' letters[6:15], letters[9:18]))
#' discern_pairs(venn)
#' discern_pairs(venn, slice = 1:3)
#' @name discern_pairs
NULL
#' Draw the Venn diagram.
#'
#' Draw the Venn diagram for 2 or 3 sets.
#'
#' This function is based on the packages 'ggplot2' and 'ggforce.' It
#' has been designed for 2 or 3 sets because Venn diagrams are terrible for
#' showing the interactions of 4 or more sets. If you need to visualize such
#' interactions, consider using \code{\link{setmap}}.
#'
#' @param venn (Required) A \code{Venn} object.
#' @param slice (Optional) The names or the indices of the sets of interest.
#' Default is "all", which is for the cases the \code{Venn} object only
#' contains 2 or 3 sets. If you have 4 or more sets, this argument is
#' required.
#' @param fill (Optional) Fill color of the sets.
#' @param alpha (Optional) Opacity of the fill colors. Default is 0.5 in the
#' range of (0, 0.5).
#' @param thickness (Optional) Stroke size of the sets.
#' @return The function returns the plot in ggplot2 style.
#' @examples
#' venn = Venn(list(letters[1:10], letters[3:12], letters[6:15]))
#' ggvenn(venn)
#' ggvenn(venn, slice = c(1, 2), thickness = 0, alpha = 0.3)
#' @name ggvenn
NULL
#' Draw a clustered heatmap showing presence/absence of the elements.
#'
#' This function is based on the package 'pheatmap'. \code{\link{ggvenn}}
#' function is useful for 2-3 sets, if you need to show interactions between
#' many sets, you can show the presence/absence of the elements among all the
#' sets and cluster both the sets and the elements based on Jaccard distances.
#'
#' @param venn (Required) A \code{Venn} object.
#' @param slice (Optional) The names or the indices of sets of interest. Default
#' is "all", meaning the union will be calculated for all the sets.
#' @param element_clustering (Optional) Boolean values determining if elements
#' should be clustered.
#' @param set_clustering (Optional) Boolean values determining if sets should be
#' clustered.
#' @param method (Optional) Clustering method used. Accepts the same values as
#' \code{\link[stats]{hclust}}.
#' @param legend (Optional) Boolean values determining if the legend should be
#' drawn.
#' @param title (Optional) Title of the heatmap.
#' @param element_fontsize (Optional) Font size of the elements.
#' @param set_fontsize (Optional) Font size of the sets.
#' @return Presence/absence heatmap of the sets.
#' @examples
#' venn = Venn(list(letters[1:10], letters[3:12], letters[6:15], letters[9:16],
#' letters[15:25], letters[12:20]))
#' setmap(venn)
#' setmap(venn, slice = 1:4, element_clustering = FALSE, set_clustering = FALSE)
#' @name setmap
NULL
#' Perform an enrichment test.
#'
#' Calculate the p-value of occurrence of an overlap between two sets by chance.
#'
#' This type of analysis can also be performed by hypergeometric test or
#' Fisher's exact test. Here, the approach is similar to that described in
#' (\href{https://onlinelibrary.wiley.com/doi/full/10.1111/tpj.13261}{Austin et
#' al., 2016}). Briefly, the test is based on random generation of sets with
#' equal size to \code{set1} from the background (universal) set. After creating
#' n (default is 10,000) random sets, the overlap between these and \code{set2}
#' is calculated to make a null distribution. When this distribution is true,
#' the probability of seeing an overlap at least as extreme as what was observed
#' (overlap between \code{set1} and \code{set2}) will be returned as the
#' p-value.
#' @param venn (Required) A \code{Venn} object.
#' @param set1 (Required) The name or the index of the set of interest.
#' @param set2 (Required) The name or the index of the set to be checked whether
#' enriched in \code{set1}.
#' @param univ (Optional) Population size. Default is "all", implying the union
#' of all the sets in the \code{Venn} object will be used. Another set as the
#' whole population can be assigned as well.
#' @param n (Optional) Number of randomly generated sets. Default is 10,000 and
#' minimum is 1,000.
#' @param seed (Optional) An integer passed to set.seed function. It is
#' used to fix a seed for reproducibly random number generation. Default is
#' 42.
#' @return Returns a list containing the probability (Significance) of occurrence
#' of an overlap between two sets by chance and the number of occurrences
#' (Overlap_Counts) in randomly generated sets.
#' @examples
#' set1 = c(1:20, letters[1:10])
#' set2 = letters[-26]
#' univ = unique(c(set1, set2, 21:200))
#' venn = Venn(list(set1, set2, univ))
#' e = enrichment_test(venn, 1, 2)
#' e$Significance
#' @name enrichment_test
NULL
|
5d9e58cf6787f267e5127e2cacbb7a8dd0aba5aa | 5e3ee24c56941df7b903def7d751473806fd133c | /delphi-analysis.r | 955a1d7613773960defe8bfbbdbbbbbdf077b0c5 | [
"Apache-2.0"
] | permissive | WCMetrics/SurveyAnalysis | f45d189ff452542926cc67365335e77a1204bce5 | 38e387cab749c613e5f6ecc4765186ba6939233c | refs/heads/master | 2022-11-19T01:27:02.665454 | 2020-07-11T15:51:13 | 2020-07-11T15:51:13 | 262,256,028 | 1 | 0 | Apache-2.0 | 2020-07-11T15:51:14 | 2020-05-08T07:27:52 | Jupyter Notebook | UTF-8 | R | false | false | 24,440 | r | delphi-analysis.r | # Importing libraries
# --- Package setup ----------------------------------------------------------
library(reticulate)  # bridge to Python: read .npy arrays through numpy
library(ca)          # correspondence analysis
library(psy)         # Cronbach's alpha
library(irr)         # Kendall's W
library(RcppAlgos)   # combination generation for the Kendall clustering step

# Bind numpy through the chosen Python interpreter.
use_python("/usr/local/bin/python3")
np <- import("numpy")

# --- Input files ------------------------------------------------------------
base_path <- "/Users/almo/Development/WCMetrics/SurveyAnalysis/"
npy_path <- function(name) paste0(base_path, name)

answers_path          <- npy_path("answers.npy")
metrics_path          <- npy_path("metrics.npy")
answers_mean_path     <- npy_path("answers_mean.npy")
answers_median_path   <- npy_path("answers_median.npy")
answers_std_path      <- npy_path("answers_std.npy")
answers_agree_path    <- npy_path("answers_agree.npy")
answers_disagree_path <- npy_path("answers_disagree.npy")

# Load every survey matrix up front.
answers          <- np$load(answers_path)
metrics          <- np$load(metrics_path)
answers_mean     <- np$load(answers_mean_path)
answers_median   <- np$load(answers_median_path)
answers_std      <- np$load(answers_std_path)
answers_agree    <- np$load(answers_agree_path)
answers_disagree <- np$load(answers_disagree_path)
# Cronbach's alpha: internal consistency of the expert answers.
cronbach(answers)
# Correspondence analysis of experts (rows) vs. questions (columns).
ca_values <- ca(answers)
# Round-1 expert labels kept below for reference (commented out):
#R1 ca_values$rownames <- c("E014","E024","E034","E043","E055","E064","E072","E084","E095","E105","E114","E124","E135","E144","E154","E164","E174","E185","E194","E205")
ca_values$rownames <- c("E064","E024","E095","E185","E105","E034","E124","E114","E043","E164","E154","E144","E055","E014","E072","E194","E174","E084","E135","E205")
ca_values$colnames <- c("Q01","Q02","Q03","Q04","Q05","Q06","Q07","Q08","Q09","Q10",
                        "Q11","Q12","Q13","Q14","Q15","Q16","Q17","Q18","Q19","Q20",
                        "Q21","Q22","Q23","Q24","Q25","Q26","Q27","Q28","Q29","Q30",
                        "Q31","Q32","Q33","Q34","Q35","Q36","Q37","Q38","Q39","Q40",
                        "Q41","Q42","Q43","Q44","Q45")
plot(ca_values)
# Kendall's W within the current round.
# Note: TRUE = apply the correction for ties.
kendall(metrics, TRUE)
# Kendall's W between rounds #1 and #2.
# Note: TRUE = apply the correction for ties; the literal vector holds the
# round-1 per-question means paired against the round-2 means.
rounds_avg <- cbind(answers_mean,c(2.80, 3.40, 1.95, 2.35, 2.50, 2.00, 4.15, 3.20, 3.05, 4.10, 2.55, 3.25, 3.15, 2.75, 4.65, 3.50, 2.50, 4.05, 2.65, 2.35, 3.35, 3.00, 4.15, 4.20, 4.15, 3.20, 4.20, 3.25, 4.30, 3.95, 3.30, 3.70, 4.55, 3.80, 3.80, 4.55, 3.80, 3.45, 4.25, 3.40, 4.40, 3.55, 4.55, 4.25, 3.65))
kendall(rounds_avg, TRUE)
# ---------------------------------------------------------------------------
# Kendall's W clustering: for every subset ("cluster") of k experts
# (k = 2..10) drawn from the 20 panellists, compute Kendall's W over the 45
# questions, then summarise the distribution of W per cluster size.
#
# The original script repeated this section nine times with copy-pasted
# code; it is factored into one helper.  Side effects are kept identical:
#   * W<k>Cluster / W<k>ClusterMin / W<k>ClusterMax are created as globals,
#   * one summary row per k is appended to StatsClusters (labels WMC.02..10),
#   * one histogram of the W distribution is drawn per k, in order.
# ---------------------------------------------------------------------------

# Compute Kendall's W (tie-corrected) for every combination of `k` experts.
# Relies on the global matrix `answers` (experts x questions).  Returns a
# list: per-combination data frame, rows holding the min/max W, and a
# one-row summary for StatsClusters.
build_w_cluster <- function(k, n_experts = 20) {
  combos <- comboGeneral(n_experts, k)
  rows <- vector("list", nrow(combos))          # preallocate, no rbind-growth
  for (i in seq_len(nrow(combos))) {
    # Columns = the answer vectors of the k selected experts.
    answers_slice <- t(answers[combos[i, ], ])
    w <- kendall(answers_slice, TRUE)$value     # TRUE = correct for ties
    row <- data.frame(t(combos[i, ]), w)
    names(row) <- c(paste0("Exp", seq_len(k)), "W")
    rows[[i]] <- row
  }
  w_cluster <- do.call(rbind, rows)
  min_row <- w_cluster[which.min(w_cluster$W), ]
  max_row <- w_cluster[which.max(w_cluster$W), ]
  stats <- data.frame(sprintf("WMC.%02d", k), max_row$W, min_row$W,
                      mean(w_cluster$W), var(w_cluster$W), sd(w_cluster$W))
  names(stats) <- c("Cluster", "Max", "Min", "Mean", "Var", "StdDev")
  list(cluster = w_cluster, min_row = min_row, max_row = max_row, stats = stats)
}

StatsClusters <- data.frame()
for (k in 2:10) {
  res <- build_w_cluster(k)
  # Keep the per-size globals that the rest of the script expects.
  assign(paste0("W", k, "Cluster"), res$cluster)
  assign(paste0("W", k, "ClusterMin"), res$min_row)
  assign(paste0("W", k, "ClusterMax"), res$max_row)
  StatsClusters <- rbind(StatsClusters, res$stats)
  # Histogram of the W distribution over all clusters of size k
  # (same 100-step break grid as the original).
  hist(res$cluster$W,
       seq(res$min_row$W, res$max_row$W,
           (res$max_row$W - res$min_row$W) / 100))
}
# Cluster 11 Experts
eval_num <- c(1:comboCount(20,11))
eval<-comboGeneral(20,11)
W11Cluster <- data.frame()
for (i in eval_num){
answers_slice <- cbind(answers[eval[i,1],],answers[eval[i,2],],answers[eval[i,3],],answers[eval[i,4],],
answers[eval[i,5],],answers[eval[i,6],],answers[eval[i,7],],answers[eval[i,8],],answers[eval[i,9],],
answers[eval[i,10],],answers[eval[i,11],])
k_output <- kendall(answers_slice,TRUE)
new_eval <- data.frame(eval[i,1],eval[i,2],eval[i,3],eval[i,4],eval[i,5],eval[i,6],eval[i,7],eval[i,8],eval[i,9],eval[i,10],
eval[i,11],k_output["value"])
names(new_eval) <- c("Exp1","Exp2","Exp3","Exp4","Exp5","Exp6","Exp7","Exp8","Exp9","Exp10","Exp11","W")
W11Cluster <- rbind(W11Cluster,new_eval)
}
W11ClusterMin <- W11Cluster[which.min(W11Cluster$W),]
W11ClusterMax <- W11Cluster[which.max(W11Cluster$W),]
new_stats <- data.frame("WMC.11",W11ClusterMax$W, W11ClusterMin$W, mean(W11Cluster$W), var(W11Cluster$W), sd(W11Cluster$W))
names(new_stats) <- c("Cluster","Max","Min","Mean","Var", "StdDev")
StatsClusters <- rbind(StatsClusters,new_stats)
hist(W11Cluster$W,seq(W11ClusterMin$W,W11ClusterMax$W,(W11ClusterMax$W-W11ClusterMin$W)/100))
# Cluster 12 Experts
eval_num <- c(1:comboCount(20,12))
eval<-comboGeneral(20,12)
W12Cluster <- data.frame()
for (i in eval_num){
answers_slice <- cbind(answers[eval[i,1],],answers[eval[i,2],],answers[eval[i,3],],answers[eval[i,4],],
answers[eval[i,5],],answers[eval[i,6],],answers[eval[i,7],],answers[eval[i,8],],answers[eval[i,9],],
answers[eval[i,10],],answers[eval[i,11],],answers[eval[i,12],])
k_output <- kendall(answers_slice,TRUE)
new_eval <- data.frame(eval[i,1],eval[i,2],eval[i,3],eval[i,4],eval[i,5],eval[i,6],eval[i,7],eval[i,8],eval[i,9],eval[i,10],
eval[i,11],eval[i,12],k_output["value"])
names(new_eval) <- c("Exp1","Exp2","Exp3","Exp4","Exp5","Exp6","Exp7","Exp8","Exp9","Exp10","Exp11","Exp12","W")
W12Cluster <- rbind(W12Cluster,new_eval)
}
W12ClusterMin <- W12Cluster[which.min(W12Cluster$W),]
W12ClusterMax <- W12Cluster[which.max(W12Cluster$W),]
new_stats <- data.frame("WMC.12",W12ClusterMax$W, W12ClusterMin$W, mean(W12Cluster$W), var(W12Cluster$W), sd(W12Cluster$W))
names(new_stats) <- c("Cluster","Max","Min","Mean","Var","StdDev")
StatsClusters <- rbind(StatsClusters,new_stats)
hist(W12Cluster$W,seq(W12ClusterMin$W,W12ClusterMax$W,(W12ClusterMax$W-W12ClusterMin$W)/100))
# Cluster 13 Experts
eval_num <- c(1:comboCount(20,13))
eval<-comboGeneral(20,13)
W13Cluster <- data.frame()
for (i in eval_num){
answers_slice <- cbind(answers[eval[i,1],],answers[eval[i,2],],answers[eval[i,3],],answers[eval[i,4],],
answers[eval[i,5],],answers[eval[i,6],],answers[eval[i,7],],answers[eval[i,8],],answers[eval[i,9],],
answers[eval[i,10],],answers[eval[i,11],],answers[eval[i,12],],answers[eval[i,13],])
k_output <- kendall(answers_slice,TRUE)
new_eval <- data.frame(eval[i,1],eval[i,2],eval[i,3],eval[i,4],eval[i,5],eval[i,6],eval[i,7],eval[i,8],eval[i,9],eval[i,10],
eval[i,11],eval[i,12],eval[i,13],k_output["value"])
names(new_eval) <- c("Exp1","Exp2","Exp3","Exp4","Exp5","Exp6","Exp7","Exp8","Exp9","Exp10","Exp11","Exp12","Exp13","W")
W13Cluster <- rbind(W13Cluster,new_eval)
}
W13ClusterMin <- W13Cluster[which.min(W13Cluster$W),]
W13ClusterMax <- W13Cluster[which.max(W13Cluster$W),]
new_stats <- data.frame("WMC.13",W13ClusterMax$W, W13ClusterMin$W, mean(W13Cluster$W), var(W13Cluster$W), sd(W13Cluster$W))
names(new_stats) <- c("Cluster","Max","Min","Mean","Var", "StdDev")
StatsClusters <- rbind(StatsClusters,new_stats)
hist(W13Cluster$W,seq(W13ClusterMin$W,W13ClusterMax$W,(W13ClusterMax$W-W13ClusterMin$W)/100))
# Cluster 14 Experts
eval_num <- c(1:comboCount(20,14))
eval<-comboGeneral(20,14)
W14Cluster <- data.frame()
for (i in eval_num){
answers_slice <- cbind(answers[eval[i,1],],answers[eval[i,2],],answers[eval[i,3],],answers[eval[i,4],],
answers[eval[i,5],],answers[eval[i,6],],answers[eval[i,7],],answers[eval[i,8],],answers[eval[i,9],],
answers[eval[i,10],],answers[eval[i,11],],answers[eval[i,12],],answers[eval[i,13],],answers[eval[i,14],])
k_output <- kendall(answers_slice,TRUE)
new_eval <- data.frame(eval[i,1],eval[i,2],eval[i,3],eval[i,4],eval[i,5],eval[i,6],eval[i,7],eval[i,8],eval[i,9],eval[i,10],
eval[i,11],eval[i,12],eval[i,13],eval[i,14],k_output["value"])
names(new_eval) <- c("Exp1","Exp2","Exp3","Exp4","Exp5","Exp6","Exp7","Exp8","Exp9","Exp10","Exp11","Exp12","Exp13","Exp14","W")
W14Cluster <- rbind(W14Cluster,new_eval)
}
W14ClusterMin <- W14Cluster[which.min(W14Cluster$W),]
W14ClusterMax <- W14Cluster[which.max(W14Cluster$W),]
new_stats <- data.frame("WMC.14",W14ClusterMax$W, W14ClusterMin$W, mean(W14Cluster$W), var(W14Cluster$W), sd(W14Cluster$W))
names(new_stats) <- c("Cluster","Max","Min","Mean","Var", "StdDev")
StatsClusters <- rbind(StatsClusters,new_stats)
hist(W14Cluster$W,seq(W14ClusterMin$W,W14ClusterMax$W,(W14ClusterMax$W-W14ClusterMin$W)/100))
# Cluster 15 Experts
eval_num <- c(1:comboCount(20,15))
eval<-comboGeneral(20,15)
W15Cluster <- data.frame()
for (i in eval_num){
answers_slice <- cbind(answers[eval[i,1],],answers[eval[i,2],],answers[eval[i,3],],answers[eval[i,4],],
answers[eval[i,5],],answers[eval[i,6],],answers[eval[i,7],],answers[eval[i,8],],answers[eval[i,9],],
answers[eval[i,10],],answers[eval[i,11],],answers[eval[i,12],],answers[eval[i,13],],answers[eval[i,14],],
answers[eval[i,15],])
k_output <- kendall(answers_slice,TRUE)
new_eval <- data.frame(eval[i,1],eval[i,2],eval[i,3],eval[i,4],eval[i,5],eval[i,6],eval[i,7],eval[i,8],eval[i,9],eval[i,10],
eval[i,11],eval[i,12],eval[i,13],eval[i,14],eval[i,15],k_output["value"])
names(new_eval) <- c("Exp1","Exp2","Exp3","Exp4","Exp5","Exp6","Exp7","Exp8","Exp9","Exp10","Exp11","Exp12","Exp13","Exp14","Exp15","W")
W15Cluster <- rbind(W15Cluster,new_eval)
}
W15ClusterMin <- W15Cluster[which.min(W15Cluster$W),]
W15ClusterMax <- W15Cluster[which.max(W15Cluster$W),]
new_stats <- data.frame("WMC.15",W15ClusterMax$W, W15ClusterMin$W, mean(W15Cluster$W), var(W15Cluster$W), sd(W15Cluster$W))
names(new_stats) <- c("Cluster","Max","Min","Mean","Var", "StdDev")
StatsClusters <- rbind(StatsClusters,new_stats)
hist(W15Cluster$W,seq(W15ClusterMin$W,W15ClusterMax$W,(W15ClusterMax$W-W15ClusterMin$W)/100))
# Cluster 16 Experts
eval_num <- c(1:comboCount(20,16))
eval<-comboGeneral(20,16)
W16Cluster <- data.frame()
for (i in eval_num){
answers_slice <- cbind(answers[eval[i,1],],answers[eval[i,2],],answers[eval[i,3],],answers[eval[i,4],],
answers[eval[i,5],],answers[eval[i,6],],answers[eval[i,7],],answers[eval[i,8],],answers[eval[i,9],],
answers[eval[i,10],],answers[eval[i,11],],answers[eval[i,12],],answers[eval[i,13],],answers[eval[i,14],],
answers[eval[i,15],],answers[eval[i,16],])
k_output <- kendall(answers_slice,TRUE)
new_eval <- data.frame(eval[i,1],eval[i,2],eval[i,3],eval[i,4],eval[i,5],eval[i,6],eval[i,7],eval[i,8],eval[i,9],eval[i,10],
eval[i,11],eval[i,12],eval[i,13],eval[i,14],eval[i,15],eval[i,16],k_output["value"])
names(new_eval) <- c("Exp1","Exp2","Exp3","Exp4","Exp5","Exp6","Exp7","Exp8","Exp9","Exp10","Exp11","Exp12","Exp13","Exp14","Exp15","Exp16","W")
W16Cluster <- rbind(W16Cluster,new_eval)
}
W16ClusterMin <- W16Cluster[which.min(W16Cluster$W),]
W16ClusterMax <- W16Cluster[which.max(W16Cluster$W),]
new_stats <- data.frame("WMC.16",W16ClusterMax$W, W16ClusterMin$W, mean(W16Cluster$W), var(W16Cluster$W), sd(W16Cluster$W))
names(new_stats) <- c("Cluster","Max","Min","Mean","Var", "StdDev")
StatsClusters <- rbind(StatsClusters,new_stats)
hist(W16Cluster$W,seq(W16ClusterMin$W,W16ClusterMax$W,(W16ClusterMax$W-W16ClusterMin$W)/100))
# Cluster 17 Experts
eval_num <- c(1:comboCount(20,17))
eval<-comboGeneral(20,17)
W17Cluster <- data.frame()
for (i in eval_num){
answers_slice <- cbind(answers[eval[i,1],],answers[eval[i,2],],answers[eval[i,3],],answers[eval[i,4],],
answers[eval[i,5],],answers[eval[i,6],],answers[eval[i,7],],answers[eval[i,8],],answers[eval[i,9],],
answers[eval[i,10],],answers[eval[i,11],],answers[eval[i,12],],answers[eval[i,13],],answers[eval[i,14],],
answers[eval[i,15],],answers[eval[i,16],],answers[eval[i,17],])
k_output <- kendall(answers_slice,TRUE)
new_eval <- data.frame(eval[i,1],eval[i,2],eval[i,3],eval[i,4],eval[i,5],eval[i,6],eval[i,7],eval[i,8],eval[i,9],eval[i,10],
eval[i,11],eval[i,12],eval[i,13],eval[i,14],eval[i,15],eval[i,16],eval[i,17],k_output["value"])
names(new_eval) <- c("Exp1","Exp2","Exp3","Exp4","Exp5","Exp6","Exp7","Exp8","Exp9","Exp10","Exp11","Exp12","Exp13","Exp14","Exp15","Exp16","Exp17","W")
W17Cluster <- rbind(W17Cluster,new_eval)
}
W17ClusterMin <- W17Cluster[which.min(W17Cluster$W),]
W17ClusterMax <- W17Cluster[which.max(W17Cluster$W),]
new_stats <- data.frame("WMC.17",W17ClusterMax$W, W17ClusterMin$W, mean(W17Cluster$W), var(W17Cluster$W), sd(W17Cluster$W))
names(new_stats) <- c("Cluster","Max","Min","Mean","Var", "StdDev")
StatsClusters <- rbind(StatsClusters,new_stats)
hist(W17Cluster$W,seq(W17ClusterMin$W,W17ClusterMax$W,(W17ClusterMax$W-W17ClusterMin$W)/100))
# Cluster 18 Experts
eval_num <- c(1:comboCount(20,18))
eval<-comboGeneral(20,18)
W18Cluster <- data.frame()
for (i in eval_num){
answers_slice <- cbind(answers[eval[i,1],],answers[eval[i,2],],answers[eval[i,3],],answers[eval[i,4],],
answers[eval[i,5],],answers[eval[i,6],],answers[eval[i,7],],answers[eval[i,8],],answers[eval[i,9],],
answers[eval[i,10],],answers[eval[i,11],],answers[eval[i,12],],answers[eval[i,13],],answers[eval[i,14],],
answers[eval[i,15],],answers[eval[i,16],],answers[eval[i,17],],answers[eval[i,18],])
k_output <- kendall(answers_slice,TRUE)
new_eval <- data.frame(eval[i,1],eval[i,2],eval[i,3],eval[i,4],eval[i,5],eval[i,6],eval[i,7],eval[i,8],eval[i,9],eval[i,10],
eval[i,11],eval[i,12],eval[i,13],eval[i,14],eval[i,15],eval[i,16],eval[i,17],eval[i,18],k_output["value"])
names(new_eval) <- c("Exp1","Exp2","Exp3","Exp4","Exp5","Exp6","Exp7","Exp8","Exp9","Exp10","Exp11","Exp12","Exp13","Exp14","Exp15","Exp16","Exp17","Exp18","W")
W18Cluster <- rbind(W18Cluster,new_eval)
}
W18ClusterMin <- W18Cluster[which.min(W18Cluster$W),]
W18ClusterMax <- W18Cluster[which.max(W18Cluster$W),]
new_stats <- data.frame("WMC.18",W18ClusterMax$W, W18ClusterMin$W, mean(W18Cluster$W), var(W18Cluster$W), sd(W18Cluster$W))
names(new_stats) <- c("Cluster","Max","Min","Mean","Var", "StdDev")
StatsClusters <- rbind(StatsClusters,new_stats)
hist(W18Cluster$W,seq(W18ClusterMin$W,W18ClusterMax$W,(W18ClusterMax$W-W18ClusterMin$W)/100))
# Cluster 19 Experts
eval_num <- c(1:comboCount(20,19))
eval<-comboGeneral(20,19)
W19Cluster <- data.frame()
for (i in eval_num){
answers_slice <- cbind(answers[eval[i,1],],answers[eval[i,2],],answers[eval[i,3],],answers[eval[i,4],],
answers[eval[i,5],],answers[eval[i,6],],answers[eval[i,7],],answers[eval[i,8],],answers[eval[i,9],],
answers[eval[i,10],],answers[eval[i,11],],answers[eval[i,12],],answers[eval[i,13],],answers[eval[i,14],],
answers[eval[i,15],],answers[eval[i,16],],answers[eval[i,17],],answers[eval[i,18],],answers[eval[i,19],])
k_output <- kendall(answers_slice,TRUE)
new_eval <- data.frame(eval[i,1],eval[i,2],eval[i,3],eval[i,4],eval[i,5],eval[i,6],eval[i,7],eval[i,8],eval[i,9],eval[i,10],
eval[i,11],eval[i,12],eval[i,13],eval[i,14],eval[i,15],eval[i,16],eval[i,17],eval[i,18],eval[i,19],k_output["value"])
names(new_eval) <- c("Exp1","Exp2","Exp3","Exp4","Exp5","Exp6","Exp7","Exp8","Exp9","Exp10","Exp11","Exp12","Exp13","Exp14","Exp15","Exp16","Exp17","Exp18","Exp19","W")
W19Cluster <- rbind(W19Cluster,new_eval)
}
W19ClusterMin <- W19Cluster[which.min(W19Cluster$W),]
W19ClusterMax <- W19Cluster[which.max(W19Cluster$W),]
new_stats <- data.frame("WMC.19",W19ClusterMax$W, W19ClusterMin$W, mean(W19Cluster$W), var(W19Cluster$W), sd(W19Cluster$W))
names(new_stats) <- c("Cluster","Max","Min","Mean","Var", "StdDev")
StatsClusters <- rbind(StatsClusters,new_stats)
hist(W19Cluster$W,seq(W19ClusterMin$W,W19ClusterMax$W,(W19ClusterMax$W-W19ClusterMin$W)/10))
## Plotting W Dynamics: one line plot per summary statistic, each point
## labelled with its cluster identifier.
for (stat_name in c("Min", "Max", "Mean", "Var", "StdDev")) {
  stat_values <- StatsClusters[[stat_name]]
  plot(stat_values, type = "b")
  text(stat_values, labels = StatsClusters$Cluster, cex = 0.7, pos = 3)
}
0d57d8a4323f52569b7313bcee0b8ea53d8aa31b | 92543d1229fe752753074d6510de00dfdb6a41b1 | /rscripts/euk_functions.R | 949e24b9cb2921bbf6bae6db2f1086f63d80fdb9 | [] | no_license | OldMortality/eukaryotes | 11b8eb302745ffc5f79895f005f8ecff637c299f | 45850eb155006c8232096b37068a14aecc2090c8 | refs/heads/master | 2023-08-27T19:04:42.666509 | 2021-11-14T07:14:51 | 2021-11-14T07:14:51 | 262,228,930 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,224 | r | euk_functions.R | ##
## R code to analyse Paul's eukaryotes data from Antarctica
##
## This file contains all helper functions. The main ones are
## * create.df.species(filepath,locations)
## this one creates a dataframe with all relevant samples
## * getDFByPhylum(df,phylum,cols)
## creates a dataframe for your phylum. Do typically you would do:
## df.species <- create.df.species(filepath=..,locations=LOCATIONS)
## and then for each phylum:
## df.tardigrades <- getDFByPhylum(df.species,'Tardigrada',LR.COLUMNS)
## Now you can use df.tardigrades for logistic regression.
##
##
library(dplyr)
library(pipeR)
## Global constants
##
FILEPATH = 'C:/Users/Michel/Documents/eukaryotes/data/200_all_data_long_export_filtered.Rdata'
# Global variable for our 3 locations
LOCATIONS <- c('Lake Terrasovoje','Mawson Escarpment','Mount Menzies')
# All columns of interest
MYCOLNAMES <- c("AMMN","NITR","PHOS","POTA","SULPH","CARB","COND","PH_CACL",
"RLU","QUARTZ","FELDSPAR","TITANITE","GARNETS","MICAS",
"DOLOMITE","KAOLCHLOR","CALCITE","CHLORITE","SLOPE")
# Columns used in logistic regression. I left out many of the NA columns.
LR.COLUMNS <- c("present", "Location", "Abundance",
"POTA" , "SULPH" , "COND" , "PH_CACL" ,
"RLU" , "QUARTZ" , "FELDSPAR" , "TITANITE" , "GARNETS" ,
"MICAS" , "DOLOMITE" , "KAOLCHLOR" , "CALCITE","CHLORITE")
## Inverse logit (logistic) transform: maps real values into (0, 1).
invlogit <- function(x) 1 / (1 + exp(-x))
# Total the Abundance per Sample and return the ids of those samples whose
# total falls below `threshold`.
getSamplesWithLowAbundance <- function(df, threshold = 1000) {
  totals <- aggregate(Abundance ~ Sample, df, sum)
  totals$Sample[totals$Abundance < threshold]
}
# Drop every row belonging to a sample whose total abundance is below
# `threshold` (as reported by getSamplesWithLowAbundance()).
#
# Args:
#   df:        data.frame with at least 'Sample' and 'Abundance' columns.
#   threshold: minimum total abundance a sample must reach to be kept.
# Returns the filtered data.frame (unchanged when no sample is low).
removeSamplesWithLowAbundance <- function(df, threshold = 1000) {
  lows <- getSamplesWithLowAbundance(df, threshold)
  # Keep-based subsetting replaces the original `df[-dropm, ]`, which was
  # guarded by the misleading condition `length(dropm > 0)` (it happened to
  # work only because `length(dropm > 0)` equals `length(dropm)`).
  df[!(df$Sample %in% lows), , drop = FALSE]
}
# Read the saved export from `filepath` (.Rdata) and return the
# 'psob_molten' data.frame it contains.
# NOTE(review): assumes the .Rdata file defines an object named
# 'psob_molten' -- confirm if the export format ever changes.
loadData <- function(filepath = FILEPATH) {
  env <- new.env(parent = emptyenv())
  load(filepath, envir = env)
  env$psob_molten
}
# keepOnlyEukaryotes <- function(df) {
# return(subset(df,superkingdom %in% 'Eukaryota'))
# }
# keepOnlyMyLocations <- function(df,myLocations) {
# return(subset(df,Location %in% myLocations ))
# }
## get all that aren't there :)
# getAbsences <- function(df) {
# return(which(df$Abundance==0))
# }
# removeAbsences <- function(df) {
# abs <- getAbsences(df)
# if (length(abs)==0) {
# result <- df
# } else {
# result <- df[-getAbsences(df=df),]
# }
# return(result)
# }
# Remove rows where we have the same species for multiple OTUs.
# If we do this, anything that follows will only work for presence/absence, but not for counts.
# removeDupSpecies <- function(df) {
# result <- df[!duplicated(df[,c('Sample','species')]),]
# return(result)
# }
library(dplyr) # for join
# Build the per-species sample data.frame; currently a thin wrapper around
# loadData(), kept for backward compatibility with older analysis scripts.
create.df.species <- function(filepath = FILEPATH) {
  loadData(filepath = filepath)
}
# Convert a factor holding numeric labels to numeric and take its log.
# (Going through as.character() avoids logging the factor level codes.)
takeLogoffactor <- function(x) {
  numeric_x <- as.numeric(as.character(x))
  log(numeric_x)
}
# take the log of the soil data. These have been read in
# as factors, so we need to do as.character() first
# takeLogs <- function(df,colnames) {
# df[,colnames] <- apply(df[,colnames],2,FUN= log) #takeLogoffactor)
# # we don't want logs of PH
# df$PH_H2O <- exp(df$PH_H2O)
# df$PH_CACL <- exp(df$PH_CACL)
# return(df)
# }
# One row of soil measurements per sample.
# Soil variables are replicated on every row of a sample, so the first
# occurrence of each Sample carries all the information we need.
getSoilData <- function(df, colnames = MYCOLNAMES) {
  first_rows <- !duplicated(df$Sample)
  df[first_rows, c("Sample", "Location", colnames)]
}
# Presence/absence of `phylum` per sample.
# `df` is per-species, so several of its rows may carry the phylum; the
# result has exactly one row per sample with a 0/1 'present' column that
# is 1 when at least one row of that sample belongs to the phylum.
getSamplesWithPhylumPresence <- function(df, phylum) {
  df <- as.data.frame(df)
  all_samples <- unique(df$Sample)
  hit_samples <- unique(df$Sample[which(df$phylum == phylum)])
  data.frame(Sample = all_samples,
             present = as.numeric(all_samples %in% hit_samples))
}
##
## df.species is a dataframe of all samples with positive abundance in any of our three locations.
## If a species has multiple OTUs, it appears in this dataframe only once per sample.
##
# df.species <- create.df.species(filepath = '~/Documents/eukaryotes/data/200_all_data_long_export_filtered.Rdata')
#
# Total Abundance per sample, as a two-column data.frame
# (Sample, Abundance).
getAbundancesBySample <- function(df) {
  aggregate(Abundance ~ Sample, data = df, FUN = sum)
}
# Count the distinct OTUs of `phylum` in every sample.
# Samples without any OTU of that phylum are reported with a count of 0.
getNumberOfDistinctOTUSbySample <- function(df, phylum) {
  phylum_rows <- df[which(df$phylum == phylum), ]
  counts <- tapply(phylum_rows$OTU, phylum_rows$Sample,
                   FUN = function(otus) length(unique(otus)))
  counted <- data.frame(Sample = rownames(counts), distinct.otus = counts)
  # Left-join against the full sample list so that samples with zero OTUs
  # of this phylum are retained (as NA), then recode NA -> 0.
  all_samples <- data.frame(Sample = unique(df$Sample))
  merged <- merge(all_samples, counted, by = "Sample", all.x = TRUE)
  merged$distinct.otus[is.na(merged$distinct.otus)] <- 0
  merged
}
## Get all information we need for logistic regression for a given phylum:
## one row per Sample with the number of distinct OTUs of the phylum,
## Location, and the soil measurements (coerced to numeric).
## `df` will typically be df.species, and `phylum` would be e.g. 'Tardigrada'.
##
## Args:
##   df:       long-format sample/OTU data.frame (see create.df.species).
##   phylum:   phylum name to tabulate.
##   colnames: soil-data columns to carry along (default MYCOLNAMES).
getAllByPhylum <- function(df, phylum, colnames = MYCOLNAMES) {
  print(phylum)  # progress indicator when looping over many phyla
  df.distinctOTUs <- getNumberOfDistinctOTUSbySample(df, phylum)[, c("Sample", "distinct.otus")]
  df.soilData <- getSoilData(df, colnames = colnames)
  result <- merge(df.distinctOTUs, df.soilData, by = "Sample")
  result$SLOPE <- as.numeric(result$SLOPE)
  result$PH_CACL <- as.numeric(result$PH_CACL)
  # Soil-data columns arrive as character; coerce them to numeric.
  # LR.COLUMNS[1:3] are "present", "Location" and "Abundance", which are
  # not soil measurements, hence the -c(1, 2, 3).
  numCols <- LR.COLUMNS[-c(1, 2, 3)]
  for (col in numCols) {  # seq-safe iteration (no 1:length() pitfall)
    result[[col]] <- as.numeric(result[[col]])
  }
  # (The original ended with a dead `dim(result)` statement; removed.)
  result
}
# Graphical "line-up" check for a fitted binomial GLM `m`: draw 19
# histograms of residuals from models refitted to responses simulated
# from `m`, plus (20th panel) the residuals of `m` itself. If the real
# histogram stands out among the simulated ones, the model fits poorly.
#
# Args:
#   m: fitted binomial glm with terms log(Abundance), FELDSPAR, MICAS.
#   d: the data.frame `m` was fitted to.
lineUp <- function(m, d) {
  par(mfrow = c(4, 5))
  # Rows dropped by na.action during the original fit must be removed
  # from `d` ONCE, before the loop, so the simulated responses line up
  # with the remaining rows. (The original removed them inside the loop,
  # shrinking `d` every iteration and dropping the wrong rows after the
  # first pass; it also tested the misleading `length(leaveOut > 0)`.)
  leaveOut <- as.numeric(attributes(m$na.action)$names)
  if (length(leaveOut) > 0) {
    d <- d[-leaveOut, ]
  }
  for (i in seq_len(19)) {
    sim <- unlist(simulate(m))
    m2 <- glm(sim ~ log(Abundance) + FELDSPAR + MICAS,
              data = d, family = "binomial")
    hist(residuals(m2), probability = TRUE)
  }
  hist(residuals(m), probability = TRUE)
}
# Min-max rescale `v` into [0, 1], ignoring NAs when finding the range.
scaleV <- function(v) {
  rng <- range(v, na.rm = TRUE)
  (v - rng[1]) / (rng[2] - rng[1])
}
# One-hot (dummy) encode `vec` into a 0/1 matrix with one column per
# unique value, in order of first appearance.
#
# Args:
#   vec:       vector to encode.
#   makenames: label columns "<vec expression>_<level>".
#   contrasts: drop the last column (treatment-style contrasts).
makedummy <- function(vec, makenames = TRUE, contrasts = FALSE) {
  vec_label <- deparse(substitute(vec))
  levels_seen <- unique(vec)
  X <- matrix(0, nrow = length(vec), ncol = length(levels_seen))
  # Flip exactly one cell per row: (row i, column of vec[i]'s level).
  # seq_along() replaces the original `1:length(vec)`, which produced
  # c(1, 0) for empty input and made the cbind() below error out.
  X[cbind(seq_along(vec), match(vec, levels_seen))] <- 1
  if (makenames) {
    colnames(X) <- paste0(vec_label, "_", levels_seen)
  }
  if (contrasts) {
    X <- X[, -ncol(X)]
  }
  X
}
|
9daffc09b71fdbc9518e9b9643b8c16189abcf44 | 470eb0441582cede780ca68f929372517942da97 | /man/infoMessages.Rd | eed767875c2123322b5069ea69fc54b779d7be0e | [] | no_license | vr-vr/itsadug | 0f81aa50fd1f2ce0a304791c494c49085f2c3704 | 50255a78dfe23bcc0f793f9f7c9dace0d34f2abb | refs/heads/master | 2021-01-13T02:26:46.065605 | 2015-07-29T09:03:18 | 2015-07-29T09:03:18 | 29,683,588 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 660 | rd | infoMessages.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/version.R
\name{infoMessages}
\alias{infoMessages}
\title{Turn on or off information messages.}
\usage{
infoMessages(input)
}
\arguments{
\item{input}{Input variable indicating to print info messages
("on", or 1, or TRUE) or not ("off", 0, or FALSE).}
}
\description{
Turn on or off information messages.
}
\examples{
# To turn on the info messages (all the same):
infoMessages("on")
infoMessages(1)
infoMessages(TRUE)
# To turn off the info messages (all the same):
infoMessages("off")
infoMessages(0)
infoMessages(FALSE)
# checking output:
(out <- infoMessages(FALSE))
}
|
5163f77dba6994a2b5a9ff7b83a8e3d32fdd288b | 84597ca9950c4205e3c172b8c05de45fb80a5676 | /R/Hits-class.R | d799a1e943d79537dacb1faa05f85dd75f3a239e | [] | no_license | Bioconductor/S4Vectors | 6590230a62f7bbcd48c024f5e4ac952ad21df8c8 | 5cb9c73f6ece6f3a2f1b29b8eb364fc1610657d0 | refs/heads/devel | 2023-08-08T21:26:55.079510 | 2023-05-03T04:40:11 | 2023-05-03T04:40:11 | 101,237,056 | 17 | 23 | null | 2023-07-25T13:44:44 | 2017-08-24T00:37:11 | R | UTF-8 | R | false | false | 30,075 | r | Hits-class.R | ### =========================================================================
### Hits objects
### -------------------------------------------------------------------------
###
### The Hits class hierarchy (4 concrete classes):
###
### Hits <---- SortedByQueryHits
### ^ ^
### | |
### SelfHits <---- SortedByQuerySelfHits
###
### Vector of hits between a set of left nodes and a set of right nodes.
### Each hit is a (from, to) pair of node indices; 'from' and 'to' run
### parallel to the hits while 'nLnode'/'nRnode' are global dimensions.
setClass("Hits",
    contains="Vector",
    representation(
        from="integer",    # integer vector of length N
        to="integer",      # integer vector of length N
        nLnode="integer",  # single integer: number of Lnodes ("left nodes")
        nRnode="integer"   # single integer: number of Rnodes ("right nodes")
    ),
    prototype(
        nLnode=0L,
        nRnode=0L
    )
)
### A SelfHits object is a Hits object where the left and right nodes are
### identical.
setClass("SelfHits", contains="Hits")
### Hits objects where the hits are sorted by query. Coercion from
### SortedByQueryHits to IntegerList takes advantage of this and is very fast.
setClass("SortedByQueryHits", contains="Hits")
### Self-hits that are also sorted by query (inherits both parent contracts).
setClass("SortedByQuerySelfHits", contains=c("SelfHits", "SortedByQueryHits"))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### parallel_slot_names()
###
### Combine the new "parallel slots" with those of the parent class. Make
### sure to put the new parallel slots **first**. See Vector-class.R file
### for what slots should or should not be considered "parallel".
### ("Parallel" slots have one element per hit, so subsetting a Hits object
### subsets 'from' and 'to' along with it.)
setMethod("parallel_slot_names", "Hits",
    function(x) c("from", "to", callNextMethod())
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Accessors
###
### Slot accessors: 'from'/'to' return the hit endpoints, 'nLnode'/'nRnode'
### the number of left/right nodes.
setGeneric("from", function(x, ...) standardGeneric("from"))
setMethod("from", "Hits", function(x) x@from)
setGeneric("to", function(x, ...) standardGeneric("to"))
setMethod("to", "Hits", function(x) x@to)
setGeneric("nLnode", function(x, ...) standardGeneric("nLnode"))
setMethod("nLnode", "Hits", function(x) x@nLnode)
setGeneric("nRnode", function(x, ...) standardGeneric("nRnode"))
setMethod("nRnode", "Hits", function(x) x@nRnode)
### For a SelfHits object the two node sets coincide, so expose a single
### 'nnode' accessor.
setGeneric("nnode", function(x, ...) standardGeneric("nnode"))
setMethod("nnode", "SelfHits", function(x) nLnode(x))
### Number of hits per left (resp. right) node, returned as an integer
### vector of length nLnode(x) (resp. nRnode(x)).
setGeneric("countLnodeHits", function(x, ...) standardGeneric("countLnodeHits"))
.count_Lnode_hits <- function(x) tabulate(from(x), nbins=nLnode(x))
setMethod("countLnodeHits", "Hits", .count_Lnode_hits)
setGeneric("countRnodeHits", function(x, ...) standardGeneric("countRnodeHits"))
.count_Rnode_hits <- function(x) tabulate(to(x), nbins=nRnode(x))
setMethod("countRnodeHits", "Hits", .count_Rnode_hits)
### query/subject API
### Aliases using the query/subject vocabulary: query = left nodes,
### subject = right nodes.
queryHits <- function(x, ...) from(x, ...)
subjectHits <- function(x, ...) to(x, ...)
queryLength <- function(x, ...) nLnode(x, ...)
subjectLength <- function(x, ...) nRnode(x, ...)
countQueryHits <- function(x, ...) countLnodeHits(x, ...)
countSubjectHits <- function(x, ...) countRnodeHits(x, ...)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity
###
### All validity helpers below return NULL when valid, or one or more
### error strings otherwise.
### Check that an nLnode/nRnode slot value is a single non-negative integer
### with no attributes. 'side' ("L" or "R") is only used to phrase the
### error message.
.valid.Hits.nnode <- function(nnode, side)
{
    if (!isSingleInteger(nnode) || nnode < 0L) {
        msg <- wmsg("'n", side, "node(x)' must be a single non-negative ",
                    "integer")
        return(msg)
    }
    if (!is.null(attributes(nnode))) {
        msg <- wmsg("'n", side, "node(x)' must be a single integer with ",
                    "no attributes")
        return(msg)
    }
    NULL
}
### Check that a 'from' or 'to' slot value is a bare integer vector whose
### values are all in [1, nnode]. 'what' and 'side' only shape the message.
.valid.Hits.from_or_to <- function(from_or_to, nnode, what, side)
{
    if (!(is.integer(from_or_to) && is.null(attributes(from_or_to)))) {
        msg <- wmsg("'", what, "' must be an integer vector ",
                    "with no attributes")
        return(msg)
    }
    if (anyMissingOrOutside(from_or_to, 1L, nnode)) {
        msg <- wmsg("'", what, "' must contain non-NA values ",
                    ">= 1 and <= 'n", side, "node(x)'")
        return(msg)
    }
    NULL
}
### Full Hits validity: both node counts and both endpoint vectors.
.valid.Hits <- function(x)
{
    c(.valid.Hits.nnode(nLnode(x), "L"),
      .valid.Hits.nnode(nRnode(x), "R"),
      .valid.Hits.from_or_to(from(x), nLnode(x), "from(x)", "L"),
      .valid.Hits.from_or_to(to(x), nRnode(x), "to(x)", "R"))
}
setValidity2("Hits", .valid.Hits)
### SelfHits additionally requires a single shared node set.
.valid.SelfHits <- function(x)
{
    if (nLnode(x) != nRnode(x))
        return("'nLnode(x)' and 'nRnode(x)' must be equal")
    NULL
}
setValidity2("SelfHits", .valid.SelfHits)
### SortedByQueryHits additionally requires the hits sorted by query.
.valid.SortedByQueryHits <- function(x)
{
    if (isNotSorted(from(x)))
        return("'queryHits(x)' must be sorted")
    NULL
}
setValidity2("SortedByQueryHits", .valid.SortedByQueryHits)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructors
###
### Very low-level constructor. Doesn't try to sort the hits by query.
### All arguments are assumed to already have the exact slot types;
### validity is enforced via check=TRUE.
.new_Hits <- function(Class, from, to, nLnode, nRnode, mcols)
{
    new2(Class, from=from, to=to, nLnode=nLnode, nRnode=nRnode,
                elementMetadata=mcols,
                check=TRUE)
}
### Low-level constructor. Sort the hits by query if Class extends
### SortedByQueryHits.
new_Hits <- function(Class, from=integer(0), to=integer(0),
                            nLnode=0L, nRnode=0L,
                            mcols=NULL)
{
    if (!isSingleString(Class))
        stop("'Class' must be a single character string")
    if (!extends(Class, "Hits"))
        stop("'Class' must be the name of a class that extends Hits")
    if (!(is.numeric(from) && is.numeric(to)))
        stop("'from' and 'to' must be integer vectors")
    if (!is.integer(from))
        from <- as.integer(from)
    if (!is.integer(to))
        to <- as.integer(to)
    if (!(isSingleNumber(nLnode) && isSingleNumber(nRnode)))
        stop("'nLnode' and 'nRnode' must be single integers")
    if (!is.integer(nLnode))
        nLnode <- as.integer(nLnode)
    if (!is.integer(nRnode))
        nRnode <- as.integer(nRnode)
    mcols <- normarg_mcols(mcols, Class, length(from))
    if (!extends(Class, "SortedByQueryHits")) {
        ## No need to sort the hits by query.
        ans <- .new_Hits(Class, from, to, nLnode, nRnode, mcols)
        return(ans)
    }
    ## Sort the hits by query.
    ## When metadata columns are present, pass an environment to the C
    ## code so it can report (as "revmap") the mapping from the sorted
    ## hits back to the supplied order; 'mcols' is then reordered to stay
    ## parallel with the sorted hits.
    if (!is.null(mcols)) {
        revmap_envir <- new.env(parent=emptyenv())
    } else {
        revmap_envir <- NULL
    }
    ans <- .Call2("Hits_new", Class, from, to, nLnode, nRnode, revmap_envir,
                  PACKAGE="S4Vectors")
    if (!is.null(mcols)) {
        if (exists("revmap", envir=revmap_envir)) {
            revmap <- get("revmap", envir=revmap_envir)
            mcols <- extractROWS(mcols, revmap)
        }
        mcols(ans) <- mcols
    }
    ans
}
## Turn the arguments passed thru '...' in the Hits()/SelfHits()
## constructors into a DataFrame of metadata columns, or return NULL
## when no argument was supplied (nargs() counts the actual args).
.make_mcols <- function(...)
{
if (nargs() == 0L)
return(NULL)
## We use 'DataFrame(..., check.names=FALSE)' rather than
## 'new_DataFrame(list(...))' because we want to make use of the
## former's ability to deparse unnamed arguments to generate column
## names for them. Unfortunately this means that the user won't be
## able to pass metadata columns named "row.names" or "check.names"
## because things like '.make_mcols(11:13, row.names=21:23)'
## or '.make_mcols(11:13, check.names=21:23)' won't work as expected.
## The solution would be to have a mid-level DataFrame constructor
## that has no extra arguments after the ellipsis and implements the
## same deparsing mechanism as DataFrame(), and to use it here.
DataFrame(..., check.names=FALSE)
}
### 2 high-level constructors.
## User-facing constructor for Hits objects. Arguments in '...' become
## metadata columns. With 'sort.by.query=TRUE' the returned object is a
## SortedByQueryHits (hits ordered by query).
Hits <- function(from=integer(0), to=integer(0), nLnode=0L, nRnode=0L, ...,
                 sort.by.query=FALSE)
{
    if (!isTRUEorFALSE(sort.by.query))
        stop("'sort.by.query' must be TRUE or FALSE")
    target_class <- if (sort.by.query) "SortedByQueryHits" else "Hits"
    metadata_cols <- .make_mcols(...)
    new_Hits(target_class, from, to, nLnode, nRnode, metadata_cols)
}
## User-facing constructor for SelfHits objects (a Hits object where
## left and right nodes are the same set, so a single 'nnode' count is
## used for both sides). Arguments in '...' become metadata columns.
SelfHits <- function(from=integer(0), to=integer(0), nnode=0L, ...,
                     sort.by.query=FALSE)
{
    if (!isTRUEorFALSE(sort.by.query))
        stop("'sort.by.query' must be TRUE or FALSE")
    target_class <- if (sort.by.query) "SortedByQuerySelfHits" else "SelfHits"
    metadata_cols <- .make_mcols(...)
    new_Hits(target_class, from, to, nnode, nnode, metadata_cols)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Conversion from old to new internal representation
###
setMethod("updateObject", "Hits",
function(object, ..., verbose=FALSE)
{
if (!is(try(object@queryHits, silent=TRUE), "try-error")) {
object_metadata <- object@metadata
object <- new_Hits("SortedByQueryHits", object@queryHits,
object@subjectHits,
object@queryLength,
object@subjectLength,
object@elementMetadata)
object@metadata <- object_metadata
}
callNextMethod()
}
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion
###
### --- Coercion within the Hits class hierarchy ---
### There are 4 classes in the Hits class hierarchy. We want to support back
### and forth coercion between all of them. That's 12 possible coercions.
### They can be divided into 3 groups:
### - Group A: 5 demotions
### - Group B: 5 promotions
### - Group C: 2 transversal coercions (from SelfHits to SortedByQueryHits
### and vice-versa)
###
### Group A: Demotions are taken care of by the "automatic coercion methods".
### (These methods get automatically defined at run time by the methods
### package the 1st time a given demotion is requested e.g. when doing
### as(x, "Hits") where 'x' is any Hits derivative.)
###
### Group B: The methods package also defines automatic coercion methods for
### promotions. Unfortunately, these methods almost never get it right. In
### particular, a serious problem with these automatic promotion methods is
### that they don't even try to validate the promoted object so they tend to
### silently produce invalid objects. This means that we need to define
### methods for all the coercions in group B.
###
### Group C: Note that coercions from SelfHits to SortedByQueryHits and
### vice-versa will actually be taken care of by the coercion methods from
### Hits to SortedByQueryHits and from Hits to SelfHits, respectively (both
### defined in group B).
###
### So the good news is that we only need to define coercion methods for
### group B.
## Promotion from Hits to SelfHits (and from SortedByQueryHits to
## SortedByQuerySelfHits). 'from' and 'to' are the standard arguments
## of a setAs() coercion method (object and target class name), not
## the from()/to() accessors.
.from_Hits_to_SelfHits <- function(from, to)
{
if (nLnode(from) != nRnode(from))
stop(wmsg(class(from), " object to coerce to ", to,
" must satisfy 'nLnode(x) == nRnode(x)'"))
## Replace the class attribute with that of a freshly created
## instance of the target class (keeps all existing slots).
class(from) <- class(new(to))
from
}
setAs("Hits", "SelfHits", .from_Hits_to_SelfHits)
setAs("SortedByQueryHits", "SortedByQuerySelfHits", .from_Hits_to_SelfHits)
### Note that the 'from' and 'to' arguments below are the standard arguments
### for coercion methods. They should not be confused with the 'from()'
### and 'to()' accessors for Hits objects!
## Promotion to a SortedByQuery* class: rebuild through new_Hits(),
## which sorts the hits by query (and permutes the metadata columns
## accordingly) for SortedByQueryHits targets.
.from_Hits_to_SortedByQueryHits <- function(from, to)
{
new_Hits(to, from(from), to(from), nLnode(from), nRnode(from),
mcols(from, use.names=FALSE))
}
setAs("Hits", "SortedByQueryHits", .from_Hits_to_SortedByQueryHits)
setAs("SelfHits", "SortedByQuerySelfHits", .from_Hits_to_SortedByQueryHits)
### 2 possible routes for this coercion:
### 1. Hits -> SelfHits -> SortedByQuerySelfHits
### 2. Hits -> SortedByQueryHits -> SortedByQuerySelfHits
### They are equivalent. However, the 1st route will fail early rather
### than after a possibly long and expensive coercion from Hits to
### SortedByQueryHits.
setAs("Hits", "SortedByQuerySelfHits",
function(from) as(as(from, "SelfHits"), "SortedByQuerySelfHits")
)
### --- Other coercions ---
setMethod("as.matrix", "Hits",
function(x)
{
ans <- cbind(from=from(x), to=to(x))
if (is(x, "SortedByQueryHits"))
colnames(ans) <- c("queryHits", "subjectHits")
ans
}
)
setMethod("as.table", "Hits", .count_Lnode_hits)
### FIXME: Coercions of Vector derivatives to DFrame are inconsistent.
### For some Vector derivatives (e.g. IRanges, GRanges) the object is stored
### "as is" in the 1st column of the returned DFrame, whereas for others (e.g.
### Hits below) the object is "dismantled" into various parallel components
### that end up in separate columns of the returned DFrame.
setAs("Hits", "DFrame",
function(from)
{
from_mcols <- mcols(from, use.names=FALSE)
if (is.null(from_mcols))
from_mcols <- make_zero_col_DFrame(length(from))
DataFrame(as.matrix(from), from_mcols, check.names=FALSE)
}
)
### S3/S4 combo for as.data.frame.Hits
## S3 method: go through the DFrame representation (node columns plus
## metadata columns) and let DFrame's as.data.frame() do the rest.
as.data.frame.Hits <- function(x, row.names=NULL, optional=FALSE, ...)
{
    df <- as(x, "DFrame")
    as.data.frame(df, row.names=row.names, optional=optional, ...)
}
setMethod("as.data.frame", "Hits", as.data.frame.Hits)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting
###
### The "extractROWS" method for Vector objects doesn't test the validity of
### the result so we override it.
setMethod("extractROWS", "SortedByQueryHits",
function(x, i)
{
ans <- callNextMethod()
pbs <- validObject(ans, test=TRUE)
if (is.character(pbs))
stop(wmsg("Problem(s) found when testing validity of ", class(ans),
" object returned by subsetting operation: ",
paste0(pbs, collapse=", "), ". Make sure to use a ",
"subscript that results in a valid ", class(ans),
" object."))
ans
}
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Display
###
setMethod("classNameForDisplay", "SortedByQueryHits",
function(x) sub("^SortedByQuery", "", class(x))
)
## Build the 1-line summary string used by summary() and show(), e.g.
## "Hits object with 3 hits and 1 metadata column".
.Hits_summary <- function(object)
{
    object_len <- length(object)
    object_mcols <- mcols(object, use.names=FALSE)
    object_nmc <- if (is.null(object_mcols)) 0L else ncol(object_mcols)
    ## Plain if/else for the pluralization: ifelse() is meant for
    ## vectorized input and is both slower and less clear on scalar
    ## conditions (same result here since the conditions are length 1).
    hits_noun <- if (object_len == 1L) "hit" else "hits"
    cols_noun <- if (object_nmc == 1L) "column" else "columns"
    paste0(classNameForDisplay(object), " object with ", object_len, " ",
           hits_noun, " and ", object_nmc, " metadata ", cols_noun)
}
### S3/S4 combo for summary.Hits
## S3 method forwarding to the internal helper above.
## NOTE(review): .Hits_summary() has no '...' formal, so any extra
## argument passed to summary() triggers an "unused argument" error --
## confirm whether '...' should be dropped here instead.
summary.Hits <- function(object, ...)
.Hits_summary(object, ...)
setMethod("summary", "Hits", summary.Hits)
## Character matrix consumed by the show() machinery: formatted
## from/to columns (renamed for SortedByQueryHits) followed by the
## formatted metadata columns.
.from_Hits_to_naked_character_matrix_for_display <- function(x)
{
m <- cbind(from=showAsCell(from(x)),
to=showAsCell(to(x)))
if (is(x, "SortedByQueryHits"))
colnames(m) <- c("queryHits", "subjectHits")
cbind_mcols_for_display(m, x)
}
setMethod("makeNakedCharacterMatrixForDisplay", "Hits",
.from_Hits_to_naked_character_matrix_for_display
)
## Workhorse behind the show() method below. 'margin' is prepended to
## every output line (useful when a Hits object is displayed as part
## of another object's show output). 'print.classinfo' adds the
## "<integer>"-style row under the column names; 'print.nnode' adds a
## footer with the node counts.
.show_Hits <- function(x, margin="", print.classinfo=FALSE,
print.nnode=FALSE)
{
cat(margin, summary(x), ":\n", sep="")
## makePrettyMatrixForCompactPrinting() assumes that head() and tail()
## work on 'x'.
out <- makePrettyMatrixForCompactPrinting(x)
if (print.classinfo) {
.COL2CLASS <- c(
from="integer",
to="integer"
)
## Column names must track those produced by as.matrix()/display.
if (is(x, "SortedByQueryHits"))
names(.COL2CLASS) <- c("queryHits", "subjectHits")
classinfo <- makeClassinfoRowForCompactPrinting(x, .COL2CLASS)
## A sanity check, but this should never happen!
stopifnot(identical(colnames(classinfo), colnames(out)))
out <- rbind(classinfo, out)
}
if (nrow(out) != 0L)
rownames(out) <- paste0(margin, " ", rownames(out))
## We set 'max' to 'length(out)' to avoid the getOption("max.print")
## limit that would typically be reached when 'showHeadLines' global
## option is set to Inf.
print(out, quote=FALSE, right=TRUE, max=length(out))
if (print.nnode) {
## Footer: wording adapted to the class of 'x'.
cat(margin, " -------\n", sep="")
if (is(x, "SortedByQueryHits")) {
cat(margin, " queryLength: ", nLnode(x),
" / subjectLength: ", nRnode(x), "\n", sep="")
} else {
if (is(x, "SelfHits")) {
cat(margin, " nnode: ", nnode(x), "\n", sep="")
} else {
cat(margin, " nLnode: ", nLnode(x),
" / nRnode: ", nRnode(x), "\n", sep="")
}
}
}
}
setMethod("show", "Hits",
function(object)
.show_Hits(object, print.classinfo=TRUE, print.nnode=TRUE)
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Concatenation
###
## All Hits objects being concatenated must describe graphs with the
## same number of left and right nodes as 'x'; error out otherwise.
.check_that_Hits_objects_are_concatenable <- function(x, objects)
{
    nL <- vapply(objects, slot, integer(1), "nLnode", USE.NAMES=FALSE)
    nR <- vapply(objects, slot, integer(1), "nRnode", USE.NAMES=FALSE)
    compatible <- all(nL == x@nLnode) && all(nR == x@nRnode)
    if (!compatible)
        stop(wmsg("the objects to concatenate are incompatible Hits ",
                  "objects by number of left and/or right nodes"))
}
## Concatenation: check node-count compatibility first, then delegate
## the actual row binding to the inherited (Vector) method.
.bindROWS_Hits_objects <-
function(x, objects=list(), use.names=TRUE, ignore.mcols=FALSE, check=TRUE)
{
objects <- prepare_objects_to_bind(x, objects)
.check_that_Hits_objects_are_concatenable(x, objects)
callNextMethod()
}
setMethod("bindROWS", "Hits", .bindROWS_Hits_objects)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Sorting
###
setMethod("sort", "SortedByQueryHits",
function(x, decreasing = FALSE, na.last = NA, by) {
byQueryHits <- missing(by) || is(by, "formula") &&
all.vars(by)[1L] == "queryHits" && !decreasing
if (!byQueryHits)
x <- as(x, "Hits")
callNextMethod()
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### selectHits()
###
### Return an integer vector parallel to the query (i.e. of length
### 'nLnode(hits)') except when select="all", in which case it's a no-op.
###
### 'nodup' must be TRUE or FALSE (the default) and can only be set to TRUE
### when 'select' is "first", "last" or "arbitrary", and when the input hits
### are sorted by query. When 'nodup=TRUE', a given element in the subject is
### not allowed to be assigned to more than one element in the query, which is
### achieved by following a simple "first come first served" pairing strategy.
### So the returned vector is guaranteed to contain unique non-NA values.
### Note that such vector represents a mapping between the query and subject
### that is one-to-zero-or-one in *both* directions. So it represents a
### pairing between the elements in query and subject, where a given element
### belongs to at most one pair.
### A note about the "first come first served" pairing strategy: This strategy
### is simple and fast, but, in general, it won't achieve a "maximal pairing"
### (i.e. a pairing with the most possible number of pairs) for a given input
### Hits object. However it actually does produce a maximal pairing if the
### Hits object is the result of call to findMatches() (with select="all")'.
### Also, in that case, this pairing strategy is symmetric i.e. the resulting
### pairs are not affected by switching 'x' and 'table' in the call to
### findMatches() (or by transposing the input Hits object).
###
### Finally note that when 'select' is "first" or "last" and 'nodup' is FALSE,
### or when 'select' is "count", the output of selectHits() is not affected
### by the order of the hits in the input Hits object.
## See the long comment block above for the semantics of 'select',
## 'nodup', and the "first come first served" pairing strategy.
## Returns 'hits' unchanged when select="all", otherwise an integer
## vector parallel to the query (length 'nLnode(hits)').
selectHits <- function(hits,
select=c("all", "first", "last", "arbitrary", "count"),
nodup=FALSE,
rank)
{
if (!is(hits, "Hits"))
stop("'hits' must be a Hits object")
select <- match.arg(select)
if (!isTRUEorFALSE(nodup))
stop(wmsg("'nodup' must be TRUE or FALSE"))
if (nodup && !(select %in% c("first", "last", "arbitrary")))
stop(wmsg("'nodup=TRUE' is only supported when 'select' ",
"is \"first\", \"last\", or \"arbitrary\""))
if (!missing(rank) && (!(select %in% c("first", "last")) || nodup))
stop(wmsg("'rank' is only supported when 'select' ",
"is \"first\" or \"last\" and 'nodup' is FALSE"))
if (select == "all")
return(hits) # no-op
hits_from <- from(hits)
hits_to <- to(hits)
hits_nLnode <- nLnode(hits)
hits_nRnode <- nRnode(hits)
if (!missing(rank)) {
## Select by rank instead of by subject index: temporarily replace
## the subject indices with their ranks (ties broken by first
## occurrence), run the C selection on the ranks, then map the
## answer back to the original subject indices via 'revmap'.
r <- rank(hits, ties.method="first", by=rank)
revmap <- integer()
revmap[r] <- hits_to
hits_to <- r
}
ans <- .Call2("select_hits", hits_from, hits_to, hits_nLnode, hits_nRnode,
select, nodup,
PACKAGE="S4Vectors")
if (!missing(rank))
ans <- revmap[ans]
ans
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### breakTies()
###
### Redundant with selectHits. The only difference is that it returns a Hits
### object. That alone doesn't justify introducing a new verb. Should be
### controlled via an extra arg to selectHits() e.g. 'as.Hits' (FALSE by
### default). H.P. -- Oct 16, 2016
## Keep a single hit per query (the "first" or "last" one, possibly
## according to 'rank') and return the result as a SortedByQueryHits
## object. Queries with no hit yield NA in 'to' and are dropped.
breakTies <- function(x, method=c("first", "last"), rank) {
if (!is(x, "Hits"))
stop("'x' must be a Hits object")
method <- match.arg(method)
## A missing 'rank' propagates as missing into selectHits().
to <- selectHits(x, method, rank=rank)
.new_Hits("SortedByQueryHits", which(!is.na(to)), to[!is.na(to)],
nLnode(x), nRnode(x), NULL)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### revmap()
###
### NOT exported (but used in IRanges).
### TODO: Move revmap() generic from AnnotationDbi to S4Vectors. Then split
### the code below in 2 revmap() methods: one for SortedByQueryHits objects
### and one for Hits objects.
## Swap the two sides of the bipartite graph: 'from' <-> 'to' and
## 'nLnode' <-> 'nRnode'.
revmap_Hits <- function(x)
{
if (is(x, "SortedByQueryHits")) {
## Note that:
## - If 'x' is a valid SortedByQueryHits object (i.e. the hits in it
## are sorted by query), then 'revmap_Hits(x)' returns a
## SortedByQueryHits object where hits are "fully sorted" i.e.
## sorted by query first and then by subject.
## - Because revmap_Hits() reorders the hits by query, doing
## 'revmap_Hits(revmap_Hits(x))' brings back 'x' but with the hits
## in it now "fully sorted".
return(new_Hits(class(x), to(x), from(x), nRnode(x), nLnode(x),
mcols(x, use.names=FALSE)))
}
## Plain Hits: just swap the slots, no re-sorting or re-validation
## needed.
BiocGenerics:::replaceSlots(x, from=to(x), to=from(x),
nLnode=nRnode(x), nRnode=nLnode(x),
check=FALSE)
}
### FIXME: Replace this with "revmap" method for Hits objects.
## S3/S4 combo for t(): the S3 method simply calls t() again, relying
## on the S4 generic for t() (created by the setMethod() call below)
## to dispatch to revmap_Hits() -- the usual Bioconductor S3/S4 combo
## pattern, not an infinite recursion.
t.Hits <- function(x) t(x)
setMethod("t", "Hits", revmap_Hits)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Remap the left and/or right nodes of a Hits object.
###
### Returns 'arg' as a NULL, an integer vector, or a factor.
## Check and normalize the 'Lnodes.remapping' / 'Rnodes.remapping'
## argument of remapHits(). 'side' is "L" or "R" (used to word the
## error messages); 'old.nnode' is the current node count on that side.
## Returns NULL, an integer vector, or a factor, of length 'old.nnode'.
.normarg_nodes.remapping <- function(arg, side, old.nnode)
{
    if (is.null(arg))
        return(arg)
    if (!is.factor(arg)) {
        if (!is.numeric(arg))
            ## Fixed typo in the reported argument name
            ## ("remappping" -> "remapping").
            stop("'" , side, "nodes.remapping' must be a vector ",
                 "of integers")
        if (!is.integer(arg))
            arg <- as.integer(arg)
    }
    if (length(arg) != old.nnode)
        stop("'" , side, "nodes.remapping' must be of length 'n",
             side, "node(x)'")
    arg
}
## Check and normalize the 'new.nLnode' / 'new.nRnode' argument of
## remapHits(). 'map' is the corresponding (already normalized)
## *nodes.remapping argument. Returns a single integer: when 'map' is
## a factor and 'arg' is NA, the new node count defaults to the number
## of levels of the factor.
.normarg_new.nnode <- function(arg, side, map)
{
    if (!isSingleNumberOrNA(arg))
        stop("'new.n", side, "node' must be a single number or NA")
    if (!is.integer(arg))
        arg <- as.integer(arg)
    if (is.null(map))
        return(arg)
    if (is.factor(map)) {
        if (is.na(arg))
            return(nlevels(map))
        if (arg < nlevels(map))
            stop("supplied 'new.n", side, "node' must ",
                 "be >= 'nlevels(", side, "nodes.remapping)'")
        return(arg)
    }
    if (is.na(arg))
        ## Fixed the argument name in the message ("s.remapping" ->
        ## "nodes.remapping") so it matches the actual formals of
        ## remapHits() (Lnodes.remapping / Rnodes.remapping).
        stop("'new.n", side, "node' must be specified when ",
             "'" , side, "nodes.remapping' is specified and is not a factor")
    arg
}
## Remap the left and/or right nodes of a SortedByQueryHits object and
## collapse the hits that become duplicated. When 'with.counts=TRUE',
## a "counts" metadata column reports, for each remaining hit, the
## number of original hits it represents.
remapHits <- function(x, Lnodes.remapping=NULL, new.nLnode=NA,
Rnodes.remapping=NULL, new.nRnode=NA,
with.counts=FALSE)
{
if (!is(x, "SortedByQueryHits"))
stop("'x' must be a SortedByQueryHits object")
Lnodes.remapping <- .normarg_nodes.remapping(Lnodes.remapping, "L",
nLnode(x))
new.nLnode <- .normarg_new.nnode(new.nLnode, "L", Lnodes.remapping)
Rnodes.remapping <- .normarg_nodes.remapping(Rnodes.remapping, "R",
nRnode(x))
new.nRnode <- .normarg_new.nnode(new.nRnode, "R", Rnodes.remapping)
if (!isTRUEorFALSE(with.counts))
stop("'with.counts' must be TRUE or FALSE")
## --- Remap the left nodes ---
x_from <- from(x)
if (is.null(Lnodes.remapping)) {
if (is.na(new.nLnode))
new.nLnode <- nLnode(x)
} else {
if (is.factor(Lnodes.remapping))
Lnodes.remapping <- as.integer(Lnodes.remapping)
if (anyMissingOrOutside(Lnodes.remapping, 1L, new.nLnode))
stop(wmsg("'Lnodes.remapping' cannot contain NAs, or values that ",
"are < 1, or > 'new.nLnode'"))
x_from <- Lnodes.remapping[x_from]
}
## --- Remap the right nodes (mirror of the above) ---
x_to <- to(x)
if (is.null(Rnodes.remapping)) {
if (is.na(new.nRnode))
new.nRnode <- nRnode(x)
} else {
if (is.factor(Rnodes.remapping))
Rnodes.remapping <- as.integer(Rnodes.remapping)
if (anyMissingOrOutside(Rnodes.remapping, 1L, new.nRnode))
stop(wmsg("'Rnodes.remapping' cannot contain NAs, or values that ",
"are < 1, or > 'new.nRnode'"))
x_to <- Rnodes.remapping[x_to]
}
x_mcols <- mcols(x, use.names=FALSE)
## Local helper: add (or replace, with a warning) the "counts"
## metadata column.
add_counts <- function(counts) {
if (is.null(x_mcols))
return(DataFrame(counts=counts))
if ("counts" %in% colnames(x_mcols))
warning("'x' has a \"counts\" metadata column, replacing it")
x_mcols$counts <- counts
x_mcols
}
if (is.null(Lnodes.remapping) && is.null(Rnodes.remapping)) {
## No remapping at all: nothing can collapse, every count is 1.
if (with.counts) {
counts <- rep.int(1L, length(x))
x_mcols <- add_counts(counts)
}
} else {
## Collapse duplicated (from, to) pairs, keeping the first
## occurrence of each.
sm <- selfmatchIntegerPairs(x_from, x_to)
if (with.counts) {
counts <- tabulate(sm, nbins=length(sm))
x_mcols <- add_counts(counts)
keep_idx <- which(counts != 0L)
} else {
keep_idx <- which(sm == seq_along(sm))
}
x_from <- x_from[keep_idx]
x_to <- x_to[keep_idx]
x_mcols <- extractROWS(x_mcols, keep_idx)
}
new_Hits(class(x), x_from, x_to, new.nLnode, new.nRnode, x_mcols)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### SelfHits methods
###
### TODO: Make isSelfHit() and isRedundantHit() generic functions with
### methods for SelfHits objects.
###
### A "self hit" is an edge from a node to itself. For example, the 2nd hit
### in the SelfHits object below is a self hit (from 3rd node to itself):
### SelfHits(c(3, 3, 3, 4, 4), c(2:4, 2:3), 4)
## Flag the "self hits" (edges connecting a node to itself). Returns a
## logical vector parallel to 'x'.
isSelfHit <- function(x)
{
    if (!is(x, "SelfHits"))
        stop("'x' must be a SelfHits object")
    lnode <- from(x)
    rnode <- to(x)
    lnode == rnode
}
### When there is more than 1 edge between 2 given nodes (regardless of
### orientation), the extra edges are considered to be "redundant hits". For
### example, hits 3, 5, 7, and 8, in the SelfHits object below are redundant
### hits:
### SelfHits(c(3, 3, 3, 3, 3, 4, 4, 4), c(3, 2:4, 2, 2:3, 2), 4)
### Note that this is regardless of the orientation of the edge so hit 7 (edge
### 4-3) is considered to be redundant with hit 4 (edge 3-4).
## Flag the "redundant hits": extra edges between 2 nodes that are
## already connected, regardless of edge orientation. Returns a
## logical vector parallel to 'x'.
isRedundantHit <- function(x)
{
    if (!is(x, "SelfHits"))
        stop("'x' must be a SelfHits object")
    ## Normalize each edge so its smaller node comes first, then flag
    ## the duplicated pairs (orientation is ignored on purpose).
    lo <- pmin.int(from(x), to(x))
    hi <- pmax.int(from(x), to(x))
    duplicatedIntegerPairs(lo, hi)
}
### Specialized constructor.
### Return a SortedByQuerySelfHits object.
### About 10x faster and uses 4x less memory than my first attempt in pure
### R below.
### NOT exported.
## C implementation: for each group (of size 'group.sizes[i]'),
## generate all within-group (from, to) pairs. Returns a
## SortedByQuerySelfHits object.
makeAllGroupInnerHits <- function(group.sizes, hit.type=0L)
{
if (!is.integer(group.sizes))
stop("'group.sizes' must be an integer vector")
if (!isSingleNumber(hit.type))
stop("'hit.type' must be a single integer")
if (!is.integer(hit.type))
hit.type <- as.integer(hit.type)
.Call2("make_all_group_inner_hits", group.sizes, hit.type,
PACKAGE="S4Vectors")
}
### Return a SortedByQuerySelfHits object.
### NOT exported.
### TODO: Remove this.
## Pure-R reference implementation of makeAllGroupInnerHits().
## 'GS' is the vector of group sizes.
makeAllGroupInnerHits.old <- function(GS)
{
NG <- length(GS) # nb of groups
## First Element In group i.e. first elt associated with each group.
FEIG <- cumsum(c(1L, GS[-NG]))
GSr <- c(0L, GS[-NG])
CGSr2 <- cumsum(GSr * GSr)
GS2 <- GS * GS
nnode <- sum(GS) # length of original vector (i.e. before grouping)
## Original Group Size Assignment i.e. group size associated with each
## element in the original vector.
OGSA <- rep.int(GS, GS) # is of length 'nnode'
ans_from <- rep.int(seq_len(nnode), OGSA)
NH <- length(ans_from) # same as sum(GS2)
## Hit Group Assignment i.e. group associated with each hit.
HGA <- rep.int(seq_len(NG), GS2)
## Hit Group Size Assignment i.e. group size associated with each hit.
## NOTE(review): HGSA is computed but never used below.
HGSA <- GS[HGA]
## Modular arithmetic enumerating, within each group, all (from, to)
## pairs in the expected order.
ans_to <- (0:(NH-1L) - CGSr2[HGA]) %% GS[HGA] + FEIG[HGA]
SelfHits(ans_from, ans_to, nnode, sort.by.query=TRUE)
}
|
ab08868157e29c4338c10d0203b898dac7a364df | b0966c4d1e4d8af78772b78e8f4be5968ef7f837 | /man/diffTablej.Rd | 5a718e3132d1327559f987819d36708d682d40e6 | [] | no_license | amsantac/diffeR | c2949d661910ebcd2d70da418c2d963817c97157 | 36ec0f4a8128156fa3c3e8fcd1e02d08994bafe5 | refs/heads/master | 2023-02-23T17:00:29.394919 | 2023-02-13T17:06:37 | 2023-02-13T17:06:37 | 30,596,442 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,621 | rd | diffTablej.Rd | \name{diffTablej}
\alias{diffTablej}
\title{
calculates difference metrics at the category level from a square contingency table
}
\description{
calculates quantity, exchange and shift components of difference, as well as the overall difference, at the category level from a contingency table derived from the crosstabulation between a comparison variable (or variable at time \emph{t}), and a reference variable (or variable at time \emph{t}+1).
Quantity difference is defined as the amount of difference between the reference variable and a comparison variable that is due to the less than maximum match in the proportions of the categories. Exchange consists of a transition from category \emph{i} to category \emph{j} in some observations and a transition from category \emph{j} to category \emph{i} in an identical number of other observations. Shift refers to the difference remaining after subtracting quantity difference and exchange from the overall difference.
}
\usage{
diffTablej(ctmatrix, digits = 0, analysis = "error")
}
\arguments{
\item{ctmatrix}{
matrix representing a square contingency table between a comparison variable (rows) and a reference variable (columns)
}
\item{digits}{
integer indicating the number of decimal places to be used
}
\item{analysis}{
character string either "error" (default) or "change". The output table shows category-level omission error, agreement and comission error in the "error" analysis, and category-level gain, persistence and loss in the "change" analysis
}
}
\value{
data.frame containing difference metrics at the category level between a comparison variable (rows) and a reference variable (columns). Output values are given in the same units as \code{ctmatrix}
}
\references{
Pontius Jr., R.G., Millones, M. 2011. \emph{Death to Kappa: birth of quantity disagreement and allocation disagreement for accuracy assessment}. International Journal of Remote Sensing 32 (15), 4407-4429.
Pontius Jr., R.G., Santacruz, A. 2014. \emph{Quantity, exchange and shift components of difference in a square contingency table}. International Journal of Remote Sensing 35 (21), 7543-7554.
}
\examples{
comp <- rast(system.file("external/comparison.rst", package = "diffeR"))
ref <- rast(system.file("external/reference.rst", package = "diffeR"))
ctmatCompRef <- crosstabm(comp, ref)
diffTablej(ctmatCompRef)
# Adjustment to population assuming a stratified random sampling
(population <- matrix(c(1, 2, 3, 2000, 4000, 6000), ncol = 2))
ctmatCompRef <- crosstabm(comp, ref, percent = TRUE, population = population)
diffTablej(ctmatCompRef)
}
\keyword{ spatial }
|
09e6cf5ba511e6ea946bd68b729f4c6c1b549d6d | f79cd4e052c5cbb24e7ef3e4bec1c39f9ce4e413 | /BEMTOOL-ver2.5-2018_0901/src/biol/bmtALADYM/ALADYM-ver12.3-2017_0501/gui/biological/biological.sexratio.r | 164b9be8d2f979295593fbadef066b46661942ce | [] | no_license | gresci/BEMTOOL2.5 | 4caf3dca3c67423af327a8ecb1e6ba6eacc8ae14 | 619664981b2863675bde582763c5abf1f8daf34f | refs/heads/master | 2023-01-12T15:04:09.093864 | 2020-06-23T07:00:40 | 2020-06-23T07:00:40 | 282,134,041 | 0 | 0 | null | 2020-07-24T05:47:24 | 2020-07-24T05:47:23 | null | UTF-8 | R | false | false | 1,617 | r | biological.sexratio.r | # ALADYM Age length based dynamic model - version 12.3
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# ALADYM is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
## GUI fragment (RGtk2): builds the "Sex ratio F/F+M" row (label +
## numeric entry) and packs it into a vertical box for the biological
## parameters tab.
vboxSEXRATIO <- gtkVBox(FALSE, 5)
hboxSEXRATIO <- gtkHBox(FALSE, 5)
hboxSEXRATIO$packStart(gtkLabel("Sex ratio F/F+M"), expand = FALSE, fill = FALSE, padding = 5)
## Entry widget holding the sex ratio value; NUMERICAL_ENTRY_LENGTH is
## defined elsewhere in the application.
entry_SR_value <- gtkEntry()
gtkEntrySetWidthChars(entry_SR_value, NUMERICAL_ENTRY_LENGTH)
## ---------------------------------------------------------------------------
## ---------------------------------------------------------------------------
## ---------------------------------------------------------------------------
## additional code for BEMTOOL integration
## When running inside BEMTOOL, the sex ratio comes from the selected
## population object and the field is made read-only; otherwise it
## defaults to 0.5 and stays editable.
if (IN_BEMTOOL) {
gtkEntrySetText(entry_SR_value, as.numeric(as.character(Populations[[ALADYM_spe]]@sexratio)))
gtkEntrySetEditable(entry_SR_value, FALSE)
} else {
gtkEntrySetText(entry_SR_value, 0.5 )
}
## ---------------------------------------------------------------------------
## ---------------------------------------------------------------------------
## Assemble: entry into the row, row into the vertical container.
hboxSEXRATIO$packStart(entry_SR_value, expand = FALSE, fill = FALSE, padding = 5)
vboxSEXRATIO$packStart(hboxSEXRATIO, expand = FALSE, fill = FALSE, padding = 5)
|
2f6a8d75264d7a1461aa88f4ba8a8e86ab9a29e9 | 5a739c45535c97844af5dfc126be6954e7747890 | /man/colorplaner.Rd | d7edaef27fb560cf52c500e9aeb3ff232d8dfc07 | [] | no_license | cran/colorplaner | d0ed6c8c910c78801bd57a35ad67ee63b207b1f6 | bcd3ce49ef8b4a778efd15854c815f9517069f34 | refs/heads/master | 2020-12-25T22:47:26.090640 | 2016-11-01T11:07:29 | 2016-11-01T11:07:29 | 68,783,370 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,224 | rd | colorplaner.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colorplaner.R
\name{colorplaner}
\alias{colorplaner}
\title{colorplaner: ggplot2 Extension to Visualize Two Variables Per Color
Aesthetic through Color Space Projection}
\description{
Add additional dimensionality to visualizations by using the color and/or
fill aesthetics to convey the values of two continuous variables each. By
projecting variable values onto YUV color space, a scale is created that
allows viewers to intuitively determine the values of both variables from the
single displayed color. Includes two new scales and a new guide for ggplot2.
See \code{\link{scale_color_colorplane}} for usage.
}
\section{Requirement for Package Attachment}{
At present, \code{guide_colorplane} will only function when the colorplaner
package is attached to the search list. For scripting or interactive use,
use \code{library(colorplaner)}. For package development, add colorplaner
to the Depends list in your DESCRIPTION file.
This requirement exists because
ggplot2 guides function through the use of S3 generics and methods, but the
generic functions are not exported from the ggplot package. Without access
to the generics, the methods for the colorplane guide cannot be properly
registered and will only be found by the dispatcher if in the search path.
Check \url{https://github.com/wmurphyrd/colorplaner/issues/27} for current
status and progress towards resolving this issue.
}
\section{Warning Message About Ignoring Unknown Aesthetics}{
Layers now produce a warning message when unrecognized aesthetics are found
but have no mechanism for notifying them of aesthetics handled by scales.
The warning can be avoided by mapping \code{color2}/\code{fill2} at the plot
level (i.e. in the initial \code{ggplot()} statement). If you want to avoid
colorplane mapping on all layers, map \code{color}/\code{fill} only on the
layers you want, as in the example below.
}
\examples{
library(ggplot2)
ggplot(iris, aes(x = Sepal.Length, y = Sepal.Width,
colour2 = Petal.Width)) +
geom_point(aes(colour = Petal.Length)) +
geom_line(aes(linetype = Species)) +
scale_color_colorplane()
}
|
378ac09d61c6b460a81933e0cc56c8df99d2b818 | afdeadce009c26d559390aac6568346332f412f5 | /h4n/slides/shiny.R | c2a5963221fc7d9eb9d3646313c1ddc73cea7113 | [] | no_license | mbannert/slidedeck | 12b8d97a5a8719cc98b4e8267bee6894dc6a9d8d | 26be5e4b3e47ff07980fcd9200cb24f0dcf3ea55 | refs/heads/master | 2022-11-12T05:20:02.470584 | 2020-06-18T22:22:09 | 2020-06-18T22:22:09 | 267,153,757 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,519 | r | shiny.R | library(grid)
library(shiny)
source("bullet.R")
ui <- fluidPage(
sidebarLayout(
sidebarPanel(
numericInput("members", "Number of participants", 8),
numericInput("rstats", "R / Py", 8),
numericInput("sql", "SQL", 8),
numericInput("git", "git", 8)
),
mainPanel(
plotOutput("lp"),
)
)
)
## Server: rebuilds the bullet graph whenever one of the inputs changes.
server <- function(input, output){
## NOTE(review): the UI declares no plotOutput("tp"), so this render
## is never displayed -- confirm whether it can be removed.
output$tp <- renderPlot({
plot(rnorm(100))
})
output$lp <- renderPlot({
## Average share of participants (in %) across the 3 technologies.
techavg <- mean(c(input$rstats,
input$sql,
input$git) / input$members)*100
## One row per technology: fixed low/mean/high bands, the average
## as the target marker, and the per-technology share as the value.
df1 <- data.frame(units = c("R/Py(%)","SQL(%)","git(%)"),
low = c(25,25,25),
mean = c(50,50,50),
high = c(100,100,100),
target = c(techavg,techavg,techavg),
value = c(100*(input$rstats/input$members),
100*(input$sql/input$members),
100*(input$git/input$members)
)
)
## gridBulletGraphH() comes from bullet.R (sourced at the top of
## this script).
g <- gridBulletGraphH(df1,
bcol = c("#999999","#CCCCCC","#E1E1E1"),
vcol = "#333333", font = 20)
g + title(paste("Usage of Technologies Among Participants",
sep=" "))
})
}
## Launch the app on a fixed port.
shinyApp(ui = ui, server = server,
options = list(port = 1234))
73894a04ddb722019677aae2b99356450dd30324 | c76ca75597ccd2ae7457d6a77ed69723037dd5fb | /testlib/script2.R | caa10766b93a818e8dd523fed65f1a33997e461d | [
"MIT"
] | permissive | gramener/gramex | 2f2fe3f09b4e9fb5e25e24e45c1a5c3a3fde5a8d | 928caa9d3e5508b5ec852e41965441cf496aa068 | refs/heads/master | 2023-08-10T11:57:52.426322 | 2023-07-06T20:37:17 | 2023-07-06T20:37:48 | 169,192,276 | 153 | 61 | NOASSERTION | 2023-09-13T09:21:32 | 2019-02-05T04:53:44 | Python | UTF-8 | R | false | false | 44 | r | script2.R | second = function() {
10 + 20 + 30 + 40
}
|
5e7c8894109b62c5cdfa8f8971a7fef99f33db96 | edf2d3864db8751074133b2c66a7e7995a960c6b | /man/print.CrossValidation.Rd | e457afe3d196679b735ca719548ea7db986a651e | [] | no_license | jkrijthe/RSSL | 78a565b587388941ba1c8ad8af3179bfb18091bb | 344e91fce7a1e209e57d4d7f2e35438015f1d08a | refs/heads/master | 2023-04-03T12:12:26.960320 | 2023-03-13T19:21:31 | 2023-03-13T19:21:31 | 7,248,018 | 65 | 24 | null | 2023-03-28T06:46:23 | 2012-12-19T21:55:39 | R | UTF-8 | R | false | true | 356 | rd | print.CrossValidation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CrossValidation.R
\name{print.CrossValidation}
\alias{print.CrossValidation}
\title{Print CrossValidation object}
\usage{
\method{print}{CrossValidation}(x, ...)
}
\arguments{
\item{x}{CrossValidation object}
\item{...}{Not used}
}
\description{
Print CrossValidation object
}
|
b6eaa4e67529741a189c4faaa4b753db565e93a3 | 00c2ff4fe6659ab5dcb6a80c6c379d4f0ff6c5bd | /R/batchSize.R | 323674fc3f5c8cb94903aaeb0d137ebcae9ba701 | [] | no_license | cran/elrm | 8155d5f6635870a53096aaefe382eec74c309f13 | ebfea7e5f2ae58857b7c9771530183da7a36ab99 | refs/heads/master | 2021-10-28T08:57:13.930409 | 2021-10-26T07:30:02 | 2021-10-26T07:30:02 | 17,695,777 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 523 | r | batchSize.R | `batchSize` <-
function(vec)
{
N = length(vec);
b <- floor(N^(1/3)); # batch size
a <- floor(N/b); # number of batches
func = function(bs)
{
batches = bm(vals=vec,bs=round(bs,0),g=id)$Ys;
ac = acf(x=batches,lag.max=2,plot=F)$acf[2];
return(abs(ac));
}
if(a > 10)
{
lower = b;
upper = floor(N/10);
b = optimize(f=func,lower=lower,upper=upper)$minimum;
}
return(round(b,0));
}
|
98a2051c8d8769c57b82a7e24836c90ddd1468a5 | ae7d68c9dac684839a4c59373b332b4c6c863584 | /man/anomalize_methods.Rd | 049f305ac780ed786b2df5dfb3cf776304cb50b6 | [] | no_license | business-science/anomalize | 8988753117702c0230f62bfe125785b98fd5c484 | f64272b84127b1b5a517d19f105b48564be3e244 | refs/heads/master | 2023-03-06T22:23:06.880012 | 2023-02-08T21:04:35 | 2023-02-08T21:05:18 | 125,931,913 | 328 | 70 | null | 2021-06-16T11:14:58 | 2018-03-19T23:08:52 | R | UTF-8 | R | false | true | 1,725 | rd | anomalize_methods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anomalize_methods.R
\name{anomalize_methods}
\alias{anomalize_methods}
\alias{iqr}
\alias{gesd}
\title{Methods that power anomalize()}
\usage{
iqr(x, alpha = 0.05, max_anoms = 0.2, verbose = FALSE)
gesd(x, alpha = 0.05, max_anoms = 0.2, verbose = FALSE)
}
\arguments{
\item{x}{A vector of numeric data.}
\item{alpha}{Controls the width of the "normal" range.
Lower values are more conservative while higher values are less prone
to incorrectly classifying "normal" observations.}
\item{max_anoms}{The maximum percent of anomalies permitted to be identified.}
\item{verbose}{A boolean. If \code{TRUE}, will return a list containing useful information
about the anomalies. If \code{FALSE}, just returns a vector of "Yes" / "No" values.}
}
\value{
Returns character vector or list depending on the value of \code{verbose}.
}
\description{
Methods that power anomalize()
}
\examples{
set.seed(100)
x <- rnorm(100)
idx_outliers <- sample(100, size = 5)
x[idx_outliers] <- x[idx_outliers] + 10
iqr(x, alpha = 0.05, max_anoms = 0.2)
iqr(x, alpha = 0.05, max_anoms = 0.2, verbose = TRUE)
gesd(x, alpha = 0.05, max_anoms = 0.2)
gesd(x, alpha = 0.05, max_anoms = 0.2, verbose = TRUE)
}
\references{
\itemize{
\item The IQR method is used in \href{https://github.com/robjhyndman/forecast/blob/master/R/clean.R}{\code{forecast::tsoutliers()}}
\item The GESD method is used in Twitter's \href{https://github.com/twitter/AnomalyDetection}{\code{AnomalyDetection}} package and is also available as a function in \href{https://github.com/raunakms/GESD/blob/master/runGESD.R}{@raunakms's GESD method}
}
}
\seealso{
\code{\link[=anomalize]{anomalize()}}
}
|
91afa10eb5b0ff413662a1b5b8dafd31aa48f19d | c0b3c698e5c0e45ba019766645c8c68640de2bb9 | /PERCEPTRON-hardcoding.R | cf234254e32b67142fde822df6791634089a71c7 | [] | no_license | EdMwa/perceptron-pocket-algorithm | dc382c9a37ef7aae04daae7b229a15f3bb9c8eb0 | 151e8429dcae453fd1d1a872afecbd10b2f7bb85 | refs/heads/master | 2020-12-01T23:08:26.652651 | 2016-09-06T00:18:30 | 2016-09-06T00:18:30 | 67,243,620 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,692 | r | PERCEPTRON-hardcoding.R |
###############################Some notes/definitions and assumptions#########################################
#W:"weight" is the transposed vector of w0, w1, w2,...wn
#X:"features" is vector of x0, x1, x3,...xn
#h(x): "hypothesis" is represented by W
#Y: "response" is the variable we are attempting to predict
#Y is assumed to be a binary class +1, -1
#LR: Learning Rate
##############################################WORKFLOW########################################################
#1. Load Iris data from data() & necessary package/s
#2. Data overview
#3. Feature extraction--generate a dataset with selected features & assign Y(response)==+1, -1 to either feature
#4. Hard code the pocket algorithm
#5. Test algorithm
#6. Perform prediction on test set
#7. Assess the performance of the algorithm
#############################################STEP 1 & 2#####################################################
library(MASS)
#data(package = .packages(all.available = TRUE))
#?iris
# Data overview: column names, class balance, summary statistics, dimensions.
names(iris)
summary(iris$Species)
summary(iris)
dim(iris)
################################STEP 3############################################################################
# Consider all possible bi-variate scatter plots, coloured by species.
pairs(iris[,1:4], main = "Iris Data", pch = 21, bg = c("red", "pink", "blue")[iris$Species],
      oma=c(4,4,6,12))#set outer margins-bottom,left,top,right
par(xpd=TRUE) #Allow plotting of the legend outside the plots region within the space left to the right
legend(0.85, 0.7, as.vector(unique(iris$Species)),
       fill=c("red", "pink", "blue"))
# The features Sepal.Width, Petal.Length and Petal.Width show setosa well
# clustered away from the other two species -- use them for prediction.
# Build the training feature matrix X from the selected columns.
# NOTE: only 2 features kept; an earlier attempt with 3 hit dimension problems.
X <- cbind(iris$Sepal.Width,iris$Petal.Width)
# Label setosa as +1 and the other two species together as -1.
Y <- ifelse(iris$Species == 'setosa',+1,-1)
plot(X, cex = 0.5, xlab = '',ylab = '') #Generic plot
# Mark setosa points with '+' and all other species with '-'.
points(subset(X, Y == +1), col = 'blue',pch='+', cex = 1)
points(subset(X, Y == -1), col = 'red',pch='-', cex = 1)
###########################################################################################################
##############################PERCEPTRON POCKET ALGORITHM##################################################
###########################################################################################################
# The core perceptron learning algorithm
# 1) Initialize the weight vector W to 0
# 2) Calculate hypothesis: h(x) = sign(transpose of W * (X))
# 3) Pick any misclassified point/s-not accurately predicted (xn, yn)
# 4) Update the weight vector by w <- w + yn * xn
# 5) Repeat until no points are misclassified
perceptron <- function(X, Y, LR = 1, max_iter = 10000) {
  # Train a perceptron classifier.
  #
  # X:        numeric matrix of features, one observation per row.
  # Y:        vector of class labels coded +1 / -1, one per row of X.
  # LR:       learning rate scaling each weight correction (the original
  #           accepted this argument but never used it; it is applied now).
  # max_iter: maximum number of update iterations before giving up
  #           (previously hard-coded to 10000).
  #
  # Returns the weight vector W (length ncol(X)) and prints whether the
  # algorithm converged.
  #
  # Fixes vs. the original: X[mispredicted, drop = FALSE] was missing the
  # column comma, so the matrix was indexed linearly as a vector and the
  # weight update operated on garbage; W was initialized as a logical vector.
  #
  # NOTE(review): despite the file header, this is the plain perceptron rule,
  # not the pocket variant -- no best-so-far weight vector is kept.
  converged <- FALSE
  # Start from the zero weight vector (numeric, not logical).
  W <- numeric(ncol(X))
  for (i in seq_len(max_iter)) {
    # Hypothesis: +1 where the linear score is strictly positive, else -1.
    scores <- as.vector(X %*% W)
    h <- ifelse(scores > 0, 1, -1)
    # Indices of the points the current W misclassifies.
    mispredicted <- which(h != Y)
    if (length(mispredicted) == 0) {
      converged <- TRUE
      break
    }
    # Pick one misclassified point at random and nudge W toward classifying
    # it correctly: W <- W + LR * y * x.
    # (Guard: sample(n, 1) on a length-one vector would draw from 1:n.)
    if (length(mispredicted) == 1) {
      idx <- mispredicted
    } else {
      idx <- sample(mispredicted, 1)
    }
    W <- W + LR * Y[idx] * X[idx, ]
  }
  if (converged) {
    cat('converged!\n')
  } else {
    cat('Did not converge!\n')
  }
  return(W)
}
#######################Define the sign function used above##############################################
sign_pred <- function(nums) {
  # Binarise values into class labels: strictly positive -> +1,
  # zero or negative -> -1.  The arithmetic form preserves the input's
  # shape (matrix dimensions survive, as with the ifelse() equivalent).
  2 * (nums > 0) - 1
}
################################Line Seperator##########################################################
which_side <- function(line_sep, point) {
  # Report which side of a line each query point lies on.
  #
  # line_sep: 2x2 numeric matrix whose rows are two points (x, y) defining
  #           the separating line.
  # point:    numeric matrix with columns (x, y), one query point per row.
  #
  # Uses the sign of the 2-D cross product
  #   (x2 - x1) * (py - y1) - (y2 - y1) * (px - x1).
  # Fix vs. the original: the parentheses around the second difference were
  # dropped, so it computed "... - y2 - y1 * (...)" instead.
  cross <- (line_sep[2, 1] - line_sep[1, 1]) * (point[, 2] - line_sep[1, 2]) -
    (line_sep[2, 2] - line_sep[1, 2]) * (point[, 1] - line_sep[1, 1])
  # Same +1 / -1 mapping as sign_pred(), inlined so the function is
  # self-contained; collinear points (cross == 0) map to -1.
  ifelse(cross > 0, +1, -1)
}
##################################Test perceptron#######################################################
# Fit the perceptron on the iris training features; pred_W holds the
# learned weight vector returned by perceptron().
pred_W <- perceptron(X, Y)
|
92f657f14529ed32ee4ad5d3ebe4b5452ef7d4f4 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/cutpointr/examples/risk_ratio.Rd.R | 75cf94e7c7b0d7204b01393257e41ed0783aba6e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 217 | r | risk_ratio.Rd.R | library(cutpointr)
### Name: risk_ratio
### Title: Calculate the risk ratio (relative risk)
### Aliases: risk_ratio
### ** Examples
# Scalar inputs: one risk ratio from a single set of counts.
risk_ratio(10, 5, 20, 10)
# Vector inputs: presumably one risk ratio per position -- see cutpointr docs.
risk_ratio(c(10, 8), c(5, 7), c(20, 12), c(10, 18))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.