blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b8ccd384bdbcc72eb2b8cd34cf754b9962b5e2f0 | 5bd4d740e0e91613f66274f44c0eb5b8425caeda | /GettingAndCleaningDataAssignment/run_analysis.R | 0e061bcc22def55a9b76a6f98e841c0379ccee76 | [] | no_license | fryderyc/datasciencecoursera | a7a564b0f13a40e70a5f83f18cf5c186ff1af345 | bf61e02c4ae2f7832b667f13208b257e42a6a81c | refs/heads/master | 2020-03-19T21:16:14.891082 | 2018-07-22T00:17:41 | 2018-07-22T00:17:41 | 136,934,731 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,083 | r | run_analysis.R | if (!"data.table" %in% installed.packages()) {
install.packages("data.table")
}
if (!"reshape2" %in% installed.packages()) {
install.packages("reshape2")
}
library("data.table")
library("reshape2")
if (!file.exists("./data.zip")) {
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", "./data.zip", mode = "wb")
unzip("./data.zip")
}
# 1. Merges the training and the test sets to create one data set.
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
X_merged <- rbind(X_train, X_test)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
features <- read.table("./UCI HAR Dataset/features.txt")
mean_std <- grep("mean()|std()", features[, 2])
X_merged <- X_merged[, mean_std]
# 3. Uses descriptive activity names to name the activities in the data set
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[, 2]
y_merged <- rbind(y_train, y_test)
y_merged[, 1] <- activity_labels[y_merged[, 1]]
# 4. Appropriately labels the data set with descriptive variable names.
feature_names <- sapply(features[, 2], function(x) {gsub("[()]", "",x)})
names(X_merged) <- feature_names[mean_std]
names(y_merged) <- "ActivityLabel"
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
subject_merged <- rbind(subject_train, subject_test);
names(subject_merged) <- "Subject"
merged_dataset <- cbind(subject_merged, y_merged, X_merged)
# 5. From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
melted <- melt(merged_dataset, id.vars = c("Subject", "ActivityLabel"))
tidy <- dcast(melted, Subject + ActivityLabel ~ variable, mean)
write.table(tidy, "tidy_data.txt", sep = ",", row.names = FALSE) |
f6202070914190b175a811be98d1a1c2b50f06b5 | 933061de278d39b4c219367a4a373e1b01518e5a | /release/flashlight/R/all_identical.R | 42dcb2168510abef90d915df39fd7b07d6514ef5 | [] | no_license | minghao2016/flashlight | e81a7589d0efc4cefa67f2c60736101d92294090 | 708baeafcf76ad45a1c2a1b4c3118516c722138c | refs/heads/master | 2023-03-03T22:49:00.813416 | 2021-02-08T06:08:09 | 2021-02-08T06:08:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 755 | r | all_identical.R | #' all_identical
#'
#' Checks if an aspect is identical for all elements in a nested list. The aspect is specified by \code{fun}, e.g. `[[`, followed by the element name to compare.
#'
#' @param x A nested list of objects.
#' @param fun Function used to extract information of each element of \code{x}.
#' @param ... Further arguments passed to \code{fun}.
#' @return A logical vector of length one.
#' @export
#' @examples
#' x <- list(a = 1, b = 2)
#' y <- list(a = 1, b = 3)
#' all_identical(list(x, y), `[[`, "a")
#' all_identical(list(x, y), `[[`, "b")
all_identical <- function(x, fun, ...) {
  # Vacuously TRUE for empty or single-element lists.
  n <- length(x)
  if (n <= 1L) {
    return(TRUE)
  }
  # Extract the aspect of interest from every element, then compare each
  # remaining extraction against the first one.
  extracted <- lapply(x, fun, ...)
  reference <- extracted[[1L]]
  same_as_reference <- vapply(extracted[-1L], identical, logical(1), reference)
  all(same_as_reference)
}
|
a9bf61e269b90cb41edc36bef74fdcff4e4375a2 | 39fa87c15571987f79dae2a1305bb8e1efdb2544 | /shinycrab/app.R | 430cd3b742be50b158fd846b9b2cefa93a80e4fc | [] | no_license | czwartackisr/shinycrab | c762af6582a45e1d3838be75b8b1140b06de560c | b5ee928e9cb842f2ca5817ac7b36a6f9d606e4c6 | refs/heads/master | 2023-02-06T17:25:09.463564 | 2020-12-14T18:35:48 | 2020-12-14T18:35:48 | 198,065,012 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,648 | r | app.R | library(shiny)
library(dplyr)
library(ggplot2)
library(DataCombine)
library(PerformanceAnalytics)
library(knitr)
library(rmarkdown)
# Crab survey data; column positions (not names) are referenced throughout the
# server, so the column order of MyData2.csv must not change.
crab <- read.csv("MyData2.csv", stringsAsFactors = FALSE)
# Shiny UI: sidebar selects a species, a dependent-variable group/column and an
# independent-variable group/column; the main panel shows distribution plots,
# lagged scatterplots, regression summaries, residual diagnostics, fixed
# correlation matrices, and the raw data table.
# NOTE(review): several headings say "Lag o" (letter o) where "Lag 0" is
# presumably intended -- confirm before changing the displayed text.
ui <- fluidPage(
  titlePanel("Crab Bivariate Modeling (Czwartacki)"),
  sidebarLayout(
    sidebarPanel(
      selectInput("SpeciesCode", label = h3("Species Code Selector"),
                  choices = list("D003", "D004", "D005", "D130", "F001"),
                  selected = 3, multiple = TRUE),
      selectInput("dataset2", h3("Dependent Variable (Group)"), choices = c("Abundance",
                                                                            "Landings",
                                                                            "Salinity",
                                                                            "Temperature",
                                                                            "Precipitation",
                                                                            "Climate")),
      HTML('</br>'),
      uiOutput('dv'),
      selectInput("dataset", h3("Independent Variable (Group)"), choices = c("Abundance",
                                                                             "Landings",
                                                                             "Salinity",
                                                                             "Temperature",
                                                                             "Precipitation",
                                                                             "Climate")),
      HTML('</br>'),
      uiOutput('iv'),
      # Report download controls (rendered via report.Rmd in the server).
      radioButtons('format', h5('Document format'), c('PDF', 'HTML', 'Word'), inline = TRUE),
      downloadButton('downloadReport')
    ),
    mainPanel(
      tabsetPanel(type = "tabs",
                  tabPanel("Distribution", # Plots of distributions
                           fluidRow(
                             column(6, plotOutput("distribution1")),
                             column(6, plotOutput("dvTime"))),
                           fluidRow(
                             column(6, plotOutput("distribution2")),
                             column(6, plotOutput("ivTime")))),
                  tabPanel("Scatterplot",
                           fluidRow(
                             column(12, h4("Lag o (No Lag)"), plotOutput("scatterplot0")),
                             column(12, h4("Lag 1 (Dep Var +1 yr)"), plotOutput("scatterplot1")),
                             column(12, h4("Lag 2 (Dep Var +1 yr, Ind Var -1 yr)"), plotOutput("scatterplot2")))),
                  tabPanel("Regression Summary",
                           fluidRow(
                             column(12, h4("Lag 0 (No Lag)"), verbatimTextOutput("summary0")),
                             column(12, h4("Lag 1 (Dep Var +1 yr)"), verbatimTextOutput("summary1")),
                             column(12, h4("Lag 2 (Dep Var +1 yr, Ind Var -1 yr)"), verbatimTextOutput("summary2")))), # Regression output
                  tabPanel("Residuals",
                           column(12, h4("Lag o (No Lag)"), plotOutput("residuals_hist0")),
                           column(12, h4("Lag o (No Lag)"), plotOutput("residuals_scatter0")),
                           column(12, h4("Lag o (No Lag)"), plotOutput("residuals_qqline0")),
                           column(12, h4("Lag 1 (Dep Var +1 yr)"), plotOutput("residuals_hist1")),
                           column(12, h4("Lag 1 (Dep Var +1 yr)"), plotOutput("residuals_scatter1")),
                           column(12, h4("Lag 1 (Dep Var +1 yr)"), plotOutput("residuals_qqline1")),
                           column(12, h4("Lag 2 (Dep Var +1 yr, Ind Var -1 yr)"), plotOutput("residuals_hist2")),
                           column(12, h4("Lag 2 (Dep Var +1 yr, Ind Var -1 yr)"), plotOutput("residuals_scatter2")),
                           column(12, h4("Lag 2 (Dep Var +1 yr, Ind Var -1 yr)"), plotOutput("residuals_qqline2"))),
                  tabPanel("Select Correlations",
                           fluidRow(
                             #column(6, h6("Method = Kendall's"), plotOutput("currentCorr")),
                             column(12, h4("Harbor Trawl - Creek Trawl Abundance Correlations"), plotOutput("corrAbun1")),
                             column(12, h4("Creek Trawl - Chas Harbor Landings"), plotOutput("corrAbun2")),
                             column(12, h4("Harbor Trawl - Chas Harbor Landings"), plotOutput("corrAbun3")),
                             column(12, h4("Salinity Correlations"), plotOutput("corrAbun4")))),
                  tabPanel("Data", DT::dataTableOutput('tbl')) # Data as datatable
      )
    )
  ))
# SERVER
# Shiny server: maps the chosen dependent/independent variable groups to
# hard-coded column ranges of `crab`, fits three bivariate lm() models
# (unlagged, dependent variable led by one year, and led/lagged), and renders
# the plots, summaries, correlation matrices and downloadable report.
server <- function(input, output) {
  # Dependent Variable Input -- column ranges are positional in MyData2.csv.
  dataset2Input <- reactive({
    switch(input$dataset2,
           "Abundance" = select(crab, 36:72),
           "Landings" = select(crab, 73:84),
           "Salinity" = select(crab, 15:35),
           "Temperature" = select(crab, 9:14),
           "Precipitation" = select(crab, 2:8),
           "Climate" = select(crab, 85:108))
  })
  # Independent variable Input.
  # NOTE(review): "Abundance" here spans 36:84 (which includes the Landings
  # columns 73:84), unlike dataset2Input's 36:72 -- confirm this is intended.
  datasetInput <- reactive({
    switch(input$dataset,
           "Abundance" = select(crab, 36:84),
           "Landings" = select(crab, 73:84),
           "Salinity" = select(crab, 15:35),
           "Temperature" = select(crab, 9:14),
           "Precipitation" = select(crab, 2:8),
           "Climate" = select(crab, 85:108))
  })
  # Dependent variable column selector, populated from the chosen group.
  output$dv = renderUI({
    selectInput('dv', h5('Choose an Dep Var from Group'), choices = names(dataset2Input()))
  })
  # Independent variable column selector.
  output$iv = renderUI({
    selectInput('iv', h5('Choose an Ind Var from Group'), choices = names(datasetInput()))
  })
  # regression formula (used only by the downloadable report)
  regFormula <- reactive({
    as.formula(paste(input$dv, '~', input$iv))
  })
  # bivariate model, no lag
  reg.model0 <- reactive({
    lm(crab[,input$dv] ~ crab[,input$iv])
  })
  # bivariate model, dependent variable shifted forward one year (dplyr::lead)
  reg.model1 <- reactive({
    lm(lead(crab[,input$dv]) ~ crab[,input$iv])
  })
  # bivariate model, dep var led and ind var lagged (two-year offset total)
  reg.model2 <- reactive({
    lm(lead(crab[,input$dv]) ~ lag(crab[,input$iv]))
  })
  ## GRAPHICS
  # residuals, lag 0
  output$residuals_hist0 <- renderPlot({
    hist(reg.model0()$residuals, main = paste(input$dv, '~', input$iv), xlab = 'Residuals')
  })
  output$residuals_scatter0 <- renderPlot({
    res <- qplot(fitted(reg.model0()), residuals(reg.model0()))
    res + geom_hline(yintercept = 0, col="red")
  })
  output$residuals_qqline0 <- renderPlot({
    qqnorm(reg.model0()$residuals)
    qqline(reg.model0()$residuals)
  })
  # residuals, lag 1 (lead model)
  output$residuals_hist1 <- renderPlot({
    hist(reg.model1()$residuals, main = paste(input$dv, '~', input$iv), xlab = 'Residuals')
  })
  output$residuals_scatter1 <- renderPlot({
    res <- qplot(fitted(reg.model1()), residuals(reg.model1()))
    res + geom_hline(yintercept = 0, col="red")
  })
  output$residuals_qqline1 <- renderPlot({
    qqnorm(reg.model1()$residuals)
    qqline(reg.model1()$residuals)
  })
  # residuals, lag 2 (lead/lag model)
  output$residuals_hist2 <- renderPlot({
    hist(reg.model2()$residuals, main = paste(input$dv, '~', input$iv), xlab = 'Residuals')
  })
  output$residuals_scatter2 <- renderPlot({
    res <- qplot(fitted(reg.model2()), residuals(reg.model2()))
    res + geom_hline(yintercept = 0, col="red")
  })
  output$residuals_qqline2 <- renderPlot({
    qqnorm(reg.model2()$residuals)
    qqline(reg.model2()$residuals)
  })
  # Regression output.
  # NOTE(review): input$var2 is not defined anywhere in the UI, so this names
  # assignment yields c("Intercept", NA) -- probably meant input$iv.
  output$summary0 <- renderPrint({
    fit0 <- lm(crab[,input$dv] ~ crab[,input$iv])
    names(fit0$coefficients) <- c("Intercept", input$var2)
    summary(fit0)
  })
  # Regression w/ lead output (same input$var2 caveat as above)
  output$summary1 <- renderPrint({
    fit1 <- lm(lead(crab[,input$dv]) ~ crab[,input$iv])
    names(fit1$coefficients) <- c("Intercept", input$var2)
    summary(fit1)
  })
  # Regression w/ lead and lag output (same input$var2 caveat as above)
  output$summary2 <- renderPrint({
    fit2 <- lm(lead(crab[,input$dv]) ~ lag(crab[,input$iv]))
    names(fit2$coefficients) <- c("Intercept", input$var2)
    summary(fit2)
  })
  # correlation matrix B90 T38 (fixed column ranges, Kendall's tau)
  output$corrAbun1 <- renderPlot({
    juvcorr1 <- select(crab, 28:33, 62:71)
    chart.Correlation(juvcorr1, histogram = FALSE, pch=19, method = "kendall")
  })
  # correlation matrix B90 Landings
  output$corrAbun2 <- renderPlot({
    juvcorr2 <- select(crab, c(62:71, 73:84))
    chart.Correlation(juvcorr2, histogram = FALSE, pch=19, method = "kendall")
  })
  # correlation matrix T38 Landings
  output$corrAbun3 <- renderPlot({
    juvcorr3 <- select(crab, 36:45, 73:84)
    chart.Correlation(juvcorr3, histogram = FALSE, pch=19, method = "kendall")
  })
  # correlation matrix Salinity
  output$corrAbun4 <- renderPlot({
    juvcorr4 <- select(crab, 15:30, 34, 35)
    chart.Correlation(juvcorr4, histogram = FALSE, pch=19, method = "kendall")
  }, height = 700)
  # Data output (interactive table of the full dataset)
  output$tbl = DT::renderDataTable({
    DT::datatable(crab, options = list(lengthChange = FALSE))
  })
  # Scatterplot output, no lag
  output$scatterplot0 <- renderPlot({
    plot(crab[,input$iv], crab[,input$dv], main="Lag 0",
         xlab=input$iv, ylab=input$dv, pch=19)
    abline(lm(crab[,input$dv] ~ crab[,input$iv]), col="red")
  }, height=400, width = 500)
  # Scatterplot output w/ lead.
  # NOTE(review): this plots lead(iv) against dv, while reg.model1 fits
  # lead(dv) ~ iv -- the plotted points and the fitted line come from
  # different lag structures; confirm which is intended.
  output$scatterplot1 <- renderPlot({
    plot(lead(crab[,input$iv]), crab[,input$dv], main="Lag 1",
         xlab=input$iv, ylab=input$dv, pch=19)
    abline(lm(lead(crab[,input$dv]) ~ crab[,input$iv]), col="red")
  }, height=400, width = 500)
  # Scatterplot output w/ lead and lag.
  # NOTE(review): the abline here fits the *unlagged* model even though the
  # points are lead/lag-shifted -- likely a copy-paste slip from scatterplot0.
  output$scatterplot2 <- renderPlot({
    plot(lead(crab[,input$iv]), lag(crab[,input$dv]), main="Lag 2",
         xlab=input$iv, ylab=input$dv, pch=19)
    abline(lm(crab[,input$dv] ~ crab[,input$iv]), col="red")
  }, height=400, width = 500)
  # Histogram output var 1
  output$distribution1 <- renderPlot({
    hist(crab[,input$dv], main="Dependent Variable Distribution", xlab=input$dv)
  })
  # Histogram output var 2
  output$distribution2 <- renderPlot({
    hist(crab[,input$iv], main="Independent Variable Distribution", xlab=input$iv)
  })
  # Time series output iv
  output$ivTime <- renderPlot({
    plot(crab$Year,crab[,input$iv], main = "Independent Variable Time Series",
         type = "l", lty = "twodash",
         xlab = "Year", ylab = "Indep Var Metric (Abundance, PSU, Degrees Celsius, etc.)")
  })
  # Time series output dv
  output$dvTime <- renderPlot({
    plot(crab$Year,crab[,input$dv], main = "Dependent Variable Time Series",
         type = "l", lty = "twodash",
         xlab = "Year", ylab = "Abundance Metric")
  })
  # download report: knit report.Rmd in a temp dir to the chosen format
  output$downloadReport <- downloadHandler(
    filename = function() {
      paste('my-report', sep = '.', switch(
        input$format, PDF = 'pdf', HTML = 'html', Word = 'docx'
      ))
    },
    content = function(file) {
      src <- normalizePath('report.Rmd')
      # Knit in a temporary working directory; restore the old wd on exit.
      owd <- setwd(tempdir())
      on.exit(setwd(owd))
      file.copy(src, 'report.Rmd')
      library(rmarkdown)
      out <- render('report.Rmd', switch(
        input$format,
        PDF = pdf_document(), HTML = html_document(), Word = word_document()
      ))
      file.rename(out, file)
    })
}
shinyApp(ui = ui, server = server) |
5820f4f24f2f17dfed7cca5f8f50059bd5300541 | a907ddaf7125844f8e92513bd2eca9fcdda1eafc | /man/rfmods.Rd | 35c9c59a98dc2901d421a7773d4d555026dfde65 | [] | no_license | SCCWRP/ASCI | ba809e5e86bc852def0004248a74fd64525c53b4 | 2b5931289b8790f3144ced9ec062cd3223eaa528 | refs/heads/master | 2023-04-10T00:44:12.540657 | 2023-02-17T09:21:33 | 2023-02-17T09:21:33 | 106,055,957 | 2 | 3 | null | 2020-11-11T20:48:05 | 2017-10-06T22:38:22 | R | UTF-8 | R | false | true | 1,249 | rd | rfmods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rfmods.R
\docType{data}
\name{rfmods}
\alias{rfmods}
\title{Random forest models for metrics}
\format{
A \code{\link[base]{list}} with six elements, one for each predictive metric and a final element for conductivity estimates:
\describe{
\item{\code{diatoms.cnt.spp.most.tol}}{randomForest}
\item{\code{diatoms.EpiRho.richness}}{randomForest}
\item{\code{diatoms.prop.spp.IndicatorClass_TN_low}}{randomForest}
\item{\code{diatoms.prop.spp.Planktonic}}{randomForest}
\item{\code{diatoms.prop.spp.Trophic.E}}{randomForest}
\item{\code{diatoms.Salinity.BF.richness}}{randomForest}
\item{\code{hybrid.cnt.spp.IndicatorClass_TP_high}}{randomForest}
\item{\code{hybrid.cnt.spp.most.tol}}{randomForest}
\item{\code{hybrid.EpiRho.richness}}{randomForest}
\item{\code{hybrid.OxyRed.DO_30.richness}}{randomForest}
\item{\code{hybrid.prop.spp.Planktonic}}{randomForest}
\item{\code{hybrid.prop.spp.Trophic.E}}{randomForest}
\item{\code{hybrid.Salinity.BF.richness}}{randomForest}
\item{\code{cond.qrf}}{quantregForest}
}
}
\usage{
rfmods
}
\description{
List of random forest models for predictive metrics
}
\examples{
data(rfmods)
}
\keyword{datasets}
|
89106a377c99a21138d0a7a25de5d672df787fe2 | 9880ada8c339324fa920906fd7ad3d4195698996 | /spreadsheet_divider.R | 1a88e1e32b8084f24babea21d190dc924aeb47cc | [] | no_license | Atticus29/spreadsheetSplitter | ec4e78fec9dc223dd0845dcc4885588a7c5f0008 | c4a50c431976cd89eb48d3fc3bfae51b414f81ee | refs/heads/master | 2021-08-15T11:43:18.874330 | 2017-11-17T20:10:33 | 2017-11-17T20:10:33 | 111,146,391 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,048 | r | spreadsheet_divider.R | #########################################
#########################################
## File name of spreadsheet to read from
#########################################
# TODO: edit the paths, column choices and total points for each course.
fileString <- "/Users/mf/2014_and_beyond/Academic/Teaching/Courses/PCC/Biology_101_Fall_2017/master_grades.csv"
outputDir <- "/Users/mf/2014_and_beyond/Academic/Teaching/Courses/PCC/Biology_101_Fall_2017"
totalPoints <- 120  # total points available; denominator for TotalPercent

################
## Read in file
################
# library() fails loudly if data.table is missing (require() only warns).
library(data.table)
data <- as.data.table(read.csv(file = fileString, header = TRUE, sep = ",",
                               stringsAsFactors = FALSE))

#############
## Variables
#############
# Columns kept in each student's individual spreadsheet.
colSubset <- colnames(data)[1:16] # TODO edit this as needed
dataOnlyImportantCols <- data[, colSubset, with = FALSE]
# Columns summed into the grade total (drop names and superseded raw scores).
colsManualExclusion <- colSubset[!colSubset %in% c("First_Name", "Last_Name", "Raw_quiz2_out_of_9", "Quiz3_Grade_out_of_8")] # TODO edit this as needed

############################################
## Apply rowSums and percentage calculation
############################################
dataShortened <- data[, colsManualExclusion, with = FALSE]
# Coerce every grade column to numeric (blank cells become NA, ignored below).
dataShortened[] <- lapply(dataShortened, function(x) as.numeric(as.character(x)))
dataShortened[, Total := rowSums(.SD, na.rm = TRUE), .SDcols = colsManualExclusion]
dataShortened[, TotalPercent := Total * 100 / totalPoints]
dataOnlyImportantCols <- data.table(dataOnlyImportantCols, dataShortened[, c("Total", "TotalPercent"), with = FALSE])
lastRow <- nrow(dataOnlyImportantCols)

##############################################################
## Perhaps there's a mean row to add to each sub spreadsheet?
##############################################################
rowNumToIncludeInEach <- lastRow # TODO edit this as needed

#########################################
## Make the spreadsheet for each student
#########################################
# BUG FIX: the original `c(1:lastRow-1)` parses as (1:lastRow) - 1, i.e.
# 0:(lastRow-1), producing a bogus r = 0 iteration. seq_len() gives the
# intended 1:(lastRow-1) (students are all rows except the summary row).
for (r in seq_len(lastRow - 1)) {
  # paste0/file.path avoid the stray spaces paste() inserted into filenames.
  newFileName <- file.path(outputDir, paste0(dataOnlyImportantCols[r, Last_Name], "_grades.csv")) # TODO edit this as needed
  write.table(as.data.frame(dataOnlyImportantCols[c(rowNumToIncludeInEach, r), ]),
              file = newFileName, quote = FALSE, sep = ",",
              col.names = TRUE, row.names = FALSE)
}
|
25fd35c98ae7a32144d421eff2d17e466366ea57 | 2483f9da1398797e38a4dd3e17b86ff967b40432 | /population_analysis.R | 200543cb8bdad9f4a950fc79b0614c766066ee0e | [] | no_license | eclarke/vdj_xscid | b5fccbc11c65ee146884c238692972400c8e06a9 | 8a6142b6429d203daaee351f8327ec39fcf3850a | refs/heads/master | 2016-09-05T16:42:49.117949 | 2013-05-23T20:44:15 | 2013-05-23T20:44:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,053 | r | population_analysis.R | library(parallel)
# Data loading: the first branch rebuilds the collapsed datasets from the raw
# processed data; it is switched off (F) and the precomputed .RData is loaded
# instead. NOTE(review): prefer TRUE/FALSE over T/F -- T and F are ordinary
# variables and can be reassigned.
if (F) {
  load('all_processed_data.RData')
  library(data.table)
  # isolate just the processed data from Adaptive
  ad.f <- subset(df.filt.all, !is.na(filt.copy))
  ad.f$key <- paste(ad.f$accn, ad.f$seq, sep="_")
  # collapse the replicate copy number data (mean across replicates per key)
  cn <- data.table(ad.f)
  cn <- cn[, list(filt.copy=mean(filt.copy), normalizedCopy=mean(normalizedCopy),
                  normalizedFrequency=mean(normalizedFrequency),
                  rawFrequency=mean(rawFrequency), accn=accn), by=key]
  # or, collapse the data according to V and J genes (D is too inconsistent)
  ad.f$key <- paste(ad.f$accn, ad.f$replicate, ad.f$VGeneName, ad.f$JGeneName)
  genes <- data.table(ad.f)
  setkey(genes, key)
  genes <- genes[, list(filt.copy = sum(filt.copy),
                        normalized.copy=sum(normalizedCopy),
                        VGeneName=VGeneName, DGeneName=DGeneName,
                        JGeneName=JGeneName, accn=accn,
                        replicate=replicate), by=key]
  # `accns` is relied on as a global by .by.accn / test.all below.
  accns <- unique(ad.f$accn)
} else if (T) {
  # Precomputed ad.f / accns objects, collapsed by V and J genes.
  load("data_collapsed_by_VJgenes.RData")
}
##
# Performs the specified two-sample test (default ks.test) between one
# accn/replicate and another, using the values in `column` of `df`.
#
# accn1, accn2: sample accessions to compare.
# repl1, repl2: replicate labels selecting rows for accn1 / accn2.
# df:           data frame with `accn`, `replicate` and `column` columns.
# column:       name of the numeric column holding the measurements.
# test:         two-sample test function called as test(x, y, ...).
# full:         if TRUE return the whole test object; otherwise t.value only.
# t.value:      named element of the test result to return (default p-value).
# ...:          further arguments passed to `test`.
#
# BUG FIX: the original filter `!is.na(column)` tested the column *name*
# (a string, never NA), so NA measurements were silently passed through to
# the test. NAs are now dropped from each sample explicitly.
##
.test <- function(accn1, accn2, repl1, repl2, df, column = "normalizedCopy",
                  test = ks.test, full = FALSE, t.value = "p.value", ...) {
  # Progress logging (these comparisons can be long-running).
  print(paste(accn1, accn2, repl1, repl2, sep = " "))
  x <- df[df$accn == accn1 & df$replicate == repl1, column]
  x <- x[!is.na(x)]
  y <- df[df$accn == accn2 & df$replicate == repl2, column]
  y <- y[!is.na(y)]
  t <- test(x, y, ...)
  if (full) t else t[[t.value]]
}
##
# Pairwise-compares the given accn against every accession in the global
# `accns` vector, using replicate `repl` on both sides of each comparison.
# Returns a named vector of test results (one per accession in `accns`).
##
.by.accn <- function(accn, df, repl, column, test, ...) {
  run_one <- function(other) {
    .test(accn1 = other, accn2 = accn, repl1 = repl, repl2 = repl,
          df = df, column = column, test = test, ...)
  }
  sapply(accns, run_one)
}
##
# Pairwise-compares replicate `repl` of sample `accn` against every replicate
# in `repls` of the same sample. Returns a named vector of test results.
##
.by.repl <- function(repl, df, repls, accn, column, test, ...) {
  # Each element of `repls` is passed positionally into .test's repl1 slot;
  # everything else is pinned by name.
  sapply(repls, .test, accn1 = accn, accn2 = accn, repl2 = repl,
         df = df, column = column, test = test, ...)
}
##
# Worker for the all-by-all comparison: compares one (accn, replicate) row `x`
# against every row of `all.samples`, distributing the inner loop over the
# cluster `cl`. x[[1]]/y[[1]] are accns and x[[2]]/y[[2]] replicates, relying
# on apply() coercing each row to a character vector in column order.
# NOTE(review): cluster workers must already have .test() and the data in
# scope (par.test.all sources this file on each worker before calling this).
##
.by.all <- function(x, all.samples, df, column, test, cl, t.value, full, ...) {
  cat("X", x, '\n')
  print(all.samples)
  parApply(cl = cl, X=all.samples, MARGIN=1, FUN=function(y) {
    cat("Y", y, '\n')
    # `full` is passed positionally and lands in .test's `full` formal.
    .test(accn1 = x[[1]], accn2 = y[[1]], repl1 = x[[2]], repl2 = y[[2]],
          df = df, column = column, test = test, t.value = t.value, full, ...) })
}
##
# Performs a pairwise comparison using the specified test against either all
# samples or all replicates in a sample, using the specified data frame and
# column containing the data.
#
# test:    the statistical test to use
# df:      the data frame containing the data (usually ad.f)
# by:      "accn" compares all samples using replicate 1; "repl" compares the
#          replicates of the sample given by `accn`. "all" is only available
#          in par.test.all().
# column:  the column of the given data frame that contains the data points
# accn:    the accn (sample) whose replicates are being compared;
#          required if by == "repl".
# full:    return the full results of the test
# t.value: if not returning the full results, the named attribute of the
#          test results to return
#
# Returns a matrix of test results (rows/columns named by accn or replicate).
##
test.all <- function(test, df, by = "accn", column = "normalizedCopy",
                     accn = NULL, full = FALSE, t.value = "p.value", ...) {
  if (by == "accn") {
    # `full`/`t.value` are forwarded by name: the old positional forwarding
    # only worked by accident of .test's formal ordering.
    results <- as.matrix(cbind(sapply(accns, .by.accn, df = df, repl = "1",
                                      column = column, test = test,
                                      full = full, t.value = t.value, ...)))
  } else if (by == "repl") {
    # BUG FIX: the old check `stopifnot(accn != NULL)` compared against NULL,
    # which yields logical(0) and therefore *never* failed.
    stopifnot(!is.null(accn))
    repls <- unique(ad.f$replicate[ad.f$accn == accn])
    results <- as.matrix(cbind(sapply(repls, .by.repl, repls = repls,
                                      df = df, accn = accn, column = column,
                                      test = test, full = full,
                                      t.value = t.value, ...)))
  } else if (by == "all") {
    # Previously fell through to an undefined `results`; fail clearly instead.
    stop('by = "all" is not implemented here; use par.test.all()', call. = FALSE)
  } else {
    stop("unknown value for `by`: ", by, call. = FALSE)
  }
  results
}
##
# Exactly the same as test.all, but runs the comparisons on a PSOCK cluster.
# Additionally supports by = "all": an all-by-all comparison of every
# (accn, replicate) pair in ad.f. Workers re-source this file so that .test()
# and the data objects exist on each node.
##
par.test.all <- function(test, df, by = "all", column = "filt.copy",
                         accn = NULL, full = FALSE, t.value = "p.value", ...) {
  cat("making cluster...")
  cl <- makeCluster(16)
  # Ensure workers are shut down even if an error interrupts the run
  # (the old trailing stopCluster() leaked the cluster on error).
  on.exit(stopCluster(cl), add = TRUE)
  clusterEvalQ(cl, source("population_analysis.R"))
  if (by == "accn") {
    results <- as.matrix(cbind(parSapply(cl, accns, .by.accn, df = df,
                                         repl = "1", column = column,
                                         test = test, full = full,
                                         t.value = t.value, ...)))
  } else if (by == "repl") {
    # BUG FIX: `accn != NULL` is logical(0) and never triggers stopifnot.
    stopifnot(!is.null(accn))
    repls <- unique(ad.f$replicate[ad.f$accn == accn])
    results <- as.matrix(cbind(parSapply(cl, repls, .by.repl, repls = repls,
                                         df = df, accn = accn, column = column,
                                         test = test, full = full,
                                         t.value = t.value, ...)))
  } else if (by == "all") {
    cat("running all by all..\n")
    # One row per unique (accn, replicate) pair, sorted and name-keyed.
    all.s <- unique.data.frame(subset(ad.f, select = c("accn", "replicate")))
    all.s <- all.s[order(all.s$accn, all.s$replicate), ]
    rownames(all.s) <- paste(all.s$accn, all.s$replicate, sep = "_")
    results <- apply(X = all.s, FUN = .by.all, MARGIN = 1, cl = cl,
                     all.samples = all.s, df = df, column = column,
                     test = test, t.value = t.value, full = full, ...)
  } else {
    stop("unknown value for `by`: ", by, call. = FALSE)
  }
  results
}
|
3282c2b86d69721cb5608c2e46406c344bf2b68c | 712cd388080507751e4bd5a94ed6d77c342c5178 | /src/05_claire_location_analysis.R | bec5b7ad2121e88d2914af2ba8655104934977c7 | [] | no_license | ckelling/Travel_Patterns_SODA_501 | 9f5a18bd9a4ccd602eb2fc7bd4e9186e2a497dc5 | f8fa853e33b70a0f4bedc2bf6d55caa0033ca6aa | refs/heads/master | 2021-09-19T19:55:16.956127 | 2018-07-31T12:21:15 | 2018-07-31T12:21:15 | 117,688,396 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,624 | r | 05_claire_location_analysis.R | ###
### Claire Kelling
### SODA 501
### Final Project
###
### Created 3/29/18 for analysis of locations of Twitter users.
###
library(ggmap)
library(XML)
library(geosphere)
library(geonames)
library(Imap)
library(googleway)
library(dplyr)
library(xtable)
# Load data created my Xiaoran
#load("C:/Users/ckell/OneDrive/Penn State/2017-2018/01_Spring/SODA_501/SODA_501_project/data/user_locations.Rdata")
# Provides GeoDistanceInMetresMatrix() used below.
source("C:/Users/ckell/OneDrive/Penn State/2017-2018/01_Spring/SODA_501/SODA_501_project/src/00_geo_dist_function.R")
load("C:/Users/ckell/OneDrive/Penn State/2017-2018/01_Spring/SODA_501/SODA_501_project/data/user_locations_finalcleaned.Rdata")
#I will use the subsetted data here
user_locations <- user_locations_c2
#consists of username, location (list), and coords (list)
# the coords are all of the unique locations
class(user_locations)
class(user_locations$coords)
########
# we want to find the furthest distance between any two points
# (prototype on a single user before looping over everyone below)
########
#first, we want to record which location(s) is in the college town (an index)
# NOTE(review): this vector literal spans source lines, so the second element
# actually contains an embedded newline and indentation ("Lawrence,\n  KS")
# and can never match a geocoded "Lawrence, KS" -- confirm and repair.
towns<-c("Ithaca, NY", "State College, PA", "Bloomington, IN", "Lawrence,
         KS", "Blacksburg, VA", "College Station, TX", "Columbia, MO",
         "Champaign, IL", "Ann Arbor, MI", "Gainesville, FL")
user_towns <- user_locations$locations[[21]]
# look at user 20 for the problem with "Florida, USA"
#get a API key at the following website:
#https://developers.google.com/maps/documentation/geocoding/get-api-key
# NOTE(review): live API keys are committed in source -- these should be
# revoked and moved to environment variables / a gitignored config.
key <- "AIzaSyBx0xrnryLGil3jNbKOkgSTBaHeZGqxLQg"
key2 <- "AIzaSyAtI-X5J7uMMqVDQwpKUqLqVJDQ2GBZ1kQ"
key3 <- "AIzaSyAI4JuiuoBbw5ZhmBvK0n-5k5-qr2S4Iwk"
key4 <- "AIzaSyC21ZPoTAADM-C6rK6Df-ZGRkte7mjeqEA"
# Geocode each of this user's locations (one row of lat/lng per location).
lonlat3 <- NULL
for(i in 1:length(user_towns)){
  lonlat2 <- google_geocode(address = user_towns[i], key = key)
  lonlat2 <- lonlat2$results$geometry$location
  print(i)
  #Sys.sleep(3)
  lonlat3 <- rbind(lonlat3, lonlat2)
}
#geocodeQueryCheck()
#record indice that is college town
coll_ind <- which(user_towns %in% towns)
# Now, I will create a distance matrix
#need to get matrix in correct format
df.cities <- data.frame(name = user_towns,
                        lat = lonlat3$lat,
                        lon = lonlat3$lng)
#this is in km (units)
dmat3 <- round(GeoDistanceInMetresMatrix(df.cities) / 1000)
#record the furthest and the largest distance from the college town
dist_vec <- dmat3[,coll_ind]
max_dist <- max(dist_vec) #measured in km
max_loc <- rownames(dmat3)[which(dmat3[,coll_ind] == max_dist)]
new_dat <- c(max_loc, max_dist)
# NOTE(review): rm()'s `list` argument must be a *character* vector of object
# names; passing the numeric index `coll_ind` here errors at runtime --
# presumably `"coll_ind"` (the name) was intended.
rm(list = coll_ind, i, dist_vec, max_dist, max_loc, new_dat,
   user_towns, df.cities, dmat3, lonlat2, lonlat3)
###
### Now I will operationalize for the full dataset: for every user, geocode
### their locations, find the college town, the farthest destination from it,
### and the total distance traveled assuming straight-line hops in order.
###
full_dat <- NULL
# NOTE(review): the start index (1161) was edited by hand between reruns to
# work around daily geocoding quota limits; results were accumulated into
# full_dat_final across sessions (see commented save/load below).
for(j in 1161:nrow(user_locations)){
  #j <- 1160
  user_towns <- user_locations$locations[[j]]
  #remove NA's
  user_towns <- user_towns[!is.na(user_towns)]
  num_loc <- length(user_towns)
  print(paste(j, "****************************************"))
  # Users with >= 50 locations are skipped (geocoding cost / likely bots).
  if(length(user_towns) < 50){
    lonlat3 <- NULL
    for(i in 1:length(user_towns)){
      #i=2
      lonlat2 <- google_geocode(address = user_towns[i], key = key2)
      lonlat2 <- lonlat2$results$geometry$location
      print(i)
      # Random delay to stay under the geocoding rate limit.
      Sys.sleep(runif(1,1,3))
      lonlat3 <- rbind(lonlat3, lonlat2)
      #print(paste(nrow(lonlat3), "***"))
    }
    #geocodeQueryCheck()
    #record indice that is college town
    coll_ind <- which(user_towns %in% towns)
    # Now, I will create a distance matrix
    #need to get matrix in correct format
    df.cities <- data.frame(name = user_towns,
                            lat = lonlat3$lat,
                            lon = lonlat3$lng)
    #this is in km (units)
    dmat3 <- round(GeoDistanceInMetresMatrix(df.cities) / 1000)
    #record the furthest and the largest distance
    dist_vec <- dmat3[,coll_ind]
    max_dist <- max(dist_vec) #measured in km
    max_loc <- rownames(dmat3)[which(dmat3[,coll_ind] == max_dist)]
    #record coll_town
    coll_loc <- rownames(dmat3)[coll_ind]
    #record the sum of the distances, assuming traveling in a line
    dist_vec <- NULL
    if(nrow(dmat3) > 1){
      # Sum consecutive-hop distances in listed order.
      for(k in 1:(nrow(dmat3)-1)){
        #k=6
        dist1 <- dmat3[k,k+1]
        dist_vec <- c(dist_vec,dist1)
      }
      dist_trav <- sum(dist_vec)
    }else{
      dist_trav <- 0
    }
    new_dat <- c(coll_loc, num_loc, max_loc, max_dist, dist_trav)
    full_dat <- rbind(full_dat, new_dat)
  }
}
#j needs to be up to 1518
#full_dat_final <- NULL
#full_dat_final <- rbind(full_dat_final, full_dat) #done up to 1160
#save(full_dat_final, file = "C:/Users/ckell/OneDrive/Penn State/2017-2018/01_Spring/SODA_501/SODA_501_project/data/coll_ind_dat.Rdata")
#load(file = "C:/Users/ckell/OneDrive/Penn State/2017-2018/01_Spring/SODA_501/SODA_501_project/data/coll_ind_dat.Rdata")
#got up to j as 74
# Clean the accumulated matrix into a typed data frame for aggregation.
full_dat2 <- full_dat_final
#full_dat_final <- full_dat2
# Drop two problem rows identified by hand.
full_dat2 <- full_dat2[-c(169,176),]
nrow(full_dat2)
full_dat2 <- as.data.frame(full_dat2)
colnames(full_dat2) <- c("coll_loc", "num_loc", "max_loc", "max_dist", "dist_trav")
rownames(full_dat2) <- c()
full_dat2$max_dist <- as.numeric(as.character(full_dat2$max_dist))
full_dat2$dist_trav <- as.numeric(as.character(full_dat2$dist_trav))
full_dat2$max_loc <- as.character(full_dat2$max_loc)
#issues with exceeding daily requested number
#create aggregated statistics, by college towns
mean_max <- aggregate(. ~ coll_loc, full_dat2[-3], mean)
xtable(mean_max)
# Most common non-home destinations per college town (appearing > 4 times).
common_dest <- full_dat2 %>% group_by(coll_loc,max_loc) %>% tally() %>% filter(n>4) %>%
  filter(max_loc != coll_loc) %>% top_n(3, n) #%>% arrange(coll_loc, max_loc, desc(n))
xtable(common_dest)
##
## Now, I would just like to record the indice of the college town,
## and the total number of unique locations.
##
ind_dat <- NULL
# k counts users who appear in more than one college town.
k <- 0
for(j in 1:nrow(user_locations)){
  #j <- 168
  user_towns <- user_locations$locations[[j]]
  #remove NA's
  user_towns <- user_towns[!is.na(user_towns)]
  num_loc <- length(user_towns)
  print(paste(j, "****************************************"))
  #record indice that is college town
  coll_ind <- which(user_towns %in% towns)
  if(length(coll_ind)> 1){
    # Keep only the first college town for multi-town users.
    coll_ind <- coll_ind[1]
    k <- k+1
  }
  #record coll_town
  coll_loc <- user_towns[coll_ind]
  #create new row
  new_dat <- c(coll_loc, num_loc, coll_ind)
  #new data
  ind_dat <- rbind(ind_dat, new_dat)
}
k
#There are 2 users that are in more than one college town
#We just take the first location for these two users
# rbind() built a character matrix, so columns must be re-typed.
ind_dat <- as.data.frame(ind_dat)
colnames(ind_dat) <- c("coll_loc", "num_loc", "coll_ind")
rownames(ind_dat) <- c()
ind_dat$num_loc <- as.numeric(as.character(ind_dat$num_loc))
ind_dat$coll_ind <- as.numeric(as.character(ind_dat$coll_ind))
hist(ind_dat$num_loc)
hist(ind_dat$coll_ind)
p <- ggplot(ind_dat, aes(coll_loc, num_loc))
p + geom_boxplot()
p2 <- ggplot(ind_dat, aes(coll_loc, coll_ind))
p2 + geom_boxplot()
#aggregating by the college town
coll_ind_summ <- aggregate(. ~ coll_loc, ind_dat, mean)
xtable(coll_ind_summ)
#43.8% of users have only traveled to one location- skewing analysis drastically
length(which(ind_dat$num_loc==1))/nrow(ind_dat)
#performing the same analysis for users that have traveled to more than one location
g1_ind_dat <- ind_dat[which(ind_dat$num_loc>1),]
# Trip progress of the college town: 0 = first location, 1 = last location.
g1_ind_dat$perc_ind <- (g1_ind_dat$coll_ind-1)/(g1_ind_dat$num_loc-1)
hist(g1_ind_dat$num_loc)
hist(g1_ind_dat$coll_ind)
hist(g1_ind_dat$perc_ind)
ggplot(data=g1_ind_dat, aes(g1_ind_dat$perc_ind)) + geom_histogram(bins=10)+labs(x="College Percent of Travel")
p <- ggplot(g1_ind_dat, aes(coll_loc, num_loc))
p + geom_boxplot()
p2 <- ggplot(g1_ind_dat, aes(coll_loc, coll_ind))
p2 + geom_boxplot()
coll_ind_g1 <- aggregate(. ~ coll_loc, g1_ind_dat, mean)
xtable(coll_ind_g1)
# Create a plot to compare cities for the trip progress across the number of locations
# aggregate by college town, number of locations, and index
agg_dat <- g1_ind_dat %>% group_by(coll_loc,num_loc,coll_ind) %>% tally()
# aggregate by college town and number of locations
agg_dat_num_loc <- g1_ind_dat %>% group_by(coll_loc, num_loc) %>% tally()
colnames(agg_dat_num_loc) <-c("coll_loc","num_loc", "n_2")
#joining the data
agg_dat <- left_join(agg_dat, agg_dat_num_loc)
#creating a measure of relative frequency
agg_dat$perc <- agg_dat$n/agg_dat$n_2
#creating measure for the trip progress number of the college town
agg_dat$coll_perc <- (agg_dat$coll_ind-1)/(agg_dat$num_loc-1)
#plotting this, by college town (point size = relative frequency)
v <- ggplot(agg_dat, aes(num_loc, coll_perc, size = perc))
v + geom_point()+facet_wrap(~ coll_loc, ncol = 3)+labs(y="Trip Progress",
                                                       x= "Number of locations",
                                                       size = "Relative Frequency")
#Plotting a couple of histograms
# NOTE(review): this reloads the *pre*-cleaned data (user_locations_c), which
# also carries a bot-check probability column `probab`; 0.59 is the bot-score
# cutoff used below -- confirm that threshold against the cleaning script.
load("C:/Users/ckell/OneDrive/Penn State/2017-2018/01_Spring/SODA_501/SODA_501_project/data/user_locations_precleaned.Rdata")
hist(user_locations_c$probab[which(user_locations_c$probab>0)])
hist(user_locations_c$location_num[which(user_locations_c$probab>0)])
hist(user_locations_c$location_num[which(user_locations_c$probab>0 &
                                           user_locations_c$probab<0.59)])
mean(user_locations_c$location_num[which(user_locations_c$probab>0)])
mean(user_locations_c$location_num[which(user_locations_c$probab>0 &
                                           user_locations_c$probab<0.59)])
median(user_locations_c$location_num[which(user_locations_c$probab>0)])
median(user_locations_c$location_num[which(user_locations_c$probab>0 &
                                             user_locations_c$probab<0.59)])
hist(user_locations_c$location_num[which(user_locations_c$probab>0 &
                                           user_locations_c$location_num<600)], binwidth = 40)
gg_dat <- as.data.frame(as.numeric(user_locations_c$location_num[which(user_locations_c$probab>0 &
                                                                         user_locations_c$location_num<200)]))
colnames(gg_dat) <- "location_num"
ggplot(data=gg_dat,
       aes(location_num)) +
  geom_histogram(bins = 200)+labs(x="Number of Locations")+ geom_vline(xintercept=25, col ="red")
max(user_locations_c$location_num[which(user_locations_c$probab>0 &
                                          user_locations_c$probab<0.59)])
max(user_locations_c$probab)
gg_dat2 <- user_locations_c[which(user_locations_c$probab>0),]
ggplot(data=gg_dat2,
       aes(probab)) +
  geom_histogram(bins = 200)+labs(x="Bot Check Probability")+ geom_vline(xintercept=0.59, col ="red")
|
10edd78fe8c403d320736264b7096241817c4bba | 97c2e7f4087eacc6f2356e0451413f7b71373ec5 | /R/get_janes_data_xml.R | b0fe771bfbfd0aa0cbcc3cbbdb9605535226fd20 | [] | no_license | cgpeltier/janes | 5c2bd46d7d3850fbe412bafae24213fc42d77b99 | 414218830dfb3d3646595983a83e7fb2d134ba65 | refs/heads/master | 2023-04-08T08:32:53.786585 | 2021-04-13T14:22:31 | 2021-04-13T14:22:31 | 282,264,512 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 668 | r | get_janes_data_xml.R | #' @title get_janes_data_xml
#' @description Pulls Janes data for all data endpoints. Helper function.
#'
#' @param x A single request URL (character scalar).
#'
#' @return The parsed body of the Janes API response.
#'   Used in all "get_janes_x" functions.
#' @importFrom httr GET
#' @importFrom httr add_headers
#' @importFrom httr content
#' @importFrom jsonlite fromJSON
#' @importFrom jsonlite toJSON
#' @importFrom magrittr "%>%"
#' @importFrom tibble tibble
#' @importFrom xml2 xml_children
#' @importFrom xml2 xml_find_all
#' @importFrom xml2 xml_text
#' @export
get_janes_data_xml <- function(x){
  # authentication token is read from the JANES_KEY environment variable
  request <- GET(url = x, add_headers(Authorization = Sys.getenv("JANES_KEY")))
  # NOTE: @export used to sit *after* this function, where roxygen2 attaches
  # it to nothing; it has been moved into the tag block above so the export
  # directive is actually generated.
  request %>%
    content()
}
|
e6fe9f3f01b8cf97fee6f8c28a69e6c9dc598e7a | f8ce1034cef41685ab2387fa42084ac1ee5c96cf | /chapter16/methodgrouped.R | 99e45a2247c950c0b6daa6b1aa0329c0178b6fff | [] | no_license | AnguillaJaponica/RProgramming | ab7e15d99153ebe585745289490e6e3df118a35c | ae1401920b5a52795cffc95bd83922f15761e531 | refs/heads/master | 2020-11-28T10:07:02.579925 | 2019-12-23T15:28:23 | 2019-12-23T15:28:23 | 229,777,900 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 36 | r | methodgrouped.R | plot(rules.Epub, method = "grouped") |
c70fdc72187d77b0c118446fd9d15b46006ef61b | 84b12c5708b2512790b4f3496bb904cac2c520a8 | /R/exam_function.R | 649ffc72a86803d13b3eb4ee356541da640a1c4e | [] | no_license | charlotte-ngs/rmdexam | b753effcc1d464eda41d75b2021ce0a9b54f3b17 | 0842e4dc02206ae06915f0bf2dbb01028028fa8e | refs/heads/main | 2023-01-29T08:15:49.989861 | 2020-12-17T06:13:34 | 2020-12-17T06:13:34 | 322,000,312 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,629 | r | exam_function.R | #' ---
#' title: Create Rmarkdown Exam
#' date: 2020-12-15
#' ---
#'
#' @title Generate Rmd Solutions and Exams
#'
#' @description
#' Given a vector of problem names and a vector of student names, a series of
#' individualised solutions and exams are created. The output currently consists
#' of Rmarkdown source files and pdf-files for the exam questions and the solutions.
#' Each problem requires a Rmarkdown source file which is stored in a file named
#' the same way as the problem name. Hence the source of the problem 'nrm' must
#' be stored in a file called 'nrm.Rmd'. Any problem setup should be included in
#' an R-script called 'nrm.R' which contains a single function 'generate_problem_setup()'
#'
#' @details
#' Solutions to questions must be tagged with the begin-tag 'solstart' and the
#' end-tag 'solend'. These tags have to be defined in the yaml header or in a
#' preamble.tex file
#'
#' @param pvec_problem vector of problem names
#' @param pvec_names vector of student names
#' @param pn_nr_exam number of exams to be generated, if pvec_names is missing
#' @param ps_rmd_dir directory where rmd-sources for the problems are stored
#' @param ps_rsrc_dir directory where R-scripts for problem-setup are stored
#' @param ps_tex_dir directory containing additional tex files
#' @param ps_out_dir directory where output should be stored
#' @param pb_force flag to first delete old output directories
#'
#' @return Invisibly returns \code{NULL}; called for its side effects of
#'   writing and rendering the exam and solution documents.
#'
#' @examples
#' \dontrun{
#' exam2rmd(pvec_problem = c('nrm'), pvec_names = c('test_student'), pn_nr_exam = 1)
#' }
#'
#' @export exam2rmd
exam2rmd <- function(pvec_problem,
                     pvec_names = NULL,
                     pn_nr_exam = 1,
                     ps_rmd_dir = 'rmd',
                     ps_rsrc_dir = 'R',
                     ps_tex_dir = 'tex',
                     ps_out_dir = 'out',
                     pb_force = FALSE){
  # with pb_force, existing output is deleted first
  if (pb_force && dir.exists(ps_out_dir)){
    cat(" * Removing existing output directory: ", ps_out_dir, "\n")
    # recursive = TRUE is required: without it unlink() silently leaves
    # directories in place, so pb_force previously had no effect
    unlink(ps_out_dir, recursive = TRUE)
  }
  # check whether ps_out_dir exists
  if (!dir.exists(ps_out_dir)) {
    cat(" * Create directory: ", ps_out_dir, "\n")
    dir.create(path = ps_out_dir)
  }
  # time-stamp used as common prefix for all generated Rmd files
  s_cur_ts <- format(Sys.time(), '%Y%m%d%H%M%S')
  # loop over number of exams to produce
  for (eidx in seq_len(pn_nr_exam)){
    # output directory and file names for the current exam: use the student
    # name when one is available, the running exam number otherwise
    if (!is.null(pvec_names) && eidx <= length(pvec_names)) {
      cur_outdir <- file.path(ps_out_dir, paste0('exam_', pvec_names[eidx]))
      cur_exam_rmd_file <- paste0(s_cur_ts, '_exam_', pvec_names[eidx], '.Rmd', collapse = '')
      cur_sol_rmd_file <- paste0(s_cur_ts, '_sol_', pvec_names[eidx], '.Rmd', collapse = '')
    } else {
      cur_outdir <- file.path(ps_out_dir, paste0('exam_', eidx))
      cur_exam_rmd_file <- paste0(s_cur_ts, '_exam_', eidx, '.Rmd', collapse = '')
      cur_sol_rmd_file <- paste0(s_cur_ts, '_sol_', eidx, '.Rmd', collapse = '')
    }
    if (!dir.exists(cur_outdir)){
      cat(" ** Current exam directory: ", cur_outdir, "\n")
      dir.create(cur_outdir)
    }
    # copy the tex dir (preamble with tag definitions) next to the Rmd files
    if (dir.exists(ps_tex_dir)) {
      cat(" ** Copy ", ps_tex_dir, " to: ", cur_outdir, "\n")
      fs::dir_copy(path = ps_tex_dir, new_path = cur_outdir)
    }
    # full paths of the current exam- and solution-rmdfile
    cur_exam_rmd_path <- file.path(cur_outdir, cur_exam_rmd_file)
    cur_sol_rmd_path <- file.path(cur_outdir, cur_sol_rmd_file)
    # read the common document header (index.Rmd)
    s_index_rmd <- file.path(ps_rmd_dir, 'index.Rmd')
    if (file.exists(s_index_rmd)){
      con_index <- file(description = s_index_rmd)
      vec_index <- readLines(con = con_index)
      close(con_index)
    } else {
      # fail early with a clear message: everything below needs vec_index
      # (previously only a cat() was issued and the code crashed later)
      stop(" ** Cannot find index file: ", s_index_rmd)
    }
    # write the index to solution-rmd file
    cat(" ** Writing index.rmd to: ", cur_sol_rmd_path, "\n")
    cat(paste0(vec_index, collapse = '\n'), '\n', file = cur_sol_rmd_path, append = FALSE)
    # the exam gets the same header with the 'Solutions To ' title prefix removed
    vec_index_exam <- gsub('Solutions To ', '', vec_index, fixed = TRUE)
    cat(paste0(vec_index_exam, collapse = '\n'), '\n', file = cur_exam_rmd_path, append = FALSE)
    # add problems to solution and exam rmd files
    for (pidx in seq_along(pvec_problem)){
      # obtain name of current problem
      cur_problem <- pvec_problem[pidx]
      # every problem starts on a fresh page in both documents
      cat("\n\\clearpage\n\\pagebreak\n\n", file = cur_sol_rmd_path, append = TRUE)
      cat("\n\\clearpage\n\\pagebreak\n\n", file = cur_exam_rmd_path, append = TRUE)
      # check whether there is an R-script for the current problem
      cur_rscript <- file.path(ps_rsrc_dir, paste0(cur_problem, '.R'))
      if (file.exists(cur_rscript)){
        cat(" *** Sourcing R-script: ", cur_rscript, "\n")
        source(cur_rscript)
        # the sourced script is expected to define generate_problem_setup()
        l_problem_setup <- generate_problem_setup()
        # write problem setup statement as a hidden chunk into both documents
        cat(" *** Writing problem setup to: ", cur_sol_rmd_path, "\n")
        cat(paste0("```{r ", cur_problem, "-problem-setup, echo=FALSE, results='hide'}\n", collapse = ''),
            file = cur_sol_rmd_path, append = TRUE)
        cat(l_problem_setup$rstmt, "\n", file = cur_sol_rmd_path, append = TRUE)
        cat("```\n\n", file = cur_sol_rmd_path, append = TRUE)
        cat(" *** Writing problem setup to: ", cur_exam_rmd_path, "\n")
        cat(paste0("```{r ", cur_problem, "-problem-setup, echo=FALSE, results='hide'}\n", collapse = ''),
            file = cur_exam_rmd_path, append = TRUE)
        cat(l_problem_setup$rstmt, "\n", file = cur_exam_rmd_path, append = TRUE)
        cat("```\n\n", file = cur_exam_rmd_path, append = TRUE)
      }
      # write problem rmd to solution and exam rmd
      cur_problem_rmd_path <- file.path(ps_rmd_dir, paste0(cur_problem, '.Rmd'))
      if (file.exists(cur_problem_rmd_path)){
        con_problem <- file(description = cur_problem_rmd_path)
        vec_problem <- readLines(con = con_problem)
        close(con = con_problem)
        cat(" *** Writing problem rmd: ", cur_problem_rmd_path, " to solution: ", cur_sol_rmd_path, "\n")
        cat(paste0(vec_problem, collapse = "\n"), "\n", file = cur_sol_rmd_path, append = TRUE)
        # exam version: drop the content between \solstart and \solend tags
        l_sol_pos <- list(start = which(vec_problem == '\\solstart'),
                          end = which(vec_problem == '\\solend'))
        if (length(l_sol_pos$start) != length(l_sol_pos$end))
          stop(" * ERROR: not the same number of solution start positions and end positions")
        n_nr_section <- length(l_sol_pos$start)
        if (n_nr_section == 0){
          # no tagged solutions in this problem: exam equals the problem source
          # (previously this case crashed with an NA subscript)
          vec_exam <- vec_problem
        } else {
          vec_exam <- vec_problem[1:l_sol_pos$start[1]]
          if (n_nr_section > 1){
            for (idx in 2:n_nr_section){
              vec_exam <- c(vec_exam, vec_problem[(l_sol_pos$end[idx-1]+1):l_sol_pos$start[idx]])
            }
          }
          # remaining lines after the last \solend; <= (instead of <) so a
          # single trailing line is no longer silently dropped
          if ((l_sol_pos$end[n_nr_section]+1) <= length(vec_problem)){
            vec_exam <- c(vec_exam, vec_problem[(l_sol_pos$end[n_nr_section]+1):length(vec_problem)])
          }
        }
        cat(" *** Writing problem rmd: ", cur_problem_rmd_path, " to exam: ", cur_exam_rmd_path, "\n")
        cat(paste0(vec_exam, collapse = '\n'), '\n', file = cur_exam_rmd_path, append = TRUE)
      } else {
        cat(" *** Cannot find problem rmd: ", cur_problem_rmd_path, "\n")
      }
    }
    # render solution and exam rmd to their final output formats
    cat(" ** Render solution rmd: ", cur_sol_rmd_path, '\n')
    rmarkdown::render(input = cur_sol_rmd_path)
    cat(" ** Render exam: ", cur_exam_rmd_path, '\n')
    rmarkdown::render(input = cur_exam_rmd_path)
  }
  return(invisible())
}
|
3e7d4f91ec8aa7dc60d861b2faf4d40bd219ff82 | b1e1a193db8d4647a2ae1566724beebcfbc2c167 | /index/data/observational/scripts/sensitivity/5_forestplot_master.R | fbb6ab4f1f49a35ddee582ad8141263e8551e01f | [] | no_license | mattlee821/000_thesis | 166cef4616ad70ea47a6d558c77c8c4ec0a021b3 | 867c9f08daea61ecca7aa73e660d5001d1315a1b | refs/heads/master | 2022-05-08T08:26:24.394209 | 2022-04-07T09:30:51 | 2022-04-07T09:30:51 | 229,047,207 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 12,729 | r | 5_forestplot_master.R | rm(list=ls())
# packages ====
library(ggforestplot)
library(tidyverse)
library(patchwork)
library(cowplot)
library(MetaboQC)
# source
source("index/data/index/colour_palette.R")
source("index/data/observational/scripts/my_forestplot.R")
ng_anno <- (metaboprep:::ng_anno)
# data ====
# main results and sensitivity results are flagged via a `sensitivity` column
# so they can be distinguished after stacking them for a single forest plot
data <- read.table("index/data/observational/data/analysis/results/combined/combined.txt", header = TRUE, sep = "\t", stringsAsFactors = TRUE)
data <- subset(data, subclass != "NA")
data$sensitivity <- "no"
sensitivity <- read.table("index/data/observational/data/analysis/sensitivity/results/combined/combined.txt", header = TRUE, sep = "\t", stringsAsFactors = TRUE)
sensitivity <- subset(sensitivity, subclass != "NA")
sensitivity$sensitivity <- "yes"
# subclass display order ====
# defined once -- previously this 41-element vector was copy-pasted six times
subclass_levels <- c("Amino acids","Aromatic amino acids","Branched-chain amino acids",
                     "Apolipoproteins","Cholesterol","Fatty acids","Fatty acids ratios",
                     "Fluid balance","Glycerides and phospholipids","Glycolysis related metabolites",
                     "Inflammation","Ketone bodies","Lipoprotein particle size",
                     "Very large HDL","Large HDL","Medium HDL","Small HDL",
                     "Large LDL","Medium LDL","Small LDL","IDL",
                     "Extremely large VLDL","Very large VLDL","Large VLDL","Medium VLDL","Small VLDL","Very Small VLDL",
                     "Very large HDL ratios","Large HDL ratios","Medium HDL ratios","Small HDL ratios",
                     "Large LDL ratios","Medium LDL ratios","Small LDL ratios","IDL ratios",
                     "Extremely large VLDL ratios","Very large VLDL ratios","Large VLDL ratios","Medium VLDL ratios","Small VLDL ratios","Very Small VLDL ratios")

# apply the shared subclass ordering (reversed so the first class appears at
# the top of the plot) and sort rows by subclass then metabolite
order_by_subclass <- function(d) {
  d$subclass <- fct_rev(factor(d$subclass, levels = subclass_levels))
  d[order(d$subclass, d$metabolite), ]
}

# build and save the main-vs-sensitivity forest plot for one exposure
plot_exposure_comparison <- function(main_dat, sens_dat, exposure_name, out_file,
                                     psignif = 1, ci = 0.95) {
  plot_data <- rbind(order_by_subclass(subset(main_dat, exposure == exposure_name)),
                     order_by_subclass(subset(sens_dat, exposure == exposure_name)))
  p1 <- my_forestplot(
    df = plot_data,
    name = raw.label,
    estimate = b,
    pvalue = p,
    psignif = psignif,
    ci = ci,
    xlab = "linear regression of z-score",
    colour = sensitivity,
    shape = group) +
    scale_color_manual(values = c(discrete_wes_pal[[16]], discrete_wes_pal[[14]], discrete_wes_pal[[18]])) +
    ggforce::facet_col(
      facets = ~subclass,
      scales = "free",
      space = "free"
    ) +
    theme(legend.position = "bottom") +
    theme(axis.title.x = element_blank())
  pdf(out_file, width = 20, height = 100)
  # explicit print() is required: ggplot objects do not auto-print inside a function
  print(p1)
  dev.off()
}

# model 2 ====
data1 <- subset(data, model == "model2")
sensitivity1 <- subset(sensitivity, model == "model2")
out_dir <- "index/data/observational/data/analysis/sensitivity/results/comparison"
plot_exposure_comparison(data1, sensitivity1, "bmi", file.path(out_dir, "model2_bmi.pdf"))
plot_exposure_comparison(data1, sensitivity1, "whr", file.path(out_dir, "model2_whr.pdf"))
plot_exposure_comparison(data1, sensitivity1, "bf", file.path(out_dir, "model2_bf.pdf"))
18df63d1ea87c1cbd4d862647a8fa62c9f40fe6c | 4c0a13d33cd8f90c2ff58c7a297d99d27558804b | /R/frdHouseTest.R | 424dd7ac1e0215d2f5b31a557a7e0b15b29858f8 | [] | no_license | cran/PMCMRplus | ff2109713beb561325dc2dbefeb11cee93f4e82d | a286c945209b4274eeaf972c7d55d47c02b06457 | refs/heads/master | 2023-07-24T04:22:27.676376 | 2023-07-05T23:14:18 | 2023-07-05T23:14:18 | 117,816,366 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 6,061 | r | frdHouseTest.R | ## frdHouseTest.R
## Part of the R package: PMCMR
##
## Copyright (C) 2022 Thorsten Pohlert
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## A copy of the GNU General Public License is available at
## http://www.r-project.org/Licenses/
##
#' @rdname frdHouseTest
#' @title House Test
#'
#' @description
#' Performs House nonparametric equivalent of William's test
#' for contrasting increasing dose levels of a treatment in
#' a complete randomized block design.
#'
#'
#' @details
#' House test is a non-parametric step-down trend test for testing several treatment levels
#' with a zero control. Let there be \eqn{k} groups including the control and let
#' the zero dose level be indicated with \eqn{i = 0} and the highest
#' dose level with \eqn{i = m}, then the following \code{m = k - 1} hypotheses are tested:
#'
#' \deqn{
#' \begin{array}{ll}
#' \mathrm{H}_{m}: \theta_0 = \theta_1 = \ldots = \theta_m, & \mathrm{A}_{m} = \theta_0 \le \theta_1 \le \ldots \theta_m, \theta_0 < \theta_m \\
#' \mathrm{H}_{m-1}: \theta_0 = \theta_1 = \ldots = \theta_{m-1}, & \mathrm{A}_{m-1} = \theta_0 \le \theta_1 \le \ldots \theta_{m-1}, \theta_0 < \theta_{m-1} \\
#' \vdots & \vdots \\
#' \mathrm{H}_{1}: \theta_0 = \theta_1, & \mathrm{A}_{1} = \theta_0 < \theta_1\\
#' \end{array}
#' }
#'
#' Let \eqn{Y_{ij} ~ (1 \leq i \leq n, 0 \leq j \leq k)} be a i.i.d. random variable
#' of at least ordinal scale. Further, let \eqn{\bar{R}_0,~\bar{R}_1, \ldots,~\bar{R}_k}
#' be Friedman's average ranks and set \eqn{\bar{R}_0^*, \leq \ldots \leq \bar{R}_k^*}
#' to be its isotonic regression estimators under the order restriction
#' \eqn{\theta_0 \leq \ldots \leq \theta_k}.
#'
#' The statistics is
#' \deqn{
#' T_j = \left(\bar{R}_j^* - \bar{R}_0 \right)~ \left[ \left(V_j - H_j \right)
#' \left(2 / n \right) \right]^{-1/2} \qquad (1 \leq j \leq k),
#' }
#'
#' with
#' \deqn{
#' V_j = \left(j + 1\right) ~ \left(j + 2 \right) / 12
#' }
#'
#' and
#' \deqn{
#' H_j = \left(t^3 - t \right) / \left(12 j n \right),
#' }
#'
#' where \eqn{t} is the number of tied ranks.
#'
#' The critical \eqn{t'_{i,v,\alpha}}-values
#' as given in the tables of Williams (1972) for \eqn{\alpha = 0.05} (one-sided)
#' are looked up according to the degree of freedoms (\eqn{v = \infty}) and the order number of the
#' dose level (\eqn{j}).
#'
#' For the comparison of the first dose level \eqn{(j = 1)} with the control, the critical
#' z-value from the standard normal distribution is used (\code{\link[stats]{Normal}}).
#'
#' @references
#' Chen, Y.-I., 1999. Rank-Based Tests for Dose Finding in
#' Nonmonotonic Dose–Response Settings.
#' *Biometrics* **55**, 1258--1262. \doi{10.1111/j.0006-341X.1999.01258.x}
#'
#' House, D.E., 1986. A Nonparametric Version of Williams’ Test for
#' Randomized Block Design. *Biometrics* **42**, 187--190.
#'
#' @concept friedmanranks
#' @keywords htest nonparametric
#'
#' @example examples/frdManyOne.R
#'
#' @seealso
#' \code{\link{friedmanTest}}, \code{\link[stats]{friedman.test}},
#' \code{\link{frdManyOneExactTest}}, \code{\link{frdManyOneDemsarTest}}
#'
#' @template class-PMCMR
#' @export
frdHouseTest <- function(y, ...) UseMethod("frdHouseTest")
#' @rdname frdHouseTest
#' @method frdHouseTest default
#' @aliases frdHouseTest.default
#' @param alternative the alternative hypothesis. Defaults to \code{greater}.
#' @template two-way-parms
#' @export
frdHouseTest.default <-
function(y,
groups,
blocks,
alternative = c("greater", "less"),
...)
{
## Check arguments
alternative <- match.arg(alternative)
if (alternative == "less") {
y <- -y
}
## Friedman-type ranking
ans <- frdRanks(y, groups, blocks)
Rij <- ans$r
## number of levels including 0-control
k <- ncol(Rij)
if (k-1 > 10)
stop("Critical t-values are only available for up to 10 dose levels.")
## sample size per group
n <- nrow(Rij)
## mean ranks per group
Rj <- as.vector(
colMeans(Rij, na.rm = TRUE)
)
names(Rj) <- NULL
## ties
ties <- sapply(seq_len(n),
function(i){
t <- table(Rij[i,])
sum(t - 1)
})
t <- sum(ties)
##
## call to own pava
Rjiso <- .Fortran(
"pava",
y = as.double(Rj), # vector
w = as.double(rep(n, k)), # vector
kt = integer(k),
n = as.integer(k)
)$y
## zero control
Rj0 <- Rj[1]
Rjiso <- Rjiso[-1]
Tj <- sapply(1:(k-1),
function(j) {
Vj <- (j + 1) * (j + 2) / 12 -
(t^3 - t) / (12 * j * n)
(Rjiso[j] - Rj0) /
sqrt(Vj * 2 / n)
}
)
## critical t'-values for df = Inf
Tkdf <- getTkalpha(1:(k-1))
## Create output matrices
STAT <- cbind(ctr = Tj)
row.names(STAT) <- sapply(1:(k - 1), function(i)
paste0("mu", i))
STATCRIT <- cbind(ctr = Tkdf)
row.names(STATCRIT) <- row.names(STAT)
DAT <- ans$inDF
names(DAT) <- c("y", "groups", "blocks")
## re-scale original data
if (alternative == "less") {
DAT$y <- -DAT$y
}
METHOD <- c("House (Williams) test on Friedman-Ranks")
parameter <- Inf
names(parameter) <- "df"
ans <- list(
method = METHOD,
data.name = ans$DNAME,
crit.value = STATCRIT,
statistic = STAT,
parameter = parameter,
alternative = alternative,
dist = "t\'",
model = DAT
)
class(ans) <-"osrt"
return(ans)
}
|
02964dac6b9c2d6c91b907ed6a21934c8dc51e8d | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610387012-test.R | 124c07022818300238c729e9b98784ef166134b9 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 314 | r | 1610387012-test.R | testlist <- list(a = 0L, b = 0L, x = c(688392448L, -1694498817L, -738263040L, 0L, 5855577L, 1499027801L, -1L, -1L, 1499027801L, 1499027801L, 1499027720L, 589657L, 1499027801L, 1499027801L, 1499027801L, 22873L, 0L, 66304L, 128L, 0L, 0L, 128L, 0L, 0L))
# Auto-generated fuzzer reproduction: calls the internal grattan:::anyOutside
# with the fuzzer-produced argument list `testlist` (defined immediately above)
# and prints the structure of the result. Keep byte-identical so the crash
# case stays reproducible. NOTE(review): requires the grattan package.
result <- do.call(grattan:::anyOutside,testlist)
str(result)
b2693d2e90871149c15164097fae27a2f8128dca | 07ae778ef3a85a9ce3f2ff76a970c6e61d9ef83e | /rprogramming/lab1/corr.R | 79a37a392f99e56647bacc6a229e4dc964fc7bcc | [] | no_license | jiyoochang95/datasciencecoursera | 3f2700ccb2c13f50c03d8ee59aa77edf45798e7a | b786827c8d36a04b0857ecd55ae8eecfadb2e895 | refs/heads/master | 2021-01-16T18:26:19.857191 | 2017-08-25T23:56:50 | 2017-08-25T23:56:50 | 100,079,597 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 270 | r | corr.R | #calculates the correlation between sulfate and nitrate ?
#' Compute sulfate/nitrate correlations for all monitors in a directory.
#'
#' For every CSV file in `directory` the correlation between the `sulfate`
#' and `nitrate` columns is computed on the completely observed rows. A
#' monitor only contributes a value when its number of complete cases
#' exceeds `thresehold`. (The parameter name is kept as in the original
#' signature -- note the typo -- so callers using the named argument keep
#' working.)
#'
#' The previous body referenced an undefined `threhold`, called read.csv()
#' on a directory path, and passed a nonexistent `na.rm` argument to cor();
#' it could never run.
#'
#' @param directory path to a folder of monitor CSV files, each containing
#'   `sulfate` and `nitrate` columns.
#' @param thresehold minimum number of complete cases (exclusive) a monitor
#'   must have to be included. Defaults to 0.
#' @return numeric vector of correlations, one per qualifying monitor
#'   (length 0 when none qualifies).
corr <- function(directory, thresehold = 0){
  files <- list.files(directory, pattern = "\\.csv$", full.names = TRUE)
  cors <- lapply(files, function(f) {
    dat <- read.csv(f)
    ok <- complete.cases(dat$sulfate, dat$nitrate)
    if (sum(ok) > thresehold) {
      cor(dat$sulfate[ok], dat$nitrate[ok])
    } else {
      NULL  # dropped by unlist() below
    }
  })
  # unlist() of an all-NULL list is NULL; coerce so the return type is
  # always numeric (numeric(0) when no monitor qualifies)
  as.numeric(unlist(cors))
}
|
78cd5d1c1518804c6a4815fb20ff572fbfe23ce2 | f28fcb459691838193cdb3f33b47238a6d9048ed | /R/pw_cum_mat.R | f2c5ea372c06daa8c32429eaa89dc925d9b455c7 | [
"MIT"
] | permissive | harrisonreeder/AFTTV | af7aee0924b66fb0c2130e2bc4ce4f9e5927ac5f | fad7442512ff7c8aa15343322d20d778dc28d447 | refs/heads/main | 2023-08-16T03:56:56.147806 | 2021-10-25T12:46:47 | 2021-10-25T12:46:47 | 383,913,416 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,349 | r | pw_cum_mat.R | #' Generate Matrix of Values of Integrated Piecewise Constant Function
#'
#' This helper function takes in a vector of event times, and
#' generates a matrix having each row showing the time accrued within each
#' interval between consecutive elements of a vector of knots.
#'
#' @param y vector of (non-negative) event times.
#' @param knots increasing vector of cutpoints. If it does not start with 0, one will be appended to the start.
#'   However, it should not include Inf at the end.
#' @param intercept if true, includes column corresponding with 'first' interval, and if false does not.
#'   It makes sense to include an intercept if the time-varying covariate is not also included in the "baseline",
#'   otherwise, there would be an identifiability issue.
#'
#' @return a numeric matrix, with (number of nonzero knots + 1) columns (one
#'   fewer when \code{intercept = FALSE}), and with rows corresponding to elements of y.
#' @export
pw_cum_mat <- function(y, knots, intercept=TRUE){
  # ensure the knot vector starts at time zero
  if(knots[1] != 0){knots <- c(0,knots)}
  # vector giving the length of the intervals between each knot
  knots_diff <- diff(knots)
  # number of intervals; the last one is open-ended, hence the 0 width below
  num_int <- length(knots)
  n <- length(y)
  # start each row as if the full length of every closed interval were accrued;
  # the trailing 0 is the placeholder width of the open-ended last interval
  knots_mat <- matrix(c(knots_diff,0),nrow=n,ncol=num_int,byrow = TRUE)
  # interval index containing each event time
  cut_cats <- findInterval(x = y, vec = knots)
  # residual time accrued within the final (partial) interval of each observation
  y_last <- y-knots[cut_cats]
  # finalize each row: partial time in the event's interval, zero afterwards
  # (seq_len avoids the 1:0 trap when y is empty)
  for(i in seq_len(n)){
    knots_mat[i,cut_cats[i]] <- y_last[i]
    knots_mat[i,-c(1:cut_cats[i])] <- 0
  }
  # removing the intercept changes the parameterization, so that every parameter
  # is a change from the "reference" group, which is the first interval
  if(!intercept){
    if(ncol(knots_mat) <= 2){stop("include at least two nonzero knots.")}
    # drop = FALSE keeps the matrix class even for a single event time
    # (previously a 1-row input silently degraded to a plain vector)
    knots_mat <- knots_mat[,-1, drop = FALSE]
  }
  return(knots_mat)
}
|
391d92209402e2cb8237efd33b88d8368dcb4cf3 | fc97ee42c50b798eadaeb5f1b51330b7b5002ba0 | /man/dataME.Rd | c8d1fcb323824eb9e3eb570f3c7e91ec18f90abc | [] | no_license | cran/saeME | 268c8a78cf9feea0f32ca747d435f838035c7911 | ec44e009e7fc9d227e8faeec311d29abf8e77ffb | refs/heads/master | 2023-08-31T12:54:26.631299 | 2023-08-21T04:10:02 | 2023-08-21T07:30:42 | 248,760,316 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,513 | rd | dataME.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataME.R
\name{dataME}
\alias{dataME}
\title{dataME}
\format{
A data frame with 100 observations on the following 4 variables.
\describe{
\item{\code{small_area}}{areas of interest.}
\item{\code{y}}{direct estimator for each domain.}
\item{\code{x.hat}}{auxiliary variable for each domain.}
\item{\code{vardir}}{sampling variances for each domain.}
\item{\code{var.x}}{mean squared error of auxiliary variable and sorted as \code{x.hat}}
}
}
\usage{
data(dataME)
}
\description{
This data generated by simulation based on Fay-Herriot with Measurement Error Model by following these steps:
\enumerate{
\item Generate \eqn{x_{i}}{xi} from a UNIF(5, 10) distribution, \eqn{\psi_{i}}{\psii} = 3, \eqn{c_{i}}{ci} = 0.25, and \eqn{\sigma_{v}^{2}}{\sigma2v} = 2.
\item Generate \eqn{u_{i}}{ui} from a N(0, \eqn{c_{i}}{ci}) distribution, \eqn{e_{i}}{ei} from a N(0, \eqn{\psi_{i}}{\psii}) distribution, and \eqn{v_{i}}{vi} from a N(0, \eqn{\sigma_{v}^{2}}{\sigma2v}) distribution.
\item Generate \eqn{\hat{x}_{i}}{x.hati} = \eqn{x_{i}}{xi} + \eqn{u_{i}}{ui}.
\item Then for each iteration, we generated \eqn{Y_{i}}{Yi} = \eqn{2 + 0.5 \hat{x}_{i} + v_{i}}{2 + 0.5*x.hati + vi} and \eqn{y_{i}}{yi} = \eqn{Y_{i} + e_{i}}{Yi + ei}.
}
Direct estimator \code{y}, auxiliary variable \eqn{\hat{x}}{x.hat}, sampling variance \eqn{\psi}{\psi}, and \eqn{c}{c} are arranged in a dataframe called \code{dataME}.
}
|
685d4c611931511e4bdea8caab1ffe798db7f485 | 75747a2cb89888b2eeb812514178007542666be1 | /finalproject_group5/code/modelselection.R | d80660f7110eb68cd14ca5180fa9173e8a7aa12b | [] | no_license | yyy855029/1082-datascience | d105609ff437638efcff4e8a61a15070c01a54a1 | 2eb72acb1631086c8db405a43484908f723224e7 | refs/heads/master | 2023-03-05T08:25:41.655065 | 2021-02-06T06:21:57 | 2021-02-06T06:21:57 | 273,643,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,339 | r | modelselection.R | library(ggplot2)
library(dplyr)
library(rpart)
library(randomForest)
library(argparser)
library(party)
library(xgboost)
library(pROC)
library(DiagrammeR)
library(rpart)
library(plyr)
library(dplyr)
library(gridExtra)
library(lattice)
library(caret)
library(e1071)
draw_confusion_matrix <- function(cm) {
layout(matrix(c(1,1,2)))
par(mar=c(2,2,2,2))
plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
title('CONFUSION MATRIX', cex.main=2)
# create the matrix
rect(150, 430, 240, 370, col='#3F97D0')
text(195, 435, 'Class1', cex=1.2)
rect(250, 430, 340, 370, col='#F7AD50')
text(295, 435, 'Class2', cex=1.2)
text(125, 370, 'Predicted', cex=1.3, srt=90, font=2)
text(245, 450, 'Actual', cex=1.3, font=2)
rect(150, 305, 240, 365, col='#F7AD50')
rect(250, 305, 340, 365, col='#3F97D0')
text(140, 400, 'Class1', cex=1.2, srt=90)
text(140, 335, 'Class2', cex=1.2, srt=90)
# add in the cm results
res <- as.numeric(cm$table)
text(195, 400, res[1], cex=1.6, font=2, col='white')
text(195, 335, res[2], cex=1.6, font=2, col='white')
text(295, 400, res[3], cex=1.6, font=2, col='white')
text(295, 335, res[4], cex=1.6, font=2, col='white')
# add in the specifics
plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
text(10, 85, names(cm$byClass[1]), cex=1.2, font=2)
text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.2)
text(30, 85, names(cm$byClass[2]), cex=1.2, font=2)
text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.2)
text(50, 85, names(cm$byClass[5]), cex=1.2, font=2)
text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.2)
text(70, 85, names(cm$byClass[6]), cex=1.2, font=2)
text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.2)
text(90, 85, names(cm$byClass[7]), cex=1.2, font=2)
text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.2)
# add in the accuracy information
text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.4)
text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.4)
}
p <- arg_parser("target, input and output")
p <- add_argument(p,"--fold", help = "assign how many fold k to do the cross validation")
p <- add_argument(p, "--pokemon", help="train file")
p <- add_argument(p, "--combat", help="train file")
p <- add_argument(p, "--report", help="report file", default="performance.csv")
p <- add_argument(p, "--predict", help="predict file", default="predict.csv")
p <- add_argument(p, "--methods", help="use which model")
argv <- parse_args(p, commandArgs(trailingOnly = TRUE))
k_fold <- argv$fold
k_fold <- as.numeric(k_fold)
report <- argv$report
prediction <- argv$predict
methods <- argv$methods
performance <- data.frame(list(set="", training="", validation="",testing="",method = ""), stringsAsFactors=FALSE,row.names = 1)
#load data
#pokemon <- read.csv("pokemon.csv",header = TRUE,sep = ",", stringsAsFactors = F,row.names = NULL)
pokemon <- read.csv(argv$pokemon,header = TRUE,sep = ",", stringsAsFactors = F,row.names = NULL)
#combat <- read.csv("combats.csv",header = TRUE, sep=",", stringsAsFactors = F,row.names = NULL)
combat <- read.csv(argv$combat,header = TRUE, sep=",", stringsAsFactors = F,row.names = NULL)
names(pokemon)[1] <- "number"
#join to create train and test data
#library(dplyr)
firstpokemon <- data.frame(combat$First_pokemon)
secondpokemon <- data.frame(combat$Second_pokemon)
colnames(firstpokemon) <- "number"
colnames(secondpokemon) <- "number"
#這段是勝率,不是預測誰贏
#merge data together
#library(plyr)
merge1 <- join(firstpokemon,pokemon,by="number")
merge2 <- join(secondpokemon,pokemon,by="number")
newdata <- cbind(merge1,merge2,combat$Winner)
colnames(newdata)[colnames(newdata)=="combat$Winner"] = "Winner"
colnames(newdata) <- c("number_a","name_a","type1_a","type2_a","hp_a","attack_a","defense_a","spatk_a","spdef_a","speed_a","generation_a","legendary_a","number_b","name_b","type1_b","type2_b","hp_b","attack_b","defense_b","spatk_b","spdef_b","speed_b","generation_b","legendary_b","Winner")
newdata2 <- newdata
#data transition
newdata2$type1_a <- as.numeric(factor(newdata2$type1_a))
newdata2$type2_a <- as.numeric(factor(newdata2$type2_a))
newdata2$legendary_a <- as.numeric(factor(newdata2$legendary_a))
newdata2$type1_b <- as.numeric(factor(newdata2$type1_b))
newdata2$type2_b <- as.numeric(factor(newdata2$type2_b))
newdata2$legendary_b <- as.numeric(factor(newdata2$legendary_b))
#new columns
binarywinner = rep(1, 50000)
binarywinner[newdata2$Winner == newdata2$number_b] = 0
newdata2$binary_winner <- binarywinner
newdata3 <- newdata2[,-c(1,2,13,14,25)]
# to draw plot
temp <- 0
score <- function(word,tb){
if (word>temp){
temp <- word
write.csv("aaa.csv",file=tb)
}
}
#cross validate k=10
k=k_fold
#k = 10
folds = cut(seq(1,nrow(newdata)),breaks = k, labels= FALSE)
for (i in 1:k){
testIndexes <- which(folds==i,arr.ind=TRUE)
validIndexes <- which(folds==(k-i+1),arr.ind=TRUE)
d_test <- newdata3[testIndexes, ]
d_valid <- newdata3[validIndexes,]
d_train <- newdata3[-c(testIndexes,validIndexes), ]
#model training
#y_train <- d_train$binary_winner
y_train <- d_train[,21]
x_train <- d_train[,-21]
y_valid <- d_valid[,21]
x_valid <- d_valid[,-21]
y_test <- d_test[,21]
x_test <- d_test[,-21]
fmla = paste("binary_winner",paste(names(x_train),collapse = "+"),sep="~")
if (methods == "logistic"){
#train
#model1 <- glm(fmla,data=d_train,family="binomial")
model1 <- readRDS("./logistic.rds")
result_train <- predict(model1,x_train,type="response")
rt1_train <- table(result_train > .5, y_train)
acc_train <- (rt1_train[1]+rt1_train[4])/sum(rt1_train)
#test
result_valid <- predict(model1,x_valid,type="response")
rt1_valid <- table(result_valid > .5, d_valid$binary_winner)
acc_valid <- (rt1_valid[1]+rt1_valid[4])/sum(rt1_valid)
result_test <- predict(model1,x_test,type="response")
rt1_test <- table(result_test > .5, d_test$binary_winner)
acc_test <- (rt1_test[1]+rt1_test[4])/sum(rt1_test)
sen <- (rt1_test[4])/(rt1_test[2]+rt1_test[4])
spe <- (rt1_test[1])/(rt1_test[1]+rt1_test[3])
pre <- (rt1_test[4])/(rt1_test[3]+rt1_test[4])
F1 <- 2*pre*sen/(pre+sen)
result_test <- c(result_test)
result_test[which(result_test>=0.5)] =1
result_test[which(result_test<0.5)] =0
result_test <- as.numeric(result_test)
cm <- confusionMatrix(as.factor(result_test),reference = as.factor(y_test), mode = "prec_recall", positive="1")
pdf("matrix_log.pdf")
draw_confusion_matrix(cm)
dev.off()
#score(acc_test,rt1_test)
#myroc = roc(response= y_valid, predictor = x_valid)
#pdf("roc_curve.pdf")
#plot1 <- plot.roc(myroc)
#text <- paste("AUC: ", as.character(round(auc(myroc),2)))
#mtext(text)
#dev.off()
}
else if(methods =="randomforest"){
#model2 <- randomForest(formula = binary_winner~., data = d_train, ntree=500)
#rf_tree <- randomForest(binary_winner ~. , data = d_train, type = 'classification', ntree = 500 )
model2<- readRDS("./rf_tree.rds")
pred <- predict(model2, x_train)
rt2_train <- table(pred >0.5, y_train)
acc_train <- (rt2_train[1]+rt2_train[4])/sum(rt2_train)
pdf("varimportance.pdf")
plot2 <- randomForest::varImpPlot(model2)
dev.off()
pred <- predict(model2, x_valid)
rt2_valid <- table(pred >0.5, y_valid)
acc_valid <- (rt2_valid[1]+rt2_valid[4])/sum(rt2_valid)
pred <- predict(model2, x_test)
rt2_test <- table(pred >0.5, y_test)
acc_test <- (rt2_test[1]+rt2_test[4])/sum(rt2_test)
sen <- (rt2_test[4])/(rt2_test[2]+rt2_test[4])
spe <- (rt2_test[1])/(rt2_test[1]+rt2_test[3])
pre <- (rt2_test[4])/(rt2_test[3]+rt2_test[4])
F1 <- 2*pre*sen/(pre+sen)
result_test <- c(pred)
result_test[which(result_test>=0.5)] =1
result_test[which(result_test<0.5)] =0
result_test <- as.numeric(result_test)
cm <- confusionMatrix(as.factor(result_test),reference = as.factor(y_test), mode = "prec_recall", positive="1")
pdf("matrix_rf.pdf")
draw_confusion_matrix(cm)
dev.off()
#score(acc_test,rt2_test)
#tree_func(model,tree_num = 500)
#rownames(d_train) = make.names(rownames(d_train), unique=TRUE)
#plot4 <- reprtree:::plot.getTree(model)
#pt <- party:::prettytree(bst@ensemble[[1]], names(bst@data@get("input")))
#nt <- new("BinaryTree")
#nt@tree <- pt
#nt@data <- bst@data
#nt@responses <-bst@responses
#plot(nt,type="simple")
}
else{
dtrain = xgb.DMatrix(as.matrix(sapply(d_train, as.numeric)), label=d_train$binary_winner)
dvalid = xgb.DMatrix(as.matrix(sapply(d_valid, as.numeric)), label=d_valid$binary_winner)
#model3 <- xgboost(dtrain,
#nrounds = 10, objective = "binary:logistic")
model3 <- readRDS("./xgboost.rds")
preds = predict(model3, as.matrix(d_train))
rt3_train <- table(preds > .5, d_train$binary_winner)
acc_train <- (rt3_train[1]+rt3_train[4])/sum(rt3_train)
preds = predict(model3, as.matrix(d_valid))
rt3_valid <- table(preds > .5, d_valid$binary_winner)
acc_valid <- (rt3_valid[1]+rt3_valid[4])/sum(rt3_valid)
preds = predict(model3, as.matrix(d_test))
rt3_test <- table(preds > .5, d_test$binary_winner)
acc_test <- (rt3_test[1]+rt3_test[4])/sum(rt3_test)
sen <- (rt3_test[4])/(rt3_test[2]+rt3_test[4])
spe <- (rt3_test[1])/(rt3_test[1]+rt3_test[3])
pre <- (rt3_test[4])/(rt3_test[3]+rt3_test[4])
F1 <- 2*pre*sen/(pre+sen)
result_test <- c(preds)
result_test[which(result_test>=0.5)] =1
result_test[which(result_test<0.5)] =0
result_test <- as.numeric(result_test)
cm <- confusionMatrix(as.factor(result_test),reference = as.factor(y_test), mode = "prec_recall", positive="1")
pdf("matrix_xg.pdf")
draw_confusion_matrix(cm)
dev.off()
#score(acc_test,rt3_test)
#xgb.plot.tree(feature_names=names(dtrain),render=FALSE,model=model3, trees=2)
#export_graph(gr, 'tree.pdf', width=1500, height=1900)
}
#------------- for logistic plot
#roc plot
#這個function 可以畫圖
#------------- for random forest plot
#pdf("random_forest.pdf")
#plot3 <- plot(randomForest(x=x_valid, y=y_valid, keep.forest=FALSE, ntree=500), log="y")
#dev.off()
#performance
#(rtab <- table(resultframe))
fold_num <- paste("fold",i,sep="")
temp_out <- c(fold_num, round(acc_train,3),round(acc_valid,3),round(acc_test,3),methods)
performance[i,] <- temp_out
}
#performance combine
average <- c("ave.",as.character(round(mean(as.numeric(performance$training)),3)),as.character(round(mean(as.numeric(performance$validation)),3)),as.character(round(mean(as.numeric(performance$testing)),3)),methods)
performance <- rbind(performance,average)
write.csv(performance, file=report, fileEncoding = "UTF-8", row.names=F,quote = FALSE)
#save model
#saveRDS(modell, "./logistic.rds")
#saveRDS(rf_tree,"./rf_tree.rds")
#saveRDS(model3, "./xgboost.rds")
#load model
#model1 <- readRDS("./logistic.rds")
#model2<- readRDS("./rf_tree.rds")
#model3 <- readRDS("./xgboost.rds")
#plotting function
|
4c36c4fd1ab1d8b642f6387efa4a18adb5191209 | 0d28ed3493511cf3c37518c4f5b7d9d5c95ff8bc | /C_Code/reactiontimes.R | 585dce2ac9f2a0dbb1b9f7f9065e3411ac80b99d | [] | no_license | horotat/kwg | a75e9e476d19c4eb5048d5b24e5f8d1340f7ad2a | 912d1a4f037418792174e51b8d527f86cfdb55c0 | refs/heads/master | 2020-09-24T23:11:39.566594 | 2019-06-18T16:38:32 | 2019-06-18T16:38:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,974 | r | reactiontimes.R | #Reaction time plots
#Eric Schulz, March 2018
#house keeping
rm(list=ls())
#packages
packages <- c('plyr', 'ggplot2', 'jsonlite', 'gridExtra', 'ggjoy', "tikzDevice")
lapply(packages, require, character.only = TRUE)
#read in data
myjson<-fromJSON("kwg.json")
#empty frame
dat<-data.frame(id=numeric(), cond=numeric(), age=numeric(),
x=numeric(), y=numeric(), z=numeric(), time=numeric(),
trial=numeric(), round=numeric())
#loop through participants
for (i in 1:myjson$count){
#we need rounds 2-9
x<-as.vector(t(myjson$records$data$searchHistory$xcollect[[i]][2:9,]))
y<-as.vector(t(myjson$records$data$searchHistory$ycollect[[i]][2:9,]))
z<-as.vector(t(myjson$records$data$searchHistory$zcollect[[i]][2:9,]))
time<-as.vector(t(myjson$records$data$searchHistory$tscollect[[i]][2:9,]))
cond<-rep(myjson$records$data$condition[i], length(x))
age<-rep(myjson$records$data$age[i], length(x))
trial<-rep(0:25, 8)
round<-rep(1:8, each=26)
id<-rep(i, length(x))
#get the dummy frame
dummy<-data.frame(id=id, cond=cond, age=age, x=x, y=y, z=z, time=time, trial=trial, round=round)
#conctatenate
dat<-rbind(dat, dummy)
}
#first 10 entries are us!
dat<-subset(dat, id>10)
#kids younger than 7 are not allowed
dat<-subset(dat, age>=7)
#fontsize is 8
fontsize<-14
#standard error
se<-function(x){sd(x)/sqrt(length(x))}
#age groups are 7-8, 9-11, and adults
dat$agegroup<-ifelse(dat$age<9, "7-8", dat$age)
dat$agegroup<-ifelse(dat$age>=9 & dat$age <12, "9-11", dat$agegroup)
dat$agegroup<-ifelse(dat$age>18, ">18", dat$agegroup)
#conditions are smooth and rough
dat$Condition<-ifelse(dat$cond==1, "Rough", "Smooth")
dat$Age<-dat$agegroup
dat$time<-c(0, diff(dat$time))
100-100*nrow(subset(dat,trial>0 & time <5000 & time >100))/nrow(subset(dat,trial>0))
#data frame with everything we need, i.e. min, max, lower nad upper quantile, median, CIs & mean
dp1<-ddply(subset(dat,trial>0 & time <5000 & time >100), ~Condition+Age, summarize,
d_ymin = max(min(log(time)), quantile(log(time), 0.25) - 1.5 * IQR(log(time))),
d_ymax = min(max(log(time)), quantile(log(time), 0.75) + 1.5 * IQR(log(time))),
d_lower = quantile(log(time), 0.25), d_middle = median(log(time)), d_upper = quantile(log(time), 0.75),
mu=mean(log(time)))
cbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
data1<-ddply(subset(dat,trial>0 & time <5000 & time >100), ~Age+id+Condition, summarize, time=mean(log(time)))
########################################################
#Figure A: Error
########################################################
dp1$Age<-factor(dp1$Age, levels=c("7-8", "9-11", ">18"))
dat$Age<-factor(dat$Age, levels=c("7-8", "9-11", ">18"))
p1<-ggplot(data = dp1) +
#boxplot with given values, we only need half of it
geom_boxplot(aes(x = as.numeric(Age)-0.2, ymin = d_lower, ymax = d_upper, lower = d_lower,
middle = d_middle, upper = d_upper, width = 2 * 0.2, fill = Age), stat = "identity") +
#jitter of raw data points, needs the full data frame
geom_jitter(data=data1, aes(x = as.numeric(Age) + 0.2, y = time, color = Age),
width = 0.2 - 0.25 * 0.2, height = 0, size=1)+
#vertical segment
geom_segment(aes(x = as.numeric(Age), y = d_ymin, xend = as.numeric(Age), yend = d_ymax)) +
geom_point(aes(x = as.numeric(Age)-0.2, y = mu), shape=23, size=3, fill="white", color="black") +
#top horizontal segment
geom_segment(aes(x = as.numeric(Age) - 0.1, y = d_ymax, xend = as.numeric(Age), yend = d_ymax)) +
#top vertical segment
geom_segment(aes(x = as.numeric(Age) - 0.1, y = d_ymin, xend = as.numeric(Age), yend = d_ymin)) +facet_wrap(~Condition)+
#theme minimal
theme_minimal()+
#sans
theme(text = element_text(size=fontsize, family="sans"))+
#colors and fill
scale_fill_manual(values = c(cbPalette[c(7,6)], "grey40"))+
scale_color_manual(values = c(cbPalette[c(7,6)], "grey40"))+
#labs
xlab("Age")+ylab("Log-time")+
#no legend
theme(legend.position="none", strip.background=element_blank(), legend.key=element_rect(color=NA))+
#labe x-axis
scale_x_continuous(breaks = c(1,2,3),labels = c("7-8","9-11", ">18"))+ggtitle("(a) Reaction times")+
#various theme changes including reducing white space and adding axes
theme(axis.line.x = element_line(color="grey20", size = 1),
axis.line.y = element_line(color="grey20", size = 1),
panel.spacing.x=unit(0.2, "lines"),
panel.spacing.y=unit(1, "lines"),
plot.title = element_text(family = "sans", margin=margin(0,0,0,0)))
p1
dat$prev<-round(c(0, dat$z[-length(dat$z)])/3)*3
p2 <- ggplot(subset(dat,prev>=0 & prev<=50 & trial>0 & time <5000), aes(x=prev, y = log(time), color = Age, fill=Age)) +
#geom_count(alpha=0.2, show.legend = F, position = position_dodge(width=0.1))+
#scale_size_area(max_size = 5)+
#geom_jitter(alpha=0.05, size=0.5)+
stat_summary(fun.y = mean, geom = 'line')+
stat_summary(fun.data = mean_se, geom = 'ribbon', alpha = 0.5, color=NA) +
ylab("Reaction time on t+1")+
xlab('Reward on t')+ggtitle("(b) Reaction time and rewards")+
#ylim(c(0,50))+
scale_fill_manual(values = c(cbPalette[c(7,6)], "grey40"))+
scale_color_manual(values = c(cbPalette[c(7,6)], "grey40"))+
#scale_x_continuous(breaks = scales::pretty_breaks(n = 5))+
#scale_color_brewer(palette = 'Dark2', name="")+
#scale_fill_brewer( palette = 'Dark2', name="")+
facet_grid(~Condition)+
theme_minimal()+
theme(text = element_text(size=fontsize, family="sans"))+
#various theme changes including reducing white space and adding axes
theme(axis.line.x = element_line(color="grey20", size = 1),
axis.line.y = element_line(color="grey20", size = 1),
panel.spacing.x=unit(0.2, "lines"),
panel.spacing.y=unit(1, "lines"),
plot.title = element_text(family = "sans", margin=margin(0,0,0,0)),
legend.position = "none")
p2
library(gridExtra)
pdf("reactiontimes.pdf", width=10, height=4)
grid.arrange(p1,p2, nrow=1)
dev.off()
cohensd.ci <- function(d, n1, n2, ci = 0.95) {
t <- d * sqrt((n1 * n2)/(n1 + n2))
capture.output(
fit <- compute.es::tes(t = t, n.1 = n1, n.2 = n2, level = 100 * ci),
file = "NUL"
)
c(lower.ci = fit$l.d, upper.ci = fit$u.d)
}
d<-read.csv("kwgdata.csv")
head(d)
cohensd.ci(d=0.70, n1=length(unique(subset(d, cond=="Rough")$id)), n2=length(unique(subset(d, cond=="Smooth")$id)))
cohensd.ci(d=0.51, n1=length(unique(subset(d, agegroup=="9-11")$id)), n2=length(unique(subset(d, agegroup==">18")$id)))
cohensd.ci(d=0.43, n1=length(unique(subset(d, agegroup=="9-11")$id)), n2=length(unique(subset(d, agegroup=="7-8")$id)))
cohensd.ci(d=1.08, n1=length(unique(subset(d, agegroup=="9-11")$id)), n2=length(unique(subset(d, agegroup==">18")$id)))
cohensd.ci(d=0.37, n1=length(unique(subset(d, agegroup=="9-11")$id)), n2=length(unique(subset(d, agegroup=="7-8")$id)))
|
dd885c7b77a7e83480bd2c5d12e5186c8109c190 | 830233ea981ae9a9b12651ed259814abb7c8e11c | /Module 3 Project R File Xinfei Li.R | d57ab15be889e5e3b7aa8322bad1facdf48b9af0 | [] | no_license | FireDragonBall/Statistical_Analysis_Probability_Distributions | a925d10ef84b5c0b9cbd3c1ff38c574721d0b939 | 250ab168fb43d19ae4e16b42916abe68ce06d2a1 | refs/heads/main | 2023-04-16T01:07:59.626475 | 2021-04-23T14:25:15 | 2021-04-23T14:25:15 | 360,907,539 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,833 | r | Module 3 Project R File Xinfei Li.R |
## Part 1 ##
library(ggplot2)
x= c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
# a
a=dhyper(x,20,80,20, log = F)
View(a)
# b
b=phyper(x,20,80,20,lower.tail = T )
View(b)
# c
plot(x,a)
# d
plot(x,b)
# e
e1mean=sum(x*a)
e2var=sum((e1mean-x)^2*a)
e3sd=e2var^(1/2)
# f
f=runif(1000, min=0, max=1)
View(f)
# g
g=rhyper(1000,20,80,20)
# h
h1mean=mean(g)
h2var=var(g)
h3sd=sd(g)
# i
i01=mean(rhyper(20,20,80,20))
i02=mean(rhyper(40,20,80,20))
i03=mean(rhyper(60,20,80,20))
i04=mean(rhyper(80,20,80,20))
i05=mean(rhyper(100,20,80,20))
i06=mean(rhyper(200,20,80,20))
i07=mean(rhyper(300,20,80,20))
i08=mean(rhyper(400,20,80,20))
i09=mean(rhyper(500,20,80,20))
i10=mean(rhyper(600,20,80,20))
i11=mean(rhyper(700,20,80,20))
i12=mean(rhyper(800,20,80,20))
i13=mean(rhyper(900,20,80,20))
i14=mean(rhyper(1000,20,80,20))
# j
j01=phyper(20,20,80,20)
jmean01=sum(j01*e1mean)
j02=phyper(40,20,80,20)
jmean02=sum(e1mean*j02)
j03=phyper(60,20,80,20)
jmean03=sum(e1mean*j03)
j04=phyper(80,20,80,20)
jmean04=sum(e1mean*j04)
j05=phyper(100,20,80,20)
jmean05=sum(e1mean*j05)
j06=phyper(200,20,80,20)
jmean06=sum(e1mean*j06)
j07=phyper(300,20,80,20)
jmean07=sum(e1mean*j07)
j08=phyper(400,20,80,20)
jmean08=sum(e1mean*j08)
j09=phyper(500,20,80,20)
jmean09=sum(e1mean*j09)
j10=phyper(600,20,80,20)
jmean10=sum(e1mean*j10)
j11=phyper(700,20,80,20)
jmean11=sum(e1mean*j11)
j12=phyper(800,20,80,20)
jmean12=sum(e1mean*j12)
j13=phyper(900,20,80,20)
jmean13=sum(e1mean*j13)
j14=phyper(1000,20,80,20)
jmean14=sum(e1mean*j14)
# k
I=c(i01,i02,i03,i04,i05,i06,i07,i08,i09,i10,i11,i12,i13,i14)
n=c(20,40,60,80,100,200,300,400,500,600,700,800,900,1000)
plot(n,I,type='l', lty = 1)
abline(h =e1mean)
## Part 2 ##
library(readxl)
library(ggplot2)
Module_3_Project_Keno_v1_3_ <- read_excel("C:/Users/Administrator/Desktop/Module 3 Project_Keno_v1(3).xlsx",
sheet = "Part 2")
View(Module_3_Project_Keno_v1_3_)
# a
Pop0=c(Module_3_Project_Keno_v1_3_$`X - A Normal Population`)
Pop=c(Pop0)
mean(Pop)
var(Pop)
sd(Pop)
# b
hist(Pop,breaks = 20, prob=TRUE)
# c,d
x = sample(Pop, 900, replace = T)
result1 <-list()
for (i in 1:30) {
result1[i] <- mean(sample(x, 30))
}
result2 <-list()
for (i in 1:30) {
result2[i] <- var(sample(x, 30))
}
result3 <-list()
for (i in 1:30) {
result3[i] <- sd(sample(x, 30))
}
# e
Samplemean=mean(as.numeric(result1))
Samplevar=mean(as.numeric(result2))
Samplesd=mean(as.numeric(result3))
# f.compare with Central Limit Theorem
mean(Pop)
Samplemean
sd(Pop)
Samplesd
sd(Pop)/sqrt(30)
# g
hist(as.numeric(result1),breaks = 30)
##### or for Part 2 c-e #####
# c
s01=sample(Pop, 30, replace = F)
s02=sample(Pop, 30, replace = F)
s03=sample(Pop, 30, replace = F)
s04=sample(Pop, 30, replace = F)
s05=sample(Pop, 30, replace = F)
s06=sample(Pop, 30, replace = F)
s07=sample(Pop, 30, replace = F)
s08=sample(Pop, 30, replace = F)
s09=sample(Pop, 30, replace = F)
s10=sample(Pop, 30, replace = F)
s11=sample(Pop, 30, replace = F)
s12=sample(Pop, 30, replace = F)
s13=sample(Pop, 30, replace = F)
s14=sample(Pop, 30, replace = F)
s15=sample(Pop, 30, replace = F)
s16=sample(Pop, 30, replace = F)
s17=sample(Pop, 30, replace = F)
s18=sample(Pop, 30, replace = F)
s19=sample(Pop, 30, replace = F)
s20=sample(Pop, 30, replace = F)
s21=sample(Pop, 30, replace = F)
s22=sample(Pop, 30, replace = F)
s23=sample(Pop, 30, replace = F)
s24=sample(Pop, 30, replace = F)
s25=sample(Pop, 30, replace = F)
s26=sample(Pop, 30, replace = F)
s27=sample(Pop, 30, replace = F)
s28=sample(Pop, 30, replace = F)
s29=sample(Pop, 30, replace = F)
s30=sample(Pop, 30, replace = F)
# Import samples into excel designate boxes
Module_3_Project_Keno_v1_3_$'Sample 1'[1:30]=s01
Module_3_Project_Keno_v1_3_$'Sample 2'[1:30]=s02
Module_3_Project_Keno_v1_3_$'Sample 3'[1:30]=s03
Module_3_Project_Keno_v1_3_$'Sample 4'[1:30]=s04
Module_3_Project_Keno_v1_3_$'Sample 5'[1:30]=s05
Module_3_Project_Keno_v1_3_$'Sample 6'[1:30]=s06
Module_3_Project_Keno_v1_3_$'Sample 7'[1:30]=s07
Module_3_Project_Keno_v1_3_$'Sample 8'[1:30]=s08
Module_3_Project_Keno_v1_3_$'Sample 9'[1:30]=s09
Module_3_Project_Keno_v1_3_$'Sample 10'[1:30]=s10
Module_3_Project_Keno_v1_3_$'Sample 11'[1:30]=s11
Module_3_Project_Keno_v1_3_$'Sample 12'[1:30]=s12
Module_3_Project_Keno_v1_3_$'Sample 13'[1:30]=s13
Module_3_Project_Keno_v1_3_$'Sample 14'[1:30]=s14
Module_3_Project_Keno_v1_3_$'Sample 15'[1:30]=s15
Module_3_Project_Keno_v1_3_$'Sample 16'[1:30]=s16
Module_3_Project_Keno_v1_3_$'Sample 17'[1:30]=s17
Module_3_Project_Keno_v1_3_$'Sample 18'[1:30]=s18
Module_3_Project_Keno_v1_3_$'Sample 19'[1:30]=s19
Module_3_Project_Keno_v1_3_$'Sample 20'[1:30]=s20
Module_3_Project_Keno_v1_3_$'Sample 21'[1:30]=s21
Module_3_Project_Keno_v1_3_$'Sample 22'[1:30]=s22
Module_3_Project_Keno_v1_3_$'Sample 23'[1:30]=s23
Module_3_Project_Keno_v1_3_$'Sample 24'[1:30]=s24
Module_3_Project_Keno_v1_3_$'Sample 25'[1:30]=s25
Module_3_Project_Keno_v1_3_$'Sample 26'[1:30]=s26
Module_3_Project_Keno_v1_3_$'Sample 27'[1:30]=s27
Module_3_Project_Keno_v1_3_$'Sample 28'[1:30]=s28
Module_3_Project_Keno_v1_3_$'Sample 29'[1:30]=s29
Module_3_Project_Keno_v1_3_$'Sample 30'[1:30]=s30
#d
S=c(s01,s02,s03,s04,s05,s06,s07,s08,s09,s10,s11,s12,s13,s14,s15,s16,s17,s18,s19,s20,s21,s22,s23,s24,s25,s26,s27,s28,s29,s30)
Slist=matrix(S,nrow=30,ncol=30)
Slist.df= data.frame(Slist)
funs <- function(C) { c(mean=mean(C), var=var(C), sd=sd(C))}
sapply(Slist.df, funs)
#e
Slist.mean=c(sapply(Slist.df, mean))
Slist.var=c(sapply(Slist.df, var))
Slist.sd=c(sapply(Slist.df, sd))
mean(Slist.mean)
mean(Slist.var)
mean(Slist.sd)
|
586d5c1ab456352554f59b78652c988833acf188 | 01477eb2e08dbdd9fa9c68ddee28085de2d34946 | /analysis.R | 491eee5cab76945ba352c67694c5cb1bfca2cefc | [
"MIT"
] | permissive | danielredondo/Gender_academic_study | 7110fd01f248db3d63781eae49d298377d539d50 | 7a1347dd4d813e860f7cd5b954a75951e79003a9 | refs/heads/master | 2023-07-01T12:50:41.568916 | 2021-07-30T07:14:27 | 2021-07-30T07:14:27 | 247,641,949 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 27,320 | r | analysis.R | # In order not to overload the E-utility servers, NCBI recommends that users post no more
# than three URL requests per second and limit large jobs to either weekends or between
# 9:00 PM and 5:00 AM Eastern time during weekdays. Failure to comply with this policy may
# result in an IP address being blocked from accessing NCBI.
# ----- Packages -----
# devtools::install_github("skoval/RISmed")
library(RISmed)
library(dplyr)
# Libraries: plotting, JSON/HTTP access (GenderAPI calls below), reshaping (melt()).
library(ggplot2)
library(rjson)
library(httr)
library(reshape)
# NOTE(review): EUtilsSummary()/QueryCount()/EUtilsGet()/Author() used below come
# from the RISmed package, and dplyr verbs (%>% pipelines, filter, left_join, ...)
# are used throughout -- neither package is attached in this chunk; confirm they
# are loaded earlier in the script/session.
# ----- Get data from PubMed -----
# Define the terms of the query: human cancer research articles from a fixed
# list of oncology/general-medicine journals, published 2002-2019, excluding
# non-research publication types (letters, reviews, editorials, ...).
terms_query <- '("Neoplasms"[Mesh])
AND humans[MeSH Terms]
AND (("N ENGL J MED"[JOURNAL])
OR ("JAMA"[JOURNAL])
OR ("Lancet"[JOURNAL])
OR ("BMJ"[JOURNAL])
OR ("J Clin Oncol"[JOURNAL])
OR ("JAMA Oncol"[JOURNAL])
OR ("Lancet Oncol"[JOURNAL])
OR ("J Natl Cancer Inst"[JOURNAL])
OR ("Cancer"[JOURNAL])
OR ("ANN Oncol"[JOURNAL])
OR ("Int J Radiat Oncol Biol Phys"[JOURNAL])
OR ("Radiother Oncol"[JOURNAL]))
AND Journal Article[ptyp]
NOT (Letter[ptyp] OR Case Reports[ptyp] OR Comment[ptyp] OR Editorial[ptyp]
OR Review[ptyp] OR News[ptyp] OR Congress[ptyp] OR Retracted Publication[ptyp]
OR Published Erratum[ptyp] OR Biography[ptyp] OR Personal Narrative[ptyp]
OR Book Illustrations[ptyp] OR Introductory Journal Article[ptyp]
OR Guideline [ptyp] OR Practice guideline[ptyp]
OR consensus development conferences[ptyp] OR Clinical Conference[ptyp]
OR Address[ptyp] OR Duplicate Publication[ptyp] OR Interview[ptyp] OR Legal case[ptyp])
AND (2002/01/01:2019/12/31[Date - Publication]
OR 2002[Date - Publication] OR 2003[Date - Publication]
OR 2004[Date - Publication] OR 2005[Date - Publication]
OR 2006[Date - Publication] OR 2007[Date - Publication]
OR 2008[Date - Publication] OR 2009[Date - Publication]
OR 2010[Date - Publication] OR 2011[Date - Publication]
OR 2012[Date - Publication] OR 2013[Date - Publication]
OR 2014[Date - Publication] OR 2015[Date - Publication]
OR 2016[Date - Publication] OR 2017[Date - Publication]
OR 2018[Date - Publication] OR 2019[Date - Publication]
)
NOT (1800/01/01:2001/12/31[Date - Publication])
NOT (2020/01/01:3000[Date - Publication])'
# Get information: build the query summary (no article records downloaded yet)
query <- EUtilsSummary(terms_query, mindate = 2002, maxdate = 2019, retmax = 50000)
# Number of results (23/03/2020 - 42066 articles)
QueryCount(query)
# Get the data from PubMed
# Downloaded 23/03/2020 - 42066 articles
# The actual download is commented out; the cached result is loaded from disk.
#records <- EUtilsGet(query, type = "efetch", db = "pubmed")
#save(records, file = "records.RData")
load("records.RData")
# ----- Data preprocessing -----
# Convert the per-article author tables (list returned by RISmed's Author(),
# one data frame per article with ForeName and order columns) into a single
# data frame holding the first/last author forenames and the author count.
authors_list <- Author(records)
# Initialise authors: one row per article, filled in the loop below.
# character(n) already yields character columns, which replaces the old
# `class(authors[, i]) <- "character"` factor workaround.
n_articles <- length(authors_list)
authors <- data.frame(first_forename = character(n_articles),
                      last_forename = character(n_articles),
                      number_authors = NA_real_,
                      stringsAsFactors = FALSE)
# Extract names and number of authors
for (i in seq_len(n_articles)) {  # seq_len() is safe if the list is empty
  # Progress counter
  if (i %% 500 == 0) cat(".")
  aut <- authors_list[[i]]  # direct [[i]] instead of the indirect [i][[1]]
  # Extract forenames from first and last authors
  authors$first_forename[i] <- aut[1, ]$ForeName
  authors$last_forename[i] <- aut[nrow(aut), ]$ForeName
  # `order` is the author position within the article; its max = author count
  authors$number_authors[i] <- max(aut$order)
}
# Create data.frame: one row per downloaded article, combining PubMed
# metadata with the author summary built above.
pubmed_data <- data.frame(
  "Title" = ArticleTitle(records),
  "first_forename" = as.character(authors$first_forename),
  "last_forename" = as.character(authors$last_forename),
  "number_authors" = authors$number_authors,
  "PMID" = PMID(records),
  "Year" = YearPubmed(records),
  "Journal" = ISOAbbreviation(records)
)
# 14/04/2020 Remove Ann. Oncol and JAMA Oncol
# NOTE(review): the filter matches the exact strings "JAMA Oncol" and
# "Ann. Oncol." -- confirm these match the ISOAbbreviation spellings present
# in the data (inspect table(pubmed_data$Journal) below).
pubmed_data <- pubmed_data %>%
  filter(! Journal %in% c("JAMA Oncol", "Ann. Oncol."))
# See the first rows
nrow(pubmed_data)
head(pubmed_data)
# To character (these may be factors depending on the stringsAsFactors default)
pubmed_data$first_forename <- as.character(pubmed_data$first_forename)
pubmed_data$last_forename <- as.character(pubmed_data$last_forename)
# Results by year
table(pubmed_data$Year)
# Results by journal
table(pubmed_data$Journal)
# Barplot of article counts by journal
table(pubmed_data$Journal) %>% as.data.frame() %>%
  ggplot() +
  geom_col(aes(x = Var1, y = Freq), fill = "darkblue") +
  ylab("Frequency of articles") +
  xlab("Journal") +
  theme_classic() +
  theme(axis.text = element_text(size = 7))
# ----- Text mining -----
# Names more frequent (top 20 first/last author forenames)
table(pubmed_data$first_forename) %>% sort(decreasing = TRUE) %>% head(20)
table(pubmed_data$last_forename) %>% sort(decreasing = TRUE) %>% head(20)
# Keep articles only if both first and last author forenames are not NA
nrow(pubmed_data)
pubmed_data <- pubmed_data %>% filter(!is.na(first_forename))
pubmed_data <- pubmed_data %>% filter(!is.na(last_forename))
nrow(pubmed_data)
# Reduce compound forenames to their first token, e.g.
#   Lisa M -> Lisa
#   Michael M -> Michael
# Vectorised over the whole column instead of the original one-row-at-a-time
# loop (same result: first element of strsplit on a space).
first_token <- function(x) {
  vapply(strsplit(x, split = " ", fixed = TRUE),
         function(parts) parts[1], character(1))
}
pubmed_data$first_forename <- first_token(pubmed_data$first_forename)
pubmed_data$last_forename <- first_token(pubmed_data$last_forename)
# Keep only names longer than one character (drops bare initials like "J")
table(pubmed_data$Year)
nrow(pubmed_data)
pubmed_data <- pubmed_data %>% filter(nchar(first_forename)>1)
pubmed_data <- pubmed_data %>% filter(nchar(last_forename)>1)
nrow(pubmed_data)
table(pubmed_data$Year)
# 20 names more frequent
table(pubmed_data$first_forename) %>% sort(decreasing = TRUE) %>% head(20)
table(pubmed_data$last_forename) %>% sort(decreasing = TRUE) %>% head(20)
# ----- Table to get genders -----
# This code is to do the minimum number of querys to GenderAPI:
# each unique forename is queried once, whether it occurs as first or last author.
names <- unique(c(pubmed_data$first_forename, pubmed_data$last_forename))
print(paste0("Number of articles: ", nrow(pubmed_data), ". Number of names: ", length(names), "."))
head(names)
# ----- GenderAPI - Creating files -----
# This code uses an API from GenderAPI to obtain a dataframe containing:
# - name
# - gender
# - samples (Number of samples used to obtain the gender)
# - accuracy (Percentage of accuracy of the gender)
## # API of GenderAPI
## api = ""
## # Get the genders
## for(i in 1:length(names)){
## # Small sleep between names
## Sys.sleep(0.1)
## # Counter
## if(i %% 50 == 0) print(i)
## # Build URL
## url <- paste0("https://gender-api.com/get?name=", names[i], "&key=", api)
## # Get the gender from GenderAPI
## content <- url %>% GET %>% content %>% data.frame
## name_i_with_gender <- content %>% select(names = name, gender, accuracy)
## if(i == 1){
## # Create table
## names_with_genders <- name_i_with_gender
## } else {
## # Append to the table
## names_with_genders <- rbind(names_with_genders, name_i_with_gender)
## }
## # Save raw result in case it's needed
## write.csv(content, file = paste0("genderapi/", names[i], ".csv"))
## }
# ----- GenderAPI - Joining files -----
## # Slight modifications of characters, only in the name of the CSV:
## # Çağlar.csv -> Çaglar.csv
## # Doğangün.csv -> Dogangün.csv
## # Grażyna.csv -> Grazyna.csv
## # Jiří.csv -> Jirí.csv
## # Łukasz.csv -> Lukasz.csv
##
## # Changes reflected also in pubmed_data (using UNICODE characters)
## names_with_issues <- c("Ça\U011Flar", paste0("Do", "\U011F", "ang\U00FCn"), "Gra\U017Cyna", "Ji\U0159í", "\U0141ukasz")
## print(names_with_issues) # See the names with issues
## names_corrected <- c("Çaglar", "Dogangün", "Grazyna", "Jirí", "Lukasz")
## for(i in 1:length(names_with_issues)){
## pubmed_data$first_forename[pubmed_data$first_forename == names_with_issues[i]] <- names_corrected[i]
## pubmed_data$last_forename[pubmed_data$last_forename == names_with_issues[i]] <- names_corrected[i]
## }
##
## # Join all files produced with GenderAPI
## setwd("genderapi")
## files <- list.files()
##
## for(i in 1:length(files)){
## # Counter
## if(i %% 500 == 0) cat(".")
## file_i <- read.csv(files[i])
## if(i == 1) names_with_genders <- file_i
## else names_with_genders <- rbind(names_with_genders, file_i)
## }
##
## setwd("..")
## # Some names are duplicated: (e.g. Ayşe is processed through GenderAPI like "Ayse", that was present already)
## # We remove duplicates
## names_with_genders <- unique(names_with_genders)
##
## head(names_with_genders)
## save(names_with_genders, file = "names_with_genders.RData")
# Load the cached name -> gender table produced by the GenderAPI code above
# (columns: name, gender, samples, accuracy)
load("names_with_genders.RData")
# ----- Processing genders -----
# Convert name to character (may be a factor from read.csv)
names_with_genders$name <- as.character(names_with_genders$name)
head(names_with_genders)
# Removing unknowns and empty gender:
table(names_with_genders$gender, useNA = "always")
nrow(names_with_genders)
names_with_genders <- names_with_genders %>% filter(gender != "unknown", gender != "")
names_with_genders$gender <- factor(names_with_genders$gender)  # refactor to drop unused levels
nrow(names_with_genders)
# Establish a threshold for accuracy (in percentage)
nrow(names_with_genders)
threshold <- 60
# See names + genders that are going to be removed
names_with_genders %>% filter(accuracy < threshold) %>% head(10)
# Remove low-accuracy gender assignments
names_with_genders <- names_with_genders %>% filter(accuracy >= threshold)
nrow(names_with_genders)
# Establish a threshold for the number of samples behind each gender call
nrow(names_with_genders)
threshold <- 10 # For example
# See names + genders that are going to be removed
names_with_genders %>% filter(samples < threshold) %>% head(10)
# Remove names supported by too few samples.
# (Bug fix: this previously filtered on `accuracy >= threshold` again, so the
# sample-size threshold was never actually applied.)
names_with_genders <- names_with_genders %>% filter(samples >= threshold)
nrow(names_with_genders)
# ----- Updating gender in pubmed_data -----
# Remove capitals from names
# NOTE(review): this assumes names_with_genders$name is also lowercase,
# otherwise the left_joins below will not match -- confirm.
pubmed_data$first_forename <- tolower(pubmed_data$first_forename)
pubmed_data$last_forename <- tolower(pubmed_data$last_forename)
# Drop any pre-existing gender columns (assigning NULL removes a data.frame
# column; it is a no-op if the column does not exist yet)
pubmed_data$first_gender <- NULL
pubmed_data$last_gender <- NULL
# Merge to get the gender of the first and the last author of each article
pubmed_data$first_gender <- left_join(x = pubmed_data, y = names_with_genders,
                                      by = c("first_forename" = "name"))$gender %>% as.character
pubmed_data$last_gender <- left_join(x = pubmed_data, y = names_with_genders,
                                     by = c("last_forename" = "name"))$gender %>% as.character
# Export pubmed_data
save(pubmed_data, file = "pubmed_data.RData")
write.csv(pubmed_data, file = "pubmed_data.csv", row.names = F)
# ----- Analysis of single authored articles -----
# Per-year male/female counts and male:female ratio among articles with a
# single (gender-assigned) author. number_authors == "1" relies on implicit
# numeric->character coercion, kept as in the original.
sa_articles <- pubmed_data %>%
  filter(number_authors == "1") %>%
  filter(!is.na(first_gender)) %>%
  select(Year, first_gender) %>%
  group_by(Year, first_gender) %>%
  summarise(n = n()) %>%
  reshape2::dcast(formula = Year ~ first_gender, value.var = "n") %>%
  mutate(ratio = round(male/female, 1))
sa_articles
plot_sa <- ggplot(data = sa_articles, aes(x = Year, y = ratio, label = ratio)) +
  geom_col(color = "grey") +
  geom_label(color = "white", fill = NA, label.size = 0, nudge_y = -0.3) +
  scale_x_continuous(breaks = 2002:2019, minor_breaks = 2002:2019) +
  labs(x = "Year", y = "Male-female ratio for single-authored articles") +
  theme_minimal() +
  theme(axis.text = element_text(color = "black"))
print(plot_sa)
# Pass the plot explicitly: ggsave()'s default last_plot() only works when
# the plot has actually been printed.
ggsave(filename = "online_figure_2.png", plot = plot_sa, width = 8, height = 5, dpi = 300)
# Total single-authored articles; previously hard-coded as 977 in the
# percentage computations below, which would silently go stale if the data
# changed. (977 articles at the time of the original analysis.)
# NOTE(review): dcast fills missing combinations with NA, which would make
# these sums NA -- confirm every year has both genders represented.
n_sa <- sum(sa_articles$female) + sum(sa_articles$male)
n_sa
n_sa*100 / nrow(pubmed_data) # ~2.8% of all articles with gender for first and last author
n_sa*100 / length(Author(records)) # ~2.3% of all articles
# ----- Pubmed_data (each row is an article) -----
# Added 27/03/2020
# Keep articles where both author genders are known and label each article
# with its first/last gender combination (e.g. "first_female_last_male").
first_and_last_combinations <- pubmed_data %>%
  filter(is.na(first_gender) == FALSE, is.na(last_gender) == FALSE) %>%
  mutate(first_and_last = paste0("first_", first_gender, "_last_", last_gender))
# Counts of the four possible gender combinations
table(first_and_last_combinations$first_and_last)
# Save file
save(first_and_last_combinations, file = "masterdata/first_and_last_combinations.RData")
# ----- Master data file -----
# Create: one row per (Year, Journal) combination, with the four gender
# counters initialised to 0 and filled in by the update step below.
master <- pubmed_data %>%
  group_by(Year, Journal) %>%
  summarise() %>%
  mutate(first_male = 0,
         first_female = 0,
         last_male = 0,
         last_female = 0) %>%
  as.data.frame
# Auxiliar table: article counts per (Year, Journal, first-author gender)
pubmed_data_F <- pubmed_data %>%
  group_by(Year, Journal, first_gender) %>%
  summarise(n = n()) %>%
  as.data.frame
head(pubmed_data_F)
# Auxiliar table: article counts per (Year, Journal, last-author gender)
pubmed_data_L <- pubmed_data %>%
  group_by(Year, Journal, last_gender) %>%
  summarise(n = n()) %>%
  as.data.frame
head(pubmed_data_L)
# Update master with the per-(Year, Journal) gender counts.
# match() on a composite "Year Journal" key replaces the original nested
# loops: master has exactly one row per (Year, Journal), so the first match
# returned by match() is the same row the old `break` stopped at.
master_key <- paste(master$Year, master$Journal)
# Update - first author counts
idx_F <- match(paste(pubmed_data_F$Year, pubmed_data_F$Journal), master_key)
ok_F <- !is.na(idx_F) & !is.na(pubmed_data_F$first_gender)
is_m <- ok_F & pubmed_data_F$first_gender == "male"
is_f <- ok_F & pubmed_data_F$first_gender == "female"
master$first_male[idx_F[is_m]] <- pubmed_data_F$n[is_m]
master$first_female[idx_F[is_f]] <- pubmed_data_F$n[is_f]
# Update - last author counts
idx_L <- match(paste(pubmed_data_L$Year, pubmed_data_L$Journal), master_key)
ok_L <- !is.na(idx_L) & !is.na(pubmed_data_L$last_gender)
is_m <- ok_L & pubmed_data_L$last_gender == "male"
is_f <- ok_L & pubmed_data_L$last_gender == "female"
master$last_male[idx_L[is_m]] <- pubmed_data_L$n[is_m]
master$last_female[idx_L[is_f]] <- pubmed_data_L$n[is_f]
head(master)
# Export the master counts table
save(master, file = "master.RData")
write.csv(master, file = "master.csv", row.names = FALSE)
# ----- Results - Overall male-female ratio -----
# Raw gender counts over all articles (NA = gender could not be assigned)
table(pubmed_data$first_gender, useNA = "always")
table(pubmed_data$last_gender, useNA = "always")
# ----- Figures - Ratio male/female -----
# First author: gender x year count matrix, plus a derived male/female ratio row.
df <- table(pubmed_data$first_gender, pubmed_data$Year)
df <- rbind(df, df["male",]/df["female",])
rownames(df)[3] <- "ratio_male_female"
df <- melt(df) %>% filter(X1 == "ratio_male_female")  # X1/X2 come from reshape::melt on a matrix
names(df) <- c("ratio", "year", "value")
df
# Build the plot ONCE (the original duplicated this whole ggplot call, once to
# display/save and once to store it for the combined figure) and reuse it.
graph_first <- ggplot(df) +
  geom_line(aes(x = year, y = value), size = 0.75) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of ratio male-female (First author)") +
  ylab("Ratio male-female") +
  ylim(c(0, 5)) +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  theme_classic() +
  theme(plot.title = element_text(hjust = 0.5))
# Explicit print() + plot= make display and saving work also when the script
# is run non-interactively (ggsave's default last_plot() needs a printed plot).
print(graph_first)
ggsave(filename = "plots/r_first.png", plot = graph_first, width = 200, height = 200, units = "mm")
# Last author: same construction as the first-author figure above.
df <- table(pubmed_data$last_gender, pubmed_data$Year)
df <- rbind(df, df["male",]/df["female",])
rownames(df)[3] <- "ratio_male_female"
df <- melt(df) %>% filter(X1 == "ratio_male_female")
names(df) <- c("ratio", "year", "value")
df
# Build the plot once and reuse it (the original duplicated the ggplot call).
graph_last <- ggplot(df) +
  geom_line(aes(x = year, y = value), size = 0.75) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of ratio male-female (Last author)") +
  ylab("Ratio male-female") +
  ylim(c(0, 5)) +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  theme_classic() +
  theme(plot.title = element_text(hjust = 0.5))
print(graph_last)
ggsave(filename = "plots/r_last.png", plot = graph_last, width = 200, height = 200, units = "mm")
# Combine first- and last-author figures side by side.
library(ggpubr)
combined <- ggarrange(graph_first, graph_last)
print(combined)
# Pass the plot explicitly: relying on ggsave's default last_plot() is
# fragile for ggarrange output and in non-interactive runs.
ggsave(filename = "plots/r_first_last_combined.png", plot = combined, width = 400, height = 200, units = "mm")
# ----- 21/09/2020 NEW figures - Ratio male/female -----
# Same ratios as above, but first- and last-author lines in a single figure.
# First author: gender x year counts with a derived ratio row
df <- table(pubmed_data$first_gender, pubmed_data$Year)
df <- rbind(df, df["male",]/df["female",])
rownames(df)[3] <- "ratio_male_female"
df <- melt(df) %>% filter(X1 == "ratio_male_female")  # X1/X2 come from reshape::melt on a matrix
names(df) <- c("ratio", "year", "value")
df
# Last author: same construction
df2 <- table(pubmed_data$last_gender, pubmed_data$Year)
df2 <- rbind(df2, df2["male",]/df2["female",])
rownames(df2)[3] <- "ratio_male_female"
df2 <- melt(df2) %>% filter(X1 == "ratio_male_female")
names(df2) <- c("ratio", "year", "value")
df2
# cbind first and last author ratios into one data frame
names(df) <- c("ratio", "year", "value_first")
df$year == df2$year  # interactive sanity check: years must line up before cbind
df <- cbind(df, "value_last" = df2$value)
head(df)
ggplot(df) +
  geom_line(aes(x = year, y = value_first, col = "Ratio male/female (first author)"), size = 1.5) +
  geom_line(aes(x = year, y = value_last, col = "Ratio male/female (last author)"), size = 1.5) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of ratio male-female") +
  ylab("Ratio male-female") +
  ylim(c(0, 5)) +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  theme_classic() +
  theme(text = element_text(color = "black"),
        legend.title = element_blank(),
        legend.position="top") +
  theme(plot.title = element_text(hjust = 0.5))
# NOTE(review): ggsave() with no plot= saves last_plot(), which is only set
# when the plot above is actually printed (interactive/echoed runs) -- confirm.
ggsave(filename = "plots/20200921_r_first_and_last.png", width = 250, height = 200, units = "mm")
# ----- Figures - Ratio male/female by journal -----
# Restrict to the 4 journals with the most articles
table(pubmed_data$Journal) %>% sort(decreasing = TRUE)
journals <- table(pubmed_data$Journal) %>% sort(decreasing = TRUE) %>% head(4) %>% names()
# journals <- c(journals, "N. Engl. J. Med.")
# First author: counts per (Year, Journal, gender)
df <- pubmed_data %>%
  filter(is.na(first_gender) == FALSE) %>%
  filter(is.na(Journal) == FALSE) %>%
  filter(Journal %in% journals) %>%
  mutate(first_gender = as.character(first_gender)) %>%
  mutate(Journal = as.character(Journal)) %>%
  group_by(Year, Journal, first_gender) %>%
  summarise(n = n()) %>%
  mutate(n = as.double(n)) %>%
  as.data.frame()
# Calculate ratio: appends sentinel rows with first_gender == "ratio" that
# carry the male/female ratio per (Year, Journal); the filter below keeps
# only those rows.
# NOTE(review): growing df with rbind inside a loop is an anti-pattern, but
# the downstream filter depends on these appended sentinel rows -- left as-is.
for(i in 2002:2019){
  for(j in 1:length(journals)){
    ratio <- df[df$Year == i & df$Journal == journals[j] & df$first_gender == "male", "n"] /
      df[df$Year == i & df$Journal == journals[j] & df$first_gender == "female", "n"]
    aux <- data.frame(i, journals[j], "ratio", ratio)
    names(aux) <- names(df)
    df <- rbind(df, aux)
  }
}
df <- df %>% filter(first_gender == "ratio")
names(df)[4] <- "ratio"
head(df)
ggplot(df) +
  geom_line(aes(x = Year, y = ratio), size = 0.75) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of ratio male-female (First author)") +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  facet_wrap(vars(Journal)) +
  scale_y_continuous("Ratio male-female", breaks = 1:11) +
  theme_classic() +
  theme(plot.title = element_text(hjust = 0.5))
# NOTE(review): ggsave() relies on last_plot(), which requires the plot above
# to have been printed (interactive/echoed runs) -- confirm.
ggsave(filename = "plots/r_first_by_journal.png", width = 200, height = 200, units = "mm")
# Save the same graph to combine figures later (duplicated ggplot call)
graph_first_by_journal <- ggplot(df) +
  geom_line(aes(x = Year, y = ratio), size = 0.75) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of ratio male-female (First author)") +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  facet_wrap(vars(Journal)) +
  scale_y_continuous("Ratio male-female", breaks = 1:11) +
  theme_classic() +
  theme(plot.title = element_text(hjust = 0.5))
# Last author: same construction as the first-author by-journal figure above.
df <- pubmed_data %>%
  filter(is.na(last_gender) == FALSE) %>%
  filter(is.na(Journal) == FALSE) %>%
  filter(Journal %in% journals) %>%
  mutate(last_gender = as.character(last_gender)) %>%
  mutate(Journal = as.character(Journal)) %>%
  group_by(Year, Journal, last_gender) %>%
  summarise(n = n()) %>%
  mutate(n = as.double(n)) %>%
  as.data.frame()
# Calculate ratio via appended sentinel rows (last_gender == "ratio");
# see the NOTE(review) in the first-author section about rbind growth.
for(i in 2002:2019){
  for(j in 1:length(journals)){
    ratio <- df[df$Year == i & df$Journal == journals[j] & df$last_gender == "male", "n"] /
      df[df$Year == i & df$Journal == journals[j] & df$last_gender == "female", "n"]
    aux <- data.frame(i, journals[j], "ratio", ratio)
    names(aux) <- names(df)
    df <- rbind(df, aux)
  }
}
df <- df %>% filter(last_gender == "ratio")
names(df)[4] <- "ratio"
head(df)
ggplot(df) +
  geom_line(aes(x = Year, y = ratio), size = 0.75) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of ratio male-female (Last author)") +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  scale_y_continuous("Ratio male-female", breaks = 1:11) +
  facet_wrap(vars(Journal)) +
  theme_classic() +
  theme(plot.title = element_text(hjust = 0.5))
ggsave(filename = "plots/r_last_by_journal.png", width = 200, height = 200, units = "mm")
# Save the same graph to combine figures later (duplicated ggplot call)
graph_last_by_journal <- ggplot(df) +
  geom_line(aes(x = Year, y = ratio), size = 0.75) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of ratio male-female (Last author)") +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  scale_y_continuous("Ratio male-female", breaks = 1:11) +
  facet_wrap(vars(Journal)) +
  theme_classic() +
  theme(plot.title = element_text(hjust = 0.5))
# Combine both by-journal figures side by side
library(ggpubr)
ggarrange(graph_first_by_journal, graph_last_by_journal)
ggsave(filename = "plots/r_first_last_combined_by_journal.png", width = 400, height = 200, units = "mm")
# ----- 21/09/2020 NEW figures - Ratio male/female by journal -----
# Same per-journal ratios as above, but first- and last-author lines combined
# in a single faceted figure.
table(pubmed_data$Journal) %>% sort(decreasing = TRUE)
journals <- table(pubmed_data$Journal) %>% sort(decreasing = TRUE) %>% head(4) %>% names()
# journals <- c(journals, "N. Engl. J. Med.")
# First author: counts per (Year, Journal, gender)
df <- pubmed_data %>%
  filter(is.na(first_gender) == FALSE) %>%
  filter(is.na(Journal) == FALSE) %>%
  filter(Journal %in% journals) %>%
  mutate(first_gender = as.character(first_gender)) %>%
  mutate(Journal = as.character(Journal)) %>%
  group_by(Year, Journal, first_gender) %>%
  summarise(n = n()) %>%
  mutate(n = as.double(n)) %>%
  as.data.frame()
# Calculate ratio via appended sentinel rows (first_gender == "ratio");
# see the NOTE(review) in the earlier by-journal section about rbind growth.
for(i in 2002:2019){
  for(j in 1:length(journals)){
    ratio <- df[df$Year == i & df$Journal == journals[j] & df$first_gender == "male", "n"] /
      df[df$Year == i & df$Journal == journals[j] & df$first_gender == "female", "n"]
    aux <- data.frame(i, journals[j], "ratio", ratio)
    names(aux) <- names(df)
    df <- rbind(df, aux)
  }
}
df <- df %>% filter(first_gender == "ratio")
names(df)[4] <- "ratio_first"
head(df)
# Last author: same construction
df2 <- pubmed_data %>%
  filter(is.na(last_gender) == FALSE) %>%
  filter(is.na(Journal) == FALSE) %>%
  filter(Journal %in% journals) %>%
  mutate(last_gender = as.character(last_gender)) %>%
  mutate(Journal = as.character(Journal)) %>%
  group_by(Year, Journal, last_gender) %>%
  summarise(n = n()) %>%
  mutate(n = as.double(n)) %>%
  as.data.frame()
# Calculate ratio (same sentinel-row construction)
for(i in 2002:2019){
  for(j in 1:length(journals)){
    ratio <- df2[df2$Year == i & df2$Journal == journals[j] & df2$last_gender == "male", "n"] /
      df2[df2$Year == i & df2$Journal == journals[j] & df2$last_gender == "female", "n"]
    aux <- data.frame(i, journals[j], "ratio", ratio)
    names(aux) <- names(df2)
    df2 <- rbind(df2, aux)
  }
}
df2 <- df2 %>% filter(last_gender == "ratio")
names(df2)[4] <- "ratio_last"
head(df2)
# cbind first and last; the two == lines are interactive sanity checks that
# rows are aligned before the cbind
df$Year == df2$Year
df$Journal == df2$Journal
df <- cbind(df, "ratio_last" = df2$ratio_last)
head(df)
ggplot(df) +
  geom_line(aes(x = Year, y = ratio_first, col = "Ratio male/female (first author)"), size = 1) +
  geom_line(aes(x = Year, y = ratio_last, col = "Ratio male/female (last author)"), size = 1) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of ratio male-female") +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  scale_y_continuous("Ratio male-female", breaks = 1:11) +
  facet_wrap(vars(Journal)) +
  theme_classic() +
  theme(text = element_text(color = "black"),
        legend.title = element_blank(),
        legend.position="top") +
  theme(plot.title = element_text(hjust = 0.5))
# NOTE(review): ggsave() relies on last_plot(); requires the plot above to
# have been printed (interactive/echoed runs) -- confirm.
ggsave(filename = "plots/20200921_r_first_and_last_by_journal.png", width = 250, height = 200, units = "mm")
# ----- Figures - Number -----
# Absolute number of articles per year, split by author gender.
# First author
df <- table(pubmed_data$first_gender, pubmed_data$Year) %>% melt
names(df) <- c("gender", "year", "value")
df
ggplot(df) +
  geom_line(aes(x = year, y = value, color = gender), size = 0.75) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of number of articles by gender (First author)") +
  ylab("Number of articles") +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  theme_classic() +
  theme(plot.title = element_text(hjust = 0.5))
ggsave(filename = "plots/n_first.png", width = 200, height = 200, units = "mm")
# Last author
df <- table(pubmed_data$last_gender, pubmed_data$Year) %>% melt
names(df) <- c("gender", "year", "value")
df
ggplot(df) +
  geom_line(aes(x = year, y = value, color = gender), size = 0.75) +
  geom_hline(yintercept = 1, lty = 2, col = "gray60", size = 0.5) +
  ggtitle("Evolution of number of articles by gender (Last author)") +
  ylab("Number of articles") +
  scale_x_continuous("Year", breaks = seq(2002, 2020, 2)) +
  theme_classic() +
  theme(plot.title = element_text(hjust = 0.5))
ggsave(filename = "plots/n_last.png", width = 200, height = 200, units = "mm")
# Trends of the male/female ratio over time (accounting for overdispersion)
library(sandwich)
# NOTE(review): First.RData / Last.RData are loaded here but the models below
# use pubmed_data_F / pubmed_data_L built earlier in this script -- confirm
# these files contain (or are meant to refresh) those same objects.
load("First.RData")
load("Last.RData")
# First author: Poisson regression of article counts on gender, year and their
# interaction; the gender:Year term tests whether the gender gap changes over time.
trendF <- glm(n ~ I(first_gender) + Year + first_gender:Year, family=poisson(link="log"), data=pubmed_data_F)
summary(trendF)
confint(trendF)
# Robust Standard Errors (accounting for overdispersion), HC0 sandwich estimator
cov.F <- vcovHC(trendF, type="HC0")
stdf.err <- sqrt(diag(cov.F))
# Estimates with robust SEs, Wald p-values and 95% Wald confidence limits
rf.est <- cbind(Estimate= coef(trendF), "Robust SE" = stdf.err,
                "Pr(>|z|)" = 2 * pnorm(abs(coef(trendF)/stdf.err), lower.tail=FALSE),
                LL = coef(trendF) - 1.96 * stdf.err,
                UL = coef(trendF) + 1.96 * stdf.err)
rf.est
# Last author: Poisson regression of article counts on gender, year and their
# interaction (analogous to the first-author model above).
trendL <- glm(n ~ I(last_gender) + Year + last_gender:Year, family=poisson(link="log"), data=pubmed_data_L)
summary(trendL)
confint(trendL)
# Robust Standard Errors (accounting for overdispersion), HC0 sandwich estimator.
# (Bug fix: this previously computed vcovHC(trendF, ...) -- the covariance of
# the FIRST-author model -- so the last-author robust SEs were wrong.)
cov.L <- vcovHC(trendL, type="HC0")
stdl.err <- sqrt(diag(cov.L))
# Estimates with robust SEs, Wald p-values and 95% Wald confidence limits
rl.est <- cbind(Estimate= coef(trendL), "Robust SE" = stdl.err,
                "Pr(>|z|)" = 2 * pnorm(abs(coef(trendL)/stdl.err), lower.tail=FALSE),
                LL = coef(trendL) - 1.96 * stdl.err,
                UL = coef(trendL) + 1.96 * stdl.err)
rl.est
|
f42214c3ea16e7ac060c092db956dc4b9777203b | 3926260e014b713e47f2a144b7e91f5ec62f5cae | /man/Joint.Wright.Rd | 80027c729dc370f1772ceda39cdb7ba85d772513 | [] | no_license | cran/vrtest | 867cc9fed8c5584c198aa1649413df820357f99d | 405dd094f06dc1cbe6c90c4f00c462c132858d02 | refs/heads/master | 2022-09-24T22:56:16.537410 | 2022-09-05T05:50:02 | 2022-09-05T05:50:02 | 17,700,823 | 1 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,225 | rd | Joint.Wright.Rd | \name{Joint.Wright}
\alias{Joint.Wright}
\title{ A Joint Version of Wright's Rank and Sign Test }
\description{
This function returns joint or multiple version of Wright's rank and sign tests.
The test takes the maximum value of the individual rank or sign tests, in the same manner as Chow-Denning test
}
\usage{
Joint.Wright(y, kvec)
}
\arguments{
\item{y}{ a vector of time series, typically financial return }
\item{kvec}{ a vector of holding periods }
}
\value{
\item{Holding.Period }{holding periods used}
\item{JR1 }{Joint test based on R1 statistics}
\item{JR2 }{Joint test based on R2 statistics}
\item{JS1 }{Joint test based on S1 statistics}
}
\references{
Belaire-Franch G, Contreras D. Ranks and signs-based multiple variance ratio tests, Working paper, University of Valencia 2004.
Kim, J. H. and Shamsuddin, A., 2008, Are Asian Stock Markets Efficient? Evidence from New Multiple Variance Ratio Tests, Journal of Empirical Finance 15(8), 518-532.
}
\author{Jae H. Kim}
\examples{
data(exrates)
y <- exrates$ca
nob <- length(y)
r <- log(y[2:nob])-log(y[1:(nob-1)])
kvec <- c(2,5,10)
Joint.Wright(r,kvec)
}
\keyword{ htest } |
9accfd055138c56136eed3d5b7bafa31809eaed0 | ba4f888da890e53f17aad4b11435f11cb1784e9f | /diffexpr_PD_RNAseq.R | 24e71d53dc115cd9a147dc3a8103c169dbf35ffd | [] | no_license | arlinkeo/pd_braak | 1b22be85157bd513ea6fab3b4834fe0749b0df38 | 0a6e3642cd4d5832805f4cd2be108e43e5a8f03a | refs/heads/master | 2023-02-27T19:49:31.918756 | 2021-02-03T16:58:34 | 2021-02-03T16:58:34 | 93,852,543 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 17,669 | r | diffexpr_PD_RNAseq.R | setwd("M:/doorgeefluik/Arlin Keo doorgeefuik/RNAseq")
# Keep character columns as character (pre-R-4.0 default was factors)
options(stringsAsFactors = FALSE)
# Libraries: plotting, significance bars, reshaping, colour palettes,
# population-specific expression analysis.
# NOTE(review): ggsignif and PSEA are not used in this part of the script --
# confirm they are needed further down.
library(ggplot2)
library(ggsignif)
library(reshape2)
library(RColorBrewer)
library(PSEA)
##############################################################################
# Prepare and load data
# Braak region colors: one colour per Braak stage region R1-R6
braakColors <- brewer.pal(6, "Set2")
names(braakColors) <- paste0("R", c(1:6))
# Map region labels of this dataset onto the Braak-region palette:
# "SN" (substantia nigra) -> colour of Braak region 3, anything else
# (i.e. GTM) -> colour of Braak region 5. Returns an unnamed character vector.
braak.color <- function(x) {
  pick <- function(region_label) {
    if (region_label == "SN") {
      braakColors[[3]]
    } else {
      braakColors[[5]]
    }
  }
  unname(unlist(lapply(x, pick)))
}
# Sample info: donor codes per disease group (control/PD) and brain region
# (SN = substantia nigra, GTM = gyrus temporalis medialis)
samples <- list(control =
                  list(
                    SN = c("A53.3", "A34.3", "Anke3", "A36.3", "A37.2", "A77.2", "A45.3", "Anke4", "Anke5", "Anke7", "Anke6"),
                    GTM = c("A54.3", "A11.3", "A10.3", "A38.3", "A03.3", "A15.2", "A50.2", "A71.3", "A74.2")
                  ),
                PD =
                  list(
                    SN = c("A30.3", "A24.3", "A43.2", "Anke2", "Anke1", "A60.2", "A57.1", "A55.2", "A51.2", "A47.2"),
                    GTM = c("A21.3", "A05.3", "A08.3", "A01.3", "A23.3")
                  ))
sapply(samples, function(x)sapply(x, length))  # sample counts per group/region
# Long-format lookup table: one row per sample code
sample_info <- melt(samples)
colnames(sample_info) <- c("code", "region", "disease")
rownames(sample_info) <- sample_info$code
# Load the region-comparison tables (SN vs GTM), one per disease group
data_PD_region <- read.csv2("region comparison/compare_condition_to_SN Park PD Late_VS_GTM Park PDD Late.csv", skip = 3)
rownames(data_PD_region) <- data_PD_region$ID
data_CTRL_region <- read.csv2("region comparison/compare_condition_to_SN nonDem CTL_VS_GTM nonDem CTL.csv", skip = 3)
rownames(data_CTRL_region) <- data_CTRL_region$ID
# Select expression data and combine into 1 data matrix (rows aligned on the
# PD table's gene IDs)
expr <- cbind(data_PD_region[, unlist(samples$PD)], data_CTRL_region[rownames(data_PD_region), unlist(samples$control)])
# Load the disease-comparison tables (PD vs control), one per region
data_SN_disease <- read.csv2("disease comparison/regroup_compare_condition_to_SN Park PD Late_VS_SN nonDem CTL.csv")
rownames(data_SN_disease) <- data_SN_disease$ID
data_GTM_disease <- read.csv2("disease comparison/compare_condition_to_GTM Park PDD Late_VS_GTM nonDem CTL.csv")
rownames(data_GTM_disease) <- data_GTM_disease$ID
# All four comparison tables (from the Excel/DESeq2 exports) in one list;
# names encode the comparison (R3 = SN, R4/R5 = GTM in Braak-region terms)
data_list <- list('R3_PD_vs_CTRL' = data_SN_disease, 'R4/R5_PD_vs_CTRL' = data_GTM_disease,
                  'PD_SN_vs_GTM' = data_PD_region, 'CTRL_SN_vs_GTM' = data_CTRL_region)
# Info: named vectors used for iteration elsewhere
# genes <- rownames(expr)
regions <- c("SN", "GTM")
names(regions) <- regions
diseases <- c("control", "PD")
names(diseases) <- diseases
##############################################################################
# Differentially expressed genes (DEGs) per comparison table:
# |log2 fold-change| > 1 and adjusted p-value < 0.05, split by direction.
degs <- lapply(data_list, function(tbl) {
  significant <- tbl$padj < 0.05
  list(
    down = rownames(tbl)[which(significant & tbl$log2FoldChange < -1)],
    up   = rownames(tbl)[which(significant & tbl$log2FoldChange > 1)]
  )
})
# Matrix of DEG counts: rows = direction (down/up), columns = comparison
no_degs <- sapply(degs, function(d) sapply(d, length))
# Heatmap of the number of differentially expressed genes (DESeq2).
# m1: total DEGs in the region comparisons (columns 3,4 of no_degs), one row
#     per disease group; m2: total DEGs in the disease comparisons, one row
#     per region.
m1 <- data.frame('R3.vs.R4/R5' = apply(no_degs[, c(3,4)], 2, sum), row.names = rev(diseases))
m2 <- data.frame('PD.vs.controls' = apply(no_degs[, c(1,2)], 2, sum), row.names = c("R3", "R4/R5"))
max_degs <- max(m1, m2)  # shared colour-scale limit for both heatmaps
pdf("heatmap_diffgenes.pdf", 2.3, 2)
t1 <- melt(as.matrix(m1))
t1$Var1 <- factor(t1$Var1, levels = rev(unique(t1$Var1)))
# print() is required so the ggplots are drawn into the PDF also when the
# script is run via source()/Rscript (top-level auto-printing does not apply).
# NOTE(review): the two ggtitle() labels look swapped relative to m1/m2
# (m1 compares regions, m2 compares diseases) -- confirm the intended titles.
print(ggplot(t1) +
  geom_tile(aes(Var2, Var1, fill=value), color = "black") +
  geom_text(aes(Var2, Var1, label=value)) +
  scale_x_discrete(position = "top") +
  scale_fill_gradient(low="white", high = "chocolate", limits = c(0,max_degs)) +
  theme(axis.title=element_blank(), axis.ticks=element_blank(), panel.background=element_blank()) +
  ggtitle("PD vs. controls"))
t2 <- melt(as.matrix(m2))
t2$Var1 <- factor(t2$Var1, levels = rev(unique(t2$Var1)))
print(ggplot(t2) +
  geom_tile(aes(Var2, Var1, fill=value), color = "black") +
  geom_text(aes(Var2, Var1, label=value)) +
  scale_x_discrete(position = "top") +
  scale_fill_gradient(low="white", high = "chocolate", limits = c(0,max_degs)) +
  theme(axis.title=element_blank(), axis.ticks=element_blank(), panel.background=element_blank()) +
  ggtitle("R3 vs. R4/R5"))
dev.off()
#####################################################################################
#Volcano plot of fold-change and p-values
# Minimal theme shared by all volcano plots: no legend, blank panel
# background, plain black axis lines.
volcano.theme <- theme(legend.position = "none",
                       panel.background = element_blank(),
                       axis.line = element_line(colour = "black"),
                       axis.title = element_text(size = 12),
                       plot.title = element_text(size = 12, face = "bold")
)
# Volcano plot for one differential-expression table: -log10(adjusted p)
# against log2 fold-change, highlighting genes with padj < 0.05 and
# |log2FC| > 1 via the `info` colour variable.
#
# t:     results table with log2FoldChange and padj columns and gene IDs
#        as rownames.
# title: plot title.
# Returns the ggplot object; the caller is responsible for printing it.
volcano.plot <- function(t, title){
  sig_genes <- rownames(t)[t$padj < 0.05 & abs(t$log2FoldChange) > 1]
  t$info <- as.numeric(rownames(t) %in% sig_genes)
  t$logp <- -log10(t$padj)
  ggplot(t, aes(log2FoldChange, logp, colour = info)) +
    geom_point(size = 0.25, alpha = 0.3) +
    labs(x = "Log2 FC", y = "-log10 P") +
    scale_x_continuous(expand = c(0,0)) +
    scale_y_continuous(expand = c(0,0)) +
    ggtitle(title) +
    volcano.theme
}
pdf("volcanoplots.pdf", 3, 2)
# One volcano plot per contrast. Explicit print() is required: ggplot
# objects created inside a function/loop are only auto-printed at the
# interactive top level, so the original lapply() version produced an
# empty PDF when the script was source()d (R FAQ 7.22).
for (contrast in names(data_list)) {
  print(volcano.plot(data_list[[contrast]], contrast))
}
dev.off()
#####################################################################################
# Boxplots of PD genes
# Conversion table gene IDs
# Entrez-to-Ensembl gene ID conversion table (tab-separated).
conversion <- read.csv("../ahba_entrez2ensembl.txt", sep = "\t")
# PD genes
# PD genes of interest as Entrez IDs, named by gene symbol.
pd_genes <- c(SCARB2 = "950", ELOVL7 = "79993", SH3GL2 = "6456",
              SNCA = "6622", BAP1 = "8314", ZNF184 = "7738", SLC18A2 = "6571")
pd_names <- names(pd_genes)
# Translate to Ensembl IDs (consistent with how expr is indexed below),
# keeping the gene symbols as names.
pd_genes <- conversion$ensembl_gene_id[match(pd_genes, conversion$entrezgene)]
names(pd_genes) <- pd_names
# Shared theme for all box plots below.
box.theme <- theme(panel.background = element_blank(),
                   axis.line = element_line(colour = "black"),
                   axis.text = element_text(size = 10),
                   axis.title = element_text(size = 10),
                   plot.title = element_text(size = 12, face = "bold"),
                   strip.text.x = element_text(size = 12),
                   legend.position = "bottom")
pdf("boxplot_RNAseq_PDgenes.pdf", 2.6, 4)
# For each PD gene, draw two annotated box-plot pages:
#   p1: expression SN vs GTM, faceted by patient group
#   p2: expression PD vs control, faceted by region
# The geom_signif brackets are re-labelled with the adjusted p-values
# taken from data_list (rendered red when < 0.05).
plots <- lapply(pd_genes, function(g){
  print(names(pd_genes)[match(g, pd_genes)])  # progress: current gene symbol
  # Build a long-format table: one row per sample with expression, region
  # and disease labels.
  t <- lapply(diseases, function(d){
    t <- lapply(regions, function(r){
      s <- samples[[d]][[r]]  # sample IDs for this disease/region pair
      t <- data.frame(t(expr[g, s]))
      colnames(t) <- "expr"
      t$region <- r
      t$disease <- d
      t
    })
    Reduce(rbind, t)
  })
  t <- Reduce(rbind, t)
  t$region <- factor(t$region, levels = c("SN", "GTM"))
  t$disease <- factor(t$disease, levels = rev(unique(t$disease)))
  # Shared y-limits leave headroom (range/6) for the significance bracket.
  max_y <- max(t$expr)
  min_y <- min(t$expr)
  range_y <- max_y-min_y
  # Adjusted p-values of this gene in all four contrasts, formatted for display.
  pval <- format(sapply(data_list, function(t){ # DESeq2 results
    t[g, 'padj']
  }), digits = 2, scientific = TRUE)
  pval_region <- pval[3:4]   # region contrasts: PD then control (data_list order)
  pval_disease <- pval[1:2]  # disease contrasts: R3 then R4/R5
  p1 <- ggplot(t, aes(x=region, y=expr, fill=region)) +
    geom_boxplot() +
    geom_signif(comparisons = list(c("SN", "GTM")),
                step_increase = 0.1) +
    scale_y_continuous(limits = c(min_y, max_y+range_y/6)) +
    scale_fill_manual(values = unname(braak.color(levels(t$region)))) +
    labs(x = "", y = "Expression (CPM)") +
    ggtitle(paste(names(pd_genes)[match(g, pd_genes)], "per group")) +
    box.theme +
    facet_grid(~disease)
  # Overwrite geom_signif's default labels with the p-values above by
  # editing the built plot data directly (layer 2 = the signif layer).
  pg <- ggplot_build(p1)
  pg$data[[2]]$annotation <- pval_region
  pg$data[[2]]$textsize <- 2.5
  pg$data[[2]]$colour <- ifelse(as.numeric(pg$data[[2]]$annotation) < 0.05, "red", "black")
  q1 <- ggplot_gtable((pg))
  p1 <- plot(q1)
  p2 <- ggplot(t, aes(x=disease, y=expr, fill = disease)) +
    geom_boxplot() +
    geom_signif(comparisons = list(c("PD", "control")),
                step_increase = 0.1) +
    scale_y_continuous(limits = c(min_y, max_y+range_y/6)) +
    scale_fill_manual(values = rep("#FFFFFF",3)) +
    labs(x = "", y = "Expression (CPM)") +
    ggtitle(paste(names(pd_genes)[match(g, pd_genes)], "per region")) +
    box.theme +
    facet_grid(~region)
  # Same label surgery as p1, plus recolouring the boxes with the region
  # palette (two boxes per region, hence rep(x, 2)).
  pg <- ggplot_build(p2)
  pg$data[[1]]$fill <- unlist(lapply(unname(braak.color(levels(t$region))), function(x)rep(x, 2)))
  pg$data[[2]]$annotation <- pval_disease
  pg$data[[2]]$textsize <- 2.5
  pg$data[[2]]$colour <- ifelse(as.numeric(pg$data[[2]]$annotation) < 0.05, "red", "black")
  q2 <- ggplot_gtable((pg))
  p2 <- plot(q2)
})
dev.off()
#####################################################################################
# Box plots for Braak genes
# Loads `braakGenes` (used below as a table with entrez_id and r columns).
load("../braakGenes.RData")
# Convert entrez IDs to ensembl IDs (and intersect genes in PD data)
bg <- list( # Split Braak genes
  # Split by the sign of the correlation column r.
  down = braakGenes$entrez_id[braakGenes$r < 0],
  up = braakGenes$entrez_id[braakGenes$r > 0]
)
bg <- lapply(bg, function(g){
  rows <- which(conversion$entrezgene %in% g)
  id <- conversion[rows, "ensembl_gene_id"]  # last expression: the returned value
})
# Check diff. expressed Braak genes
# Overlap counts between every DEG list (down/up x contrast) and each
# Braak gene set; the result auto-prints in interactive use.
lapply(bg, function(gl){
  sapply(degs, function(x){
    sapply(x, function(y){
      length(intersect(y, gl))
    })
  })
})
pdf("boxplot_RNAseq_braakgenes.pdf", 2.6, 4)
# For each Braak gene set (down/up), plot the per-sample mean expression
# across the set: (1) SN vs GTM faceted by group, (2) PD vs control
# faceted by region. Plots are emitted with explicit plot() calls.
lapply(names(bg), function(dir){
  gl <- bg[[dir]]
  # Long-format table of per-sample mean expression over the gene set.
  t <- lapply(diseases, function(d){
    t <- lapply(regions, function(r){
      s <- samples[[d]][[r]]  # sample IDs for this disease/region pair
      t <- expr[gl, s]
      t <- data.frame('expr' = apply(t, 2, function(x) mean(x, na.rm = TRUE))) # Average across genes
      t$region <- r
      t$disease <- d
      t
    })
    # t <- melt(t)
    Reduce(rbind, t)
  })
  # t <- melt(t)
  t <- Reduce(rbind, t)
  t$region <- factor(t$region, levels = c("SN", "GTM"))
  t$disease <- factor(t$disease, levels = rev(unique(t$disease)))
  # "down"/"up" + "regulated ..." builds the title, e.g. "downregulated ...".
  p1 <- ggplot(t, aes(x=region, y=expr, fill=region)) +
    geom_boxplot() +
    scale_fill_manual(values = unname(braak.color(levels(t$region)))) +
    labs(x = "", y = "Expression (CPM)") +
    ggtitle(paste0(dir, "regulated \n Braak genes per group")) +
    box.theme +
    facet_grid(~disease)
  plot(p1)
  p2 <- ggplot(t, aes(x=disease, y=expr, fill = disease)) +
    geom_boxplot() +
    scale_fill_manual(values = rep("#FFFFFF",3)) +
    labs(x = "", y = "Expression (CPM)") +
    ggtitle(paste0(dir, "regulated \n Braak genes per region")) +
    box.theme +
    facet_grid(~region)
  # Recolour the boxes with the region palette after building the plot
  # (two boxes per region, hence rep(x, 2)).
  pg <- ggplot_build(p2)
  pg$data[[1]]$fill <- unlist(lapply(unname(braak.color(levels(t$region))), function(x)rep(x, 2)))
  q2 <- ggplot_gtable((pg))
  p2 <- plot(q2)
})
dev.off()
# #####################################################################################
# # check presence of genes in module M47
# m47 <- unlist(read.table("../m47_genes.txt", colClasses = 'character'))
# m47 <- conversion$ensembl_gene_id[match(m47, conversion$entrezgene)]
# m47 <- m47[!is.na(m47)]
########## PSEA: cell-type correction ##########
# Cell-type genes
# Marker gene sets per brain cell type: read Entrez IDs from the
# brainscope files and map them to Ensembl IDs, dropping unmapped genes.
celltypes <- sapply(c("Neurons", "Astrocytes", "Oligodendrocytes", "Microglia", "Endothelial_cells"), function(type){
  file = paste0("../brainscope_celltypes/", type, ".txt")
  entrez_ids <- as.character(read.csv(file, header = TRUE)$entrez_id)
  ensembl_ids <- conversion$ensembl_gene_id[match(entrez_ids, conversion$entrezgene)]
  ensembl_ids[!is.na(ensembl_ids)]
}, simplify = FALSE)
# Cell-type mean expression
# Reference signal per cell type: mean marker-gene expression in every
# sample (result: cell types x samples matrix).
ct_mean <- t(sapply(celltypes, function(ct){
  x <- expr[ct, ]
  colMeans(x, na.rm = TRUE)
}))
psea <- function(gene, ct, groups){
  # Population-specific expression analysis: for each cell type, fit
  #   gene ~ reference + (groups * reference)
  # and test whether the group interaction term is significant, i.e.
  # whether the gene's cell-type-specific expression differs between the
  # two groups.
  #
  # gene:   numeric vector, one gene's expression across samples.
  # ct:     matrix of cell-type reference signals (cell types x samples);
  #         rownames must include the five types listed below.
  # groups: 0/1 numeric vector encoding the two groups being contrasted.
  #
  # Returns a 5 x 2 matrix with lower-case cell-type rownames and columns
  #   group_pval: p-value of the interaction regressor
  #   group_fc:   (reference slope + interaction slope) / reference slope,
  #               i.e. fold change of the slope in group 1 vs group 0.
  # (A per-cell-type baseline fit without the interaction term existed
  # here as commented-out code and was removed.)
  types <- c(neurons = "Neurons", astrocytes = "Astrocytes",
             oligodendrocytes = "Oligodendrocytes", microglia = "Microglia",
             endothelial_cells = "Endothelial_cells")
  # One fit per cell type; vapply guarantees a 2 x 5 numeric result,
  # transposed to cell types x statistics.
  t(vapply(types, function(type){
    ref <- ct[type, ]
    ref_diff <- groups * ref  # interaction regressor
    fit <- lm(gene ~ ref + ref_diff)
    pval <- summary(fit)$coefficients["ref_diff", "Pr(>|t|)"]
    fc <- (fit$coefficients[2] + fit$coefficients[3]) / fit$coefficients[2]
    c(group_pval = pval, group_fc = unname(fc))
  }, numeric(2)))
}
# Braak-related genes (BRGs) that have an Ensembl mapping.
geneset <- conversion$ensembl_gene_id[conversion$entrezgene %in% braakGenes$entrez_id]
# NOTE(review): `genes` only appears in a commented-out line above
# ("# genes <- rownames(expr)"); confirm it is defined elsewhere before
# this point, otherwise this line errors.
geneset <- intersect(genes, geneset)
# PSEA per region between disease groups
# Within each region, test group (PD = 1, control = 0) dependence of
# cell-type-specific expression for every BRG, then count genes that are
# significant per cell type after BH adjustment.
brgs_regions <- sapply(regions, function(r){
  s <- sample_info$code[sample_info$region == r]
  groups <- sample_info[s, "disease"]
  groups <- as.numeric(groups == "PD")  # 0/1 indicator expected by psea()
  # BRGs
  psea_brgs <- sapply(geneset, function(g){
    gene <- unlist(expr[g, s])
    ct <- ct_mean[, s]
    psea(gene, ct, groups)
  }, simplify = FALSE)
  psea_brgs <- simplify2array(psea_brgs)  # cell type x statistic x gene array
  # # Celltype-specific expression
  # m1 <- t(psea_brgs[, "celltype_pval", ])
  # m1 <- apply(m1, 2, function(x)p.adjust(x, method = "BH"))
  # colSums(m1 < 0.05, na.rm = T)
  # Group-dependent expression
  # genes x cell types matrix of interaction p-values; BH-adjust across
  # genes within each cell type, then count significant genes.
  m2 <- t(psea_brgs[, "group_pval", ])
  m2 <- apply(m2, 2, function(x) p.adjust(x, method = "BH"))
  colSums(m2 < 0.05, na.rm = T)
})
# PSEA per patient group between regions
# Mirror of brgs_regions: within each patient group, test region
# dependence (GTM = 1, SN = 0) of cell-type-specific expression.
brgs_diseases <- sapply(diseases, function(d){
  s <- sample_info$code[sample_info$disease == d]
  groups <- sample_info[s, "region"]
  groups <- as.numeric(groups == "GTM")  # 0/1 indicator expected by psea()
  # BRGs
  psea_brgs <- sapply(geneset, function(g){
    gene <- unlist(expr[g, s])
    ct <- ct_mean[, s]
    psea(gene, ct, groups)
  }, simplify = FALSE)
  psea_brgs <- simplify2array(psea_brgs)  # cell type x statistic x gene array
  # # Celltype-specific expression
  # m1 <- t(psea_brgs[, "celltype_pval", ])
  # m1 <- apply(m1, 2, function(x)p.adjust(x, method = "BH"))
  # colSums(m1 < 0.05, na.rm = T)
  # Group-dependent expression
  # BH-adjust the interaction p-values across genes per cell type, then
  # count significant genes per cell type.
  m2 <- t(psea_brgs[, "group_pval", ])
  m2 <- apply(m2, 2, function(x) p.adjust(x, method = "BH"))
  colSums(m2 < 0.05, na.rm = T)
})
max_brgs <- max(brgs_diseases, brgs_regions)  # shared colour-scale ceiling
pdf("heatmap_diffgenes_BRGs.pdf", 3.3, 2)
# Heatmap 1: per region, number of BRGs whose cell-type expression
# depends on disease group.
m <- brgs_regions
rownames(m) <- gsub("_", " ", rownames(m))
colnames(m) <- c("R3", "R4/R5")
t <- melt(m)
t$Var2 <- factor(t$Var2, levels = rev(unique(t$Var2)))
# print() is required: ggplot objects are not auto-printed when the
# script is source()d, which would leave the PDF pages empty (R FAQ 7.22).
print(
  ggplot(t) +
    geom_tile(aes(Var2, Var1, fill=value), color = "black") +
    geom_text(aes(Var2, Var1, label=value)) +
    scale_x_discrete(position = "top") +
    scale_fill_gradient(low="white", high = "chocolate", limits = c(0,max_brgs)) +
    theme(axis.title=element_blank(), axis.ticks=element_blank(), panel.background=element_blank()) +
    ggtitle("PD vs. controls")
)
# Heatmap 2: per disease group, number of BRGs whose cell-type expression
# depends on region.
m <- brgs_diseases
rownames(m) <- gsub("_", " ", rownames(m))
t <- melt(m)
t$Var2 <- factor(t$Var2, levels = rev(unique(t$Var2)))
print(
  ggplot(t) +
    geom_tile(aes(Var2, Var1, fill=value), color = "black") +
    geom_text(aes(Var2, Var1, label=value)) +
    scale_x_discrete(position = "top") +
    scale_fill_gradient(low="white", high = "chocolate", limits = c(0,max_brgs)) +
    theme(axis.title=element_blank(), axis.ticks=element_blank(), panel.background=element_blank()) +
    # NOTE(review): the columns here are the disease groups, but the
    # title reads "R1 vs. R3" (R1 appears nowhere else) - confirm label.
    ggtitle("R1 vs. R3")
)
dev.off()
|
effcb9031747aa129c67ff4b645f7e872443b419 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hierfstat/examples/allelic.richness.rd.R | 1292b587463f19fd4f0a82e0b82ed0b9cd6d4879 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 209 | r | allelic.richness.rd.R | library(hierfstat)
### Name: allelic.richness
### Title: Estimates allelic richness
### Aliases: allelic.richness
### Keywords: univar
### ** Examples
# Load the hierfstat example genotype data and estimate allelic richness,
# dropping the first column (non-genotype data).
data(gtrunchier)
allelic.richness(gtrunchier[,-1])
|
52c4a6e0dc0f16a6362e9465a4dd44db3f422a12 | b48ea7f06b12d71fe597adefa5da35e81d08caf8 | /inst/examples/09-themes/ui.R | e5742818255d7a4024c57cb849f7533947338d5c | [
"MIT"
] | permissive | shinyTree/shinyTree | c840dd94af787e15cce6b7c0d4b73b2537123b8a | 110b49970d117d0638746d47b074e955287abed0 | refs/heads/master | 2023-08-30T22:50:33.875203 | 2023-08-07T15:53:07 | 2023-08-07T15:53:07 | 22,565,116 | 75 | 42 | NOASSERTION | 2023-02-08T10:37:30 | 2014-08-03T02:44:13 | JavaScript | UTF-8 | R | false | false | 744 | r | ui.R | library(shiny)
library(shinyTree)
#' Define UI for application that demonstrates a simple Tree editor
#' @author Mike Schaffer \email{mschaff@gmail.com}
shinyUI(
  pageWithSidebar(
    # Application title
    headerPanel("shinyTree with built-in themes"),
    sidebarPanel(
      helpText(HTML("An example of using shinyTree themes."))
    ),
    # Four trees, one per built-in theme; the last also disables the
    # theme's node icons and connecting dots.
    mainPanel(
      "Theme: default",
      shinyTree("tree", theme="default"),
      hr(),
      "Theme: default-dark",
      shinyTree("tree2", theme="default-dark"),
      hr(),
      "Theme: proton",
      shinyTree("tree3", theme="proton"),
      hr(),
      "Theme: proton without icons and dots",
      shinyTree("tree4", theme="proton", themeIcons = FALSE, themeDots = FALSE)
    )
)) |
41281f95ee9b4c599e7fe62288276bb6a4d40f90 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed_and_cleaned/12396_0/rinput.R | 4da8ebd95373e9430a5c630d02588891afc00452 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
# Read the Newick tree from file.
testtree <- read.tree("12396_0.txt")
# Remove the root; the unrooted copy is written out below.
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="12396_0_unrooted.txt") |
0a9a61509447462bc8fa8458eed7e60b37c2c2e6 | 402d5c4ac8d58eb0d4248f791568dd5f31fc60e0 | /tests/testthat/test_martCheck.R | b24fb16d60a830b180ca77bf341c491e5ae462dc | [] | no_license | JianZhangSMU/biomaRt | 59bb8b067356b4b3cbc15e32fc9771cbe77e4110 | ad5e80ca5f72a64c54181d96b4e66a01f15b970e | refs/heads/master | 2020-07-29T16:42:52.455896 | 2019-08-19T13:56:24 | 2019-08-19T13:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 834 | r | test_martCheck.R | library(biomaRt)
context('Testing martCheck function')
# NOTE: these tests contact the live Ensembl BioMart service through
# useMart()/useDataset()/useEnsembl(), so they require network access.
test_that("martCheck() catches bad input", {
  # No argument at all, or a non-Mart object, must error.
  expect_error(biomaRt:::martCheck())
  expect_error(biomaRt:::martCheck("INVALID_OBJECT"))
  ensembl <- useMart("ensembl")
  # A Mart without a dataset selected is rejected with a clear message.
  # (The argument is spelled out as `regexp`; the original relied on
  # partial matching of `regex =`.)
  expect_error(biomaRt:::martCheck(ensembl),
               regexp = "No dataset selected, please select a dataset first")
  ensembl <- useDataset("hsapiens_gene_ensembl",mart=ensembl)
  # Requiring a different biomart name is rejected.
  expect_error(biomaRt:::martCheck(ensembl, biomart = "Not_real_mart"),
               regexp = "This function only works when used with")
})
test_that('martCheck() is quiet for valid input', {
  ensembl <- useEnsembl('ensembl', dataset = 'hsapiens_gene_ensembl')
  expect_silent(biomaRt:::martCheck(ensembl,
                                    biomart = "ENSEMBL_MART_ENSEMBL"))
})
|
fa3560a85527bcd0efdbb4da669613e889a78d86 | ed12c9bfe2bf74f24d69b5e52f041383202e46b9 | /exercise-1/exercise.R | 8ef3d38d8a2e71d22024e5770a54f8095c70bf0b | [
"MIT"
] | permissive | Kidusyohanes/m10-dataframes | f3ded85f927195dd42683aa6ca92364bb02ac888 | 1879290beccc15afd62bf1e17e94cde85b1c0ae1 | refs/heads/master | 2020-11-29T15:28:21.903430 | 2017-04-12T16:49:07 | 2017-04-12T16:49:07 | 87,478,038 | 0 | 0 | null | 2017-04-06T21:47:03 | 2017-04-06T21:47:03 | null | UTF-8 | R | false | false | 874 | r | exercise.R | # Exercise 1: Creating data frames
# Create a vector of the number of points the Seahawks scored the first 4 games of last season
# Hint: (google "Seahawks scores 2016") 35W 25W 20L 6T
first.4.games <- c(35, 25, 20, 6)
# Create a vector of the number of points the Seahawks have allowed to be scored against them in the first 4 games
points.allowed <- c(24, 31, 25, 6)
# Combine your two vectors into a dataframe
my.data <- data.frame(first.4.games, points.allowed)
# Create a new column "diff" that is the difference in points.
# The margin is kept signed (the original abs() hid which games were
# losses), and it is actually added to the data frame as a column.
diff <- first.4.games - points.allowed
my.data$diff <- diff
# Create a new column "won" which is TRUE if the Seahawks won
won <- first.4.games > points.allowed
my.data$won <- won
# Create a vector of the opponents
opponents <- c('Dolphins','49ers','Broncos','Packers')
# Assign your dataframe rownames of their opponents (the original built a
# separate data frame instead of setting rownames).
rownames(my.data) <- opponents
# Kept for backward compatibility with the original script: a data frame
# pairing each opponent with the two score vectors.
opponents.name <- data.frame(opponents, first.4.games, points.allowed)
|
d69c8a4b9f1c9e753ee880d55a3bac5f6d93041c | a8279a7eb91b2ad6a139af9be2fa0d1b04ccc04f | /man/results_raw.Rd | dfa4e3dc84d4bb03e380388917d2665133351403 | [
"MIT"
] | permissive | apreshill/bakeoff | c97374b7266f5538c06d1e2c41be24eb806383cb | 00fdf4dd339b69a60b1d7c9fa7e5850971e04463 | refs/heads/main | 2022-11-06T21:27:17.805375 | 2022-11-02T11:38:26 | 2022-11-02T11:38:26 | 137,939,073 | 56 | 23 | NOASSERTION | 2022-03-04T19:37:05 | 2018-06-19T19:56:12 | R | UTF-8 | R | false | true | 1,146 | rd | results_raw.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/results_raw.R
\docType{data}
\name{results_raw}
\alias{results_raw}
\title{Each baker's results by episode (raw)}
\format{
A data frame with 1,136 rows representing individual bakers per
episode and 4 variables:
\describe{
\item{series}{A factor denoting UK series (1-10)}
\item{episode}{A factor denoting episode number within a series}
\item{baker}{A character string giving given or nickname only (note: see
\code{bakers} for full baker names)}
\item{result}{A factor denoting if the baker was \code{"IN"}, \code{"OUT"},
\code{"STAR BAKER"}, or \code{"SICK"} for a given episode. For finale episodes,
values are either \code{"WINNER"} or \code{"RUNNER-UP"}. If \code{NA}, baker did not
appear in episode.}
}
}
\source{
See
\url{https://en.wikipedia.org/wiki/The_Great_British_Bake_Off_(series_1)#Results_summary},
for example, for series 1 results summary.
}
\usage{
results_raw
}
\description{
This data has one row per baker for every episode,
including episodes they did not appear in.
}
\examples{
if (require('tibble')) {
results_raw
}
}
\keyword{datasets}
|
58348abdb56c78a8ae4311bcccc5a1d60be8bfb7 | 315cef05afe87c65d129eb0b5bc3514f35623ed6 | /plot3.R | 2f2ad68245233fc05a6a7cb35651c03bd8d54dc5 | [] | no_license | maxzeller/ExData_Plotting1 | e1e76395d45d87ae622fd6d2cdf8de150c78c4ed | 7c42bc768dbb645ab471183ed59ec070c7a769a8 | refs/heads/master | 2020-02-26T16:37:50.551075 | 2016-05-03T19:35:58 | 2016-05-03T19:35:58 | 57,995,208 | 0 | 0 | null | 2016-05-03T19:15:56 | 2016-05-03T19:15:56 | null | UTF-8 | R | false | false | 1,218 | r | plot3.R | temp <- tempfile()
# Download the household power-consumption archive to the temp file and
# read the data set from inside the zip without extracting it.
download.file("https://d396qusza40orc.cloudfront.net/exdata/data/household_power_consumption.zip",temp)
# Peek at the first 5 rows to confirm the format before the full read.
test <- read.table(unz(temp, "household_power_consumption.txt"),header=T,sep=";",nrows=5,na.strings="?",stringsAsFactors=F)
print(test)
# Read 90 days of minute-resolution rows (enough to cover 2007-02-01/02).
data <- read.table(unz(temp, "household_power_consumption.txt"),header=T,sep=";",nrows=90*24*60,na.strings="?",stringsAsFactors=F)
unlink(temp)
data$Date<-as.Date(strptime(data$Date,format="%d/%m/%Y"))
# Keep 2007-02-01 and 2007-02-02; the +1 keeps one extra row so the
# "Sat" tick (index 2*24*60+1) lands on the first minute after the window.
start<-min(which(data$Date>=as.Date("2007-02-01")))
end<-max(which(data$Date<=as.Date("2007-02-02")))+1
data<-data[start:end,]
#=================================PLOT3
# NOTE(review): restoreConsole is a Windows-only graphics argument -
# confirm this script runs on the target platform.
png(filename = "plot3.png", width = 480, height = 480,
    units = "px",
    restoreConsole = TRUE)
# Draw sub-metering 1 first, then overlay 2 and 3; axes are drawn
# manually so day labels (Thu/Fri/Sat) sit at day boundaries.
plot(data$Sub_metering_1,type="l",
     ylab="Energy sub metering",
     xlab="",
     main="",
     axes=F)
axis(side=2,at=seq(0,30,by=10),labels=seq(0,30,by=10))
axis(side=1,at=c(1,24*60+1,24*60*2+1),labels=c("Thu","Fri","Sat"))
box(lty=1)
lines(data$Sub_metering_2,type="l",col="red")
lines(data$Sub_metering_3,type="l",col="blue")
# col 1/2/4 = black/red/blue, matching the three line colours above.
legend(x="topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c(1,2,4))
dev.off()
|
06b6db0f307dd2d1d3026f1dad9ec69bb3320a26 | 9716a6d75781d99a1cfdfc09cb55b7f53ea46b81 | /misc/fibPlay.r | 887f6334264829f2cfdc5a5557ef72f992707cbb | [] | no_license | ricardianambivalence/Rproject | 17ae2017de25cb8e934fbce5bc4619b86ce49194 | a7136c6cf654cc8c1eda4d74d98764e3037b5764 | refs/heads/master | 2021-01-01T06:04:47.397872 | 2014-01-26T23:37:43 | 2014-01-26T23:37:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,866 | r | fibPlay.r | # fib benchmarks
require(rbenchmark)
require(lambda.r)
require(compiler)
# Naive doubly-recursive Fibonacci; deliberately slow (exponential time)
# so it serves as the baseline in the benchmarks below.
Fib <- function(n) {
  if (n < 2) {
    return(n)
  }
  Fib(n - 1) + Fib(n - 2)
}
# Byte-compiled variant of the same function.
cFib <- cmpfun(Fib)
# Iterative (bottom-up) Fibonacci: fills the sequence in a vector and
# returns the n-th entry. Base cases: 0 -> 0, 1 and 2 -> 1.
FibG <- function(n) {
  if (n == 0) {
    return(0)
  }
  if (n %in% c(1, 2)) {
    return(1)
  }
  fib_seq <- rep(1, n)
  for (idx in 3:n) {
    fib_seq[idx] <- fib_seq[idx - 1] + fib_seq[idx - 2]
  }
  fib_seq[n]
}
# Byte-compiled variant of the same function.
cFibG <- cmpfun(FibG)
# lambda.r pattern-matched definitions of the same three algorithms.
# Recursive Fibonacci via lambda.r clause dispatch (base cases 0 and 1).
Fib_lr(0) %as% 0
Fib_lr(1) %as% 1
Fib_lr(n) %as% {
  Fib_lr(n-1) + Fib_lr(n-2)
}
cFib_lr <- cmpfun(Fib_lr)
# Iterative (bottom-up) Fibonacci via lambda.r clause dispatch.
FibG_lr(0) %as% 0
FibG_lr(1) %as% 1
FibG_lr(2) %as% 1
FibG_lr(n) %as% {
  y <- rep(1, n)
  for (i in 3:n) {
    y[i] <- y[i-1] + y[i-2] }
  y[n]
}
cFibG_lr <- cmpfun(FibG_lr)
# Closed-form Fibonacci: sum of binomial coefficients along the diagonals
# of Pascal's triangle.
fl(0) %as% 0
fl(n) %as% {sum(choose(n - 1:n, 1:n - 1))}
seal(fl)  # finalize the lambda.r function so no more clauses can be added
cfl <- cmpfun(fl)
# Closed-form Fibonacci: F(n) = sum over k of C(n - k, k - 1), the
# diagonal sums of Pascal's triangle. Returns 0 for n < 1.
# Fixes vs original: `<-` instead of `=`, plain if/else instead of the
# scalar ifelse() anti-pattern, and seq_len() instead of 1:n (which
# yields c(1, 0) for n = 0).
f <- function(n){
  if (n < 1) {
    return(0)
  }
  k <- seq_len(n)
  sum(choose(n - k, k - 1))
}
# Byte-compiled variant of the same function.
cf <- cmpfun(f)
# Benchmark each variant by computing Fibonacci numbers 1..30, ten
# replications each; results are ranked by elapsed time relative to the
# fastest. (Fib_lr/cFib_lr are defined above but not benchmarked -
# presumably omitted on purpose; confirm.)
bb <- benchmark(sapply(1:30, Fib),
                sapply(1:30, FibG),
                sapply(1:30, FibG_lr),
                sapply(1:30, f),
                sapply(1:30, cFib),
                sapply(1:30, cFibG),
                sapply(1:30, cFibG_lr),
                sapply(1:30, cf),
                sapply(1:30, fl),
                sapply(1:30, cfl),
                columns = c('test', 'replications', 'elapsed', 'relative'),
                order = 'elapsed',
                replications = 10
)
print(bb)
# 8 sapply(1:30, cf) 10 0.003 1.000
# 6 sapply(1:30, cFibG) 10 0.003 1.000
# 4 sapply(1:30, f) 10 0.004 1.333
# 2 sapply(1:30, FibG) 10 0.011 3.667
# 3 sapply(1:30, FibG_lr) 10 0.044 14.667
# 7 sapply(1:30, cFibG_lr) 10 0.046 15.333
# 1 sapply(1:30, Fib) 10 82.471 27490.333
# 5 sapply(1:30, cFib) 10 92.327 30775.667
|
1497828882c2fe0b37a0bfbb03a53fe62fbedb97 | 84b1dd8d67a6b29fcd8967060892873cb12e6962 | /man/grapes-plus-equals-grapes.Rd | 5d1e97887448216974b06ca463b35130239ae9b6 | [
"MIT"
] | permissive | jakedilliott/covidfireMASS | 0423a28bc1d6b6436b40c337d08ab625ee50f8ba | e6643c0f75f8791ddd4b73669e9566aac1ea3501 | refs/heads/main | 2023-04-09T08:21:50.387803 | 2021-06-19T05:48:14 | 2021-06-19T05:48:14 | 327,392,364 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 267 | rd | grapes-plus-equals-grapes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/increment.R
\name{\%+=\%}
\alias{\%+=\%}
\title{Increment operator}
\usage{
e1 \%+=\% e2
}
\arguments{
\item{e1}{...}
\item{e2}{...}
}
\description{
used to increment days in quarantine
}
|
35ef26aabae22ba94e97ad6417cdee3f4a99376b | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.compute/man/ec2_reset_instance_attribute.Rd | 7fbf65a20a2ac58a2ce3a94439f4b90dbaa361be | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 2,208 | rd | ec2_reset_instance_attribute.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_reset_instance_attribute}
\alias{ec2_reset_instance_attribute}
\title{Resets an attribute of an instance to its default value}
\usage{
ec2_reset_instance_attribute(Attribute, DryRun, InstanceId)
}
\arguments{
\item{Attribute}{[required] The attribute to reset.
You can only reset the following attributes: \code{kernel} | \code{ramdisk} |
\code{sourceDestCheck}. To change an instance attribute, use
\code{\link[=ec2_modify_instance_attribute]{modify_instance_attribute}}.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{InstanceId}{[required] The ID of the instance.}
}
\value{
An empty list.
}
\description{
Resets an attribute of an instance to its default value. To reset the
\code{kernel} or \code{ramdisk}, the instance must be in a stopped state. To reset
the \code{sourceDestCheck}, the instance can be either running or stopped.
The \code{sourceDestCheck} attribute controls whether source/destination
checking is enabled. The default value is \code{true}, which means checking
is enabled. This value must be \code{false} for a NAT instance to perform
NAT. For more information, see \href{https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html}{NAT Instances}
in the \emph{Amazon Virtual Private Cloud User Guide}.
}
\section{Request syntax}{
\preformatted{svc$reset_instance_attribute(
Attribute = "instanceType"|"kernel"|"ramdisk"|"userData"|"disableApiTermination"|"instanceInitiatedShutdownBehavior"|"rootDeviceName"|"blockDeviceMapping"|"productCodes"|"sourceDestCheck"|"groupSet"|"ebsOptimized"|"sriovNetSupport"|"enaSupport"|"enclaveOptions",
DryRun = TRUE|FALSE,
InstanceId = "string"
)
}
}
\examples{
\dontrun{
# This example resets the sourceDestCheck attribute for the specified
# instance.
svc$reset_instance_attribute(
Attribute = "sourceDestCheck",
InstanceId = "i-1234567890abcdef0"
)
}
}
\keyword{internal}
|
9aae319e14dee864369c9518eb243e70c6143f63 | 0cc521004d73ec5cfc1098684c651b8e98f25dd9 | /man/importDittoBulk.Rd | 7cea5531db6570f3c789a3e1fb0d10ff40e130ab | [
"MIT"
] | permissive | dtm2451/dittoSeq | 875c3e125ff5de2f367f741279db1cec00114b85 | 727304c42a263d423e211844be01e25b6cc0452a | refs/heads/devel | 2023-08-31T07:07:13.662247 | 2023-08-17T18:57:40 | 2023-08-17T18:57:40 | 171,289,840 | 121 | 15 | MIT | 2023-09-14T14:45:27 | 2019-02-18T13:31:48 | R | UTF-8 | R | false | true | 6,034 | rd | importDittoBulk.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importDittoBulk.R
\name{importDittoBulk}
\alias{importDittoBulk}
\title{import bulk sequencing data into a SingleCellExperiment format that will work with other dittoSeq functions.}
\usage{
importDittoBulk(x, reductions = NULL, metadata = NULL, combine_metadata = TRUE)
}
\arguments{
\item{x}{A \code{DGEList}, or \code{\linkS4class{SummarizedExperiment}} (includes \code{DESeqDataSet}) class object containing the sequencing data to be imported.
Alternatively, for import from a raw matrix format, a named list of matrices (or matrix-like objects) where names will become the assay names of the eventual SCE.
NOTE: As of dittoSeq version 1.1.11, all dittoSeq functions can work directly with SummarizedExperiment objects, so this import function is nolonger required for such data.}
\item{reductions}{A named list of dimensionality reduction embeddings matrices.
Names will become the names of the dimensionality reductions and how each will be used with the \code{reduction.use} input of \code{dittoDimPlot} and \code{dittoDimHex}.
For each matrix, rows of the matrices should represent the different samples of the dataset, and columns the different dimensions.}
\item{metadata}{A data.frame (or data.frame-like object) where rows represent samples and named columns represent the extra information about such samples that should be accessible to visualizations.
The names of these columns can then be used to retrieve and plot such data in any dittoSeq visualization.}
\item{combine_metadata}{Logical which sets whether original \code{colData} (DESeqDataSet/SummarizedExperiment) or \code{$samples} (DGEList) from \code{x} should be retained.
When \code{x} is a SummarizedExperiment or DGEList:
\itemize{
\item When \code{FALSE}, sample metadata inside \code{x} (colData or $samples) is ignored entirely.
\item When \code{TRUE} (the default), metadata inside \code{x} is combined with what is provided to the \code{metadata} input; but names must be unique, so when there are similarly named slots, the \strong{values provided to the \code{metadata} input take priority.}
}}
}
\value{
A \code{\linkS4class{SingleCellExperiment}} object...
that contains all assays (SummarizedExperiment; includes DESeqDataSets), all standard slots (DGEList; see below for specifics), or expression matrices of the input \code{x},
as well as any dimensionality reductions provided to \code{reductions}, and any provided \code{metadata} stored in colData.
}
\description{
import bulk sequencing data into a SingleCellExperiment format that will work with other dittoSeq functions.
}
\section{Note about SummarizedExperiments}{
As of dittoSeq version 1.1.11, all dittoSeq functions can work directly with SummarizedExperiment objects, so this import function is nolonger required for such data.
}
\section{Note on assay names}{
One recommended assay to create if it is not already present in your dataset, is a log-normalized version of the counts data.
The logNormCounts function of the scater package is an easy way to make such a slot.
dittoSeq visualizations default to grabbing expression data from an assay named logcounts > normcounts > counts
}
\examples{
library(SingleCellExperiment)
# Generate some random data
nsamples <- 60
exp <- matrix(rpois(1000*nsamples, 20), ncol=nsamples)
colnames(exp) <- paste0("sample", seq_len(ncol(exp)))
rownames(exp) <- paste0("gene", seq_len(nrow(exp)))
logexp <- log2(exp + 1)
# Dimensionality Reductions
pca <- matrix(runif(nsamples*5,-2,2), nsamples)
tsne <- matrix(rnorm(nsamples*2), nsamples)
# Some Metadata
conds <- factor(rep(c("condition1", "condition2"), each=nsamples/2))
timept <- rep(c("d0", "d3", "d6", "d9"), each = 15)
genome <- rep(c(rep(TRUE,7),rep(FALSE,8)), 4)
grps <- sample(c("A","B","C","D"), nsamples, TRUE)
clusts <- as.character(1*(tsne[,1]>0&tsne[,2]>0) +
2*(tsne[,1]<0&tsne[,2]>0) +
3*(tsne[,1]>0&tsne[,2]<0) +
4*(tsne[,1]<0&tsne[,2]<0))
score1 <- seq_len(nsamples)/2
score2 <- rnorm(nsamples)
### We can import the counts directly
myRNA <- importDittoBulk(
x = list(counts = exp,
logcounts = logexp))
### Adding metadata & PCA or other dimensionality reductions
# We can add these directly during import, or after.
myRNA <- importDittoBulk(
x = list(counts = exp,
logcounts = logexp),
metadata = data.frame(
conditions = conds,
timepoint = timept,
SNP = genome,
groups = grps),
reductions = list(
pca = pca))
myRNA$clustering <- clusts
myRNA <- addDimReduction(
myRNA,
embeddings = tsne,
name = "tsne")
# (other packages SCE manipulations can also be used)
### When we import from SummarizedExperiment, all metadata is retained.
# The object is just 'upgraded' to hold extra slots.
# The output is the same, aside from a message when metadata are replaced.
se <- SummarizedExperiment(
list(counts = exp, logcounts = logexp))
myRNA <- importDittoBulk(
x = se,
metadata = data.frame(
conditions = conds,
timepoint = timept,
SNP = genome,
groups = grps,
clustering = clusts,
score1 = score1,
score2 = score2),
reductions = list(
pca = pca,
tsne = tsne))
myRNA
### For DESeq2, how we might have made this:
# DESeqDataSets are SummarizedExperiments, and behave similarly
# library(DESeq2)
# dds <- DESeqDataSetFromMatrix(
# exp, data.frame(conditions), ~ conditions)
# dds <- DESeq(dds)
# dds_ditto <- importDittoBulk(dds)
### For edgeR, DGELists are a separate beast.
# dittoSeq imports what I know to commonly be inside them, but please submit
# an issue on the github (dtm2451/dittoSeq) if more should be retained.
# library(edgeR)
# dgelist <- DGEList(counts=exp, group=conditions)
# dge_ditto <- importDittoBulk(dgelist)
}
\seealso{
\code{\linkS4class{SingleCellExperiment}} for more information about this storage structure.
}
|
2936bef12df1de2bf4a380cc6ce1cc327051f247 | 17bce903f04119fd1e888216675eea2834191315 | /R/UScancer.R | 22118fa39ab598527d93c32926902ece449fe368 | [] | no_license | cran/UScancer | a81d91adbf571edd9d575c6887efb3886d3f4346 | c8b0bde7b24f5ec32957e5b32b072a27737d32b0 | refs/heads/master | 2020-12-24T16:49:53.609812 | 2014-08-08T00:00:00 | 2014-08-08T00:00:00 | 17,693,982 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 180 | r | UScancer.R | # depends: diseasemapping
# depends: maps for county fips lookup table
# NEVERMIND, use own since maps::county_fips is missing hawaii, alaska, puerto rico
# depends: rgdal
|
f81488772f0f85e0ddc6687f0345c4a9e4707b56 | a011404b82226d0c87614aeae8e5bbeb35627e06 | /Male-Femal-Modelling.R | 634ae19c52053980bc2e5d22d926b95828a0f980 | [] | no_license | srihari-busam/ist707 | 548d74da8c25f192189c45f2c3d6f13884bbbc6f | 9ecdca3012efb8b74a1ce5a3bd13e63000307e4f | refs/heads/master | 2021-01-06T22:45:59.117954 | 2020-03-23T02:54:44 | 2020-03-23T02:54:44 | 241,503,711 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,349 | r | Male-Femal-Modelling.R | # load required libraries
library(caret)
library(rpart)
library(tidyverse)
library(rpart.plot)
library(arules)
library(arulesViz)
library(rattle)
# Load the cardiovascular-disease data set (semicolon-separated).
cardio_male_df <- read.csv("cardio_train.csv", header = TRUE, sep = ";")
str(cardio_male_df)
nrow(cardio_male_df)
# Convert age from days to whole years.
cardio_male_df$age <- round(cardio_male_df$age / 365)
# Check age range in the data: roughly 30-65, which seems reasonable.
range(cardio_male_df$age)
# Helper: keep rows whose `column` value lies in [lo, hi]. NA values are
# kept, matching the original `-which(...)` behaviour (which() never
# matches NA). Logical subsetting is used because `df[-which(cond), ]`
# silently drops EVERY row when no row matches (`-integer(0)` is
# integer(0), which selects zero rows).
keep_between <- function(df, column, lo, hi) {
  vals <- df[[column]]
  df[is.na(vals) | (vals >= lo & vals <= hi), ]
}
# Height is in cm; keep 130cm (~4.3ft) to 200cm (~6.5ft).
range(cardio_male_df$height)
length(which(cardio_male_df$height > 200))
length(which(cardio_male_df$height < 130))
cardio_male_df <- keep_between(cardio_male_df, "height", 130, 200)
# Weight is in kg; treat below 40kg and above 165kg as outliers.
range(cardio_male_df$weight)
length(which(cardio_male_df$weight < 40))
length(which(cardio_male_df$weight > 165))
cardio_male_df <- keep_between(cardio_male_df, "weight", 40, 165)
# Systolic blood pressure. Based on
# https://trialsjournal.biomedcentral.com/articles/10.1186/1468-6708-6-5
# values below 85 or above 200 are considered outliers. (The original
# exploratory count used 240 as the upper bound, but 200 was the
# threshold actually applied; 200 is kept.)
range(cardio_male_df$ap_hi)
length(which(cardio_male_df$ap_hi < 85))
length(which(cardio_male_df$ap_hi > 240))
cardio_male_df <- keep_between(cardio_male_df, "ap_hi", 85, 200)
# Diastolic blood pressure: keep 50-140 (same citation as above).
range(cardio_male_df$ap_lo)
length(which(cardio_male_df$ap_lo < 50))
length(which(cardio_male_df$ap_lo > 140))
cardio_male_df <- keep_between(cardio_male_df, "ap_lo", 50, 140)
# Remove the row-identifier column; it carries no modelling information.
cardio_male_df <- cardio_male_df[, names(cardio_male_df) != "id"]
str(cardio_male_df)
head(cardio_male_df)
#######################################################################################################################################
#######################################################################################################################################
#######################################################################################################################################
cardio_male_df$cardio <- factor(cardio_male_df$cardio, levels = c(0,1), labels = c("negative", "positive"))
nrow(cardio_male_df)
female_df = cardio_male_df[which(cardio_male_df$gender==1),]
male_df = cardio_male_df[which(cardio_male_df$gender==2),]
male_df = male_df[, -2]
female_df = female_df[,-2]
nrow(male_df)
set.seed(12345)
train_index <- createDataPartition(male_df$cardio, p = 0.80, list = FALSE)
train_male_df <- male_df[train_index,]
test_male_df <- male_df[-train_index,]
nrow(train_male_df)
nrow(test_male_df)
set.seed(123)
start_time <- Sys.time()
model_male_rpart <- train(cardio ~ ., data = train_male_df, method = "rpart",
trControl = trainControl(method = "repeatedcv", number = 15, repeats = 3, allowParallel = T),
tuneGrid = expand.grid(cp = seq(0, 0.5, 0.05))
)
end_time <- Sys.time()
end_time - start_time
varImp(model_male_rpart, scale=FALSE)
fancyRpartPlot(model_male_rpart$finalModel)
set.seed(123)
start_time <- Sys.time()
model_male_knn <- train(cardio ~ ., data = train_male_df, method = "knn",
tuneGrid = data.frame(k = seq(15, 30)),
preProcess = c("center", "scale"),
trControl = trainControl(method = "repeatedcv",
number = 15, repeats = 3, allowParallel = T))
end_time <- Sys.time()
end_time - start_time
varImp(model_male_knn, scale=FALSE)
### NB
set.seed(123)
start_time <- Sys.time()
model_male_nb <- train(cardio ~ ., data = train_male_df, method = "nb",
preProcess = c("center", "scale"),
trControl = trainControl(method = "repeatedcv", number = 15,repeats=3, allowParallel = T),
tuneGrid = expand.grid(fL = 1:2, usekernel = c(TRUE, FALSE), adjust = 1:2))
end_time <- Sys.time()
end_time - start_time
set.seed(123)
start_time <- Sys.time()
cardio_male_rf <- train(cardio ~ ., data = train_male_df,
method = "rf",
trControl = trainControl(method = "repeatedcv", number = 15, repeats = 3, allowParallel = T))
end_time <- Sys.time()
end_time - start_time
set.seed(123)
start_time <- Sys.time()
model_male_svm_linear <- train(cardio ~ ., data = train_male_df,
method = "svmLinear",
preProcess = c("center", "scale"),
trControl = trainControl(method = "repeatedcv", number = 15, repeats = 3, allowParallel = T),
tuneGrid = expand.grid(C = seq(0.1, 3.0, 0.5)))
end_time <- Sys.time()
end_time - start_time
set.seed(123)
start_time <- Sys.time()
model_male_svm_radial <- train(cardio ~ ., data = train_male_df,
tuneGrid = expand.grid(sigma = seq(0.1,0.2,0.1),
C = seq(0.1,1.0,0.5)),
method = "svmRadial",
preProcess = c("center", "scale"),
trControl = trainControl(method = "repeatedcv",
number = 15, repeats = 1,allowParallel = T))
end_time <- Sys.time()
end_time - start_time
varImp(model_male_svm_radial, scale=FALSE)
model_comparison <- resamples(list(RPart = model_rpart,RF = cardio_model_rf, NB = model_nb, KNN = model_knn, SVM_L = cardio_svm_linear), metric = "Recall")
scales <- list(x = list(relation = "free"),
y = list(relation = "free"))
bwplot(model_comparison, scales = scales)
###############################################################################################
###############################################################################################
###### Female
set.seed(12345)
train_index_female <- createDataPartition(female_df$cardio, p = 0.80, list = FALSE)
train_female_df <- female_df[train_index_female,]
test_female_df <- female_df[-train_index_female,]
nrow(train_female_df)
nrow(test_female_df)
set.seed(123)
start_time <- Sys.time()
model_female_rpart <- train(cardio ~ ., data = train_female_df, method = "rpart",
trControl = trainControl(method = "repeatedcv", number = 15, repeats = 2, allowParallel = T),
tuneGrid = expand.grid(cp = seq(0, 0.5, 0.05))
)
end_time <- Sys.time()
end_time - start_time
varImp(model_female_rpart, scale=FALSE)
fancyRpartPlot(model_female_rpart$finalModel)
set.seed(123)
start_time <- Sys.time()
model_female_knn <- train(cardio ~ ., data = train_female_df, method = "knn",
tuneGrid = data.frame(k = seq(15, 30)),
preProcess = c("center", "scale"),
trControl = trainControl(method = "repeatedcv",
number = 15, repeats = 2, allowParallel = T))
end_time <- Sys.time()
end_time - start_time
varImp(model_female_knn, scale=FALSE)
### NB
set.seed(123)
start_time <- Sys.time()
model_female_nb <- train(cardio ~ ., data = train_female_df, method = "nb",
preProcess = c("center", "scale"),
trControl = trainControl(method = "repeatedcv", number = 15,repeats=2, allowParallel = T),
tuneGrid = expand.grid(fL = 1:2, usekernel = c(TRUE, FALSE), adjust = 1:2))
end_time <- Sys.time()
end_time - start_time
set.seed(123)
start_time <- Sys.time()
model_female_rf <- train(cardio ~ ., data = train_female_df,
method = "rf",
trControl = trainControl(method = "repeatedcv", number = 15, repeats = 2, allowParallel = T))
end_time <- Sys.time()
end_time - start_time
set.seed(123)
start_time <- Sys.time()
model_female_svm_linear <- train(cardio ~ ., data = train_female_df,
method = "svmLinear",
preProcess = c("center", "scale"),
trControl = trainControl(method = "repeatedcv", number = 15, repeats = 2, allowParallel = T),
tuneGrid = expand.grid(C = seq(0.1, 3.0, 0.5)))
end_time <- Sys.time()
end_time - start_time
set.seed(123)
start_time <- Sys.time()
model_female_svm_radial <- train(cardio ~ ., data = train_female_df,
tuneGrid = expand.grid(sigma = seq(0.1,0.2,0.1),
C = seq(0.1,1.0,0.5)),
method = "svmRadial",
preProcess = c("center", "scale"),
trControl = trainControl(method = "repeatedcv",
number = 15, repeats = 2,allowParallel = T))
|
32550223d1b2f19b74527c64e86484e624a26b7b | d17f8ecb0a36d31e316a724b578443c461db65dd | /H2ORF.R | dbb871f507ae4b078f53aed4f7706814e2fc3dfe | [] | no_license | cjferba/Kaggle-West-Nile-Virus-Prediction | abaec70952b753baa1c0f85cbaaece1852b8e029 | 82a40c22bacb1c12e90448f664f8de17c0ab772e | refs/heads/master | 2021-01-10T13:41:32.770728 | 2015-10-25T19:55:42 | 2015-10-25T19:55:42 | 44,927,015 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,821 | r | H2ORF.R | #install.packages("mefa", repos="http://R-Forge.R-project.org")
library(mefa)
library(lubridate)
path <- "input/"
train = read.csv(paste0(path,"train.csv"),header=TRUE,stringsAsFactors = T)
test = read.csv(paste0(path,"test.csv"),header=TRUE,stringsAsFactors = T)
weather = read.csv(paste0(path,"weather.csv"),header=TRUE,stringsAsFactors = T)
spray = read.csv(paste0(path,"spray.csv"),header=TRUE)
subm = read.csv(paste0(path,"west_nile_v6 (1).csv"),header=TRUE,stringsAsFactors = F)
weather[(weather == " ")] <- NA
weather[(weather == "M")] <- NA
weather[(weather == "-")] <- NA
weather[(weather == "T")] <- NA
weather[(weather == " T")] <- NA
weather[(weather == " T")] <- NA
weather$Water1 = NULL
weather$Depth = NULL
weather$SnowFall = NULL
weather$Sunrise = NULL
weather$Sunset = NULL
weather$Depart = NULL
#Get the nearest station
train$Station <- ifelse((((train$Latitude-41.995)^2 + (train$Longitude + 87.933)^2) <
((train$Latitude-41.786)^2 + (train$Longitude + 87.752)^2)),1,2)
test$Station <- ifelse((((test$Latitude-41.995)^2 + (test$Longitude + 87.933)^2) <
((test$Latitude-41.786)^2 + (test$Longitude + 87.752)^2)),1,2)
w1 = weather[weather$Station ==1,]
w2 = weather[weather$Station ==2,]
#Replace NA's with the nearest value above
W1 <- rbind(w1[2,],w1)
W1 <- fill.na(W1)
W1 <- W1[-1,]
rownames(W1) <- NULL
W2 <- rbind(w2[2,],w2)
W2 <- fill.na(W2)
W2 <- W2[-1,]
rownames(W2) <- NULL
Weather <- rbind(W1,W2)
for(i in c(3:9,11:16)){
Weather[,i] <- as.numeric(Weather[,i])
}
Weather[,10] <- factor(Weather[,10])
train <- merge.data.frame(train,Weather)
test <- merge.data.frame(test,Weather)
test <- test[with(test,order(Id)),]
train$day<-as.numeric(day(as.Date(train$Date)))
train$dayofyear<-as.numeric(yday(as.Date(train$Date)))
#train$month<-factor(month(as.Date(train$Date)))
train$dayofweek<-as.factor(wday(as.Date(train$Date)))
train$year <- as.factor(year(as.Date(train$Date)))
train$week <- as.integer(week(as.Date(train$Date)))
test$day<-as.numeric(day(as.Date(test$Date)))
test$dayofyear<-as.numeric(yday(as.Date(test$Date)))
#test$month<-factor(month(as.Date(test$Date)))
test$dayofweek<-as.factor(wday(as.Date(test$Date)))
test$year <- as.factor(year(as.Date(test$Date)))
test$week <- as.integer(week(as.Date(test$Date)))
library(h2o)
localH2O <- h2o.init(nthreads = -1,max_mem_size = '7g')
test.hex <- as.h2o(localH2O,test)
train.hex <- as.h2o(localH2O,train)
# model <- h2o.randomForest(x=c(4:11,14:32),y = 13,data = train.hex,
# mtries = 18,
# sample.rate = 0.5,
# classification = T,ntree = 500,verbose = T)
for(i in 1:20){
model <- h2o.deeplearning(x=c(4:11,14:32),#nfolds = 3,
balance_classes = F,variable_importances = T,
y=27,#13,
data = train.hex,
classification=T,
activation="RectifierWithDropout",
hidden=c(1340,612,256,180),
hidden_dropout_ratio=c(0.5,0.4,0.3,0.25),#c(0.55,0.55,0.55),
input_dropout_ratio=0.18,#0.02,
epochs=50,#50,
l1=1e-5,
l2=1e-5,
rho=0.99,
epsilon=1e-8,
train_samples_per_iteration=1800,#2000,
max_w2=10,
seed=1)
model@model$confusion
pred <- h2o.predict(model,test.hex)
p <- as.data.frame(pred)
summary(p)
subm[,2] =( p[,3]+ subm[,2])/2
print(i)
}
summary(subm)
write.csv(subm,file="wNileVirusRF.csv",row.names=FALSE)
|
20ddc44c23090540a2a7db2b790106e1d23f9421 | 72427aee24b2ec3ffd6eb93d72bfb1aa1baa81ca | /cachematrix.R | 94447c4e7d92f0895a821c52b68abeb5ff327c9f | [] | no_license | Jake-Kim71/ProgrammingAssignment2 | 981af0ac14dfd6ad107b89928d3448f27a776bbc | 8d34436bad74ad7d37eb08fd989b25d129b6bc71 | refs/heads/master | 2021-07-01T06:47:50.275416 | 2017-09-22T01:06:23 | 2017-09-22T01:06:23 | 104,399,959 | 0 | 0 | null | 2017-09-21T21:18:06 | 2017-09-21T21:18:05 | null | UTF-8 | R | false | false | 1,869 | r | cachematrix.R |
## By Jake Kim - Coursera R-programming wk3 assignment
## makeCacheMatrix funtion:
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv_matrix <- NULL # initialize the inverse matrix as NULL
set <- function(y) { # set the value of the matrix
x <<- y
inv_matrix <<- NULL
}
get <- function() x # get the matrix from above set function
set_inverse <- function(inverse) inv_matrix <<- inverse #set the inverse matrix of the given matrix
get_inverse <- function() inv_matrix # get the inverse matrix of the given matrix
list(set = set, get = get,
set_inverse = set_inverse,
get_inverse = get_inverse)
}
## cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
inv_matrix <- x$get_inverse()
if(!is.null(inv_matrix)) { # if the inverse matris is not null(if there is a value in cache)
message("getting cached data") # print Message getting cached data"
return(inv_matrix) # retrun the inverse matrix
}
data <- x$get() # if there is no value in the cache, get the input matrix data
inv_matrix <- solve(data, ...) # get the inverse matrix from solve function
x$set_invers(inv_matrix) # set the inverse matrix
inv_matrix # return the inverse matrixx
}
|
6950926b2c849682418dd869502e08c276b4a706 | acdb4641a5fe28bfe145dcdd84ba8badbc69a355 | /run_analysis.R | 02ee7b5fb2d5550da78a4419cb3f130b75f78b37 | [] | no_license | bigtunacan/GettingAndCleaningData | ff97c81b7cde4ca6d6fac77141e81b6aeeadb035 | 32b1331591854a74f472cceb113182152165b6f8 | refs/heads/master | 2021-01-01T18:54:50.315449 | 2015-07-26T19:52:57 | 2015-07-26T19:52:57 | 39,480,650 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,304 | r | run_analysis.R | # This line of code will set the working directory to
# match the directory of this source file
setwd(dirname(sys.frame(1)$ofile))
if(!file.exists("data")){ dir.create("data")}
if(!file.exists("data/data.zip")){
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/data.zip", method = "curl")
setwd("./data/")
unzip("data.zip")
setwd("..")
}
# read in top level mapping files and add some user friendly column names
features <- read.csv("data/UCI HAR Dataset/features.txt", header = FALSE, sep = " ", col.names = c("feature_id", "feature_name"))
activity_labels <- read.csv("data/UCI HAR Dataset/activity_labels.txt", header = FALSE, sep = " ", col.names = c("activity_id", "activity_name"))
# read in train files
xtrain <- read.fwf("data/UCI HAR Dataset/train/X_train.txt", rep(16, 561))
ytrain <- read.csv("data/UCI HAR Dataset/train/y_train.txt", header = FALSE)
subject_train <- read.csv("data/UCI HAR Dataset/train/subject_train.txt", header = FALSE)
# read in test files
xtest <- read.fwf("data/UCI HAR Dataset/test/X_test.txt", rep(16, 561))
ytest <- read.csv("data/UCI HAR Dataset/test/y_test.txt", header = FALSE)
subject_test <- read.csv("data/UCI HAR Dataset/test/subject_test.txt", header = FALSE)
# 1. Merges the training and the test sets to create one data set.
# First I'm rbind-ing the data with similar columns
# then mapping on human readable column names
x <- rbind(xtrain, xtest)
colnames(x) = features$feature_name # This maps on the feature names for Step #4
y <- rbind(ytrain, ytest)
colnames(y) = "activity_id"
subject <- rbind(subject_train, subject_test)
colnames(subject) = "subject_id"
# At this point all of the datasets (x,y,subject) should have the same number or rows
# so I cbind them up into one unified set of data
merged_data <- cbind(x,y,subject)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# Note: I kept activity_id and subject_id around for now to make later steps easier.
mean_std_cols <- grep(".*(mean\\(\\)|std|activity_id|subject_id).*", colnames(merged_data))
merged_data <- merged_data[,mean_std_cols]
# 3. Uses descriptive activity names to name the activities in the data set
merged_data$activity_name <- activity_labels[merged_data$activity_id, 2]
# This cleans up the activity_id column since I no longer need it
merged_data <- merged_data[,!(names(merged_data) %in% c("activity_id"))]
# 4. Appropriately labels the data set with descriptive variable names.
# I mapped the activity_labels on earlier, now I'm just cleaning up a bit
# to make things more human readable
columns <- colnames(merged_data)
columns <- gsub("\\()", "", columns)
columns <- gsub("-std", "StdDev", columns)
columns <- gsub("-mean", "Mean", columns)
columns <- gsub("^t", "Time", columns)
columns <- gsub("^f", "Frequency", columns)
columns <- gsub("-", "", columns)
colnames(merged_data) <- columns
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(plyr)
avg <- ddply(merged_data, .(merged_data$subject_id, activity_name), function(i) colMeans(i[, 1:66]))
write.table(avg, "tiny_data.txt", row.name=FALSE) |
f5e4f98a53f305c7e414ddbea9bf6b326864c496 | 6b769ade12829c97f7aa9930265418bede361967 | /R/table15_10.R | 20324ace7a145535f39a43370eb31dc2c60fac33 | [] | no_license | brunoruas2/gujarati | 67b579a4fde34ae1d57f4a4fd44e5142285663c1 | a532f2735f9fa8d7cd6958e0fc54e14720052fd4 | refs/heads/master | 2023-04-07T01:15:45.919702 | 2021-04-04T15:10:58 | 2021-04-04T15:10:58 | 284,496,240 | 14 | 6 | null | null | null | null | UTF-8 | R | false | false | 288 | r | table15_10.R | #' Table 15_10
#'
#' Estimating the Index Ii from the Standard Normal CDF
#'
#'
#' @docType data
#' @usage data('Table15_10')
#' @format
#'
#' \itemize{
#' \item \strong{P}P are from Table 15.5;
#' \item \strong{I} I = F^-1(P) are estimated from the standard normal CDF
#' }
'Table15_10'
|
3b03efdcb55202db84e7e3c2c58da6532392ae7e | 371625687f067baacf648a725b1649df98f5f542 | /writeCSV.R | 11eb67891f0bf8b453ed0be4096b138745ff0fb2 | [] | no_license | jfroeschke/CoralPredictiveModel | 55dc1c77b2287e5c178ebd26b1ffb3f4170fc875 | 0e7fc569ec4d7512d49239d28e06d628c36e52c5 | refs/heads/master | 2021-01-23T02:15:04.565958 | 2017-11-22T14:28:46 | 2017-11-22T14:28:46 | 85,975,148 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,131 | r | writeCSV.R | ##write occurrence data as csv
## Read in coral data with environmental variables values extracted to dataframe
## Read in Coral and Extracted Environmental data at each coral location (all species)
#### See section 3.1 - 3.5 of maxextentcoralv010.R for information on the preparation of this shapefile
library(rgdal)
CoralNOAAsp <- readOGR("X:/Data_John/coral/maxent/gulfcoral/CoralNOAAsp.shp", layer="CoralNOAAsp")
## create taxa specfic data sets
coraldf <- CoralNOAAsp@data
coraldf2 <- cbind(coordinates(CoralNOAAsp), coraldf)
###get nrows by group
Octocoral <- subset(coraldf2, Name=="Octocoral") #6371
Stonycoral <- subset(coraldf2, Name=="Stony coral") #1571
Blackcoral <- subset(coraldf2, Name=="Black coral") #2223
Octocoralxy <- data.frame(
taxa=rep("Octocoral", nrow(Octocoral)),
x=Octocoral[,1],
y=Octocoral[,2])
Blackcoralxy <- data.frame(
taxa=rep("Blackcoral", nrow(Blackcoral)),
x=Blackcoral[,1],
y=Blackcoral[,2])
Stonycoralxy <- data.frame(
taxa=rep("Stonycoral", nrow(Stonycoral)),
x=Stonycoral[,1],
y=Stonycoral[,2])
write.csv(Blackcoralxy,
"X:/Data_John/coral/CoralPredictiveModel/csv/BlackCoral.csv",
row.names=FALSE)
write.csv(Octocoralxy,
"X:/Data_John/coral/CoralPredictiveModel/csv/Octocoral.csv",
row.names=FALSE)
write.csv(Stonycoralxy,
"X:/Data_John/coral/CoralPredictiveModel/csv/Stonycoral.csv",
row.names=FALSE)
##This will export file for use in the maxent program
# presencePoints <- data.frame(coordinates(tmptrain))
# Species <- data.frame(taxa=rep("BlackCoral", 1257))
# BlackCoral <- cbind(Species, presencePoints)
# write.csv(BlackCoral,
# "X:/Data_John/coral/CoralPredictiveModel/csv/BlackCoral.csv",
# row.names=FALSE)
#
# ##write occurrence data as csv
# presencePoints <- data.frame(coordinates(tmptrain))
# Species <- data.frame(taxa=rep("BlackCoral", 1257))
# BlackCoral <- cbind(Species, presencePoints)
# write.csv(BlackCoral,
# "X:/Data_John/coral/CoralPredictiveModel/csv/BlackCoral.csv",
# row.names=FALSE) |
7d206ca90171523bdd781b93adae11710819a915 | 5e821282973b62d9092cb9086e3651d0b56bd354 | /sbatch_skill_against_reference.R | 4390b580163f133989af040fcb32dc3ca0193702 | [] | no_license | jonasbhend/Rdump | 80e9348b454c249a8aa4e0d1efe69680e16c5a1e | e24483bf69867c5a25ef1c55244cdf9c1ad6998c | refs/heads/master | 2022-09-27T22:30:35.909822 | 2022-09-19T15:03:57 | 2022-09-19T15:27:22 | 49,267,714 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,870 | r | sbatch_skill_against_reference.R | ## script to compute skill metrics from debiased or raw forecast metrics
## load packages
library(myhelpers)
library(easyVerification)
library(methods) ## for processing with Rscript
library(biascorrection)
## library(veri)
library(verification)
## short names of the skill scores computed against the reference forecast
scores <- c('FairRpss', 'FairCrpss', 'EnsRpss', 'EnsCrpss', 'EnsRmsess')
## lookup table: easyVerification score name -> human-readable long name
## (used for output metadata/labels; deliberately covers more scores than
## the `scores` vector above)
scorelist <- list(Ens2AFC='generalized discrimination score',
                  EnsCorr='correlation',
                  EnsCrpss='continuous ranked probability skill score',
                  FairCrpss='fair continuous ranked probability skill score',
                  FairCrps='mean fair continuous ranked probability score',
                  EnsMae='mean absolute error',
                  EnsMaess='mean absolute error skill score',
                  EnsMe='mean error (bias)',
                  EnsMse='mean squared error',
                  EnsMsess='mean squared error skill score',
                  EnsRmse='root mean squared error',
                  EnsRmsess='root mean sqaured error skill score',
                  EnsRps='ranked probability score',
                  EnsRpss='ranked probability skill score',
                  FairRpss='fair ranked probability skill score',
                  EnsSprErr='spread error ratio',
                  EnsTrend="mean error in linear trend",
                  EnsCond="conditional error",
                  EnsVarlog="log ratio of variances",
                  EnsStdfrac="fraction of standard deviation",
                  FairSprErr='fair spread error ratio',
                  EnsRocss.cat1='ROC area skill score (lower tercile)',
                  EnsRocss.cat2='ROC area skill score (middle tercile)',
                  EnsRocss.cat3='ROC area skill score (upper tercile)',
                  EnsIgnss="ignorance skill score")
## scorefunction for use with veriApply
scorefun <- function(f, o, score, ref, prob=NULL, ...){
if (length(grep('Rps', score)) == 1 | score == 'EnsRocss' | score == 'EnsRoca'){
return(veriApply(score, f, o, fcst.ref=ref, ensdim=4, tdim=5, prob=prob))
} else {
return(veriApply(score, f, o, fcst.ref=ref, ensdim=4, tdim=5, ...))
}
}
## initialise
## scratch directory from the environment (read here for downstream use)
scratchdir <- Sys.getenv('SCRATCH')
## read command line arguments:
args <- commandArgs(trailingOnly=TRUE)
## check if there are any command line arguments; if none are supplied, fall
## back to a hard-coded example configuration (handy for interactive debugging)
if (length(args) == 0){
  args <- c('UCAN-PP-LR-15PC',
            'ecmwf-system4',
            'ERA-INT',
            'CDD',
            'IT',
            'none-crossval1_1981-2010_ERA-INT',
            '05',
            TRUE,
            'none',
            FALSE)
} else if (length(args) < 7){
  stop('Not enough command line arguments')
}
## disaggregate command line arguments:
## 1 forecast model, 2 reference forecast system, 3 observation data set,
## 4 index name, 5 grid, 6 method string (bias correction / crossval setup),
## 7 initialisation month as two-digit string
model <- args[1]
reference <- args[2]
obsname <- args[3]
index <- args[4]
grid <- args[5]
method <- args[6]
initmon <- args[7]
## optional argument 8: seasonal aggregation flag; default runs both
## monthly (FALSE) and seasonal (TRUE) skill computation
if (length(args) > 7){
  seasonals <- as.logical(args[8])
} else {
  seasonals <- c(FALSE, TRUE)
}
## optional argument 9: index calibration method (default 'none')
if (length(args) > 8){
  indmethod <- args[9]
} else {
  indmethod <- 'none'
}
## flag whether index calibration is active
ind.cali <- indmethod != "none"
## optional argument 10: detrending flag; default runs both variants
if (length(args) == 10){
  detrends <- as.logical(args[10])
} else {
  detrends <- c(FALSE, TRUE)
}
## replace placeholder for method string: when the method contains the literal
## year-range placeholder '????-????', look up an existing output file on disk
## and substitute the actual method directory name from its path.
dpath <- '/store/msclim/bhendj/EUPORIAS'
## NOTE(fix): '?' is a regex quantifier, so grep('????-????', ...) is an
## invalid regular expression and errors at runtime; the placeholder must be
## matched literally (fixed=TRUE). Scalar conditions use grepl()/&&.
if (grepl('????-????', method, fixed=TRUE) && !grepl("none", method, fixed=TRUE)){
  mfiles <- system(paste0('ls ', dpath, '/', model,'/', grid, '/monthly/', index, '/', method, '/', index, '_????', initmon, '??_*.nc'), intern=TRUE)
  ## the method name is the second-to-last path component of the matched files
  method <- sapply(strsplit(mfiles, '/'), function(x) x[length(x) - 1])[1]
}
stopifnot(is.character(method))
## specify the file paths: forecasts (fpath), reference (rpath), obs (opath)
fpath <- paste(dpath, model, grid, 'monthly', index, method, sep='/')
rpath <- paste(dpath, reference, grid, 'monthly', index, method, sep='/')
opath <- paste(dpath, obsname, grid, 'monthly', index, sep='/')
## get forecast and observation files matching index, init month, grid, method
fcfiles <- list.files(fpath, pattern=paste0('^', index, '_....', initmon, '.._.*_',grid,'_',method, '.nc'), full.name=TRUE)
reffiles <- list.files(rpath, pattern=paste0('^', index, '_....', initmon, '.._.*_', grid, '_', method, '.nc'), full.name=TRUE)
## for methods containing 'none' the uncorrected files live in the 'none'
## subdirectory, so redirect paths and file lists there
if (length(grep("none", method)) == 1){
  fpath <- paste(dpath, model, grid, 'monthly', index, 'none', sep='/')
  fcfiles <- list.files(fpath, pattern=paste0('^', index, '_....', initmon, '.._.*_',grid,'_none.nc'), full.name=TRUE)
  rpath <- paste(dpath, reference, grid, 'monthly', index, 'none', sep='/')
  reffiles <- list.files(rpath, pattern=paste0('^', index, '_....', initmon, '.._.*_', grid, '_none.nc'), full.name=TRUE)
}
obsfiles <- list.files(opath, pattern=paste0('^', index, '_'), full.name=TRUE)
## check whether there is at least one of each
if (length(fcfiles) < 1) stop('no forecast file found')
if (length(reffiles) < 1) stop("no reference forecast found")
if (length(obsfiles) < 1) stop('no observation file found')
## only use most recent obsfile (by file modification time)
details <- file.info(obsfiles)
details <- details[with(details, order(as.POSIXct(mtime), decreasing=TRUE)),]
obsfiles <- rownames(details)[1]
rm(details)
## enquire file properties (names and dims from forecast)
nc <- nc_open(fcfiles[1])
## get variable name in forecasts (variable is named after the index)
## parfc <- names(nc$var)[names(nc$var) %in% varlist[[index]]]
parfc <- index
## get dimensions of the forecast variable
fdims <- nc$var[[parfc]]$dim
fdimnames <- sapply(fdims, function(x) x$name)
names(fdims) <- fdimnames
## 3 dimensions -> unstructured grid ('ncells' x ens x time), else lon/lat grid
if (length(fdimnames) == 3){
  nlon <- fdims[[grep('ncells', fdimnames)]]$len
  nlat <- 1
} else {
  nlon <- fdims[[grep('lon', fdimnames)]]$len
  nlat <- fdims[[grep('lat', fdimnames)]]$len
}
## ensemble size: the one dimension that is neither time, space, nor bounds
nens <- fdims[[which(!fdimnames %in% grep('tim|lon|lat|ncells|nb2', fdimnames, value=TRUE))]]$len
## get more information on time (i.e. initialisation month)
time.i <- grep('tim', fdimnames)
ntime <- fdims[[time.i]]$len
fctime <- as.Date(nc_time(nc))
## initialisation month as numeric, taken from the first time step
initmn <- as.numeric(format(fctime[1], '%m'))
nc_close(nc)
## enquire properties of the reference forecast file
ncref <- nc_open(reffiles[1])
## NOTE(fix): dimensions were previously read from the (already closed)
## forecast handle `nc`; read them from the reference handle `ncref` instead,
## so that reference files with a different dimension layout are handled.
refdims <- ncref$var[[parfc]]$dim
refdimnames <- sapply(refdims, function(x) x$name)
names(refdims) <- refdimnames
## reference ensemble size: the one dimension that is not time/space/bounds
nrefens <- refdims[[which(!refdimnames %in% grep('tim|lon|lat|ncells|nb2', refdimnames, value=TRUE))]]$len
reftime <- as.Date(nc_time(ncref))
## more complex approach to account for incomplete time series (e.g. downscaling):
## keep only calendar months present in both forecast and reference whose time
## stamps fall on day >= 14 (apparently used as a marker of complete months --
## TODO confirm this convention against the file producers)
fc.day <- as.numeric(format(fctime, '%d'))
ref.day <- as.numeric(format(reftime, '%d'))
fc.month <- format(fctime, '%m')
ref.month <- format(reftime, '%m')
fullmonths <- intersect(ref.month[ref.day >= 14], fc.month[fc.day >= 14])
ncomplete <- length(fullmonths)
nc_close(ncref)
## variable name and times from observations (variable named after the index)
nc <- nc_open(obsfiles[1])
## parobs <- names(nc$var)[names(nc$var) %in% varlist[[index]]]
parobs <- index
obstime <- as.Date(nc_time(nc))
nc_close(nc)
## set up file connections (one open handle per forecast/reference/obs file)
fc.con <- lapply(as.list(fcfiles), nc_open)
ref.con <- lapply(as.list(reffiles), nc_open)
obs.con <- lapply(as.list(obsfiles), nc_open)
## get forecast times; list elements are named by initialisation year
fc.times <- lapply(fc.con, function(x) as.Date(nc_time(x)))
names(fc.times) <- sapply(fc.times, function(x) format(x[1], '%Y'))
ref.times <- lapply(ref.con, function(x) as.Date(nc_time(x)))
names(ref.times) <- sapply(ref.times, function(x) format(x[1], '%Y'))
## year-month strings used for matching forecasts, reference, and obs
ref.ym <- lapply(ref.times, format, '%Y%m')
obs.ym <- format(obstime, '%Y%m')
fc.ym <- lapply(fc.times, format, '%Y%m')
## check whether forecasts are in obs (and in the reference forecast),
## restricted to the complete months determined above
is.in.obs <- sapply(fc.ym, function(x) all(x[substr(x, 5, 6) %in% fullmonths] %in% obs.ym & x[substr(x, 5, 6) %in% fullmonths] %in% unlist(ref.ym)))
ref.in.obs <- sapply(ref.ym, function(x) all(x[substr(x, 5, 6) %in% fullmonths] %in% obs.ym & x[substr(x, 5, 6) %in% fullmonths] %in% unlist(fc.ym)))
## tolerate at most two forecast files without matching observations
stopifnot(sum(!is.in.obs) < 3)
## reduce to overlapping set
fcfiles <- fcfiles[is.in.obs]
fc.con <- fc.con[is.in.obs]
fc.times <- fc.times[is.in.obs]
reffiles <- reffiles[ref.in.obs]
ref.con <- ref.con[ref.in.obs]
ref.times <- ref.times[ref.in.obs]
## number of years
nyears <- length(fcfiles)
years <- as.numeric(names(fc.times))
## parse the calibration period (e.g. '1981-2010') from the method string:
## second-to-last underscore-separated token, split at '-'
mtmp <- strsplit(method, '_')[[1]]
mtmp <- as.numeric(strsplit(mtmp[max(1, length(mtmp) - 1)], '-')[[1]])
## if no period is encoded, fall back to 1981-2014 (or all available years)
if (is.na(mtmp)[1]){
  myears <- if (length(years) > 30) years[years %in% 1981:2014] else years
} else {
  myears <- mtmp[1]:mtmp[2]
}
## get the forecast ensemble members (minimum ensemble size across files)
nens <- min(sapply(fc.con, function(x) x$dim[[which(! names(x$dim) %in% grep("tim|lon|lat|ncells|nb2|bnd", names(x$dim), value=TRUE))]]$len))
nrefens <- min(sapply(ref.con, function(x) x$dim[[which(! names(x$dim) %in% grep("tim|lon|lat|ncells|nb2|bnd", names(x$dim), value=TRUE))]]$len))
## read in the observations (corresponding time steps)
if (length(obsfiles) == 1){
  ## target array: lon x lat x complete months per forecast x years
  obs <- array(NA, c(nlon, nlat, ncomplete, nyears))
  ## fc.i: per forecast year, which complete-month time steps exist in obs
  fc.i <- sapply(fc.times, function(x) {
    xmd <- format(x, '%Y%m')
    xmd <- xmd[format(x, '%m') %in% fullmonths]
    return(xmd %in% unlist(sapply(obstime, format, '%Y%m')))
  })
  ## obs.i: which observation time steps correspond to any forecast time step
  obs.i <- sapply(obstime, function(x) format(x, '%Y%m') %in% unlist(sapply(fc.times, function(x) format(x[1:ncomplete], '%Y%m'))) & format(x, '%m') %in% fullmonths)
  ## unstructured grids have 2 dims (cell, time), regular grids 3 (lon, lat, time)
  if (length(obs.con[[1]]$dim) == 2){
    obs.tmp <- ncvar_get(obs.con[[1]], parobs)[,obs.i]
  } else {
    obs.tmp <- ncvar_get(obs.con[[1]], parobs)[,,obs.i]
  }
  ## fill only the time steps present in obs (logical mask expanded over space)
  obs[fc.i[rep(1:nrow(fc.i), each=nlon*nlat), ]] <- as.vector(obs.tmp)
  ## reorder to month x lon x lat x year
  obs <- aperm(obs, c(3,1,2,4))
} else {
  stop('Multiple observation files not implemented yet')
}
## fix units: temperatures with max below 200 are presumably degrees Celsius,
## convert to Kelvin to match the forecasts
if (index %in% c('tas', 'tasmax', 'tasmin') & max(obs, na.rm=T) < 200){
  obs <- obs + 273.15
}
## read in all the data: forecasts (ffcst) and reference forecasts (rref),
## both as month x lon x lat x ensemble x year arrays
ffcst <- array(NA, c(ncomplete, nlon, nlat, nens, nyears))
rref <- array(NA, c(ncomplete, nlon, nlat, nrefens, nyears))
## 3 dims -> unstructured grid (cell, ens, time); else (lon, lat, ens, time).
## Reading starts at the first complete month and spans ncomplete months.
if (length(fc.con[[1]]$dim) == 3) {
  for (i in 1:nyears){
    ffcst[,,,,i] <- aperm(ncvar_get(fc.con[[i]], parfc,
                                    start=c(1, 1, min(which(format(fc.times[[i]], '%m') %in% fullmonths))),
                                    count=c(-1,nens,ncomplete)), c(3,1,2))
    rref[,,,,i] <- aperm(ncvar_get(ref.con[[i]], parfc,
                                   start=c(1,1, min(which(format(ref.times[[i]], '%m') %in% fullmonths))),
                                   count=c(-1,nrefens,ncomplete)), c(3,1,2))
  }
} else {
  for (i in 1:nyears){
    ffcst[,,,,i] <- aperm(ncvar_get(fc.con[[i]], parfc,
                                    start=c(1,1,1, min(which(format(fc.times[[i]], '%m') %in% fullmonths))),
                                    count=c(-1,-1,nens,ncomplete)), c(4,1,2,3))
    rref[,,,,i] <- aperm(ncvar_get(ref.con[[i]], parfc,
                                   start=c(1,1,1, min(which(format(ref.times[[i]], '%m') %in% fullmonths))),
                                   count=c(-1,-1,nrefens,ncomplete)), c(4,1,2,3))
  }
}
## also subset fc.times and ref.times to the complete months only
fc.times <- lapply(fc.times, function(x) x[format(x, '%m') %in% fullmonths])
ref.times <- lapply(ref.times, function(x) x[format(x, '%m') %in% fullmonths])
## label dims: rows = month strings, 5th dim = initialisation years
rownames(ffcst) <- unique(sapply(fc.times, format, '%m'))[1:ncomplete]
dimnames(ffcst)[[5]] <- names(fc.times)
rownames(rref) <- unique(sapply(ref.times, format, '%m'))[1:ncomplete]
dimnames(rref)[[5]] <- names(ref.times)
## get the grid data for display of forecasts
nc <- fc.con[[1]]
## rotated-pole grids (eobs): read rotated coordinates plus pole position
if (length(grep('eobs', grid)) == 1){
  lon <- nc$dim$rlon$vals
  lat <- nc$dim$rlat$vals
  plon <- ncatt_get(nc, 'rotated_pole', 'grid_north_pole_longitude')$val
  plat <- ncatt_get(nc, 'rotated_pole', 'grid_north_pole_latitude')$val
} else if (length(nc$dim) == 3) {
  ## unstructured grid: lon/lat stored as variables
  ## NOTE(review): plon/plat are not set in this branch -- downstream code
  ## referencing them after this path would fail; confirm intentional
  lon <- ncvar_get(nc, 'lon')
  lat <- ncvar_get(nc, 'lat')
} else {
  ## regular lon/lat grid: coordinates are dimension values, no rotated pole
  lon <- nc$dim$lon$vals
  lat <- nc$dim$lat$vals
  plon <- plat <- NULL
}
rm(nc)
## close file connections (desperate debugging)
sapply(fc.con, nc_close)
sapply(ref.con, nc_close)
sapply(obs.con, nc_close)
rm(fc.con, ref.con, obs.con)
## determine the calibration strategy encoded in the method string:
## exactly one of crossval / forward / block (or none)
crossval <- length(grep('crossval', method)) == 1
forward <- length(grep("forward", method)) == 1
block <- length(grep("block", method)) == 1
stopifnot(sum(forward, crossval, block) <= 1)
strategy <- list(nfcst=length(fc.times),
                 type=c("none", 'crossval', 'block', 'forward')[1 + 1*crossval + 2*block + 3*forward])
## block length for crossval/block methods, e.g. 'crossval5' -> 5 (default 1)
if (crossval | block) {
  suppressWarnings(strategy$blocklength <- as.numeric(gsub('_.*', '', gsub('.*crossval', '', method))))
  if (is.na(strategy$blocklength)) strategy$blocklength <- 1
}
## indices of forecast years inside the calibration period encoded in the
## method string (placeholder '????-????' means the full forecast range)
if (strategy$type != "none"){
  fcyears <- as.numeric(names(fc.times))
  syears <- strsplit(method, '_')
  if (syears[[1]][2] == "????-????"){
    syears <- range(as.numeric(names(fc.times)), na.rm=T)
  } else {
    syears <- as.numeric(strsplit(syears[[1]][length(syears[[1]]) - 1], '-')[[1]])
  }
  strategy$indices <- which(fcyears %in% syears[1]:syears[2])
}
## loop over seasonal or monthly forecast skill
## For each aggregation mode (seasonal 3-month means vs monthly), optionally
## detrend, optionally bias-correct, compute each requested score against the
## reference forecast, and write the result to a NetCDF file in `outdir`.
for (seasonal in seasonals){
  outdir <- paste(dpath, 'skill_against_reference', grid, if (seasonal) 'seasonal' else 'monthly', index, sep='/')
  if (seasonal){
    ## compute three-monthly aggregates
    ## EAF-22 keeps the full length so a JJAS "rainy season" row can be
    ## appended; other grids lose 2 lead times to the 3-month window
    if (grid == 'EAF-22'){
      nred <- 0
    } else {
      nred <- 2
    }
    fcst.seas <- array(NA, dim(ffcst) - c(nred,0,0,0,0))
    ref.seas <- array(NA, dim(rref) - c(nred,0,0,0,0))
    obs.seas <- array(NA, dim(obs) - c(nred,0,0,0))
    for (i in 1:(nrow(ffcst) - 2)){
      fcst.seas[i,,,,] <- colMeans(ffcst[i+0:2,,,,,drop=F], dims=1)
      ref.seas[i,,,,] <- colMeans(rref[i+0:2,,,,,drop=F], dims=1)
      obs.seas[i,,,] <- colMeans(obs[i + 0:2,,,,drop=F], dims=1)
    }
    if (grid == 'EAF-22'){
      ## last row holds the June-September (rainy season) mean
      mii <- which(as.numeric(format(fc.times[[1]], '%m')) %in% 6:9)
      fcst.seas[nrow(fcst.seas),,,,] <- colMeans(ffcst[mii,,,,,drop=F], dims=1)
      ref.seas[nrow(fcst.seas),,,,] <- colMeans(rref[mii,,,,,drop=F], dims=1)
      obs.seas[nrow(fcst.seas),,,] <- colMeans(obs[mii,,,,drop=F], dims=1)
    }
    ## rearrange time for output
    fcst.seastimes <- sapply(fc.times, function(x) format(x[1:ncomplete], "%Y-%m-%d"))[setdiff(1:ncomplete, 1:2),,drop=F]
    ## add in rainy season for eastern Africa
    if (grid == 'EAF-22'){
      fcst.seastimes <- fcst.seastimes[c(1:nrow(fcst.seastimes), NA, nrow(fcst.seastimes)), ]
    }
    fcst.seastimes <- array(0, dim(fcst.seastimes)) + as.Date(fcst.seastimes)
  } else {
    ## monthly mode: just truncate to the complete months
    fcst.seas <- ffcst[1:ncomplete,,,,,drop=F]
    ref.seas <- rref[1:ncomplete,,,,,drop=F]
    obs.seas <- obs[1:ncomplete,,,,drop=F]
    fcst.seastimes <- sapply(fc.times, function(x) format(x[1:ncomplete], '%Y-%m-%d'))
    fcst.seastimes <- array(0, dim(fcst.seastimes)) + as.Date(fcst.seastimes)
  }
  ## mask forecasts and obs where more than 80% of the years are missing
  fcst.mask <- rowMeans(is.na(fcst.seas)*1, dims=3) <= .2
  ref.mask <- rowMeans(is.na(ref.seas)*1, dims=3) <= .2
  obs.mask <- rowMeans(is.na(obs.seas)*1, dims=3) <= .2
  ## mask according to observation climatology
  if (index %in% c('HDD', 'CDD', 'HDDch')){
    ## new criterion at least 67% of forecasts in climatology are non-zero
    nclim <- min(30, dim(obs.seas)[4])
    obs.mask <- obs.mask & rowMeans(obs.seas[,,,1:nclim, drop=F] > 0, dims=3, na.rm=T) > 0.67
    ##obs.mask <- obs.mask & rowMeans(obs.seas, dims=3, na.rm=T) > 0.1
    ## this amounts to ~3 HDD per month, or 9 HDD per season
  } else if (index %in% c('FD', 'SD', 'HD')){
    obs.mn <- rowMeans(obs.seas, dims=3, na.rm=T)
    obs.mask <- obs.mask & obs.mn > 0.05 & obs.mn < 0.95
  } else if (index == 'WDF') {
    nclim <- min(30, dim(obs.seas)[4])
    obs.mask <- obs.mask & rowMeans(obs.seas[,,,1:nclim, drop=F] > 0, dims=3, na.rm=T) > 0.67 & rowMeans(obs.seas[,,,1:nclim, drop=F] < 1, dims=3, na.rm=T) > 0.67
  }
  ## like this, all years are masked out (and therefore the corresponding scores also)
  obs.seas[rep(!(obs.mask & fcst.mask & ref.mask), length=length(obs.seas))] <- NA
  ## set up temporary directory (random suffix avoids collisions between jobs)
  tmpdir <- paste0(scratchdir, '/skill_', index, '_', grid, '_', method, '_initmon', initmon, '_', ceiling(runif(1, min=0, max=9999)))
  if (! file.exists(tmpdir)) dir.create(tmpdir)
  ## loop through raw / detrended forecasts and obs
  for (detrend in detrends){
    if (detrend){
      ## detrend obs: remove the linear trend, keep the mean; series with
      ## any NA are set entirely to NA
      obs.seas <- aperm(apply(obs.seas, 1:3, function(y){
        if (all(!is.na(y))){
          mean(y) + lm(y ~ seq(along=y))$res
        } else {
          NA*y
        }
      }), c(2,3,4,1))
      ## detrend forecasts
      fcst.seas <- aperm(apply(fcst.seas, 1:4, function(y){
        if(all(!is.na(y))){
          mean(y) + lm(y ~ seq(along=y))$res
        } else {
          NA * y
        }
      }), c(2,3,4,5,1))
      ref.seas <- aperm(apply(ref.seas, 1:4, function(y){
        if(all(!is.na(y))){
          mean(y) + lm(y ~ seq(along=y))$res
        } else {
          NA * y
        }
      }), c(2,3,4,5,1))
    }
    ## figure out which years are used for verification
    yind <- apply(!is.na(fcst.seas), 5, any) & apply(!is.na(obs.seas), 4, any)
    strategy$nfcst <- min(strategy$nfcst, sum(yind))
    strategy$indices <- strategy$indices[strategy$indices <= sum(yind)]
    ## find out whether seasonal/monthly indices need calibration
    if (ind.cali){
      fcst.cali <- debiasApply(fcst=aperm(fcst.seas[,,,,which(years %in% myears), drop=F], c(2,3,1,5,4)),
                               obs=aperm(obs.seas[,,,which(years %in% myears), drop=F], c(2,3,1,4)),
                               fcst.out=aperm(fcst.seas, c(2,3,1,5,4)),
                               method=indmethod,
                               strategy=strategy,
                               type=ifelse(strategy$type == 'none', 'calibration', 'prediction'))
      fcst.seas <- aperm(fcst.cali, c(3,1,2,5,4))
      ref.cali <- debiasApply(fcst=aperm(ref.seas[,,,,which(years %in% myears), drop=F], c(2,3,1,5,4)),
                              obs=aperm(obs.seas[,,,which(years %in% myears), drop=F], c(2,3,1,4)),
                              fcst.out=aperm(ref.seas, c(2,3,1,5,4)),
                              method=indmethod,
                              strategy=strategy,
                              type=ifelse(strategy$type == 'none', 'calibration', 'prediction'))
      ref.seas <- aperm(ref.cali, c(3,1,2,5,4))
      gc()
    }
    ## set up output file to write to
    nc <- nc_open(obsfiles[1])
    nc2 <- nc_open(fcfiles[1])
    if (length(nc$dim) == 2){
      ncells.nc <- nc$dim$ncells
    } else if (length(grep('eobs', grid)) == 1){
      lon.nc <- nc2$dim$rlon
      lat.nc <- nc2$dim$rlat
    } else {
      lon.nc <- nc2$dim$lon
      lat.nc <- nc2$dim$lat
    }
    time.nc <- ncdim_def(name='time',
                         units='days since 1980-01-01',
                         vals=as.numeric(fcst.seastimes[, ncol(fcst.seastimes)] - as.Date('1980-01-01')),
                         unlim=TRUE,
                         longname=if (seasonal) 'end date of 3-monthly seasons' else 'end date of month')
    if (length(nc$dim) == 2){
      dims.nc <- list(ncells.nc, time.nc)
    } else {
      dims.nc <- list(lon.nc, lat.nc, time.nc)
    }
    ## create variables
    vars.nc <- list()
    ## check whether this is a rotated grid
    if (length(grep('eobs', grid)) == 1){
      vars.nc[['lon']] <- ncvar_def('lon', 'degrees_east', dims.nc[1:2], missval=-1e20, longname='longitude')
      vars.nc[['lat']] <- ncvar_def('lat', 'degrees_north', dims.nc[1:2], missval=-1e20, longname='latitude')
      vars.nc[['rotated_pole']] <- ncvar_def('rotated_pole', units='', dim=list(), missval=NULL, prec='char')
    } else if (length(nc$dim) == 2){
      vars.nc[['lon']] <- nc$var$lon
      vars.nc[['lat']] <- nc$var$lat
    }
    ## one NetCDF variable per score (EnsRocss gets one per tercile category,
    ## each with a .sigma uncertainty companion)
    for (score in scores){
      if (score == 'EnsRocss'){
        for (i in 1:3) {
          rscore <- paste0(score, '.cat', i)
          vars.nc[[rscore]] <- ncvar_def(rscore, '1', dims.nc, missval=-1e20, longname=scorelist[[rscore]])
          vars.nc[[paste0(rscore, '.sigma')]] <- ncvar_def(paste0(rscore, '.sigma'), '1',
                                                           dims.nc, missval=-1e20,
                                                           longname=scorelist[[rscore]])
        }
      } else {
        vars.nc[[score]] <- ncvar_def(score, '1', dims.nc, missval=-1e20, longname=scorelist[[score]])
        if (score %in% c(scores[grep('pss$', scores)], 'EnsTrend', 'EnsCond', 'EnsVarlog')){
          vars.nc[[paste(score, 'sigma', sep='.')]] <- ncvar_def(paste(score, 'sigma', sep='.'), '1', dims.nc, missval=-1e20, longname=scorelist[[score]])
        }
      }
    }
    ## get additional information on content of files
    ## dataset names are taken from the path component after 'EUPORIAS'
    fc.name <- sapply(strsplit(fcfiles, '/')[1], function(x) x[grep('EUPORIAS', x) + 1])
    ref.name <- sapply(strsplit(reffiles, '/')[1], function(x) x[grep('EUPORIAS', x) + 1])
    obs.name <- sapply(strsplit(obsfiles, '/')[1], function(x) x[grep('EUPORIAS', x) + 1])
    ofile <- paste0(
      paste(
        paste0(index,
               ifelse(detrend, '_detrend', ''),
               ifelse(ind.cali, paste0('_', toupper(indmethod)), '')),
        method,
        paste0(fc.name, '-ref-', ref.name),
        'vs', obs.name,
        paste(range(names(fc.times)), collapse='-'),
        sep="_"), '_initmon', initmon, '.nc')
    outfile <- paste(tmpdir, ofile, sep='/')
    desc <- paste0('Skill in ',
                   ifelse(detrend, 'detrended ', ''),
                   ifelse(ind.cali, paste0('bias corrected (', indmethod, ')'), ''),
                   ifelse(seasonal, 'seasonal ', 'monthly '),
                   index,
                   ' from ',
                   ifelse(method == 'none', '', paste0('bias-corrected (', method, ') ')),
                   gsub('-', ' ', toupper(fc.name)),
                   ' forecasts verified against ', obs.name,
                   ' with reference forecast ', ref.name,
                   ' for ', paste(range(names(fc.times)[yind]), collapse='-'))
    ## create netcdf file
    ncout <- nc_create(outfile, vars.nc)
    ## write variables
    if (length(nc$dim) == 2){
      ncvar_put(ncout, varid='lon', ncvar_get(nc, 'lon'))
      ncvar_put(ncout, varid='lat', ncvar_get(nc, 'lat'))
    } else if (length(grep('eobs', grid)) == 1){
      if ('Actual_longitude' %in% names(nc$var)){
        ncvar_put(ncout, varid='lon', ncvar_get(nc, 'Actual_longitude'))
        ncvar_put(ncout, varid='lat', ncvar_get(nc, 'Actual_latitude'))
      } else {
        ## no precomputed true coordinates: derive them from the rotated grid
        library(geocors)
        plon <- ncatt_get(nc, 'rotated_pole', attname='grid_north_pole_longitude')$val
        ## Fix: plat previously read 'grid_north_pole_longitude' again
        ## (copy-paste error), yielding wrong latitudes from geocors.trafo
        plat <- ncatt_get(nc, 'rotated_pole', attname='grid_north_pole_latitude')$val
        lola <- geocors.trafo(rep(nc$dim$rlon$vals, nc$dim$rlat$len),
                              rep(nc$dim$rlat$vals, each=nc$dim$rlon$len),
                              from.type='rotpol',
                              from.pars=list(plon=as.numeric(plon), plat=as.numeric(plat)),
                              to.type='lonlat')
        ncvar_put(ncout, varid='lon', lola$lon)
        ncvar_put(ncout, varid='lat', lola$lat)
      }
      ## fill in the rotated grid coordinates
      ## NOTE(review): pole position is hard-coded (-162, 39.25) rather than
      ## copied from the input file -- confirm this holds for all eobs grids
      ncatt_put(ncout, varid='rotated_pole', attname='grid_mapping_name',
                attval='rotated_latitude_longitude', prec='text')
      ncatt_put(ncout, varid='rotated_pole', attname='grid_north_pole_longitude',
                attval=-162, prec='double')
      ncatt_put(ncout, varid='rotated_pole', attname='grid_north_pole_latitude',
                attval=39.25, prec='double')
    }
    ## add in a global attribute with description
    ncatt_put(ncout, varid=0, attname='description', attval=desc, prec='text')
    nc_close(ncout)
    ## compute scores and write to output file
    for (score in scores){
      ncout <- nc_open(outfile, write=TRUE)
      print(score)
      print(system.time( sfo <- scorefun(f=fcst.seas[,,,,yind, drop=F],
                                         o=obs.seas[,,,yind,drop=F],
                                         score=score,
                                         ref=ref.seas[,,,,yind,drop=F],
                                         prob=1:2/3,
                                         strategy=strategy)))
      if (is.list(sfo)){
        ## list-valued scores carry uncertainty estimates; sanitise infinities
        sfo <- lapply(sfo, function(x){
          x[x == -Inf] <- -9999
          x[abs(x) == Inf] <- NA
          return(x)
        })
        if (score == 'EnsRocss'){
          for (i in 1:3){
            ncvar_put(ncout, varid=paste0(score, '.cat', i), vals=aperm(sfo[[paste0('cat', i)]], c(2,3,1)))
            ncvar_put(ncout, varid=paste0(score, '.cat', i, '.sigma'), vals=aperm(sfo[[paste0('cat', i, '.sigma')]], c(2,3,1)))
          }
        } else {
          ncvar_put(ncout, varid=score, vals=aperm(sfo[[1]], c(2,3,1)))
          ncvar_put(ncout, varid=paste(score, 'sigma', sep='.'), vals=aperm(sfo[[2]], c(2,3,1)))
        }
      } else {
        ## set negative infinity (division by zero) to large negative value
        sfo[sfo == -Inf] <- -9999
        sfo[abs(sfo) == Inf] <- NA
        ## write to output file
        if (length(sfo) == length(obs.seas[,,,yind])){
          sfo <- rowMeans(sfo, dims=3)
        }
        ncvar_put(ncout, varid=score, vals=aperm(sfo, c(2,3,1)))
      }
      nc_close(ncout)
      ## readjust grid specification for variables
      ## this is very important to happen AFTER the file connection to the NetCDF
      ## is closed from R - otherwise the output is shifted latitudinally
      if (length(nc$dim) == 2 | length(grep('eobs', grid)) == 1){
        if (length(nc$dim) == 2){
          atts <- c(gridtype='unstructured', coordinates="lon lat")
        } else {
          atts <- c(grid_mapping='rotated_pole')
        }
        for (attn in names(atts)){
          if (score == 'EnsRocss'){
            for (i in 1:3) system(paste0("ncatted -h -a ", attn, ",", score, ".cat", i, ",c,c,'",atts[attn],"' ", outfile))
          } else if (is.list(sfo)){
            system(paste0("ncatted -h -a ", attn, ",", score, ".sigma,c,c,'",atts[attn],"' ", outfile))
            system(paste0("ncatted -h -a ", attn, ",", score, ",c,c,'",atts[attn],"' ", outfile))
          } else {
            system(paste0("ncatted -h -a ", attn, ",", score, ",c,c,'",atts[attn],"' ", outfile))
          }
        }
      }
      ## make sure we free as much memory as possible/reasonable
      rm(sfo, ncout)
      gc()
    } ## end of loop on scores
    ## copy temporary file to permanent location
    ## make sure the output directory exists
    if (!file.exists(outdir)) dir.create(outdir, recursive=TRUE)
    system(paste0('mv ', outfile, ' ', outdir, '/', ofile))
  } ## end of loop on detrend
  ## remove temporary directory
  system(paste0('rm -rf ', tmpdir))
} ## end of loop on seasonal/monthly skill
## surface any warnings accumulated during the batch run, then quit without
## saving the workspace (script is intended for non-interactive execution)
warnings()
print('Finished successfully')
q(save='no')
|
38a0de2912c526be742fd529ed77a0ee59b266b4 | 6e80b38f49df58293aa40e4ab6d1a9b87de73193 | /Network Code/Network_Analysis_woOptimization_7_26_17.R | a854fb28c470f8adcc111ddf83570f22dea02db7 | [] | no_license | anooshrees/Jackson-SSP-Code | 981676de895a3dfcb873500e12a074a9f00a1b78 | a0e4277e8a90b665be43679460aadde8647e0ae9 | refs/heads/master | 2020-03-23T00:39:10.788298 | 2018-07-13T17:44:30 | 2018-07-13T17:44:30 | 140,876,683 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 52,683 | r | Network_Analysis_woOptimization_7_26_17.R | ################################
#
# Processing for TF networks
#
################################
## Load precomputed geneset metadata and footprint/network analysis results.
## NOTE: load() returns the *names* of the restored objects (a character
## vector), not the objects themselves; the objects land in the global env.
geneset.info <- load("~/Downloads/geneset.info.RData")
network.data <- load("~/Downloads/aging_fpanalysis_gene.target.mode.radius_p900_consensus.filtered_mbf1_peaks.PD_explicit (1).RData")
## Read every per-geneset enrichment table into the global environment as one
## data.frame per file, named after the file minus its ".txt" extension.
## Fix: list.files(pattern=) expects a regex, not a glob -- the old "*.txt"
## also matched names merely containing "txt"; anchor on a literal ".txt"
## suffix and use the same anchored pattern when stripping the extension.
temp <- list.files(pattern = "\\.txt$", path = "~/Downloads/genesetAnalysis/")
addresses <- paste("~/Downloads/genesetAnalysis/", temp, sep = "")
list2env(
  lapply(setNames(addresses, make.names(gsub("\\.txt$", "", temp))),
         read.csv, sep = "\t", header = TRUE, stringsAsFactors = FALSE), envir = .GlobalEnv)
## Convenience handles onto the loaded sample metadata and RNA summary stats.
atac.samples <- sample.info$samp
rna.samples <- colnames(source.data$RNA)[3:ncol(source.data$RNA)]
gene.stats <- summary.data$RNA
################################
#
# Filtering
#
################################
# Some general filtering
# by ATAC-seq FDR and P-value
fp.data <- fp.data[intersect(which(fp.data$FDR.atac < 0.05), which(fp.data$PValue.atac < 0.01)), ]
# by delta binding and delta expression
# keep only the top 1% most extreme binding changes, separately for losses
# (negative deltaProp) and gains (positive deltaProp)
fp.data.negative <- fp.data[fp.data$HY_HO.deltaProp < 0, ]
fp.data.negative <- fp.data.negative[abs(fp.data.negative$HY_HO.deltaProp) > quantile(abs(fp.data.negative$HY_HO.deltaProp),prob=1-1/100),]
fp.data.positive <- fp.data[fp.data$HY_HO.deltaProp > 0, ]
fp.data.positive <- fp.data.positive[abs(fp.data.positive$HY_HO.deltaProp) > quantile(abs(fp.data.positive$HY_HO.deltaProp),prob=1-1/100),]
fp.data <- rbind(fp.data.negative, fp.data.positive)
# When selecting genes for networks, there are three options for ensuring each TF connects to only one gene
# NOTE(review): the three assignments below overwrite each other -- run only
# the one you want; executed top-to-bottom, option 3 is what remains.
# 1) the closest gene:
fp.data.single <- do.call(rbind,lapply(split(fp.data,fp.data$fpco),function(chunk) chunk[which.min(chunk$DistancetoTSS),]))
# 2) the gene with highest logFC expression in relation to TF binding
fp.data.single <- do.call(rbind,lapply(split(fp.data,fp.data$tf.name),function(chunk) chunk[which.max(abs(gene.stats[gene.stats$GeneName %in% chunk$GeneName, "logFC.rna"])),]))
# 3) the gene whose accessibility matches expression the closest (maximizing logFC product)
fp.data.single <- do.call(rbind,lapply(split(fp.data,fp.data$tf.name),function(chunk) chunk[which.max(abs(gene.stats[gene.stats$GeneName %in% chunk$GeneName, "logFC.rna"]*chunk$logFC.atac)),]))
#########################
#
# Filtering by gene sets
#
#########################
# NOTE(review): interactive section -- each of the geneset blocks below
# overwrites the same five variables (all.names, atac.enrichment,
# rna.enrichment, genes, gene.color). Run exactly ONE block for the
# collection you want to analyse; sourcing the whole file leaves WP active.
# GOBP
all.names <- geneset.names.gobp
atac.enrichment <- atac_do.gobp_enrichment
rna.enrichment <- rna_do.gobp_enrichment
genes <- geneset.genes.gobp
gene.color <- data.frame(unlist(geneset.colors.gobp), stringsAsFactors = FALSE)
# GOCC
all.names <- geneset.names.gocc
atac.enrichment <- atac_do.gocc_enrichment
rna.enrichment <- rna_do.gocc_enrichment
genes <- geneset.genes.gocc
gene.color <- data.frame(unlist(geneset.colors.gocc), stringsAsFactors = FALSE)
# GOMF
all.names <- geneset.names.gomf
atac.enrichment <- atac_do.gomf_enrichment
rna.enrichment <- rna_do.gomf_enrichment
genes <- geneset.genes.gomf
gene.color <- data.frame(unlist(geneset.colors.gomf), stringsAsFactors = FALSE)
# GWAS
all.names <- geneset.names.gwas
atac.enrichment <- atac_do.gwas_enrichment
rna.enrichment <- rna_do.gwas_enrichment
genes <- geneset.genes.gwas
gene.color <- data.frame(unlist(geneset.colors.gwas), stringsAsFactors = FALSE)
# IMM_COARSE
all.names <- geneset.names.imm_coarse
atac.enrichment <- atac_do.imm_coarse_enrichment
rna.enrichment <- rna_do.imm_coarse_enrichment
genes <- geneset.genes.imm_coarse
gene.color <- data.frame(unlist(geneset.colors.imm_coarse), stringsAsFactors = FALSE)
# IMM_FINE
all.names <- geneset.names.imm_fine
atac.enrichment <- atac_do.imm_fine_enrichment
rna.enrichment <- rna_do.imm_fine_enrichment
genes <- geneset.genes.imm_fine
gene.color <- data.frame(unlist(geneset.colors.imm_fine), stringsAsFactors = FALSE)
# INTERPRO
all.names <- geneset.names.interpro
atac.enrichment <- atac_do.interpro_enrichment
rna.enrichment <- rna_do.interpro_enrichment
genes <- geneset.genes.interpro
gene.color <- data.frame(unlist(geneset.colors.interpro), stringsAsFactors = FALSE)
# KEGG
all.names <- geneset.names.kegg
atac.enrichment <- atac_do.kegg_enrichment
rna.enrichment <- rna_do.kegg_enrichment
genes <- geneset.genes.kegg
gene.color <- data.frame(unlist(geneset.colors.kegg), stringsAsFactors = FALSE)
# MSIGDB
all.names <- geneset.names.msigdb
atac.enrichment <- atac_do.msigdb_enrichment
rna.enrichment <- rna_do.msigdb_enrichment
genes <- geneset.genes.msigdb
gene.color <- data.frame(unlist(geneset.colors.msigdb), stringsAsFactors = FALSE)
# PID
all.names <- geneset.names.pid
atac.enrichment <- atac_do.pid_enrichment
rna.enrichment <- rna_do.pid_enrichment
genes <- geneset.genes.pid
gene.color <- data.frame(unlist(geneset.colors.pid), stringsAsFactors = FALSE)
# REACTOME
all.names <- geneset.names.reactome
atac.enrichment <- atac_do.reactome_enrichment
rna.enrichment <- rna_do.reactome_enrichment
genes <- geneset.genes.reactome
gene.color <- data.frame(unlist(geneset.colors.reactome), stringsAsFactors = FALSE)
# VP2008
all.names <- geneset.names.vp2008
atac.enrichment <- atac_do.vp2008_enrichment
rna.enrichment <- rna_do.vp2008_enrichment
genes <- geneset.genes.vp2008
gene.color <- data.frame(unlist(geneset.colors.vp2008), stringsAsFactors = FALSE)
# VP2015
all.names <- geneset.names.vp2015
atac.enrichment <- atac_do.vp2015_enrichment
rna.enrichment <- rna_do.vp2015_enrichment
genes <- geneset.genes.vp2015
gene.color <- data.frame(unlist(geneset.colors.vp2015), stringsAsFactors = FALSE)
# VP2015 members (column names differ from the other collections; the general
# reformat code below assumes the same layout -- verify before running)
# WP
all.names <- geneset.names.wp
atac.enrichment <- atac_do.wp_enrichment
rna.enrichment <- rna_do.wp_enrichment
genes <- geneset.genes.wp
gene.color <- data.frame(unlist(geneset.colors.wp), stringsAsFactors = FALSE)
# General code to reformat color matrix
# map each color row to its module ID (via the module name stored in the
# rownames) and then to a representative gene name
gene.color$Module.ID <- (all.names %>% slice(match(rownames(gene.color), Module.Name)) %>% select(Module.ID))$Module.ID
colnames(gene.color) <- c("color", "Module.ID")
gene.color$GeneName <- (genes %>% slice(match(gene.color$Module.ID, Module.ID)) %>% select(GeneName))$GeneName
# Refilter fp.data -- may not be necessary...
# restrict the single-target footprint table to genes in the chosen geneset
fp.data.example <- fp.data.single[fp.data.single$GeneName %in% genes[genes$Module.ID %in% all.names$Module.ID, "GeneName"], ]
fp.data.example$Module.ID <- (genes %>% slice(match(fp.data.example$GeneName,GeneName)) %>% select(Module.ID))$Module.ID
################################
#
# Function that creates all networks for all gene sets
#
################################
# genesets <- c("gobp", "gocc", "gomf", "gwas", "imm_coarse", "imm_fine", "interpro", "kegg", "msigdb", "pid", "reactome", "vp2008", "vp2015", "wp")
# genesets <- c("kegg", "msigdb", "pid", "reactome", "vp2008", "vp2015", "wp")
# geneset collections to render networks for (passed to generate_networks)
genesets <- c("kegg", "vp2008", "gobp", "reactome")
# Build and save one TF-target network PDF per RNA-enriched module in each
# requested geneset collection.
#
# x: character vector of geneset collection suffixes (e.g. "kegg", "gobp");
#    for each one the matching geneset.names.*, *_do.*_enrichment,
#    geneset.genes.* and geneset.colors.* objects are looked up with get().
#
# Side effects: writes one PDF per module to ~/Desktop and prints progress.
# Relies on globals: fp.data, gene.stats, plus the per-geneset objects above.
generate_networks <- function(x){
  for(geneset.name in x){ # Fix: iterate over the argument, not the global `genesets`
    # since data is presented differently for some of the gene sets, individual debugging may be necessary
    all.names <- get(paste("geneset.names.", geneset.name, sep = ""))
    atac.enrichment <- get(paste("atac_do.", geneset.name, "_enrichment", sep = ""))
    rna.enrichment <- get(paste("rna_do.", geneset.name, "_enrichment", sep = ""))
    ## keep only modules significantly enriched in RNA (hypergeometric FDR < 0.05)
    all.names <- all.names[all.names$Module.Name %in% rna.enrichment[rna.enrichment$hypergeom.fdr < 0.05, "Module.Name"], ]
    genes <- get(paste("geneset.genes.", geneset.name, sep= ""))
    genes <- genes[genes$Module.ID %in% all.names$Module.ID, ]
    gene.color <- data.frame(unlist(get(paste("geneset.colors.", geneset.name, sep = ""))), stringsAsFactors = FALSE)
    gene.color$Module.Name <- names(get(paste("geneset.colors.", geneset.name, sep = "")))
    gene.color <- gene.color[gene.color$Module.Name %in% rna.enrichment[rna.enrichment$hypergeom.fdr < 0.05, "Module.Name"], ]
    gene.color$Module.ID <- (all.names %>% slice(match(gene.color$Module.Name, Module.Name)) %>% select(Module.ID))$Module.ID
    colnames(gene.color)[1] <- c("color")
    print(paste("Working on geneset", geneset.name))
    for(current.module in all.names$Module.ID){
      ## Fix: `current_geneset` was referenced but never defined inside the
      ## function; it is the set of member genes of the current module.
      current_geneset <- genes[genes$Module.ID %in% current.module, "GeneName"]
      fp.data.example <- fp.data[fp.data$GeneName %in% current_geneset, ]
      if(nrow(fp.data.example) > 2){
        fp.data.example$Module.ID <- (genes %>% slice(match(fp.data.example$GeneName,GeneName)) %>% select(Module.ID))$Module.ID
        ## one edge per footprint: keep the target gene closest to the TSS
        fp.data.example <- do.call(rbind,lapply(split(fp.data.example,fp.data.example$fpco),function(chunk) chunk[which.min(chunk$DistancetoTSS),]))
        df <- data.frame(fp.data.example %>% select(tf_distype,GeneName), stringsAsFactors = FALSE)
        df <- df[!duplicated(df), ]
        nw <- graph_from_data_frame(df,directed=T)
        ## edge colour/width encode the change in binding proportion (deltaProp)
        edge_attr(nw,"deltaProp") <- fp.data.example$HY_HO.deltaProp[intersect(which(fp.data.example$GeneName %in% df$GeneName), which(fp.data.example$tf_distype %in% df$tf_distype))]
        edge_attr(nw,"deltaProp_color")[which(edge_attr(nw,"deltaProp") < 0)] <- "blue"
        edge_attr(nw,"deltaProp_color")[which(edge_attr(nw,"deltaProp") > 0)] <- "red"
        ## node colour/size encode the target gene's RNA logFC; TF nodes are
        ## drawn as fixed-size transparent squares instead
        vertex_attr(nw,"logFC")[which(vertex_attr(nw, "name") %in% fp.data.example$GeneName)] <- (gene.stats %>% slice(match(vertex_attr(nw, "name"),GeneName)) %>% select(logFC.rna))$logFC.rna
        vertex_attr(nw,"logFC_color")[which(vertex_attr(nw,"logFC") < 0)] <- "blue"
        vertex_attr(nw,"logFC_color")[which(vertex_attr(nw,"logFC") > 0)] <- "red"
        vertex_attr(nw, "is.tf") <- vertex_attr(nw,"name") %in% fp.data.example$tf_distype
        ## NOTE(review): assigning NULL into an atomic vector errors when any
        ## TF exists -- the plotting section later uses "white"; confirm intent
        vertex_attr(nw,"logFC_color")[vertex_attr(nw,"is.tf")] <- NULL
        vertex_attr(nw,"transparency")[vertex_attr(nw,"is.tf")] <- "transparent"
        vertex_attr(nw,"transparency")[!vertex_attr(nw,"is.tf")] <- 0.2
        vertex_attr(nw,"logFC") <- rescale(abs(vertex_attr(nw,"logFC")), c(5, 30))
        vertex_attr(nw,"logFC")[vertex_attr(nw, "is.tf")] <- 0
        nw.gplot <- ggnet2(nw,
                           node.size = vertex_attr(nw,"logFC"),
                           label = F, # labels are added with ggplot below
                           mode = "fruchtermanreingold",
                           arrow.size = 5,
                           arrow.type = "open",
                           arrow.gap = 0.025,
                           edge.color = edge_attr(nw,"deltaProp_color"),
                           edge.size = rescale(abs(edge_attr(nw, "deltaProp")), c(0.1, 2)),
                           node.alpha = 0.8,
                           node.color = unlist(vertex_attr(nw,"logFC_color")),
                           alpha = vertex_attr(nw, "transparency"),
                           legend.position = "none")
        current.module.name <- all.names[all.names$Module.ID %in% current.module, "Module.Name"]
        ## Fix: the output filename was a constant, so every module overwrote
        ## the previous PDF; name the file after the geneset and module instead
        pdf(paste("~/Desktop/", geneset.name, "_", gsub("[[:punct:]]", " ", gsub(",.*$", "", current.module.name)), ".pdf", sep = ""))
        p <- nw.gplot +
          # replot the nodes drawn by ggnet2: TFs as squares, targets as circles
          geom_point(shape=as.numeric(sub("TRUE",22,sub("FALSE",21,vertex_attr(nw,"is.tf")))),
                     size=as.numeric(sub("TRUE",8,sub("FALSE",9,vertex_attr(nw,"is.tf")))),
                     color=sub("TRUE","maroon2",sub("FALSE","transparent",vertex_attr(nw,"is.tf"))), fill=NA) +
          # add node labels, styled differently for TFs vs targets
          geom_text(aes(label=label),
                    size=as.numeric(sub("FALSE",2.5,sub("TRUE",2,vertex_attr(nw,"is.tf")))),
                    color=sub("FALSE","gray40",sub("TRUE","maroon4",vertex_attr(nw,"is.tf"))),hjust=-0.2)
        print(p)
        dev.off()
        print(paste("Just finished network for ", current.module, "with name ", current.module.name))
      }
    }
  }
}
generate_networks(genesets)
################################
#
# Filtering by metadata
#
################################
# NOTE(review): the pairs of lines below are mutually exclusive alternatives
# (each pair overwrites fp.data.example in opposite directions); run only the
# single filter you want, not the whole section.
# upper or lower half of logCPM
fp.data.example <- fp.data.example[fp.data.example$logCPM.atac > median(fp.data.example$logCPM.atac), ]
fp.data.example <- fp.data.example[fp.data.example$logCPM.atac < median(fp.data.example$logCPM.atac), ]
# only positive logFC or only negative
fp.data.example <- fp.data.example[fp.data.example$logFC.atac > 0, ]
fp.data.example <- fp.data.example[fp.data.example$logFC.atac < 0, ]
# Distal or proximal location
fp.data.example <- fp.data.example[fp.data.example$DisType %in% "Proximal", ]
fp.data.example <- fp.data.example[fp.data.example$DisType %in% "Distal", ]
# Low FDR and P Val for gene expression
fp.data.example <- fp.data.example[fp.data.example$GeneName %in% gene.stats[intersect(which(gene.stats$FDR.rna < 0.05), which(gene.stats$PValue.rna < 0.01)), "GeneName"], ]
# Only negative logFC or only positive (for RNA)
fp.data.example <- fp.data.example[fp.data.example$GeneName %in% gene.stats[gene.stats$logFC.rna > 0, "GeneName"], ]
fp.data.example <- fp.data.example[fp.data.example$GeneName %in% gene.stats[gene.stats$logFC.rna < 0, "GeneName"], ]
################################
#
# Plotting
#
################################
library(igraph)
library(ggplot2)
library(GGally)
library(dplyr)
library(sna)
library(network)
library(igraph) # again to preserve certain functions
# Build a TF -> gene network from the filtered footprint table and colour
# nodes by the geneset colour (targets) or white (TFs and unmapped nodes).
df <- data.frame(fp.data.example %>% select(tf.name,GeneName), stringsAsFactors = FALSE)
nw <- graph_from_data_frame(df,directed=T)
vertex_attr(nw,"logFC") <- (gene.stats %>% slice(match(vertex_attr(nw,"name"),GeneName)) %>% select(logFC.rna))$logFC.rna
vertex_attr(nw, "color")[vertex_attr(nw, "name") %in% df$GeneName] <- (gene.color %>% slice(match(vertex_attr(nw,"name"), GeneName)) %>% select(color))$color
vertex_attr(nw, "color")[!vertex_attr(nw, "name") %in% df$GeneName] <- rep("white", length(which(!vertex_attr(nw, "name") %in% df$GeneName)))
vertex_attr(nw, "is.tf") <- vertex_attr(nw,"name") %in% df$tf.name
# quick default render for a sanity check
ggnet2(nw)
nw.gplot <- ggnet2(nw,
                   # node.size = vertex_attr(nw,"logFC"),
                   # node.shape = vertex_attr(nw,"shape"),
                   label = F, # I use ggplot (below) to add labels
                   mode = "fruchtermanreingold",
                   arrow.size = 5,
                   arrow.type = "open",
                   arrow.gap = 0.025,
                   # edge.color = edge_attr(nw,"color"),
                   edge.size = 0.5,
                   node.alpha = 0.8,
                   node.color = unlist(vertex_attr(nw,"color")),
                   legend.position = "none")
p <- nw.gplot +
  # This replots the nodes, adding edges to the nodes drawn by ggnet2
  # I have included an attribute “is.tf” that informs whether a node is a TF or a target, and use this to plot TFs and targets with different shapes and sizes, and to format the labels differently
  geom_point(shape=as.numeric(sub("TRUE",22,sub("FALSE",21,vertex_attr(nw,"is.tf")))),
             size=as.numeric(sub("TRUE",8,sub("FALSE",9,vertex_attr(nw,"is.tf")))),
             color=sub("TRUE","maroon2",sub("FALSE","white",vertex_attr(nw,"is.tf"))),alpha=0.6) +
  # This adds labels to the nodes
  geom_text(aes(label=label),
            size=as.numeric(sub("FALSE",2.5,sub("TRUE",2,vertex_attr(nw,"is.tf")))),
            color=sub("FALSE","gray40",sub("TRUE","maroon4",vertex_attr(nw,"is.tf"))),hjust=-0.2)
# Then just plot with p
p
#######################
#
# Optimized Networks
#
#######################
# Networks with proximal/distal annotation
network.data.pd <- load("~/Downloads/geneset.data_playground.p900_consensus.radius_RNA_explicit_fullnet.RData")
# keep the top 50% most extreme mean binding changes, separately for losses
# and gains
path.all.negative <- path.all[path.all$HY_HO.mean_deltaProp < 0, ]
path.all.negative <- path.all.negative[abs(path.all.negative$HY_HO.mean_deltaProp) > quantile(abs(path.all.negative$HY_HO.mean_deltaProp),prob=1-50/100),]
path.all.positive <- path.all[path.all$HY_HO.mean_deltaProp > 0, ]
path.all.positive <- path.all.positive[abs(path.all.positive$HY_HO.mean_deltaProp) > quantile(abs(path.all.positive$HY_HO.mean_deltaProp),prob=1-50/100),]
path.all <- rbind(path.all.positive, path.all.negative)
# NOTE(review): this loop is in a debugging state and will not run as-is:
# the `all.info` assignment is commented out but `all.info` is used below;
# `current.module` is immediately overwritten with a hard-coded pathway;
# `test.table.tcell` must already exist in the workspace; and
# `current.module.name` in the final print is never defined here.
for(geneset.name in unique(path.all$schema)){
  # since data is presented differently for some of the gene sets, individual debugging may be necessary
  # geneset.name <- "reactome"
  # all.info <- path.all[path.all$schema %in% geneset.name, ]
  #
  # print(paste("Working on geneset", geneset.name))
  current_geneset <- test.table.tcell$GeneName
  for(current.module in unique(all.info$pathway)){
    current.module <- "Cellular Senescence" # debugging override -- forces a single module
    fp.data.example <- all.info[all.info$pathway %in% current.module, ]
    # keep the top 25% strongest associations by |signed log odds ratio|
    fp.data.example <- fp.data.example[abs(fp.data.example$signed_logOR) > quantile(abs(fp.data.example$signed_logOR), prob=1-75/100), ]
    if(nrow(fp.data.example) > 2){
      df <- data.frame(fp.data.example %>% select(tf.name,GeneName), stringsAsFactors = FALSE)
      # ***If you want to differentiate between proximal and distal, use:***
      # df <- data.frame(fp.data.example %>% select(tf_distype, GeneName), stringsAsFactors = FALSE)
      df <- df[!duplicated(df), ]
      nw <- graph_from_data_frame(df,directed=T)
      # edge colour encodes direction of mean binding change
      edge_attr(nw,"deltaProp") <- fp.data.example$HY_HO.mean_deltaProp
      edge_attr(nw,"deltaProp_color")[which(edge_attr(nw,"deltaProp") < 0)] <- "blue"
      edge_attr(nw,"deltaProp_color")[which(edge_attr(nw,"deltaProp") > 0)] <- "red"
      gene.names <- unique(df$GeneName)
      tf.names <- unique(df$tf.name)
      # node size/colour encode the target gene's RNA logFC; TFs get logFC 0
      vertex_attr(nw, "logFC")[which(vertex_attr(nw, "name") %in% gene.names)] <- unique(fp.data.example[fp.data.example$GeneName %in% gene.names, "logFC.rna"])
      vertex_attr(nw, "logFC")[which(vertex_attr(nw, "name") %in% tf.names)] <- rep(0, length(vertex_attr(nw, "logFC")[which(vertex_attr(nw, "name") %in% tf.names)]))
      vertex_attr(nw,"logFC_color")[which(vertex_attr(nw,"logFC") < 0)] <- "blue"
      vertex_attr(nw,"logFC_color")[which(vertex_attr(nw,"logFC") > 0)] <- "red"
      # vertex_attr(nw, "color")[!vertex_attr(nw, "name") %in% df$tf.name] <- "white"
      vertex_attr(nw, "is.tf") <- vertex_attr(nw,"name") %in% tf.names
      vertex_attr(nw,"logFC_color")[vertex_attr(nw,"is.tf")] <- "white"
      nw.gplot <- ggnet2(nw,
                         node.size = rescale(abs(vertex_attr(nw,"logFC")), c(5, 30)),
                         # node.shape = vertex_attr(nw,"shape"),
                         label = F, # I use ggplot (below) to add labels
                         mode = "fruchtermanreingold",
                         arrow.size = 5,
                         arrow.type = "open",
                         arrow.gap = 0.025,
                         edge.color = edge_attr(nw,"deltaProp_color"),
                         edge.size = rescale(abs(edge_attr(nw, "deltaProp")), c(0.1, 2)),
                         node.alpha = 0.8,
                         node.color = unlist(vertex_attr(nw,"logFC_color")),
                         legend.position = "none")
      # Then just plot with p
      pdf(paste("~/Desktop/", geneset.name, "_", gsub("[[:punct:]]", " ", gsub(",.*$", "", current.module)), ".pdf", sep = ""))
      p <- nw.gplot +
        # This replots the nodes, adding edges to the nodes drawn by ggnet2
        # I have included an attribute “is.tf” that informs whether a node is a TF or a target, and use this to plot TFs and targets with different shapes and sizes, and to format the labels differently
        geom_point(shape=as.numeric(sub("TRUE",22,sub("FALSE",21,vertex_attr(nw,"is.tf")))),
                   size=as.numeric(sub("TRUE",8,sub("FALSE",9,vertex_attr(nw,"is.tf")))),
                   color=sub("TRUE","maroon2",sub("FALSE","white",vertex_attr(nw,"is.tf"))),alpha=0.6) +
        # This adds labels to the nodes
        geom_text(aes(label=label),
                  size=as.numeric(sub("FALSE",2.5,sub("TRUE",2,vertex_attr(nw,"is.tf")))),
                  color=sub("FALSE","gray40",sub("TRUE","maroon4",vertex_attr(nw,"is.tf"))),hjust=-0.2)
      print(p)
      dev.off()
      print(paste("Just finished network for ", current.module, "with name ", current.module.name))
    }
  }
}
# Networks without annotation (just TF names)
network.data.full <- load("~/Downloads/geneset.data_playground.p900_consensus.radius_RNA__fullnet.RData")
# keep the top 25% most extreme mean binding changes, separately for losses
# and gains
path.all.negative <- path.all[path.all$HY_HO.mean_deltaProp < 0, ]
path.all.negative <- path.all.negative[abs(path.all.negative$HY_HO.mean_deltaProp) > quantile(abs(path.all.negative$HY_HO.mean_deltaProp),prob=1-25/100),]
path.all.positive <- path.all[path.all$HY_HO.mean_deltaProp > 0, ]
path.all.positive <- path.all.positive[abs(path.all.positive$HY_HO.mean_deltaProp) > quantile(abs(path.all.positive$HY_HO.mean_deltaProp),prob=1-25/100),]
path.all <- rbind(path.all.positive, path.all.negative)
for(geneset.name in unique(path.all$schema)){
# since data is presented differently for some of the gene sets, individual debugging may be necessary
# geneset.name <- "gobp"
all.info <- path.all[path.all$schema %in% geneset.name, ]
print(paste("Working on geneset", geneset.name))
for(current.module in unique(all.info$pathway)){
fp.data.example <- all.info[all.info$pathway %in% current.module, ]
if(nrow(fp.data.example) > 2){
df <- data.frame(fp.data.example %>% select(tf.name,GeneName), stringsAsFactors = FALSE)
# ***If you want to differentiate between proximal and distal, use:***
# df <- data.frame(fp.data.example %>% select(tf_distype, GeneName), stringsAsFactors = FALSE)
df <- df[!duplicated(df), ]
nw <- graph_from_data_frame(df,directed=T)
edge_attr(nw,"deltaProp") <- fp.data.example$HY_HO.mean_deltaProp
edge_attr(nw,"deltaProp_color")[which(edge_attr(nw,"deltaProp") < 0)] <- "blue"
edge_attr(nw,"deltaProp_color")[which(edge_attr(nw,"deltaProp") > 0)] <- "red"
gene.names <- unique(df$GeneName)
tf.names <- unique(df$tf.name)
vertex_attr(nw, "logFC")[which(vertex_attr(nw, "name") %in% gene.names)] <- unique(fp.data.example[fp.data.example$GeneName %in% gene.names, "logFC.rna"])
vertex_attr(nw, "logFC")[which(vertex_attr(nw, "name") %in% tf.names)] <- rep(0, length(vertex_attr(nw, "logFC")[which(vertex_attr(nw, "name") %in% tf.names)]))
vertex_attr(nw,"logFC_color")[which(vertex_attr(nw,"logFC") < 0)] <- "blue"
vertex_attr(nw,"logFC_color")[which(vertex_attr(nw,"logFC") > 0)] <- "red"
# vertex_attr(nw, "color")[!vertex_attr(nw, "name") %in% df$tf.name] <- "white"
vertex_attr(nw, "is.tf") <- vertex_attr(nw,"name") %in% tf.names
vertex_attr(nw,"logFC_color")[vertex_attr(nw,"is.tf")] <- "white"
nw.gplot <- ggnet2(nw,
node.size = rescale(abs(vertex_attr(nw,"logFC")), c(5, 30)),
# node.shape = vertex_attr(nw,"shape"),
label = F, # I use ggplot (below) to add labels
mode = "fruchtermanreingold",
arrow.size = 5,
arrow.type = "open",
arrow.gap = 0.025,
edge.color = edge_attr(nw,"deltaProp_color"),
edge.size = rescale(abs(edge_attr(nw, "deltaProp")), c(0.1, 2)),
node.alpha = 0.8,
node.color = unlist(vertex_attr(nw,"logFC_color")),
legend.position = "none")
# Then just plot with p
pdf(paste("~/Desktop/SSP/Networks_7_22_17/", "less_detail_", geneset.name, "_", gsub("[[:punct:]]", " ", gsub(",.*$", "", current.module)), ".pdf", sep = ""))
p <- nw.gplot +
# This replots the nodes, adding edges to the nodes drawn by ggnet2
# I have included an attribute “is.tf” that informs whether a node is a TF or a target, and use this to plot TFs and targets with different shapes and sizes, and to format the labels differently
geom_point(shape=as.numeric(sub("TRUE",22,sub("FALSE",21,vertex_attr(nw,"is.tf")))),
size=as.numeric(sub("TRUE",8,sub("FALSE",9,vertex_attr(nw,"is.tf")))),
color=sub("TRUE","maroon2",sub("FALSE","white",vertex_attr(nw,"is.tf"))),alpha=0.6) +
# This adds labels to the nodes
geom_text(aes(label=label),
size=as.numeric(sub("FALSE",2.5,sub("TRUE",2,vertex_attr(nw,"is.tf")))),
color=sub("FALSE","gray40",sub("TRUE","maroon4",vertex_attr(nw,"is.tf"))),hjust=-0.2)
print(p)
dev.off()
print(paste("Just finished network for ", current.module, "with name ", current.module.name))
}
}
}
#######################
#
# Export to Gephi
#
#######################
library(rgexf)
find.edgeid <- function(edgelabel,nodes) grep(paste("^",edgelabel,"$",sep = ""),nodes$label)
# network: export the top network (topnw.all, built earlier in the file)
# to GEXF format so it can be opened in Gephi.
nw <- topnw.all
# nodes: one row per vertex; Gephi expects 1-based integer ids plus labels
nodes <- data.frame(id=seq_len(vcount(nw)),
label=vertex_attr(nw,"name"),
stringsAsFactors = F)
# Per-node data attributes exported alongside the graph
nodeAtt <- data.frame(logFC_color=vertex_attr(nw,"color"),
logFC.rna=vertex_attr(nw,"logFC"))
# specificity=vertex_attr(nw,"specificity"),
# target.scaled_freq=(data.frame(tf.gene=vertex_attr(nw,"name"),stringsAsFactors = F) %>% full_join(ylong_bytargets_freqs %>% select(-target.specificity) %>% mutate(tf.gene=as.character(tf.gene))) %>% slice(match(vertex_attr(nw,"name"),tf.gene)) %>% select(freq) %>% mutate(freq=2*(freq-0.5)) %>% mutate(freq=ifelse(is.na(freq),0,freq)))$freq)
# Visual attributes: TFs drawn as squares, targets as circles; RGBA colors
nodeVizAtt <- list(size=as.numeric(sub("TRUE",5.5,sub("FALSE",6,vertex_attr(nw,"is.tf")))),
shape=sub("TRUE","square",sub("FALSE","circle",vertex_attr(nw,"is.tf"))),
color=as.matrix(setNames(data.frame(t(col2rgb(vertex_attr(nw,"color"))),a=0.8),c("r","g","b","a")))) # color nodes by logFC
# color=as.matrix(setNames(data.frame(t(col2rgb(vec2color.symmetrized(nodeAtt$target.scaled_freq,colorRampPalette(RColorBrewer::brewer.pal(11,"RdBu")[-c(4,7)])(nrow(nodeAtt))))),a=0.8),c("r","g","b","a")))) # color nodes by freq of cell specific targets (-1,1, positive for bcells, negative for tcells, zero for unbiased and for non-tf genes)
# edges
edges.df <-as_data_frame(nw)
edges.dfcolnames: rename igraph's from/to to Gephi's source/target
colnames(edges.df) <- sub("color","slogOR_color",sub("from","source",sub("to","target",colnames(edges.df))))
# Translate endpoint labels into node ids.
# NOTE(review): find.edgeid assumes node labels are unique -- verify.
edges <- sapply(edges.df[,c("source","target")],function(e) as.vector(sapply(e,find.edgeid,nodes)))
edgeAtt <- edges.df %>% select(source,slogOR_color,slogOR,affinity) %>% left_join(ylong_bytargets_freqs %>% mutate(source=as.character(tf.gene)) %>% select(source,freq),by = "source") %>% mutate(target.cum_specificity=sub("TRUE","TCells",sub("FALSE","BCells",(freq-0.5)<0))) %>% select(-source,-freq) # target.cum_specificity labels an edge by the overall cell-specificity of the TF's targets: e.g. if 55% of the targets of TF x are B-cell specific (i.e. more expressed in B cells), all edges from that TF are labeled "BCells"
edgeVizAtt <- list(size=rep(2,ecount(nw))
# color=as.matrix(setNames(data.frame(t(col2rgb(vec2color.symmetrized((edges.df %>% select(source,slogOR_color,slogOR,affinity) %>% left_join(ylong_bytargets_freqs %>% mutate(source=as.character(tf.gene)) %>% select(source,freq),by = "source") %>% mutate(freq=2*(freq-0.5)))$freq,colorRampPalette(colors_redblu10[-9])(nrow(edges.df))))),a=1),c("r","g","b","a")))
# color=setNames(data.frame(t(col2rgb(edges.df$slogOR_color)),a=1),c("r","g","b","a"))
)
# export -- edge attributes are computed above but deliberately left
# commented out of the call below
grfx <- write.gexf(nodes = nodes,
edges = edges,
# edgesAtt = edgeAtt,
nodesAtt = nodeAtt,
# edgesVizAtt = edgeVizAtt,
nodesVizAtt = nodeVizAtt,
output = "test1.gexf",
defaultedgetype = "directed")
###############################
#
# T Cell differentiation geneset
#
###############################
# T-cell differentiation marker table (space-delimited, hence sep=" "
# despite using read.csv); provides GeneName/Antigen columns used below.
test.table.tcell <- read.csv(file = "~/Desktop/SSP/Table1.txt", sep=" ", header = TRUE)
# library(biomaRt)
# tcells.genes <- getBM(attributes = c("ensembl_gene_id","external_gene_name","gene_biotype","chromosome_name"),
# filters = "family",
# values = test.table.tcell$Antigen,
# mart=useMart(biomart="ENSEMBL_MART_ENSEMBL",
# dataset="hsapiens_gene_ensembl",
# host="grch37.ensembl.org"))
# write.table(paste(gsub("-", "", test.table.tcell$Antigen), "_HUMAN", sep=""), file="~/Desktop/protein_names.txt", quote = F,row.names = F,col.names = T)
#########################################
#
# Create ATAC logFC vs. RNA logFC plot to
# prove that T cells further along in
# linear differentiation are more present
# in older individuals
#
#########################################
library(ggplot2)
library(reshape2)
library(MKmisc)
library(pheatmap)
library(ggExtra)
library(gridExtra)
library(gtable)
library(GenomicRanges)
library(dplyr)
library(broom)
sse <- function(x) sum((x-mean(x))^2)
andred <- function(x) {Reduce("&",x)}
orred <- function(x) {Reduce("|",x)}
minmaxnorm <- function(x) {(x - min(x, na.rm=TRUE)) / diff(range(x, na.rm=TRUE))}
minmaxnormc <- function(x) {minmaxnorm(x)-mean(minmaxnorm(x))}
znorm <- function(x) {(x - mean(x)) / sd(x)}
# Color palettes reused by the plots below.
# Six categorical colors (e.g. for HMM states)
colors_hmm6=c("gold", "mediumaquamarine", "bisque", "slategray2", "firebrick1", "green4")
# Ten-step diverging blue-to-red ramp (negative -> positive)
colors_redblu10 = c("#3360a4","#1a8af9","#06b8ff","#b3defb","#ffffff","#fdc82f","#f67028","#e9302a","firebrick3","firebrick4")
# Shades of violet for T-cell subsets
colors_tcells= c("violet", "violetred", "violetred1", "violetred2", "violetred3", "violetred4")
# setwd("/Volumes/emext/Dropbox/Jax/PBMC/DifferentialAnalysis/manuscripts/aging-01/scripts/figures.regen.stm/")
# setwd("~/Dropbox/Jax/PBMC/DifferentialAnalysis/manuscripts/aging-01/scripts/figures.regen.stm/")
# setwd("H:/Dropbox/Jax/PBMC/DifferentialAnalysis/manuscripts/aging-01/scripts/figures.regen.stm/")
# function to relabel samples on-the-fly wrt randomIDs
# Per-sample metadata; rownames become "<sampid>_<type>" for lookups below.
sampinfo <- read.table("~/Desktop/SSP/data/jax/sample.info.comprehensive.txt",header = TRUE,sep = "\t",stringsAsFactors = F) # check data/jax
rownames(sampinfo) <- with(sampinfo,paste(sampid,type,sep = "_"))
# Reclassify "healthy young" (HY) donors older than 40 as middle-aged (HM)
sampinfo[sampinfo$age>40 & sampinfo$group=="HY",]$group <- "HM"
# Random recoding table used to de-identify samples in figure labels;
# the VHY402 -> VHY402D0 substitution patches one mismatched sample id.
random.codes <- read.table("~/Downloads/RandomRecodeAgingStudy.txt", header = T, sep = "\t", quote = "")
random.codes <- data.frame(merge(data.frame(random.codes,sampid=sub("VHY402","VHY402D0",paste(random.codes$Cohort,random.codes$PID,sep = "")),row.names = "sampid"),sampinfo,by.x="row.names", by.y="sampid"))
# NOTE(review): columns 1 and 8 are addressed by position here (sampid and
# sample type after the merge) -- fragile if either input gains columns; verify.
rownames(random.codes) <- with(random.codes,paste(random.codes[,1],random.codes[,8],sep = "_"))
random.codes <- random.codes[, -1]
# Relabel a sample (by rowname) as "<group><RandomID>" on the fly
rand.recode <- function(x) with(random.codes[x,], paste(group,RandomID,sep=""))
# Previously analyzed ATAC-seq data
# da.pbmc.out <- load("~/Downloads/da.pbmc.RData")
#full.results <- read.table("datasources.201702/pbmc_whitelisted_filtered_glm.results.txt", sep = "\t", quote = "", header = T,row.names = "peakco")
# Differential-accessibility GLM results; peaks keyed by "chr_start_end"
full.results <- read.csv("~/Desktop/SSP/DA_7_12_17/full_results_age.csv")
full.results$peakco <- paste(full.results$chr,"_",full.results$start,"_",full.results$end,sep = "")
rownames(full.results) <- full.results$peakco
# Batch-adjusted per-sample ATAC signal at consensus peaks
adj.info <- read.table("~/Desktop/SSP/DA_7_12_17/tcells_merged_consensus_adj_atac_whitelisted.txt",header = T, sep = "\t", quote = "")
adj.info$peakco <- paste(adj.info$chr, "_", adj.info$start, "_", adj.info$end, sep="")
adj.atac <- data.frame(merge(full.results[,c("chr","start","end")],adj.info, by.x = "row.names", by.y = "peakco"),row.names = 1)
# Strip the "_PBMC" suffix to recover bare sample ids.
# NOTE(review): the pattern "*_PBMC$" treats "*" literally at the start of a
# regex -- presumably ".*_PBMC$" or just "_PBMC$" was intended; verify.
atacsamps <- gsub("*_PBMC$", "", colnames(adj.atac[,-c(1:6)]))
# NOTE(review): da.pbmc.out is only created by the load() call commented out
# above; if it does not exist in the session this rm() errors -- verify.
rm(list = da.pbmc.out)
# Previoulsy analyzed RNAseq data (samps common in RNA and filtered ATAC)
da.rna.pbmc.out <- load("~/Downloads/da.rna.pbmc.RData")
rna.glmtop <- read.table("~/Downloads/pbmc_rna.atacpaired_glm.results.txt", sep = "\t", quote = "", header = T)
# Collect data from DA script
atac.glm.tcells$peakco <- paste(atac.glm.tcells$chr, "_", atac.glm.tcells$start, "_", atac.glm.tcells$end, sep="")
rna.glmtop2 <- merge(atac.glm.tcells, adj.info, by = "peakco")
tsspeaks.df$peakco <- paste(tsspeaks.df$chr, "_", tsspeaks.df$start, "_", tsspeaks.df$end, sep="")
rna.glmtop2 <- merge(tsspeaks.df, rna.glmtop2, by = "peakco")
rna.glmtop2 <- merge(annotated.filter.bed[, c("logCPM.atac", "chr", "start", "end")], rna.glmtop2, by = c("chr", "start", "end"))
# Reorganize and rename
colnames(rna.glmtop2)[21:64] <- gsub("*_PBMC$", "", colnames(rna.glmtop2)[21:64])
rna.glmtop2[, which(colnames(rna.glmtop2) %in% c("chr.x", "start.x", "end.x", "peakco", "seqnames", "chr.y", "start.y", "end.y", "start", "end", "width", "chr"))] <- NULL
colnames(rna.glmtop2)[1] <- c("logCPM")
colnames(rna.glmtop2)[7:8] <- c("PValue", "FDR")
rna.glmtop <- rna.glmtop2
rna.glmtop <- rna.glmtop[rna.glmtop$GeneName %in% test.table.tcell$GeneName, ]
rna.glmtop <- do.call(rbind,lapply(split(rna.glmtop,rna.glmtop$GeneName),function(chunk) chunk[which.min(chunk$FDR),]))
rownames(rna.glmtop) <- rna.glmtop$GeneName
# rnasamps <- gsub("_PBMC_subsampled", "", rna.samples)
# atacsamps <- gsub("_PBMC_subsampled", "", atac.samples)
adj.rna <- read.table(file = "~/Desktop/SSP/DA_7_12_17/merged_consensus_adj_rna_whitelisted.txt", sep = "\t", quote = "", header = T)
adj.rna$GeneName <- unique(aging.rna$GeneName)
rpkm <- da.pbmc$raw
rm(list = da.rna.pbmc.out)
samps <- intersect(rnasamps,atacsamps) # Samples that are those present in both datasets
expressed.genes <- rownames(rpkm)
samps <- samps[samps %in% gsub("_PBMC", "", colnames(adj.rna))]
rnasamps <- rnasamps[rnasamps %in% gsub("_PBMC", "", colnames(adj.rna))]
atacsamps <- atacsamps[atacsamps %in% gsub("_PBMC", "", colnames(adj.rna))]
# Splinters TSS peaks out to combine with RNAseq data
# atac.tss <- read.table("datasources.201702/raw_data_tss.peaks_minOver0_hard_stm.txt",quote = "",header = T, sep = "\t")
flankd = 1000
# tss.bed <- GRanges(read.table(paste("../../data/Promoters/tssflank",flankd,"_prom_complete.bed",sep = ""),header = F,sep = "\t",quote = "",col.names = c("chr","start","end","GeneName","score","strand")))
tss.bed <- GRanges(tsspeaks.df)
tss.bed.expressed <- subset(tss.bed,GeneName %in% expressed.genes)
tss_peaks <- findOverlaps(GRanges(full.results[,c("chr","start","end")]),tss.bed.expressed)
# The following ensures gene names are derived from TSS definitions instead of nearest-TSS annotations
full.results.tss_red <- data.frame(GeneName=as.character(tss.bed.expressed[tss_peaks@to]$GeneName),full.results[tss.bed.expressed[tss_peaks@to]$peakco,which(colnames(full.results)!="GeneName")],row.names = NULL)
# full.results.tss_red <- full.results.tss_red[full.results.tss_red$GeneName %in% test.table.tcell$GeneName, ]
# full.results.tss_red <- merge(full.results.tss_red,data.frame(meanExpression=rowMeans(rpkm)),by.x = "GeneName", by.y = "row.names") # to keep peak associated to highest-expressed gene
full.results.tss_red <- unique(full.results.tss_red[order(full.results.tss_red$GeneName,abs(full.results.tss_red$DistancetoTSS)),]) # to keep one peak per gene (multiple genes per peak allowed), kept peak closest to nearest TSS
full.results.tss <- full.results.tss_red[!duplicated(full.results.tss_red$GeneName),]
rownames(full.results.tss) <- full.results.tss$GeneName
rm("full.results.tss_red")
colnames(tsspeaks.df)[1] <- "chr"
colnames(adj.atac)[1:3] <- c("chr", "start", "end")
adj.atac <- adj.atac[, -c(4:6)]
adj.atac.tss <- merge(tsspeaks.df[,c("chr","start","end","GeneName")], adj.atac, by = c("chr","start","end"))
# adj.atac.tss.nodup <- adj.atac.tss[order(adj.atac.tss$GeneName,-rowSums(2^adj.atac.tss[,atacsamps])),c("GeneName",atacsamps)]
# adj.atac.tss.nodup <- adj.atac.tss.nodup[!duplicated(adj.atac.tss.nodup$GeneName),]
samps_pbmc <- paste(samps, "_", "PBMC", sep="")
rnasamps_pbmc <- paste(rnasamps, "_", "PBMC", sep="")
atacsamps_pbmc <- paste(atacsamps, "_", "PBMC", sep="")
atacgroup <- relevel(factor(sampinfo[atacsamps_pbmc[atacsamps_pbmc %in% colnames(adj.atac.tss)],]$group),ref = "HY")
atacsex <- relevel(factor(sampinfo[atacsamps_pbmc,]$sex),ref = "F")
rnagroup <- relevel(factor(sampinfo[rnasamps_pbmc,]$group),ref = "HY")
rnasex <- relevel(factor(sampinfo[rnasamps_pbmc,]$sex),ref = "F")
group <- relevel(factor(sampinfo[samps_pbmc %in% colnames(adj.atac.tss),]$group),ref = "HY")
sex <- relevel(factor(sampinfo[samps_pbmc,]$sex),ref = "F")
##### Permutation tests to derive empirical Pvalues for each gene ATAC/RNA fold change
nperm = 1000
#### RNAseq FC
### Code used to generate perms:
# rna.permlist <- replicate(nperm,adj.rna,simplify = F)
# rna.permfc <- mclapply(rna.permlist, function(Y) apply(Y,1,function(x) diff(aggregate(x, list(group=sample(group)), mean)[,2])),mc.cores = 6)
# save(list = c("rna.permlist","rna.permfc"),file = "datasources.201702/RNAseq_permutation.test_stm.RData")
# Load precomputed permuted fold changes (provides rna.permfc)
rna.permout <- load("~/Downloads/RNAseq_permutation.test_stm.RData")
# Observed FC: per-gene difference of group means (HO - HY) on common samples
row.names(adj.rna) <- adj.rna$GeneName
adj.rna <- adj.rna[which(row.names(adj.rna) %in% test.table.tcell$GeneName), ]
rna.obsfc <- data.frame(logFC.rna.obs=apply(adj.rna[,samps_pbmc[samps %in% gsub("_PBMC", "", colnames(adj.rna))]],1,function(x) diff(aggregate(x, list(group=group), mean)[,2])), row.names = row.names(adj.rna[,samps_pbmc[samps %in% gsub("_PBMC", "", colnames(adj.rna))]]))
# Permuted FC
row.names(rna.obsfc) <- row.names(adj.rna)
rna.permfc.mx <- do.call(cbind, rna.permfc)
# NOTE(review): the logical index below is built from rna.obsfc's rownames but
# applied to rna.permfc.mx's rows -- only aligned if both share the same row
# order/length; verify.
rna.permfc.mx <- data.frame(rna.permfc.mx)[row.names(rna.obsfc) %in% row.names(data.frame(rna.permfc.mx)),]
rna.permfc.mx <- rna.permfc.mx[row.names(rna.permfc.mx) %in% test.table.tcell$GeneName, ]
temp <- rna.obsfc[row.names(rna.obsfc) %in% row.names(data.frame(rna.permfc.mx)), ]
# NOTE(review): rownames are recovered here by matching fold-change VALUES
# (%in% temp), which breaks if two genes share an identical logFC -- verify.
rna.obsfc <- data.frame(logFC.rna.obs = temp, row.names = row.names(rna.obsfc)[rna.obsfc$logFC.rna.obs %in% temp])
# Count permutations as or more extreme than the observed FC (one-sided per sign)
rna.obsfc$logFC.rna.pcounts[rna.obsfc$logFC.rna.obs >= 0] <- rowSums(rna.obsfc$logFC.rna.obs[rna.obsfc$logFC.rna.obs >= 0] >= rna.permfc.mx[which(rna.obsfc$logFC.rna.obs >= 0),])
rna.obsfc$logFC.rna.pcounts[rna.obsfc$logFC.rna.obs < 0] <- rowSums(rna.obsfc$logFC.rna.obs[rna.obsfc$logFC.rna.obs < 0] < rna.permfc.mx[rna.obsfc$logFC.rna.obs < 0,])
# Empirical p-value with +1 pseudo-count, then BH/FDR correction
rna.obsfc$logFC.rna.pvalue <- 1-(rna.obsfc$logFC.rna.pcounts/(1+ncol(rna.permfc.mx)))
rna.obsfc$logFC.rna.fdr <- p.adjust(rna.obsfc$logFC.rna.pvalue, method = "fdr")
rm(list = c(rna.permout,"rna.permfc.mx"))
#### ATACseq TSS peaks FC
### Code used to generate perms:
# atac.permlist <- replicate(nperm,adj.atac.tss[,samps_pbmc],simplify = F)
# atac.permfc <- mclapply(atac.permlist, function(Y) apply(Y,1,function(x) diff(aggregate(x, list(group=sample(group)), mean)[,2])),mc.cores = 6)
###atac.permfc <- lapply(atac.permlist, function(Y) apply(Y,1,function(x) diff(aggregate(x, list(group=sample(group)), mean)[,2])))
# save(list = c("atac.permlist","atac.permfc"),file = "datasources.201702/ATACseq_permutation.test_stm.RData")
# Load precomputed permuted fold changes (provides atac.permfc)
atac.permout <- load("~/Downloads/ATACseq_permutation.test_stm.RData")
# Observed FC: group factor restricted to samples present in adj.atac.tss
group <- relevel(factor(sampinfo[samps_pbmc[samps_pbmc %in% colnames(adj.atac.tss)],]$group),ref = "HY")
atac.obsfc <- data.frame(adj.atac.tss[,c("chr","start","end")],logFC.atac.obs=apply(adj.atac.tss[,colnames(adj.atac.tss) %in% samps_pbmc],1,function(x) diff(aggregate(x, list(group=group), mean)[,2])))
# Permuted FC, row-aligned to the observed table
atac.permfc.mx <- do.call(cbind, atac.permfc)
atac.permfc.mx <- data.frame(atac.permfc.mx)[rownames(atac.obsfc),]
# Count permutations as or more extreme than the observed FC (one-sided per sign)
atac.obsfc$logFC.atac.pcounts[atac.obsfc$logFC.atac.obs >= 0] <- rowSums(atac.obsfc$logFC.atac.obs[atac.obsfc$logFC.atac.obs >= 0] >= atac.permfc.mx[atac.obsfc$logFC.atac.obs >= 0,])
atac.obsfc$logFC.atac.pcounts[atac.obsfc$logFC.atac.obs < 0] <- rowSums(atac.obsfc$logFC.atac.obs[atac.obsfc$logFC.atac.obs < 0] < atac.permfc.mx[atac.obsfc$logFC.atac.obs < 0,])
# Empirical p-value with +1 pseudo-count, then BH/FDR correction
atac.obsfc$logFC.atac.pvalue <- 1-(atac.obsfc$logFC.atac.pcounts/(1+ncol(atac.permfc.mx)))
atac.obsfc$logFC.atac.fdr <- p.adjust(atac.obsfc$logFC.atac.pvalue, method = "fdr")
# Keep the best (lowest-p) record per peak coordinate, dropping NAs
atac.obsfc$peakco <- paste(atac.obsfc$chr,atac.obsfc$start,atac.obsfc$end,sep = "_")
atac.obsfc <- atac.obsfc[order(atac.obsfc$peakco,atac.obsfc$logFC.atac.pvalue),]
atac.obsfc <- atac.obsfc[!duplicated(atac.obsfc$peakco),]
atac.obsfc <- atac.obsfc[which(!is.na(atac.obsfc$logFC.atac.pvalue)), ]
rm(list = c(atac.permout,"atac.permfc.mx"))
#####
# Combines ATAC and RNAseq data/results, assigns Pvalues based on permutation tests
colnames(full.results.tss) <- gsub("_PBMC", "", colnames(full.results.tss))
atacrna.glm <- merge(full.results.tss,rna.glmtop, by = "GeneName",suffixes = c(".atac",".rna"))
atacrna.glm <- merge(merge(atacrna.glm, atac.obsfc, by = c("chr","start","end")),
rna.obsfc[row.names(rna.obsfc) %in% atacrna.glm$GeneName, ], by.x = "GeneName", by.y = "row.names")
pdf("~/Desktop/Fig3A_pbmc.arc.fcfcplot.pdf",paper = "USr")
fcfcplot <- atacrna.glm # atacrna.glm|atacrna.glm.nodup|atacrna.comp.glm|atacrna.comp.glm.nodup :: if use "comp", variables below have to be modified
fcfc.corr <- with(fcfcplot,cor.test(logFC.atac.obs,logFC.rna.obs, method = "pearson"))
fcfc.pcorr_txt <- ifelse(fcfc.corr$p.value<0.001,"P < 0.001",ifelse(fcfc.corr$p.value<0.01,"P < 0.01",ifelse(fcfc.corr$p.value<0.05,"P < 0.05","P > 0.05")))
alpha = 0.01;
xlims <- c(min(fcfcplot$logFC.atac.obs),
min(quantile(fcfcplot$logFC.atac.obs,0.05),max(fcfcplot$logFC.atac.obs[apply(cbind(fcfcplot$logFC.atac.pvalue<alpha, fcfcplot$logFC.atac.obs<0),1,andred)])),
max(fcfcplot$logFC.atac.obs[apply(cbind(fcfcplot$logFC.atac.pvalue<alpha, fcfcplot$logFC.atac.obs<0),1,andred)]),
min(fcfcplot$logFC.atac.obs[apply(cbind(fcfcplot$logFC.atac.pvalue<alpha, fcfcplot$logFC.atac.obs>0),1,andred)]),
max(quantile(fcfcplot$logFC.atac.obs,0.95),min(fcfcplot$logFC.atac.obs[apply(cbind(fcfcplot$logFC.atac.pvalue<alpha, fcfcplot$logFC.atac.obs>0),1,andred)])),
max(fcfcplot$logFC.atac.obs))
ylims <- c(min(fcfcplot$logFC.rna.obs),
min(quantile(fcfcplot$logFC.rna.obs,0.05),max(fcfcplot$logFC.rna.obs[apply(cbind(fcfcplot$logFC.rna.pvalue<alpha, fcfcplot$logFC.rna.obs<0),1,andred)])),
max(fcfcplot$logFC.rna.obs[apply(cbind(fcfcplot$logFC.rna.pvalue<alpha, fcfcplot$logFC.rna.obs<0),1,andred)]),
min(fcfcplot$logFC.rna.obs[apply(cbind(fcfcplot$logFC.rna.pvalue<alpha, fcfcplot$logFC.rna.obs>0),1,andred)]),
max(quantile(fcfcplot$logFC.rna.obs,0.95),min(fcfcplot$logFC.rna.obs[apply(cbind(fcfcplot$logFC.rna.pvalue<alpha, fcfcplot$logFC.rna.obs>0),1,andred)])),
max(fcfcplot$logFC.rna.obs))
ylims[2:3] <- c(0,0)
# xlims[2:3] <- c(0,0)
# genesel <- c("LCN2","PDGFRB","KIR3DL1","PDGFD","GNLY","GZMB","KLRF1","CD248","NOG","CCR7","IL7R","LEF1","BACH2","CD8A","LRRN3","GSTT1","NRCAM","SLC16A10","NBEA","GSTM1","MTUS1","B3GAT1","IGFBP3","NCR1","FGFBP2","GZMH","PRSS23","NT5E","S100B") # highlighted in ms.
genesel <- test.table.tcell$GeneName
fcfcplot.sel <- subset(fcfcplot,with(fcfcplot,GeneName %in% genesel))
fcfcplot.sel <- fcfcplot.sel[order(fcfcplot.sel$GeneName,fcfcplot.sel$logFC.atac.obs),]
fcfcplot.sel <- fcfcplot.sel[!duplicated(fcfcplot.sel$GeneName),]
# Scatter of observed ATAC-seq logFC (promoter accessibility) vs RNA-seq
# logFC (expression). Concordant points (both changing in the same
# direction past the significance cutoffs) are emphasized; the selected
# gene set (fcfcplot.sel) is overlaid and labeled in green.
# BUG FIX: the original chain ended with a dangling "+" after geom_vline(),
# so the parser folded the print(p) statement below into the ggplot sum
# (attempting to add the previously assigned plot object -- an error, or
# silently a wrong plot). The trailing "+" is removed so print(p) is a
# separate statement.
p <- ggplot(data=fcfcplot[order(fcfcplot$logFC.atac.obs),], aes(logFC.atac.obs, logFC.rna.obs)) +
annotate("rect", xmin = -max(abs(xlims*1.1)), xmax = xlims[3], ymin = -max(abs(ylims*1.1)), ymax = ylims[3], fill = "darkcyan", alpha = 0.1) + # alt version with differently-colored quadrants. Use "darkorange1" to match both quadrants to the original version
annotate("rect", xmin = xlims[3], xmax = max(abs(xlims)), ymin = 0, ymax = max(abs(ylims)), fill = "darkorange1", alpha = 0.1) +
geom_point(aes(size=factor(apply(cbind(logFC.atac.obs<xlims[3]&logFC.rna.obs<ylims[3],logFC.atac.obs>0&logFC.rna.obs>0),1,orred)),
alpha=factor(apply(cbind(logFC.atac.obs<xlims[3]&logFC.rna.obs<ylims[3],logFC.atac.obs>0&logFC.rna.obs>0),1,orred)),
color=factor(apply(cbind(logFC.atac.obs<xlims[3]&logFC.rna.obs<ylims[3],logFC.atac.obs>0&logFC.rna.obs>0),1,orred)))) +
geom_text(data = fcfcplot[fcfcplot$logFC.atac.obs*fcfcplot$logFC.rna.obs > 0,], aes(logFC.atac.obs,logFC.rna.obs,label=GeneName), size=5) + # uses a threshhold to apply labels to extreme points
geom_text(data = subset(fcfcplot.sel,logFC.atac.obs<0),aes(logFC.atac.obs,logFC.rna.obs,label=GeneName), size=5, vjust = 1.3, hjust = 1, color = "green4") + # preselect labels used in the paper (negative)
geom_text(data = subset(fcfcplot.sel,logFC.atac.obs>0),aes(logFC.atac.obs,logFC.rna.obs,label=GeneName), size=5, vjust = -0.3, hjust = 0, color = "green4") + # preselect labels used in the paper (positive)
geom_point(data = fcfcplot.sel,aes(logFC.atac.obs,logFC.rna.obs), color = "green3", size = 0.8) +
scale_color_manual(values = c("FALSE"="dimgray","TRUE"="navy"), guide=F) +
scale_size_manual(values = c("FALSE"=0.5,"TRUE"=0.7), guide=F) +
scale_alpha_manual(values = c("FALSE"=0.25,"TRUE"=0.75), guide=F) +
scale_x_continuous(limits = c(-max(abs(xlims*1.1)),max(abs(xlims)))) +
scale_y_continuous(limits = c(-max(abs(ylims*1.1)),max(abs(ylims)))) +
xlab("ATAC-seq logFC (Promoter accessibility)") +
ylab("RNA-seq logFC (Expression)") +
ggtitle(paste("logFC RNAseq vs ATACseq")) +
theme_bw(base_family = "Helvetica",base_size = 10) +
theme(panel.background = element_rect(fill = "white", color = "black"),
# panel.grid.major.x = element_line(color = "ivory3", size = 0.2, linetype = 2),
# panel.grid.major.y = element_line(color = "ivory3", size = 0.2, linetype = 2),
panel.grid.major = element_blank(),
axis.line = element_line(color = "black", size = 0.25),
axis.text.x = element_text(size=10)) +
geom_hline(yintercept = ylims[3]:0, color = "palegreen3", linetype = "22", size=0.5, alpha=0.5) +
geom_vline(xintercept = xlims[3]:0, color = "palegreen3", linetype = "22", size=0.5, alpha=0.5)
# p <- ggMarginal(p,type = "histogram", size = 15, bins = 150, fill="palegreen3", color="seagreen3")
print(p)
fcfcplot <- atacrna.glm # atacrna.glm|atacrna.glm.nodup|atacrna.comp.glm|atacrna.comp.glm.nodup :: if use "comp", variables below have to be modified
fcfc.corr <- with(fcfcplot,cor.test(logFC.atac.obs,logFC.rna.obs, method = "pearson"))
fcfc.pcorr_txt <- ifelse(fcfc.corr$p.value<0.001,"P < 0.001",ifelse(fcfc.corr$p.value<0.01,"P < 0.01",ifelse(fcfc.corr$p.value<0.05,"P < 0.05","P > 0.05")))
alpha = 0.01;
xlims <- c(min(fcfcplot$logFC.atac.obs),
min(quantile(fcfcplot$logFC.atac.obs,0.05),max(fcfcplot$logFC.atac.obs[apply(cbind(fcfcplot$logFC.atac.pvalue<alpha, fcfcplot$logFC.atac.obs<0),1,andred)])),
max(fcfcplot$logFC.atac.obs[apply(cbind(fcfcplot$logFC.atac.pvalue<alpha, fcfcplot$logFC.atac.obs<0),1,andred)]),
min(fcfcplot$logFC.atac.obs[apply(cbind(fcfcplot$logFC.atac.pvalue<alpha, fcfcplot$logFC.atac.obs>0),1,andred)]),
max(quantile(fcfcplot$logFC.atac.obs,0.95),min(fcfcplot$logFC.atac.obs[apply(cbind(fcfcplot$logFC.atac.pvalue<alpha, fcfcplot$logFC.atac.obs>0),1,andred)])),
max(fcfcplot$logFC.atac.obs))
ylims <- c(min(fcfcplot$logFC.rna.obs),
min(quantile(fcfcplot$logFC.rna.obs,0.05),max(fcfcplot$logFC.rna.obs[apply(cbind(fcfcplot$logFC.rna.pvalue<alpha, fcfcplot$logFC.rna.obs<0),1,andred)])),
max(fcfcplot$logFC.rna.obs[apply(cbind(fcfcplot$logFC.rna.pvalue<alpha, fcfcplot$logFC.rna.obs<0),1,andred)]),
min(fcfcplot$logFC.rna.obs[apply(cbind(fcfcplot$logFC.rna.pvalue<alpha, fcfcplot$logFC.rna.obs>0),1,andred)]),
max(quantile(fcfcplot$logFC.rna.obs,0.95),min(fcfcplot$logFC.rna.obs[apply(cbind(fcfcplot$logFC.rna.pvalue<alpha, fcfcplot$logFC.rna.obs>0),1,andred)])),
max(fcfcplot$logFC.rna.obs))
ylims[2:3] <- c(0,0)
# genesel <- c("LCN2","PDGFRB","KIR3DL1","PDGFD","GNLY","GZMB","KLRF1","CD248","NOG","CCR7","IL7R","LEF1","BACH2","CD8A","LRRN3","GSTT1","NRCAM","SLC16A10","NBEA","GSTM1","MTUS1","B3GAT1","IGFBP3","NCR1","FGFBP2","GZMH","PRSS23","NT5E","S100B") # highlighted in ms.
genesel <- c("LCN2","PDGFRB","KIR3DL1","PDGFD","GNLY","GZMB","KLRF1","CD248","NOG","CCR7","IL7R","LEF1","BACH2","CD8A","LRRN3","GSTT1","NRCAM","SLC16A10","NBEA","GSTM1","MTUS1","B3GAT1","FGFBP2","GZMH","PRSS23","S100B","PRF1","NT5E") # highlighted in ms.
fcfcplot.sel <- subset(fcfcplot,with(fcfcplot,GeneName %in% genesel & apply(cbind(logFC.atac.obs<xlims[3]&logFC.rna.obs<ylims[3],logFC.atac.obs>xlims[4]&logFC.rna.obs>ylims[4]),1,any)))
fcfcplot.sel <- fcfcplot.sel[order(fcfcplot.sel$GeneName,fcfcplot.sel$logFC.atac.obs),]
fcfcplot.sel <- fcfcplot.sel[!duplicated(fcfcplot.sel$GeneName),]
p <- ggplot(data=fcfcplot[order(fcfcplot$logFC.atac.obs),], aes(logFC.atac.obs, logFC.rna.obs)) +
annotate("rect", xmin = -max(abs(xlims*1.1)), xmax = xlims[3], ymin = -max(abs(ylims*1.1)), ymax = ylims[3], fill = "darkcyan", alpha = 0.1) + # alt version with differently-colored quadrants. Use "darkorange1" to match both quadrants to the original version
annotate("rect", xmin = xlims[4], xmax = max(abs(xlims)), ymin = ylims[4], ymax = max(abs(ylims)), fill = "darkorange1", alpha = 0.1) +
geom_point(aes(size=factor(apply(cbind(logFC.atac.obs<xlims[3]&logFC.rna.obs<ylims[3],logFC.atac.obs>xlims[4]&logFC.rna.obs>ylims[4]),1,orred)),
alpha=factor(apply(cbind(logFC.atac.obs<xlims[3]&logFC.rna.obs<ylims[3],logFC.atac.obs>xlims[4]&logFC.rna.obs>ylims[4]),1,orred)),
color=factor(apply(cbind(logFC.atac.obs<xlims[3]&logFC.rna.obs<ylims[3],logFC.atac.obs>xlims[4]&logFC.rna.obs>ylims[4]),1,orred)))) +
# geom_text(data = fcfcplot[fcfcplot$logFC.atac.obs*fcfcplot$logFC.rna.obs > 0.2,], aes(logFC.atac.obs,logFC.rna.obs,label=GeneName), size=2.5) + # uses a threshhold to apply labels to extreme points
geom_text(data = subset(fcfcplot.sel,logFC.atac.obs<0),aes(logFC.atac.obs,logFC.rna.obs,label=GeneName), size=2.5, vjust = 1.3, hjust = 1, color = "green4") + # preselect labels used in the paper (negative)
geom_text(data = subset(fcfcplot.sel,logFC.atac.obs>0),aes(logFC.atac.obs,logFC.rna.obs,label=GeneName), size=2.5, vjust = -0.3, hjust = 0, color = "green4") + # preselect labels used in the paper (positive)
geom_point(data = fcfcplot.sel,aes(logFC.atac.obs,logFC.rna.obs), color = "green3", size = 0.8) +
scale_color_manual(values = c("FALSE"="dimgray","TRUE"="navy"), guide=F) +
scale_size_manual(values = c("FALSE"=0.5,"TRUE"=0.7), guide=F) +
scale_alpha_manual(values = c("FALSE"=0.25,"TRUE"=0.75), guide=F) +
scale_x_continuous(limits = c(-max(abs(xlims*1.1)),max(abs(xlims)))) +
scale_y_continuous(limits = c(-max(abs(ylims*1.1)),max(abs(ylims)))) +
xlab("ATAC-seq logFC (Promoter accessibility)") +
ylab("RNA-seq logFC (Expression)") +
ggtitle(paste("logFC RNAseq vs ATACseq")) +
theme_bw(base_family = "Helvetica",base_size = 10) +
theme(panel.background = element_rect(fill = "white", color = "black"),
# panel.grid.major.x = element_line(color = "ivory3", size = 0.2, linetype = 2),
# panel.grid.major.y = element_line(color = "ivory3", size = 0.2, linetype = 2),
panel.grid.major = element_blank(),
axis.line = element_line(color = "black", size = 0.25),
axis.text.x = element_text(size=10)) +
geom_hline(yintercept = ylims[3:4], color = "palegreen3", linetype = "22", size=0.5, alpha=0.5) +
geom_vline(xintercept = xlims[3:4], color = "palegreen3", linetype = "22", size=0.5, alpha=0.5)
# p <- ggMarginal(p,type = "histogram", size = 15, bins = 150, fill="palegreen3", color="seagreen3")
print(p)
dev.off()
|
a36a5d7422647c1b37b07c264f6faa9829df042b | 836871abe39af81f483418b56e328bb2e281e5d1 | /man/robustHInit.Rd | 9c1a0d787da1ad7e90a2b134f04f8c22353f0528 | [] | no_license | HenrikBengtsson/ACNE | 7576aeb7d6d4e16a8fa40733f8156e1e5c11f538 | 31dc8364bb5310f1d8f7a757e608db30aac49147 | refs/heads/master | 2023-07-09T20:58:21.796880 | 2023-06-25T21:34:53 | 2023-06-25T21:34:53 | 31,195,973 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,007 | rd | robustHInit.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% robustHInit.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{robustHInit}
\alias{robustHInit}
\title{Robust initialization of the H (copy number) matrix}
\description{
Robust initialization of the H (copy number) matrix.
}
\usage{
robustHInit(V, W, maxIter=5L, ...)
}
\arguments{
\item{V}{An KxI \code{\link[base]{matrix}} where I is the number of arrays and K is the
number of probes where K should be even (K=2L).}
\item{W}{A Kx2 \code{\link[base]{matrix}} of probe-affinity estimates.}
\item{maxIter}{The maximum number of iteration.}
\item{...}{Not used.}
}
\value{
Returns a 2xI \code{\link[base]{matrix}} of robustified allele-specific copy-number estimates.
}
\details{
  This function utilizes a random number generator.
}
\keyword{internal}
|
23b68fc7ae997450ea25fc8472466b3150ec6ea0 | 5c5242760e0a45fef0400f48173fc325c296a80e | /man/crossCorrByShift.Rd | 405c05726c671457b17c5f4c925a2accd5039e32 | [] | no_license | jrboyd/peakrefine | 29f90711497b0b1de56ff9fe5abed1aeb98e705f | 44a7f42eeefefb52a69bb3066cc5829d79d25f02 | refs/heads/master | 2021-07-08T05:11:24.470701 | 2020-07-27T16:36:31 | 2020-07-27T16:36:31 | 148,842,127 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,617 | rd | crossCorrByShift.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_crossCorrShift.R
\name{crossCorrByShift}
\alias{crossCorrByShift}
\title{Calculate cross strand correlation using the read shift method.}
\usage{
crossCorrByShift(bam_file, query_gr, n_regions = 20, max_dupes = 1,
frag_min = 0, frag_max = 250, step = 10, small_step = 1,
include_plots = TRUE)
}
\arguments{
\item{bam_file}{character. Path to .bam file, must have index at .bam.bai.}
\item{query_gr}{GRanges. Regions to calculate cross correlation for.}
\item{n_regions}{integer. query_gr will be downsampled to this many regions
for speed. Use NA to skip downsampling.}
\item{max_dupes}{integer. Duplicate reads above this value will be removed.}
\item{frag_min}{integer. extension value to start at.}
\item{frag_max}{integer. extension value to end at.}
\item{step}{integer. proceed from frag_min measuring correlation every step.}
\item{small_step}{integer. after measuring correlation every step, a second
round of fragment size refinement is done using small_step within +/- step
of maximum.}
\item{include_plots}{logical. Should plots be included in output?}
}
\value{
data.table of metrics
}
\description{
Calculate cross strand correlation using the read shift method.
}
\examples{
bam_file = system.file("extdata", "MCF10A_CTCF.random5.bam", package = "peakrefine")
np = system.file("extdata", "MCF10A_CTCF.random5.narrowPeak", package = "peakrefine")
qgr = rtracklayer::import(np, format = "narrowPeak")
crossCorrByShift(bam_file, qgr[1:2], frag_min = 50,
frag_max = 250, step = 50, small_step = 10)
}
|
4b28274382c7a119f1ccc170eb1298497ed50fb2 | 050edfa53f5ec7d76b2321c552266e0f60e4db92 | /R/k5_k1.bar.R | 87db2e3b1deb08407f5a21b01c47841f5f75a12f | [] | no_license | placeboo/subgraph | e1ab54fabda52ed4243fdc5cdc2a348b2da6d41c | 37036807aa7bd75aeab90fe224fdd44c126fb3f9 | refs/heads/master | 2021-10-27T15:54:59.877512 | 2019-04-18T08:08:57 | 2019-04-18T08:08:57 | 107,905,890 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 742 | r | k5_k1.bar.R | #' fiveFlower graphs
#' List all possible fiveFlower graphs based on given SIX nodes
#' @param x The vector representing nodes
#' @return A matrix listing edges of fiveFlower graphs
#' @examples
#' k5_k1.bar(c(1:6))
k5_k1.bar = function(x){
  if(length(x) != 6){
    stop("The number of nodes should be SIX!")
  }
  x = sort(x)
  # Preallocate one row per hub instead of growing the matrix with rbind()
  # from an all-NA placeholder row (the old approach also left an accidental
  # "temp.mat" rowname on every row of the result).
  mat = matrix(NA, nrow = length(x), ncol = 5)
  for(i in seq_along(x)){ # each node takes a turn as the hub of the flower
    hub = x[i]
    rst.vec = x[-i]
    # Edge index from the hub to each of the five remaining nodes;
    # deToIn() is assumed to map a node pair to its edge index -- TODO confirm.
    mat[i, ] = c(deToIn(hub, rst.vec[1]), deToIn(hub, rst.vec[2]),
                 deToIn(hub, rst.vec[3]), deToIn(hub, rst.vec[4]),
                 deToIn(hub, rst.vec[5]))
  }
  mat
}
|
b9b64e5216f07baebda3d356d33f306173050ada | 682fdb45d76bd462593d07113a0f642665ff44a3 | /man/write.group.morph.standard.singlehemi.Rd | fb2fdfa69338b5d0b37bdcd8012bed3b40dc2581 | [
"MIT"
] | permissive | dfsp-spirit/fsbrain | dd782c91f95c52b8039e4ec6642345d994a6ed84 | 09f506dbf5467356ab26a65246f31051da58f198 | refs/heads/master | 2023-07-06T10:11:18.468284 | 2023-06-26T16:42:45 | 2023-06-26T16:42:45 | 209,085,379 | 44 | 12 | NOASSERTION | 2023-01-15T19:49:54 | 2019-09-17T15:05:51 | R | UTF-8 | R | false | true | 1,113 | rd | write.group.morph.standard.singlehemi.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fsdir_abstraction_group.R
\name{write.group.morph.standard.singlehemi}
\alias{write.group.morph.standard.singlehemi}
\title{Write single hemi per-vertex data for a group of subjects to given file names.}
\usage{
write.group.morph.standard.singlehemi(
filepaths,
data,
format = "auto",
create_dirs = TRUE
)
}
\arguments{
\item{filepaths}{vector of character strings, the full paths to the output files, including file names and extension.}
\item{data}{numerical matrix or data.frame, the morph data for a single hemi (as returned by \code{group.morph.standard}). Number of subjects (columns) must match the length of the 'filepaths'.}
\item{format}{character string, a valid format spec for \code{freesurferformats::write.fs.morph}, e.g., "auto" to derive from filename, "mgh", "mgz", "curv" or others.}
\item{create_dirs}{logical, whether to create missing (sub) directories which occur in the 'filepaths'.}
}
\description{
Write single hemi per-vertex data for a group of subjects to given file names.
}
\keyword{internal}
|
ef89b54349e0551dea3d952b735947d941cb35fe | b817cf5b07c5edec40937825665a155f0f95ee2a | /R/states.r | 9711ca63fbeedd36096e8819e07e8bce5745f92b | [] | no_license | rui/Dissertation | f67c133921efea4ad06c34348a96884a7fea28ba | 51a05c3191094d9d68d4e597af933113c81da808 | refs/heads/master | 2021-01-17T05:38:06.291472 | 2012-05-01T20:02:56 | 2012-05-01T20:02:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,385 | r | states.r | states = matrix(c(
'Alabama','AL',
'Alaska','AK',
'Arizona','AZ',
'Arkansas','AR',
'California','CA',
'Colorado','CO',
'Connecticut','CT',
'Delaware','DE',
'Florida','FL',
'Georgia','GA',
'Hawaii','HI',
'Idaho','ID',
'Illinois','IL',
'Indiana','IN',
'Iowa','IA',
'Kansas','KS',
'Kentucky','KY',
'Louisiana','LA',
'Maine','ME',
'Maryland','MD',
'Massachusetts','MA',
'Michigan','MI',
'Minnesota','MN',
'Mississippi','MS',
'Missouri','MO',
'Montana','MT',
'Nebraska','NE',
'Nevada','NV',
'New Hampshire','NH',
'New Jersey','NJ',
'New Mexico','NM',
'New York','NY',
'North Carolina','NC',
'North Dakota','ND',
'Ohio','OH',
'Oklahoma','OK',
'Oregon','OR',
'Pennsylvania','PA',
'Rhode Island','RI',
'South Carolina','SC',
'South Dakota','SD',
'Tennessee','TN',
'Texas','TX',
'Utah','UT',
'Vermont','VT',
'Virginia','VA',
'Washington','WA',
'West Virginia','WV',
'Wisconsin','WI',
'Wyoming','WY',
'American Samoa','AS',
'District of Columbia','DC',
'D.C.','DC',
'Federated States of Micronesia','FM',
'Guam','GU',
'Marshall Islands','MH',
'Northern Mariana Islands','MP',
'Palau','PW',
'Puerto Rico','PR',
'Virgin Islands','VI',
'Armed Forces Africa','AE',
'Armed Forces Americas','AA',
'Armed Forces Canada','AE',
'Armed Forces Europe','AE',
'Armed Forces Middle East','AE',
'Armed Forces Pacific','AP'),
ncol=2, byrow=TRUE
)
# Convert the two-column character matrix into a lookup data frame mapping
# full state/territory names to their USPS abbreviations.
states = as.data.frame(states)
names(states) = c('name','abbr')
|
88c616ccd1c703714bd37e248b22adc7160e2d77 | e40e60878aa2a88f9a300b2e6ee0cd628686e2ef | /R/complexity.R | 797c603f9dcbb9a02783f4fb88b7a20d41304ed8 | [
"MIT"
] | permissive | bfgray3/bigo | 4c21dd6dfde31bd3c55cb8e2d3d973874e6a023a | 75d9f94a52bca93f15c501616429e8bf6bdb74e4 | refs/heads/main | 2021-05-06T18:38:32.427975 | 2017-11-28T02:06:47 | 2017-11-28T02:06:47 | 112,034,044 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,044 | r | complexity.R | ############
### time ###
############
# Evaluate an expression once and return the wall-clock ("elapsed") time in
# seconds, as reported by system.time().
.time <- function(.expr) {
  timing <- system.time(.expr)
  timing[["elapsed"]]
}
###############
### .f_time ###
###############
# Time a single invocation of .fun. All positional arguments in ... are
# forwarded to .fun untouched; .fun itself is keyword-only so it can never
# be swallowed by the dots.
.f_time <- function(..., .fun) {
  elapsed_secs <- .time(.fun(...))
  elapsed_secs
}
############
### bigo ###
############
#' Big-O
#'
#' Analyze the algorithmic complexity of a function with respect to arguments.
#'
#' This function returns an object of class \code{bigo}, which contains a
#' tibble of runtime results, the name of the function being measured, and
#' the number of runs over which the results are averaged.
#'
#' @param f The function to be investigated.
#' @param ... One or more numeric or integer vectors, passed as named
#' arguments, where the names are arguments of \code{f}.
#' @param num_runs The number of experiments over which to average results.
#' Currently only 1 is supported.
#' @return An object of class \code{bigo} containing the results of the
#' runtime experiments: a tibble of runtimes, the function name, and the
#' number of runs that the results are averaged over.
#' @examples
#' bigo(f = function(n) if (n < 3) 1 else { Recall(n - 1) + Recall(n - 2) },
#' n = 1:15)
#' @export
bigo <- function(f, ..., num_runs = 1) {
  ########################
  ### num_runs WARNING ###
  ########################
  # Averaging over multiple runs is not implemented yet; coerce to 1 with a
  # warning rather than silently ignoring the request.
  if (num_runs != 1) {
    warning("Non-unit values of `num_runs` are not supported until version 0.0.2;",
            " setting `num_runs` to 1.", call. = FALSE)
    num_runs <- 1
  }
  ################
  ### FUNCTION ###
  ################
  f <- match.fun(f)
  f_args <- names(formals(f))
  this_call <- match.call()
  # Recover the literal expression the caller passed for `f` so it can be
  # reported in errors/results; a multi-line deparse (e.g. an inline
  # anonymous function) is collapsed to the label "anonymous".
  f_char <- deparse(this_call[["f"]])
  if (length(f_char) > 1) f_char <- "anonymous"
  #################
  ### ARGUMENTS ###
  #################
  # The ... arguments are the numeric ranges to sweep; every one must be
  # named after a formal argument of `f`.
  arg_ranges <- list(...)
  if (! length(arg_ranges)) {
    stop("Must pass at least one named argument for complexity evaluation.",
         call. = FALSE)
  }
  passed_args <- names(arg_ranges)
  if (any(passed_args == "") || is.null(passed_args)) {
    stop("All arguments for complexity evaluation must be named.",
         call. = FALSE)
  }
  #######################################
  ### CHECK FOR VALIDITY OF ARGUMENTS ###
  #######################################
  invalid_args <- setdiff(passed_args, f_args)
  if (length(invalid_args)) {
    stop("Passed ", paste(invalid_args, collapse = ", "),
         " to be evaluated for complexity, but ",
         if (length(invalid_args) > 1) "they are " else "it is ",
         "not an argument of `", f_char, "`.", call. = FALSE)
  }
  #######################
  ### FIND COMPLEXITY ###
  #######################
  # Floor each range (sizes must be whole numbers), take the Cartesian
  # product of all ranges, then time `f` on every combination. The `.`
  # inside mutate() is the magrittr placeholder (the expand.grid result);
  # pmap_dbl() passes each row's columns as named arguments to .f_time.
  # NOTE(review): relies on %>% being available via the package imports.
  runtime_results <- arg_ranges %>%
    purrr::map(floor) %>%
    expand.grid() %>%
    dplyr::mutate(elapsed = purrr::pmap_dbl(., .f_time, .fun = f))
  # Bundle the timings with the metadata consumed by the "bigo" S3 methods.
  results <- list(runtimes = runtime_results,
                  function_name = f_char,
                  num_runs = num_runs)
  class(results) <- c("bigo")
  results
}
### |
c0c0357e85eb02ae8d4a7ad257bb63f936196848 | c5dc4ae6beeec17d2076242ab1965f0e2df0ea8f | /global.R | 23a2d9f50315c7846fe4057c7c1ed19765efce66 | [] | no_license | seewilds/DevDataPRoducts | e4207b87ec22859c70fdf3a138816607f840c29b | ff400adc1521b2909256943f450a7ded7943c736 | refs/heads/main | 2023-07-10T03:32:26.913860 | 2021-08-19T17:40:15 | 2021-08-19T17:40:15 | 398,024,809 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,271 | r | global.R | library(shiny)
library(dplyr)
library(ggplot2)
library(reshape2)
#setwd("~/REE Projects/shiny")
# The same as.character/iconv cleanup was previously copy-pasted for every
# text column of every table; factor it into one helper.
# Coerce a column to character and replace invalid multibyte sequences with
# "<xx>" byte escapes so downstream string ops cannot fail on bad encoding.
clean_utf8 <- function(x) {
  iconv(enc2utf8(as.character(x)), sub = "byte")
}

# Text columns shared by the two REE tables that need encoding cleanup.
ree_text_cols <- c("Project", "Owner.s.", "Confidence", "variable")

ree_aggregates <- read.csv("ree_aggregates.csv", encoding = "UTF-8")
for (col in ree_text_cols) {
  ree_aggregates[[col]] <- clean_utf8(ree_aggregates[[col]])
}
# Replace the byte escapes iconv leaves for accented a's in project names.
ree_aggregates$Project <- gsub("<e1>", "a", ree_aggregates$Project)
ree_aggregates$Project <- gsub("<e4>", "a", ree_aggregates$Project)
# Drop the row-index column written by the upstream export.
ree_aggregates <- ree_aggregates %>% select(-X)

ree_elements <- read.csv("ree_elements.csv", encoding = "UTF-8")
for (col in ree_text_cols) {
  ree_elements[[col]] <- clean_utf8(ree_elements[[col]])
}
ree_elements$Project <- gsub("<e1>", "a", ree_elements$Project)
ree_elements$Project <- gsub("<e4>", "a", ree_elements$Project)
# Drop the index column and the tonnage rows, which this app does not use.
ree_elements <- ree_elements %>% select(-X) %>% filter(variable != "Tonnes")

element_uses <- read.csv("element_uses.csv", encoding = "UTF-8")
element_uses$Selected.applications <- clean_utf8(element_uses$Selected.applications)
# Non-breaking spaces arrive as "<a0>" byte escapes; restore plain spaces.
element_uses$Selected.applications <- gsub("<a0>", " ", element_uses$Selected.applications)

aggregate_metrics <- read.csv("aggregate_metrics.csv", encoding = "UTF-8")
#aggregate_metrics <- enc2native(aggregate_metrics) |
26d1b960818ed0cb480839c08e72424a209da928 | 7b18e1d9702050698af4d415932892de1462d16c | /SRC/02clean.R | d567f55e410e7780986f465a7df34c9c10a56ab2 | [] | no_license | anykine/DuchenneConnect2016 | 3de9cb100823b4ee72239165322e8f81664fd583 | 3947e6f54f6d820bfdd7e6c0308a6cf1e1c22900 | refs/heads/master | 2021-10-22T20:29:35.106649 | 2019-03-12T19:24:00 | 2019-03-12T19:24:00 | 100,430,011 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,684 | r | 02clean.R | # Some data sets have multiple rows per individual (time course)
# so I use data.table's .SD[.N] to keep the last row per patient to dedup.
#
# Assumes the muscle/gen/ster/cardio data.tables were loaded by an earlier
# script, and that checkUniquePatientsPerRow() is defined elsewhere --
# neither is created in this file.
#
# ----------- MUSCLE DATA -------------
#
# Data cleanup: key by patient, then keep each patient's final record.
setkey(muscle, Patient.ID)
muscle2 = muscle[, .SD[c(.N)], by=Patient.ID] # this gets the last row for each patient record
#
#
# ----------- GENETIC DATA -------------
#
# get the last observation for each group
setkey(gen, Patient.ID)
gen2= gen[, .SD[c(.N)], by=Patient.ID] # this gets the last row for each patient record
# REQUIRED fix columns with same name, for merge later.
# REQUIRED There are 3 columns named "Other Value" (columns 22/24/26);
# rename them so the merges below do not collide.
tmp.names = make.names(colnames(gen))
tmp.names[c(22,24,26)] = c("Laboratory.Other.Value", "Test.Method.Other.Value", "Category.Other.Value")
setnames(gen, tmp.names)
setnames(gen2, tmp.names)
#
#
# ----------- STEROID DATA -------------
#
setkey(ster, Patient.ID)
ster2 = ster[ , .SD[c(.N)], by=Patient.ID]
#
#
# ----------- CARDIO DATA -------------
#
setkey(cardio, Patient.ID)
cardio2 = cardio[ , .SD[c(.N)], by=Patient.ID]
#
# ----------- combine muscle2, ster2, gen2 into 'all' ------------
#
# Check there are no duplicate patients in each table; should print 0 rows.
lapply(list(muscle2,ster2, gen2), checkUniquePatientsPerRow, myKey="Patient.ID")
# Suffixes disambiguate shared columns after merging:
# Age.m = muscle, Age.s = steroid, Age = Genetic Excel table
all = merge(merge(muscle2, ster2, by.x="Patient.ID", by.y="Patient.ID", suffixes=c(".m",".s")),
            gen2, by.x="Patient.ID", by.y="Patient.ID", suffixes=c(".ms", ".g"))
#
# ----------- combine 'all' with cardio2 ------------
# Don't use yet
#all = merge(all, cardio2, by.x="Patient.ID", by.y="Patient.ID", suffixes=c("", ".c"))
# save.image("SRC/DCdata2016.RData")
|
7fc5e502816c5307aedd5ea2f256c9a16ea476ef | 30596cbc584495402cfeb772b669f5c896fe90db | /register.R | 0e34b18bfa6feeea994aace1f042f86869998e88 | [] | no_license | boettiger-lab/data-tracker | de855d74adf362424a4e8f4124d66cde39563562 | 434c33fe2d5b5b2d11282d976208bc419ef67687 | refs/heads/master | 2021-08-17T09:31:39.022454 | 2021-08-01T00:28:00 | 2021-08-01T00:28:00 | 242,202,095 | 0 | 1 | null | 2020-03-05T08:20:05 | 2020-02-21T18:12:13 | R | UTF-8 | R | false | false | 239 | r | register.R |
# Build the raw-GitHub URL for every file tracked under store/, then register
# each URL's content hash with hash-archive.org and the local store/ registry.
urls <- paste0(
  "https://github.com/boettiger-lab/data-tracker/raw/master/",
  list.files("store", full.names = TRUE, recursive = TRUE)
)
lapply(urls, function(u) {
  contentid::register(u, registries = c("https://hash-archive.org", "store/"))
})
|
5d85196158aa8b36def02d8c6791f79808f5bbb7 | 9b67199df14059ab543e777aac16e78a441656ef | /HypothesisFive.R | 31b8fea163d43e4a5cd1ee23e222019a41f5b022 | [] | no_license | megzcalvert/AMtoo | 608b5bcac2ffd708fc3e3e3c4ab979408bb748d8 | 4a900df8f3edd3defd31e08f5950cc8b2271295e | refs/heads/master | 2021-06-26T03:07:08.958953 | 2020-10-18T16:20:05 | 2020-10-18T16:20:05 | 153,009,441 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,222 | r | HypothesisFive.R | rm(list = objects())
ls()

library(readr)
library(data.table)
library(tidyverse)
library(janitor)
library(tidylog)
library(broom)
library(Hmisc)
library(psych)

getwd()
# NOTE(review): hard-coded absolute path ties this script to one machine;
# consider a relative path or here::here().
setwd("~/Dropbox/Research_Poland_Lab/AM Panel/")

# Long-format HTP phenotype tables, one per season. Columns used below
# include Date, trait ID, value, and GRYLD (grain yield).
pheno17 <- fread("./Phenotype_Database/pheno17_htpLong.txt")
pheno18 <- fread("./Phenotype_Database/pheno18_htpLong.txt")
pheno19 <- fread("./Phenotype_Database/pheno19_htpLong.txt")

# Heading-date tables; parse the date column for use on plot date axes.
hddt17 <- fread("./Phenotype_Database/HDDT2017.txt")
hddt17$hddt17 <- as.Date(hddt17$hddt17)
hddt18 <- fread("./Phenotype_Database/HDDT2018.txt")
hddt18$hddt18 <- as.Date(hddt18$hddt18)
hddt19 <- fread("./Phenotype_Database/HDDT2019.txt")
hddt19$hddt19 <- as.Date(hddt19$hddt19)

colnames(pheno17)

# Spot check: NDVI on a single 2017 date against grain yield.
trial <- pheno17 %>%
  filter(ID == "NDVI") %>%
  filter(Date == "2017-03-31")

cor.test(trial$value, trial$GRYLD)
# The date-wise correlation pipeline was previously triplicated for the three
# seasons; factor it into one helper. For each (Date, trait ID) group, run
# cor.test(GRYLD, value) and flatten the test into one row with
# broom::glance(), keeping the estimate and its confidence bounds.
#
# pheno       long-format phenotype table for one season
# drop_cols   character vector of identifier columns to drop before nesting
# drop_height if TRUE, remove the "height" trait first (2018/2019 tables)
cor_by_date <- function(pheno, drop_cols, drop_height = FALSE) {
  if (drop_height) {
    pheno <- filter(pheno, ID != "height")
  }
  pheno %>%
    tidylog::select(-all_of(drop_cols)) %>%
    group_by(Date, ID) %>%
    nest() %>%
    mutate(
      correlation = map(data, ~ cor.test(.x$GRYLD, .x$value)),
      tidyCor = map(correlation, glance)
    ) %>%
    unnest(tidyCor) %>%
    tidylog::select(-data, -correlation)
}

nested17 <- cor_by_date(pheno17, c("Plot_ID", "Variety"))
nested18 <- cor_by_date(pheno18, c("entity_id", "Variety", "year"),
                        drop_height = TRUE)
nested19 <- cor_by_date(pheno19, c("entity_id", "Variety", "year"),
                        drop_height = TRUE)

# Ensure Date is a proper Date class for the date-axis plots below.
nested17$Date <- as.Date(nested17$Date)
nested18$Date <- as.Date(nested18$Date)
nested19$Date <- as.Date(nested19$Date)
# One plotting routine for the per-season correlation series (previously
# copy-pasted three times). Points are per-date correlation estimates,
# error bars their confidence intervals; green vertical lines mark the
# heading dates recorded for that season.
#
# nested      per-date correlation table (from cor_by_date)
# hddt        heading-date table for the season
# hddt_col    name of the date column inside `hddt`
# plot_title  title shown above the plot
# date_labels strftime format for the x-axis tick labels
plot_cor_season <- function(nested, hddt, hddt_col, plot_title, date_labels) {
  nested %>%
    ggplot(aes(x = Date, y = estimate, color = ID)) +
    geom_point(size = 2) +
    geom_errorbar(aes(ymin = conf.low, ymax = conf.high), size = 1) +
    geom_hline(yintercept = 0, linetype = 2, colour = "darkgrey") +
    geom_vline(aes(xintercept = .data[[hddt_col]]), hddt,
               colour = "#74c476", alpha = 0.75) +
    theme_bw() +
    scale_x_date(date_breaks = "1 week", date_labels = date_labels) +
    labs(title = plot_title) +
    ylab("Pearson correlation co-efficient") +
    theme(
      axis.text = element_text(colour = "black", size = 14),
      axis.title = element_text(size = 16),
      title = element_text(size = 20),
      legend.text = element_text(size = 14)
    )
}

plot_cor_season(nested17, hddt17, "hddt17", "Correlation with CI 2017", "%d%b")
plot_cor_season(nested18, hddt18, "hddt18", "Correlation with CI 2018", "%m/%d")
plot_cor_season(nested19, hddt19, "hddt19", "Correlation with CI 2019", "%m/%d")
|
3b5da42368816f9279ec9f351681872cfc4eddde | 89d5a7062a6991a49efcd21313c9f2daeb26261c | /R/market_data.R | c9515f95bad5d2270eb5672761335d2ac6d51105 | [] | no_license | anttsou/qmj | 3786eb2bdff69831ae6e4bdda9d37d9c03af27a6 | ffc56ea6d7a00e8f2f958df9c44a6008211882d3 | refs/heads/master | 2021-01-19T00:47:23.680802 | 2016-07-10T21:48:59 | 2016-07-10T21:48:59 | 29,163,706 | 10 | 7 | null | 2016-01-10T23:36:58 | 2015-01-13T00:08:25 | R | UTF-8 | R | false | false | 5,739 | r | market_data.R | #' Produces component and quality scores.
#'
#' Calculates market growth, payouts, safety, and
#' profitability of our list of companies for later
#' processing.
#'
#' All parameters default to package data sets and must
#' be formatted similarly to a data frame produced by
#' \code{\link{tidy_prices}} and \code{\link{tidyinfo}}.
#'
#' @return A data frame containing company names, tickers,
#' profitability z-scores, growth z-scores, safety z-scores,
#' payout z-scores, and quality z-scores. Organized by
#' quality in descending order.
#'
#' @param companies A data frame of company names and
#' tickers.
#' @param financials A data frame containing financial
#' information for the given companies.
#' @param prices A data frame containing the daily
#' market closing prices and returns.
#'
#' @seealso \code{\link{market_profitability}}
#' @seealso \code{\link{market_growth}}
#' @seealso \code{\link{market_safety}}
#' @seealso \code{\link{market_payouts}}
#'
#' @examples
#' \dontrun{
#' ## To immediately get quality scores using
#' ## package data sets.
#'
#' market_data()
#'
#' ## If we desire to produce a set of quality
#' ## scores for a specific data frame of
#' ## companies, which we'll call companies.
#'
#' # Remove old temporary data, if present.
#' clean_downloads(companies)
#'
#' # Get raw financial and price data.
#' raw_financials <- get_info(companies)
#' raw_prices <- get_prices(companies)
#'
#' # Clean raw data for use in market_data.
#' financials <- tidyinfo(raw_financials)
#' prices <- tidy_prices(raw_prices)
#'
#' quality_scores <- market_data(companies, financials, prices)
#' }
#' @importFrom dplyr arrange %>%
#' @import qmjdata
#' @export
market_data <- function(companies = qmjdata::companies, financials = qmjdata::financials, prices = qmjdata::prices) {
  ## Guard: the companies frame must actually list tickers.
  if (length(companies$ticker) == 0) {
    stop("first parameter requires a ticker column.")
  }
  ## Guard: negative total common shares outstanding is a data error.
  if (length(which(financials$TCSO < 0))) {
    stop("Negative TCSO exists.")
  }
  ## First filter: every company must have an annual statement for the year
  ## two calendar years back (the "target year"). Companies that filed early
  ## the following calendar year (the "leeway year") also pass.
  target_year <- as.numeric(format(Sys.Date(), "%Y")) - 2
  leeway_year <- target_year + 1
  valid_tickers <- dplyr::filter(financials, year==target_year | year==leeway_year) %>%
    dplyr::select(ticker) %>%
    dplyr::distinct()
  ## Second filter: keep only companies with 3-4 years of roughly contiguous
  ## financial data including the target (or leeway) year.
  second_filter <- function(selected_ticker, fin, target_year, leeway_year) {
    selected_rows <- dplyr::filter(fin, ticker==selected_ticker)
    ## At least three years of statements must exist.
    if(nrow(selected_rows) >= 3) {
      ## The target or leeway year must be among them.
      if(target_year %in% selected_rows$year | leeway_year %in% selected_rows$year){
        ## Contiguity check with slack for companies that file twice in one
        ## calendar year, or shift an annual filing into early January: the
        ## summed year-over-year gaps across filings may not exceed 4.
        if(sum(diff(selected_rows$year)) <= 4)
          return(selected_ticker)
      }
    }
    ## Predictable failure flag; filtered out by the caller below.
    return("")
  }
  valid_tickers <- sapply(valid_tickers$ticker, second_filter, financials, target_year, leeway_year)
  valid_tickers <- valid_tickers[valid_tickers != ""]
  ## Price filter: require at least 80% as many non-missing daily returns
  ## as the GSPC (S&P 500 index) series has rows in the price data.
  expected_rows <- length(prices$ticker[prices$ticker == 'GSPC'])
  passing_companies <- table(prices$ticker[!is.na(prices$pret)])
  passing_companies <- passing_companies[passing_companies >= (expected_rows * 4/5)]
  passing_companies <- rownames(passing_companies)
  valid_tickers <- passing_companies[passing_companies %in% valid_tickers]
  ## Restrict the company list to those that passed every filter.
  companies <- companies[companies$ticker %in% valid_tickers,]
  ## Component scores, one vector per quality dimension (helpers are
  ## defined elsewhere in this package).
  profitability <- market_profitability(companies, financials)$profitability
  growth <- market_growth(companies, financials)$growth
  safety <- market_safety(companies, financials, prices)$safety
  payouts <- market_payouts(companies, financials)$payouts
  ## Quality is the summed components, standardized to z-scores.
  quality <- profitability + growth + safety + payouts
  quality <- scale(quality)
  name <- companies$name
  ticker <- companies$ticker
  marketdata <- data.frame(name = name, ticker = ticker, profitability = profitability, growth = growth, safety = safety, payouts = payouts, quality = quality)
  ## Order companies from highest to lowest quality.
  ## NOTE(review): bare desc() relies on dplyr being attached; it is not in
  ## the roxygen @importFrom list above -- confirm the NAMESPACE covers it.
  marketdata <- dplyr::arrange(marketdata, desc(quality))
  marketdata
}
|
47037d9a68f6a77c1dd69e201d7321f985652b8d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/LFDREmpiricalBayes/examples/PRGM.action.Rd.R | d355fc8aa4168efcb0d817ecf5278a43e39bd764 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 552 | r | PRGM.action.Rd.R | library(LFDREmpiricalBayes)
### Auto-extracted example for LFDREmpiricalBayes::PRGM.action (genthat).
### Name: PRGM.action
### Title: Based on the Robust Bayes Approach, Performs a Multiple
###   Hypothesis Testing Problem under a Squared Error Loss Function
### Aliases: PRGM.action
### Keywords: Posterior Regret Gamma Minimax Average

### ** Examples

# LFDR estimates for the same features from two reference classes.
# First reference class
LFDR.Separate <- c(0.14, 0.8, 0.16, 0.30)
# Second reference class
LFDR.Combined <- c(0.21, 0.61, 0.12, 0.10)

# Combine the two sets of estimates under the PRGM criterion.
output <- PRGM.action(LFDR.Separate, LFDR.Combined)

# Vector of the LFDR estimates
output
|
909e83ed4cc8531cc31bd6be7122fa4d84539f2f | 7d47028c95b2d6aef5e0f790421dca147305361e | /Missing_data_imputation_breast_cancer.R | f950c759c6738744bed3022674518df490e1e72f | [
"MIT"
] | permissive | pdeepak87/R-stats-machine-learning | 42d77c3a702579560cef3b1c038158f8b081ffa2 | 1c0d48c6c8c1434c3cc57cab40a3e9cec93f579a | refs/heads/master | 2023-03-25T17:18:02.067678 | 2021-03-20T23:41:41 | 2021-03-20T23:41:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,046 | r | Missing_data_imputation_breast_cancer.R | # Read the text file with na.string set to '?'
# Load the Wisconsin breast cancer data; '?' marks missing values in the file.
df <- read.csv("breast-cancer-wisconsin.data.txt",header=F,na.strings = '?')

# How many NA/missing values are present?
sum(is.na.data.frame(df))

library('mice')
# Tabulate the missing-data pattern across columns.
md.pattern(df)

# Imputation with predictive mean matching: 5 imputed datasets, 50
# iterations, fixed seed for reproducibility.
tempData <- mice(df,m=5,maxit=50,meth='pmm',seed=500)
summary(tempData)
densityplot(tempData)

# x holds column V7 (presumably the column with the missing entries --
# confirm); miss.mean is its observed mean, kept for reference and not
# used again below.
x=as.vector(df$V7)
miss.mean = mean(x,na.rm = T)

# Imputation using mean
#==========================
# Replace the missing entries of a vector with the mean of its observed values.
NA2mean <- function(x) {
  x[is.na(x)] <- mean(x, na.rm = TRUE)
  x
}
# New dataframe by applying this function to every column of the original.
df.impute.mean<-as.data.frame(lapply(df, NA2mean))
# Check there is no missing value in the imputed dataframe (expect 0).
sum(is.na.data.frame(df.impute.mean))

# Imputation using regression
#==================================
# 'norm.predict' fills each missing value with the prediction of a fitted
# linear regression (deterministic; see the mice documentation).
imp1<-mice(df,method = 'norm.predict')
df.impute.regression <- complete(imp1)

# Imputation using regression with perturbation
#==================================================
# 'norm.nob' adds residual noise to the regression predictions.
imp2<-mice(df,method = 'norm.nob')
df.impute.regression.perturb <- complete(imp2)

# Density plots of imputed objects
#====================================
densityplot(imp1)
densityplot(imp2)

# Dataset with all rows containing missing values removed (for comparison).
df.removed<-na.omit(df)
# kNN classification using imputed datasets
#================================================================
library(caret)
ctrl <- trainControl(method="repeatedcv",number=10,repeats = 3)

# The train/plot pair below was previously copy-pasted four times;
# factor it out.
# Fit a kNN classifier on columns 1:10 with the class label in column 11,
# tuning k over 10 values with repeated 10-fold CV.
fit_knn <- function(dat) {
  train(x = dat[, 1:10], y = as.factor(dat[, 11]), method = "knn",
        trControl = ctrl, preProcess = c("center", "scale"),
        tuneLength = 10)
}

# Plot CV accuracy against the number of neighbors for one fitted model.
plot_knn_accuracy <- function(fit, dataset_label) {
  plot(fit, col = "dark red", lwd = 5, lty = 2, cex.lab = 1.25,
       cex.main = 1.5,
       main = paste0(dataset_label,
                     ": Accuracy of kNN with repeated 10-fold CV"),
       xlab = "Number of neighbors",
       ylab = "Accuracy of classification")
}

knnFit.mean <- fit_knn(df.impute.mean)
plot_knn_accuracy(knnFit.mean, "Mean Imputed Dataset")

knnFit.reg <- fit_knn(df.impute.regression)
plot_knn_accuracy(knnFit.reg, "Regression Imputed Dataset")

knnFit.perturb <- fit_knn(df.impute.regression.perturb)
plot_knn_accuracy(knnFit.perturb,
                  "Regression with Perturbation Imputed Dataset")

knnFit.removed <- fit_knn(df.removed)
plot_knn_accuracy(knnFit.removed, "Dataset with missing values removed")
# Errors of kNN classification with various imputed datasets
#================================================================
# Convert CV accuracy (one value per tuned k) to percent error.
error1=100*(1-knnFit.mean$results$Accuracy)
error2=100*(1-knnFit.reg$results$Accuracy)
error3=100*(1-knnFit.perturb$results$Accuracy)
error4=100*(1-knnFit.removed$results$Accuracy)

# Plot the errors on a common axis; indices 1:10 correspond to the 10
# tuned values of k (tuneLength = 10 above).
plot(c(1:10),error1,type="l", lwd=2, col="blue", ylim=c(3, 4),
     xlab="Number of neighbors",
     ylab="Error of the classification")
lines(c(1:10),error2,lwd=2, col="red")
lines(c(1:10),error3,lwd=2, col="orange")
# NOTE(review): lwd below has 3 entries for 4 legend items (recycled);
# likely intended c(2,2,2,2).
lines(c(1:10),error4,lwd=2, col="brown")
legend("topleft", legend=c("Mean impute","regression impute",
                           "Regression+Perturbation impute","Removed the missing values"),
       lwd=c(2,2,2), col=c("blue","red","orange","brown"))
|
b57498e497fcb07971f2224ee27ffb079732f55c | f91396d3c1827d2d5378da2ed3ed86ff90e94f20 | /man/print.madr.enumerate.Rd | 95320598d712635c24ab775b459b95a866ac18d2 | [] | no_license | cran/madr | 210133189f56e00a8402a344db35d2abd7c8724c | 904590d2d0d243f2a3468438f08409b1c77d0ce0 | refs/heads/master | 2020-07-26T08:18:31.152650 | 2016-09-05T20:23:43 | 2016-09-05T20:23:43 | 67,250,354 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 394 | rd | print.madr.enumerate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.madr.enumerate.r
\name{print.madr.enumerate}
\alias{print.madr.enumerate}
\title{Print function for madr.enumerate class}
\usage{
\method{print}{madr.enumerate}(x, ...)
}
\arguments{
\item{x}{madr.enumerate object}
\item{...}{ignored}
}
\description{
This function prints results from the madr.enumerate class.
}
|
bf937b30ed64a897398e23988a6704036fa50265 | 5ee80f0189bcd1401a6c86957c0d242a024f6474 | /04.String/String.R | 6c031968e2fd3344ceed670443423fde6c985da1 | [] | no_license | Mynuddin-dev/R-Programming-A-Z | 71bdf490f2b83a5dbbdcb96b9c3abcb0a8f98520 | 8f12ff33cd684a997aeb899fab9b7613fe9bb9b1 | refs/heads/main | 2023-04-08T21:48:56.181790 | 2021-04-24T14:44:27 | 2021-04-24T14:44:27 | 356,656,100 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,535 | r | String.R | a <- 'Start and end with single quote'
# --- String basics: quoting rules -----------------------------------------
print(a)

b <- "Start and end with double quotes"
print(b)

# NOTE(review): assigning to `c` masks the variable name only; later calls
# like c(6, 13.14521) still find base::c because R skips non-function
# bindings when resolving a name used in call position.
c <- "single quote ' in between double quotes"
print(c)

d <- 'Double quotes " in between single quote'
print(d)

# Examples of Invalid Strings (kept commented out: each would be a parse error)
# e <- 'Mixed quotes"
# print(e)
#
# f <- 'Single quote' inside single quote'
# print(f)
#
# g <- "Double quotes " inside double quotes"
# print(g)

# Concatenating Strings - paste() function
# paste(..., sep = " ", collapse = NULL)
# ... represents any number of arguments to be combined.
# sep represents any separator between the arguments. It is optional.
# collapse is used to eliminate the space in between two strings.
# But not the space within two words of one string.
a<- "Hello"
b <- 'How'
c <- "are you? "

print(paste(a,b,c))
print(paste(a,b,c, sep = "-"))
print(paste(a,b,c, sep = "", collapse = ""))

# Numbers and strings can be formatted to a specific style using format().
# format(x, digits, nsmall, scientific, width, justify = c("left", "right", "centre", "none"))
# x is the vector input.
# digits is the total number of digits displayed.
# nsmall is the minimum number of digits to the right of the decimal point.
# scientific is set to TRUE to display scientific notation.
# width indicates the minimum width to be displayed by padding blanks in the beginning.
# justify is the display of the string to left, right or center.

# Total number of digits displayed. Last digit rounded off.
result <- format(23.123456789, digits = 9)
print(result)

# Display numbers in scientific notation.
result <- format(c(6, 13.14521), scientific = TRUE)
print(result)

# The minimum number of digits to the right of the decimal point.
result <- format(23.47, nsmall = 5)
print(result)

# Format treats everything as a string.
result <- format(6)
print(result)

# Numbers are padded with blank in the beginning for width.
result <- format(13.7, width = 6)
print(result)

# Left justify strings.
result <- format("Hello", width = 8, justify = "l")
print(result)

# Justify string with center.
result <- format("Hello", width = 8, justify = "c")
print(result)

# nchar(): its parameter is the vector input.
r <- nchar("Count the number of characters")
print(r)

# Changing to Upper case.
r1 <- toupper("Changing To Upper")
print(r1)

# Changing to lower case.
r2 <- tolower("Changing To Lower")
print(r2)

# Extract characters from 5th to 7th position.
rt <- substring("abcdefghijkl", 5, 7)
print(rt)
|
063dc6ec9941ac15861068701a254607f2209e1f | 0102f791bde704863c4822abf1aee441b0715a53 | /risk_network/visNetwork-master/R/visTree.R | 02656456b5a9bfd8eaf4cb5cbeb564c80d16100d | [
"MIT"
] | permissive | WilliamTorsett/data_analytics | 34ade6b3505a793332f91c2b0190f7509559c78b | 01f98c5afaf652990897fd1f9d9df1ae2d3d2ee3 | refs/heads/master | 2022-12-06T14:40:42.492982 | 2020-08-30T14:40:30 | 2020-08-30T14:40:30 | 258,614,710 | 0 | 1 | MIT | 2020-08-30T14:40:31 | 2020-04-24T20:15:57 | HTML | UTF-8 | R | false | false | 37,824 | r | visTree.R | #' Visualize Recursive Partitioning and Regression Trees (rpart object)
#'
#' Visualize Recursive Partitioning and Regression Trees \code{rpart}. Have a look at \link{visTreeEditor} to edit and get back the network, or at \link{visTreeModuleServer} to use a custom tree module in R
#'
#' @param object \code{rpart}, rpart object
#' @param data \code{data.frame}, adding mini-graphics in tooltips using \code{sparkline} and \code{tooltipColumns} ?
#' @param tooltipColumns \code{numeric}, indice of columns used in tooltip. All by default.
#' So, we add boxplot / pie focus on sub-population vs all population using \code{sparkline} package. \code{NULL} to disable.
#' @param main Title. See \link{visNetwork}
#' @param submain Subtitle. See \link{visNetwork}
#' @param footer Footer. See \link{visNetwork}
#' @param direction \code{character}, The direction of the hierarchical layout.
#' The available options are: UD, DU, LR, RL. To simplify:
#' up-down, down-up, left-right, right-left. Default UD. See \link{visHierarchicalLayout}
#' @param nodesPopSize \code{boolean}, nodes sizes depends on population ? Default to FALSE
#' @param fallenLeaves \code{boolean} leaf nodes at the bottom of the graph ? Default to FALSE
#' @param nodesFontSize \code{numeric}, size of labels of nodes. Default to 16
#' @param edgesFontSize \code{numeric}, size of labels of edges Default to 14
#' @param legendFontSize \code{numeric}, size of labels of nodes in legend. Default to 16
#' @param legendNodesSize \code{numeric}, size of nodes in legend. Default to 22
#' @param edgesFontAlign \code{character}, for edges only. Default tp 'horizontal'. Possible options: 'horizontal' (Default),'top','middle','bottom'. See \link{visEdges}
#' @param colorVar \code{character}, colors to use or \code{data.frame} To set color of variables. 2 columns :
#' \itemize{
#' \item{"variable"}{ : names of variables}
#' \item{"color"}{ : colors (in hexa). See examples}
#' }
#' @param colorY if classification tree : \code{character} colors to use or \code{data.frame} 2 columns :
#' \itemize{
#' \item{"modality"}{ : levels of Y}
#' \item{"color"}{ : colors (in hexa)}
#' }
#' if regression tree : \code{character}, 2 colors (min and max, in hexa)
#' @param colorEdges \code{character}, color of edges, in hexa. Default to #8181F7
#' @param legend \code{boolean}, add legend ? Default TRUE. \link{visLegend}
#' @param legendWidth \code{numeric}, legend width, between 0 and 1. Default 0.1
#' @param legendNcol \code{numeric}, number of columns in legend. Default 1
#' @param legendPosition \code{character}, one of "left" (Default) or "right"
#' @param highlightNearest \code{list}, Highlight nearest nodes. See \link{visOptions}
#' @param collapse \code{list}, collapse or not using double click on a node ? See \link{visOptions}
#' @param updateShape \code{boolean}, in case of collapse, udpate cluster node shape as terminal node ? Default to TRUE
#' @param tooltipDelay \code{numeric}, delay for tooltips in millisecond. Default 500
#' @param rules \code{boolean}, add rules in tooltips ? Default to TRUE
#' @param simplifyRules \code{boolean}, simplify rules writing
#' @param digits \code{numeric}, number of digits. Default to 3
#' @param height \code{character}, default to "600px"
#' @param width \code{character}, default to "100\%"
#' @param minNodeSize \code{numeric}, in case of \code{nodesPopSize}, minimum size of a node. Default to 15. Else, nodes size is minNodeSize + maxNodeSize / 2
#' @param maxNodeSize \code{numeric}, in case of \code{nodesPopSize}, maximum size of a node. Default to 30. Else, nodes size is minNodeSize + maxNodeSize / 2
#' @param shapeVar \code{character}, shape for variables nodes See \link{visNodes}
#' @param shapeY \code{character}, shape for terminal nodes See \link{visNodes}
#' @param export \code{boolean}, add export button. Default to TRUE
#'
#' @return a visNetwork object
#'
#' @seealso \link{visTreeEditor}, \link{visTreeModuleServer}, \link{visNetworkEditor}
#'
#' @references See online documentation \url{http://datastorm-open.github.io/visNetwork/}
#'
#' @examples
#'
#' \dontrun{
#'
#' library(rpart)
#'
#' # Basic classification tree
#' res <- rpart(Species~., data=iris)
#' visTree(res, data = iris, main = "Iris classification Tree")
#'
#' # Basic regression tree
#' res <- rpart(Petal.Length~., data=iris)
#' visTree(res, edgesFontSize = 14, nodesFontSize = 16)
#'
#' # Complex tree
#' data("solder")
#' res <- rpart(Opening~., data = solder, control = rpart.control(cp = 0.00005))
#' visTree(res, data = solder, nodesPopSize = TRUE, minNodeSize = 10,
#' maxNodeSize = 30, height = "800px")
#'
#' # ----- Options
#' res <- rpart(Opening~., data = solder, control = rpart.control(cp = 0.005))
#'
#' # fallen leaves + align edges label & size
#' visTree(res, fallenLeaves = TRUE, height = "500px",
#' edgesFontAlign = "middle", edgesFontSize = 20)
#'
#' # disable rules in tooltip, and render tooltip faster
#' # enable hover highlight
#' visTree(res, rules = FALSE, tooltipDelay = 0,
#' highlightNearest = list(enabled = TRUE, degree = list(from = 50000, to = 0),
#' hover = TRUE, algorithm = "hierarchical"))
#'
#' # Change color with data.frame
#' colorVar <- data.frame(variable = names(solder),
#' color = c("#339933", "#b30000","#4747d1","#88cc00", "#9900ff","#247856"))
#'
#' colorY <- data.frame(modality = unique(solder$Opening),
#' color = c("#AA00AA", "#CDAD15", "#213478"))
#'
#' visTree(res, colorEdges = "#000099", colorVar = colorVar, colorY = colorY)
#'
#' # Change color with vector
#' visTree(res, colorEdges = "#000099",
#' colorVar = substring(rainbow(6), 1, 7),
#' colorY = c("blue", "green", "orange"))
#'
#'
#' # Use visNetwork functions to add more options
#' visTree(res) %>%
#' visOptions(highlightNearest = TRUE)
#'
#'
#' }
#'
#' @export
#'
#' @importFrom grDevices hcl
#' @importFrom grDevices colorRamp
#' @importFrom grDevices rgb
#'
visTree <- function(object,
                    data = NULL,
                    tooltipColumns = if(!is.null(data)){1:ncol(data)} else {NULL},
                    main = "",
                    submain = "",
                    footer = "",
                    direction = "UD",
                    fallenLeaves = FALSE,
                    rules = TRUE,
                    simplifyRules = TRUE,
                    shapeVar = "dot",
                    shapeY = "square",
                    colorVar = NULL,
                    colorY = NULL,
                    colorEdges = "#8181F7",
                    nodesFontSize = 16,
                    edgesFontSize = 14,
                    edgesFontAlign = "horizontal",
                    legend = TRUE,
                    legendNodesSize = 22,
                    legendFontSize = 16,
                    legendWidth = 0.1,
                    legendNcol = 1,
                    legendPosition = "left",
                    nodesPopSize = FALSE,
                    minNodeSize = 15,
                    maxNodeSize = 30,
                    highlightNearest = list(enabled = TRUE,
                                            degree = list(from = 50000, to = 0), hover = FALSE,
                                            algorithm = "hierarchical"),
                    collapse = list(enabled = TRUE, fit = TRUE, resetHighlight = TRUE, 
                                    clusterOptions = list(fixed = TRUE, physics = FALSE)),
                    updateShape = TRUE,
                    tooltipDelay = 500,
                    digits = 3, 
                    height = "600px",
                    width = "100%", 
                    export = TRUE){
  
  # controls
  # Validate every user-facing argument up front so failures are explicit.
  stopifnot("rpart" %in% class(object))
  stopifnot("character" %in% class(direction))
  stopifnot(direction %in% c("UD", "LR", "RL", "DU"))
  stopifnot(length(direction) == 1)
  stopifnot("logical" %in% class(nodesPopSize))
  stopifnot("numeric" %in% class(minNodeSize) | "integer" %in% class(minNodeSize))
  stopifnot("numeric" %in% class(maxNodeSize) | "integer" %in% class(maxNodeSize))
  stopifnot("logical" %in% class(fallenLeaves))
  stopifnot("logical" %in% class(simplifyRules))
  stopifnot("numeric" %in% class(nodesFontSize) | "integer" %in% class(nodesFontSize))
  stopifnot("numeric" %in% class(edgesFontSize) | "integer" %in% class(edgesFontSize))
  stopifnot("numeric" %in% class(legendFontSize) | "integer" %in% class(legendFontSize))
  stopifnot("character" %in% class(edgesFontAlign))
  
  if(!is.null(colorVar)){
    stopifnot(any(c("data.frame", "character") %in% class(colorVar)))
  }
  
  if(!is.null(colorY)){
    # colorY constraints differ: classification ("class") accepts a
    # modality/color table, regression ("anova") at most two colors
    if(object$method == "class"){
      stopifnot(any(c("data.frame", "character") %in% class(colorY)))
    }
    if(object$method == "anova"){
      stopifnot("character"%in%class(colorY))
      stopifnot(length(colorY) <= 2)
    }
  }
  if(!is.null(colorEdges)){
    stopifnot("character" %in% class(colorEdges))
  }
  
  stopifnot("logical" %in% class(legend))
  stopifnot("numeric" %in% class(legendWidth) | "integer" %in% class(legendWidth))
  stopifnot("numeric" %in% class(legendNcol) | "integer" %in% class(legendNcol))
  stopifnot("character" %in% class(legendPosition))
  stopifnot(any(c("logical", "list") %in% class(highlightNearest)))
  stopifnot(any(c("logical", "list") %in% class(collapse)))
  stopifnot("numeric" %in% class(tooltipDelay)| "integer" %in% class(tooltipDelay))
  stopifnot("logical" %in% class(rules))
  stopifnot("numeric" %in% class(digits)| "integer" %in% class(digits))
  stopifnot("character" %in% class(height))
  stopifnot("character" %in% class(width))
  stopifnot("character" %in% class(shapeVar))
  stopifnot("character" %in% class(shapeY))
  
  if(!is.null(tooltipColumns)){
    # tooltipColumns may be given as names: convert to column indices
    if(class(tooltipColumns) %in% c("character", "factor")){
      tooltipColumns <- which(tooltipColumns %in% colnames(data))
    } 
    stopifnot(class(tooltipColumns)[1] %in% c("numeric", "integer"))
    stopifnot(!is.null(data))
    stopifnot(max(tooltipColumns) <= ncol(data))
    # character columns are turned into factors so they get pie sparklines
    which_character <- which(sapply(data[, tooltipColumns, drop = FALSE], 
                                    function(x) class(x)[1]) %in% "character")
    if(length(which_character) > 0){
      for(i in tooltipColumns[which_character]){
        data[, i] <- as.factor(data[, i])
      }
    }
  }
  
  if(!is.null(tooltipColumns) | rules){
    # mini-graphics in tooltips need the sparkline htmlwidget
    if(!requireNamespace("sparkline", quietly = TRUE)){
      stop("'sparkline' package is needed for this function")
    }
  }
  
  # ------------------------------
  # get information from rpart object
  # row.names(object$frame) are the binary node ids (children of k = 2k, 2k+1)
  rpartNodesNames <- row.names(object$frame)
  infoClass <- NULL
  # ancestor path (root ... node) for every node, via .parent()
  parentsDec <- list(lapply(rpartNodesNames, function(X)(.parent(as.numeric(X)))))
  infoVar <- object$frame$var
  infoRules <- .vis_give_rules(object)
  detailRules <- .rpart_lists(object)
  
  colLabels <- attributes(object$terms)$term.labels
  colClass <- attributes(object$terms)$dataClasses
  colClass <- colClass[names(colClass)%in%colLabels]
  
  if(length(rpartNodesNames) > 1){
    # derive, for each non-root node: its parent (from), itself (to),
    # and its depth (level) from the length of its ancestor path
    rpartHier <- sapply(as.numeric(rpartNodesNames[2:length(rpartNodesNames)]), function(X){
      info <- .parent(X)
      list(info[length(info)-1], info[length(info)], length(info))
    })
    
    from <- unlist(rpartHier[1,])
    to <- unlist(rpartHier[2,])
    level <- c(1, unlist(rpartHier[3,]))
    
    # ------------------------------
    # build edge info (label + tootip)
    edgesLabels <- character(length(to))
    edgesTooltip <- character(length(to))
    
    lapply(1:length(to), function(i){
      # infoRules value is like "L3" / "R3": side + index into detailRules
      cur_rule <- strsplit(infoRules[paste0("Node", to[i])], ",")[[1]]
      sens <- substr(cur_rule, 1, 1)
      ind_rule <- as.numeric(substr(cur_rule, 2, nchar(cur_rule)))
      rule <- detailRules[[sens]][[ind_rule]]
      operator <- attributes(rule)$compare
      # if(names(operator) %in% names(colClass[which(colClass %in% c("factor", "character", "ordered"))])){
      if(operator %in% "="){
        # categorical split: list the levels, no operator shown
        operator <- NULL
        edgesLabels[i] <<- paste(rule, collapse = ", ")
      }else{
        rule <- round(rule, digits)
        edgesLabels[i] <<- paste(operator, paste(rule, collapse = ", "))
      }
      edgesTooltip[i] <<- paste0('<div style="text-align:center;"><b>', names(attr(rule, "compare")), "</b></div>", 
                                 paste0('<div style="text-align:center;">', operator, rule, "</div>", collapse = ""))
      invisible()
    })
    
    edgesLabelsFull <- edgesLabels
    # long edge labels are shortened for display; full text kept for rules
    formatLabels <- function(x){
      ifelse(nchar(x) > 10, paste0(substr(x, 1, 7), "..."), x)
    }
    edgesLabels <- sapply(edgesLabels, formatLabels)
  } else {
    # root-only tree: a single node at level 1, no edges
    level <- 1
  }
  
  # ------------------------------
  # nodes
  if(length(rpartNodesNames) > 1){
    nodes_pop <- object$frame$n[match(to, rpartNodesNames)]
  } else {
    nodes_pop <- object$frame$n
  }
  
  nodes_var <- as.character(infoVar)
  nodes_var_color <- nodes_var[nodes_var != "<leaf>"]
  shape <- ifelse(infoVar != "<leaf>", shapeVar, shapeY)
  
  SortLabel <- sort(unique(nodes_var_color))
  # resolve / complete the variable -> color mapping
  colorVar <- .generateVarColor(colorVar, nodes_var_color, SortLabel)
  nodes_color <- as.character(colorVar$color[match(nodes_var, colorVar$variable)])
  
  # get stats for nodes (mean / variance / proba)
  statsNodes <- NULL
  
  # Classification TREE
  if(!is.null(attributes(object)$ylevels)){
    infoClass <- attributes(object)$ylevels
    nlevelsClass <- length(infoClass)
    # yval2 columns: yval, counts per class, probabilities per class, nodeprob
    probaClass <- object$frame[,"yval2"]
    effectif <- data.frame(probaClass[,2:(nlevelsClass+1), drop = F])
    probs <- data.frame(probaClass[,(nlevelsClass+2):(ncol(probaClass)-1), drop = F])
    
    probsHtml <- probs
    for(i in 1:length(infoClass)){
      probsHtml[,i] <- paste0(infoClass[i], " : <b>", 
                              round(probsHtml[,i], digits)*100, "%</b>",
                              " (", effectif[,i], ")")
    }
    statsNodes <- apply(probsHtml, 1, function(x){paste0(x, collapse = "<br>")})
  }else{
    # Regression TREE
    # unbiased variance estimate; root nodes with n == 1 yield Inf -> NA
    varNodes <- round(object$frame$dev/(object$frame$n - 1),digits)
    varNodes[which(varNodes == Inf)] <- NA
    statsNodes <- paste0("Mean : <b>" , round(object$frame$yval,digits),
                         "</b><br>Variance : <b>",varNodes, "</b>")
  }
  
  # ------------------------------
  # Build rules for tooltip
  # For each node, collect the chain of decisions from the root, optionally
  # merging redundant conditions on the same variable (simplifyRules).
  tooltipRules <- list(NULL)
  if(length(parentsDec[[1]]) > 1){
    for(i in 2:length(parentsDec[[1]])){
      use <- parentsDec[[1]][[i]]
      varDecisions <- nodes_var[match(as.character(use[-length(use)]), rpartNodesNames)]
      decisionsrules <- edgesLabelsFull[match(as.character(use), rpartNodesNames)-1]
      varDecisionBegin <- unique(varDecisions)
      if(simplifyRules){
        # keep only the tightest condition per (variable, direction) pair
        filtre <- ifelse(colClass[varDecisions]%in% c("character", "factor", "ordered"),
                         varDecisions,
                         paste0(varDecisions, substr(decisionsrules, 1 ,1)))
        tabFiltre <- table(filtre) > 1
        if(length(which(tabFiltre))>0){
          filtres <- names(tabFiltre)[which(tabFiltre)]
          filtreOut <- NULL
          for(j in filtres){
            filtreOut <- c(filtreOut, max(which(j== filtre)))
          }
          keeprules <- sort(c(which(!filtre%in%filtres), filtreOut))
          varDecisions <- varDecisions[keeprules]
          decisionsrules <- decisionsrules[keeprules]
        }
        filtre <- varDecisions
        varDecisionsOrder <- varDecisions
        tabFiltre <- table(filtre)>1
        if(length(which(tabFiltre))>0){
          # a variable bounded on both sides becomes "a <= X < b"
          filtres <- names(tabFiltre)[which(tabFiltre)]
          for(j in filtres){
            rulesNumSimpl <- decisionsrules[which(varDecisions == j)]
            down <- which(substr(rulesNumSimpl,1,1) == ">")
            newLib <- paste0("", substr(rulesNumSimpl[down], 4, nchar(rulesNumSimpl[down])),
                             " <= <b>", j, "</b> < ", substr(rulesNumSimpl[-down], 3,
                                                             nchar(rulesNumSimpl[-down])))
            decisionsrules <- decisionsrules[-which(varDecisions == j)]
            varDecisions <- varDecisions[-which(varDecisions == j)]
            varDecisionsOrder <- varDecisionsOrder[-which(varDecisionsOrder == j)]
            varDecisionsOrder <- c(varDecisionsOrder, j)
            varDecisions <- c(varDecisions, "")
            decisionsrules <- c(decisionsrules, newLib)
          }
        }
        varDecisions <- varDecisions[match(varDecisionBegin, varDecisionsOrder )]
        decisionsrules <- decisionsrules[match(varDecisionBegin, varDecisionsOrder )]
      }
      tooltipRules[[i]] <- paste0(paste("<b>",varDecisions, "</b>", decisionsrules), collapse = "<br>")
    }
  }
  
  # ------------------------------
  # Sparklines for nodes
  # Numeric columns -> boxplot sparklines, other columns -> pie sparklines,
  # each comparing the node sub-population with the full population.
  labelComplete <- NULL
  if(!is.null(data) & !is.null(tooltipColumns)){
    data <- data[, tooltipColumns, drop = FALSE]
    nodesNames <- as.integer(rownames(object$frame))
    classDtaIn <- unlist(lapply(data, function(X){class(X)[1]}))
    classDtaIn <- classDtaIn%in%c("numeric", "integer")
    dataNum <- data[,classDtaIn, drop = FALSE]
    if(ncol(dataNum) > 0){
      minPop <- apply(dataNum, 2, min)
      maxPop <- apply(dataNum, 2, max)
      meanPop <- colMeans(dataNum)
      popSpkl <- apply(dataNum,2, function(X){
        .addSparkLineOnlyJs(X, type = "box")
      })
      labelComplete <- sapply(nodesNames, function(Z){
        .giveLabelsFromDfWhichInvisible(subsetRpart(object, dataNum, Z), 
                                        popSpkl, minPop, maxPop, meanPop)
      })
    }
    dataOthr <- data[,!classDtaIn, drop = FALSE]
    if(ncol(dataOthr) > 0){
      popSpkl <- apply(dataOthr,2, function(X){
        Y <- sort(table(X))
        spl <- .addSparkLineOnlyJs(Y , type = "pie", labels = names(Y))
        if(length(Y) > 1){
          Y <- data.frame(Y)
        } else {
          Y <- data.frame(X = names(Y), Freq = Y)
        }
        # truncate long level labels for tooltip readability
        Y$X <- ifelse(nchar(as.character(Y$X) ) > 9, 
                      paste0(substr(Y$X, 1, 8), "..."), as.character(Y$X))
        modP <- Y$X[length(Y$X)]
        paste0(spl, " On pop. (mode: <b>", modP, "</b>)")
      })
      namOrder <- lapply(dataOthr, function(X){
        names(sort(table(X)))
      })
      labelComplete <- paste(labelComplete, sapply(nodesNames, function(Z){
        .giveLabelsFromDfChrInvisible(subsetRpart(object, dataOthr, Z),
                                      popSpkl, namOrder)} ) )
    }
    # collapsible "Details" section; the inline JS re-renders sparklines
    # when the hidden div is toggled visible
    labelComplete <- paste0('<hr class = "rPartvisNetwork">
  <div class ="showOnMe"><div style="text-align:center;"><U style="color:blue;" class = "classActivePointer">Details</U></div>
  <div class="showMeRpartTTp" style="display:none;margin-top: -15px">
  ',labelComplete,
                            '</script>',
                            '<script type="text/javascript">', 
                            '$(document).ready(function(){
                            $(".showOnMe").click(function(){
                            $(".showMeRpartTTp").toggle();
                            $.sparkline_display_visible();
                            });
                            });</script>','</div></div>')
  }
  
  # ------------------------------
  # Terminal nodes colors
  ind_terminal <- which(nodes_var == "<leaf>")
  
  if(!is.null(attributes(object)$ylevels)){
    # Classification tree
    listColorY <- .generateYColor(object, colorY, nodes_var, digits = digits, infoClass = infoClass, probs = probs)
    colNodClust <- as.character(listColorY$colorY$color[match(listColorY$vardecidedClust, listColorY$colorY$modality)])
    nodes_color[ind_terminal] <- colNodClust[ind_terminal]
    nodes_var[ind_terminal] <- listColorY$vardecidedClust[ind_terminal]
  }else{
    # regression tree
    listColorY <- .generateYColor(object, colorY, nodes_var, digits = digits)
    # for legend color
    colorMin <- grDevices::rgb(listColorY$colRamp(0), maxColorValue=255)
    colorMax <- grDevices::rgb(listColorY$colRamp(1), maxColorValue=255)
    # terminal nodes
    nodes_color[ind_terminal] <- listColorY$colorTerm[ind_terminal]
    classTerminal <- round(object$frame$yval, digits)
    nodes_var[ind_terminal] <- listColorY$vardecidedClust[ind_terminal]
    # cluster
    colNodClust <- listColorY$colorTerm
  }
  
  if(rules) {
    idToSample <- length(tooltipRules)
    idS <- sapply(1:idToSample, function(X){paste0(sample(LETTERS, 15), collapse = "")})
    idS <- paste0("myIdToDisplay", idS)
    # <div onclick="toggle_visibility(\'',idS,'\')">
    #                 <U>RULES</U></div><div id="',idS,'">',
    # tooltipRules,'</div>
    # collapsible "Rules" section in the tooltip, same toggle mechanism
    # as the "Details" section above
    finalHtmlRules <- paste0(
      '<hr class = "rPartvisNetwork">
      <div class ="showOnMe2"><div style="text-align:center;"><U style="color:blue;" class = "classActivePointer">Rules</U></div>
      <div class="showMeRpartTTp2" style="display:none;">
      ',tooltipRules,
      '</script>',
      '<script type="text/javascript">', 
      '$(document).ready(function(){
      $(".showOnMe2").click(function(){
      $(".showMeRpartTTp2").toggle();
      $.sparkline_display_visible();
      });
      });</script>','</div></div>
      ')
  }else{
    finalHtmlRules <- ""
  }
  
  finalNodesTooltip <- paste0(
    '<div style="text-align:center;">', "N : <b>", 
    round(object$frame$n/object$frame$n[1],digits)*100,
    "%</b> (", object$frame$n,")<br>", "Complexity : <b>", 
    round(object$frame$complexity, digits),
    "</b><br>", statsNodes, 
    ifelse(!unlist(lapply(tooltipRules, is.null)), finalHtmlRules, ""), '</div>', 
    labelComplete)
  
  # ------------------------------
  # Nodes size on population
  value = object$frame$n
  # when nodesPopSize is FALSE, all nodes get the same (average) size
  if(nodesPopSize){
    minNodeSize = minNodeSize
    maxNodeSize = maxNodeSize
  }else{
    minNodeSize = (minNodeSize + maxNodeSize) / 2
    maxNodeSize = minNodeSize
  }
  
  # ------------------------------
  # Legend
  # one legend entry per splitting variable, plus one per Y modality
  # (classification only; legendY is empty for regression trees)
  legendX <- lapply(SortLabel[SortLabel != "<leaf>"], function(x){
    col <- as.character(colorVar$color[match(x, colorVar$variable)])
    list(label = x, color = col, shape = shapeVar, size = legendNodesSize, 
         Leaf = 0, font.size = legendFontSize)
  })
  
  legendY <- lapply(infoClass, function(X){
    # if(is.null(colorY)){
    #   col <- colorTerm[which(infoClass== X)]
    # }else{
    col <- as.character(listColorY$colorY$color[match(X, listColorY$colorY$modality)])
    # }
    list(label = X, color = col, shape = shapeY, size = legendNodesSize, 
         Leaf = 1, font.size = legendFontSize)
  })
  
  legendFinal <- do.call(rbind,(lapply(c(legendX, legendY), data.frame)))
  if(!is.null(legendFinal)){
    # high ids so legend nodes never collide with tree node ids
    legendFinal$id <- 10000:(10000 + (nrow(legendFinal))-1)
  }
  
  # ------------------------------
  # Final data for visNetwork
  nodes <- data.frame(id = as.numeric(rpartNodesNames), label =nodes_var, 
                      level = level, color = nodes_color, value = value, 
                      shape = shape, title = finalNodesTooltip, fixed = TRUE,
                      colorClust = colNodClust, labelClust = listColorY$vardecidedClust, Leaf = 0,
                      font.size = nodesFontSize, scaling.min = minNodeSize, scaling.max = maxNodeSize)
  
  nodes$Leaf[ind_terminal] <- 1
  if(fallenLeaves){
    nodes$level[which(nodes$shape %in% shapeY)] <- max(nodes$level)
  }
  
  if(length(rpartNodesNames) > 1){
    smooth <- list(enabled = TRUE, type = "cubicBezier", roundness = 0.5)
    edges <- data.frame(id = paste0("edge", 1:length(from)),from = from, to = to, label = edgesLabels, 
                        value = nodes_pop, title = edgesTooltip, color = colorEdges,
                        font.size = edgesFontSize, font.align = edgesFontAlign, smooth = smooth)
  } else {
    edges <- NULL
  }
  
  # ------------------------------
  # Coordinate
  # if(coordinates){
  #   rpartcoParams <- list(uniform = TRUE, branch = 0.2, nspace = 0.2, minbranch = 0.3)
  #   Xp <- rpart:::rpartco(object, rpartcoParams)$x
  #   nodes$x <- Xp * 100
  #   nodes$y <- nodes$level * 150
  #   nodes$y <- nodes$y - mean(nodes$y)
  #   nodes$x <- nodes$x - mean(nodes$x)
  #   
  #   intervalPositionX <- max(nodes$x)
  #   CorrectPosition <- legendWidth*intervalPositionX
  #   nodes$x <- nodes$x + CorrectPosition / 8
  #   nodes$x <- nodes$x / (1 + legendWidth)
  # }
  
  tree <- visNetwork(nodes = nodes, edges = edges, height = height, width = width, main = main,
                     submain = submain, footer = footer) %>% 
    visHierarchicalLayout(direction = direction) %>%
    visOptions(highlightNearest =  highlightNearest, collapse = collapse) %>%
    visInteraction(tooltipDelay = tooltipDelay,
                   dragNodes = FALSE, selectConnectedEdges = FALSE,
                   tooltipStyle = 'position: fixed;visibility:hidden;padding: 5px;
                white-space: nowrap;
                font-family: cursive;font-size:12px;font-color:purple;background-color: #E6E6E6;
                border-radius: 15px;') %>% 
    visEdges(scaling = list(label = list(enabled = FALSE))) %>%
    visEvents(type = "once", stabilized = "function() { 
        this.setOptions({layout:{hierarchical:false}, physics:{solver:'barnesHut', enabled:true, stabilization : false}, nodes : {physics : false, fixed : true}});
  }")
  
  if(!is.null(legendFinal)){
    tree <- visLegend(tree, addNodes = legendFinal, useGroups = FALSE, enabled = legend,
                      width = legendWidth, ncol = legendNcol, position = legendPosition)
  }
  
  # rajout informations class tree
  # stash tree-specific metadata on the widget for visTreeModule / proxy updates
  tree$x$tree <- list(updateShape = updateShape, shapeVar = shapeVar, 
                      shapeY = shapeY, colorVar = colorVar, colorY = listColorY)
  
  if(export){
    tree <- tree%>%visExport()
  }
  
  if(!is.null(labelComplete) | rules){
    tree <- tree %>% sparkline::spk_add_deps()
  }
  
  tree
}
# Internal helper: push updated tree-shape options to an already rendered
# network through a shiny visNetworkProxy.
#
# graph       : a visNetwork_Proxy object (from visNetworkProxy), required
# updateShape : logical, update cluster node shape as terminal node ?
# shapeVar    : character, shape for variable (non-terminal) nodes
# shapeY      : character, shape for terminal nodes
#
# Sends a "visShinyUpdateTree" custom message to the client and returns the
# proxy so calls can be chained.
.visUpdateTree <- function(graph, updateShape = NULL, shapeVar = NULL, shapeY = NULL){
  if(!any(class(graph) %in% "visNetwork_Proxy")){
    # fixed message: it previously referred to 'visFocus' (copy/paste error)
    stop("Can't use visUpdateTree with visNetwork object. Only within shiny & using visNetworkProxy")
  }
  
  tree <- list()
  tree$updateShape <- updateShape
  tree$shapeVar <- shapeVar
  tree$shapeY <- shapeY
  
  data <- list(id = graph$id, tree = tree)
  
  graph$session$sendCustomMessage("visShinyUpdateTree", data)
  
  graph
}
#Legend regression tree gradient color, still in dev
# ' <div style= "background: red;
# background: -webkit-linear-gradient(colorMax,',',colorMin,');
# background: -o-linear-gradient(colorMax,',',colorMin,');
# background: -moz-linear-gradient(colorMax,',',colorMin,');
# background: linear-gradient(colorMax,',',colorMin,');">Test gradient color</div>'
# ,
# Internal helper: path from the root (node 1) down to node `x` in rpart's
# binary node numbering (children of node k are 2k and 2k + 1).
# Returns the vector of node ids, root first, `x` last.
.parent <- function(x) {
  path <- x
  while (path[1] != 1) {
    child <- path[1]
    # even ids are left children (parent = k / 2),
    # odd ids are right children (parent = (k - 1) / 2)
    path <- c(if (child %% 2 == 0L) child / 2 else (child - 1) / 2, path)
  }
  path
}
# Internal helper: for each node of the rpart frame, encode which split of
# its parent leads to it.
# Returns a named character vector ("Node1", "Node2", ...) whose values are
# "No" for the root and otherwise "L<i>" / "R<i>", where <i> indexes the
# parent split in the lists produced by .rpart_lists() ("L" = even node id =
# left child, "R" = odd node id = right child).
.vis_give_rules <- function (object) 
{
  frame <- object$frame
  ruleNums <- as.numeric(row.names(frame))
  is.leaf <- (frame$var == "<leaf>")
  # sequential index of each internal (splitting) node; used to address
  # the corresponding entry of .rpart_lists() output
  frame[!is.leaf, "order"] <- seq_along(which(!is.leaf))
  
  # for every non-root node: TRUE when the node id is even (left child)
  TF <- as.numeric(row.names(frame))[-1]%%2==0
  # parent id of every non-root node (binary heap numbering)
  ret <- ifelse(TF, 
                as.numeric(row.names(frame))[-1]/2, 
                (as.numeric(row.names(frame))[-1] - 1)/2)
  
  ordeR <- frame[as.character(ret),"order"]
  
  ret <- ifelse(TF, paste0("L", ordeR), paste0("R", ordeR))
  # root has no incoming rule
  ret <- c("No", ret)
  rpartNodesNames <- as.numeric(row.names(frame))
  out <- ret
  names(out) <- paste0("Node", rpartNodesNames)
  return(out)
}
# Internal helper: split descriptions for every internal node of an rpart
# tree (adapted from rpart's own labelling internals).
# Returns list(L = ..., R = ...): for each splitting node, the left-branch
# and right-branch condition, named after the splitting variable, with a
# "compare" attribute holding the operator ("<", ">=" or "=" for
# categorical splits, where the value is the vector of levels).
# NOTE(review): indexing into object$splits / object$csplit follows the
# rpart.object layout -- ncat (column 2) < 2 means a continuous split,
# > 1 a categorical one whose row in csplit is given by column 4.
.rpart_lists <- function (object) 
{
  ff <- object$frame
  n <- nrow(ff)
  if (n == 1L) 
    return("root")
  is.leaf <- (ff$var == "<leaf>")
  whichrow <- !is.leaf
  vnames <- ff$var[whichrow]
  # index of each node's primary split row inside object$splits
  index <- cumsum(c(1, ff$ncompete + ff$nsurrogate + (!is.leaf)))
  irow <- index[c(whichrow, FALSE)]
  ncat <- object$splits[irow, 2L]
  lsplit <- rsplit <- list()
  if (any(ncat < 2L)) {
    # continuous splits: cutpoint + direction (sign of ncat)
    jrow <- irow[ncat < 2L]
    cutpoint <- object$splits[jrow, 4L]
    temp1 <- (ifelse(ncat < 0, "<", ">="))[ncat < 2L]
    temp2 <- (ifelse(ncat < 0, ">=", "<"))[ncat < 2L]
    lsplit[ncat < 2L] <- cutpoint
    rsplit[ncat < 2L] <- cutpoint
  }
  if (any(ncat > 1L)) {
    # categorical splits: csplit row says, per level, 1 = left, 3 = right
    xlevels <- attr(object, "xlevels")
    jrow <- seq_along(ncat)[ncat > 1L]
    crow <- object$splits[irow[ncat > 1L], 4L]
    cindex <- (match(vnames, names(xlevels)))[ncat > 1L]
    lsplit[jrow] <- lapply(seq_along(jrow), function(i) xlevels[[cindex[i]]][object$csplit[crow[i], 
                                                                                          ] == 1L])
    rsplit[jrow] <- lapply(seq_along(jrow), function(i) xlevels[[cindex[i]]][object$csplit[crow[i], 
                                                                                          ] == 3L])
  }
  # attach the comparison operator of each condition as an attribute
  lsplit <- lapply(seq_along(lsplit), function(i) structure(lsplit[[i]], 
                                                            compare = ifelse(ncat[i] < 2L, ifelse(ncat[i] < 0, "<", 
                                                                                                  ">="), "=")))
  rsplit <- lapply(seq_along(lsplit), function(i) structure(rsplit[[i]], 
                                                            compare = ifelse(ncat[i] < 2L, ifelse(ncat[i] < 0, ">=", 
                                                                                                  "<"), "=")))
  names(lsplit) <- vnames
  names(rsplit) <- vnames
  results <- list(L = lsplit, R = rsplit)
  return(results)
}
# Internal helper: rows of `data` falling into node `node` of the fitted
# rpart `tree`. Each observation is mapped to its leaf via tree$where and
# kept when that leaf belongs to the subtree rooted at `node`.
subsetRpart <- function(tree,data, node = 1L) {
  # ancestor path of every node id present in the frame
  wh <- sapply(as.integer(rownames(tree$frame)), .parent)
  # node ids lying on a path that passes through `node` (its subtree + ancestors)
  wh <- unique(unlist(wh[sapply(wh, function(x) node %in% x)]))
  # wh >= node restricts to descendants of `node` (ids grow down the tree)
  data[rownames(tree$frame)[tree$where] %in% wh[wh >= node], , drop = FALSE]
}
# Internal helper: build the variable -> color table used to color the
# non-terminal nodes.
# colorVar  : NULL (automatic hcl palette), a character vector of colors
#             (recycled over the variables), or a data.frame(variable, color)
#             which is cleaned / completed against the tree's variables.
# nodes_var : splitting variable of each non-terminal node.
# SortLabel : sorted unique variable labels (may contain "<leaf>").
# Returns a data.frame with columns `variable` and `color`.
.generateVarColor <- function(colorVar, nodes_var, SortLabel){
  used_vars <- setdiff(SortLabel, "<leaf>")
  if(is.null(colorVar)){
    # automatic palette: one hcl color per distinct variable
    colorVar <- data.frame(variable = unique(nodes_var),
                           color = grDevices::hcl(seq(0, 250, length = length(unique(nodes_var))), l = 80))
  } else if("data.frame" %in% class(colorVar)){
    # drop colors declared for variables absent from the tree
    unused_var <- setdiff(colorVar$variable, used_vars)
    if(length(unused_var) > 0){
      colorVar <- colorVar[-which(colorVar$variable %in% unused_var), ]
    }
    # complete with automatic colors for variables lacking a declared one
    miss_var <- setdiff(used_vars, colorVar$variable)
    if(length(miss_var) > 0){
      tmp_color <- setdiff(grDevices::hcl(seq(0, 250, length = nrow(colorVar) + length(miss_var)), l = 80),
                           colorVar$color)
      miss_color <- data.frame(variable = miss_var,
                               color = tmp_color[1:length(unique(miss_var))])
      colorVar <- rbind.data.frame(colorVar, miss_color)
    }
  } else if("character" %in% class(colorVar)){
    # recycle the supplied colors over the variables actually used
    colorVar <- data.frame(variable = used_vars,
                           color = rep(colorVar, length(SortLabel))[1:length(used_vars)])
  }
  colorVar
}
# Internal helper: colors and labels for the terminal nodes (the Y side).
# - classification tree (ylevels present): returns list(colorY = modality /
#   color table, vardecidedClust = majority class per node).
# - regression tree: returns list(colRamp, colorTerm = one color per node
#   interpolated on the node mean, colorY = the two ramp endpoints,
#   vardecidedClust = rounded node mean).
.generateYColor <- function(object, colorY, nodes_var, digits = 3, infoClass = NULL, probs = NULL){
  if(!is.null(attributes(object)$ylevels)){
    if(is.null(infoClass)){
      infoClass <- attributes(object)$ylevels
    }
    if(is.null(probs)){
      # recompute class probabilities from yval2 when not supplied by caller
      probaClass <- object$frame[,"yval2"]
      nlevelsClass <- length(infoClass)
      effectif <- data.frame(probaClass[,2:(nlevelsClass+1), drop = F])
      probs <- data.frame(probaClass[,(nlevelsClass+2):(ncol(probaClass)-1), drop = F])
    }
    
    # Classification tree
    # majority class of each node
    vardecidedClust <- infoClass[apply(probs, 1, which.max)]
    if(is.null(colorY)){
      colorY <- data.frame(modality = unique(infoClass), 
                           color = grDevices::hcl(seq(250, 360, length = length(unique(infoClass))), l = 60))
    }else{
      if("data.frame" %in% class(colorY)){
        # complete user palette with automatic colors for missing modalities
        miss_y <- setdiff(infoClass, colorY$modality)
        if(length(miss_y) > 0){
          miss_color <- data.frame(modality = miss_y, 
                                   color = grDevices::hcl(seq(250, 360, length = length(unique(miss_y))), l = 60))
          colorY <- rbind.data.frame(colorY, miss_color)
        }
      }else if("character" %in% class(colorY)){
        # recycle given colors over the Y modalities
        colorY <- data.frame(modality = infoClass, 
                             color = rep(colorY, length(infoClass))[1:length(infoClass)])
      }
    }
    list(colorY = colorY, vardecidedClust = vardecidedClust)
  } else {
    # Regression tree
    vardecidedClust <- round(object$frame$yval, digits)
    # palette
    # normalize node means to [0, 1] to index the color ramp
    if(length(row.names(object$frame)) > 1){
      meanV <- object$frame$yval-min(object$frame$yval)
      meanV <- meanV/max(meanV)
    } else {
      meanV <- 1
    }
    colRamp <- .creatColorRampY(colorY)
    colorTerm <- grDevices::rgb(colRamp(meanV), maxColorValue=255)
    # default / normalized endpoints, kept for the legend
    if(is.null(colorY)){
      colorY <- c("#E6E0F8", "#8904B1")
    } else if(length(colorY) > 1){
      colorY <- c(colorY[1],colorY[2])
    } else {
      colorY <- c(NA,colorY[1])
    }
    list(colRamp = colRamp, colorTerm = colorTerm, colorY = colorY, vardecidedClust = vardecidedClust)
  }
}
# Internal helper: color ramp (grDevices::colorRamp) for regression-tree
# terminal nodes.
# colorY : NULL (default light -> dark purple gradient), two colors
#          (min / max endpoints), or a single color (ramp from NA to it,
#          mirroring the historic behaviour).
.creatColorRampY <- function(colorY)
{
  endpoints <- if (is.null(colorY)) {
    c("#E6E0F8", "#8904B1")
  } else if (length(colorY) > 1) {
    c(colorY[1], colorY[2])
  } else {
    c(NA, colorY[1])
  }
  grDevices::colorRamp(endpoints)
}
#' Run and edit a visTree, and get back in R
#'
#' Needed packages : shiny, rpart, colourpicker, shinyWidgets
#'
#' @param data \code{rpart or data.drame}
#' @param ... all arguments except \code{object} present in \link{visTreeModuleServer}
#'
#' @examples
#'
#' \dontrun{
#'
#' net <- visTreeEditor(data = iris)
#' net <- visTreeEditor(data = rpart(iris), main = "visTree Editor")
#' net <- visTreeEditor(data = rpart(iris), tooltip_data = iris,
#' main = "visTree Editor")
#' net
#'
#' }
#'
#' @export
#'
#' @importFrom utils packageVersion
#'
#' @seealso \link{visTree}, \link{visTreeModuleServer}, \link{visNetworkEditor}
#'
#' @references See online documentation \url{http://datastorm-open.github.io/visNetwork/}
#'
visTreeEditor <- function(data, ...){
  .ctrlPckTree()
  # Decide whether the module exposes the rpart build parameters:
  # - fitted rpart object: only tune / edit the visualization
  # - data.frame: also let the user build the tree interactively
  if("rpart" %in% class(data)){
    rpartParams <- FALSE
  } else if("data.frame" %in% class(data)){
    rpartParams <- TRUE
  } else {
    # previously fell through with `rpartParams` undefined, failing later
    # with an obscure error; fail fast with an explicit message instead
    stop("'data' must be a rpart object or a data.frame", call. = FALSE)
  }
  # blocking shiny app; returns the edited network when the user quits
  return(shiny::runApp(shiny::shinyApp(ui = shiny::fluidPage(
    visTreeModuleUI(id = "visTreeEditor", rpartParams = rpartParams, visTreeParams = TRUE, quitButton = TRUE)), 
    server = function(input, output, session) {
      shiny::callModule(visTreeModuleServer, id = "visTreeEditor", data = shiny::reactive(data), ...)
    })))
}
# Internal helper: HTML tooltip fragment for the numeric columns of a node
# sub-population. For each column it pairs the full-population boxplot
# sparkline (popSpkl, precomputed) with a boxplot sparkline built on `df`
# (the node rows), both annotated with their mean.
#
# df      : data.frame restricted to the node (numeric columns)
# popSpkl : named list of population sparkline HTML strings
# minPop / maxPop / meanPop : per-column population min / max / mean, used
#           so both boxplots share the same range
# NOTE(review): despite the NULL defaults, `re` is only defined inside the
# if(!is.null(popSpkl)) branch, so calling this with popSpkl = NULL would
# error at unlist(re) -- in practice visTree always supplies popSpkl.
.giveLabelsFromDfWhichInvisible <- function(df, popSpkl = NULL, minPop = NULL, maxPop = NULL, meanPop = NULL){
  # drop rows where the first column is NA
  df <- df[!is.na(df[,1]),, drop = FALSE]
  clM <- colMeans(df)
  if(!is.null(popSpkl)){
    nm <- names(df)
    re <- list()
    for(i in nm){
      re[[i]] <- paste0("<br>", popSpkl[[i]],' : On pop. (mean:<b>', round(meanPop[i],2),"</b>)","<br>",
                        .addSparkLineOnlyJs(df[,i], type = "box",
                                            min = minPop[[i]], max = maxPop[[i]]),
                        " : On grp. (mean:<b>", round(clM[i], 2),"</b>)")
    }
  }
  re <- unlist(re)
  paste(paste("<br> <b>",names(clM), ": </b>", re, collapse = ""))
}
.giveLabelsFromDfChrInvisible <- function(df, popSpkl, namOrder){
  # Build an HTML tooltip fragment for the character/factor columns of `df`
  # (one group/cluster): a pie sparkline of the group's level frequencies and
  # its mode, next to the population sparkline supplied in `popSpkl`.
  # `namOrder` fixes the level order so group and population pies line up.
  nm <- names(df)
  re <- list()
  for(i in nm){
    tbl <- table(df[,i, drop = FALSE])
    # re-order the counts to the reference level order, keeping observed levels
    tbl <- tbl[na.omit(match(namOrder[[i]], names(tbl)))]
    if(length(tbl) > 1){
      tbl <- data.frame(tbl)
    } else {
      tbl <- data.frame(Var1 = names(tbl), Freq = tbl)
    }
    # append reference levels absent from this group with a zero count
    newMod <- namOrder[[i]][!namOrder[[i]]%in%tbl$Var1]
    if(length(newMod) > 0){
      tbl <- rbind(tbl, data.frame(Var1 = newMod, Freq = 0))
    }
    # (a stray no-op `namOrder` statement that stood here was removed)
    # truncate long level labels for display
    tbl$Var1 <- ifelse(nchar(as.character(tbl$Var1) ) > 9, paste0(substr(tbl$Var1, 1, 8), "..."), as.character(tbl$Var1))
    re[[i]] <- paste0(.addSparkLineOnlyJs(tbl$Freq, type = "pie", labels = tbl$Var1), "On grp. (mode:<b>", tbl[which.max(tbl$Freq),]$Var1,"</b>)")
  }
  re <- unlist(re)
  paste(paste("<br> <b>",names(re), ": </b><br>", popSpkl, "<br>", re, collapse = ""))
}
#' @importFrom grDevices boxplot.stats
.addSparkLineOnlyJs <- function(vect, min = NULL, max = NULL, type = "line", labels = NULL){
  # Render `vect` as an inline jquery-sparkline: returns an HTML <div> with a
  # randomly suffixed class name plus the <script> that draws into it.
  #
  #   vect    - numeric values to plot
  #   min/max - chart range; default to range(vect)
  #   type    - sparkline type ("line", "pie", "box", ...)
  #   labels  - optional labels (used for pie tooltips)
  #
  # For boxplots, reduce x to the raw values sparkline expects: the five
  # boxplot.stats statistics plus, when present, the range of the outliers.
  getboxplotValues <- function(x){
    if(!all(is.na(x)) && length(x) >4){
      x_box <- boxplot.stats(x)
      # BUGFIX: the original used ifelse(length(x_box$out) >= 2, range(...), NA).
      # ifelse() is vectorised over its (scalar, length-1) condition and so
      # silently truncated the two-value range to its first element, dropping
      # the upper outlier bound from the plot.
      if (length(x_box$out) >= 2) {
        x_out_range <- range(x_box$out)
      } else {
        x_out_range <- NA
      }
      return(sort(c(x_box$stats, x_out_range))) # sort() drops the NA
    } else{
      return(NA)
    }
  }
  if(is.null(min))min <- min(vect)
  if(is.null(max))max <- max(vect)
  # random class suffix so several sparklines can coexist on one page
  drun <- sample(LETTERS, 15, replace = TRUE)
  drun <- paste0(drun, collapse = "")
  if(!is.null(labels)){
    # tooltip lookup table: slice offset -> label
    tltp <- paste0((1:length(labels))-1, ": '", labels, "'", collapse = ",")
    tltp <- paste0("
tooltipFormat: \'{{offset:offset}} ({{percent.1}}%)\', tooltipValueLookups: {
\'offset\': { ", tltp, "}}")
  }else{
    tltp <- NULL
  }
  if(type != "box"){
    ttr <- paste0('
$(function() {
$(".inlinesparkline', drun,'").sparkline([',paste0(vect, collapse = ",") ,'], {
type: "',type , '", chartRangeMin: ', min,', chartRangeMax: ', max,'
, ', tltp, '
});
});
')
  } else {
    vect <- getboxplotValues(vect)
    ttr <- paste0('
$(function() {
$(".inlinesparkline', drun,'").sparkline([',paste0(vect, collapse = ",") ,'], {
type: "',type , '", raw : true, chartRangeMin: ', min,', chartRangeMax: ', max,'
, ', tltp, '
});
});
')
  }
  paste0('<div class="inlinesparkline', drun,'" style="display: inline-block;"> </div>',
         '<script type="text/javascript">',
         ttr,
         '</script>')
}
65c29e1509c88d08228abb65297bd86350341711 | 63b1476cd7341c9fbe29b51e0d0ae4d65f55eb97 | /chemical.R | c3a088d45cd96ad476b882632332102e35695bea | [] | no_license | aguimaraesduarte/msan604 | 4b3f6a63559e2e890c257ddb8b5d281f17aa6136 | 17dd54dae05baa12bfbda3835d4e407b6878a74e | refs/heads/master | 2021-03-27T14:40:01.145454 | 2017-02-01T04:51:15 | 2017-02-01T04:51:15 | 72,265,858 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,941 | r | chemical.R | rm(list=ls())
# ---------------------------------------------------------------------------
# Box-Jenkins (ARIMA) analysis of a daily chemical concentration series.
# Workflow: inspect -> difference -> stationarity/seasonality tests ->
# order selection via ACF/PACF -> fit ARIMA(1,1,3) -> residual diagnostics.
# NOTE(review): expects "chemical.txt" (one value per line, no header) in
# the working directory -- confirm before running.
# ---------------------------------------------------------------------------
cat("\014") # clears the RStudio console
par(mfrow=c(1,1))
# Load libraries
library(tseries)
library(forecast)
library(lawstat)
## Chemical data
chem <- read.table("chemical.txt", header = F)
chem <- chem$V1
ts.plot(chem, main = "Daily Chemical Concentrations", xlab = "Days", ylab = "Concentrations")
# check whether ordinary differencing is necessary
par(mfrow=c(1,1))
acf(chem, lag.max = 50) # fit seems necessary
# Apply ordinary differencing
dchem <- diff(chem)
par(mfrow=c(2,1))
ts.plot(dchem, main = "Differenced Daily Chemical Concentrations", xlab = "Days", ylab = "Concentrations")
acf(dchem, lag.max = 50)
# This seems stationary . Let's check with ADF test:
# (ndiffs reports how many differences the chosen unit-root test suggests)
ndiffs(chem, test = "adf")
ndiffs(dchem, test = "adf")
# No apparent seasonality, but let's check with a hypothesis test
nsdiffs(x = chem, m = 7) # 7 days in a week
# So let's use ARIMA to model this. First, we need to pick orders p and q
# We do that with the ACF and PACF plots
par(mfrow=c(2,1))
acf(dchem, lag.max = 50)
pacf(dchem, lag.max = 50)
# p <= 2, q <= 3 seems reasonable. Let's start there
m <- arima(x = chem, order = c(1,1,3))
summary(m)
# for (2,1,3), we get lower log-likelihood, but higher aic, so adding a parameters does not improve significantly
# Let's visualize how well this model fits
f <- chem - m$residuals # fitted values
par(mfrow=c(1,1))
ts.plot(chem, main = "Daily Chemical Concentrations", xlab = "Days", ylab = "Concentrations")
lines(f, col="red")
legend("bottomright", legend = c("Observed", "Predicted"), lty = 1, col = c("black", "red"))
# Residual Diagnostics
tsdiag(m) # for (1,1,1) => spike at h=3 for ACF, Ljung-Box fails for h>=3
# for (1,1,2) => better, but maybe not optimal
# for (1,1,3) => looks good!
plot(m$residuals, main = "Residuals vs. Time", ylab = "Residuals")
abline(h = 0, col = "red")
qqnorm(m$residuals)
qqline(m$residuals, col = "red")
# Automatic check (optional)
auto.arima(chem, allowdrift = F)
|
fa32c2eeba05ae75167e24a6d2814163f7df1f1e | 44a51100036590df139b16915ab4b04bf4f7b3c9 | /scripts/graph_by_teams.R | 26d86d329f0ab942c0ff5dd513f9d69bddc5c860 | [] | no_license | wellrangel/Bball_players_versatility | 5b4b53c831db62e7976230ad99aa2c4cc522eb13 | d345fe16312d3221c272f5aca42e4c087a1fad02 | refs/heads/master | 2020-04-25T02:08:58.798151 | 2019-04-16T05:36:42 | 2019-04-16T05:36:42 | 172,429,016 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,021 | r | graph_by_teams.R | library(stringr)
library(reshape)
library(reshape2)
library(sqldf)
library(ggplot2)
library(plyr)
source("scriptsnovoscampos/functions.R")
rotatedAxisElementText = function(angle,posicaoition='x'){
  # Build a ggplot2 element_text() rotated by `angle` degrees, choosing
  # hjust/vjust so the labels stay anchored to the requested axis side.
  # `posicaoition` must be one of "x", "y", "top", "right".
  angle <- angle[1]
  posicaoition <- posicaoition[1]
  anchor <- list(x = 0, y = 90, top = 180, right = 270)
  if (!posicaoition %in% names(anchor)) {
    stop(sprintf("'posicaoition' must be one of [%s]", paste(names(anchor), collapse = ", ")), call. = FALSE)
  }
  if (!is.numeric(angle)) {
    stop("'angle' must be numeric", call. = FALSE)
  }
  rad <- (angle - anchor[[posicaoition]]) * pi / 180
  # project the rotation onto the [0, 1] justification scales
  element_text(angle = angle, vjust = 0.5 * (1 + cos(rad)), hjust = 0.5 * (1 - sin(rad)), size = 36)
}
# Load the classified-players table and keep only the "NBB 9" season.
df_cp <- read.csv2("outputdata/classified_players_novos_atributos.csv", sep=",", stringsAsFactors=FALSE)
df_cp<- df_cp[df_cp$Temporada == "NBB 9",]
# quick looks at the team x classification distribution
table(df_cp$equipe, df_cp$type)
table(df_cp$equipe)
# example roster: Bauru (position x player)
bauru <- df_cp[df_cp$equipe == "Bauru",]
bauru <- bauru[,c("posicao", "jogador")]
# split players by classification for the per-team counts below
Especialistas <- df_cp[df_cp$type == "Specialist",]
Versateis <- df_cp[df_cp$type == "Versatile",]
# Accumulator: one row per (team, classification) with the count and the
# within-team percentage of Versatile vs Specialist players.
dataGraph <- data.frame(Team=character(),
Classification=character(),
Total=integer(),
Percentual=integer(),
stringsAsFactors=FALSE)
#times <- as.data.frame(unique(Especialistas$equipe))
times <- c("Flamengo","Mogi", "Franca", "Brasília",
"Bauru","Paulistano", "Vitória","Pinheiros",
"Vasco da Gama","Campo Mourão", "Basq, Cearense", "Macaé Basquete",
"Minas", "L, Sorocabana", "Caxias do Sul"
)
times <- as.data.frame(times)
# For each team, append one "Versatile" and one "Specialists" row.
for(i in 1:nrow(times)) {
TimeEspecialistas <- Especialistas[Especialistas$equipe==times[i,],]
TimeVersateis <- Versateis[Versateis$equipe==times[i,],]
myRow <- dataGraph[1,]
myRow$Team <- times[i,]
myRow$Classification <- "Versatile"
myRow$Total <- nrow(TimeVersateis)
myRow$Percentual <- round((nrow(TimeVersateis)/(nrow(TimeEspecialistas) + nrow(TimeVersateis))) *100,0)
dataGraph <- rbind(dataGraph, myRow)
myRow <- dataGraph[1,]
myRow$Team <- times[i,]
myRow$Classification <- "Specialists"
myRow$Total <- nrow(TimeEspecialistas)
myRow$Percentual <- round((nrow(TimeEspecialistas)/(nrow(TimeEspecialistas) + nrow(TimeVersateis))) *100,0)
dataGraph <- rbind(dataGraph, myRow)
}
# Fix the display order of the teams, then anonymise them as "Team 01".."Team 15".
# NOTE(review): each ifelse() below returns a character vector, so the factor
# (and its level order) set on the next line is discarded after the first
# rename -- the bars end up ordered alphabetically by "Team NN". That happens
# to match the intended order here, but confirm it is deliberate.
dataGraph$Team <- factor(dataGraph$Team, levels=c("Bauru","Paulistano", "Pinheiros", "Vitória",
"Flamengo","Mogi", "Franca", "Brasília",
"Vasco da Gama","Campo Mourão", "Basq, Cearense", "Macaé Basquete",
"Minas", "L, Sorocabana", "Caxias do Sul"))
dataGraph$Team <- ifelse(dataGraph$Team=="Bauru", "Team 01", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Paulistano", "Team 02", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Pinheiros", "Team 03", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Vitória", "Team 04", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Flamengo", "Team 05", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Mogi", "Team 06", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Franca", "Team 07", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Brasília", "Team 08", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Vasco da Gama", "Team 09", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Campo Mourão", "Team 10", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Basq, Cearense", "Team 11", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Macaé Basquete", "Team 12", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Minas", "Team 13", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="L, Sorocabana", "Team 14", as.character(dataGraph$Team))
dataGraph$Team <- ifelse(dataGraph$Team=="Caxias do Sul", "Team 15", as.character(dataGraph$Team))
#dataGraph$Team <- factor(dataGraph$Team, levels=c("Team 01","Paulistano", "Pinheiros", "Vitória",
#                                                  "Flamengo","Mogi", "Franca", "Brasília",
#                                                  "Vasco da Gama","Campo Mourão", "Basq. Cearense", "Macaé Basquete",
#                                                  "Minas", "Liga Sorocabana", "Caxias do Sul"))
#dataGraph[dataGraph$Classification=="Specialists",]$labelposicao <- dataGraph[dataGraph$Classification=="Specialists",]$Percentual+5
#dataGraph[dataGraph$Classification=="Versatile",]$labelposicao <- dataGraph[dataGraph$Classification=="Versatile",]$Percentual-5
# Vertical position of each percentage label: middle of its stacked segment.
dataGraph <- ddply(dataGraph, .(Team),
transform, posicao = cumsum(Percentual) - (0.5 * Percentual))
# Stacked bar chart: percentage of Versatile vs Specialist players per team,
# with in-bar percentage labels and rotated x-axis labels; saved as figure 4.
ggplot(dataGraph, aes(Team, Percentual, fill = Classification)) +
theme_bw() +
geom_bar(stat="identity") +
geom_text(data=dataGraph, aes(x = Team, y = posicao, label = paste0(Percentual,"%")), size=10, vjust=-.5) +
xlab("Teams") +
ylab("Players' frequency (in percentage, %)") +
scale_fill_manual(values=c( "#9999CC", "#66CC99"))+
theme(
#axis.text.x=element_text(size=38),
axis.text.y=element_text(size=40),
axis.title=element_text(size=35,face="bold"),
legend.position="top",
legend.title=element_text(size=25),
legend.text=element_text(size=40),
axis.title.x = element_text(margin = margin(t = 10, r = 0, b = 0, l = 0)),
axis.title.y = element_text(hjust=0.85, margin = margin(t = 0, r = 15, b = 0, l = 0)),
axis.text.x = rotatedAxisElementText(45,'y')
) + theme(legend.title=element_blank(), legend.spacing.x = unit(0.5, 'cm'))
ggsave("graph/figure4.png")
|
f8be123ceee1d32f01baf45f7735085c017441d6 | 5a9956727d7a12f0bf2c697c486a49c2f37ed8c3 | /man/classifier_loglikelihood.Rd | 4b5e9cd12cff3612e88df857a537d2cb11da5fde | [] | no_license | mlondschien/hdcd | fb1bfcf9315bcda74632e86c415d22357193f5d9 | c6dd90aceb7921b25d18f9395771415080a6b530 | refs/heads/master | 2023-02-04T08:14:54.664927 | 2020-12-29T18:48:35 | 2020-12-29T18:48:35 | 280,871,404 | 2 | 0 | null | 2020-10-22T16:41:27 | 2020-07-19T13:29:06 | R | UTF-8 | R | false | true | 515 | rd | classifier_loglikelihood.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_selection.R
\name{classifier_loglikelihood}
\alias{classifier_loglikelihood}
\title{Loglikelihood from classifier}
\usage{
classifier_loglikelihood(y_train, predictions, y_test = y_train,
oob = F)
}
\arguments{
\item{y_train}{training labels}
\item{predictions}{matrix of predicted class probabilities}
\item{y_test}{test labels}
\item{oob}{logical; whether the predictions are out-of-bag (OOB) predictions}
}
\description{
Loglikelihood from classifier
}
|
ab521db2b7cb5c022bd47666df285f96a89d3afb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/TTR/examples/DPO.Rd.R | 9a8c8ad87d6c6c789c98e10cc6cebdd36151884c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 202 | r | DPO.Rd.R | library(TTR)
### Name: DPO
### Title: De-Trended Price Oscillator
### Aliases: DPO
### Keywords: ts
### ** Examples
data(ttrc)
priceDPO <- DPO(ttrc[,"Close"])
volumeDPO <- DPO(ttrc[,"Volume"])
|
6d69f8e0575e8fc3d3901dafd57098ac023b5fdf | 82663c850403bb61a0417d394392c0a533927dc1 | /Fun_Obj_Diniz_1994.R | 809520d105072e3916a610583e5585482b2bed18 | [] | no_license | ArturLourenco/rsmapmodel | e022a4ccfb7e6df4b436efffcaaf73979a74567c | bd9666e86eb887958339e842e8391e18930a6f9b | refs/heads/main | 2023-04-09T11:13:56.886908 | 2021-04-19T17:07:36 | 2021-04-19T17:07:36 | 359,536,556 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 734 | r | Fun_Obj_Diniz_1994.R | Nash = function(x){
# Objective function for calibrating a lumped rainfall-runoff model: runs the
# water-balance simulation over the rainfall series and returns the
# Nash-Sutcliffe efficiency of calculated vs observed flows.
# NOTE(review): relies on globals defined elsewhere -- tuin (initial moisture
# content), P (rainfall series), Ep (potential evapotranspiration series),
# Ad (drainage area) and Qob (observed flows); confirm they exist and have
# the expected shapes before calling.
# Unpack the 4 calibration parameters from the optimisation vector x.
str <- x[1]
e2 <- x[2]
e1 <- x[3]
cinf <- x[4]
Rsoloin = tuin * str #<--- soil reservoir (aerated zone)
Qcal<-0
for(i in 1:length(P))
{
Es = P[i]*tuin^e2; #<--- computes the surface runoff
Er = tuin * Ep[i]^e1; #<--- computes the actual evapotranspiration
Qcal[i] = (Es) * Ad / 2630; #<--- calculated flow
Rsoloin = Rsoloin + P[i] - Es - Er; #<--- updates the soil reservoir
tuin = (Rsoloin + cinf*P[i]) / str; #<--- updates the moisture content
}
# NOTE(review): rowSums() requires (Qcal - Qob) and (Qob - mean(Qob)) to be
# matrices; with plain vectors this line errors -- verify the intended shapes.
Nash<- (1 -(sum(rowSums(Qcal - Qob)^2)/sum(rowSums(Qob - mean(Qob))^2))) #<--- Nash-Sutcliffe efficiency
}
5b1ceb339682506956b04181ed3b5535700a7403 | 504a746e9481a81bd2087315e4443830ee0cc754 | /Notes.R | 43191fe3c2cd3dd0287810460bea50c8c30d9bba | [] | no_license | dancopeland/RDataWrangling | b394551b398beef3657611e6c1a3f76d1527f6b1 | 8faa4f6180c5ac5f3bf08f7918aaa3bd75609102 | refs/heads/master | 2020-05-25T08:22:32.807771 | 2019-05-29T21:23:00 | 2019-05-29T21:23:00 | 187,708,874 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,668 | r | Notes.R | library(tidyverse)
library(readxl)
library(purrr)
library(stringr)
library(dplyr)
# Collect the Excel workbooks (one per year) from the working directory and
# peek at the sheet names of the first one.
# NOTE(review): list.files() with no pattern picks up every file here --
# confirm the directory only contains the baby-name workbooks.
boy.file.names <- list.files(full.names = TRUE)
boy.file.names
excel_sheets(boy.file.names[[1]])
get.data.sheet.name <- function(file, pattern) {
  # Return the sheet name(s) in `file` whose name matches `pattern`.
  sheet.names <- excel_sheets(file)
  str_subset(sheet.names, pattern)
}
# Which sheet holds the data in each workbook
map(boy.file.names, get.data.sheet.name, pattern = "Table 1")
# Trial read of one workbook: the data start after 6 header rows
tmp <- read_excel(
boy.file.names[1],
sheet = get.data.sheet.name(boy.file.names[1], pattern = "Table 1"),
skip = 6
)
glimpse(tmp)
read.baby.names <- function(file) {
  # Read the "Table 1" sheet of one baby-names workbook, skipping the
  # 6 header rows above the data.
  all.sheets <- excel_sheets(file)
  read_excel(file, sheet = str_subset(all.sheets, "Table 1"), skip = 6)
}
# Read every workbook into a list of data frames (one element per year)
read.baby.names( boy.file.names[1])
boysNames = map(boy.file.names, read.baby.names)
glimpse(boysNames)
col_names = colnames(boysNames[[1]])
col_names
# Keep the two Name/Count column pairs and drop padding rows (first sheet)
boysNames[[1]] <- select(boysNames[[1]], "Name...2" , "Name...6", "Count...3", "Count...7" )
boysNames[[1]]
boysNames[[1]] <- drop_na(boysNames[[1]])
boysNames[[1]]
# Same selection expressed via pattern matching, tried on the second sheet
testBoyNames <- boysNames[[2]]
typeof(testBoyNames)
testBoyNames <- select(testBoyNames, str_subset(colnames(testBoyNames), "Count"), str_subset(colnames(testBoyNames), "Name"))
testBoyNames
condense.baby.names <- function(data.frame) {
  # Keep only the Count* and Name* columns (Count columns first).
  cols <- colnames(data.frame)
  select(data.frame, str_subset(cols, "Count"), str_subset(cols, "Name"))
}
# Apply the pattern-based column selection to every year
boyNames <- map(boysNames, condense.baby.names)
# Reorder each table's columns to Name, Count, Name, Count
x <- 0
for (df in boyNames) {
x = x + 1
df <- df[,c(3,1,4,2)]
boyNames[[x]] <- df
}
boyNames <- map(boyNames, drop_na)
names(boyNames[[1]]) <- c("Name", "Count", "Name1", "Count1")
# Give every year's table the same column names
for (x in 1:length(boyNames)) {
names(boyNames[[x]]) <- c("Name", "Count", "Name1", "Count1")
}
boyNames
# Stack the two Name/Count pairs into one two-column table (trial run)
bind_rows(select(boyNames[[20]], "Name", "Count"),
select(boyNames[[20]], "Name" = "Name1", "Count" ="Count1"))
length(boyNames)
# ...then do the same for every year
stacked.boy.names <- NULL
for (x in 1:length(boyNames)) {
stacked.boy.names[[x]] <- bind_rows(select(boyNames[[x]], Name, Count), select(boyNames[[x]], Name = Name1, Count =Count1))
}
stacked.boy.names
glimpse(head(stacked.boy.names))
# Label each year's table, then record Year as a proper column
yearvec <- c(1996:2015)
names(stacked.boy.names) <- yearvec
stacked.boy.names
# stacked.boy.names[[1]] <- mutate(stacked.boy.names[[1]], Year = names(stacked.boy.names)[1])
for (x in 1:length(stacked.boy.names)){
stacked.boy.names[[x]] <- mutate(stacked.boy.names[[x]], Year = as.integer(names(stacked.boy.names)[x]))
}
stacked.boy.names
# One long table across all years; plot one name's trajectory over time
big.data.names <- bind_rows(stacked.boy.names)
glimpse(big.data.names)
jack <- filter(big.data.names, Name == "JACK")
ggplot(jack, aes(x = Year, y = Count)) + geom_line() + ggtitle("Popularity of \"Jack\", over time")
filter(big.data.names, Count == max(big.data.names$Count))
|
8e1f2ad977c7757c01f81e5ab28582f10383b1ae | 9c44748031a24aa41661827aef05d9ab7eff169a | /R/authors.R | cd8c4cb933653bb739f371631a471e39534457ff | [] | no_license | avalcarcel9/aliviateR | 810958846ca78cd0b67f58b1898773e445fc862a | 4a420e0557b1da7d267b130379d63297991fb2a0 | refs/heads/master | 2021-07-07T21:50:43.367434 | 2020-07-09T04:05:23 | 2020-07-09T04:05:23 | 130,411,088 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,069 | r | authors.R | #' Create or modify the `Authors@R` field in the DESCRIPTION file
#'
#' @description If the `Authors@R` field is not included in the DESCRIPTION file
#' then `use_author()` adds the field assigns the author defined by the input
#' parameters. If the `Authors@R` field exists already in the DESCRIPTION then
#' `use_author()` will add the author defined by the input parameters as an
#' additional new author.
#'
#' The `use_author()` function should be used after [create_package()] or
#' [use_description()].
#'
#' If you create a lot of packages, consider storing personalized defaults as a
#' named list in an option named `"usethis.description"`. [use_description()]
#' will automatically fill using this information. Here's an example of code to
#' include in `.Rprofile`:
#'
#' ```
#' options(
#'   usethis.description = list(
#'     `Authors@R` = 'person("Jane", "Doe", email = "jane@example.com",
#'                           role = "aut", comment = c(ORCID = "YOUR-ORCID-ID"))',
#'     License = "MIT + file LICENSE",
#'     Language = "es"
#'   )
#' )
#' ```
#'
#' @param given a character string with the given (first) name of the author.
#' @param family a character string with the family (last) name of the author.
#' @param role a character vector specifying the role of the person to be added.
#' @param email a character string giving an e-mail address of the author to be added.
#' @param comment a character string providing comments related to the author to be added.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Adds the default Jane Doe to author
#'
#' use_author()
#'
#' # Adds the author information for Ali
#' use_author(
#' given = "Ali",
#' family = "Val",
#' role = "aut",
#' email = "alval@example.com",
#' comment = c(ORCID = "YOUR-ORCID-ID")
#' )
#'
#' # Adds a second author Ali2
#' use_author(
#' given = "Ali2",
#' family = "Val2",
#' role = "cph",
#' email = "alval2@example.com",
#' comment = NULL
#' )
#' }
#'
use_author <- function(given = "Jane", family = "Doe", role = "aut", email = "jane@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) {
# Add an author to the Authors@R field of DESCRIPTION; errors if the legacy
# Author field is present, skips exact duplicates, and offers to remove the
# usethis placeholder author once a real author has been added.
# Adapted from use_dependency code and tools provided in the desc package
# TODO long term: figure out if/how desc package requires input for role with multiple roles (i.e. c("aut", "cre"))
# TODO long term: create addin to prompt for author information
# TODO long term: create a snippet with author information for DESCRIPTION
# TODO add tests
# Assume DESCRIPTION is generated from usethis so that Authors@R is filled with either
# 1. The temporary author filled in without usethis defaults
# person("First", "Last", , "first.last@example.com", c("aut", "cre"), comment = c(ORCID = "YOUR-ORCID-ID"))
# 2. True author(s)
# Check the "Author:" field exists and if it does error the function
if (desc::desc_has_fields("Author")) {
ui_stop(
"{ui_field('Author')} was found as a field value in the DESCRIPTION. \\
Please remove or replace it with the {ui_field('Authors@R')} field."
)
# TODO long term: if Author: is found in the description ask user if they
# want to remove and replace it with Authors@R using desc::desc_del()
}
# TODO long term: if Authors@R field is missing from the description ask user
# if they want to add a blank one or error our
# if(desc::desc_has_fields("Authors@R") == FALSE){
# desc::desc_set(`Authors@R` = '')
# }
# Create person object using inputs
author <- utils::person(given = given, family = family, role = role, email = email, comment = comment)
# Obtain the current authors in the description
desc_authors <- desc::desc_get_authors()
# Check if any current author in the DESCRIPTION is exactly identical to the author input
# NOTE(review): %in% compares the person objects' character representations;
# confirm that matches the intended notion of "exactly identical".
if (author %in% desc_authors) {
ui_stop(
"Author {ui_value(author)} is already listed in \\
{ui_field('Authors@R')} in the current DESCRIPTION, no change made."
)
}
# Add the input author
desc::desc_add_author(given = given, family = family, role = role, email = email, comment = comment, normalize = TRUE)
ui_done("Added {ui_value(author)} to the {ui_field('Authors@R')} field.")
# Check if the usethis default author is included and remove it if so
usethis_author <- utils::person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID"))
# Interactive prompt: only reached when the placeholder author is present.
if (usethis_author %in% desc_authors) {
if (ui_yeah("{ui_field('Authors@R')}` field is populated with the {ui_code('usethis')} \\
default (i.e. {ui_value(usethis_author)}. Would you like to remove the default?")) {
# TODO(@jennybc): should we suppress messages from the desc::desc_del_author function? If so, how is this handled inside the package? suppressMessages()?
# Delete the usethis default author (i.e. person(given = "First", family = "Last", email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")))
desc::desc_del_author(given = "First", family = "Last", email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID"))
}
}
return(invisible())
}
|
6c3ca28ea99ca24d6549cca507c2cce3d42e88aa | c74d95f43a3fb0481862fa7f070e9113890f50d0 | /plot1.R | 0a9f38109030dbee7764feaa0040a0d112b91908 | [] | no_license | AzAlejandro/ExData_Plotting1 | 35644bcc84f8541388db606cb84bae3bc9ca0ced | 10697cd6a2f11090f4803878b20fdf737f698aeb | refs/heads/master | 2022-11-29T05:21:11.455980 | 2020-08-20T23:14:57 | 2020-08-20T23:14:57 | 289,104,289 | 0 | 0 | null | 2020-08-20T20:29:16 | 2020-08-20T20:29:15 | null | UTF-8 | R | false | false | 961 | r | plot1.R | URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Plot 1: histogram of Global Active Power from the Electric Power
# Consumption dataset. Download the zipped data once, read and subset the
# dates of interest, and write the histogram to plot1.png.
filename <- paste("./data_household", "/", "data_household.zip", sep = "")
# Guard directory creation and download so re-running the script is
# idempotent (the original warned on the existing directory and re-downloaded
# the archive every run).
if (!dir.exists("data_household")) {
  dir.create("data_household")
}
if (!file.exists(filename)) {
  download.file(url = URL, destfile = filename)
}
unzip(zipfile = filename, exdir = "./data_household")
# "?" marks missing values in this dataset; treating it as NA avoids the
# coercion warnings from as.numeric() below
data_household <- read.table("./data_household/household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
data_household$Global_active_power <- as.numeric(data_household$Global_active_power)
data_household$Date <- as.Date(strptime(data_household$Date, format = "%d/%m/%Y"))
data_subset <- subset(data_household, Date <= "2010-09-18" & Date >= "2010-09-16")
png("plot1.png", width = 480, height = 480)
par(mfrow = c(1,1))
hist(data_subset$Global_active_power,
     col = "red",
     xlim = c(0,8),
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     xaxt = "n",
)
axis(1, at = 0:6)
dev.off()
|
3bd2ce49fa8c5dc90f21a8efe3f76121d9995283 | 9a2828d6fab3fc6f7ce0f7c5bcde4259ab686c37 | /hw3/Homework3.R | 5c498abf079c7c4149c0fd94710981742b3a9f7f | [] | no_license | NarenSuri/Exploratory-Data-Analysis | 813a62702b731c6c50220fb6b77dd9dc6cdff4fe | 2b8630eedc38c7050e73bdd7aeab3f528af8bc81 | refs/heads/master | 2021-01-02T09:03:31.485269 | 2017-08-11T20:10:16 | 2017-08-11T20:10:16 | 99,129,948 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,781 | r | Homework3.R | #install.packages("noncensus")
library(noncensus)
data(counties)
CalculateFourths = function(dataset){
  # Return c(fourth-spread, median) of a numeric vector using depth-based
  # (Tukey-style) fourths. Fractional depths are truncated by R's numeric
  # indexing, exactly as in the original formulation.
  sorted <- sort(dataset)
  n <- length(sorted)
  if (n %% 2 == 0) {
    lower.depth <- n / 4
    upper.depth <- n - lower.depth
  } else {
    median.depth <- (n + 1) / 2
    lower.depth <- (median.depth + 1) / 2
    upper.depth <- (n - lower.depth) + 1
  }
  spread <- sorted[upper.depth] - sorted[lower.depth]
  c(spread, median(sorted))
}
# For each US state, compute the median and fourth-spread of its county
# populations, keep states with a defined nonzero spread, and examine the
# spread-vs-median relationship on the log10 scale to choose a Tukey power
# transformation.
states = levels(counties$state)
# lets prepare the lists to store the data, so that we can directly plot them in the future
MeadianList = c()
StatesList =c()
SpreadList =c()
for(Astate in states)
{
DataOfAState = counties[counties$state==Astate,]
ResultFourths = CalculateFourths(DataOfAState$population)
# only keep states with a defined, nonzero spread (log10 below needs > 0)
if(!is.na(ResultFourths[1]) && ResultFourths[1] != 0)
{MeadianList = c(MeadianList,ResultFourths[2])
SpreadList = c(SpreadList,ResultFourths[1])
StatesList = c(StatesList,Astate)}
}
# plotting the graphs for each state
# now we shall log the median and spread that was obtained
MeadianList = log10(MeadianList)
SpreadList = log10(SpreadList)
plot(MeadianList,SpreadList, col="orange")
LinearModel = lm(SpreadList~MeadianList)
abline(LinearModel,lty=4,col="green")
# Find the coefficients of the lm we have drawn
coefficients(LinearModel)
LinearModel$coefficients[2]
# The slope b is approximately 0.91, so p = 1 - b is approximately 0;
# for p = 0 Tukey's ladder prescribes the log transform, i.e. the log10
# re-expression used above is the appropriate one (no further power needed).
|
c9df128f3164bae3ebe63ece15653e46a4769471 | 255438aa65447488b1d63d823fde6710f82242a9 | /R/validate_arguments.R | 95f6c3cf0b4a16984740a82126caab275d14819d | [
"MIT"
] | permissive | EDIorg/taxonomyCleanr | a3a9a3b573eed3b540e49e18681e649df5ec9bc8 | 9c4a18573222c9dc70b881e8b0591a3ae6746e7d | refs/heads/main | 2023-05-23T16:41:48.864242 | 2023-03-28T21:49:43 | 2023-03-28T21:55:41 | 113,625,739 | 14 | 5 | MIT | 2023-03-28T21:55:27 | 2017-12-08T23:18:12 | R | UTF-8 | R | false | false | 2,569 | r | validate_arguments.R | #' Validate arguments of taxonomyCleanr functions
#'
#' @description
#' Validate input arguments to taxonomyCleanr functions.
#'
#' @param fun.name
#' (character) Function name
#' @param fun.args
#' (named list) Function arguments and values passed from the calling
#' function.
#'
#'
#' @keywords internal
#'
validate_arguments <- function(fun.name, fun.args){
  # Validate the arguments passed to a taxonomyCleanr function.
  #
  #   fun.name - (character) name of the calling function
  #   fun.args - (named list) arguments and values from the calling function
  #
  # Stops with an informative message on invalid input; warns when a
  # requested data source cannot be reached.
  # Parameterize --------------------------------------------------------------
  # Missing-argument sentinels become NULL so the checks below can use is.null();
  # vapply() (not sapply()) guarantees a logical vector even for empty input
  use_i <- vapply(fun.args, function(X) identical(X, quote(expr=)), logical(1))
  fun.args[use_i] <- list(NULL)
  # Helper functions ----------------------------------------------------------
  # Return a warning if data.sources aren't available
  ping_data_sources <- function(data.sources) {
    worms_ping <- function (what = "status", ...) {
      r <- httr::GET("https://www.marinespecies.org/rest")
      httr::status_code(r) == 200
    }
    # One ping closure per supported authority id; storing functions avoids
    # the original eval(parse(text = ...)) round trip on code strings
    ping_funs <- list(
      "3" = function() taxize::itis_ping(),
      "9" = function() worms_ping(),
      "11" = function() taxize::gbif_ping(),
      "165" = function() taxize::tropicos_ping())
    pingers <- data.frame(
      id = c(3, 9, 11, 165),
      name = c("ITIS", "WORMS", "GBIF", "TROPICOS"),
      stringsAsFactors = FALSE)
    use_i <- vapply(
      as.character(data.sources),
      function(x) {
        isTRUE(ping_funs[[x]]())
      },
      logical(1))
    if (!all(use_i)) {
      warning(
        paste0(
          pingers$name[match(data.sources, pingers$id)][!use_i],
          " cannot be reached at this time. Please try again later."),
        call. = FALSE)
    }
  }
  # Called from resolve_sci_taxa() --------------------------------------------
  if (fun.name == 'resolve_sci_taxa') {
    # "path" and "x" are mutually exclusive input modes (scalar &&, not &)
    if (!is.null(fun.args$x) && !is.null(fun.args$path)) {
      stop('Both "path" and "x" arguments are not allowed. Select one or the other.')
    }
    if (is.null(fun.args$x)) {
      if (is.null(fun.args$path)) {
        stop('Input argument "path" is missing!')
      }
      if (!file.exists(paste0(fun.args$path, '/taxa_map.csv'))) {
        stop('taxa_map.csv is missing! Create it with create_taxa_map().')
      }
    }
    if (is.null(fun.args$data.sources)) {
      stop('Input argument "data.sources" is missing!')
    }
    # Only sources supported by resolve_sci_taxa() are allowed
    authorities <- view_taxa_authorities()
    authorities <- authorities[authorities$resolve_sci_taxa == 'supported', ]
    use_i <- as.character(fun.args$data.sources) %in% as.character(authorities$id)
    if (sum(use_i) != length(use_i)){
      stop('Input argument "data.sources" contains unsupported data source IDs!')
    }
    ping_data_sources(fun.args$data.sources)
  }
}
|
3c65e9676ae95898525df77f7e3661dffd5250c5 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/9111_0/rinput.R | e6c1f439edb43cc453f625c403af535ffa12994e | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the Newick tree for gene family 9111_0, remove its root, and write
# the unrooted tree back out for the downstream codeml step.
testtree <- read.tree("9111_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9111_0_unrooted.txt")
d84bf4c95b0e4f322b0576be3d34a4b8dcfa6e4e | 2113b1ef587481e9c458e115bc049edaf88ad719 | /man/drop_media.Rd | 0b77240c61dd84982eb13c47964636c7ced1a6f2 | [] | no_license | KasperSkytte/rdrop2 | bc236a4ecb74867a01b9caf914e6af295384e47e | 3b6084187835bb457cc7974b534bb9f2f3e37696 | refs/heads/master | 2022-04-09T21:53:04.706863 | 2020-03-10T11:24:04 | 2020-03-10T11:24:04 | 105,997,225 | 0 | 0 | null | 2017-10-06T11:29:27 | 2017-10-06T11:29:27 | null | UTF-8 | R | false | true | 1,338 | rd | drop_media.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drop_media.R
\name{drop_media}
\alias{drop_media}
\title{Returns a link directly to a file.}
\usage{
drop_media(path = NULL, dtoken = get_dropbox_token())
}
\arguments{
\item{path}{Required. The path, relative to root, of the Dropbox file to
link to.}
\item{dtoken}{The Dropbox token generated by \code{\link{drop_auth}}. rdrop2
will try to automatically locate your local credential cache and use them.
However, if the credentials are not found, the function will initiate a new
authentication request. You can override this in \code{\link{drop_auth}} by
pointing to a different location where your credentials are stored.}
}
\description{
Similar to \code{drop_shared}. The difference is that this bypasses the
Dropbox webserver, used to provide a preview of the file, so that you can
effectively stream the contents of your media. This URL should not be used to
display content directly in the browser. IMPORTANT: The media link will expire
after 4 hours. So you'll need to cache the content with knitr cache OR re-run
the function call after expiry.
}
\examples{
\dontrun{
drop_media('Public/gifs/duck_rabbit.gif')
}
}
\references{
\href{https://www.dropbox.com/developers/documentation/http/documentation#files-get_temporary_link}{API documentation}
}
|
1268e82422bd4a96f54b2716f3194a7bc9eceb0c | eb75a7d1a47e19c1ad08510c15cdd24409801139 | /Stat540_Seminars/STAT540_seminar2.1/seminar2.1_madeATable.R | da167dcf611d141c58c6dddeb2ba043d3be62555 | [] | no_license | clbayly/stat540-2014-bayly-carmen | 5b1a69f6d75a40a7e6d6eb85cf562f8a16a2212a | 954e901a79d07906b1ec60fd967b2cf30fa9d44b | refs/heads/master | 2016-08-05T20:15:50.573987 | 2014-04-13T21:56:47 | 2014-04-13T21:56:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,077 | r | seminar2.1_madeATable.R | #matrix construction
B <- 1000
#'''#with rnorm
name <- rnorm
n <- 10
x10 <- matrix(rnorm(B * n), nrow = B, ncol = n)
n <- 100
x100 <- matrix(rnorm(B * n), nrow = B, ncol = n)
n <- 1000
x1000 <- matrix(rnorm(B * n), nrow = B, ncol = n)
n <- 10000
x10000 <- matrix(rnorm(B * n), nrow = B, ncol = n)
#'''
'''
#with dbeta
name <- dbeta
n <- 10
x10 <- matrix(dbeta(B * n), nrow = B, ncol = n)
n <- 100
x100 <- matrix(dbeta(B * n), nrow = B, ncol = n)
n <- 1000
x1000 <- matrix(dbeta(B * n), nrow = B, ncol = n)
n <- 10000
x10000 <- matrix(dbeta(B * n), nrow = B, ncol = n)
'''
'''
#with dhyper
name <- dhyper
n <- 10
x10 <- matrix(dhyper(B * n), nrow = B, ncol = n)
n <- 100
x100 <- matrix(dhyper(B * n), nrow = B, ncol = n)
n <- 1000
x1000 <- matrix(dhyper(B * n), nrow = B, ncol = n)
n <- 10000
x10000 <- matrix(dhyper(B * n), nrow = B, ncol = n)
'''
#sample size
n10 <- ncol(x10)
n100 <- ncol(x100)
n1000 <- ncol(x1000)
n10000 <- ncol(x10000)
#Sample means
xbar10 <- rowMeans(x10)
xbar100 <- rowMeans(x100)
xbar1000 <- rowMeans(x1000)
xbar10000 <- rowMeans(x10000)
#trueSEM
tSEM10 <- sd(x10/sqrt(nrow(x10)))
tSEM100 <- sd(x100/sqrt(nrow(x100)))
tSEM1000 <- sd(x1000/sqrt(nrow(x1000)))
tSEM10000 <- sd(x10000/sqrt(nrow(x10000)))
#obsSEM
oSEM10 <- sd(rowMeans(x10))
oSEM100 <- sd(rowMeans(x100))
oSEM1000 <- sd(rowMeans(x1000))
oSEM10000 <- sd(rowMeans(x10000))
#sampleMeanIQR
IQR10 <- IQR(rowMeans(x10))
IQR100 <- IQR(rowMeans(x100))
IQR1000 <- IQR(rowMeans(x1000))
IQR10000 <- IQR(rowMeans(x10000))
#sampleMeanMad
mad10 <- mad(rowMeans(x10))
mad100 <- mad(rowMeans(x100))
mad1000 <- mad(rowMeans(x1000))
mad10000 <- mad(rowMeans(x10000))
#make table
rownames <-rbind(n10, n100, n1000, n10000)
trueSEM <- rbind(tSEM10, tSEM100, tSEM1000, tSEM10000)
obsSEM <- rbind(oSEM10, oSEM100, oSEM1000, oSEM10000)
IQR <- rbind(IQR10, IQR100, IQR1000, IQR10000)
mad <- rbind(mad10, mad100, mad1000, mad10000)
bigtable <- cbind(rownames, trueSEM, obsSEM, IQR, mad)
xname <- c("sampleSize", "trueSEM", "obsSEM", "sampleMeanIQR", "sampleMeanMad")
colnames(bigtable) <- xname
name
bigtable
|
7f341007c9471e78f38fd286a966de43b3dc44ea | ed4d7ac589de72b473550e9d76c2e5d5ce4bad31 | /data-analysis/subjects_coverage_faults.R | c63372f7a028f49dc53d48608d9c980b5f03c6d6 | [
"MIT"
] | permissive | WesleyMarques/tcp-experiment | 769a42faf1b50fddf6e47a38eefa33fe581db1ca | 1a4db61a9f122934e78d4e68e20a2f71893f861e | refs/heads/master | 2022-03-31T10:09:38.150163 | 2020-01-13T23:00:34 | 2020-01-13T23:00:34 | 128,976,916 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 889 | r | subjects_coverage_faults.R | require(dplyr)
# Per-test fault/coverage plots for every (project, coverage criterion) pair.
library(dplyr)
library(ggplot2)
library(readr)

subjectsData <- read_csv("data/result.data")
projects <- unique(subjectsData$project)
coverage <- unique(subjectsData$coverage)

# Median faults / coverage per (project, version, coverage criterion).
# Bug fixed: the original piped the summary straight into View(), which
# returns invisible NULL, so `subjectsMedian` never held the summary.
subjectsMedian <- subjectsData %>%
  group_by(project, version, coverage) %>%
  summarise(faultsMedian = median(faults), coveredMedian = median(covered)) %>%
  ungroup()
View(subjectsMedian)

for (projectOne in projects) {
  for (covOne in coverage) {
    # Build the plot explicitly and hand it to ggsave(): inside a loop the
    # plot is never auto-printed, so relying on ggsave's default last_plot()
    # is fragile (it can save a stale or missing plot).
    p <- subjectsData %>%
      filter(coverage == covOne, project == projectOne) %>%
      mutate(metric = (covered - faults)) %>%
      # filter(metric <= 0 ) %>%
      ggplot(aes(y = faults, x = testName)) +
      geom_line(group = 1) +
      ylab("covered") +
      xlab("test name") +
      facet_wrap(~version, scales = "free")
    graphName <- paste0("graphics/apfd_mspreading_covered_vs_faults/",
                        projectOne, "_", covOne, "_faults.pdf")
    ggsave(graphName, plot = p)
  }
}
|
ffced82ade56208d9916ed864306b77495330f07 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i10-m10-u10-v0.pddl_planlen=23/dungeon_i10-m10-u10-v0.pddl_planlen=23.R | 6c200409bbf080cbff30004450d51567a8cd6047 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 90 | r | dungeon_i10-m10-u10-v0.pddl_planlen=23.R | 1f9a0e7b2830a604aac902f0282be454 dungeon_i10-m10-u10-v0.pddl_planlen=23.qdimacs 5030 13467 |
a4df8e6ce8849085aa48174cadfaed70dc590e61 | 274320eef9fec65c9cbcedace7f448ec78ceee3d | /Script2.R | 19192d204a3e697f3b8b234e561b058d631337dd | [] | no_license | renusam/sample | a900f7333cd7a8331592aa414eeb6977bc169f8e | b8d4ec07f1875492da42b6ed658db969d4874816 | refs/heads/master | 2023-01-29T07:56:29.805087 | 2023-01-13T09:19:07 | 2023-01-13T09:19:07 | 260,425,130 | 0 | 0 | null | 2023-01-13T09:18:13 | 2020-05-01T09:46:37 | Rebol | UTF-8 | R | false | false | 24 | r | Script2.R | 4*3
# This is a comment
|
21c53abc50a807e80f89ffac4f7aeb6e0f4f1a10 | 005927cf37f6cf0a9b161080bf7ac417ba4291ab | /Project1/plot3.R | 90242f5f9196f9764d6818cf7acf6e919f8f8ac9 | [] | no_license | JRTheStudent/Exploratory-Data-Analysis | 9cfdf5d0240e424baa5c2d2af772c4dfcdd983ab | e93c38cd5b9f78c98aec0e99cf732749f8730153 | refs/heads/master | 2020-05-20T07:27:10.436996 | 2015-03-22T16:34:34 | 2015-03-22T16:34:34 | 31,546,302 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,177 | r | plot3.R | ## This script is intended to reproduce the third of the four plots as
## Reproduces the third of the four plots described and shown at:
## https://github.com/rdpeng/ExData_Plotting1
## (energy sub-metering over 2007-02-01..2007-02-02, three series + legend).

## Setup - only required once; uncomment to download the raw data.
## if (!(file.exists("./data") && file.info("./data")$isdir)) dir.create("./data")
## download.file(
##   paste0("https://d396qusza40orc.cloudfront.net/",
##          "exdata%2Fdata%2Fhousehold_power_consumption.zip"),
##   destfile = "./data/household_power_consumption.zip"
## )
## write(date(), file = "./data/date_downloaded.txt")
## unzip("./data/household_power_consumption.zip", exdir = "./data")
## file.remove("./data/household_power_consumption.zip")

## Read the source data ("?" encodes missing values in this file).
data <- read.table(
  "./data/household_power_consumption.txt",
  header = TRUE,
  na.strings = "?",
  sep = ";"
)

## Keep only 2007-02-01 and 2007-02-02 (dates are %d/%m/%Y strings here).
data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]

## Combine "Date" (%d/%m/%Y) and "Time" (%H:%M:%S) into one timestamp.
## Fixed: use POSIXct instead of POSIXlt -- POSIXlt is a list-of-components
## class and is a poor fit for a data.frame column.
data$Date <- as.POSIXct(
  paste(data$Date, data$Time),
  format = "%d/%m/%Y %H:%M:%S"
)

## Drop the now-redundant "Time" column (column 2).
data <- data[-2]

## "data" columns: Date (POSIXct), Global_active_power, Global_reactive_power,
## Voltage, Global_intensity, Sub_metering_1..3 (all numeric).

## Generate the plot: three sub-metering series plus a legend.
png(filename = "./plot3.png", bg = "transparent")
par(mar = c(4.2, 3.9, 3.2, 1.2))
plot(
  data$Date,
  data$Sub_metering_1,
  xlab = "",
  ylab = "Energy sub metering",
  type = "l"
)
lines(data$Date, data$Sub_metering_2, col = "red", type = "l")
lines(data$Date, data$Sub_metering_3, col = "blue", type = "l")
legend(
  "topright",
  legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  text.col = "black",
  lty = 1,
  col = c("black", "red", "blue")
)
dev.off() |
495bd1e1163850264ab4834ef961385c7c825486 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/1954_46/rinput.R | 0580750aec4a82beeb7b4617df1249d2b0e46077 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
# Read the Newick tree, unroot it (as required by codeml), and write it out.
phylo <- read.tree("1954_46.txt")
write.tree(unroot(phylo), file = "1954_46_unrooted.txt")
fe750e087c3f90b125e441249c15f2827292ab13 | e7b72443995f0025404efc999d69d606e810c3da | /R/libdata-info.R | a57c8396e168a2df23af568182e7a5c1d1bd14da | [
"MIT"
] | permissive | benkon17/circreg | c8561e011069c6c8a1f7a255e5834b0395ba49f3 | 5e990de2692a6cd428685b29aa1a286131471b41 | refs/heads/master | 2020-03-14T14:37:18.928659 | 2018-05-01T02:07:55 | 2018-05-01T02:07:55 | 131,657,672 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 904 | r | libdata-info.R | #' Bibliographic data for 14509 juvenile non-fiction monographs.
#'
#' A dataset containing circulation and bibliographic information for 14509
#' juvenile non-fiction works in a public library collection.
#'
#' @format A data frame with 14509 rows and eight variables:
#' \describe{
#' \item{TITLE}{title of work}
#' \item{PAGES}{MARC formatted physical description of item, number of pages}
#' \item{CALLNUM}{call number as it appears in library catalog}
#' \item{CHARGES}{total number of charges for the item since entry into library catalog}
#' \item{LASTUSED}{date item was last used}
#' \item{COPY}{copy number of the item}
#' \item{PRICE}{price of the item in US dollars}
#' \item{CREATED}{date the item was entered into library catalog}
#' }
#' @source Obtained from the Delray Beach Public Library via library management
#' system report (SIRSI).
"libdata"
|
5306079df980d04795027d9c208170119b2e0958 | c959b41dce1f37bf8b24cdd9dc0a31155f6ff031 | /K-means Clustering.R | bfc88bbb89200b46178af469f2d8ab9b0631d21b | [] | no_license | bominkm/Dongjak-gu | 5f106d7cba0d017a16b0a34453100f90273f0a47 | 00f025a8109312194e73af9131e25fcb73c5e2f0 | refs/heads/main | 2023-04-11T07:56:38.556435 | 2021-04-29T18:35:59 | 2021-04-29T18:35:59 | 358,311,097 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,670 | r | K-means Clustering.R | library(dplyr)
library(fpc)
library(cluster)
library(stringr)
library(clusterSim)
library(factoextra)

# Road-safety data for Dongjak-gu.  Column names are Korean:
#   도로명 = road name, 길이 = road length, 아파트 = apartments,
#   치안센터 = police posts, 비상벨 = emergency bells,
#   보안등 / 가로등 = security / street lights, 시설 = facilities.
dat0 <- read.csv( "C:/Users/user/Desktop/BOAZ/동작구 공모전/사용데이터/reallyfinal.csv", encoding = 'CP949')
sum(is.na(dat0))                  # count of missing values (printed for inspection)
dat0[!complete.cases(dat0),]      # show the incomplete rows before dropping them
dat0 <- na.omit(dat0)

# Normalise each count by road length (length rescaled by /100) to densities.
data <- dat0 %>% mutate(길이 = 길이/100) %>%
  mutate(시설 = (아파트+치안센터)/길이,
         CCTV = CCTV/길이,
         비상벨 = 비상벨/길이,
         보안등 = (가로등 + 보안등)/길이) %>%
  dplyr::select(도로명,시설, CCTV, 비상벨, 보안등)

# Exclude major roads: drop road names ending in '로' ("-ro"),
# keeping only the smaller streets.
data_ <- data %>% filter(!(data$도로명 %in% grep("로$",data$도로명, value = T)))
str(data_)

# Ward's method (hierarchical) clustering on the standardised densities.
data1 <- data_[2:5]
data1 <- scale(data1, center = T, scale = T)
d.data <- dist(data1, method = "euclidean")
data.ward <- hclust(d.data, method = "ward.D2")
c1.data <- cutree(data.ward, 6)
table(c1.data)
which(c1.data == 1)
par(mfrow = c(1,1))
plot(data.ward, hang = -1)
# NOTE(review): rectangles are drawn for 5 groups although the tree was cut
# at 6 above -- confirm which k was intended.
rect.hclust(data.ward, 5, border = 1:5)

# k-means: print the Calinski-Harabasz (G1) index for k = 1..20 to pick k
# (clusterSim::index.G1; the seed is reset each iteration so runs are
# comparable), then fit the final k = 6 solution.
for (i in 1:20){
  set.seed(1)
  data.k <- kmeans(data1, i, nstart = 50)
  result <- index.G1 (data1,data.k$cluster,centrotypes="centroids")
  print(result)
}
set.seed(1)
data.k <- kmeans(data1, 6, nstart = 50)
data.k
which(data.k$cluster == 4)

# PCA for interpretation: loadings scaled by the component std. deviations.
data.pr <- prcomp(data1, retx = T, center = T, scale. = T)
summary(data.pr)
data.loading <- data.pr$rotation %*% diag(data.pr$sdev)
round(data.loading[,1:2],3)
data.k$centers
fviz_cluster(data.k, data1, cluster = data.k)

# Attach cluster labels to the (unscaled) road table and export.
tab_cluster <- cbind(data_, data.k$cluster)
write.csv(tab_cluster, "final.csv")
|
7eaa076012f0c7bb09701c9add0ce6a5d9f04bf4 | 36a25a9052d14520300e7f5613730a3a9606a8c9 | /Generator/ApplyJacobian_fd.R | 35ab7797b27bf841315585e593d071502195b55a | [] | no_license | cyrulnic/NoStRa | 20fbe84dd2c3a7f43bc8e9c39bc025d35c0e50c9 | 83e9776158503fbdf5b5a23aa7a23c5ead53691f | refs/heads/master | 2020-03-29T04:09:25.821312 | 2019-09-18T13:23:15 | 2019-09-18T13:23:15 | 149,518,116 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 896 | r | ApplyJacobian_fd.R | ApplyJacobian_fd = function(F, x0, x1, u, eps, ...){
  # Approximate application of the Jacobian J = dF/dx|_{x0} of F to a
  # direction u, via one-sided finite differencing around the trajectory
  # point x1 = F(x0):
  #
  #   F(x0 + eps*u) - F(x0) = eps * J u + o(eps)   ==>
  #   v = J u  ~  (F(x0 + eps*u) - x1) / eps                    (*)
  #
  # Args:
  #   F   - function name.  NB: the FIRST argument of F must be the state x.
  #   x0  - point around which the linearization is done.
  #   x1  - must equal F(x0); passed in so F is evaluated only once here.
  #   u   - perturbation direction that J is applied to.
  #   eps - small real step; the one-sided difference is O(eps) accurate,
  #         so eps trades truncation error against round-off error.
  #   ... - further arguments of F after x, forwarded unchanged.
  #
  # Returns: v ~ J %*% u, same shape as F's output.
  #
  # M Tsyrulnikov
  # Jan 2018
  #browser()   # leftover debug hook, intentionally left commented out
  v = (F(x0 + eps*u, ...) - x1) /eps
  return(v)
}
|
0d92d31f7bf711f5499627b692479104664ae60e | c0cc2b3cdacb8acc6ae96688f984259f173df206 | /BlockMixtureModel_NegativeBinomial_Rcpp.R | 740b4921d9358e2e2abab7b29ddd04c4385f680e | [] | no_license | NingtaoWang/Biclustering-for-Sequence-Count-Data | 49e3de20fce5608daa419e69d2f30b8c1b13e464 | 15c7692f14f9de8dc3bf4db67f7c89ed090f7dbd | refs/heads/master | 2020-05-21T16:42:12.471309 | 2016-10-19T13:51:23 | 2016-10-19T13:51:23 | 65,487,510 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,061 | r | BlockMixtureModel_NegativeBinomial_Rcpp.R | REPEAT_LIMIT <<- 10001
RELATIVE_DIFF <<- 0.0000001
library(Rcpp)
library(RcppArmadillo)
cppFunction('
Rcpp::NumericVector MyOuterNB( Rcpp::NumericMatrix DataR,
Rcpp::NumericMatrix rR,
Rcpp::NumericMatrix probR) {
int n = DataR.nrow(),
m = DataR.ncol(),
K = probR.nrow(),
L = probR.ncol();
int n_iter = n * m * K * L,
n_iter_ijk = n * m * K,
n_iter_ij = n * m,
n_iter_i = n;
Rcpp::NumericVector Output(n_iter);
for(int h = 0; h < n_iter; h++){
int l = floor(h / n_iter_ijk);
int ijk = h - l * n_iter_ijk;
int k = floor(ijk / n_iter_ij);
int ij = ijk - k * n_iter_ij;
int j = floor(ij / n_iter_i);
int i = ij - j * n_iter_i;
double tmp = lgamma(DataR(i,j) + rR(k,l)) - lgamma(rR(k,l)) - lgamma(DataR(i,j) + 1) + rR(k,l) * log(probR(k,l)) + DataR(i,j) * log(1 - probR(k,l));
Output[h] = exp(tmp);
}
return Output;
}')
# C++ kernel: multiply the row-class posterior Psi.ik[i,k] into every
# (i,j,k,l) entry of the 4-d array Psi.ijkl.  Same flat (i fastest) index
# layout as MyOuterNB; callers reshape with array(., c(n, m, K, L)).
cppFunction('
Rcpp::NumericVector MyOuterPsiPsi( Rcpp::NumericMatrix Psiik,
                                   Rcpp::NumericVector Psiijkl) {
  Rcpp::IntegerVector DimsPsiijkl = Psiijkl.attr("dim");
  int n_iter = DimsPsiijkl[0] * DimsPsiijkl[1] * DimsPsiijkl[2] * DimsPsiijkl[3],
      n_iter_ijk = DimsPsiijkl[0] * DimsPsiijkl[1] * DimsPsiijkl[2],
      n_iter_ij = DimsPsiijkl[0] * DimsPsiijkl[1],
      n_iter_i = DimsPsiijkl[0];
  Rcpp::NumericVector Output(n_iter);
  for(int h = 0; h < n_iter; h++){
    int l = floor(h / n_iter_ijk);
    int ijk = h - l * n_iter_ijk;
    int k = floor(ijk / n_iter_ij);
    int ij = ijk - k * n_iter_ij;
    int j = floor(ij / n_iter_i);
    int i = ij - j * n_iter_i;
    Output[h] = Psiik(i, k) * Psiijkl(h);
  }
  return Output;
}')
# C++ kernel: column-class analogue of MyOuterPsiPsi -- multiply the
# column-class posterior Omega.jl[j,l] into every (i,j,k,l) entry of
# Omega.ijkl.  Same flat index layout as the other kernels.
cppFunction('
Rcpp::NumericVector MyOuterOmegaOmega( Rcpp::NumericMatrix Omegajl,
                                       Rcpp::NumericVector Omegaijkl) {
  Rcpp::IntegerVector DimsOmegaijkl = Omegaijkl.attr("dim");
  int n_iter = DimsOmegaijkl[0] * DimsOmegaijkl[1] * DimsOmegaijkl[2] * DimsOmegaijkl[3],
      n_iter_ijk = DimsOmegaijkl[0] * DimsOmegaijkl[1] * DimsOmegaijkl[2],
      n_iter_ij = DimsOmegaijkl[0] * DimsOmegaijkl[1],
      n_iter_i = DimsOmegaijkl[0];
  Rcpp::NumericVector Output(n_iter);
  for(int h = 0; h < n_iter; h++){
    int l = floor(h / n_iter_ijk);
    int ijk = h - l * n_iter_ijk;
    int k = floor(ijk / n_iter_ij);
    int ij = ijk - k * n_iter_ij;
    int j = floor(ij / n_iter_i);
    int i = ij - j * n_iter_i;
    Output[h] = Omegajl(j, l) * Omegaijkl(h);
  }
  return Output;
}')
# C++ kernel: Data[i,j] * (PsiPsi[h] + OmegaOmega[h]) for every (i,j,k,l) --
# the numerator of the M-step update for mu[k,l] (summed over i,j by the
# caller, BM.StepM).  Same flat index layout as the other kernels.
cppFunction('
Rcpp::NumericVector MyOuterMu( Rcpp::NumericMatrix DataR,
                               Rcpp::NumericVector PsiPsi,
                               Rcpp::NumericVector OmegaOmega) {
  Rcpp::IntegerVector DimsPsiPsi = PsiPsi.attr("dim");
  int n_iter = DimsPsiPsi[0] * DimsPsiPsi[1] * DimsPsiPsi[2] * DimsPsiPsi[3],
      n_iter_ijk = DimsPsiPsi[0] * DimsPsiPsi[1] * DimsPsiPsi[2],
      n_iter_ij = DimsPsiPsi[0] * DimsPsiPsi[1],
      n_iter_i = DimsPsiPsi[0];
  Rcpp::NumericVector Output(n_iter);
  for(int h = 0; h < n_iter; h++){
    int l = floor(h / n_iter_ijk);
    int ijk = h - l * n_iter_ijk;
    int k = floor(ijk / n_iter_ij);
    int ij = ijk - k * n_iter_ij;
    int j = floor(ij / n_iter_i);
    int i = ij - j * n_iter_i;
    Output(h) = DataR(i,j) * (PsiPsi(h) + OmegaOmega(h));
  }
  return Output;
}')
# C++ kernel: first derivative of the NB(mu, r) log-likelihood w.r.t. r
# (digamma terms), evaluated per (i,j,k,l) and weighted by the posterior
# weights PsiPsi + OmegaOmega.  Summed by BM.StepM to form the numerator of
# the Newton step for r[k,l].
cppFunction('
Rcpp::NumericVector MyOuterPartial1( Rcpp::NumericMatrix DataR,
                                     Rcpp::NumericVector PsiPsi,
                                     Rcpp::NumericVector OmegaOmega,
                                     Rcpp::NumericMatrix muR,
                                     Rcpp::NumericMatrix rR) {
  Rcpp::IntegerVector DimsPsiPsi = PsiPsi.attr("dim");
  int n_iter = DimsPsiPsi[0] * DimsPsiPsi[1] * DimsPsiPsi[2] * DimsPsiPsi[3],
      n_iter_ijk = DimsPsiPsi[0] * DimsPsiPsi[1] * DimsPsiPsi[2],
      n_iter_ij = DimsPsiPsi[0] * DimsPsiPsi[1],
      n_iter_i = DimsPsiPsi[0];
  Rcpp::NumericVector Output(n_iter);
  for(int h = 0; h < n_iter; h++){
    int l = floor(h / n_iter_ijk);
    int ijk = h - l * n_iter_ijk;
    int k = floor(ijk / n_iter_ij);
    int ij = ijk - k * n_iter_ij;
    int j = floor(ij / n_iter_i);
    int i = ij - j * n_iter_i;
    double tmp = R::digamma(DataR(i,j) + rR(k,l)) - R::digamma(rR(k,l)) - (DataR(i,j) + rR(k,l))/(muR(k,l) + rR(k,l)) + 1 + log(rR(k,l)) - log(muR(k,l) + rR(k,l));
    Output(h) = tmp * (PsiPsi(h) + OmegaOmega(h));
  }
  return Output;
}')
# C++ kernel: second derivative of the NB(mu, r) log-likelihood w.r.t. r
# (trigamma terms), weighted by PsiPsi + OmegaOmega.  Summed by BM.StepM to
# form the denominator of the Newton step for r[k,l].
cppFunction('
Rcpp::NumericVector MyOuterPartial2( Rcpp::NumericMatrix DataR,
                                     Rcpp::NumericVector PsiPsi,
                                     Rcpp::NumericVector OmegaOmega,
                                     Rcpp::NumericMatrix muR,
                                     Rcpp::NumericMatrix rR) {
  Rcpp::IntegerVector DimsPsiPsi = PsiPsi.attr("dim");
  int n_iter = DimsPsiPsi[0] * DimsPsiPsi[1] * DimsPsiPsi[2] * DimsPsiPsi[3],
      n_iter_ijk = DimsPsiPsi[0] * DimsPsiPsi[1] * DimsPsiPsi[2],
      n_iter_ij = DimsPsiPsi[0] * DimsPsiPsi[1],
      n_iter_i = DimsPsiPsi[0];
  Rcpp::NumericVector Output(n_iter);
  for(int h = 0; h < n_iter; h++){
    int l = floor(h / n_iter_ijk);
    int ijk = h - l * n_iter_ijk;
    int k = floor(ijk / n_iter_ij);
    int ij = ijk - k * n_iter_ij;
    int j = floor(ij / n_iter_i);
    int i = ij - j * n_iter_i;
    double MuRR = muR(k,l) + rR(k,l);
    double tmp = R::trigamma(DataR(i,j) + rR(k,l)) - R::trigamma(rR(k,l)) - 2/MuRR + (DataR(i,j) + rR(k,l))/MuRR/MuRR + 1/rR(k,l);
    Output(h) = tmp * (PsiPsi(h) + OmegaOmega(h));
  }
  return Output;
}')
# C++ kernel: derivative of the NB density w.r.t. mu, expressed via the
# density itself:  dF/dmu = F * r * (x - mu) / (mu * (r + mu)).
# Used by BM.BICsimu when assembling score vectors for the BIC penalty.
cppFunction('
Rcpp::NumericVector MyOuterDMu( Rcpp::NumericMatrix DataR,
                                Rcpp::NumericMatrix MuR,
                                Rcpp::NumericMatrix rR,
                                Rcpp::NumericVector Fijkl) {
  Rcpp::IntegerVector DimsFijkl = Fijkl.attr("dim");
  int n_iter = DimsFijkl[0] * DimsFijkl[1] * DimsFijkl[2] * DimsFijkl[3],
      n_iter_ijk = DimsFijkl[0] * DimsFijkl[1] * DimsFijkl[2],
      n_iter_ij = DimsFijkl[0] * DimsFijkl[1],
      n_iter_i = DimsFijkl[0];
  Rcpp::NumericVector Output(n_iter);
  for(int h = 0; h < n_iter; h++){
    int l = floor(h / n_iter_ijk);
    int ijk = h - l * n_iter_ijk;
    int k = floor(ijk / n_iter_ij);
    int ij = ijk - k * n_iter_ij;
    int j = floor(ij / n_iter_i);
    int i = ij - j * n_iter_i;
    double DataMu = DataR(i,j) - MuR(k,l);
    Output(h) = Fijkl(h) * rR(k,l) * DataMu / MuR(k,l) / (rR(k,l) + MuR(k,l));
  }
  return Output;
}')
# C++ kernel: derivative of the NB density w.r.t. the dispersion r --
# density times the same digamma score expression as MyOuterPartial1.
# Used by BM.BICsimu when assembling score vectors for the BIC penalty.
cppFunction('
Rcpp::NumericVector MyOuterDSigma( Rcpp::NumericMatrix DataR,
                                   Rcpp::NumericMatrix MuR,
                                   Rcpp::NumericMatrix rR,
                                   Rcpp::NumericVector Fijkl) {
  Rcpp::IntegerVector DimsFijkl = Fijkl.attr("dim");
  int n_iter = DimsFijkl[0] * DimsFijkl[1] * DimsFijkl[2] * DimsFijkl[3],
      n_iter_ijk = DimsFijkl[0] * DimsFijkl[1] * DimsFijkl[2],
      n_iter_ij = DimsFijkl[0] * DimsFijkl[1],
      n_iter_i = DimsFijkl[0];
  Rcpp::NumericVector Output(n_iter);
  for(int h = 0; h < n_iter; h++){
    int l = floor(h / n_iter_ijk);
    int ijk = h - l * n_iter_ijk;
    int k = floor(ijk / n_iter_ij);
    int ij = ijk - k * n_iter_ij;
    int j = floor(ij / n_iter_i);
    int i = ij - j * n_iter_i;
    double tmp = R::digamma(DataR(i,j) + rR(k,l)) - R::digamma(rR(k,l)) - (DataR(i,j) + rR(k,l))/(MuR(k,l) + rR(k,l)) + 1 + log(rR(k,l)) - log(MuR(k,l) + rR(k,l));
    Output(h) = Fijkl(h) * tmp;
  }
  return Output;
}')
# E-step of the block-mixture (biclustering) EM for NB counts.
#
# Args:
#   data  - n x m count matrix.
#   mu, r - K x L block means / dispersions (NB size parameters).
#   p, q  - mixing proportions for row classes (length K) and col classes (L).
#   K, L, n, m - dimensions (passed in to avoid recomputation).
#
# Returns a list with the row-class posteriors Psi.ik (n x K), the
# conditional posteriors Psi.ijkl / Omega.ijkl, the column-class posteriors
# Omega.jl (m x L), and the observed-data LogLikelihood.
#
# All posteriors are computed in log space with an ad-hoc clamp (max - 1400)
# and a half-max/half-min shift so that exp() neither under- nor overflows.
BM.StepE <- function(data, mu, r, p, q, K, L, n, m)
{
  # NB parameterisation used by MyOuterNB: prob = r / (r + mu).
  prob <- r / (r + mu)
  Fijkl <- MyOuterNB(data, r, prob)
  Fijkl <- array(Fijkl, dim = c(n, m, K, L))
  # Floor tiny/NaN densities so downstream logs and ratios stay finite.
  Fijkl[is.nan(Fijkl)] <- 1e-300
  Fijkl[Fijkl < 1e-300] <- 1e-300
  # Psi.ijkl: posterior of column class l for cell (i,j) given row class k.
  Psi.ijkl <- apply(Fijkl,1:3,FUN=function(x){x*q/(x%*%q)})
  Psi.ijkl <- aperm(Psi.ijkl, c(2,3,4,1))
  # Row-class posteriors Psi.ik, built from per-row log-likelihoods.
  F1.ijk <- apply(Fijkl,1:3,FUN=function(x){x%*%q})
  F1.log.ijk <- log(F1.ijk)
  F1.log.ik <- apply(F1.log.ijk,c(1,3),sum)
  # Clamp log-likelihoods more than 1400 below the row maximum (underflow guard).
  F1.log.ik.adjtmp <- apply(F1.log.ik,1,
                            FUN=function(x){
                              tmpx <- max(x)-1400
                              x[which(x<tmpx)]<-tmpx
                              return(x)})
  # Centre each row at (max+min)/2 before exponentiating.
  F1.log.ik.adj <- apply(F1.log.ik.adjtmp,2,FUN=function(x){x-max(x)/2-min(x)/2})
  F1.log.ik.adj <- t(F1.log.ik.adj)
  F1.ik <- apply(F1.log.ik.adj,1,FUN=function(x){exp(x)*p/(exp(x)%*%p)})
  Psi.ik <- t(F1.ik)
  # Mirror-image computation for the column side.
  Omega.ijkl <- apply(Fijkl,c(1,2,4),FUN=function(x){x*p/(x%*%p)})
  Omega.ijkl <- aperm(Omega.ijkl, c(2,3,1,4))
  F2.ijl <- apply(Fijkl,c(1,2,4),FUN=function(x){x%*%p})
  F2.log.ijl <- log(F2.ijl)
  F2.log.jl <- apply(F2.log.ijl,c(2,3),sum)
  F2.log.jl.adjtmp <- apply(F2.log.jl,1,
                            FUN=function(x){
                              tmpx <- max(x)-1400
                              x[which(x<tmpx)]<-tmpx
                              return(x)})
  F2.log.jl.adj <- apply(F2.log.jl.adjtmp,2,FUN=function(x){x-max(x)/2-min(x)/2})
  F2.log.jl.adj <- t(F2.log.jl.adj)
  F2.jl <- apply(F2.log.jl.adj,1,FUN=function(x){exp(x)*q/(exp(x)%*%q)})
  Omega.jl <- t(F2.jl)
  # Observed-data log-likelihood: re-add the centring constants removed above.
  F1.log.i.mean <- apply(F1.log.ik.adjtmp,2,FUN=function(x){max(x)/2+min(x)/2})
  F1.ik.tmp <- apply(F1.log.ik.adj,1,FUN=function(x){exp(x)*p})
  F1.i.tmp <- apply(F1.ik.tmp,2,sum)
  F1.i.log <- log(F1.i.tmp)
  F2.log.j.mean <- apply(F2.log.jl.adjtmp,2,FUN=function(x){max(x)/2+min(x)/2})
  F2.jl.tmp <- apply(F2.log.jl.adj,1,FUN=function(x){exp(x)*q})
  F2.j.tmp <- apply(F2.jl.tmp,2,sum)
  F2.j.log <- log(F2.j.tmp)
  LogLikelihood <- sum(F1.i.log)+sum(F2.j.log)+sum(F1.log.i.mean)+sum(F2.log.j.mean)
  return(list(Psi.ik = Psi.ik, Psi.ijkl = Psi.ijkl,
              Omega.jl = Omega.jl, Omega.ijkl = Omega.ijkl, LogLikelihood = LogLikelihood))
}
# M-step of the block-mixture EM.
#
# Takes the E-step posteriors (see BM.StepE) plus the current mu/r and
# returns updated parameters:
#   p, q   - new mixing proportions,
#   mu     - new K x L block means (weighted average of the data),
#   r      - new dispersions from one Newton step on the profile
#            log-likelihood (first/second derivatives from the C++ kernels),
#            guarded against NA, <= 0 and runaway (> 1e10) values.
BM.StepM <- function(data, Psi.ik, Psi.ijkl, Omega.jl, Omega.ijkl, mu, r, K, L, n, m)
{
  # Combined posterior weights for each (i,j,k,l) cell, row and column sides.
  PsiPsi <- MyOuterPsiPsi(Psi.ik, Psi.ijkl)
  PsiPsi <- array(PsiPsi, dim = c(n, m, K, L))
  PsiPsi.jl <- apply(PsiPsi,c(2,4),sum)
  OmegaOmega <- MyOuterOmegaOmega(Omega.jl, Omega.ijkl)
  OmegaOmega <- array(OmegaOmega, dim = c(n, m, K, L))
  OmegaOmega.ik <- apply(OmegaOmega,c(1,3),sum)
  PsiOmega.kl <- apply(OmegaOmega+PsiPsi, 3:4, sum)
  # Mixing proportions (normalised by the number of contributing terms).
  p <- apply(Psi.ik+OmegaOmega.ik,2,sum)/n/(m+1)
  q <- apply(Omega.jl+PsiPsi.jl,2,sum)/m/(n+1)
  # Block means: posterior-weighted data sums over total weights.
  PsiOmegaData <- MyOuterMu(data, PsiPsi, OmegaOmega)
  PsiOmegaData <- array(PsiOmegaData, dim=c(n, m, K, L))
  PsiOmegaData.kl <- apply(PsiOmegaData, 3:4, sum)
  new.mu <- PsiOmegaData.kl/PsiOmega.kl
  # One Newton step for r: r - l'(r) / l''(r).
  PsiOmegaPartial1 <- MyOuterPartial1(data, PsiPsi, OmegaOmega, mu, r)
  PsiOmegaPartial1 <- array(PsiOmegaPartial1, dim=c(n, m, K, L))
  PsiOmegaPartial1.kl <- apply(PsiOmegaPartial1, 3:4, sum)
  PsiOmegaPartial2 <- MyOuterPartial2(data, PsiPsi, OmegaOmega, mu, r)
  PsiOmegaPartial2 <- array(PsiOmegaPartial2, dim=c(n, m, K, L))
  PsiOmegaPartial2.kl <- apply(PsiOmegaPartial2, 3:4, sum)
  new.r <- r - PsiOmegaPartial1.kl / PsiOmegaPartial2.kl
  # Safeguards: keep the previous r where the step produced NA or a
  # non-positive value, and cap explosive values at 1e10.
  #new.r[new.r < 1] <- 1
  #new.r[new.r < 1] <- r[new.r < 1]
  new.r[is.na(new.r)] <- r[is.na(new.r)]
  new.r[new.r > 1e+10] <- 1e+10
  new.r[new.r <= 0] <- r[new.r <= 0]
  return(list(p=p, q=q, mu=new.mu, r=new.r) )
}
# Initialise EM parameters by k-means on the rows (K clusters) and on the
# columns (L clusters) of the count matrix.
#
# For each (k,l) block, mu is the block's sample mean and r is a
# method-of-moments dispersion, mu^2 / (var - mu), used only when the block
# is over-dispersed (var > mean); otherwise max(block) is used, and 1 when
# var is NA (e.g. a single-cell block).  Non-positive r values are reset to 1.
#
# Returns list(mu, r, p, q, row.class, col.class); p and q are the k-means
# cluster-size proportions.  Note kmeans() is random, so results vary unless
# the caller sets a seed.
BM.InitTheta <- function(data, K, L)
{
  res1 <- kmeans(data,K)
  res2 <- kmeans(t(data),L)
  p <- res1$size/sum(res1$size)
  q <- res2$size/sum(res2$size)
  mu <- array(0,dim=c(K,L))
  r <- array(0,dim=c(K,L))
  for(k in 1:K)
  {
    for(l in 1:L)
    {
      # All data cells falling in row-cluster k and column-cluster l.
      tmp <- data[which(res1$cluster==k),which(res2$cluster==l)]
      tmp <- as.vector(tmp)
      mu[k,l] <- mean(tmp)
      if(is.na(var(tmp))){
        r[k, l] <- 1
      }
      else{
        if(var(tmp) > mean(tmp)){
          r[k, l] <- mu[k,l]^2 / {var(tmp) - mu[k,l]}
        }
        else{
          r[k, l] <- max(tmp)
        }
      }
      # Applied to the whole matrix each pass (harmless but redundant):
      # any non-positive dispersion is reset to 1.
      r[r<=0] <- 1
    }
  }
  return(list(mu=mu, r = r, p=p, q=q, row.class=res1$cluster, col.class=res2$cluster) )
}
# Run the full EM algorithm for the K x L negative-binomial block mixture.
#
# Stops when the relative log-likelihood change drops below the global
# RELATIVE_DIFF, or after REPEAT_LIMIT iterations.  A decreasing likelihood
# is reported but (deliberately) does not stop the loop -- see the
# commented-out break.  Progress (parameters and likelihood) is cat()-ed
# every iteration.
#
# Returns list(p, q, row.class, col.class, LogLikelihood, mu, r), where the
# hard class assignments are the argmax of the E-step posteriors.
BM.RunEM <- function(data, K, L)
{
  n <- dim(data)[1]
  m <- dim(data)[2]
  theta <- BM.InitTheta(data, K, L)
  #theta <- result0
  mu <- theta$mu
  r <- theta$r
  p <- theta$p
  q <- theta$q
  rpt <- 1
  LogLikelihood <- -Inf
  while(TRUE){
    OldLogLikelihood <- LogLikelihood
    EResult <- BM.StepE(data, mu, r, p, q, K, L, n, m)
    Psi.ik <- EResult$Psi.ik
    Psi.ijkl <- EResult$Psi.ijkl
    Omega.jl <- EResult$Omega.jl
    Omega.ijkl <- EResult$Omega.ijkl
    LogLikelihood <- EResult$LogLikelihood
    cat("K:",K," L:",L," rpt:", rpt, "\n")
    cat("mu:", mu, "\n")
    cat("r:", r, "\n")
    cat("p:", p, "\n")
    cat("q:", q, "\n")
    cat("LogLikelihood:",LogLikelihood,"\n")
    cat("\n")
    # A non-finite likelihood means something degenerated; stop immediately.
    if (is.infinite(LogLikelihood))
      break
    # Converged: relative change below tolerance.
    if( (abs(1 - OldLogLikelihood/LogLikelihood) < RELATIVE_DIFF) ){
      cat("quit due to likelihood\n")
      cat("RELATIVE_DIFF:",RELATIVE_DIFF,"\n")
      break
    }
    # Iteration cap.
    if( rpt >= REPEAT_LIMIT ){
      cat("quit due to rpt\n")
      break
    }
    # Likelihood went down: warn only (break intentionally disabled).
    if( OldLogLikelihood > LogLikelihood ){
      cat("quit due to likelihood decreasing\n")
      #break
    }
    oldtheta <- theta
    theta <- BM.StepM(data, Psi.ik, Psi.ijkl, Omega.jl, Omega.ijkl, mu, r, K, L, n, m)
    mu <- theta$mu
    r <- theta$r
    p <- theta$p
    q <- theta$q
    # If the M-step produced NA means, keep the previous iteration's values
    # in those cells rather than aborting.
    if( sum(is.na(mu)) > 0 ){
      cat("mu na\n")
      tmp.old.mu <- as.vector(oldtheta$mu)
      tmp.mu <- as.vector(mu)
      ind.na <- which(is.na(mu) == T)
      tmp.mu[ind.na] <- tmp.old.mu[ind.na]
      mu <- array(tmp.mu, dim = c(K, L))
      theta$mu <- mu
    }
    # Optional label-sorting for identifiability, currently disabled.
    #identifiable
    #tmp.ind1 <- sort(colSums(mu),index.return=T)$ix
    #mu <- mu[,tmp.ind1]
    #r <- r[,tmp.ind1]
    #q <- q[tmp.ind1]
    #tmp.ind2 <- sort(rowSums(mu),index.return=T)$ix
    #mu <- mu[tmp.ind2,]
    #r <- r[tmp.ind2,]
    #p <- p[tmp.ind2]
    rpt <- rpt + 1
  }
  # Hard assignments from the final posteriors.
  row.class <- max.col(Psi.ik)
  col.class <- max.col(Omega.jl)
  return(list(p=p, q=q, row.class = row.class, col.class = col.class,
              LogLikelihood = LogLikelihood, mu = mu, r=r) )
}
# Score vectors for the information-based BIC penalty.
#
# Given one (simulated) data set and fitted parameters, computes the
# derivative of each row's log-likelihood (drow, n x (K+L-2+2KL)) and of
# each column's log-likelihood (dcol, m x (K+L-2+2KL)) with respect to the
# free parameters, ordered as: p (K-1 via contrasts against class K),
# q (L-1 against class L), then mu and r vectorised over (k,l).
# The same clamp/centre log-space tricks as BM.StepE are used throughout.
# Returns list(DROW = drow, DCOL = dcol); consumed by BM.BIC.
BM.BICsimu <- function(data, mu, r, p, q, K, L)
{
  n <- dim(data)[1]
  m <- dim(data)[2]
  #K <- dim(mu)[1]
  #L <- dim(mu)[2]
  # Densities and their mu / r derivatives for every (i,j,k,l).
  prob <- r / (r + mu)
  Fijkl <- MyOuterNB(data, r, prob)
  Fijkl <- array(Fijkl, dim = c(n, m, K, L))
  Fijkl.dmu <- MyOuterDMu(data, mu, r, Fijkl)
  Fijkl.dmu <- array(Fijkl.dmu, dim = c(n, m, K, L))
  Fijkl.dr <- MyOuterDSigma(data, mu, r, Fijkl)
  Fijkl.dr <- array(Fijkl.dr, dim = c(n, m, K, L))
  #Fijkl[is.nan(Fijkl)] <- 1e-300
  Fijkl[Fijkl < 1e-300] <- 1e-300
  #Fijkl.dmu[is.nan(Fijkl.dmu)] <- 1e-300
  #Fijkl.dr[is.nan(Fijkl.dr)] <- 1e-300
  # ---- Row side: derivatives of each row's log-likelihood ----
  F1.ijk <- apply(Fijkl,1:3,FUN=function(x){x%*%q})
  F1.log.ijk <- log(F1.ijk)
  F1.log.ik <- apply(F1.log.ijk,c(1,3),sum)
  # Underflow clamp + centring, as in BM.StepE.
  F1.log.ik.adjtmp <- apply(F1.log.ik,1,
                            FUN=function(x){
                              tmpx <- max(x)-1400
                              x[which(x<tmpx)]<-tmpx
                              return(x)})
  F1.log.ik.adj <- apply(F1.log.ik.adjtmp,2,FUN=function(x){x-max(x)/2-min(x)/2})
  F1.log.ik.adj <- t(F1.log.ik.adj)
  F1.ik <- apply(F1.log.ik.adj,1,FUN=function(x){exp(x)/(exp(x)%*%p)})
  F1.ik <- t(F1.ik)
  # d/dp_k, k = 1..K-1 (class K absorbed by the sum-to-one constraint).
  drow.dk <- apply(as.matrix(F1.ik[,-K]),2,FUN=function(x){x-F1.ik[,K]})
  F1.i <- apply(F1.log.ik.adj,1,FUN=function(x){exp(x)%*%p})
  # d/dq_l, l = 1..L-1.
  F1.ijkl <- apply(Fijkl,1:3,FUN=function(x){x/(x%*%q)})
  F1.ijkl <- aperm(F1.ijkl, c(2,3,4,1))
  F1.ikl <- apply(F1.ijkl,c(1,3,4),sum)
  F1.ikl <- apply(F1.ikl,3,FUN=function(x){x*exp(F1.log.ik.adj)})
  F1.ikl <- array(F1.ikl,dim=c(n,K,L))
  F1.il <- apply(F1.ikl,c(1,3),FUN=function(x){x%*%p})
  F1.il <- apply(F1.il,2,FUN=function(x){x/F1.i})
  drow.dl <- apply(as.matrix(F1.il[,-L]),2,FUN=function(x){x-F1.il[,L]})
  # d/dmu_{kl} for every row.
  F1.ijkl.dmu <- apply(Fijkl.dmu,c(1,2,3),FUN=function(x){x*q})
  F1.ijkl.dmu <- apply(F1.ijkl.dmu,1,FUN=function(x){x/F1.ijk})
  F1.ijkl.dmu <- array(F1.ijkl.dmu,dim=c(n,m,K,L))
  F1.ikl.dmu <- apply(F1.ijkl.dmu,c(1,3,4),sum)
  F1.ikl.dmu <- apply(F1.ikl.dmu,3,FUN=function(x){x*exp(F1.log.ik.adj)})
  F1.ikl.dmu <- array(F1.ikl.dmu,dim=c(n,K,L))
  F1.ikl.dmu <- apply(F1.ikl.dmu,c(1,3),FUN=function(x){x*p})
  F1.ikl.dmu <- apply(F1.ikl.dmu,c(1,3),FUN=function(x){x/F1.i})
  drow.dmu <- apply(F1.ikl.dmu,1,FUN=function(x){as.vector(x)})
  drow.dmu <- t(drow.dmu)
  # d/dr_{kl} for every row.
  F1.ijkl.dr <- apply(Fijkl.dr,c(1,2,3),FUN=function(x){x*q})
  F1.ijkl.dr <- apply(F1.ijkl.dr,1,FUN=function(x){x/F1.ijk})
  F1.ijkl.dr <- array(F1.ijkl.dr,dim=c(n,m,K,L))
  F1.ikl.dr <- apply(F1.ijkl.dr,c(1,3,4),sum)
  F1.ikl.dr <- apply(F1.ikl.dr,3,FUN=function(x){x*exp(F1.log.ik.adj)})
  F1.ikl.dr <- array(F1.ikl.dr,dim=c(n,K,L))
  F1.ikl.dr <- apply(F1.ikl.dr,c(1,3),FUN=function(x){x*p})
  F1.ikl.dr <- apply(F1.ikl.dr,c(1,3),FUN=function(x){x/F1.i})
  drow.dr <- apply(F1.ikl.dr,1,FUN=function(x){as.vector(x)})
  drow.dr <- t(drow.dr)
  # ---- Column side: mirror-image derivatives per column ----
  F2.ijl <- apply(Fijkl,c(1,2,4),FUN=function(x){x%*%p})
  F2.log.ijl <- log(F2.ijl)
  F2.log.jl <- apply(F2.log.ijl,c(2,3),sum)
  F2.log.jl.adjtmp <- apply(F2.log.jl,1,
                            FUN=function(x){
                              tmpx <- max(x)-1400
                              x[which(x<tmpx)]<-tmpx
                              return(x)})
  F2.log.jl.adj <- apply(F2.log.jl.adjtmp,2,FUN=function(x){x-max(x)/2-min(x)/2})
  F2.log.jl.adj <- t(F2.log.jl.adj)
  F2.jl <- apply(F2.log.jl.adj,1,FUN=function(x){exp(x)/(exp(x)%*%q)})
  F2.jl <- t(F2.jl)
  dcol.dl <- apply(as.matrix(F2.jl[,-L]),2,FUN=function(x){x-F2.jl[,L]})
  F2.j <- apply(F2.log.jl.adj,1,FUN=function(x){exp(x)%*%q})
  F2.ijkl <- apply(Fijkl,c(1,2,4),FUN=function(x){x/(x%*%p)})
  F2.ijkl <- aperm(F2.ijkl, c(2,3,1,4))
  F2.jkl <- apply(F2.ijkl,c(2,3,4),sum)
  F2.jkl <- apply(F2.jkl,2,FUN=function(x){x*exp(F2.log.jl.adj)})
  F2.jkl <- array(F2.jkl,dim=c(m,L,K))
  F2.jk <- apply(F2.jkl,c(1,3),FUN=function(x){x%*%q})
  F2.jk <- apply(F2.jk,2,FUN=function(x){x/F2.j})
  dcol.dk <- apply(as.matrix(F2.jk[,-K]),2,FUN=function(x){x-F2.jk[,K]})
  F2.ijkl.dmu <- apply(Fijkl.dmu,c(1,2,4),FUN=function(x){x*p})
  F2.ijkl.dmu <- apply(F2.ijkl.dmu,1,FUN=function(x){x/F2.ijl})
  F2.ijkl.dmu <- array(F2.ijkl.dmu,dim=c(n,m,L,K))
  F2.jkl.dmu <- apply(F2.ijkl.dmu,c(2,3,4),sum)
  F2.jkl.dmu <- apply(F2.jkl.dmu,3,FUN=function(x){x*exp(F2.log.jl.adj)})
  F2.jkl.dmu <- array(F2.jkl.dmu,dim=c(m,L,K))
  F2.jkl.dmu <- apply(F2.jkl.dmu,c(1,3),FUN=function(x){x*q})
  F2.jkl.dmu <- apply(F2.jkl.dmu,c(1,3),FUN=function(x){x/F2.j})
  # Reorder to (j, k, l) so vectorisation matches the row-side ordering.
  F2.jkl.dmu <- aperm(F2.jkl.dmu,c(1,3,2))
  dcol.dmu <- apply(F2.jkl.dmu, 1, FUN = function(x){as.vector(x)})
  dcol.dmu <- t(dcol.dmu)
  F2.ijkl.dr <- apply(Fijkl.dr,c(1,2,4),FUN=function(x){x*p})
  F2.ijkl.dr <- apply(F2.ijkl.dr,1,FUN=function(x){x/F2.ijl})
  F2.ijkl.dr <- array(F2.ijkl.dr,dim=c(n,m,L,K))
  F2.jkl.dr <- apply(F2.ijkl.dr,c(2,3,4),sum)
  F2.jkl.dr <- apply(F2.jkl.dr,3,FUN=function(x){x*exp(F2.log.jl.adj)})
  F2.jkl.dr <- array(F2.jkl.dr,dim=c(m,L,K))
  F2.jkl.dr <- apply(F2.jkl.dr,c(1,3),FUN=function(x){x*q})
  F2.jkl.dr <- apply(F2.jkl.dr,c(1,3),FUN=function(x){x/F2.j})
  F2.jkl.dr <- aperm(F2.jkl.dr,c(1,3,2))
  dcol.dr <- apply(F2.jkl.dr, 1, FUN = function(x){as.vector(x)})
  dcol.dr <- t(dcol.dr)
  # Assemble: [d/dp | d/dq | d/dmu | d/dr].
  drow <- cbind(drow.dk,drow.dl,drow.dmu,drow.dr)
  dcol <- cbind(dcol.dk,dcol.dl,dcol.dmu,dcol.dr)
  return(list(DROW=drow,DCOL=dcol))
}
# Simulate an n x m biclustered count matrix from the block mixture.
#
# Row (column) class sizes are drawn rmultinom(1, n, p) (rmultinom(1, m, q));
# each (k,l) block is then filled with NB(size = r[k,l], mu = mu[k,l]) draws.
# Rows/columns come out grouped by class (not shuffled).
# Returns list(data = Z, reprow, repcol) -- unlike BM.SimuData.alt, which
# returns only the matrix.
BM.SimuData <- function(n, m, mu, r, p, q)
{
  K <- length(p)
  L <- length(q)
  rep.row <- rmultinom(1, size=n, prob=p)
  rep.col <- rmultinom(1, size=m, prob=q)
  Z <- NULL
  for (k in 1:K)
  {
    # Skip empty row classes (they contribute no rows).
    if(rep.row[k]!=0)
    {
      Y <- NULL
      for (l in 1:L)
      {
        # One (k,l) block, reshaped to rep.row[k] x rep.col[l].
        tmp1 <- rnbinom(rep.row[k]*rep.col[l], size = r[k,l], mu = mu[k,l])
        tmp2 <- array(tmp1,dim=c(rep.row[k],rep.col[l]))
        Y <- cbind(Y,tmp2)
      }
      Z <- rbind(Z,Y)
    }
  }
  Z <- as.array(Z)
  return(list(data = Z, reprow = rep.row, repcol = rep.col))
}
# Fit the K x L block mixture by EM and compute a BIC with an
# effective-degrees-of-freedom penalty estimated by parametric simulation.
#
# Args:
#   data - n x m count matrix.
#   K, L - numbers of row / column classes.
#   rep  - number of simulated data sets used to estimate H and J.
#          (Note: this shadows base::rep as a variable, but function lookup
#          still finds base::rep below.)
#
# For each simulated data set, H accumulates t(d)d (empirical information)
# and J the outer product of the summed scores; the penalty is
# (log n + log m) * trace(J H^{-1}), with H ridge-regularised by lambda
# before inversion for numerical stability.
# Returns list(H, J, Hsimu, Jsimu, BIC, est = EM fit).
BM.BIC <- function(data, K, L, rep=30)
{
  n <- dim(data)[1]
  m <- dim(data)[2]
  emResult <- BM.RunEM(data, K, L)
  mu <- emResult$mu
  r <- emResult$r
  p <- emResult$p
  q <- emResult$q
  H <- NULL
  J <- NULL
  for(i in 1:rep)
  {
    # Simulate from the fitted model and collect score matrices.
    tmpdata <- BM.SimuData.alt(n, m, mu, r, p, q)
    #tmpdata <- BM.SimuData(n, m, mu, r, p, q)
    BICResult <- BM.BICsimu(tmpdata, mu, r, p, q, K, L)
    drow <- BICResult$DROW
    dcol <- BICResult$DCOL
    cat("Simulation:",i,"\n")
    #cat("dRow:",drow,"\n")
    #cat("dCol:",dcol,"\n")
    #cat("\n")
    tmp1 <- t(drow)%*%drow+t(dcol)%*%dcol
    H <- cbind(H,as.vector(tmp1))
    tmp2 <- (colSums(drow)+colSums(dcol))%*%t(colSums(drow)+colSums(dcol))
    J <- cbind(J,as.vector(tmp2))
  }
  # Number of free parameters: (K-1) + (L-1) + 2*K*L for mu and r.
  tmp <- K+L-2+2*K*L
  H.simu <- apply(H,1,mean)
  J.simu <- apply(J,1,mean)
  H.simu <- array(H.simu,dim=c(tmp,tmp))
  J.simu <- array(J.simu,dim=c(tmp,tmp))
  # Ridge term so solve() does not fail on a near-singular H.
  lambda <- 5e-5
  H.simu.1 <- H.simu + diag(rep(lambda, tmp))
  tmpdiag <- diag(J.simu%*%solve(H.simu.1))
  BIC <- -2*emResult$LogLikelihood + (log(n)+log(m))*sum(tmpdiag)
  cat("trace:",sum(tmpdiag),"\n")
  cat("relative DF:",sum(tmpdiag)/tmp,"\n")
  cat("BIC:",BIC,"\n")
  cat("LogLikelihood:",emResult$LogLikelihood,"\n")
  cat("\n")
  return(list(H = H, J = J, Hsimu=H.simu, Jsimu=J.simu, BIC = BIC , est = emResult))
}
BM.SimuData.alt <- function(n, m, mu, r, p, q)
{
  # Identical simulation to BM.SimuData() (same RNG draws, in the same
  # order), but returns only the simulated data matrix, without the realized
  # row-/column-cluster sizes.  Delegating to BM.SimuData() removes the
  # duplicated body that previously had to be kept in sync by hand.
  BM.SimuData(n, m, mu, r, p, q)$data
}
|
2d53a662aacb46eae548d6e417655922fc613cc4 | f0219074aa427f3973f1c178e33fe9b6b135ef65 | /tests/testthat/test_flowmap.R | 0fdb651c3cd0989592f5fed14c22355fab02e73e | [] | no_license | cran/cartograflow | 50722e32c31a3a4fc32ad2e07233526bd24415a5 | 27e57ecabee92f9a6f26973c4478e1bdf5226a33 | refs/heads/master | 2021-07-14T16:57:06.458262 | 2020-07-19T18:50:02 | 2020-07-19T18:50:02 | 186,031,368 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,594 | r | test_flowmap.R | #' @title Mapping a flow matrix origin-destination
#' @param tab the input flow dataset .csv
#' @param fij colnames of the flow dataset
#' @param origin.f colnames Origin
#' @param destination.f colnames Destination
#' @param x an object (e.g. an sf layer) from which the coordinate reference system (crs) of the nodes is taken; if missing, the crs is left as NA
#' @param nodes the input pt file
#' @param code is the column with the spatial units ID
#' @param nodes.X coordinate X of the point file
#' @param nodes.Y coordinate Y of the point file
#' @param filter allows you to filter (or not) the flow dataset. See details
#' @param threshold is the value of the threshold criterion used to filter the values. The default is 1.
#' @param taille is a graphical parameter for modifying the width of the feature
#' @param bkg the geographical background file .shp or json ...
#' @param a.head integer code, determining the kind of arrows to be drawn. See Details
#' @param a.length length of the edges of the arrow head (in inches).
#' @param a.angle angle from the shaft of the arrow to the edge of the arrow head.
#' @param a.col color of the arrows
#' @param plota if supplied, the geographical background layer (bkg) is overlaid on the map
#' @return a matrix or a list with the correct tabflow ID code
#' @return The resulting flowmap
#' @details
#' The flow dataset must be converted to a dataframe for optimal performance (troubles remains with tibble format)
#'
#' If filter = FALSE, all the matrix values are plotted [n*(n-1) cells], i.e. all links outside the main diagonal.
#' If filter = TRUE only non-zero values are plotted, i.e. existing links with or without threshold.
#' The default threshold is set to 1.
#'
#' a.head is for applying an arrow or not
#' -- code=0 : the link has no head - no arrow
#' -- code=1 : an arrow is draw at (x0[i], y0[i]).
#' -- code=2 : an arrow is draw at (x1[j], y1[j])
#' -- code=3 : an arrow is draw at both nodes.
#' @importFrom graphics segments
#' @importFrom graphics arrows
#' @import sf
#' @examples
#' library(cartograflow)
#' library(sf)
#' data(flowdata)
#' map <- st_read(system.file("shape/MGP_TER.shp", package = "cartograflow"))
#' flowmap(
#' tab = flows, fij = "Fij", origin.f = "i", destination.f = "j",
#' bkg = map, code = "EPT_NUM", nodes.X = "X", nodes.Y = "Y",
#' filter = FALSE
#' )
#' @export
flowmap <- function(tab, fij, origin.f, destination.f, bkg = NULL,
                    x, nodes = NULL, code, nodes.X, nodes.Y,
                    filter, plota, threshold, taille,
                    a.head, a.length, a.angle, a.col) {
  # Map an origin-destination flow matrix as segments/arrows between places.
  # Places come either from a 'nodes' point table or from the 'bkg' areal
  # layer; flows are joined to coordinates via flowjointure().
  #
  # Changes from the previous revision (behavior-preserving otherwise):
  # - an explicit error when neither 'nodes' nor 'bkg' is supplied (the old
  #   code failed later with an obscure "object 'mgp_flow' not found");
  # - removal of the unused 'trace <-' assignments and of the no-op
  #   'else { filter }' / 'else { threshold }' branches.

  # Keep only the origin, destination and flow-value columns.
  tab <- tab %>% select(origin.f, destination.f, fij)

  if (is.null(nodes) && is.null(bkg)) {
    stop("flowmap() requires either a 'nodes' point layer or a 'bkg' background layer",
         call. = FALSE)
  }

  if (!is.null(nodes)) {
    # Point-layer mode: join flows to node coordinates, then plot the nodes.
    geo <- "pt"
    mgp_flow <- flowjointure(
      geom = geo, DF.flow = tab, origin = origin.f, destination = destination.f,
      DF.point = nodes, id = code, x = nodes.X, y = nodes.Y
    )
    nodes$code <- as.character(nodes[, code])
    nodes$nodes.X <- as.numeric(nodes[, nodes.X])
    nodes$nodes.Y <- as.numeric(nodes[, nodes.Y])
    # If no crs source 'x' is given, the nodes layer gets an NA crs.
    crs <- if (missing(x)) NA_crs_ else st_crs(x)
    nodes <- sf::st_as_sf(x = nodes, coords = c(nodes.X, nodes.Y), crs = crs)
    plot(nodes$geometry, col = "grey", lwd = 0.05)
  }

  if (!is.null(bkg)) {
    # Areal mode: join flows to the background layer's coordinates.
    geo <- "area"
    mgp_flow <- flowjointure(
      geom = geo, bkg, DF.flow = tab, origin = origin.f, destination = destination.f,
      id = code, x = nodes.X, y = nodes.Y
    )
    plot(mgp_flow$geometry.X, col = "grey", lwd = 0.05)
    # Supplying 'plota' (any value) overlays the background geometry, exactly
    # as in the original 'if (missing(plota)) ... else plot(...)' construct.
    if (!missing(plota)) {
      plot(sf::st_geometry(bkg), add = TRUE)
    }
  }

  if (missing(filter)) {
    filter <- FALSE
  }

  if (filter == FALSE) {
    # Draw every origin-destination pair as a plain segment (code = 0: no head).
    arrows(mgp_flow$Xi, mgp_flow$Yi, mgp_flow$Xj, mgp_flow$Yj, code = 0, col = "black")
    message("All theorical links are plotted")
  }

  if (filter == TRUE) {
    # Keep only flows at or above the threshold, then draw width-scaled arrows.
    if (missing(threshold)) {
      threshold <- 1
      message("you use the default threshold= 1")
    }
    mgp_flow <- mgp_flow[mgp_flow$ydata >= threshold, ]
    if (missing(taille)) {
      taille <- 1
    }
    maxsize <- taille
    # Link width proportional to flow value, with maximum width 'maxsize'.
    mgp_flow$size <- (mgp_flow$ydata / max(mgp_flow$ydata)) * maxsize
    # Default arrow-head parameters (see ?arrows for 'code', 'length', 'angle').
    if (missing(a.head)) {
      a.head <- 0
    }
    if (missing(a.length)) {
      a.length <- 0.1
    }
    if (missing(a.angle)) {
      a.angle <- 30
    }
    if (missing(a.col)) {
      a.col <- "#000311"
    }
    arrows(mgp_flow$Xi, mgp_flow$Yi, mgp_flow$Xj, mgp_flow$Yj,
           length = a.length, angle = a.angle, code = a.head, col = a.col, lwd = mgp_flow$size
    )
  }
}
|
f004d2d28ca8244bcff9a87d8d839f11f3fa7345 | 184180d341d2928ab7c5a626d94f2a9863726c65 | /issuestests/BAMBI/R/vmstart.R | b78ee740189b2353cd7ad3eda592903f2e36811d | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,893 | r | vmstart.R | #' @keywords internal
Ainv <- function(x) {
  # Piecewise approximation to the inverse of A(kappa) = I_1(kappa)/I_0(kappa)
  # (see A_bessel() below): maps a mean resultant length x to an estimated
  # von Mises concentration parameter.  Guard clauses replace the original
  # single-expression if/else chain; the three formulas are unchanged.
  if (x < 0.53) return(2*x + x^3 + 5/6*x^5)
  if (x >= 0.86 && x < 0.95) return((9 - 8*x + 3*x^2) / (8 * (1 - x)))
  (1.28 - 0.53*x^2) * tan(x*pi/2)
}
#' @keywords internal
A_bessel <- function(x){
besselI(x, 1, TRUE)/besselI(x, 0, TRUE)
}
#' @keywords internal
start_par_vm <- function(data.sub) {
  # Method-of-moments starting values for a univariate von Mises fit.
  # Returns c(concentration, mean direction), the latter mapped through
  # prncp_reg() (external helper; presumably reduces the angle to its
  # principal range).
  theta <- data.sub
  S <- mean(sin(theta))
  C <- mean(cos(theta))
  # Quadrant-corrected circular mean direction.
  theta.bar <- atan(S/C) + pi * (C < 0)
  resultant <- sqrt(S^2 + C^2)
  # Concentration from the mean resultant length, via the Ainv() approximation.
  kappa.hat <- Ainv(resultant)
  c(kappa.hat, prncp_reg(theta.bar))
}
#' @keywords internal
start_clus_kmeans_vm <- function(data.full, comp = 2, nstart = 10){
  # Starting values for a 'comp'-component von Mises mixture: embed the
  # angles on the unit circle, k-means-cluster the (cos, sin) coordinates,
  # then estimate per-cluster parameters with start_par_vm().
  data.full.cart <- t(sapply(data.full, function(x) c(cos(x), sin(x))))
  data.kmean <- kmeans(data.full.cart, centers = comp, nstart = nstart)
  ids <- data.kmean$cluster
  clust.ind <- lapply(1:comp, function(i) which(ids == i))
  par <- sapply(1:length(clust.ind), function(m) start_par_vm(data.full[clust.ind[[m]]]))
  # Mixing proportions from realized cluster sizes (listLen(): per-element lengths).
  pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
  # NOTE(review): 'par' has only 2 rows here (concentration, mean direction),
  # so x[1:3] indexes past the end and yields an NA in the ordering key.
  # This looks copied from the bivariate vmsin/vmcos versions, where rows 1:3
  # are the concentration parameters -- confirm whether x[1] was intended.
  order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
  list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #kmeans, then start_par for each cluster
#' @keywords internal
start_clus_rand_vm <- function(data.full, comp = 2, nstart = 10) {
  # Random-partition starting values for a 'comp'-component von Mises
  # mixture: draw random mixing proportions, assign each observation to a
  # component at random, then fit each group with start_par_vm().
  # ('nstart' is accepted for signature parity with the kmeans variant but
  # is unused here.)
  rand.pi <- runif(comp, 1/(2*comp), 2/comp)
  rand.pi <- rand.pi/sum(rand.pi)
  rand.multinom <- t(rmultinom(length(data.full), 1, rand.pi))
  ids <- apply(rand.multinom, 1, which.max)
  clust.ind <- lapply(1:comp, function(i) which(ids == i))
  par <- sapply(1:length(clust.ind), function(m) start_par_vm(data.full[clust.ind[[m]]]))
  pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
  # NOTE(review): as in start_clus_kmeans_vm(), 'par' has only 2 rows, so
  # x[1:3] produces an NA in the ordering key; likely x[1] was intended.
  order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
  list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #random groups, then start_par for each cluster
#' @keywords internal
start_par_vmsin <- function(data.sub) {
  # Method-of-moments starting values for a bivariate von Mises sine fit.
  # Returns c(k1, k2, k3, mu1, mu2): the two marginal concentrations, the
  # association parameter, and the two mean directions.
  x1 <- data.sub[,1]; y1 <- data.sub[,2]
  # Circular mean and mean resultant length of the first coordinate.
  Sbarphi <- mean(sin(x1))
  Cbarphi <- mean(cos(x1))
  phibar <- atan(Sbarphi/Cbarphi) + pi * (Cbarphi < 0)
  Rbarphi <- sqrt(Sbarphi^2 + Cbarphi^2)
  k1 <- Ainv(Rbarphi)
  # ... and of the second coordinate.
  Sbarpsi <- mean(sin(y1))
  Cbarpsi <- mean(cos(y1))
  psibar <- atan(Sbarpsi/Cbarpsi) + pi * (Cbarpsi < 0)
  Rbarpsi <- sqrt(Sbarpsi^2 + Cbarpsi^2)
  k2 <- Ainv(Rbarpsi)
  # Fisher-Lee circular correlation between the two coordinates.  The
  # denominator is the square root of the product of the two sums of squares;
  # the previous code divided by the raw product, which is not a correlation
  # and drastically shrinks the association start value.
  sindiffphi <- sin(outer(x1, x1, "-"))
  sindiffpsi <- sin(outer(y1, y1, "-"))
  rho <- sum(sindiffphi*sindiffpsi)/sqrt(sum(sindiffphi^2)*sum(sindiffpsi^2))
  c(k1, k2, rho*sqrt(k1*k2), prncp_reg(phibar), prncp_reg(psibar))
} #starting parameters from a dataset
#' @keywords internal
start_clus_kmeans_vmsin <- function(data.full, comp = 2, nstart = 10){
  # k-means-based starting values for a 'comp'-component von Mises sine
  # mixture: cluster the points in Cartesian coordinates (via sph2cart),
  # then fit each cluster with start_par_vmsin().
  cart <- t(apply(data.full, 1, sph2cart))
  km.fit <- kmeans(cart, centers = comp, nstart = nstart)
  ids <- km.fit$cluster
  clust.ind <- lapply(seq_len(comp), function(i) which(ids == i))
  par <- sapply(seq_along(clust.ind),
                function(m) start_par_vmsin(data.full[clust.ind[[m]],]))
  # Mixing proportions from realized cluster sizes.
  pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
  # Components are ordered by total squared concentration (parameters 1:3).
  order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
  list("par.mat" = par[, order.conc], "pi.mix" = pi.mix[order.conc],
       "clust.ind" = clust.ind[order.conc], "id" = ids)
} #kmeans, then start_par for each cluster
#' @keywords internal
start_clus_rand_vmsin <- function(data.full, comp = 2, nstart = 10) {
  # Random-partition starting values for a von Mises sine mixture: draw
  # random mixing proportions, assign each row to a component at random,
  # then fit each group with start_par_vmsin().  ('nstart' is unused; kept
  # for signature parity with the kmeans variant.)
  mix.wt <- runif(comp, 1/(2*comp), 2/comp)
  mix.wt <- mix.wt/sum(mix.wt)
  draw <- t(rmultinom(nrow(data.full), 1, mix.wt))
  ids <- apply(draw, 1, which.max)
  clust.ind <- lapply(seq_len(comp), function(i) which(ids == i))
  par <- sapply(seq_along(clust.ind),
                function(m) start_par_vmsin(data.full[clust.ind[[m]],]))
  pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
  # Components are ordered by total squared concentration (parameters 1:3).
  order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
  list("par.mat" = par[, order.conc], "pi.mix" = pi.mix[order.conc],
       "clust.ind" = clust.ind[order.conc], "id" = ids)
} #random groups, then start_par for each cluster
#' @keywords internal
start_par_vmcos <- function(data.sub) {
  # Method-of-moments starting values for a bivariate von Mises cosine fit.
  # Returns c(k1, k2, k3, mu, nu).
  x1 <- data.sub[,1]
  y1 <- data.sub[,2]
  # Marginal circular means and concentrations.
  Sbarphi <- mean(sin(x1))
  Cbarphi <- mean(cos(x1))
  phibar <- atan(Sbarphi/Cbarphi) + pi * (Cbarphi < 0)
  Rbarphi <- sqrt(Sbarphi^2 + Cbarphi^2)
  k1 <- Ainv(Rbarphi)
  Sbarpsi <- mean(sin(y1))
  Cbarpsi <- mean(cos(y1))
  psibar <- atan(Sbarpsi/Cbarpsi) + pi * (Cbarpsi < 0)
  Rbarpsi <- sqrt(Sbarpsi^2 + Cbarpsi^2)
  k2 <- Ainv(Rbarpsi)
  mu <- prncp_reg(phibar)
  nu <- prncp_reg(psibar)
  # Magnitude of k3 from the concentration of the angle difference phi - psi.
  # (The unused circular mean of that difference, computed in the previous
  # revision, has been dropped.)
  Sbarphi_psi <- mean(sin(x1-y1))
  Cbarphi_psi <- mean(cos(x1-y1))
  Rbarphi_psi <- sqrt(Sbarphi_psi^2 + Cbarphi_psi^2)
  k3.unsgn <- Ainv(Rbarphi_psi)
  # Sign of k3 from the Fisher-Lee circular correlation.  Only sign(rho) is
  # used, so normalizing by the square root of the product of sums (rather
  # than the raw product, as before) leaves the output unchanged while
  # making 'rho' an actual correlation, consistent with start_par_vmsin().
  sindiffphi <- sin(outer(x1, x1, "-"))
  sindiffpsi <- sin(outer(y1, y1, "-"))
  rho <- sum(sindiffphi*sindiffpsi)/sqrt(sum(sindiffphi^2)*sum(sindiffpsi^2))
  k3 <- sign(rho)*k3.unsgn
  c(k1, k2, k3, mu, nu)
} #starting parameters from a dataset
#' @keywords internal
start_clus_kmeans_vmcos <- function(data.full, comp = 2, nstart = 10){
  # k-means-based starting values for a 'comp'-component von Mises cosine
  # mixture: cluster the points in Cartesian coordinates (via sph2cart),
  # then fit each cluster with start_par_vmcos().
  cart <- t(apply(data.full, 1, sph2cart))
  km.fit <- kmeans(cart, centers = comp, nstart = nstart)
  ids <- km.fit$cluster
  clust.ind <- lapply(seq_len(comp), function(i) which(ids == i))
  par <- sapply(seq_along(clust.ind),
                function(m) start_par_vmcos(data.full[clust.ind[[m]],]))
  # Mixing proportions from realized cluster sizes.
  pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
  # Components are ordered by total squared concentration (parameters 1:3).
  order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
  list("par.mat" = par[, order.conc], "pi.mix" = pi.mix[order.conc],
       "clust.ind" = clust.ind[order.conc], "id" = ids)
} #kmeans, then start_par for each cluster
#' @keywords internal
start_clus_rand_vmcos <- function(data.full, comp = 2, nstart = 10){
  # Random-partition starting values for a von Mises cosine mixture: draw
  # random mixing proportions, assign each row to a component at random,
  # then fit each group with start_par_vmcos().  ('nstart' is unused; kept
  # for signature parity with the kmeans variant.)
  rand.pi <- runif(comp, 1/(2*comp), 2/comp)
  rand.pi <- rand.pi/sum(rand.pi)
  rand.multinom <- t(rmultinom(nrow(data.full), 1, rand.pi))
  ids <- apply(rand.multinom, 1, which.max)
  clust.ind <- lapply(seq_len(comp), function(i) which(ids == i))
  par <- sapply(seq_along(clust.ind), function(m) start_par_vmcos(data.full[clust.ind[[m]],]))
  pi.mix <- listLen(clust.ind)/length(unlist(clust.ind))
  # Order components by the squared concentration parameters (rows 1:3) only,
  # matching the other three start_clus_* functions; the previous sum_sq over
  # the whole column also folded in the mean directions mu and nu.
  order.conc <- order(order(apply(par, 2, function(x) sum_sq(x[1:3]))))
  list("par.mat" = par[,order.conc], "pi.mix" = pi.mix[order.conc], "clust.ind" = clust.ind[order.conc], "id" = ids)
} #random groups, then start_par for each cluster
|
941758da81a16a002062240d47c7ea627deb03d3 | 61bab2187b64568d2b99ffaa4dcd14f28e2ea73e | /Diphenotype_ordinal_demo_altparam.R | ae17b637795a31c855b5b0793838c4cda80de289 | [
"CC-BY-4.0"
] | permissive | RMKirkpatrick/mxGREMLdemos | b6aa0534f956de176f7e2d6e9c957891988757f4 | 04d1fbf0cadfe25380fd40c5214fd6ad62572ae7 | refs/heads/master | 2022-12-11T16:32:35.685582 | 2022-11-19T02:47:32 | 2022-11-19T02:47:32 | 201,999,169 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,674 | r | Diphenotype_ordinal_demo_altparam.R | # Copyright 2019-2022 by Robert M. Kirkpatrick
# Licensed under CC BY 4.0 <http://creativecommons.org/licenses/by/4.0/>
# This script demonstrates a GREML analysis of 2 ordinal phenotypes (3 levels each).
# The model is parameterized in terms of the traits' latent-scale correlations and variance proportions.
library(mvtnorm)
library(Matrix)
library(OpenMx)
options(mxCondenseMatrixSlots=TRUE)
set.seed(171216)
#With more threads, the job will run more quickly, but will require more memory:
mxOption(NULL,"Number of Threads",2)
#Number of simulees (participants):
N <- 1000
#Number of SNPs from which to construct GRM:
msnps <- 50000
#True parameter values, for data generation:
truevals <- c(
y1_tau1=qnorm(0.5), #<--Threshold #1 for trait 1; 0
y1_tau2=qnorm(0.75), #<--Threshold #2 for trait 1; 0.6744898
y2_tau1=qnorm(0.75), #<--Threshold #1 for trait 2; 0.6744898
y2_tau2=qnorm(0.9), #<--Threshold #2 for trait 2; 1.281552
y1_h2lat=0.3, #<--Latent-scale heritability of trait 1
y2_h2lat=0.8, #<--Latent-scale heritability of trait 2
latgencov=0.5*sqrt(0.3)*sqrt(0.8), #<--Traits' latent-scale genetic covariance; 0.244949
latenvcov=0.25 #<--Traits' latent-scale nonshared environmental covariance; re = 0.6681531
)
#True parameter values, for observed scale:
truevals2 <- c(
mu1=(0.5*0)+(0.25*1)+(0.25*2), #<--Observed mean of trait 1; 0.75
mu2=(0.75*0)+(0.15*1)+(0.1*2), #<--Observed mean of trait 2; 0.35
vp1=(0.5*0^2)+(0.25*1^2)+(0.25*2^2) - (0.75^2), #<--Observed variance of trait 1; 0.6875
vp2=(0.75*0^2)+(0.15*1^2)+(0.1*2^2) - (0.35^2), #<--Observe variance of trait 2; 0.4275
k1=sum(dnorm(c(qnorm(0.5),qnorm(0.75))))^2, #<--Trait 1's heritability conversion factor; 0.5136859
k2=sum(dnorm(c(qnorm(0.75),qnorm(0.9))))^2, #<--Trait 2's heritability conversion factor; 0.2433201
#Observed-scale heritability of trait #1, 0.2241539:
y1_h2obs=0.3 * sum(dnorm(c(qnorm(0.5),qnorm(0.75))))^2 / ((0.5*0^2)+(0.25*1^2)+(0.25*2^2) - (0.75^2)),
#Observed-scale heritability of trait #2, 0.4553359:
y2_h2obs=0.8 * sum(dnorm(c(qnorm(0.75),qnorm(0.9))))^2 / ((0.75*0^2)+(0.15*1^2)+(0.1*2^2) - (0.35^2))
)
#Generate msnps SNPs. SNPs are in linkage equilibrium, with MAF in interval [0.05,0.5]:
snps <- matrix(NA_real_,nrow=N,ncol=msnps)
for(mi in 1:msnps){
maf <- runif(1,min=0.05,max=0.5)
snps[,mi] <- rbinom(n=N,size=2,prob=maf)
snps[,mi] <- (snps[,mi]-mean(snps[,mi]))/sd(snps[,mi])
#print(mi)
}
GRM <- snps%*%t(snps) / msnps #<--#Calculate GRM from SNPs.
ev <- eigen(GRM,symmetric=T) #<--Eigen-decompose the GRM.
#If you don't care whether or not the GRM is positive-definite, you can comment out this part. It "bends" the GRM to the nearest
#(in a least-squares sense) positive-definite matrix:
if(!all(ev$values > .Machine$double.eps)){
GRM <- as.matrix(nearPD(GRM)$mat)
}
#Free some memory:
rm(snps, ev); gc()
#Covariance matrix for genetic liabilities:
varglat <- rbind(
cbind(GRM*truevals["y1_h2lat"], GRM*truevals["latgencov"]),
cbind(GRM*truevals["latgencov"], GRM*truevals["y2_h2lat"])
)
#Covariance matrix for nonshared environmental liabilities:
varelat <- matrix(data=c(1-truevals["y1_h2lat"], 0.5-truevals["latgencov"], 0.5-truevals["latgencov"], 1-truevals["y2_h2lat"]), nrow=2)
#Genetic liabilities:
glat <- rmvnorm(n=1, mean=rep(0,2*N), sigma=varglat)
#Nonshared environmental liabilities:
elat <- rmvnorm(n=N, mean=c(0,0), sigma=varelat)
#Trait 1, latent scale:
y1lat <- glat[1:N]+elat[,1]
#Trait 2, latent scale:
y2lat <- glat[(N+1):(2*N)]+elat[,2]
#Trait 1, observed scale:
y1obs <- (y1lat>truevals["y1_tau1"]) + (y1lat>truevals["y1_tau2"])
#Trait 2, observed scale:
y2obs <- (y2lat>truevals["y2_tau1"]) + (y2lat>truevals["y2_tau2"])
table(y1obs)
table(y2obs)
#Free some memory:
rm(elat, glat, varelat, varglat); gc()
# Estimate thresholds, using a conventional ordinal-FIML analyisis: ###
threshmod <- mxModel(
"Threshold_Model",
mxData(observed=data.frame(y1=mxFactor(y1obs,levels=c(0,1,2)),y2=mxFactor(y2obs,levels=c(0,1,2))),type="raw"),
mxMatrix(
type="Full",nrow=2,ncol=2,free=T,
values=c(
qnorm(mean(y1obs <= 0, na.rm=T)), qnorm(mean(y1obs <= 1, na.rm=T)), qnorm(mean(y2obs <= 0, na.rm=T)), qnorm(mean(y2obs <= 1, na.rm=T))),
labels=c("y1_tau1","y1_tau2","y2_tau1","y2_tau2"),
name="Tau"),
mxMatrix(type="Stand",nrow=2,free=T,values=cor(y1obs,y2obs,use="pair"),labels="rho",lbound=-0.9999,ubound=0.9999,name="R"),
mxMatrix(type="Zero",nrow=1,ncol=2,name="Mu"),
mxFitFunctionML(),
mxExpectationNormal(covariance="R",means="Mu",dimnames=c("y1","y2"),thresholds="Tau",threshnames=c("y1","y2"))
)
threshmod <- mxRun(threshmod)
summary(threshmod)
#Conversion factor for trait 1:
( k1 <- sum(dnorm(threshmod$Tau$values[,1]))^2 )
#Conversion factor for trait 2:
( k2 <- sum(dnorm(threshmod$Tau$values[,2]))^2 )
#Free some memory:
rm(threshmod); gc()
# Use Haseman-Elston to estimate heritabilities, for use as OpenMx start values ###
#Standardized phenotype 1:
yy1 <- (y1obs-mean(y1obs))/sd(y1obs)
#Standardized phenotype 2:
yy2 <- (y2obs-mean(y2obs))/sd(y2obs)
#Upper triangle of GRM:
U <- matrix(NA_real_,N,N)
U[!lower.tri(U,diag=T)] <- GRM[!lower.tri(GRM,diag=T)]
#Vector of off-diagonal GRM coefficients:
xx <- rep(NA_real_, N*(N-1)/2)
#Vectors to hold cross-products of participants' standardized phenotypes:
yy1.2 <- rep(NA_real_, N*(N-1)/2)
yy2.2 <- rep(NA_real_, N*(N-1)/2)
#Loop to populate vectors to be used in HE regression:
sofar <- 1
for(i in 1:(N-1)){
yy1.2[sofar:(sofar+N-1-i)] <- yy1[i]*yy1[(i+1):N]
yy2.2[sofar:(sofar+N-1-i)] <- yy2[i]*yy2[(i+1):N]
xx[sofar:(sofar+N-1-i)] <- U[i,][!is.na(U[i,])]
sofar <- sofar+N-i
#print(i)
}
#Should be no more NAs:
sum(is.na(yy1.2))
sum(is.na(yy2.2))
sum(is.na(xx))
#HE regression for trait 1; slope estimates observed-scale heritability:
( her1 <- lm(yy1.2~xx)$coefficients[2] )
#HE regression for trait 2:
( her2 <- lm(yy2.2~xx)$coefficients[2] )
#Enforce bounds on observed-scale heritability estimates:
her1 <- ifelse(her1<0, 0, her1)
her1 <- ifelse(her1>0.9999, 0.9999, her1)
her2 <- ifelse(her2<0, 0, her2)
her2 <- ifelse(her2>0.9999, 0.9999, her2)
#Free some memory:
rm(U,xx,yy1,yy1.2,yy2,yy2.2); gc()
#With mxGREML, you have to treat phenotypes that are actually ordinal as though they're continuous:
widedata <- cbind(y1=as.numeric(y1obs), y2=as.numeric(y2obs))
#The only fixed effects are the two intercepts:
gremldat <- mxGREMLDataHandler(data=widedata,yvars=c("y1","y2"),blockByPheno=TRUE)
head(gremldat$yX)
#Options for verifying analytic derivatives with NPSOL:
# mxOption(NULL,"Print level",20)
# mxOption(NULL,"Print file",1)
# mxOption(NULL,"Verify level",3)
# mxOption(NULL,"Function precision",1e-7)
#Custom compute plan:
plan <- mxComputeSequence(
steps=list(
mxComputeGradientDescent(engine="SLSQP",verbose=5L),
#mxComputeGradientDescent(engine="NPSOL",verbose=5L),
mxComputeOnce("fitfunction", c("gradient","hessian")),
mxComputeStandardError(),
mxComputeHessianQuality(),
mxComputeReportDeriv(),
mxComputeReportExpectation()
))
#^^^Note: If you are running the R GUI under Windows, delete the 'verbose=5L' argument in the above.
#mxGREML model:
gremlmod <- mxModel(
"Diphenotype_Ordinal",
mxExpectationGREML(V="V",dataset.is.yX=TRUE),
mxData(observed=gremldat$yX, type="raw", sort=F),
plan,
#Trait 1's latent-scale heritability:
mxMatrix(type="Full",nrow=1,ncol=1,free=T,values=her1*var(y1obs,na.rm=T)/k1,labels="h2lat1",lbound=0,ubound=0.9999,name="H2lat1"),
#Trait 2's latent-scale heritability:
mxMatrix(type="Full",nrow=1,ncol=1,free=T,values=her2*var(y2obs,na.rm=T)/k2,labels="h2lat2",lbound=0,ubound=0.9999,name="H2lat2"),
#Conversion factors:
mxMatrix(type="Full",nrow=1,ncol=1,free=F,values=k1,name="Konv1"),
mxMatrix(type="Full",nrow=1,ncol=1,free=F,values=k2,name="Konv2"),
#Observed-scale phenotypic variances:
mxMatrix(type="Full",nrow=1,ncol=1,free=T,values=var(y1obs,na.rm=T),labels="vp1",lbound=0.0001,name="Vp1"),
mxMatrix(type="Full",nrow=1,ncol=1,free=T,values=var(y2obs,na.rm=T),labels="vp2",lbound=0.0001,name="Vp2"),
#Genetic correlation:
mxMatrix(type="Full",nrow=1,ncol=1,free=T,values=cor(y1obs,y2obs,use="pair"),labels="ra",lbound=-0.9999,ubound=0.9999,name="Ra"),
#Nonshared environmental correlation:
mxMatrix(type="Full",nrow=1,ncol=1,free=T,values=cor(y1obs,y2obs,use="pair"),labels="re",lbound=-0.9999,ubound=0.9999,name="Re"),
#Fixed matrices for use in MxAlgebras:
mxMatrix(type="Symm",nrow=N,free=F,values=GRM,name="A"),
mxMatrix(type="Unit",nrow=N,ncol=1,name="Uno"),
mxMatrix(type="Zero",nrow=N,ncol=N,name="Zip"),
#Covariance matrix, V:
mxAlgebra(
rbind(
cbind(A%x%(H2lat1*Konv1), Zip),
cbind(A%x%(Ra*sqrt(H2lat1*Konv1*H2lat2*Konv2)), A%x%(H2lat2*Konv2))
) +
rbind(
cbind(vec2diag(Uno%x%(Vp1-H2lat1*Konv1)), Zip),
cbind(vec2diag(Uno%x%(Re*sqrt(Vp1-H2lat1*Konv1)*sqrt(Vp2-H2lat2*Konv2))), vec2diag(Uno%x%(Vp2-H2lat2*Konv2)))
), name="V"),
#^^^Note that everything above the diagonal in V and its matrix derivatives is ignored by the backend; here, those elements are
#simply set to zero, to avoid unnecessary computational work.
#The parameterization of this model makes the derivatives of V rather messy...:
mxAlgebra(
rbind(
cbind(A%x%Konv1, Zip),
cbind(A%x%(Ra*sqrt(Konv1*H2lat2*Konv2)*0.5/sqrt(H2lat1)), Zip)
) +
rbind(
cbind(vec2diag(Uno%x%(-1*Konv1)), Zip),
cbind(vec2diag(Uno%x%(Re*sqrt(Vp2-H2lat2*Konv2)*0.5/sqrt(Vp1-H2lat1*Konv1)*(-1*Konv1))), Zip)
), name="dV_dh2lat1"),
mxAlgebra(
rbind(
cbind(Zip, Zip),
cbind(A%x%(Ra*sqrt(Konv1*H2lat1*Konv2)*0.5/sqrt(H2lat2)), A%x%Konv2)
) +
rbind(
cbind(Zip, Zip),
cbind(vec2diag(Uno%x%(Re*sqrt(Vp1-H2lat1*Konv1)*0.5/sqrt(Vp2-H2lat2*Konv2)*(-1*Konv2))), vec2diag(Uno%x%(-1*Konv2)))
), name="dV_dh2lat2"),
mxAlgebra(
rbind(
cbind(vec2diag(Uno), Zip),
cbind(vec2diag(Uno%x%(Re*sqrt(Vp2-H2lat2*Konv2)*0.5/sqrt(Vp1-H2lat1*Konv1))), Zip)
), name="dV_dvp1"),
mxAlgebra(
rbind(
cbind(Zip, Zip),
cbind(vec2diag(Uno%x%(Re*sqrt(Vp1-H2lat1*Konv1)*0.5/sqrt(Vp2-H2lat2*Konv2))), vec2diag(Uno))
), name="dV_dvp2"),
mxAlgebra(
rbind(
cbind(Zip, Zip),
cbind(A%x%sqrt(H2lat1*Konv1*H2lat2*Konv2), Zip)
), name="dV_dra"),
mxAlgebra(
rbind(
cbind(Zip, Zip),
cbind(vec2diag(Uno%x%(sqrt(Vp1-H2lat1*Konv1)*sqrt(Vp2-H2lat2*Konv2))), Zip)),
name="dV_dre"),
mxFitFunctionGREML(dV=c(h2lat1="dV_dh2lat1",h2lat2="dV_dh2lat2",vp1="dV_dvp1",vp2="dV_dvp2",ra="dV_dra",re="dV_dre"))
)
#Free some memory:
rm(GRM,widedata,gremldat,plan); gc()
gremlmod <- mxRun(gremlmod)  #<--Run the model (the computationally expensive step).
gc()
#See results:
summary(gremlmod, verbose=T)
gremlmod$output$fit
gremlmod$output$gradient
gremlmod$output$hessian
a419ba0cda8d942a7126a97f1e4e7723e79ce739 | 8a87ac5a13ed42ef99a7f0e566e07e32207c885a | /refm/api/src/bigdecimal/util.rd | 7bf2fd12aabecd6be8cd55110c87352a70164854 | [] | no_license | mrkn/rubydoc | cf096e253afbe11d94a7b390645f18872754e5f1 | dff08665e8c537073d3ebab5b98d4bcd175055b5 | refs/heads/master | 2021-01-18T14:54:27.743911 | 2010-08-15T06:15:58 | 2010-08-15T06:15:58 | 876,651 | 1 | 0 | null | null | null | null | EUC-JP | R | false | false | 2,272 | rd | util.rd |
String、Float、Rational オブジェクト を BigDecimal オブジェクトに変換する機能を提供
します。
* [[m:String#to_d]]
* [[m:Float#to_d]]
* [[m:Rational#to_d]]
#@until 1.9.1
また、BigDecimal オブジェクトを Rational オブジェクトに変換する機能も提供します。
* [[m:BigDecimal#to_r]]
#@end
これらのメソッドを使うには 'bigdecimal' と 'bigdecimal/util'を require
する必要があります。
= reopen Float
== Instance Methods
--- to_d -> BigDecimal
自身を [[c:BigDecimal]] に変換します。
@return [[c:BigDecimal]] に変換したオブジェクト
自身を一度 [[m:Float#to_s]] で文字列に変換してから
[[m:Kernel.#BigDecimal]] で生成するので、精度に注意してください。
1.0.to_d.to_s # => "0.1E1"
(1.0/0).to_d.to_s # => "Infinity"
((1.0/3).to_d/(2.0/3).to_d).to_s # => "0.499999999999999250000000000000375E0"
((1.0/3)/(2.0/3)).to_d.to_s # => "0.5E0"
= reopen String
== Instance Methods
--- to_d -> BigDecimal
自身を [[c:BigDecimal]] に変換します。BigDecimal(self) と同じです。
@return [[c:BigDecimal]] に変換したオブジェクト
= reopen BigDecimal
== Instance Methods
--- to_digits -> String
自身を "1234.567" のような十進数の形式にフォーマットした文字列に変換し
ます。
@return 十進数の形式にフォーマットした文字列
注意:
このメソッドは非推奨です。[[m:BigDecimal#to_s]]("F") を使用してください。
#@until 1.9.1
--- to_r -> Rational
自身を [[c:Rational]] に変換します。
@return [[c:Rational]] に変換したオブジェクト
#@end
= reopen Rational
== Instance Methods
--- to_d(nFig = 0) -> BigDecimal
自身を [[c:BigDecimal]] に変換します。
nFig が指定された場合、nFig 桁まで計算を行います。省略したり 0 以下を指
定した場合は [[m:BigDecimal.double_fig]] * 2 + 1 桁まで計算を行います。
@param nFig 計算を行う桁数
@return [[c:BigDecimal]] に変換したオブジェクト
例:
require "rational"
require "bigdecimal"
require "bigdecimal/util"
Rational(1, 3).to_d(3).to_s # => "0.333E0"
Rational(1, 3).to_d(10).to_s # => "0.3333333333E0"
|
e56dc56b322d3ff7b01c430012abd870e00abf04 | f43ff1e09138649558c2e90a75bd2d4f3cbbdbb6 | /source/macOS/R-Portable-Mac/library/AnnotationDbi/unitTests/test_select_NOSCHEMA.R | dec837bd329a1fdd4eb36280220566fc105a9133 | [
"GPL-2.0-only",
"MIT",
"CC-BY-3.0"
] | permissive | romanhaa/Cerebro | 5b2d9371403c52f60341894f84cd0f6a006cc930 | 946ed178c986027d60af6013e63d1fc51ae8b371 | refs/heads/master | 2022-12-02T15:49:57.705873 | 2021-11-20T11:47:12 | 2021-11-21T17:09:37 | 164,686,297 | 87 | 23 | MIT | 2022-11-10T18:21:44 | 2019-01-08T16:09:59 | HTML | UTF-8 | R | false | false | 4,450 | r | test_select_NOSCHEMA.R | ## this will install a testDb stashed in the
## ## this is the package name
## pkgName <- "org.testing.db"
## ## Get the package path
## pkgPath <- system.file("extdata", pkgName, package="AnnotationDbi")
## ## Then install it
## install.packages(pkgPath, repos=NULL)
## and load it
#####install.packages(system.file('extdata','org.testing.db', package='AnnotationDbi'), repos=NULL)
dir.create(testlib <- tempfile())
old_libPaths <- NULL
# RUnit fixture: run before each test. Installs the bundled org.testing.db
# test package into a session-temporary library (once), prepends that
# library to the search path, and publishes the expected reference values
# used by the assertions below as globals (via <<-).
#
# NOTE(review): when the package is already installed the function returns
# early, so `old_libPaths` / `finchCsomes` / `finchCols` are only (re)set on
# the first call — later calls rely on the globals still being in place.
.setUp <- function()
{
    installed <- rownames(installed.packages(testlib))
    # Install only once per session; subsequent calls just return.
    if ("org.testing.db" %in% installed)
        return()
    # The test package ships inside AnnotationDbi's extdata directory.
    pkg <- system.file("extdata", "org.testing.db", package="AnnotationDbi")
    suppressPackageStartupMessages(install.packages(
        pkg, lib = testlib, repos=NULL, type="source",
        INSTALL_opts="--no-test-load", verbose = FALSE, quiet = TRUE
    ))
    # Remember the original search path so .tearDown() can restore it.
    old_libPaths <<- .libPaths()
    .libPaths(c(testlib, old_libPaths))
    # Expected zebra-finch chromosome names returned by keys(..., "CHROMOSOME").
    finchCsomes <<- c(as.character(1:15),as.character(17:28),
                      "MT","Un","W","Z","4A","1A","1B")
    # Expected columns/keytypes exposed by the schema-less test DB.
    finchCols <<- c("CHROMOSOME","SYMBOL","GENENAME","GID","GO","EVIDENCE",
                    "ONTOLOGY","GOALL","EVIDENCEALL","ONTOLOGYALL")
}
# RUnit fixture: run after each test; restores the library search path that
# .setUp() saved in the global `old_libPaths`.
.tearDown <- function() {
    .libPaths(old_libPaths)
}
## lower level tests (more useful)
# Low-level keys helper: .noSchemaKeys() must return exactly the expected
# finch chromosome names (order-independent comparison).
test_keysLow <- function(){
    testdb <- org.testing.db::org.testing.db
    got <- AnnotationDbi:::.noSchemaKeys(testdb, "CHROMOSOME")
    got <- unique(got)
    checkTrue(all(sort(got) == sort(finchCsomes)))
}
# Low-level select helper: .noSchemaSelect() must map keys of one keytype to
# the requested columns and return them in the requested column order.
test_selectLow <- function(){
    x <- org.testing.db::org.testing.db
    # Single key / single column: GID -> SYMBOL.
    keys <- "100008579"
    cols <- "SYMBOL"
    keytype <- "GID"
    res <- AnnotationDbi:::.noSchemaSelect(x, keys, cols, keytype)
    checkTrue(all(res==c("100008579","EGR1")))
    checkTrue(all(colnames(res)==c("GID","SYMBOL")))
    # Multiple columns, alternate keytype: GENENAME -> SYMBOL + GID.
    keys <- "brain-derived neurotrophic factor"
    cols <- c("SYMBOL","GID")
    keytype <- "GENENAME"
    res <- AnnotationDbi:::.noSchemaSelect(x, keys, cols, keytype)
    checkTrue(all(res==c("brain-derived neurotrophic factor","BDNF","751584")))
    checkTrue(all(colnames(res)==c("GENENAME","SYMBOL","GID")))
    # One-to-many mapping (GO terms): only the first returned row is checked.
    keys <- "brain-derived neurotrophic factor"
    cols <- c("GO","GID")
    keytype <- "GENENAME"
    res <- head(AnnotationDbi:::.noSchemaSelect(x, keys, cols, keytype),n=1)
    checkTrue(all(res==c("brain-derived neurotrophic factor","GO:0001657",
                         "751584")))
    checkTrue(all(colnames(res)==c("GENENAME","GO","GID")))
}
## high level tests (does this dispatch right etc.?)
# High-level API: columns() on the schema-less test DB must expose exactly
# the expected column set (order-independent).
test_columns <- function(){
    testdb <- org.testing.db::org.testing.db
    observed <- columns(testdb)
    checkTrue(all(sort(observed) == sort(finchCols)))
}
# High-level API: keytypes() must report the same set as columns() for this
# schema-less database.
test_keytypes <- function(){
    testdb <- org.testing.db::org.testing.db
    observed <- keytypes(testdb)
    checkTrue(all(sort(observed) == sort(finchCols)))
}
# High-level API: keys() dispatch, including pattern matching and lookup
# through an alternate column.
test_keys<- function(){
    x <- org.testing.db::org.testing.db
    ## most basic case
    res <- keys(x, "CHROMOSOME")
    checkTrue(all(sort(res) == sort(finchCsomes)))
    # First two gene IDs in default ordering.
    res <- head(keys(x, "GID"), n=2)
    checkTrue(all(res==c("751582", "751583")))
    # Pattern filter on the keytype itself.
    res <- head(keys(x, "SYMBOL", pattern="BDNF"))
    checkTrue(res=="BDNF")
    # Pattern applied to another column, keys returned for the keytype.
    res <- head(keys(x, "GID", pattern="BDNF", column="SYMBOL"))
    checkTrue(res=="751584")
    # Restrict keys to those that have a value in the given column.
    res <- head(keys(x, "SYMBOL", column="GID"),n=2)
    checkTrue(all(res==c("ACT5C","AHSA2")))
}
# High-level API: select() on the schema-less test DB — single column,
# multiple columns, one-to-many columns (GO), and lookup from an alternate
# keytype.
test_select <- function(){
    x <- org.testing.db::org.testing.db
    ## most basic case
    res <- select(x, keys="100008579",
                  columns="SYMBOL", keytype="GID")
    checkTrue(all(res==c("100008579","EGR1")))
    checkTrue(all(colnames(res)==c("GID","SYMBOL")))
    ## return more than one column
    res <- select(x, keys="100008579",
                  columns=c("SYMBOL","CHROMOSOME"), keytype="GID")
    checkTrue(all(res==c("100008579","EGR1","13")))
    checkTrue(all(colnames(res)==c("GID","SYMBOL","CHROMOSOME")))
    ## return GO and evidence codes (1:many mapping -> warning suppressed,
    ## only the first row is checked)
    suppressWarnings(res <- head(select(x, keys="100008579",
                                        columns=c("GO","EVIDENCE"), keytype="GID"),n=1))
    checkTrue(all(res==c("100008579","GO:0000122","IEA")))
    checkTrue(all(colnames(res)==c("GID","GO","EVIDENCE")))
    ## test lookup from alt-key
    res <- select(x, keys="BDNF",
                  columns="GENENAME", keytype="SYMBOL")
    checkTrue(all(res==c("BDNF","brain-derived neurotrophic factor")))
    checkTrue(all(colnames(res)==c("SYMBOL","GENENAME")))
}
|
1e2bd6154264a8e23da42391a71f655b6cdddd2d | 6b1b2a6c4dcbb117a60386f58d3cdee99450863e | /Course3 - Getting and Cleaning Data/Week2/Quiz2.R | 8c5095a0aba18a33cc98bc802888cef43e6b544f | [] | no_license | mpedreira/datasciencecoursera | 6a0a60a0d33d04b86bf505dc7e21f420e18f9260 | 432887b289695d743aa3bca4bc96644d813ec40a | refs/heads/master | 2021-01-01T05:39:34.224000 | 2015-06-14T20:33:21 | 2015-06-14T20:33:21 | 34,282,907 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,630 | r | Quiz2.R | question1 <- function (){
library(jsonlite)
JSON <- fromJSON("https://api.github.com/users/jtleek/repos", flatten = TRUE)
created_at <- JSON[JSON$name == "datasharing",]$created_at
print(c("El repo se ha creado: ",created_at))
created_at
}
question2 <- function (){
  # Quiz question 2: use sqldf to select the pwgtp1 column for respondents
  # younger than 50 from the American Community Survey extract.
  library(sqldf)
  acs_file <- "./specdata/question2.csv"
  # One-off download of the ACS extract (kept here for reproducibility):
  # file <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
  # download.file(file,destfile = acs_file,method="curl",quiet = TRUE)
  acs <- read.csv(acs_file, header=T, sep=",")
  # Correct answer: equivalent to acs[acs$AGEP < 50, "pwgtp1"].
  head(sqldf("select pwgtp1 from acs where AGEP < 50"))
  # Rejected quiz alternatives:
  # sqldf("select * from acs where AGEP < 50 and pwgtp1") ## does not restrict to the pwgtp1 column
  # sqldf("select pwgtp1 from acs") ## does not filter on AGEP < 50
  # sqldf("select * from acs where AGEP < 50") ## does not restrict to the pwgtp1 column
}
# Quiz question 3: the distinct values of AGEP, obtained with an SQL query
# run against the in-memory data frame via sqldf.
question3 <- function (){
  library(sqldf)
  acs_file <- "./specdata/question2.csv"
  acs <- read.csv(acs_file, header = TRUE, sep = ",")
  sqldf("select distinct AGEP from acs")
  # Rejected quiz alternatives (invalid SQLite syntax):
  #   sqldf("select unique AGEP from acs")
  #   sqldf("select AGEP where unique from acs")
  #   sqldf("select unique * from acs")
}
question4 <- function (){
  # Quiz question 4: number of characters on lines 10, 20, 30 and 100 of
  # the page, read directly from a URL connection.
  jleek_url <- "http://biostat.jhsph.edu/~jleek/contact.html"
  data <- url(jleek_url)
  html <- readLines(data)
  result <- sapply(html[c(10, 20, 30, 100)], nchar)
  # Close the connection as soon as the page has been read.
  close(data)
  print(paste("El Resultado es :",as.character(result[1]),as.character(result[2]),as.character(result[3]),as.character(result[4]),sep=" "))
  # Return the four character counts as integers.
  c(as.integer(result[1]),as.integer(result[2]),as.integer(result[3]),as.integer(result[4]))
}
## Standard (download-to-file) variant of question4, matching the pattern
## used by the other questions.
question4b <- function (){
  # Quiz question 4 (alternative implementation): download the page to a
  # local file, then count the characters on lines 10, 20, 30 and 100.
  jleek_file <- "./specdata/question4.html"
  file <- "http://biostat.jhsph.edu/~jleek/contact.html"
  download.file(file, destfile = jleek_file, method = "curl", quiet = TRUE)
  # BUG FIX: the original read from the undefined variable `data_file`
  # instead of the file it had just downloaded.
  html <- readLines(jleek_file)
  # BUG FIX: the original discarded the sapply() result, then referenced the
  # undefined variable `result` below.
  result <- sapply(html[c(10, 20, 30, 100)], nchar)
  print(paste("El Resultado es :", as.character(result[1]), as.character(result[2]),
              as.character(result[3]), as.character(result[4]), sep = " "))
  # Return the four character counts as integers.
  c(as.integer(result[1]), as.integer(result[2]),
    as.integer(result[3]), as.integer(result[4]))
}
}
question5 <- function (){
  # Quiz question 5: sum of the 4th parsed column of a fixed-width NOAA
  # sea-surface-temperature file.
  data_file <- "./specdata/question5.for"
  file <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
  download.file(file,destfile = data_file,method="curl",quiet = TRUE)
  # Fixed-width parse: negative widths skip that many characters; the first
  # 4 lines are header lines.
  parsed <- read.fwf(
    file=data_file,
    skip=4,
    widths=c(-1,9,-5,4,4,-5,4,4,-5,4,4,-5,4,4))
  sum(parsed[, 4])
}
|
3a85854ecc57b2589726cad1061f96ac18ed93df | 2fb385f67a561dd58dbb91879d2317b9d9cb8a7f | /Kobble_Cr/Longitudinal profile_5m.R | 005416e6fa62f8f1a1ff6a446b178afb97973277 | [
"AFL-3.0"
] | permissive | SongyanYu/Recession-analysis | 1cc4c3b9a63aa06088f7d7c8fa59316951914d92 | 2a2d7ae32983e6dce7d963e9b70c7944aea17a24 | refs/heads/master | 2023-04-12T11:49:55.310636 | 2021-09-03T06:31:23 | 2021-09-03T06:31:23 | 233,721,010 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,878 | r | Longitudinal profile_5m.R | #########################
# This script aims to get longitudinal profile in correct order (i.e. the flow direction order)
# There are two DEMs dataset availabe but in different spatial resolution (i.e. 1m & 5m). This scritp
# deals with both datasets and return the longitudinal profile for them, respectively.
#########################
library(raster)
setwd("C:/Users/s2974665/Google Drive/PhD at GU/Part 3 River channel identification/Kobble Cr")
longprofile.5m<-raster("Kob 5m DEM/kob_longprof.tif")
longprofile.1m<-raster("kob_longprof.tif")
flowdir.5m<-raster("Kob 5m DEM/Flowdir_kob.tif")
flowdir.1m<-raster("Flowdir_kob.tif")
plot(values(longprofile.5m)[!is.na(values(longprofile.5m))])
plot(values(longprofile.1m)[!is.na(values(longprofile.1m))])
# Return the non-NA cell values of `longprofile` reordered along the flow
# path defined by `flowdir`.
#
# The flow-direction codes handled below (1, 2, 4, 8, 16, 32, 64, 128) are
# mapped to the eight neighbouring cells via explicit column/row offsets;
# the offsets correspond to the ArcGIS/ESRI D8 encoding (1=E, 2=SE, 4=S,
# 8=SW, 16=W, 32=NW, 64=N, 128=NE), with the row index increasing downward.
#
# Args:
#   longprofile: raster whose non-NA cells form the channel profile.
#   flowdir:     flow-direction raster with the same non-NA cell pattern.
# Returns: numeric vector of the profile values, reversed relative to the
#   traced chain (presumably so the profile runs source-to-outlet — verify).
longitudinal.profile.value<-function(longprofile=longprofile.1m,flowdir=flowdir.1m){
  value<-values(longprofile)[!is.na(values(longprofile))] # raster storage order (top-left to bottom-right)
  flowdir.value<-values(flowdir)[!is.na(values(flowdir))]
  value.location<-which(!is.na(values(longprofile)))
  # Convert linear cell indices to column/row coordinates.
  # NOTE(review): assumes no profile cell sits in the raster's last column
  # (where %% ncol would yield 0) — confirm for each DEM.
  value.col<-value.location%%raster::ncol(longprofile) # column index within the raster
  value.row<-value.location%/%raster::ncol(longprofile) # row index within the raster
  pool<-data.frame(x=value.col,y=value.row)
  # Start the trace at (one of) the bottom-most profile cell(s).
  flowdir.rowno<-which(max(pool$y)==pool$y)
  flowdirection<-c()
  flowdirection[1]<-flowdir.rowno[1]
  # Follow the flow direction cell by cell, recording the visiting order.
  for(i in 1:(length(flowdir.value)-1)){
    if(flowdir.value[flowdirection[i]]==32)  # NW: one column left, one row up
      flowdirection[i+1]<-which(pool$x==pool$x[flowdirection[i]]-1&pool$y==pool$y[flowdirection[i]]-1)
    if(flowdir.value[flowdirection[i]]==1)   # E: one column right
      flowdirection[i+1]<-which(pool$x==pool$x[flowdirection[i]]+1&pool$y==pool$y[flowdirection[i]])
    if(flowdir.value[flowdirection[i]]==2)   # SE: one column right, one row down
      flowdirection[i+1]<-which(pool$x==pool$x[flowdirection[i]]+1&pool$y==pool$y[flowdirection[i]]+1)
    if(flowdir.value[flowdirection[i]]==4)   # S: one row down
      flowdirection[i+1]<-which(pool$x==pool$x[flowdirection[i]]&pool$y==pool$y[flowdirection[i]]+1)
    if(flowdir.value[flowdirection[i]]==8)   # SW: one column left, one row down
      flowdirection[i+1]<-which(pool$x==pool$x[flowdirection[i]]-1&pool$y==pool$y[flowdirection[i]]+1)
    if(flowdir.value[flowdirection[i]]==16)  # W: one column left
      flowdirection[i+1]<-which(pool$x==pool$x[flowdirection[i]]-1&pool$y==pool$y[flowdirection[i]])
    if(flowdir.value[flowdirection[i]]==64)  # N: one row up
      flowdirection[i+1]<-which(pool$x==pool$x[flowdirection[i]]&pool$y==pool$y[flowdirection[i]]-1)
    if(flowdir.value[flowdirection[i]]==128) # NE: one column right, one row up
      flowdirection[i+1]<-which(pool$x==pool$x[flowdirection[i]]+1&pool$y==pool$y[flowdirection[i]]-1)
  }
  # Reverse so the returned profile runs opposite to the traced direction.
  value<-value[rev(flowdirection)]
  return(value)
}
# Flow-ordered longitudinal profiles for both DEM resolutions.
value.1m<-longitudinal.profile.value(longprofile = longprofile.1m,flowdir = flowdir.1m)
value.5m<-longitudinal.profile.value(longprofile = longprofile.5m,flowdir = flowdir.5m)
|
bfcfd6a648fd72bf787a675a861fe7af4440e2c9 | ed7f56fc404a44c80d16ed89dca51cb53aeb0287 | /man/getCOP.Rd | 757378540dd85d4ec2eb55483c11ec3a71db7db3 | [] | no_license | InternationalRiceResearchInstitute/pedX | 636ccaf55e38063cd755dafd196ab5984e137220 | bd574e1d0375b77fc9f9f043d50bd12316bb4f13 | refs/heads/master | 2022-02-27T04:04:37.457206 | 2018-02-18T02:27:34 | 2018-02-18T02:27:34 | 103,363,132 | 0 | 1 | null | 2019-09-30T03:59:49 | 2017-09-13T06:39:43 | R | UTF-8 | R | false | true | 484 | rd | getCOP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCOP.R
\name{getCOP}
\alias{getCOP}
\title{getCOP Function for getting a sparse relationship (COP) matrix}
\usage{
getCOP(gidvec = NULL, ped)
}
\arguments{
\item{gidvec}{a vector of gids of interest}
\item{ped}{Sparse pedigree matrix (class pedigree from pedigreemm) OR a data frame}
}
\value{
A sparse relationship matrix
}
\description{
getCOP Function for getting a sparse relationship (COP) matrix
}
|
29e64e660d45a7eaa5bee355cabbbee432558e62 | a883b57cfa02a57a77648e2630f67178cd471275 | /4. The investigation of underlying intrinsic immune landscapes.R | 0278cce1ff5966eff6e9faea6ec5184f013d9f6e | [
"MIT"
] | permissive | longjunyu/Pancan-ICI | e0af239a705f04b7be2b9c8424258129416cca74 | 18d0e9d4028429f86c36e753473d2521bfa36a2f | refs/heads/master | 2023-04-15T09:10:41.102398 | 2022-02-06T15:57:49 | 2022-02-06T15:57:49 | 243,694,793 | 5 | 5 | null | null | null | null | UTF-8 | R | false | false | 47,721 | r | 4. The investigation of underlying intrinsic immune landscapes.R | # 4.1 Immunogenomic indicator analysis
options(stringsAsFactors=FALSE)
library(ggplot2)
library(ggpubr)
library(ggsci)
clindf <- read.table("risk2.txt", head=TRUE, sep="\t", quote="")
dim(clindf)
clindf[1:6,1:5]
clindf[, "group"] <- factor(clindf[, "risk"])
clindf <- clindf[order(clindf[, "id"]),]
clindf[, "sample_id"] <- substr(clindf[, "id"], 1, 12)
clindf[, "sample_id2"] <- clindf[, "id"]
dim(clindf)
head(clindf)
colnames(clindf)
sum(duplicated(clindf[, "sample_id"]))
clindf <- clindf[!duplicated(clindf[, "sample_id"]),]
sum(duplicated(clindf[, "sample_id"]))
expdf <- read.csv(
"The Immune Landscape of Cancer-mmc2.csv", head=TRUE, quote=""
)
dim(expdf)
expdf[1:6, 1:6]
expdf[, "sample_id"] <- expdf[, "TCGA.Participant.Barcode"]
sum(duplicated(clindf[, "sample_id"]))
sum(duplicated(expdf[, "sample_id"]))
sample_ids <- intersect(clindf[, "sample_id"], expdf[, "sample_id"])
length(sample_ids)
sample_ids[1:10]
clindf2 <- merge(clindf, expdf, by="sample_id", all=FALSE)
dim(clindf2)
colnames(clindf2)
write.csv(clindf2, "clindf2.csv", row.names=FALSE, na="")
# ---- Group-wise comparison of every immune indicator ----
newdf1 <- clindf2
dim(newdf1)
ncol(newdf1)
# Indicator columns start at column 10; everything before is ID/clinical info.
vars <- colnames(newdf1)[10:ncol(newdf1)]
vars
outlst <- list()
for (i in 1:length(vars)) {
    print(paste0("------- ", vars[i], " --------"))
    # Two-column frame: this indicator's value vs. the high/low risk group.
    newdf2 <- data.frame(
        value=newdf1[, vars[i]],
        group=newdf1[, "group"]
    )
    # Drop samples with a missing value for this indicator.
    newdf2 <- newdf2[complete.cases(newdf2),]
    n <- nrow(newdf2)
    # Non-parametric two-group comparison (Wilcoxon rank-sum).
    model <- wilcox.test(value ~ factor(group), newdf2)
    print(model)
    # Per-group summary statistics.
    means <- aggregate(value ~ factor(group), newdf2, mean)
    medians <- aggregate(value ~ factor(group), newdf2, median)
    maxs <- aggregate(value ~ factor(group), newdf2, max)
    mins <- aggregate(value ~ factor(group), newdf2, min)
    # NOTE(review): rows 1/2 of the aggregate results are assumed to be the
    # "high"/"low" groups respectively (alphabetical factor-level order) —
    # confirm if the group labels ever change.
    outlst[[i]] <- data.frame(
        var=vars[i],
        n=n,
        mean_high=means[1, "value"],
        mean_low=means[2, "value"],
        median_high=medians[1, "value"],
        median_low=medians[2, "value"],
        max_high=maxs[1, "value"],
        max_low=maxs[2, "value"],
        min_high=mins[1, "value"],
        min_low=mins[2, "value"],
        pvalue=model[["p.value"]]
    )
}
# One row per indicator, written out as a CSV summary table.
outdf <- do.call(rbind, outlst)
outfile <- "result_mean_2.csv"
write.csv(outdf, outfile, row.names=FALSE, na="")
dir.create("fig")
dir.create("outdata")
windowsFonts()
library(scales)
colors1 <- rainbow(length(vars), s=1, v=1, alpha = 1)
barplot(1:12,col=rainbow(12, s=1, v=1, alpha = 1))
colors1
colors2 <- rainbow(length(vars), s=1, v=0.6, alpha = 1)
barplot(1:12,col=rainbow(12, s=1, v=0.6, alpha = 1))
colors2
maxvalues <- apply(outdf[, c("max_high", "max_low")], 1, max)
minvalues <- apply(outdf[, c("min_high", "min_low")], 1, min)
for (i in 1:length(vars)) {
newdf2 <- data.frame(value=newdf1[, vars[i]], group=newdf1[, "group"])
newdf2 <- newdf2[complete.cases(newdf2),]
newdf2[, "group"] <- factor(
newdf2[, "group"], levels=c("high", "low"), labels=c("INB", "IB")
)
windowsFonts()
colors <- c(colors1[i], colors2[i])
title_x <- vars[i]
tb <- table(newdf2[, "group"])
title_label_x <- paste0(names(tb), " (n=", tb, ")")
comparisons <- list(c("INB", "IB"))
symnum.args <- list(
cutpoints=c(0, 0.0001, 0.001, 0.01, 0.05, 1),
symbols=c("", "", "", "", "")
)
labely <- maxvalues[i] + (maxvalues[i] - minvalues[i]) * 0.15
fig <- ggplot(newdf2, aes(x=group, y=value, color=group)) +
geom_boxplot(
aes(colour=group), notch=FALSE,
outlier.size=0.6, outlier.shape=1, outlier.alpha=0.5,
size=1.2,
fatten=1
) +
scale_color_manual(values=colors) +
scale_x_discrete(title_x, labels=title_label_x) +
scale_y_continuous("Value") +
stat_compare_means(
aes(
label=ifelse(
p < 0.001, "P < 0.001", paste0("P = ", ..p.format..)
)
),
label.x=1.25, label.y=labely, family="sans", size=2.8
) +
stat_compare_means(
comparisons=comparisons,
method="t.test",
symnum.args=symnum.args
) +
theme_bw() +
theme(
axis.title.x=element_text(family="sans", size=8),
axis.title.y=element_blank(),
axis.text.x=element_text(
color="black", angle=30, hjust=1,
family="sans", size=8
),
axis.text.y=element_text(color="black", family="sans", size=8),
axis.line=element_line(),
legend.title=element_blank(),
legend.position="none",
panel.grid=element_blank(),
panel.border=element_rect(color=NA)
)
fig
outfile <- paste0("fig/", "fig_boxplot_", i, "_", vars[i], ".tiff")
tiff(outfile, width=4, height=5, unit="cm", res=350, compression="lzw+p")
print(fig)
dev.off()
outfile <- paste0("fig/", "fig_boxplot_", i, "_", vars[i], ".pdf")
pdf(outfile, width=2/2.54, height=5/2.54)
print(fig)
dev.off()
fig <- ggplot(newdf2, aes(x=group, y=value, group=group)) +
geom_violin(
aes(fill=group), color="grey70"
) +
geom_boxplot(
aes(color=group), notch=FALSE,
outlier.size=0.3, outlier.shape=1, outlier.alpha=0.5,
size=0.3, width=0.2,
fatten=1
) +
geom_boxplot(
color="grey70", notch=FALSE,
outlier.colour=NA,
size=0.3, width=0.2,
fatten=0.9
) +
scale_color_manual(values=colors) +
scale_fill_manual(values=colors) +
scale_x_discrete(title_x, labels=title_label_x) +
scale_y_continuous("Value") +
stat_compare_means(
aes(
label=ifelse(
p < 0.001, "P < 0.001", paste0("P = ", ..p.format..)
)
),
label.x=1.25, label.y=labely, family="sans", size=2.5
) +
stat_compare_means(
comparisons=comparisons,
method="wilcox.test",
symnum.args=symnum.args
) +
theme_bw() +
theme(
axis.title.x=element_text(family="sans", size=7),
axis.title.y=element_blank(),
axis.text.x=element_text(
color="black", angle=30, hjust=1, family="sans", size=7),
axis.text.y=element_text(color="black", family="sans", size=7),
axis.line=element_line(),
legend.title=element_blank(),
legend.position="none",
panel.grid=element_blank(),
panel.border=element_rect(color=NA)
)
fig
outfile <- paste0("fig/", "fig_violin_", i, "_", vars[i], ".tiff")
tiff(outfile, width=4, height=5, unit="cm", res=350, compression="lzw+p")
print(fig)
dev.off()
outfile <- paste0("fig/", "fig_violin_", i, "_", vars[i], ".pdf")
pdf(outfile, width=3/2.54, height=5/2.54)
print(fig)
dev.off()
}
# 4.2 Deciphering mutational signatures in the genome
options(stringsAsFactors=FALSE)
mafdf <- read.table("mc3.v0.2.8.PUBLIC.maf", header=TRUE, sep="\t", quote="")
dim(mafdf)
head(mafdf)
mafdf[1:6,1:6]
head(mafdf[, "Tumor_Sample_Barcode"])
mafdf[, "sample_id"] <- substr(mafdf[, "Tumor_Sample_Barcode"], 1, 16)
head(mafdf[, "sample_id"])
sum(duplicated(mafdf[, "sample_id"]))
silent <- c("Silent", "Intron", "RNA", "3'UTR", "3'Flank", "5'UTR", "5'Flank", "IGR")
keeps <- c(
"Missense_Mutation",
"Frame_Shift_Del",
"Frame_Shift_Ins",
"Nonsense_Mutation",
"Nonstop_Mutation",
"Splice_Site",
"Translation_Start_Site"
)
mafdf <- mafdf[mafdf[, "Variant_Classification"] %in% keeps,]
table(mafdf[, "Variant_Classification"])
sample_ids <- levels(factor(mafdf[, "sample_id"]))
length(sample_ids)
sum(duplicated(sample_ids))
symbols <- levels(factor(mafdf[, "Hugo_Symbol"]))
length(symbols)
mafdf_2 <- mafdf
mafdf_2[, "Hugo_Symbol"] <- factor(mafdf_2[, "Hugo_Symbol"], levels=symbols)
mafdf_2[, "sample_id"] <- factor(mafdf_2[, "sample_id"], levels=sample_ids)
mutdf <- as.data.frame(table(mafdf_2[, "Hugo_Symbol"], mafdf_2[, "sample_id"]))
mutdf[, "mut"] <- ifelse(mutdf[, "Freq"] > 0, 1, 0)
head(mutdf)
genedf <- as.data.frame(
matrix(
mutdf[, "mut"], ncol=length(sample_ids), dimnames=list(symbols, sample_ids)
)
)
class(genedf)
dim(genedf)
genedf[1:6,1:6]
sample_ids2 <- substr(sample_ids, 1, 15)
dup_ids2 <- sample_ids2[duplicated(sample_ids2)]
sample_ids[sample_ids2 %in% dup_ids2]
keep_ids <- sample_ids[!duplicated(sample_ids2)]
genedf <- genedf[, keep_ids]
dim(genedf)
genedf[1:6,1:6]
clindf <- read.table("group.txt", header=TRUE, sep="\t", quote="")
dim(clindf)
head(clindf)
clindf[, "sample_id"] <- clindf[, "Mixture"]
dim(clindf)
clindf[1:6,1:3]
mafdf2 <- mafdf
mafdf2[, "sample_id"] <- substr(mafdf2[, "Tumor_Sample_Barcode"], 1, 16)
mafdf2 <- merge(mafdf2, clindf, by="sample_id", all=FALSE)
mafdf2[, "mutation_type6"] <- paste0(
mafdf2[, "Reference_Allele"], ">", mafdf2[, "Tumor_Seq_Allele2"]
)
mafdf2[1:6, "mutation_type6"]
mafdf2[, "mutation_type6"] <- ifelse(
nchar(mafdf2[, "mutation_type6"]) != 3, "Others", mafdf2[, "mutation_type6"]
)
table(mafdf2[, "mutation_type6"])
mafdf2[, "mutation_type6"] <- ifelse(
grepl("-", mafdf2[, "mutation_type6"]), "Others", mafdf2[, "mutation_type6"]
)
table(mafdf2[, "mutation_type6"])
mafdf2[, "mutation_type6"] <- ifelse(
mafdf2[, "mutation_type6"] %in% c("C>A", "G>T"), "C>A",
ifelse(
mafdf2[, "mutation_type6"] %in% c("C>G", "G>C"), "C>G",
ifelse(
mafdf2[, "mutation_type6"] %in% c("C>T", "G>A"), "C>T",
ifelse(
mafdf2[, "mutation_type6"] %in% c("T>A", "A>T"), "T>A",
ifelse(
mafdf2[, "mutation_type6"] %in% c("T>C", "A>G"), "T>C",
ifelse(mafdf2[, "mutation_type6"] %in% c("T>G", "A>C"), "T>G", NA)
)
)
)
)
)
table(mafdf2[, "mutation_type6"])
save.image("project_1.RData")
load("project_1.RData")
library(ggplot2)
library(grid)
library(gtable)
library(ggsci)
newdf1 <- mafdf2[!is.na(mafdf2[, "mutation_type6"]),]
head(newdf1)
newdf1[, "count"] <- 1
newdf2 <- aggregate(count ~ sample_id + mutation_type6 + risk, newdf1, sum)
newdf3 <- aggregate(count ~ sample_id + risk, newdf1, sum)
colnames(newdf3)[3] <- "total"
newdf4 <- merge(newdf2, newdf3, by=c("sample_id", "risk"), all.x=TRUE)
newdf4[, "percent"] <- newdf4[, "count"] / newdf4[, "total"] * 100
figdf <- newdf4
figdf[, "id"] <- figdf[, "sample_id"]
figdf[, "value"] <- figdf[, "percent"]
figdf[, "group"] <- figdf[, "mutation_type6"]
figdf[, "subgroup"] <- figdf[, "risk"]
sort_group <- c("left", "right", "center")[3]
sort_subgroup <- c("left", "right", "center")[3]
colors_group <- c(
rgb(187,87,198, max=255),
rgb(110,110,207, max=255),
rgb(187,204,73, max=255),
rgb(227,68,90, max=255),
rgb(215,97,137, max=255),
rgb(197,51,115, max=255)
)
colors_group
barplot(1:6,col=colors_group)
colors_subgroup <- pal_jco("default")(2)
colors_subgroup <- c(
rgb(86, 86, 220, max=255),
rgb(224, 77, 224, max=255)
)
barplot(1:2,col=colors_subgroup)
r <- 0.5
title_group <- "Mutation"
title_subgroup <- "Risk"
spacing_subgroup <- unit(0.1 * r, "cm")
width_barplot <- 17 * 2.54 * r
width_group <- 1.5 * 2.54 * r
width_subgroup <- 1.5 * 2.54 * r
width <- width_barplot + width_group + width_subgroup
height <- 10 * 2.54 * r
top_group <- 1 * r
top_subgroup <- 1 * r
if (sort_subgroup == "left") {
tb <- table(figdf[, "subgroup"])
subgroups <- names(tb[order(-tb)])
figdf[, "subgroup"] <- factor(figdf[, "subgroup"], levels=subgroups)
} else if (sort_subgroup == "right") {
tb <- table(figdf[, "subgroup"])
subgroups <- names(tb[order(tb)])
figdf[, "subgroup"] <- factor(figdf[, "subgroup"], levels=subgroups)
} else if (sort_subgroup == "center") {
tb <- table(figdf[, "subgroup"])
subgroups <- names(tb[order(-tb)])
subgroups2 <- c()
for (i in 1:length(subgroups)) {
if (i %% 2 == 1) {
subgroups2 <- c(subgroups[i], subgroups2)
} else {
subgroups2 <- c(subgroups2, subgroups[i])
}
}
figdf[, "subgroup"] <- factor(figdf[, "subgroup"], levels=subgroups2)
}
newdf1 <- aggregate(value ~ group, figdf, mean)
groups <- newdf1[order(newdf1[, "value"]), "group"]
figdf[, "group"] <- factor(figdf[, "group"], levels=groups)
if (sort_group == "left") {
figdf <- figdf[
order(figdf[, "subgroup"], figdf[, "group"], -figdf[, "value"]),]
ids <- figdf[figdf[, "group"] == groups[length(groups)], "id"]
figdf[, "id"] <- factor(figdf[, "id"], levels=ids)
} else if (sort_group == "right") {
figdf <- figdf[
order(figdf[, "subgroup"], figdf[, "group"], figdf[, "value"]),]
ids <- figdf[figdf[, "group"] == groups[length(groups)], "id"]
figdf[, "id"] <- factor(figdf[, "id"], levels=ids)
} else if (sort_group == "center") {
figdf <- figdf[
order(figdf[, "subgroup"], figdf[, "group"], -figdf[, "value"]),]
ids <- figdf[figdf[, "group"] == groups[length(groups)], "id"]
ids2 <- c()
for (i in 1:length(ids)) {
if (i %% 2 == 1) {
ids2 <- c(ids[i], ids2)
} else {
ids2 <- c(ids2, ids[i])
}
}
figdf[, "id"] <- factor(figdf[, "id"], levels=ids2)
}
head(figdf)
p <- ggplot(figdf, aes(x=id, y=value, fill=group)) +
geom_bar(position="stack", stat="identity", width=1.1) +
scale_x_discrete("") +
scale_y_continuous("", expand=c(0, 0), limit=c(0, 100)) +
scale_fill_manual(values=colors_group) +
facet_grid(~ subgroup, scales="free", space="free") +
guides(fill=guide_legend(ncol=1, byrow=TRUE)) +
theme(
axis.title=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.line=element_line(),
legend.title=element_text(),
legend.position="none",
legend.justification=c(0, 1),
legend.key=element_rect(fill="transparent", colour="transparent"),
legend.background=element_rect(fill="transparent", colour="black"),
strip.text.x=element_text(color="transparent"),
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_line(color=NA),
panel.grid.minor=element_line(color=NA),
panel.spacing.x=unit(spacing_subgroup, "cm"),
plot.background=element_blank()
)
p1 <- p
p <- ggplot(figdf, aes(x=id, y=value, fill=group)) +
geom_bar(position="stack", stat="identity", width=1) +
scale_fill_manual(values=colors_group) +
guides(fill=guide_legend(title=title_group, ncol=1, byrow=TRUE)) +
theme(
legend.title=element_text(),
legend.position="right",
legend.justification=c(0, 1),
legend.key=element_rect(fill="transparent", colour="transparent"),
legend.background=element_rect(fill="transparent", colour="black")
)
p2 <- p
p <- ggplot(figdf, aes(x=id, y=value, fill=subgroup)) +
geom_bar(position="stack", stat="identity", width=1) +
scale_fill_manual(values=colors_subgroup) +
guides(fill=guide_legend(title=title_subgroup, ncol=1, byrow=TRUE)) +
theme(
legend.title=element_text(),
legend.position="right",
legend.justification=c(0, 1),
legend.key=element_rect(fill="transparent", colour="transparent"),
legend.background=element_rect(fill="transparent", colour="black")
)
p3 <- p
g <- ggplotGrob(p1)
strips <- which(grepl('strip-', g[["layout"]][["name"]]))
for (i in 1:length(strips)) {
j <- which(grepl("rect",
g[["grobs"]][[strips[i]]][["grobs"]][[1]][["childrenOrder"]])
)
g[["grobs"]][[strips[i]]][["grobs"]][[1]][[
"children"]][[j]][["gp"]][["fill"]] <- colors_subgroup[i]
}
g1 <- g
g <- ggplotGrob(p2)
guide <- which(sapply(g[["grobs"]], function(x) x$name) == "guide-box")
g2 <- g[["grobs"]][[guide]]
g2[["heights"]][2] <- unit(top_group, "cm")
g <- ggplotGrob(p3)
guide <- which(sapply(g[["grobs"]], function(x) x$name) == "guide-box")
g3 <- g[["grobs"]][[guide]]
g3[["heights"]][2] <- unit(top_subgroup, "cm")
gt <- gtable(
unit(c(width_barplot, width_group, width_subgroup), c("cm")),
unit(height, "cm"))
gt <- gtable_add_grob(gt, g1, 1, 1)
gt <- gtable_add_grob(gt, g2, 1, 2, 1, 2)
gt <- gtable_add_grob(gt, g3, 1, 3, 1, 3)
outfile <- "fig_barplot.pdf"
pdf(outfile, onefile=TRUE, width=width/2.54, height=height/2.54)
grid.draw(gt)
dev.off()
library(ggplot2)
library(ggpubr)
figdf[, "id"] <- figdf[, "sample_id"]
figdf[, "value"] <- figdf[, "percent"]
figdf[, "group"] <- figdf[, "risk"]
figdf[, "var"] <- figdf[, "mutation_type6"]
newdf2 <- figdf
nrow(newdf2)
vars <- levels(factor(newdf2[, "var"]))
newlst1 <- list()
for (i in 1:length(vars)) {
newdf3 <- newdf2[newdf2[, "var"] == vars[i],]
newdf3[, "group"] <- factor(newdf3[, "group"])
model <- wilcox.test(value ~ group, newdf3)
print(model)
means <- aggregate(value ~ factor(group), newdf3, mean)
medians <- aggregate(value ~ factor(group), newdf3, median)
maxs <- aggregate(value ~ factor(group), newdf3, max)
mins <- aggregate(value ~ factor(group), newdf3, min)
newlst1[[i]] <- data.frame(
var=vars[i],
mean_1=means[means[, 1] == "high", "value"],
mean_2=means[means[, 1] == "low", "value"],
median_1=medians[means[, 1] == "high", "value"],
median_2=medians[means[, 1] == "low", "value"],
max_high=maxs[1, "value"],
max_low=maxs[2, "value"],
min_high=mins[1, "value"],
min_low=mins[2, "value"],
pvalue=model[["p.value"]])
}
newdf4 <- do.call(rbind, newlst1)
write.csv(newdf4, "result_mean_2.csv", row.names=FALSE, na="")
windowsFonts()
library(scales)
colors1 <- rainbow(6, s=1, v=1, alpha=1)
barplot(1:6,col=rainbow(6, s=1, v=1, alpha=1))
colors1
colors2 <- rainbow(6, s=1, v=0.6, alpha=1)
barplot(1:6,col=rainbow(6, s=1, v=0.6, alpha=1))
colors2
maxvalues <- apply(newdf4[, c("max_high", "max_low")], 1, max)
minvalues <- apply(newdf4[, c("min_high", "min_low")], 1, min)
for (i in 1:length(vars)) {
newdf3 <- newdf2[newdf2[, "var"] == vars[i],]
colors <- c(colors1[i], colors2[i])
titlex <- vars[i]
comparisons <- list(c("high", "low"))
symnum.args <- list(
cutpoints=c(0, 0.0001, 0.001, 0.01, 0.05, 1),
symbols=c("", "", "", "", "")
)
labely <- maxvalues[i] *1.15
fig <- ggplot(newdf3, aes(x=group, y=value, group=group)) +
geom_boxplot(
aes(colour=group), notch=FALSE,
outlier.size=0.6, outlier.shape=1, outlier.alpha=0.5,
size=0.8,
fatten=0.8
) +
scale_color_manual(values=colors) +
scale_x_discrete(titlex, labels=c("cold tumor", "hot tumor")) +
scale_y_continuous("Value", breaks=seq(0, 100, 20)) +
stat_compare_means(
aes(
label=ifelse(
p < 0.001, "P < 0.001", paste0("P = ", ..p.format..)
)
),
label.x=1.25, label.y=labely, family="sans", size=2.5
) +
stat_compare_means(
comparisons=comparisons,
method="wilcox.test",
symnum.args=symnum.args
) +
theme_bw() +
theme(
axis.title.x=element_text(family="sans", size=7),
axis.title.y=element_blank(),
axis.text.x=element_text(
color="black", angle=30, hjust=1, family="sans", size=7),
axis.text.y=element_text(color="black", family="sans", size=7),
axis.line=element_line(),
legend.title=element_blank(),
legend.position="none",
panel.grid=element_blank(),
panel.border=element_rect(color=NA)
)
fig
outfile <- paste0("fig_boxplot_", i, "_", ".tiff")
tiff(outfile, width=4, height=5, unit="cm", res=350, compression="lzw+p")
print(fig)
dev.off()
outfile <- paste0("fig_boxplot_", i, "_", ".pdf")
pdf(outfile, width=5/2.54, height=6/2.54)
print(fig)
dev.off()
fig <- ggplot(newdf3, aes(x=group, y=value, group=group)) +
geom_violin(
aes(fill=group)
) +
geom_boxplot(
colour="black", notch=FALSE,
outlier.size=0.3, outlier.shape=1, outlier.alpha=0.5,
size=0.3, width=0.2,
fatten=0.8
) +
scale_fill_manual(values=colors) +
scale_x_discrete(titlex, labels=c("cold tumor", "hot tumor")) +
scale_y_continuous("Value", breaks=seq(0, 100, 20)) +
stat_compare_means(
aes(
label=ifelse(
p < 0.001, "P < 0.001", paste0("P = ", ..p.format..)
)
),
label.x=1.25, label.y=labely, family="sans", size=2.5
) +
stat_compare_means(
comparisons=comparisons,
method="wilcox.test",
symnum.args=symnum.args
) +
theme_bw() +
theme(
axis.title.x=element_text(family="sans", size=7),
axis.title.y=element_blank(),
axis.text.x=element_text(
color="black", angle=30, hjust=1, family="sans", size=7),
axis.text.y=element_text(color="black", family="sans", size=7),
axis.line=element_line(),
legend.title=element_blank(),
legend.position="none",
panel.grid=element_blank(),
panel.border=element_rect(color=NA)
)
fig
outfile <- paste0("fig_violin_", i, "_", ".tiff")
tiff(outfile, width=4, height=5, unit="cm", res=350, compression="lzw+p")
print(fig)
dev.off()
outfile <- paste0("fig_violin_", i, "_", ".pdf")
pdf(outfile, width=5/2.54, height=6/2.54)
print(fig)
dev.off()
}
# --- Build the 96-trinucleotide mutation count matrix and summarise the six
# --- base substitution classes as a pie chart.
library(deconstructSigs)
library(BSgenome.Hsapiens.UCSC.hg19)
library(MutationalPatterns)
# mafdf2: MAF-style mutation table prepared earlier (outside this section).
mut_ref <- mafdf2
mut_ref[1:6, 1:12]
# Prefix "chr" so chromosome names match the UCSC hg19 reference.
mut_ref[, "Chromosome"] <- paste0("chr", mut_ref[, "Chromosome"])
mut_ref[1:6, 1:12]
sample.mut.ref <- mut_ref
# Count SNVs per sample in each of the 96 trinucleotide contexts.
snp_count <- mut.to.sigs.input(
    mut.ref=sample.mut.ref,
    sample.id="sample_id",
    chr="Chromosome",
    pos="Start_Position",
    ref="Reference_Allele",
    alt="Tumor_Seq_Allele2",
    bsg=BSgenome.Hsapiens.UCSC.hg19
)
snp_count[1:6, 1:6]
# MutationalPatterns expects contexts in rows, samples in columns.
mut_mtx <- t(snp_count)
dim(mut_mtx)
mut_mtx[1:6, 1:6]
plot_96_profile(mut_mtx[, c(1, 7)])
plot_96_profile(mut_mtx[,c(1, 7)], condensed=TRUE)
counts <- snp_count
# Keep characters 3-5 of the context label, i.e. the "C>A"-style substitution.
colnames(counts) <- substr(colnames(counts), 3, 5)
counts <- t(counts)
types <- levels(factor(row.names(counts)))
# Total mutation count per substitution class (summed over contexts/samples).
lst <- list()
for (i in 1:length(types)) {
    lst[[i]] <- data.frame(
        type=types[i],
        freq=sum(counts[row.names(counts) == types[i],])
    )
}
df <- do.call(rbind, lst)
df$pct <- round(df$freq / sum(df$freq) * 100, 2)
# Display labels with the reverse-complement notation.
# NOTE(review): "C>A/A>G" looks inconsistent -- the complement of C>A is G>T,
# so the first label should presumably read "C>A/G>T"; confirm before reuse.
df$type2 <- c(
    "C>A/A>G", "C>G/G>C", "C>T/G>A", "T>A/A>T", "T>C/A>G", "T>G/A>C"
)
df
labels <- paste0(df$type2, "\n(", df$pct, "%)")
colors <- c("#999999","#E69F00","#56B4E9","#009E73","#F0E442","#0072B2")
pie(df$pct, labels=labels, col=colors)
# Save the pie chart to TIFF.
outfile <- "fig_1_1.tiff"
tiff(outfile, width=20, height=20, unit="cm", res=350)
pie(df$pct, labels=labels, col=colors)
dev.off()
# --- De novo signature extraction by NMF (rank 4) and comparison with the
# --- COSMIC reference signatures via cosine similarity.
library(NMF)
# NMF requires strictly positive entries; add a tiny pseudocount.
mut_mtx2 <- mut_mtx + 0.0001
dim(mut_mtx2)
save.image("project_4_1.RData")
library(MutationalPatterns)
# 30 runs with a fixed seed for reproducibility; .opt='p7' = 7 parallel cores.
res = nmf(mut_mtx2, rank=4, method="brunet", nrun=30, seed=123456, .opt='p7')
signatures = NMF::basis(res)        # 96 contexts x 4 signatures
contribution = NMF::coef(res)       # 4 signatures x samples
reconstructed = signatures %*% contribution
nmf_res <- list(signatures = signatures,
                contribution = contribution,
                reconstructed = reconstructed)
str(nmf_res)
colnames(nmf_res$signatures) <- paste0("Signature", LETTERS[1:4])
rownames(nmf_res$contribution) <- paste0("Signature", LETTERS[1:4])
# COSMIC v2 signature probabilities (download from the COSMIC site).
cancer_signatures = read.table("signatures_probabilities.txt", sep="\t", header=TRUE)
# Reorder COSMIC rows to match our context ordering.
# NOTE(review): `mut_mat` is not defined in this section (it is load()ed later
# from "mut_mat.RData"); this likely should be `mut_mtx` -- confirm.
new_order = match(row.names(mut_mat), cancer_signatures$Somatic.Mutation.Type)
cancer_signatures = cancer_signatures[as.vector(new_order),]
row.names(cancer_signatures) = cancer_signatures$Somatic.Mutation.Type
colnames(cancer_signatures)
# Columns 4-33 hold the 30 COSMIC signature probability vectors.
cancer_signatures = as.matrix(cancer_signatures[,4:33])
# Sanity check: context rows must line up before computing similarities.
all(rownames(cancer_signatures) == rownames(nmf_res$signatures))
cos_sim_signatures = cos_sim_matrix(nmf_res$signatures, cancer_signatures)
plot_cosine_heatmap(cos_sim_signatures, cluster_rows=FALSE, plot_values=TRUE)
outfile <- "fig_cosine_heatmap.tiff"
tiff(outfile, width=30, height=20, unit="cm", res=350, compression="lzw+p")
plot_cosine_heatmap(cos_sim_signatures, cluster_rows=FALSE, plot_values=TRUE)
dev.off()
outfile <- "fig_cosine_heatmap.pdf"
pdf(outfile, onefile=FALSE, width=30/2.54, height=20/2.54)
plot_cosine_heatmap(cos_sim_signatures, cluster_rows=FALSE, plot_values=TRUE)
dev.off()
save.image("project_4.RData")
save(nmf_res, file="nmf_res_rank4.RData")
# --- Reload saved objects and reshape the signature matrix into long format
# --- for custom 96-context profile plots.
options(stringsAsFactors=FALSE)
library(NMF)
load("mut_mat.RData")
load("nmf_res.RData")
load("mut_mtx.RData")
library(MutationalPatterns)
plot_96_profile(nmf_res$signatures)
outfile <- "fig_barplot_5_1_test.tiff"
tiff(outfile, width=20, height=20, unit="cm", res=350, compression="lzw+p")
plot_96_profile(nmf_res$signatures)
dev.off()
mut_matrix <- nmf_res[["signatures"]]
# Normalise each signature column to sum to 1 (relative contribution).
norm_mut_matrix = apply(mut_matrix, 2, function(x) x / sum(x))
CONTEXTS_96 <- rownames(mut_matrix)
context = CONTEXTS_96
SUBSTITUTIONS <- c(
    "C>A",
    "C>G",
    "C>T",
    "T>A",
    "T>C",
    "T>G"
)
# 16 trinucleotide contexts per substitution class (6 x 16 = 96 rows).
substitution = rep(SUBSTITUTIONS, each=16)
# Reduce "A[C>A]G" to the flanking bases "A.G".
context <- paste0(substr(context, 1, 1), ".", substr(context, 7, 7))
df = data.frame(substitution=substitution, context=context)
rownames(norm_mut_matrix) = NULL
library(reshape2)
df2 = cbind(df, as.data.frame(norm_mut_matrix))
# Long format: one row per (substitution, context, signature).
df3 = melt(df2, id.vars=c("substitution", "context"))
head(df3)
df4 <- df3
# Export the relative contributions as percentages.
df4[, "value"] <- round(df4[, "value"] * 100, 1)
outdf <- df4
outfile <- "result_signature_96.csv"
write.csv(outdf, outfile, row.names=FALSE, na="")
library(ggplot2)
library(grid)
library(gtable)
library(scales)
# One fill colour per substitution class.
colors <- c(
    rgb(204,0,255, max=255),
    rgb(255,153,0, max=255),
    rgb(51,255,0, max=255),
    rgb(197,51,115, max=255),
    rgb(160,64,255, max=255),
    rgb(255,0,153, max=255)
)
colors
# Facet-strip colours (same palette as the bars).
colors_subgroup <- c(
    rgb(204,0,255, max=255),
    rgb(255,153,0, max=255),
    rgb(51,255,0, max=255),
    rgb(197,51,115, max=255),
    rgb(160,64,255, max=255),
    rgb(255,0,153, max=255)
)
colors_subgroup
# Common y-axis ceiling for all per-signature plots.
ymax = 0.35
signatures <- c("SignatureA", "SignatureB", "SignatureC", "SignatureD")
# --- One 96-context profile figure per signature, with facet strips
# --- recoloured by direct grob manipulation and a signature label panel.
for (i in 1:length(signatures)) {
    # Subset the long table to the current signature.
    df4 <- df3[df3[, "variable"] == signatures[i],]
    # Dummy facet variable so strips can sit below the panels.
    df4[, "substitution2"] <- ""
    gplot <- ggplot(
        data=df4,
        aes(
            x=context,
            y=value,
            fill=substitution,
            width=0.6),
    ) +
        # Faint full-panel background tint per substitution class.
        geom_rect(
            aes(fill=substitution),
            alpha=0.01, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf
        ) +
        geom_bar(stat="identity", colour="transparent", size=0.2) +
        scale_fill_manual(values=colors) +
        facet_wrap(
            ~ substitution2 + substitution, strip.position="bottom", nrow=1
        ) +
        coord_cartesian(ylim=c(0, ymax)) +
        scale_y_continuous(
            "Relative contribution", breaks=seq(0, ymax, 0.05),
            labels=percent
        ) +
        guides(fill=FALSE) +
        theme_bw() +
        theme(
            axis.title=element_blank(),
            axis.text.x=element_blank(),
            axis.ticks.x=element_blank(),
            axis.line=element_line(),
            legend.title=element_text(),
            legend.position="none",
            legend.justification=c(0, 1),
            legend.key=element_rect(fill="transparent", color="transparent"),
            legend.background=element_rect(fill="transparent", color="black"),
            strip.background=element_rect(color=NA, fill=NA),
            strip.placement="outside",
            panel.background=element_rect(fill=NA),
            panel.border=element_rect(color=NA),
            panel.grid.major=element_line(color=NA),
            panel.grid.minor=element_line(color=NA),
            panel.spacing.x=unit(0.06, "cm"),
            plot.background=element_blank()
        )
    # Recolour each facet strip by editing the grob tree directly
    # (fragile: depends on ggplot2's internal grob naming).
    grob <- ggplotGrob(gplot)
    strips <- which(grepl("strip-", grob[["layout"]][["name"]]))
    for (j in 1:length(strips)) {
        k <- which(grepl("rect",
            grob[["grobs"]][[strips[j]]][["grobs"]][[1]][["childrenOrder"]])
        )
        grob[["grobs"]][[strips[j]]][["grobs"]][[1]][[
            "children"]][[k]][["gp"]][["fill"]] <- colors_subgroup[j]
    }
    grob1 <- grob
    # Black label box with the signature name in white.
    grob <- grobTree(
        rectGrob(gp=gpar(fill="black")),
        textGrob(signatures[i], gp=gpar(col="white"))
    )
    grob2 <- grob
    # 3x3 layout: profile spans everything, label sits in the middle cell.
    gtab <- gtable(
        unit(c(16, 3, 1), "cm"),
        unit(c(1, 1, 8), "cm")
    )
    gtab <- gtable_add_grob(gtab, grob1, 1, 1, 3, 3)
    gtab <- gtable_add_grob(gtab, grob2, 2, 2, 2, 2)
    grid.newpage()
    grid.draw(gtab)
    outfile <- paste0("fig_barplot_5_1_", i, ".pdf")
    pdf(outfile, onefile=FALSE, width=20/2.54, height=10/2.54)
    grid.newpage()
    grid.draw(gtab)
    dev.off()
}
# --- All four signatures in a single faceted 96-context profile figure
# --- (rows = signatures, columns = substitution classes).
gplot <- ggplot(
    data=df3, aes(
        x=context,
        y=value,
        fill=substitution,
        width=0.6
    )) +
    geom_bar(stat="identity", colour="transparent", size=.2) +
    scale_fill_manual(values=colors) +
    facet_grid(variable ~ substitution) +
    coord_cartesian(ylim=c(0, ymax)) +
    scale_y_continuous("Relative contribution", breaks=seq(0, ymax, 0.1)) +
    guides(fill=FALSE) +
    theme_bw() +
    theme(
        axis.title.y=element_text(size=12,vjust=1),
        axis.text.y=element_text(size=8),
        axis.title.x=element_text(size=12),
        axis.text.x=element_blank(),
        axis.ticks.x=element_blank(),
        strip.text.x=element_text(size=9),
        strip.text.y=element_text(size=9),
        panel.grid.major.x = element_blank()
    )
gplot
outfile <- "fig_barplot_5_1.pdf"
pdf(outfile, onefile=FALSE, width=20/2.54, height=20/2.54)
# NOTE(review): a bare `gplot` only renders when run interactively; if this
# script is source()d, the PDF will be empty -- use print(gplot) instead.
gplot
dev.off()
# --- Absolute signature contribution per sample (log2 scale), as a stacked
# --- barplot ordered by total mutation load.
options(stringsAsFactors=FALSE)
library(ggplot2)
library(NMF)
load("mut_mat.RData")
load("nmf_res.RData")
load("mut_mtx.RData")
library(MutationalPatterns)
# NOTE(review): `nmf_res$signature` relies on partial matching of the
# `signatures` element; spell it out to avoid surprises.
plot_contribution(nmf_res$contribution, nmf_res$signature, mode="absolute")
signatures <- nmf_res[["signatures"]]
contribution <- nmf_res[["contribution"]]
# Scale contributions by each signature's column sum to get absolute counts
# (recycling works because rows of `contribution` are signatures).
total_signatures = colSums(signatures)
abs_contribution = contribution * total_signatures
library(reshape2)
m_contribution = melt(abs_contribution)
colnames(m_contribution) = c("Signature", "Sample", "Contribution")
head(m_contribution)
# Order samples by decreasing total contribution.
sumdf <- aggregate(Contribution ~ Sample, m_contribution, sum)
ids <- sumdf[order(-sumdf[, "Contribution"]), "Sample"]
m_contribution[, "Sample"] <- factor(m_contribution[, "Sample"], levels=ids)
m_contribution <- m_contribution[order(m_contribution[, "Sample"]),]
# Log-transform for display.
m_contribution[, "Contribution"] <- log2(m_contribution[, "Contribution"] + 1)
colors <- rainbow(4)
fig <- ggplot(
    m_contribution, aes(
        x=factor(Sample),
        y=Contribution,
        fill=factor(Signature),
        color=factor(Signature),
        order=Sample
    )
) +
    geom_bar(stat="identity", colour = "transparent") +
    scale_color_manual(name="Signature", values=colors) +
    scale_fill_manual(name="Signature", values=colors) +
    scale_x_discrete("") +
    scale_y_continuous(
        "Log2(Absolute contribution + 1) \n Log2(no. mutations + 1)"
    ) +
    theme_bw() +
    theme(
        axis.text.x=element_blank(),
        axis.ticks.x=element_blank(),
        panel.grid=element_blank()
    )
fig
outfile <- "fig_barplot_5_2.pdf"
pdf(outfile, onefile=TRUE, width=20/2.54, height=10/2.54)
# NOTE(review): bare `fig` only renders interactively; use print(fig) when
# source()ing, otherwise this PDF comes out blank.
fig
dev.off()
# --- Relative (proportional) signature contribution per sample.
options(stringsAsFactors=FALSE)
library(NMF)
library(reshape2)
library(ggplot2)
load("nmf_res.RData")
contribution <- nmf_res$contribution
# Sample-level clinical grouping; `Mixture` holds the sample identifier.
clindf <- read.table("group.txt", header=TRUE, sep="\t", quote="")
dim(clindf)
head(clindf)
clindf[, "sample_id"] <- clindf[, "Mixture"]
dim(clindf)
clindf[1:6,1:3]
library(MutationalPatterns)
plot_contribution(nmf_res$contribution, nmf_res$signature, mode="relative")
m_contribution = melt(contribution)
colnames(m_contribution) = c("Signature", "Sample", "Contribution")
aggregate(Contribution ~ Signature, m_contribution, sum)
# NOTE(review): `plot` shadows base::plot from here on in this session.
plot = ggplot(m_contribution,
    aes(x = factor(Sample),
        y = Contribution,
        fill = factor(Signature),
        order = Sample)) +
    # position="fill" rescales each bar to proportions.
    geom_bar(position = "fill", stat="identity", colour="transparent") +
    labs(x = "", y = "Relative contribution") +
    theme_bw() +
    theme(panel.grid.minor.x=element_blank(),
        panel.grid.major.x=element_blank()) +
    theme(panel.grid.minor.y=element_blank(),
        panel.grid.major.y=element_blank())
palette <- rainbow(4)
plot = plot + scale_fill_manual(name="Signature", values=palette)
plot = plot + xlim(levels(factor(m_contribution$Sample)))
plot
outfile <- "fig_barplot_5_3_test.pdf"
pdf(outfile, onefile=TRUE, width=30/2.54, height=10/2.54)
# NOTE(review): bare `plot` only renders interactively; use print(plot) when
# source()ing.
plot
dev.off()
# --- Build a per-sample signature-percentage table joined to clinical risk
# --- groups, then order samples/subgroups for the stacked barplot below.
library(ggplot2)
library(grid)
library(gtable)
library(ggsci)
newdf1 <- nmf_res[["contribution"]]
# One data frame per sample: contribution and percentage of each signature.
newlst1 <- list()
for (i in 1:ncol(newdf1)) {
    newlst1[[i]] <- data.frame(
        sample_id=colnames(newdf1)[i],
        signature=rownames(newdf1),
        contribution=newdf1[, i],
        percent=newdf1[, i] / sum(newdf1[, i]) * 100
    )
}
newdf2 <- do.call(rbind, newlst1)
# Inner join with the clinical table (drops samples without clinical data).
newdf3 <- merge(newdf2, clindf, by="sample_id", all=FALSE)
figdf <- newdf3
head(figdf)
# Generic column names used by the plotting code below.
figdf[, "id"] <- figdf[, "sample_id"]
figdf[, "value"] <- figdf[, "percent"]
figdf[, "group"] <- figdf[, "signature"]
figdf[, "subgroup"] <- figdf[, "risk"]
# Per-signature min/max percentages, exported for the report.
df4 <- aggregate(value ~ group, figdf, max)
df4[, "value"] <- round(df4[, "value"], 1)
colnames(df4)[2] <- "max"
df5 <- aggregate(value ~ group, figdf, min)
df5[, "value"] <- round(df5[, "value"], 1)
colnames(df5)[2] <- "min"
outdf <- merge(df4, df5, by="group", all=TRUE)
outfile <- "result_signature_sample.csv"
write.csv(outdf, outfile, row.names=FALSE, na="")
# Sorting strategy: "center" puts the largest categories in the middle.
sort_group <- c("left", "right", "center")[3]
sort_subgroup <- c("left", "right", "center")[3]
# Bar fill colours, one per signature.
colors_group <- c(
    rgb(187,87,198, max=255),
    rgb(187,204,73, max=255),
    rgb(215,97,137, max=255),
    rgb(197,51,115, max=255)
)
colors_group
barplot(1:6,col=colors_group)
# Risk-subgroup colours; the pal_jco() call is immediately overridden.
colors_subgroup <- pal_jco("default")(2)
colors_subgroup <- c(
    rgb(86, 86, 220, max=255),
    rgb(224, 77, 224, max=255)
)
barplot(1:2,col=colors_subgroup)
# Overall scale factor and panel geometry (cm).
r <- 0.5
title_group <- "Mutation"
title_subgroup <- "Risk"
spacing_subgroup <- unit(0.1 * r, "cm")
width_barplot <- 16.1 * 2.54 * r
width_group <- 2.3 * 2.54 * r
width_subgroup <- 1.6 * 2.54 * r
width <- width_barplot + width_group + width_subgroup
height <- 10 * 2.54 * r
top_group <- 1 * r
top_subgroup <- 1 * r
# Order the risk subgroups by size, according to the chosen strategy.
if (sort_subgroup == "left") {
    tb <- table(figdf[, "subgroup"])
    subgroups <- names(tb[order(-tb)])
    figdf[, "subgroup"] <- factor(figdf[, "subgroup"], levels=subgroups)
} else if (sort_subgroup == "right") {
    tb <- table(figdf[, "subgroup"])
    subgroups <- names(tb[order(tb)])
    figdf[, "subgroup"] <- factor(figdf[, "subgroup"], levels=subgroups)
} else if (sort_subgroup == "center") {
    tb <- table(figdf[, "subgroup"])
    subgroups <- names(tb[order(-tb)])
    # Alternate prepend/append so the largest subgroup ends up central.
    subgroups2 <- c()
    for (i in 1:length(subgroups)) {
        if (i %% 2 == 1) {
            subgroups2 <- c(subgroups[i], subgroups2)
        } else {
            subgroups2 <- c(subgroups2, subgroups[i])
        }
    }
    figdf[, "subgroup"] <- factor(figdf[, "subgroup"], levels=subgroups2)
}
# Stack signatures from smallest to largest mean percentage.
newdf1 <- aggregate(value ~ group, figdf, mean)
newdf1
groups <- newdf1[order(newdf1[, "value"]), "group"]
figdf[, "group"] <- factor(figdf[, "group"], levels=groups)
# Order samples within each subgroup by the dominant signature's value.
if (sort_group == "left") {
    figdf <- figdf[
        order(figdf[, "subgroup"], figdf[, "group"], -figdf[, "value"]),]
    ids <- figdf[figdf[, "group"] == groups[length(groups)], "id"]
    figdf[, "id"] <- factor(figdf[, "id"], levels=ids)
} else if (sort_group == "right") {
    figdf <- figdf[
        order(figdf[, "subgroup"], figdf[, "group"], figdf[, "value"]),]
    ids <- figdf[figdf[, "group"] == groups[length(groups)], "id"]
    figdf[, "id"] <- factor(figdf[, "id"], levels=ids)
} else if (sort_group == "center") {
    figdf <- figdf[
        order(figdf[, "subgroup"], figdf[, "group"], -figdf[, "value"]),]
    ids <- figdf[figdf[, "group"] == groups[length(groups)], "id"]
    # Same alternate prepend/append trick, per sample.
    ids2 <- c()
    for (i in 1:length(ids)) {
        if (i %% 2 == 1) {
            ids2 <- c(ids[i], ids2)
        } else {
            ids2 <- c(ids2, ids[i])
        }
    }
    figdf[, "id"] <- factor(figdf[, "id"], levels=ids2)
}
head(figdf)
# --- Final stacked barplot: per-sample signature percentages faceted by risk
# --- subgroup, with legends harvested from throwaway plots and assembled in
# --- a gtable alongside the main panel.
p <- ggplot(figdf, aes(x=id, y=value, fill=group)) +
    geom_bar(position="stack", stat="identity", width=1.1) +
    scale_x_discrete("") +
    scale_y_continuous("", expand=c(0, 0), limit=c(0, 100)) +
    scale_fill_manual(values=colors_group) +
    facet_grid(~ subgroup, scales="free", space="free") +
    guides(fill=guide_legend(ncol=1, byrow=TRUE)) +
    theme(
        axis.title=element_blank(),
        axis.text.x=element_blank(),
        axis.ticks.x=element_blank(),
        axis.line=element_line(),
        legend.title=element_text(),
        legend.position="none",
        legend.justification=c(0, 1),
        legend.key=element_rect(fill="transparent", colour="transparent"),
        legend.background=element_rect(fill="transparent", colour="black"),
        strip.text.x=element_text(color="transparent"),
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_line(color=NA),
        panel.grid.minor=element_line(color=NA),
        panel.spacing.x=unit(spacing_subgroup, "cm"),
        plot.background=element_blank()
    )
p1 <- p
# p2 exists only to supply the signature legend.
p <- ggplot(figdf, aes(x=id, y=value, fill=group)) +
    geom_bar(position="stack", stat="identity", width=1) +
    scale_fill_manual(values=colors_group) +
    guides(fill=guide_legend(title=title_group, ncol=1, byrow=TRUE)) +
    theme(
        legend.title=element_text(),
        legend.position="right",
        legend.justification=c(0, 1),
        legend.key=element_rect(fill="transparent", colour="transparent"),
        legend.background=element_rect(fill="transparent", colour="black")
    )
p2 <- p
# p3 exists only to supply the risk-subgroup legend.
p <- ggplot(figdf, aes(x=id, y=value, fill=subgroup)) +
    geom_bar(position="stack", stat="identity", width=1) +
    scale_fill_manual(values=colors_subgroup) +
    guides(fill=guide_legend(title=title_subgroup, ncol=1, byrow=TRUE)) +
    theme(
        legend.title=element_text(),
        legend.position="right",
        legend.justification=c(0, 1),
        legend.key=element_rect(fill="transparent", colour="transparent"),
        legend.background=element_rect(fill="transparent", colour="black")
    )
p3 <- p
# Recolour the facet strips of the main panel (grob-tree surgery; depends on
# ggplot2 internals).
g <- ggplotGrob(p1)
strips <- which(grepl('strip-', g[["layout"]][["name"]]))
for (i in 1:length(strips)) {
    j <- which(grepl("rect",
        g[["grobs"]][[strips[i]]][["grobs"]][[1]][["childrenOrder"]])
    )
    g[["grobs"]][[strips[i]]][["grobs"]][[1]][[
        "children"]][[j]][["gp"]][["fill"]] <- colors_subgroup[i]
}
g1 <- g
# Extract the legend ("guide-box") grobs from p2 and p3 and pin their tops.
g <- ggplotGrob(p2)
guide <- which(sapply(g[["grobs"]], function(x) x$name) == "guide-box")
g2 <- g[["grobs"]][[guide]]
g2[["heights"]][2] <- unit(top_group, "cm")
g <- ggplotGrob(p3)
guide <- which(sapply(g[["grobs"]], function(x) x$name) == "guide-box")
g3 <- g[["grobs"]][[guide]]
g3[["heights"]][2] <- unit(top_subgroup, "cm")
# Compose: barplot | signature legend | subgroup legend.
gt <- gtable(
    unit(c(width_barplot, width_group, width_subgroup), c("cm")),
    unit(height, "cm"))
gt <- gtable_add_grob(gt, g1, 1, 1)
gt <- gtable_add_grob(gt, g2, 1, 2, 1, 2)
gt <- gtable_add_grob(gt, g3, 1, 3, 1, 3)
outfile <- "fig_barplot_5_3.pdf"
pdf(outfile, onefile=TRUE, width=width/2.54, height=height/2.54)
grid.draw(gt)
dev.off()
# --- Join absolute signature contributions with clinical risk groups in
# --- preparation for the per-signature group comparisons below.
options(stringsAsFactors=FALSE)
library(NMF)
load("mut_mat.RData")
load("nmf_res.RData")
load("mut_mtx.RData")
# Clinical grouping table; `Mixture` holds the sample identifier.
# BUG FIX: the filename was misspelled "grou.txt"; the identical read earlier
# in this script uses "group.txt".
clindf <- read.table("group.txt", header=TRUE, sep="\t", quote="")
dim(clindf)
head(clindf)
clindf[, "sample_id"] <- clindf[, "Mixture"]
dim(clindf)
clindf[1:6,1:3]
signatures <- nmf_res[["signatures"]]
contribution <- nmf_res[["contribution"]]
# Scale contributions by each signature's column sum to get absolute counts
# (recycling works because rows of `contribution` are signatures).
total_signatures = colSums(signatures)
abs_contribution = contribution * total_signatures
library(reshape2)
m_contribution = melt(abs_contribution)
colnames(m_contribution) = c("Signature", "Sample", "Contribution")
head(m_contribution)
# Inner join with the clinical table; log-transform contributions.
newdf2 <- m_contribution
newdf2[, "sample_id"] <- newdf2[, "Sample"]
newdf2 <- merge(newdf2, clindf, by="sample_id", all=FALSE)
newdf2[, "value"] <- log2(newdf2[, "Contribution"] + 1)
newdf2[, "signature"] <- newdf2[, "Signature"]
newdf2[, "group"] <- newdf2[, "risk"]
nrow(newdf2)
# --- Wilcoxon rank-sum test of log2 contribution between the two risk
# --- groups, separately for each signature; summary table written to CSV.
signatures <- c("SignatureA", "SignatureB", "SignatureC", "SignatureD")
newlst1 <- list()
for (i in 1:length(signatures)) {
    newdf3 <- newdf2[newdf2[, "signature"] %in% signatures[i],]
    # Assumes exactly two group levels; rows 1/2 below index those levels
    # in factor order -- TODO confirm the level ordering matches the labels
    # used in the report.
    newdf3[, "group"] <- factor(newdf3[, "group"])
    model <- wilcox.test(value ~ group, newdf3)
    print(model)
    # Descriptive statistics per group.
    means <- aggregate(value ~ factor(group), newdf3, mean)
    medians <- aggregate(value ~ factor(group), newdf3, median)
    maxs <- aggregate(value ~ factor(group), newdf3, max)
    mins <- aggregate(value ~ factor(group), newdf3, min)
    newlst1[[i]] <- data.frame(
        var=signatures[i],
        mean_1=means[1, "value"],
        mean_2=means[2, "value"],
        median_1=medians[1, "value"],
        median_2=medians[2, "value"],
        max_1=maxs[1, "value"],
        max_2=maxs[2, "value"],
        min_1=mins[1, "value"],
        min_2=mins[2, "value"],
        pvalue=model[["p.value"]])
}
newdf4 <- do.call(rbind, newlst1)
# Bonferroni correction across the four signature tests.
newdf4[, "adj_pvalue"] <- p.adjust(newdf4[, "pvalue"], method="bonferroni")
write.csv(newdf4, "result_mean_2.csv", row.names=FALSE, na="")
# --- Colour palettes and y-axis limits for the per-signature box/violin
# --- plots (colors2 is a darker companion shade of colors1).
library(ggplot2)
library(ggpubr)
library(scales)
windowsFonts()
colors1 <- c(
    rgb(255,0,153, max=255),
    rgb(51,255,0, max=255),
    rgb(255,153,0, max=255),
    rgb(204,0,255, max=255)
)
colors1
colors2 <- c(
    rgb(153,0,92, max=255),
    rgb(31,153,0, max=255),
    rgb(153,92,0, max=255),
    rgb(122,0,153, max=255)
)
colors2
# Per-signature extremes (from the summary table) used to place p-value labels.
maxvalues <- apply(newdf4[, c("max_1", "max_2")], 1, max)
minvalues <- apply(newdf4[, c("min_1", "min_2")], 1, min)
# --- For each signature: a boxplot and a violin+boxplot of the log2
# --- contribution by risk group, annotated with Wilcoxon p-values, exported
# --- as TIFF and PDF.
for (i in 1:length(signatures)) {
    newdf3 <- newdf2[newdf2[, "signature"] %in% signatures[i],]
    colors <- c(colors1[i], colors2[i])
    titlex <- signatures[i]
    # Assumes the risk groups are labelled "high"/"low" -- TODO confirm.
    comparisons <- list(c("high", "low"))
    # Empty symbols: suppress the significance stars on the comparison bar.
    symnum.args <- list(
        cutpoints=c(0, 0.0001, 0.001, 0.01, 0.05, 1),
        symbols=c("", "", "", "", "")
    )
    # Place the p-value label just above the tallest observation.
    labely <- maxvalues[i] *1.15
    fig <- ggplot(newdf3, aes(x=group, y=value, group=group)) +
        geom_boxplot(
            aes(colour=group), notch=FALSE,
            outlier.size=0.6, outlier.shape=1, outlier.alpha=0.5,
            size=0.8,
            fatten=0.9
        ) +
        scale_color_manual(values=colors) +
        scale_x_discrete(titlex, labels=c("INB", "IB")) +
        scale_y_continuous("Value") +
        # Overall Wilcoxon p-value, formatted as "P < 0.001" when tiny.
        stat_compare_means(
            aes(
                label=ifelse(
                    p < 0.001, "P < 0.001", paste0("P = ", ..p.format..)
                )
            ),
            label.x=1.25, label.y=labely, family="sans", size=2.5
        ) +
        stat_compare_means(
            comparisons=comparisons,
            method="wilcox.test",
            symnum.args=symnum.args
        ) +
        theme_bw() +
        theme(
            axis.title.x=element_text(family="sans", size=7),
            axis.title.y=element_blank(),
            axis.text.x=element_text(
                color="black", angle=30, hjust=1, family="sans", size=7),
            axis.text.y=element_text(color="black", family="sans", size=7),
            axis.line=element_line(),
            legend.title=element_blank(),
            legend.position="none",
            panel.grid=element_blank(),
            panel.border=element_rect(color=NA)
        )
    fig
    outfile <- paste0("fig_boxplot_", i, "_", ".tiff")
    tiff(outfile, width=4, height=5, unit="cm", res=350, compression="lzw+p")
    print(fig)
    dev.off()
    outfile <- paste0("fig_boxplot_", i, "_", ".pdf")
    pdf(outfile, width=4/2.54, height=5/2.54)
    print(fig)
    dev.off()
    # Violin variant: coloured violin, coloured inner boxplot, then a grey
    # boxplot overlay (no outliers) for contrast.
    fig <- ggplot(newdf3, aes(x=group, y=value, group=group)) +
        geom_violin(
            aes(fill=group), color="grey70"
        ) +
        geom_boxplot(
            aes(color=group), notch=FALSE,
            outlier.size=0.3, outlier.shape=1, outlier.alpha=0.5,
            size=0.3, width=0.2,
            fatten=1
        ) +
        geom_boxplot(
            color="grey70", notch=FALSE,
            outlier.colour=NA,
            size=0.3, width=0.2,
            fatten=0.9
        ) +
        scale_color_manual(values=colors) +
        scale_fill_manual(values=colors) +
        scale_x_discrete(titlex, labels=c("INB", "IB")) +
        scale_y_continuous("Value") +
        stat_compare_means(
            aes(
                label=ifelse(
                    p < 0.001, "P < 0.001", paste0("P = ", ..p.format..)
                )
            ),
            label.x=1.25, label.y=labely, family="sans", size=2.5
        ) +
        stat_compare_means(
            comparisons=comparisons,
            method="wilcox.test",
            symnum.args=symnum.args
        ) +
        theme_bw() +
        theme(
            axis.title.x=element_text(family="sans", size=7),
            axis.title.y=element_blank(),
            axis.text.x=element_text(
                color="black", angle=30, hjust=1, family="sans", size=7),
            axis.text.y=element_text(color="black", family="sans", size=7),
            axis.line=element_line(),
            legend.title=element_blank(),
            legend.position="none",
            panel.grid=element_blank(),
            panel.border=element_rect(color=NA)
        )
    fig
    outfile <- paste0("fig_violin_", i, "_", ".tiff")
    tiff(outfile, width=4, height=5, unit="cm", res=350, compression="lzw+p")
    print(fig)
    dev.off()
    outfile <- paste0("fig_violin_", i, "_", ".pdf")
    pdf(outfile, width=4/2.54, height=5/2.54)
    print(fig)
    dev.off()
}
# 4.3 Oncogenic pathway analysis: ssGSEA pathway scores per tumor sample.
inputFile <- "EBPlusPlusAdjustPANCAN_exp_log.txt"
gmtFile <- "pathway.gmt"
library(GSVA)
library(limma)
library(GSEABase)
# Expression table: first column = gene identifier, remaining columns = samples.
rt <- read.table(inputFile, sep="\t", header=TRUE, check.names=FALSE)
dim(rt)
rt[1:6, 1:6]
sample_ids <- colnames(rt)[2:ncol(rt)]
# TCGA-style barcodes: characters 14-15 encode the sample type; codes >= 10
# denote normal tissue -- TODO confirm the barcode layout for this cohort.
types <- substr(sample_ids, 14, 15)
types[1:10]
types <- ifelse(as.numeric(types) >= 10, "normal", "tumor")
table(types)
sample_ids2 <- sample_ids[types == "tumor"]
# BUG FIX: subset to tumor samples only. The original line used `sample_ids`,
# so `sample_ids2` was computed but the tumor filter was never applied.
rt <- rt[, c("gene_id", sample_ids2)]
dim(rt)
rt[1:6, 1:6]
# Convert to a numeric matrix keyed by gene id.
rt <- as.matrix(rt)
rownames(rt) <- rt[, 1]
exp <- rt[, 2:ncol(rt)]
dimnames <- list(rownames(exp), colnames(exp))
mat <- matrix(as.numeric(as.matrix(exp)), nrow=nrow(exp), dimnames=dimnames)
# Average replicate gene rows and drop genes with non-positive mean expression.
mat <- avereps(mat)
mat <- mat[rowMeans(mat) > 0, ]
geneSet <- getGmt(gmtFile,
    geneIdType=SymbolIdentifier())
# Single-sample GSEA scores per pathway per sample.
ssgseaScore <- gsva(mat, geneSet, method='ssgsea', kcdf='Gaussian', abs.ranking=TRUE)
# Min-max scale: map the smallest value in x to 0 and the largest to 1.
normalize <- function(x) {
    rng <- range(x)
    (x - rng[1]) / (rng[2] - rng[1])
}
# Rescale all ssGSEA scores to [0, 1] (global min-max over the whole matrix),
# prepend the sample ids as a header row, and write tab-separated output.
ssgseaOut=normalize(ssgseaScore)
ssgseaOut=rbind(id=colnames(ssgseaOut),ssgseaOut)
write.table(ssgseaOut,file="ssgseaOut.txt",sep="\t",quote=F,col.names=F)
|
164a6bb4a97471bd1d7615d9295fa0b6b1e40ffe | ab7d15d06ed92cd51cc383dc9e98ae2a8fa41eaa | /man/export_graph.Rd | 88708326652c8374b8caf2f6910973e7927f00dc | [
"MIT"
] | permissive | rich-iannone/DiagrammeR | 14c46eb994eb8de90c50166a5d2d7e0668d3f7c5 | 218705d52d445c5d158a04abf8107b425ea40ce1 | refs/heads/main | 2023-08-18T10:32:30.784039 | 2023-05-19T16:33:47 | 2023-05-19T16:33:47 | 28,556,914 | 1,750 | 293 | NOASSERTION | 2023-07-10T20:46:28 | 2014-12-28T08:01:15 | R | UTF-8 | R | false | true | 1,828 | rd | export_graph.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export_graph.R
\name{export_graph}
\alias{export_graph}
\title{Export a graph to various image formats}
\usage{
export_graph(
graph,
file_name = NULL,
file_type = NULL,
title = NULL,
width = NULL,
height = NULL
)
}
\arguments{
\item{graph}{A graph object of class \code{dgr_graph}.}
\item{file_name}{The name of the exported file (including its extension).}
\item{file_type}{The type of file to be exported. Options for graph files
are: \code{png}, \code{pdf}, \code{svg}, and \code{ps}.}
\item{title}{An optional title for the output graph.}
\item{width}{Output width in pixels or \code{NULL} for default. Only useful for
export to image file formats \code{png}, \code{pdf}, \code{svg}, and \code{ps}.}
\item{height}{Output height in pixels or \code{NULL} for default. Only useful for
export to image file formats \code{png}, \code{pdf}, \code{svg}, and \code{ps}.}
}
\description{
Export a graph to a variety of image formats such as PNG, PDF, SVG, and
PostScript.
}
\examples{
# Create a simple graph
graph <-
create_graph() \%>\%
add_path(
n = 5,
edge_aes = edge_aes(
arrowhead = c(
"normal", "vee",
"tee", "dot"
),
color = c(
"red", "blue",
"orange", "purple"
)
)
)
# Create a PDF file for
# the graph (`graph.pdf`)
# graph \%>\%
# export_graph(
# file_name = "graph.pdf",
# title = "Simple Graph"
# )
# Create a PNG file for
# the graph (`mypng.png`)
# graph \%>\%
# export_graph(
# file_name = "mypng.png",
# file_type = "PNG"
# )
}
\seealso{
Other Display and Save:
\code{\link{render_graph_from_graph_series}()},
\code{\link{render_graph}()},
\code{\link{save_graph}()}
}
\concept{Display and Save}
|
1c1ff97322fd81b333e78d40eaffa8a63eac51ea | c95e114ec7a470d110634870bebc96380225bdfb | /man/eval_pgap.Rd | e2b731d7e6f08cfa9ddce3bae681bf7da40c47ec | [] | no_license | obrl-soil/dsmartr | 631b9e80189d64059d8ae65c94c498bb3d29b189 | ce19e6d8cdd5efbeabd649ceb79e370983889579 | refs/heads/master | 2022-12-23T19:08:49.784982 | 2022-12-16T23:40:46 | 2022-12-16T23:40:46 | 97,471,418 | 11 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,817 | rd | eval_pgap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dsmartr_evaluate.R
\name{eval_pgap}
\alias{eval_pgap}
\title{Calculate probability gap}
\usage{
eval_pgap(dsmartr_probs = NULL, cpus = 1)
}
\arguments{
\item{dsmartr_probs}{RasterBrick; 'dsmartr_probabilities' output by
\code{\link[dsmartr:collate]{dsmartr::collate()}}. Alternatively,
probability maps output by
\code{\link[dsmartr:most_likely]{dsmartr::most_likely()}} can be used, or a
list of two rasters read from disk.}
\item{cpus}{Integer; number of processors to use in parallel.}
}
\value{
\code{probability_gap}: RasterLayer depicting the probability gap.
Written to disk as GeoTIFF.
}
\description{
Calculates and maps the difference between the first and second most-probable
dsmartr probability surfaces. Requires outputs of
\code{\link[dsmartr:collate]{dsmartr::collate()}}.
}
\note{
This function is often called the 'confusion index', but has been
renamed as that term is used in multiple contexts within the scientific
literature.
}
\examples{
\dontrun{
# run collate() with the example data then:
pgap1 <-
eval_pgap(dsmartr_probs = collated[['dsmartr_probabilities']][[1:2]],
cpus = max(1, (parallel::detectCores() - 1)))
# or supply unstacked maps after running unstack() (slightly faster)
pgap2 <- eval_pgap(dsmartr_probs = most_likely_soil[c('most_likely_prob_1',
'most_likely_prob_2')],
cpus = max(1, (parallel::detectCores() - 1)))
# or read from file
mpp_1 <- raster(file.path(getwd(), 'most_likely_maps', 'most_likely_1.tif'))
mpp_2 <- raster(file.path(getwd(), 'most_likely_maps', 'most_likely_2.tif'))
pgap3 <- eval_pgap(dsmartr_probs = list(mpp_1, mpp_2),
cpus = max(1, (parallel::detectCores() - 1)))
}
}
|
7941f332e589d4ad773fe350ad04dba79d2476f1 | 0b8ff928c22d87ee5d7a8a19f5ce8ff51bd7155c | /man/simulateest.Rd | e6beee5af73fd72bd2d8dd71ede8bb1e88820241 | [] | no_license | Gtmille2/seamlessTrial | d9104369cfda1987c374d8b3535d925df7211105 | 7dcd49f4c09486ee3d383eb1df8bc673d77a658e | refs/heads/master | 2020-07-31T17:24:08.170740 | 2019-11-17T18:35:21 | 2019-11-17T18:35:21 | 210,692,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,638 | rd | simulateest.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulatetrials.R
\name{simulateest}
\alias{simulateest}
\title{Testing other simulate trials function}
\usage{
simulateest(n1 = 20, N1 = 100, N = 200, n.trt = 3, mean.s = NULL,
mean.t = NULL, p1, p2, sigma0, sigma, rho, nsim, design = "Pocock",
tau1, tau2, save.boundary)
}
\arguments{
\item{N1}{Number of patients with secondary endpoint available at first analysis}
\item{N}{The total number of patients in the trial}
\item{n.trt}{The number of treatments in the trial}
\item{mean.s}{The mean for short term endpoint sample groups}
\item{mean.t}{Mean for the long term endpoint sample groups}
\item{p1}{The covariate value for the first covariate}
\item{p2}{The covariate value for the second covariate}
\item{sigma0}{sigma0 in the bivariate normal distribution}
\item{sigma}{is the known sigma for the population}
\item{rho}{is the known correlation between endpoints.}
\item{nsim}{The number of simulation runs. The default is 1000}
\item{design}{The chosen covariate adaptive randomization procedure. Default is Pocock's design}
\item{tau1}{The chosen covariate adaptive randomization procedure. Default is Pocock's design}
\item{tau2}{The chosen covariate adaptive randomization procedure. Default is Pocock's design}
}
\description{
This function doesn't use an actual second point, just the projected based on the initial point
simulate.trials <- function(n1=20, N1=200, N=200, n.trt=3, mean.s=rep(0,3), mean.t=rep(0,3), p1, sigma0=1, sigma=1, rho=0.0, nsim=1000, save.boundary, simonly=0)
}
|
be47c8e8f1517b5f4552aafa67cadef77d4977d5 | 2ee2222783fc85066764d715c5ab6f62e8806c9d | /graph_script.R | 66eb87a1e27c8391b3bda3dda4fa405d5c7b03c9 | [] | no_license | Isrozzis/R-code | b507b0581b4717e7ed420abd2b13ed67b7d4ca16 | 24db7ce9f116a3321f9aae02aa731ee9d2c6edd3 | refs/heads/master | 2021-01-10T17:54:34.878347 | 2015-11-10T19:03:25 | 2015-11-10T19:03:25 | 45,886,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 930 | r | graph_script.R | ## how to make a line graph for each site on a single day for each hour for a single pollutant
## read in the data
hourly_O3 <- read.csv("C:/Users/Trevor/Documents/Air/hourly_ozone.csv")
##Clean up the negative values. They should not exist
hourly_O3$value[hourly_O3$value < 0] <- NA
## Create and clean up a data frame for a single day
df <- hourly_O3[hourly_O3$date == "2008-08-01", ]
df <- df[,-(1:4)]
df <- df[,-(2:4)]
df <- df[,-4]
## use the dcast function to restructure the data
library(reshape2)
c <- dcast(df, site ~ hour)
##restructure and clean up the data some more
tc <- t(c)
tc <- data.frame(tc)
colnames(tc) <- as.character(tc[1,])
tc <- tc[-1,]
tc$hour <- c(1:23)
## melt the data so we can graph is much easier with ggplot
melt <- melt(tc, id = "hour")
## graph the data with ggplot
library(ggplot2)
meltp <- ggplot(melt, aes(x = hour, y = value, colour = variable))
meltp + geom_line(size = .7)
|
633d3df4fb11e7585a12a18a115f88db139b330e | 6be20602745931c1d48f4b75445db5be6a4b6103 | /R/defaults.R | d780554e53cb42cf6bb42a6dc519f8546424b96d | [] | no_license | itsdalmo/reporttool | 86a533f763cfc7faa2813a44f11772979ef7b823 | bf751fde5edb0ccd396b6d6ae6c21bb077fd79bf | refs/heads/master | 2020-12-22T01:06:22.446606 | 2016-06-06T07:34:45 | 2016-06-06T07:34:45 | 33,544,836 | 1 | 0 | null | 2016-03-06T13:06:43 | 2015-04-07T13:30:26 | R | UTF-8 | R | false | false | 3,861 | r | defaults.R | #' @rdname utilities
#' @export
get_default <- function(string) {
if (!is.string(string)) stop("Input was not a string (character(1)).")
y <- default[stri_detect(names(default), regex = string, ignore_case = TRUE)]
# Drop list if only one entry is returned
if (length(y) == 1L) y[[1]] else y
}
# Default values ---------------------------------------------------------------
# Package-wide defaults looked up by get_default() via regex on these names.
default <- list(
  # Plotting palette (hex colours).
  "palette" = c("#F8766D", "#00BFC4", "#808080", "#00BF7D", "#9590FF", "#A3A500", "#EA8331"),
  # Latent variables of the EPSI satisfaction model, in causal order.
  "latents" = c("image", "expect", "prodq", "servq", "value", "epsi", "loyal"),
  # Strings treated as missing when reading data.
  "na_strings" = c("NA", " ", "", "#DIV/0!", "#NULL!", "#NAVN?", "#NAME?"),
  # Expected sheet/column layout of a survey workbook.
  "structure" = list(
    "survey" = c("df", "cd", "hd", "ents", "mm", "tr", "cfg"),
    "sheet" = c("data", "contrast data", "historic data", "entities", "measurement model", "translations", "config"),
    "ents" = c("entity", "n", "valid", "marketshare"),
    "mm" = c("latent", "manifest", "question", "type", "values"),
    "tr" = c("original", "replacement"),
    "cfg" = c("config", "value")),
  # Structural (inner) model: rows receive paths from columns marked 1.
  "model" = rbind("image" = c(0,0,0,0,0,0,0),
                  "expect" = c(1,0,0,0,0,0,0),
                  "prodq" = c(1,1,0,0,0,0,0),
                  "servq" = c(1,1,1,0,0,0,0),
                  "value" = c(0,0,1,1,0,0,0),
                  "epsi" = c(1,0,1,1,1,0,0),
                  "loyal" = c(0,0,0,0,0,1,0)),
  # Questionnaire items (manifest variables) associated with each latent.
  "associations" = list("image" = "q4",
                        "expect" = "q5",
                        "prodq" = "q7p",
                        "servq" = "q7s",
                        "value" = "q8",
                        "epsi" = c("q3", "q6", "q16"),
                        "loyal" = c("q10", "q15", "q15b")),
  # Required report labels and their Norwegian/Danish translations
  # (vectors are positionally aligned with "required").
  "translation" = list(
    "required" = c("image", "expect", "prodq", "servq", "value", "epsi", "loyal",
                   "mainentity", "subentity", "manifest", "difference", "question",
                   "contrast_average", "average", "study_average", "spring", "fall"),
    "norwegian" = c("Image/inntrykk", "Forventninger", "Produktkvalitet", "Servicekvalitet",
                    "Verdi for pengene", "Kundetilfredshet", "Lojalitet", "Bank",
                    "Avdeling", "Kode", "Diff", "Spørsmål", "Snitt nasjonal", "Snitt",
                    "Snitt webstudien", "Vår", "Høst"),
    "danish" = c("Image", "Forventninger", "Produktkvalitet", "Servicekvalitet",
                 "Værdi for pengene", "Kundetilfredshed", "Loyalitet", "Bank",
                 "Afdeling", "Kode", "Diff", "Spørgsmål", "Snit national", "Snit",
                 "Snit webstudien", "Forår", "Efterår")),
  # Default study configuration (setting/value pairs, positionally aligned).
  "config" = list(
    "setting" = c("reporttool", "study", "segment", "year", "period", "method",
                  "language", "cutoff", "latents", "marketshares"),
    "value" = c("1.4", "Barnehage", "", "2015", "fall", "web", "norwegian", .3, "mean", "no")
  ),
  # R Markdown template and theme files shipped with the package.
  "template" = list(
    "beamer" = list(
      "dir" = "rmd/beamer",
      "files" = "beamer_template.tex")),
  "theme" = list(
    "beamer" = list(
      "dir" = "rmd/beamer",
      "files" = c("beamercolorthememetropolis.sty",
                  "beamerfontthememetropolis.sty",
                  "beamerthemem.sty", "logo.eps",
                  "beamer_preamble.tex"))),
  # Regex patterns used when parsing scales, R Markdown and annotated code.
  "pattern" = list(
    "detect_scale" = "^[0-9]{1,2}[[:alpha:][:punct:] ]*",
    "extract_scale" = "^[0-9]{1,2}\\s*=?\\s*([[:alpha:]]*)",
    "rmd" = list(
      "chunk_start" = "^```\\{r",
      "chunk_end" = "```$",
      "chunk_eval" = ".*eval\\s*=\\s*((.[^},]+|.[^}]+\\))),?.*",
      "inline" = "`r[ [:alnum:][:punct:]][^`]+`",
      "section" = "^#[^#]",
      "slide" = "^##[^#]"),
    "code" = list(
      "yaml" = "^##\\+ ---",
      "inline" = "`r[ [:alnum:][:punct:]][^`]+`",
      "title" = "^##\\+\\s*#{1,2}[^#]",
      "text" = "^##\\+\\s.*"))
  )
3f404035a89d459f72580c45580294428a17b0d3 | 34f1c05e5fae6f679ccae0add331bb616b8d1d1e | /R/msBP.test.R | f96b5680232e35746b8c7292927ffb1f4fec8451 | [] | no_license | cran/msBP | cd452f56ef06bf07c8c17a921e95633fa1304900 | 0435f096cb1fae264cb2051e97eff84b0145d13f | refs/heads/master | 2023-09-01T03:47:30.196343 | 2023-08-23T00:00:13 | 2023-08-23T01:27:25 | 24,886,401 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,991 | r | msBP.test.R | msBP.test <- function(y, a, b, group, priorH0 = 0.5, mcmc, maxScale=5, plot.it=FALSE, ...)
{
priorH1 <- 1 - priorH0
group <- factor(group, labels=c(0,1))
w <- w1 <- w0 <- msBP.compute.prob(msBP.rtree(a,b,maxScale), root=FALSE)
Ps <- matrix(0.5, maxScale, mcmc$nrep)
BF <- rep(0.5, mcmc$nrep)
for(ite in 2:mcmc$nrep)
{
if(ite/mcmc$ndisplay == round(ite/mcmc$ndisplay)) cat("Iteration", ite, "over", mcmc$nrep, "\n")
#cat(ite)
Pm <- rep(c(1,Ps[,ite-1]), c(2^(0:(maxScale))))
W1 <- vec2tree(Pm*tree2vec(w) + (1-Pm)*tree2vec(w1))
W0 <- vec2tree(Pm*tree2vec(w) + (1-Pm)*tree2vec(w0))
sh0 <- msBP.postCluster(y[group==0], W0)
sh1 <- msBP.postCluster(y[group==1], W1)
nrv0 <- msBP.nrvTrees(sh0, maxS=maxScale)
nrv1 <- msBP.nrvTrees(sh1, maxS=maxScale)
n0 <- tree2vec(nrv0$n)
r0 <- tree2vec(nrv0$r)
v0 <- tree2vec(nrv0$v)
n1 <- tree2vec(nrv1$n)
r1 <- tree2vec(nrv1$r)
v1 <- tree2vec(nrv1$v)
n <- n0+n1
r <- r0+r1
v <- v0+v1
S <- rbeta(length(n), 1+n, a+v-n)
S0 <- rbeta(length(n0), 1+n0, a+v0-n0)
S1 <- rbeta(length(n1), 1+n1, a+v1-n1)
R <- rbeta(length(n), b+r, b+v-n-r)
R0 <- rbeta(length(n), b+r, b+v0-n0-r0)
R1 <- rbeta(length(n), b+r1, b+v1-n1-r1)
w <- msBP.compute.prob(structure(list(S = vec2tree(S), R = vec2tree(R)), class = "msbpTree"), root=FALSE)
w0 <- msBP.compute.prob(structure(list(S = vec2tree(S0), R = vec2tree(R0)), class = "msbpTree"), root=FALSE)
w1 <- msBP.compute.prob(structure(list(S = vec2tree(S1), R = vec2tree(R1)), class = "msbpTree"), root=FALSE)
testingprobs <- msBP.nesting(n,r,v,n0,r0,v0,n1,r1,v1,priorH0,a,b,maxScale)
Ps[,ite] <- testingprobs[1,]
BF[ite] <- testingprobs[4,maxScale]
}
Ps
Pp <- apply(Ps,1,mean)
if(plot.it)
{
plot(cumprod(Pp)~c(1:maxScale), xlab="Scale", ylab=expression(paste(hat(Pr),'(',H[0],' | ', - ')')), ylim=c(-0.2,1.2), cex=0.8, ty='b', yaxt='n',xaxt='n', lwd=1.5)
axis(2, at=c(0, 0.5, 1))
axis(1, at=c(1:maxScale))
}
out <- list(Ps=Ps, Pp=Pp, BF=mean(BF))
}
|
fbe918d427cb6ba13d5e61d3cfc245d9370d9362 | 684e590319bec83910d3ca1f03c0e2a43c3d462d | /man/label_by_colnumbs.Rd | 40766e8c7079f076cb44d70382833ac5aee8ad34 | [] | no_license | adamclemmitt/simpleSCDE | 88e2b50cbfe75a416a8cfa62b242207cab6a4ef3 | 3f082f8c7245ccdd27a3b5189e5dce7f4753645e | refs/heads/master | 2020-03-25T12:40:01.061657 | 2019-01-02T04:28:46 | 2019-01-02T04:28:46 | 143,786,761 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,209 | rd | label_by_colnumbs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/group_labels.R
\name{label_by_colnumbs}
\alias{label_by_colnumbs}
\title{Label two groups of cells based on the contents of their names}
\usage{
label_by_colnumbs(df, x.start, x.stop, y.start, y.stop, x.name = "group_x",
y.name = "group_y")
}
\arguments{
\item{df}{A data frame}
\item{x.start}{Index of \code{col.names} at which to start first
group of cells}
\item{x.stop}{Index of \code{col.names} at which to stop first
group of cells}
\item{y.start}{Index of \code{col.names} at which to start second
group of cells}
\item{y.stop}{Index of \code{col.names} at which to stop second
group of cells}
\item{x.name}{String label for first cell group (default: "group_x")}
\item{y.name}{String label for second cell group (default: "group_y")}
}
\value{
A named character vector that contains \code{x.name} and
\code{y.name} labels; cells whose column positions fall outside the
two given ranges are omitted
}
\description{
Label two groups of cells based on the contents of their names
}
\examples{
label_by_colnumbs(df = planaria_cells, x.start = 1, x.stop = 11, y.start = 12,
 y.stop = length(colnames(planaria_cells)))
}
|
f23dcf0bfe7a1c3abe329a25a6a204fbe5395f38 | eeaf84376b246e31ad342fb8fae6026d6cfa8a3b | /attendance.R | 817fe6f8872f17f10c2e956ce1082c76c6f8df18 | [] | no_license | mehtapriyesh/MSTeams_Attendance-Calculator | ccbd531982920caf8b0b8c78957333003818f09a | e0c1c874d7183474e3dbb2bafed8efb9316059b6 | refs/heads/master | 2022-11-23T06:05:34.313187 | 2020-07-28T03:28:56 | 2020-07-28T03:28:56 | 283,084,304 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,633 | r | attendance.R |
#upload path of the folder containing all of your csv files.
#In case you have just one file, you can directly add its path to variable names files.
#files = "C:/....../file.csv"
files <- list.files(path = "C:/Users/user/Desktop/multi", pattern = "*.csv", full.names = T)
reader <- function(x) {
read.csv(x,sep = "\t", fileEncoding="utf-16", stringsAsFactors = F)
}
library(lubridate)
library(tidyverse)
#This function row binds all the files that was in the folder
attendance <- sapply(files, reader, simplify=FALSE) %>%
bind_rows(.id = NULL)
#Duration of the meeting (in hours)
dur = 2
#Add name of all the hosts (as in the Teams CSV files) who have organized the meetings since there name will be on the top of the file.
prof = c("K S Rao (Dr.)", "Prashant Dhamale")
attendance$User.Action[attendance$User.Action == "Joined before"] = "Joined"
data = attendance %>%
mutate(
datetime = parse_date_time(Timestamp, "%m/%d/%Y %H:%M:%S %p", tz = "Asia/Kolkata" ),
date = as.Date(datetime),
change = ifelse(User.Action == "Joined", 1,-1),
prof = ifelse(Full.Name %in% prof, Full.Name, NA)
)%>%
fill(prof, .direction = "down") %>%
group_by(prof) %>%
mutate(
end = round_date(first(datetime),"hour") + hours(dur)
) %>%
ungroup() %>%
filter(datetime < end) %>%
mutate(
attended = round(difftime(end, datetime, units = "mins")*change)
)%>%
group_by(date, prof, Full.Name) %>%
summarize(
totalmins_attended = sum(attended)
)
#Add the name by which you would like to save the new file.
write.csv(data, 'attendance.csv')
|
32f872e0c7c6a6c31ff0b951a944f88fbfa43aa8 | 50fd7e8cc064a7f35ec30bd8bdf47ab648ee60bf | /man/arseq.kmeans.reactome.plot.Rd | 03615013603b64da00a700246a8e46d202bdf644 | [] | no_license | ajitjohnson/arseq | a0c8c4996c7137dbe8bf15d9b559191a27fb7242 | 5da13779253b47d3df07effd59d06053294ab6d2 | refs/heads/master | 2021-11-27T09:59:37.024830 | 2021-10-25T16:17:28 | 2021-10-25T16:17:28 | 205,567,517 | 8 | 2 | null | 2020-02-05T04:53:18 | 2019-08-31T16:14:14 | R | UTF-8 | R | false | true | 583 | rd | arseq.kmeans.reactome.plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arseq.kmeans.reactome.plot.R
\name{arseq.kmeans.reactome.plot}
\alias{arseq.kmeans.reactome.plot}
\title{Reactome Pathway Enrichment of Kmeans clustering output}
\usage{
arseq.kmeans.reactome.plot(clusters, save.plot = FALSE, save.dir = getwd())
}
\arguments{
\item{clusters}{List of gene clusters returned by Kmeans clustering algorithm}

\item{save.plot}{Logical; whether to save the generated plot (default: \code{FALSE})}

\item{save.dir}{Directory in which to save the plot (default: \code{getwd()})}
}
\value{
DotPlot of Reactome Enrichment
}
\description{
Reactome Pathway Enrichment of Kmeans clustering output
}
\examples{
arseq.kmeans.reactome.plot (clusters)
}
|
# Convert SVY21 (EPSG:3414) projected coordinates to WGS84 (EPSG:4326) lon/lat.
library(rgdal)

# Input table; must contain the SVY21 easting/northing columns X.Addr / Y.Addr.
data <- read.csv("")

# Promote the coordinate columns to a SpatialPoints object tagged as SVY21.
pts <- data.frame(lon = data$X.Addr, lat = data$Y.Addr)
coordinates(pts) <- c("lon", "lat")
proj4string(pts) <- CRS("+init=epsg:3414") # SVY21

# Reproject into WGS84 and flatten back to a plain data frame of lon/lat.
target <- CRS("+init=epsg:4326") #WGS84
converted <- as.data.frame(spTransform(pts, target))

# Prepend the converted lon/lat columns to the original table.
data <- cbind(converted, data)
6f759b183b3690289f87a3ae1498946cce2b714a | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.machine.learning/man/sagemaker_get_device_fleet_report.Rd | 9d8a3155d7813693c4828caf8d79b3c8f94c7dd0 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 499 | rd | sagemaker_get_device_fleet_report.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_get_device_fleet_report}
\alias{sagemaker_get_device_fleet_report}
\title{Describes a fleet}
\usage{
sagemaker_get_device_fleet_report(DeviceFleetName)
}
\arguments{
\item{DeviceFleetName}{[required] The name of the fleet.}
}
\description{
Describes a fleet.
See \url{https://www.paws-r-sdk.com/docs/sagemaker_get_device_fleet_report/} for full documentation.
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.