blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2f8e5b0376818334111db9cddb804a6e4c7c86b9
|
0b1a24cee99eb7746d63ea900f7cb7956c370c1c
|
/R/rotate_to_coordaxes.R
|
52c5683bcfd96275f3d1da16109f788f34bf2eda
|
[] |
no_license
|
cran/SyScSelection
|
d3692ecb4ca10c740196556e2fb27e991e033d8b
|
b1b7e61004dd3832caf9380c177b2c2aba24234d
|
refs/heads/master
| 2023-01-04T01:43:50.391849
| 2020-10-26T13:10:02
| 2020-10-26T13:10:02
| 276,711,469
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 781
|
r
|
rotate_to_coordaxes.R
|
#' Rotates the ellipsoid (hellip) so its principal axes align with the coordinate axes. Both ellipsoids are centered at the origin. Note that there are (2^d)*d! valid ways to rotate the ellipsoid to the axes. This algorithm does not prescribe which solution will be provided.
#' @param hellip The shape to be rotated, must be centered at the origin
#' @return A list of: hellip2 - A new hyperellipsoid, rotated to the coordinate axes and tfm - the transformation matrix that creates the rotation
#' @import pracma
rotate_to_coordaxes <- function(hellip){
  # Columns of V are the principal-axis vertex vectors of the ellipsoid
  V <- vertices(hellip)
  d <- max(dim(V))
  # Target vertex matrix: diagonal, so each axis lies on a coordinate axis
  # while keeping its original (Euclidean) length
  V2 <- matrix(0, d, d)
  for (i in seq_len(d)) {
    V2[i, i] <- norm(V[, i], type = "2")
  }
  # Solve tfm %*% V = V2 for the transformation (pracma::mrdivide)
  tfm <- mrdivide(V2, V)
  hellip2 <- transform_ellipsoid(hellip, tfm)
  # Fix: name the list elements as documented above; positional access
  # (result[[1]], result[[2]]) still works for existing callers.
  return(list(hellip2 = hellip2, tfm = tfm))
}
|
f9b18bcb7632518be4e9c8ba16575b1effec7436
|
d0f5623feadaad07540301d0fe2c64440ec02e39
|
/R/export_for_python.R
|
c0b5d1ed33329137a9828519b4466eb9b57ead8e
|
[
"MIT"
] |
permissive
|
sansomlab/tenx
|
81d386f4f593af88565cb7103c4f9c8af57b074a
|
1bfd53aaa3b86df1e35912e1a4749dcb76c4912d
|
refs/heads/master
| 2023-07-25T22:31:32.999625
| 2023-07-12T11:11:17
| 2023-07-12T11:11:17
| 136,856,953
| 54
| 18
|
MIT
| 2022-03-13T15:05:54
| 2018-06-11T00:53:52
|
R
|
UTF-8
|
R
| false
| false
| 3,302
|
r
|
export_for_python.R
|
## Export data to be used as input for analysis with python
## packages such as scanpy
# Libraries ----
# require() returns FALSE instead of erroring, so wrapping the calls in
# stopifnot() makes a missing package fail fast with a clear error.
stopifnot(
require(optparse),
require(Seurat),
require(tenxutils)
)
# Options ----
# Command-line interface for the pipeline; flags select which matrices
# are exported and where they are written.
option_list <- list(
make_option(c("--seuratobject"), default="begin.Robj",
help="A seurat object after dimensionality reduction"),
make_option(c("--reductiontype"), default="pca",
help="Name of dimensional reduction slot to write (e.g. 'pca', 'integratedreduced')"),
make_option(c("--usesigcomponents"), default=TRUE,
help="Whether or not the pipeline is using significant components"),
make_option(c("--counts"), default=FALSE,
help="Export the raw counts (counts)"),
make_option(c("--data"), default=FALSE,
help="Export the normalised data (data)"),
make_option(c("--scaled"), default=FALSE,
help="Export the scaled data (data.scaled)"),
make_option(c("--outdir"), default=".",
help="the file to which the reduced dimensions will be written")
)
opt <- parse_args(OptionParser(option_list=option_list))
# Echo the parsed options into the pipeline log
cat("Running with options:\n")
print(opt)
## Functions
# Write one assay matrix (e.g. "counts", "data", "scale.data") from the
# given Seurat object as a gzipped TSV with no row or column names.
# Fix: the body previously read the global `s` instead of the
# `seurat_object` argument, so the parameter was silently ignored.
exportData <- function(seurat_object, slot="counts", outdir=NULL) {
    x <- GetAssayData(seurat_object, slot=slot)
    write.table(x, gzfile(file.path(outdir,paste("assay", slot, "tsv.gz", sep="."))),
                quote=FALSE, sep="\t", row.names = FALSE, col.names= FALSE)
}
# Write one dimensional-reduction embedding (e.g. "pca") from the given
# Seurat object as a gzipped TSV with column names but no row names.
# Fix: the body previously read the global `s` instead of the
# `seurat_object` argument, so the parameter was silently ignored.
exportEmbedding <- function(seurat_object, embedding="PCA", outdir=NULL) {
    x <- Embeddings(object = seurat_object, reduction = embedding)
    write.table(x, gzfile(file.path(outdir, paste("embedding", embedding, "tsv.gz", sep="."))),
                quote=FALSE, sep="\t", row.names = FALSE, col.names = TRUE)
}
# Write the per-cell metadata of a Seurat object (plus a "barcode"
# column holding the cell names) to <outdir>/metadata.tsv.gz.
exportMetaData <- function(seurat_object, outdir=NULL) {
  metadata <- seurat_object[[]]              # data frame of per-cell metadata
  metadata$barcode <- Cells(seurat_object)   # append cell barcodes
  out_path <- file.path(outdir, "metadata.tsv.gz")
  write.table(metadata, gzfile(out_path),
              quote = FALSE, sep = "\t",
              row.names = FALSE, col.names = TRUE)
}
# Read RDS seurat object
message("readRDS")
s <- readRDS(opt$seuratobject)
message("export_for_python running with default assay: ", DefaultAssay(s))
# Write out the cell and feature names
message("writing out the cell and feature names")
writeLines(Cells(s), gzfile(file.path(opt$outdir,"barcodes.tsv.gz")))
writeLines(rownames(s), gzfile(file.path(opt$outdir,"features.tsv.gz")))
# Write out embeddings (such as e.g. PCA)
message("Writing matrix of reduced dimensions")
exportEmbedding(s, opt$reductiontype, outdir=opt$outdir)
# Write out the metadata
message("Writing out the metadata")
exportMetaData(s, outdir=opt$outdir)
# Write out significant components
# (getSigPC presumably comes from tenxutils - confirm)
if (opt$usesigcomponents == TRUE) {
message("Writing vector of significant components")
comps <- getSigPC(s)
write.table(comps, file = paste0(opt$outdir, "/sig_comps.tsv"),
quote = FALSE, col.names = FALSE, row.names = FALSE)
}
# Write out assay data (such as e.g. raw counts), one file per
# requested slot
if(opt$counts) { exportData(s, "counts", outdir=opt$outdir) }
if(opt$data) { exportData(s, "data", outdir=opt$outdir ) }
if(opt$scaled) { exportData(s, "scale.data", outdir=opt$outdir) }
message("export_for_python.R final default assay: ", DefaultAssay(s))
message("Completed")
|
d354e123cb6361eaf323e29668eb4b68e0852303
|
6812fef9dc352d2aef5c57c6f930e0004bf314a5
|
/man/EWOC2-internal.Rd
|
3a4e93c6b90a9646ba9eeba2975abb2a3379fe8f
|
[] |
no_license
|
cran/EWOC2
|
b468a36e3ffd637bfb4b57464470044eea1fe092
|
a548c03ea2b1126dec6190bc0ffc6d88d57f7717
|
refs/heads/master
| 2020-12-21T23:09:44.399994
| 2019-03-29T14:20:03
| 2019-03-29T14:20:03
| 236,594,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 478
|
rd
|
EWOC2-internal.Rd
|
\name{EWOC2-internal}
\alias{Function.decimalplaces}
\alias{Function.fdist}
\alias{Function.generate.bugs.file}
\alias{Function.Mi.Dose.Increment_simu}
\alias{Function.mtd_logistic}
\alias{Function.MTDdoses}
\alias{Function.nextdose.2d}
\alias{Function.parameter}
\alias{Function.postdlt}
\alias{Function.simu.2d}
\title{Internal functions for EWOC2}
\description{Internal functions for EWOC2}
\details{These are not to be called by the user.}
\keyword{internal}
|
41c5cd25261a6e4b3ea418812d3eb121fe8b62b1
|
bd522db24d4a8cc5136d9f2253a595737abbad10
|
/man/sampleNum.Rd
|
759619e91673d3d228c503771fca06fca43f436a
|
[
"Artistic-2.0"
] |
permissive
|
hshdndx/new-to-CNV
|
8a288427a5f627693a8b4e9856cf6b1617733cfb
|
6dd83a15fc0e27e998462c8ed68b81e85725c1be
|
refs/heads/master
| 2021-01-11T12:00:42.206325
| 2016-12-16T07:07:34
| 2016-12-16T07:07:34
| 76,614,237
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,388
|
rd
|
sampleNum.Rd
|
\name{sampleNum}
\alias{sampleNum}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Number of samples
}
\description{
This function gets number of samples from an object of \code{\link{PatCNVSession-class}} or \code{\link{PatCNVData-class}}
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
sampleNum(obj)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{obj}{
an object of \code{\link{PatCNVSession-class}} or \code{\link{PatCNVData-class}}
%% ~~Describe \code{obj} here~~
}
}
\value{
Number of samples in corresponding object
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\author{
%% ~~who you are~~
Chen Wang
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
retrieve sample information \code{\link{sampleInfo}}
}
\examples{
#=== load a simulation example
config.filename <- 'sim1.ini'
makeSimulation(config.filename)
sim.session <- createSession(config.filename)
#=== print number of samples
cat("total number of samples:",sampleNum(sim.session),"\n")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{methods}
|
a0a3827013ad0b100424ff4b7ac5efe06ebec5e9
|
09aec0d4fd39e5e6e1ca9a37318a386252d3e6e4
|
/dataprep.R
|
9de719f7976e11937cfd46578977010208fd7144
|
[] |
no_license
|
fsmontenegro/ddp
|
fc77a2312141fb076add0489ad1162ddd872f5cd
|
1ac07b351bae647781fd0da855a55962633786eb
|
refs/heads/master
| 2021-01-10T11:07:33.213840
| 2015-12-24T21:25:02
| 2015-12-24T21:25:02
| 48,556,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,239
|
r
|
dataprep.R
|
#
# Markham Life Fix
#
# Libraries and cleanup
# rm(list=ls())
library(stringr)
library(dplyr)
library(tidyr)
# All three Tabula-generated CSVs share the same column layout, so read
# them through one helper instead of repeating the colnames assignment.
program_cols <- c("Program.Name","Age","Location","Day","Time","Start.Date","Classes","Course.Code","Fee")
# Read one program CSV (blank cells become NA) and apply the shared names.
read_program_csv <- function(path) {
  d <- read.csv(path, stringsAsFactors = FALSE, na.strings = "")
  colnames(d) <- program_cols
  d
}
ps <- read_program_csv("preschool.csv")
ch <- read_program_csv("children.csv")
pt <- read_program_csv("preteen.csv")
# One combined table of all programs
df <- rbind(ps, ch, pt)
commcentres <- read.csv("commcenters.csv", stringsAsFactors = FALSE)
# repeat.before function from http://stackoverflow.com/questions/7735647/replacing-nas-with-latest-non-na-value
# Carry the last observed (non-NA) value forward through a vector.
# Leading NAs are preserved: if x starts with NA, position 1 is treated
# as "observed" so the leading run repeats x[1] (i.e. stays NA).
repeat.before <- function(x) {
  observed <- which(!is.na(x))
  if (is.na(x[1])) {
    observed <- c(1, observed)
  }
  # Each observed value covers the gap up to the next observed position
  run_lengths <- diff(c(observed, length(x) + 1))
  rep(x[observed], times = run_lengths)
}
# Fix/Expand Program Names
# Program names appear only on their first row in the PDF extract, so
# carry them down over the rows they cover.
df$Program.Name<-repeat.before(df$Program.Name)
# Expand Age & Replace with Months
df$Age<-repeat.before(df$Age)
df$F_Age <- df$Age # Save original Age description for later
# Rewrite open-ended "3 yrs+" as an explicit "3 - 99 yrs" range so the
# range parser below can handle it uniformly
df$F_Age <- gsub(" yrs\\+"," - 99 yrs",df$F_Age)
# Convert a (value, unit) pair such as c("3", "yrs") or c("6", "mth")
# to a number of months. Months pass through unchanged; years are
# multiplied by 12. Any other unit falls through (NULL, invisibly).
age_convert <- function(x) {
  unit <- x[2]
  if (grepl("mth", unit)) {
    return(as.numeric(x[1]))
  }
  if (grepl("yr", unit)) {
    return(as.numeric(x[1]) * 12)
  }
}
# Take "x [unit] - y unit" and convert to months
age_calc <- function (x) {
d <- unlist(str_split(x," - "))
agestart <- str_trim(unlist(str_split(d[1]," ")))
ageend <- str_trim(unlist(str_split(d[2]," ")))
if (is.na(agestart[2])) {
agestart[2] <- ageend[2]
}
ageend[1]<-as.numeric(ageend[1])+1 # "5 yr" means up to "5 yrs, 364 days"
converted <- paste(age_convert(agestart),age_convert(ageend),sep=" - ")
return(converted)
}
# Apply the month conversion to every age range
df$F_Age <- sapply(df$F_Age,age_calc)
# Adjust location and convert to factor
df$Location<-str_trim(df$Location)
df$Location<-as.factor(df$Location)
# Adjust day of the week: strip dots and spaces, lower-case, factor
df$Day<-str_trim(df$Day)
df$F_Day <- gsub("[\\. ]","",df$Day)
df$F_Day<-as.factor(tolower(df$F_Day))
# Adjust start time: drop dots ("p.m." -> "pm") and upper-case
df$F_Time <- gsub("\\.","",df$Time)
df$F_Time <- toupper(df$F_Time)
# convert start date to actual date (the CSVs omit the year)
base_year <- "2016"
df$F_Start.Date <- paste(gsub("\\.","",df$Start.Date),base_year,sep = ' ')
df$F_Start.Date <- as.Date(df$F_Start.Date,format = "%b %d %Y")
#birthday <- as.Date("2010-04-25")
# Split the "start - end" age and time ranges into separate columns
t1 <- df %>%
separate(col=F_Age,into=c("F_AgeStart","F_AgeEnd"),sep = " - ") %>%
separate(col=F_Time,into=c("F_TimeStart","F_TimeEnd"),sep = " - ")
t1$F_AgeStart <- as.numeric(t1$F_AgeStart)
t1$F_AgeEnd <- as.numeric(t1$F_AgeEnd)
# Parse clock times ("6:30 PM") into POSIXct (date defaults to today;
# only the time-of-day component is meaningful here)
t1$F_TimeStart <- as.POSIXct(t1$F_TimeStart,format="%I:%M %p")
t1$F_TimeEnd <- as.POSIXct(t1$F_TimeEnd,format="%I:%M %p")
|
bf105be8eae46626cd549ce955a45f82cc883b12
|
6bc36e666b971851a643a4ee70962032f96fa6eb
|
/Lineadeproduccion/ui.R
|
4d5ab6c677eb708e6ab618ed3167df649d659950
|
[] |
no_license
|
espinosabouvy/familiasdeproductos
|
f6bfc75ee550899d21ecdbbfeaf20c9fdd98196d
|
fda8fba69303e7231324906740076233be77fbdb
|
refs/heads/master
| 2020-06-22T15:30:44.632378
| 2017-01-12T03:34:14
| 2017-01-12T03:34:14
| 74,587,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,941
|
r
|
ui.R
|
library(shiny)
# Shiny UI for grouping shoe models into production lines via clustering.
# All user-facing strings are in Spanish and are kept verbatim.
shinyUI(fluidPage(
titlePanel("Crear lineas de produccion y asignar modelos"),
sidebarLayout(
sidebarPanel(
h4("Te invitamos a que definas tus lineas de produccion utilizando los datos de tu empresa."),
h5("El archivo debe tener un formato de columnas como se muestra en la figura. Tus datos
pueden tener 3 o más puestos o tipos de operador"),
# Example-format screenshot, loaded from an external site
img(src= "http://www.magro.com.mx/images/formato.PNG", align = "left",
width = 200),
# CSV upload with the user's production data
fileInput("browse", "Selecciona archivo CSV",
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv")
),
checkboxInput("header", "Datos tienen encabezado", TRUE),
downloadButton("download","Descargar asignacion")
),
mainPanel(
# h5("Esta versión permite agrupar 20 estilos, si necesitas agrupar más puedes comprar
# la suscripción en Apps/Comprar aplicaciones o enviarnos un correo en la cuenta
# luis@magro.com.mx para ayudarte"),
h5("Si tienes alguna duda de como funciona esta app, puedes enviarnos un correo a
luis@magro.com.mx para ayudarte o puedes ver el artículo que explica su función y
funcionamiento en http://www.magro.com.mx/index.php/news/7-lineasprodcalzado"),
# One tab per stage of the analysis; all outputs are rendered by the
# server under the matching output IDs
tabsetPanel(
tabPanel("Datos leidos",DT::dataTableOutput("tabla_completa")),
tabPanel("Estadistica",
tableOutput("tablainicial"),
plotOutput("boxplotini"),
plotOutput("graficoinicial")),
tabPanel("Líneas de producción",
column(6,
# Dendrogram cut height controlling how many lines result
sliderInput("altura_cluster", "Indice de desviacion",
min=2, max= 3000,
step = 50, value = 500)),
column(6,
p("Líneas de producción a crear: "),
verbatimTextOutput("lineas")),
plotOutput("dendograma")),
tabPanel("Modelos asignados", DT::dataTableOutput("tabla_asignacion",
width = 400)),
tabPanel("Analisis Final y Medicion de mejora",
tableOutput("mejora"),
tableOutput("total.fam"),
plotOutput("grafico.final"),
tableOutput("desviaciones"))
)
)
)
))
|
558f5d238e8a7a9e8c7555faa271fae4605045d6
|
68d81fc3bd291379d987b85aa7e8f1f872f72d7d
|
/app.R
|
2f5498c602e445031bcfb8f8027ad2de90b04220
|
[] |
no_license
|
Bailey-B/participatingjournalsapp
|
52d20e7ddeb93f43673758487764b600ac8ef84e
|
7466736ab3bc5e6d52fe6affc6b627bf126f77be
|
refs/heads/master
| 2020-07-05T10:02:34.652027
| 2019-08-16T00:01:22
| 2019-08-16T00:01:22
| 202,616,887
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,373
|
r
|
app.R
|
# SAGE Path Participating Journals
# Shiny app that displays the list of included journals as a table.
library(shiny)
library(shinythemes)
library(tidyverse)
library(DT)
# UI: logo, intro text, and the journals table
ui <- fluidPage(
titlePanel("SAGE Path Participating Journals"),
mainPanel(
fluidRow(img(src = "logo.png", align = "left", width = "250")),
fluidRow(p("example text"), column(1)),
fluidRow(DTOutput("participatingjournals"))
)
)
server <- function(input, output) {
# Load the journal list and keep only journals marked as included
participatingjournals <- read.csv("participatingjournals.csv")
participatingjournals <- participatingjournals[participatingjournals$SAGE.Path.Status == "Included",]
# Drop bookkeeping columns that should not be displayed
participatingjournals <- subset(participatingjournals, select = -c(TLA,
SPIN.Major.Disciplines.Combined,
SAGE.Path.Status))
# Display-friendly column headers
colnames(participatingjournals) <- c("Journal", "Primary Discipline", "Impact Factor?", "Other Indexing",
"Gold Open Access?","APC, IF OA")
# Render all rows on a single page with no length selector.
# NOTE(review): escape = 2 limits HTML escaping to column 2 -
# confirm which columns are expected to carry raw HTML.
output$participatingjournals <- renderDT({
datatable(participatingjournals,
options = list(pageLength = nrow(participatingjournals),
lengthChange = FALSE),
rownames = FALSE,
escape = 2,
filter = "none")
})
}
shinyApp(ui, server)
|
87c7f84ba9df5388904b8106b01a2aae55d775b6
|
7811ab5322831bbfda18f2e287781c22b9f9c7ae
|
/Plantgrowth.R
|
8bc75656a2038e9e307e60d43c7210f4cf25b1cb
|
[] |
no_license
|
Anupwilson/datascienceR_code
|
740f308e921d9ed85fdc59eccf26cdcbe9681853
|
0b700583016d5bb5de856ea64e6c202778dd50b5
|
refs/heads/main
| 2023-04-17T03:23:56.115341
| 2021-05-05T10:57:08
| 2021-05-05T10:57:08
| 309,035,862
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 896
|
r
|
Plantgrowth.R
|
# Explore the built-in PlantGrowth dataset (30 rows: numeric `weight`,
# factor `group`).
harshi <- datasets::PlantGrowth  # local copy (kept; later lines use PlantGrowth directly)
# to find first 10 rows of plantgrowth
head(PlantGrowth, 10)
# to find last 10 rows of plantGrowth
tail(PlantGrowth, 10)
# to find summary of the plantGrowth
summary(PlantGrowth)
PlantGrowth[, c(1, 2)]  # both columns
# NOTE: there is no 6th column; a negative out-of-range index removes
# nothing, so this is simply a full copy of the data frame.
df <- PlantGrowth[, -6]
# summary of plantgrowth (first column = weight)
summary(PlantGrowth[, 1])
# summary of weight on plantgrowth
summary(PlantGrowth$weight)
# summary of group on plantgrowth (factor level counts)
summary(PlantGrowth$group)
# plotting
plot(PlantGrowth, col = 'blue')
# plotting on weight on plantgrowth
plot(PlantGrowth$weight)
# plotting on group on plantgrowth (factor -> bar chart of counts)
plot(PlantGrowth$group)
# Barplot on plantgrowth
barplot(PlantGrowth$weight)
# Fix: barplot() needs numeric heights, not a factor; the original
# barplot(PlantGrowth$group) errored - plot the level counts instead.
barplot(table(PlantGrowth$group))
# Histogram on plantgrowth
hist(PlantGrowth$weight)
# Fix: hist() requires numeric input; the original hist(PlantGrowth$group)
# errored - use the factor's integer codes.
hist(as.numeric(PlantGrowth$group))
# boxplot on plantgrowth
boxplot(PlantGrowth)
# giving main title to the boxplot
boxplot(PlantGrowth, main = 'multiple boxplots')
|
f7762fa215c874685539e23142a0a31d0ba2f374
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/10341_2/rinput.R
|
748bdd18a849279a057801d8d74d6d9e79ce4a0b
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Unroot a phylogenetic tree: read a Newick tree from disk, remove its
# root, and write the unrooted tree back out (for codeml processing).
library(ape)
testtree <- read.tree("10341_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10341_2_unrooted.txt")
|
5f37244b04e6fe8ab2e503ffde2bd15c7ab37386
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/KoNLP/examples/get_dictionary.Rd.R
|
1fa6f6f5f98a0dc914c7a626dc4b8be01b7c4ec1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
get_dictionary.Rd.R
|
# Extracted example code for KoNLP's get_dictionary() help page.
# The actual call is wrapped in "Not run" markers, so nothing executes.
library(KoNLP)
### Name: get_dictionary
### Title: Get Dictionary
### Aliases: get_dictionary
### ** Examples
## Not run:
##D dic_df <- get_dictionary('sejong')
## End(Not run)
|
688cea34b43d368264601cde688fac7352b5864b
|
e9f65ddf2b049eea7e60d6dba14a531a0ea429bf
|
/fridge.R
|
96c389b86d4a434c25594d98d92686692a23f2e2
|
[] |
no_license
|
andrejondracka/IoT-analytics---electricity-consumption
|
2ccdc531d2573552ea01a2345683d7c4f7e23909
|
341e008cd7b9fd043380e7312befa47f6875962b
|
refs/heads/master
| 2020-09-07T23:57:56.868197
| 2019-11-11T10:24:44
| 2019-11-11T10:24:44
| 220,950,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,649
|
r
|
fridge.R
|
###fridge consumption
# NOTE(review): relies on a pre-existing `meter_data` data frame (with
# DateTime, Date and `Laundry Room` columns) and an attached dplyr -
# confirm the upstream script that creates them.
meter_data_laundry2 <- meter_data[,c('DateTime', 'Laundry Room')]
# Plot a slice of laundry-room readings (rows 1380-2820) over time
plot(meter_data_laundry2$DateTime[1380:2820],
meter_data_laundry2$`Laundry Room`[1380:2820], type = 'l',
xlab = 'hour', ylab = 'energy consumption (Wh)')
meter_data_laundry <- meter_data[,c('Date', 'Laundry Room')]
# Collect days whose laundry-room reading peaks at exactly 2 Wh -
# presumably days when only the fridge ran; TODO confirm threshold
meter_data_offdays <- data.frame(matrix(ncol = 2, nrow = 0))
colnames(meter_data_offdays) <- c('Date', 'Laundry Room')
bla <- unique(meter_data_laundry$Date)
for (ii in bla) {
tempdata = meter_data_laundry[meter_data_laundry$Date == ii,]
if (max(tempdata$`Laundry Room`) == 2) {
# NOTE(review): growing a data frame with rbind in a loop is O(n^2);
# consider collecting indices and subsetting once
meter_data_offdays <- rbind (meter_data_offdays, tempdata)
}
}
# Daily totals for the selected "fridge-only" days
meter_data_offdays_daily <- meter_data_offdays %>% group_by(Date) %>% summarize(`Laundry Room` = sum(`Laundry Room`))
meter_data_offdays_daily$Date <- as.POSIXct(meter_data_offdays_daily$Date)
plot(meter_data_offdays_daily$Date, meter_data_offdays_daily$`Laundry Room`)
# Drop days with implausibly high totals (>= 1000 Wh)
meter_data_daily_offrem <- subset(meter_data_offdays_daily, `Laundry Room` < 1000)
plot(meter_data_daily_offrem$Date, meter_data_daily_offrem$`Laundry Room`)
# Days elapsed since the first retained day (86400 seconds per day)
meter_data_daily_offrem$Daydiff <- (meter_data_daily_offrem$Date - meter_data_daily_offrem$Date[1]) / 86400
# Linear trend of daily consumption over time
linfit <- lm(meter_data_daily_offrem$`Laundry Room` ~ meter_data_daily_offrem$Daydiff)
# Yearly drift in daily consumption (slope per day * 365)
linfit$coefficients[2] * 365
# Fitted trend line for plotting
meter_data_daily_offrem$fit <- meter_data_daily_offrem$Daydiff*linfit$coefficients[2] + linfit$coefficients[1]
plot(meter_data_daily_offrem$Date, meter_data_daily_offrem$`Laundry Room`, xlab = 'Year', ylab = 'Daily consumption (Wh)')
lines(meter_data_daily_offrem$Date, meter_data_daily_offrem$fit, col = 'red')
|
f289a11f35dd261bdf3d78b7184522fbb8631767
|
89613fd7a4b0dc6758c06166738fe6b34226f4c3
|
/calcIPPGenCapShare.R
|
d5998ffe15cf124b8ae8325bac77d92b44969d38
|
[
"MIT"
] |
permissive
|
nsbowden/eiaGenerationCapacity
|
711da9ec170e8cdb4ee19b85348ee289b32008c3
|
0d633a3fa6676f0f72f4a9aa1f9da94e94a69e12
|
refs/heads/master
| 2020-04-18T05:20:10.432116
| 2019-01-24T01:27:11
| 2019-01-24T01:27:11
| 167,275,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,312
|
r
|
calcIPPGenCapShare.R
|
# Compute two measures of independent-power-producer (IPP) ownership of
# generation capacity, per year and state:
#   ipp2util  = ipp / (ipp + utility) capacity
#   ipp2total = ipp / total capacity
# Expects columns: year, state, producer, fuel, nameplate.
calcIPPShare <- function(df) {
  # Total nameplate capacity per (year, state, producer, fuel)
  agg <- aggregate(df$nameplate,
                   by = list(df$year, df$state, df$producer, df$fuel),
                   FUN = sum)
  names(agg) <- c("year", "state", "producer", "fuel", "capacity")
  # Work from the fuel-agnostic rows only
  all_sources <- agg[agg$fuel == "All Sources", ]
  ### There are even two debateable measures here ipp/(ipp + util) or ipp/total
  # Pull one producer's rows and rename its capacity column
  extract <- function(producer_label, capacity_name) {
    sub <- all_sources[all_sources$producer == producer_label, ]
    names(sub)[names(sub) == "capacity"] <- capacity_name
    sub[c("year", "state", capacity_name)]
  }
  utils <- extract("Electric Generators, Electric Utilities", "utilcapacity")
  total <- extract("Total Electric Power Industry", "totalcapacity")
  ipp <- extract("Electric Generators, Independent Power Producers", "ippcapacity")
  # Outer-join on (year, state) so missing producers survive as NA ...
  gen <- merge(merge(ipp, utils, all = TRUE), total, all = TRUE)
  # ... then treat absent ipp/utility capacity as zero
  gen$ippcapacity[is.na(gen$ippcapacity)] <- 0
  gen$utilcapacity[is.na(gen$utilcapacity)] <- 0
  gen$ipp2util <- gen$ippcapacity / (gen$ippcapacity + gen$utilcapacity)
  gen$ipp2total <- gen$ippcapacity / gen$totalcapacity
  gen
}
|
f3d2b2a721b5febed0889ffce37b8c4cbf49ef79
|
b1ea46089447e54565d29a3c6b0dcb0b166de32d
|
/5번.R
|
58761a9cb2d6bd72a2148984e6cd09cb664f1c39
|
[] |
no_license
|
soyeon710/R-STUDY
|
8d3d364dc2ff7ed8a1a1ea843f196e27b179543c
|
64cf49b59c88d05f1038ccc18e33d8c7edf3246e
|
refs/heads/master
| 2021-08-30T12:33:02.372686
| 2017-12-18T00:37:20
| 2017-12-18T00:37:20
| 110,492,191
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 13,995
|
r
|
5번.R
|
# Data analysis project - Chapter 9 (Korea Welfare Panel data)

# Prepare packages ----
# NOTE(review): installing at run time is unusual; consider running once manually
install.packages("foreign")
library(foreign)   # read SPSS files
library(dplyr)     # data wrangling
library(ggplot2)   # visualisation
library(readxl)    # read Excel files

# Prepare data ----
# Load the raw survey data
raw_welfare <- read.spss(file = "Koweps_hpc10_2015_beta1.sav", to.data.frame = T)
# Work on a copy so the raw data stays untouched
welfare <- raw_welfare

# Inspect data ----
head(welfare)
tail(welfare)
View(welfare)
dim(welfare)
str(welfare)
summary(welfare)

# Rename variables ----
welfare <- rename(welfare,
                  sex = h10_g3,            # sex
                  birth = h10_g4,          # birth year
                  marriage = h10_g10,      # marital status
                  religion = h10_g11,      # religion
                  income = p1002_8aq1,     # monthly income
                  code_job = h10_eco9,     # job code
                  code_region = h10_reg7)  # region code

# Section header string (auto-prints): "Does income differ by sex?"
"성별에 따라 월급이 다를까?"

# 1. Inspect the sex variable
class(welfare$sex)
table(welfare$sex)
# Check for outliers
table(welfare$sex)
# Recode the outlier code (9 = don't know / no answer) to NA
# (fixed: the original line was missing its closing parenthesis)
welfare$sex <- ifelse(welfare$sex == 9, NA, welfare$sex)
# Check missing values
table(is.na(welfare$sex))
# Label the sex categories
# (fixed: the original fused two statements onto one line)
welfare$sex <- ifelse(welfare$sex == 1, "male", "female")
table(welfare$sex)
qplot(welfare$sex)

# Income variable: inspection and preprocessing
# 1. Inspect the variable
class(welfare$income)
summary(welfare$income)
qplot(welfare$income)
qplot(welfare$income) + xlim(0, 1000)
# 2. Preprocess
# Check outliers
summary(welfare$income)
# Recode outliers (0 and 9999) to NA
welfare$income <- ifelse(welfare$income %in% c(0, 9999), NA, welfare$income)
# Check missing values
table(is.na(welfare$income))

# 1. Mean income by sex
sex_income <- welfare %>% filter(!is.na(income)) %>%
  group_by(sex) %>%
  summarise(mean_income = mean(income))
sex_income

# 2. Plot
ggplot(data = sex_income, aes(x = sex, y = mean_income)) + geom_col()
sex_income

## Age vs income ----
# 1. Inspect the birth-year variable
class(welfare$birth)
summary(welfare$birth)
qplot(welfare$birth)
# Birth years span 1900-2014; 9999 = don't know / no answer
# 2. Check outliers
summary(welfare$birth)
# 3. Check missing values
table(is.na(welfare$birth)) # result: no outliers or missing values found
# If there had been outliers, they would be recoded to NA like this:
welfare$birth <- ifelse(welfare$birth == 9999, NA, welfare$birth)
table(is.na(welfare$birth))
# 4. Derived variable - age (Korean age as of 2015)
welfare$age <- 2015 - welfare$birth + 1
summary(welfare$age)
qplot(welfare$age)
# 5. Mean income by age
age_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(age) %>%
summarise(mean_income = mean(income))
head(age_income)
ggplot(data = age_income, aes(x=age, y=mean_income)) + geom_line()
### c. Income by age group ----- which age group earns the most? -----------
# Analysis steps:
# - inspect/preprocess variables (age group, income)
# - relate the variables (mean-income table by age group, then plot)
# Derived variable: young (< 30), middle (30-59), old (60+)
# %>% : dplyr functions compose with the pipe
# install.packages("dplyr")
# library(dplyr)
welfare <- welfare %>%
mutate(ageg = ifelse(age < 30, "young",
ifelse(age <= 59, "middle", "old")))
table(welfare$ageg)
qplot(welfare$ageg)
# Mean income by age group
ageg_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(ageg) %>%
summarise(mean_income = mean(income))
ageg_income
ggplot(data=ageg_income, aes(x=ageg, y=mean_income)) + geom_col()
# Same plot with the bars ordered young -> middle -> old
ggplot(data=ageg_income, aes(x=ageg, y=mean_income)) + geom_col() +
scale_x_discrete(limits = c("young", "middle", "old"))
### d. Income by age group and sex ---- does the sex gap vary across age groups? -----------
# - inspect/preprocess variables (age group, sex, income)
# - relate the variables (mean-income table by age group and sex, then plot)
# 1. Mean income by age group and sex
# (missing values and outliers were already handled above)
sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(ageg, sex) %>%
summarise(mean_income = mean(income))
sex_income
# 2. Plot
ggplot(data=sex_income, aes(x=ageg, y=mean_income, fill = sex)) + geom_col() +
scale_x_discrete(limits = c("young", "middle", "old"))
# Stacked bars mix the two sexes; dodge them side by side for clarity
ggplot(data=sex_income, aes(x=ageg, y=mean_income, fill = sex)) + geom_col(position = "dodge") +
scale_x_discrete(limits = c("young", "middle", "old"))
# Now by age (not age group) and sex, as a line plot with one colour per sex
sex_age <- welfare %>%
filter(!is.na(income)) %>%
group_by(age, sex) %>%
summarise(mean_income = mean(income))
head(sex_age)
ggplot(data = sex_age, aes(x=age, y=mean_income, col = sex)) + geom_line()
### e. Income by job --- which job earns the most? --------
# Analyse income by job
# 1. Inspect/preprocess the job variable (income was preprocessed earlier)
# 2. Relate the variables (mean-income table by job, then plots)
class(welfare$code_job)
table(welfare$code_job)
# 2. Preprocess: load the job-code lookup sheet from the codebook
# library(readxl)
list_job <- read_excel("koweps_Codebook.xlsx", col_names = T, sheet = 2)
head(list_job)
dim(list_job) # jobs are classified into 149 categories
# NOTE(review): the argument should be by = "code_job"; "id" is not a
# join argument, so the join silently falls back to common columns
welfare <- left_join(welfare, list_job, id="code_job") # attach job names to welfare
welfare %>%
filter(!is.na(code_job)) %>%
select(code_job, job) %>%
head(10)
# Mean income per job
job_income <- welfare %>%
filter(!is.na(job) & !is.na(income)) %>%
group_by(job) %>%
summarise(mean_income = mean(income))
head(job_income)
# Top 10 jobs by mean income (descending)
top10 <- job_income %>%
arrange(desc(mean_income)) %>%
head(10)
top10
# Horizontal bar chart of the top 10
ggplot(data=top10, aes(x=reorder(job, mean_income), y= mean_income)) +
geom_col() +
coord_flip()
# Bottom 10 jobs by mean income
bottom10 <- job_income %>%
arrange(mean_income) %>%
head(10)
bottom10
# Horizontal bar chart of the bottom 10 (shared y scale up to 850)
ggplot(data=bottom10, aes(x=reorder(job, -mean_income), y= mean_income)) +
geom_col() +
coord_flip() +
ylim(0, 850)
### f. Job frequency by sex - which jobs are most common for each sex? ------
# Analysis steps:
# Inspect/preprocess (sex, job) - relate (job frequency tables by sex, plots)
# Sex and job were preprocessed earlier, so go straight to the relationship
#
# 1. Job frequency tables by sex - top 10 each
# Top 10 jobs among men
job_male <- welfare %>%
filter(!is.na(job) & sex == "male") %>%
group_by(job) %>%
summarise(n=n()) %>%
arrange(desc(n)) %>%
head(10)
job_male
# Top 10 jobs among women
job_female <- welfare %>%
filter(!is.na(job) & sex == "female") %>%
group_by(job) %>%
summarise(n=n()) %>%
arrange(desc(n)) %>%
head(10)
job_female
# Plots
# Top 10 jobs among men
ggplot(data=job_male, aes(x=reorder(job, n), y= n)) +
geom_col() +
coord_flip()
# Top 10 jobs among women
ggplot(data=job_female, aes(x=reorder(job, n), y= n)) +
geom_col() +
coord_flip()
### g. Divorce rate by religion - are religious people less likely to divorce? ------
# Analysis steps:
# Inspect/preprocess (religion, marital status) - relate (divorce-rate table by religion, plot)
# Religion variable: inspect and preprocess
# 1. Inspect
class(welfare$religion)
table(welfare$religion)
# 2. Preprocess
# Label religion as yes/no (1 = has religion)
welfare$religion <- ifelse(welfare$religion == 1, "yes", "no")
table(welfare$religion)
qplot(welfare$religion)
# Marital-status variable: inspect and preprocess
# 1. Inspect
class(welfare$marriage)
table(welfare$marriage)
# 2. Derived variable
# Divorce indicator: 1 = married, 3 = divorced, anything else -> NA
welfare$group_marriage <- ifelse(welfare$marriage == 1, "marriage",
ifelse(welfare$marriage == 3, "divorce", NA))
table(welfare$group_marriage)
table(is.na(welfare$group_marriage))
qplot(welfare$group_marriage)
# Divorce-rate table by religion (counts, group totals, percentages)
religion_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
group_by(religion, group_marriage) %>%
summarise(n=n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
religion_marriage
# Keep only the divorce rows to get the divorce rate per religion group
divorce <- religion_marriage %>%
filter(group_marriage == "divorce") %>%
select(religion, pct)
divorce
# Plot the divorce rate by religion
ggplot(data=divorce, aes(x=religion, y= pct)) + geom_col()
# Divorce rate by age group and religion
# Divorce-rate table by age group
ageg_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
group_by(ageg, group_marriage) %>%
summarise(n=n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
ageg_marriage
# Age-group divorce-rate plot: drop the young group, keep divorce rows
ageg_divorce <- ageg_marriage %>%
filter(ageg != "young" & group_marriage == "divorce") %>%
select(ageg, pct)
ageg_divorce
ggplot(data = ageg_divorce, aes(x = ageg, y = pct))+ geom_col()
# Divorce-rate table by age group AND religion
# Proportions by age group, religion and marital status
ageg_religion_marriage <- welfare %>%
filter(!is.na(group_marriage) & ageg != "young") %>%
group_by(ageg, religion, group_marriage) %>%
summarise(n=n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
ageg_religion_marriage
# Divorce rate by age group and religion
df_divorce <- ageg_religion_marriage %>%
filter(group_marriage == "divorce") %>%
select(ageg, religion, pct)
df_divorce
ggplot(data = df_divorce, aes(x=ageg, y=pct, fill = religion)) +
geom_col(position = "dodge")
## h. Age-group composition by region - which regions have the most elderly? --------
# Inspect/preprocess (region, age group) - relate (age-group proportion table by region, plots)
# Region variable: inspect and preprocess
# 1. Inspect
class(welfare$code_region)
table(welfare$code_region)
# 2. Preprocess
# code_region: 1 Seoul, 2 metro area (Incheon/Gyeonggi), 3 Busan/Gyeongnam/Ulsan,
# 4 Daegu/Gyeongbuk, 5 Daejeon/Chungnam, 6 Gangwon/Chungbuk,
# 7 Gwangju/Jeonnam/Jeonbuk/Jeju
# Region-code lookup table (labels kept in Korean to match the data)
list_region <- data.frame(code_region = c(1:7),
region = c("서울",
"수도권(인천/경기)",
"부산/경남/울산",
"대구/경북",
"대전/충남",
"강원/충북",
"광주/전남/전북/제주도"))
list_region
# Attach region labels
# NOTE(review): "id" is not a join argument (should be by = "code_region");
# the join falls back to the common column, which happens to be code_region
welfare <- left_join(welfare, list_region, id= "code_region")
welfare %>%
select(code_region, region) %>%
head
# Age-group proportions by region
region_ageg <- welfare %>%
group_by(region, ageg) %>%
summarise(n=n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 2))
head(region_ageg)
# Stacked horizontal bar chart of the proportions
ggplot(data=region_ageg, aes(x=region, y= pct, fill = ageg)) +
geom_col() +
coord_flip()
# Regions sorted by share of the old group (ascending)
list_order_old <- region_ageg %>%
filter(ageg == "old") %>%
arrange(pct)
list_order_old
# Region names in that order, used to reorder the axis
order <- list_order_old$region
order
ggplot(data=region_ageg, aes(x=region, y= pct, fill = ageg)) +
geom_col() +
coord_flip() +
scale_x_discrete(limits = order)
# Stack the bars in age-group order
class(region_ageg$ageg)
levels(region_ageg$ageg)
# Convert ageg to a factor with an explicit level order
# NOTE(review): "level" relies on partial matching of factor()'s
# "levels" argument; it works but "levels" would be clearer
region_ageg$ageg <- factor(region_ageg$ageg,
level = c("old", "middle", "young"))
class(region_ageg$ageg)
levels(region_ageg$ageg)
ggplot(data=region_ageg, aes(x=region, y= pct, fill = ageg)) +
geom_col() +
coord_flip() +
scale_x_discrete(limits = order)
|
e0c27a0ec3b649331943b5d4e77ec8b70ffb55ef
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#132.A#48.c#.w#7.s#45.asp/ctrl.e#1.a#3.E#132.A#48.c#.w#7.s#45.asp.R
|
7ab51c8991a9600f040312542e2dc7567d4985c8
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
ctrl.e#1.a#3.E#132.A#48.c#.w#7.s#45.asp.R
|
ef6542f7eb35cdfb804e23c2053dc325 ctrl.e#1.a#3.E#132.A#48.c#.w#7.s#45.asp.qdimacs 7571 22174
|
5165c947059d29db480b2541136cdb8b16232bfa
|
5919e8802f3518f8d485564a153fb120de6444b7
|
/R/app.R
|
13a16e9927927d6d3d53b9b9f7b364758030d302
|
[] |
no_license
|
tonyhammond/testR
|
735d224fd0d2b976a586149d1f67701fb9d4bf6b
|
e6440814d0044927d09f11f9989041da441d796a
|
refs/heads/master
| 2020-04-16T03:55:48.730036
| 2019-01-25T13:35:26
| 2019-01-25T13:35:26
| 165,250,024
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 417
|
r
|
app.R
|
library(shiny)
ui <- fluidPage(
sliderInput(inputId="pop", "City Population", value=100000, min=5000, max=1000000),
plotOutput(outputId="plot")
)
server <- function(input, output) {
cities <- read.csv("/Users/tony/cities.csv")
output$plot <- renderPlot({
centres <- cities[cities$pop >= input$pop,];
plot(centres$lon, centres$lat, xlab="Longitude", ylab="Latitude" )
})
}
shinyApp(ui=ui, server=server)
|
ba67a19b7b6134907a6bc1d88b600c455edde3f4
|
c8137545f7d54e7f6b3a0b540be0a849f5237694
|
/R/corporal_filter.R
|
d5ce1fc7dbed36afda3d5ecbf02b94aeb1c9fec6
|
[] |
no_license
|
ml271/MyUtilities
|
0f39a67cf72cfd4731e0616b6acdd7d4c87920c9
|
36ef3a92a2a74d868dd553872901fc3306a9dd5a
|
refs/heads/main
| 2023-06-03T01:16:10.145747
| 2021-06-17T15:21:57
| 2021-06-17T15:21:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
corporal_filter.R
|
########################################################################################################################
#' Wrapper for dplyr filter which can pass NULL values through
#'
#' @seealso \link{dplyr::filter}
#' @export
#'
corporal_filter <- function(.data, ..., .preserve = FALSE) {
if (!is.null(.data)) {
return(filter(.data, ..., .preserve = .preserve))
} else {
return(NULL)
}
}
|
ea4edd9afd550333ae33de18737857fa2af9410d
|
2fb65d442efadbc3a1db41fcf25fed8958c4e04f
|
/man/read.fs.mgh.Rd
|
a30a4104075410e250d572093d022f4dd027cff8
|
[
"MIT"
] |
permissive
|
dfsp-spirit/freesurferformats
|
8f507d8b82aff7c34b12e9182893007064e373b9
|
6cf9572f46608b7bb53887edd10dfed10e16e13d
|
refs/heads/master
| 2023-07-25T00:28:09.021237
| 2023-07-19T07:29:07
| 2023-07-19T07:29:07
| 203,574,524
| 22
| 3
|
NOASSERTION
| 2023-07-19T07:29:09
| 2019-08-21T11:57:16
|
R
|
UTF-8
|
R
| false
| true
| 3,999
|
rd
|
read.fs.mgh.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_fs_mgh.R
\name{read.fs.mgh}
\alias{read.fs.mgh}
\title{Read file in FreeSurfer MGH or MGZ format}
\usage{
read.fs.mgh(
filepath,
is_gzipped = "AUTO",
flatten = FALSE,
with_header = FALSE,
drop_empty_dims = FALSE
)
}
\arguments{
\item{filepath}{string. Full path to the input MGZ or MGH file.}
\item{is_gzipped}{a logical value or the string 'AUTO'. Whether to treat the input file as gzipped, i.e., MGZ instead of MGH format. Defaults to 'AUTO', which tries to determine this from the last three characters of the 'filepath' parameter. Files with extensions 'mgz' and '.gz' (in arbitrary case) are treated as MGZ format, all other files are treated as MGH. In the special case that 'filepath' has less than three characters, MGH is assumed.}
\item{flatten}{logical. Whether to flatten the return volume to a 1D vector. Useful if you know that this file contains 1D morphometry data.}
\item{with_header}{logical. Whether to return the header as well. If TRUE, return an instance of class `fs.volume` for data with at least 3 dimensions, a named list with entries "data" and "header". The latter is another named list which contains the header data. These header entries exist: "dtype": int, one of: 0=MRI_UCHAR; 1=MRI_INT; 3=MRI_FLOAT; 4=MRI_SHORT. "voldim": integer vector. The volume (=data) dimensions. E.g., c(256, 256, 256, 1). These header entries may exist: "vox2ras_matrix" (exists if "ras_good_flag" is 1), "mr_params" (exists if "has_mr_params" is 1). See the `mghheader.*` functions, like \code{\link[freesurferformats]{mghheader.vox2ras.tkreg}}, to compute more information from the header fields.}
\item{drop_empty_dims}{logical, whether to drop empty dimensions of the returned data}
}
\value{
data, multi-dimensional array. The brain imaging data, one value per voxel. The data type and the dimensions depend on the data in the file, they are read from the header. If the parameter flatten is `TRUE`, a numeric vector is returned instead. Note: The return value changes if the parameter with_header is `TRUE`, see parameter description.
}
\description{
Read multi-dimensional brain imaging data from a file in FreeSurfer binary MGH or MGZ format. The MGZ format is just a gzipped version of the MGH format. For a subject (MRI image pre-processed with FreeSurfer) named 'bert', an example file would be 'bert/mri/T1.mgz', which contains a 3D brain scan of bert.
}
\examples{
brain_image = system.file("extdata", "brain.mgz",
package = "freesurferformats",
mustWork = TRUE);
vd = read.fs.mgh(brain_image);
cat(sprintf("Read voxel data with dimensions \%s. Values: min=\%d, mean=\%f, max=\%d.\n",
paste(dim(vd), collapse = ' '), min(vd), mean(vd), max(vd)));
# Read it again with full header data:
vdh = read.fs.mgh(brain_image, with_header = TRUE);
# Use the vox2ras matrix from the header to compute RAS coordinates at CRS voxel (0, 0, 0):
vdh$header$vox2ras_matrix \%*\% c(0,0,0,1);
}
\seealso{
To derive more information from the header, see the `mghheader.*` functions, like \code{\link[freesurferformats]{mghheader.vox2ras.tkreg}}.
Other morphometry functions:
\code{\link{fs.get.morph.file.ext.for.format}()},
\code{\link{fs.get.morph.file.format.from.filename}()},
\code{\link{read.fs.curv}()},
\code{\link{read.fs.morph.gii}()},
\code{\link{read.fs.morph}()},
\code{\link{read.fs.volume}()},
\code{\link{read.fs.weight}()},
\code{\link{write.fs.curv}()},
\code{\link{write.fs.label.gii}()},
\code{\link{write.fs.mgh}()},
\code{\link{write.fs.morph.asc}()},
\code{\link{write.fs.morph.gii}()},
\code{\link{write.fs.morph.ni1}()},
\code{\link{write.fs.morph.ni2}()},
\code{\link{write.fs.morph.smp}()},
\code{\link{write.fs.morph.txt}()},
\code{\link{write.fs.morph}()},
\code{\link{write.fs.weight.asc}()},
\code{\link{write.fs.weight}()}
}
\concept{morphometry functions}
|
88f21eedeed56158b85c3939d6f445f5f8b7a698
|
c88a5cdf24325b9fff04127422ebe7e9e3672fda
|
/compile_package.R
|
1f23578067e250c70da608050867898761d93a09
|
[] |
no_license
|
ndiquattro/quatts
|
838b88889b42db1ab0b1d34079851ab166f706b8
|
1b508a6c6bcb018c84120ed4f572c32c3ffeaf32
|
refs/heads/master
| 2016-09-08T00:33:45.065197
| 2015-11-20T01:27:03
| 2015-11-20T01:27:03
| 24,732,948
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 260
|
r
|
compile_package.R
|
# Load Libraries
library(devtools)
library(roxygen2)
# Set working directory
setwd("C://Code")
# Create folder
create("quatt1")
# Edit description file for contact info
# Process documentation
setwd("/Users/nickdiquattro/Documents/Code/quatts")
document()
|
4227d50c63961d6a2d893bb7a6df0bcee60eb335
|
39d0a947a50081a352b600a54cf52555bcff79d0
|
/time to reproduction after war.R
|
9beccd28b76fa749204e5d339ef122ffbdc1e91a
|
[] |
no_license
|
robertlynch66/Lotta-LRS
|
498d6ab06248322983421305f197b3a7934b6f93
|
d72b5aad362317d661a3aebd27bff05026f9a5f4
|
refs/heads/master
| 2021-06-19T21:06:24.602263
| 2019-04-20T04:12:01
| 2019-04-20T04:12:01
| 135,286,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,311
|
r
|
time to reproduction after war.R
|
path <- "C:/Users/rofrly/Dropbox/Github/data files/"
#path <- "C:/Users/robert/Dropbox/Github/data files/"
file2<- "person_data.rds"
p <- readRDS(paste0(path, file2))
# load children data
file3 <- "children.rds"
children <- readRDS("C:/Users/rofrly/Dropbox/Github/Lottas_2/children.rds")
#children <- readRDS("/home/robert/Dropbox/Github/Lottas_2/children.rds")
library(dplyr)
library(tidyr)
library(rethinking)
library(lme4)
library(plyr)
p <- p %>% filter (sex==0)
p <- p %>% filter (birthyear < 1940 & age_at_first_birth > 12 & age_at_first_birth < 51 | is.na(age_at_first_birth))#80708
p<- p %>% drop_na(first_child_yob) # 60129
p$martta<- as.numeric(p$martta)
p$lotta<- as.numeric(p$lotta)
p$never_married <- ifelse(is.na(p$spouse_id), 1, 0)
p$age_1945 <- 1945-p$birthyear
p$birth_cat <- ifelse(p$first_child_yob<1944, 0, 1)
# 28992 started before war ends and 31137 started having kids after 1944
p <- p %>% select ("id","lotta","birthyear","agriculture","education",
"age_at_first_birth","age_1945","birth_cat","kids","professionid","birthplaceid")
p <- p[complete.cases(p), ] # 48436
# 22878 started before war ends and 25558 started having kids after 1944
children1 <- children %>% select ("id","birthYear","primaryParentId")
children2 <- children %>% select ("id","birthYear","spouseParentId")
colnames(children1)[3] <- "parentid"
colnames(children2)[3] <- "parentid"
# put data in long form
# 1) stack children so we have all ids
children<- bind_rows(children1,children2)
rm(children1, children2)
#make sure the individual's birth year column and death year/censored year column are numeric
#then make a column for 'last appearance'
# you can play around with the ages but now its just making sure they were at least 40 (and
#had completed reproduction) when they were interviewed
p$birth_plus_13 <- p$birthyear+13
p$lastapp <- ifelse (p$birthyear<1925, p$birthyear+45,1970)
## now make cut off when you want (e.g. age 50 or ages 13-50)
p$year <- mapply(seq, p$birth_plus_13, p$lastapp, SIMPLIFY = FALSE)
#Creates a
#sequence for each row,
#so if birth year is 1850 and death year 1900, the cell says 1850:1900.
#Simplify makes a matrix, but we want to keep a dataframe
#unnest creates a new row for each different value within a "cell" -
#this is taken from the 'year' column created above
p_long <- unnest(p, year) #1550622
# Now all women are censored either at age 45 or at the year of their interview
# NEXT link their kids year of birth to their 'year' by id=parentid
children <- children %>% select ("birthYear","parentid")
children$id <- 1
children<- children %>% drop_na(birthYear)
children<- children %>% drop_na(parentid)
twins <- p_long %>% left_join (children, by=c("id"="parentid","year"="birthYear"))
colnames(twins)[15] <- "reproduced"
twins$reproduced[is.na(twins$reproduced)] <- 0
twins$age <- twins$year-twins$birthyear
rm(p_long)
# select data frame columns
twins <- twins %>% select ("id","lotta","education","agriculture","year",
"reproduced","age","age_1945","age_at_first_birth","birth_cat","kids","professionid","birthplaceid")
# find duplicate data
#dupes<-children[which(duplicated(children[,c('parentid','birthYear')])==T),]
# make p_long_3 no duplicates for year and id
no_twins <- twins[!duplicated(twins[,c("id","year")]),]
### here are the key lines
## now choose the lowest year within each id category that has the first year where
# reproduce = 1
# this makes a years to reproduction after 1944 variable- basically this is the time
# that women waited after the war to have a kid
# add a gave birth in 1943 or 1944 dummy variable
dummy <- twins %>% arrange(id) %>% group_by (id) %>%
filter (reproduced==1 & year > 1942 & year<1945) %>% mutate (repro_within_2_years=1)
dummy <- dummy[!duplicated(dummy[,c("id")]),]
dummy <- dummy %>% select ("id","repro_within_2_years")
# # make a birth rate category
birthrate <- twins %>% arrange(id) %>% group_by (id) %>%
filter (reproduced==1 & year>1944) %>% mutate (time_to_repro=age-age_1945)
birthrate_2 <- birthrate %>% group_by (id) %>%
dplyr::summarise(maximum= max(time_to_repro))
birthrate <- birthrate %>% left_join (birthrate_2, by="id")
birthrate_2 <- birthrate %>% group_by (id) %>%
dplyr::summarise(kids_after_war= n())
birthrate <- birthrate %>% left_join (birthrate_2, by="id")
rm(birthrate_2)
# jouin reproduced within past 2 years to main table
birthrate <- birthrate %>% left_join (dummy, by="id")
rm(dummy)
birthrate$repro_within_2_years[is.na(birthrate$repro_within_2_years)] <- 0
birthrate$post_war_repro_rate <- birthrate$maximum/birthrate$kids_after_war
birthrate$kids_before_war <- birthrate$kids-birthrate$kids_after_war
# remove duplicate ids for this data frame
birthrate <- birthrate[!duplicated(birthrate[,c("id")]),]
### limit ages to 17 to 40 after the war #27490 obs
birthrate$age_sq <- birthrate$age_1945*birthrate$age_1945
birthrate <- birthrate[which(birthrate$age_1945>12 & birthrate$age_1945<46),] #32045
#rescale age_1945
birthrate$age_1945 <- birthrate$age_1945-min(birthrate$age_1945)
# Models
model1 <-glm(kids_after_war ~ lotta*age_1945+ birth_cat+education + agriculture+repro_within_2_years,
data = birthrate, family = poisson)
summary(model1)
m1 <- drop1(model1)
m1
model2 <-glm(time_to_repro ~ lotta*age_1945+birth_cat+ education + agriculture+repro_within_2_years,
data = birthrate, family = poisson)
summary(model2)
m1 <- drop1(model2)
m1
# with random effects
library(lme4)
model3 <-glmer(kids_after_war ~ lotta*age_1945+ birth_cat+education + agriculture+repro_within_2_years+
(1|professionid),
data = birthrate, family = "poisson",
control = glmerControl(optimizer="nloptwrap", optCtrl=list(maxfun=100000)))
summary(model3)
m1 <- drop1(model3)
m1
model4 <-glmer(time_to_repro ~ lotta*age_1945+ birth_cat+education + agriculture+repro_within_2_years+
(1|birthplaceid),
data = birthrate, family = "poisson",
control = glmerControl(optimizer="nloptwrap", optCtrl=list(maxfun=100000)))
summary(model4)
m1 <- drop1(model4)
m1
|
fb2a6015465d0693a131791fa32b078d8a7cf4dd
|
a4f76c4089643bfd773c64b82c7929ef16d1c85d
|
/functions.R
|
699db648ac4c212dbb9768613a7e2288ce2340d2
|
[
"MIT"
] |
permissive
|
duncanmorgan/EoE_SciImmunol
|
53ba1e31f88b7d9e7857a294db9598aacdf3fec2
|
b44eefe22cea9a86c84069953408197475209570
|
refs/heads/main
| 2023-06-07T08:16:53.835511
| 2021-06-30T18:23:07
| 2021-06-30T18:23:07
| 380,842,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,153
|
r
|
functions.R
|
# these are general purpose functions. source this script at the beginning of every session
setwd("L:/Duncan/eoepaper_final")
reticulate::use_python('C:/Users/dmorgan/AppData/Local/Continuum/anaconda3/python.exe', required = TRUE)
library(Seurat)
library(RColorBrewer)
library(ggplot2)
library(feather)
library(dplyr)
library(reshape2)
library(viridis)
library(tidyr)
library(pheatmap)
# import a dataset exported from python with the seuratExport function
pyImport = function(name, rawfile = 'cellsAll.feather') {
# load normalized data
normdata = read_feather(paste0(name, '.feather')) %>% as.data.frame()
# load raw data
print('reading in raw data')
rawdata = read_feather(rawfile) %>% as.data.frame()
# transfer genes to rownames and drop columns
rownames(rawdata) = rawdata[,1]
rawdata = rawdata[,-1]
# subset raw data
rawdata = rawdata[,colnames(rawdata) %in% colnames(normdata)]
# read metadata
metadata = read.csv(paste0(name, '_meta.txt'), row.names = 1, stringsAsFactors = FALSE)
# create and return seurat object
seurat = CreateSeuratObject(rawdata, min.cells = 5)
seurat@meta.data = metadata
seurat
}
# standard Seurat processing
seuratProcess = function(seurat) {
seurat = NormalizeData(seurat)
seurat = FindVariableGenes(seurat, do.plot= FALSE)
seurat = ScaleData(seurat, genes.use =seurat@var.genes, vars.to.regress = c('n_genes'), model.use = 'poisson')
seurat = RunPCA(seurat, dims.use = seurat@var.genes, do.print = FALSE)
seurat = RunUMAP(seurat, dims.use = 1:20)
seurat@meta.data$UMAP1 = seurat@dr$umap@cell.embeddings[,1]
seurat@meta.data$UMAP2 = seurat@dr$umap@cell.embeddings[,2]
seurat
}
# take the files from the exportSeurat function, assemble them into a seurat object, and process
pyToSeurat = function(name, rawfile = 'cellsAll.feather') {
print('reading in data')
seurat = pyImport(name, rawfile)
print('processing data')
seurat = seuratProcess(seurat)
print('converting to sparse format')
seurat = MakeSparse(seurat)
seurat
}
# add UMAP coordinates to seurat@meta.data
addUMAP = function(seurat) {
seurat@meta.data$UMAP1 = seurat@dr$umap@cell.embeddings[,1]
seurat@meta.data$UMAP2 = seurat@dr$umap@cell.embeddings[,2]
seurat
}
# randomly shuffle the rows in a dataframe (used primarily for plotting)
shuffle = function(data) {
set.seed(1)
data[sample(rownames(data), length(rownames(data))),]
}
# create FeaturePlots using the non-default Seurat color scheme
geneplot= function(seurat, genes) {
plots = c()
for (curr in genes){
seurat@meta.data$gene = seurat@data[curr, rownames(seurat@meta.data)]
plots[[curr]] = ggplot(shuffle(seurat@meta.data), aes(x = UMAP1, y = UMAP2, color = gene)) + geom_point(size = .8) +
scale_color_viridis_c() + labs(title = curr) + guides(color = FALSE) + theme(axis.title = element_blank(), axis.text = element_blank()) + remove_grid
}
gg = plot_grid(plotlist = plots)
gg
}
set.seed(1)
pct = function(x) {
sum(x >0)/length(x)
}
meanexp = function(x) {
mean(x)
}
# import plotting elements
source('figure_parameters.R')
|
f6e5a8208205989ca9ddb0041c02e09377e56f28
|
e8403a0661de44e34375375bbb2ada6409c1a036
|
/man/get_song_media.Rd
|
2f29aa22071b5e1200e632b6f576d0502c198244
|
[] |
no_license
|
cran/rgenius
|
d01df4b661c5de486f28fd85f61ab989ec1c8bf3
|
e670fa1724ccb3dcce25fc89eb29cc4f780d9162
|
refs/heads/master
| 2022-06-21T17:39:08.753648
| 2020-05-11T14:10:02
| 2020-05-11T14:10:02
| 263,169,521
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 810
|
rd
|
get_song_media.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_song_media.R
\name{get_song_media}
\alias{get_song_media}
\title{Get Song Media}
\usage{
get_song_media(song_id, access_token = Sys.getenv("GENIUS_API_TOKEN"))
}
\arguments{
\item{song_id}{Genius Song ID}
\item{access_token}{Genius Web API token. Defaults to Sys.getenv('GENIUS_API_TOKEN').}
}
\value{
Returns a tibble with the media's information
}
\description{
This function gets 'genius' media information
}
\details{
To use this function, you need to first use 'search_genius_song' to get the song ID.
This function returns the provider (YouTube, Spotify ..), the type (audio, video) and a link for the content.
}
\examples{
\dontrun{
get_song_media('55882')
}
}
\author{
Alberto Almuiña
}
|
cdd5645891b388ebaf316c77440bbba82f7acb07
|
1ced37e5243dfc53d7e3631408d0f55b9b109b11
|
/plot1.R
|
c29af447dd3569149506bc305859664d209ac72a
|
[] |
no_license
|
sjkim76/ExData_Plotting1
|
b15652427703424b8439f3d7baeba53b8a3a897c
|
77f79c67acbfffce9f05346aa5db97b57c8aaf9d
|
refs/heads/master
| 2022-09-11T11:20:19.483799
| 2020-05-28T09:43:10
| 2020-05-28T09:43:10
| 267,216,449
| 0
| 0
| null | 2020-05-27T04:01:00
| 2020-05-27T04:00:59
| null |
UTF-8
|
R
| false
| false
| 547
|
r
|
plot1.R
|
#read raw data
rawdata<-read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings = "?")
#subsetting data between 2007-02-01 and 2007-02-02
housedata<-rawdata[which(rawdata$Date %in% c("1/2/2007","2/2/2007")),]
#type conversion
housedata$Global_active_power<-as.numeric(housedata$Global_active_power)
#hist plotting
hist(housedata$Global_active_power,col="red",xlab="Global Active Power (kilowatts)", ylab="Frequency"
,main="Global Active Power",cex.axis=0.8)
dev.copy(png,"plot1.png",width=480,height=480)
dev.off()
|
79e39f1f925071e5f13ca023ad0dfe024a3af632
|
2548f404612eae89a0b16fbfa2ae163fef6cfba6
|
/man/data_sim_rec.Rd
|
db2372df250856385f2670555a5fbb67d7c0a09b
|
[] |
no_license
|
cran/CopulaCenR
|
67d0d170eb1e90d159780474d7244c5072453817
|
51a24f45b85af68884996fa2ce7b58edb5ff8f06
|
refs/heads/master
| 2022-12-20T18:33:28.414306
| 2022-12-16T23:40:02
| 2022-12-16T23:40:02
| 173,922,078
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 727
|
rd
|
data_sim_rec.Rd
|
\name{data_sim_rec}
\alias{data_sim_rec}
\docType{data}
\title{
data_sim_rec
}
\description{
A simulated real dataset of bivariate recurrent events data with 500 subjects and 6 columns.
}
\usage{data("data_sim_rec")}
\format{
A data frame with 500 subjects on the following 6 variables.
\describe{
\item{\code{id}}{subject id}
\item{\code{gap1}}{observed time of the first gap time}
\item{\code{status1}}{censoring indicator of the first event; 1=exactly observed, 0=right censor.}
\item{\code{gap2}}{observed time of the second gap time}
\item{\code{status2}}{censoring indicator of the second event; 1=exactly observed, 0=right censor.}
\item{\code{d}}{cluster size}
}
}
\keyword{datasets}
|
45163f05806a317ed9ac6b2d3e9a2b715415982c
|
0fee2b16a6182b4d7be11b07f310705f17a11dd7
|
/plot1.R
|
cff71a347e837a917f049b360a7da6e7afddb769
|
[] |
no_license
|
mosalov/ExData_Plotting1
|
8cdf6924ae617986053e0aef961d9c3a3add2e7b
|
23c78f8a11a461911dd9d22f0db7ee27d8ba89c6
|
refs/heads/master
| 2021-01-23T17:09:15.632805
| 2014-10-10T19:08:02
| 2014-10-10T19:08:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 590
|
r
|
plot1.R
|
#Downloading a file if there is none
urlname <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
filename <- "data.zip"
datafilename <- "household_power_consumption.txt"
if(!file.exists(filename))
{
download.file(url = urlname, destfile = filename)
}
#Unzipping the file
unzip(filename)
data <- read.table(file = datafilename, na.strings = c("?"), sep = ";", skip = 66637, nrows = 2880)
#Constructing a plot 1
png(filename = "plot1.png")
hist(data$V3, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
1e623f30492e15bc184143095fd57a1bff5eea2f
|
4eb1cc06d9847e34916d04fa2c7fcfcc079e931e
|
/RSession_Hosmer_Linear_Final.R
|
94c4470538c37a5dbeba573d34318f5285e20a7f
|
[] |
no_license
|
Tchouanga12/linear_regression_r2
|
4b2fcb719573c802ae0f1bf20074125d5421a447
|
b7ba7999840728584f8b22b86e9fcad14238ff60
|
refs/heads/main
| 2023-07-03T22:01:18.658969
| 2021-08-10T16:56:12
| 2021-08-10T16:56:12
| 394,716,769
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,608
|
r
|
RSession_Hosmer_Linear_Final.R
|
#Importation des donnees
TableHosmer<-read.table('C:/Users/LUCION/Documents/Nguefack_Lectures/ISJ/MyData/DataHosmer.txt', header = TRUE)
attach(TableHosmer)
head(TableHosmer)
#ID AGE LWT RACE SMOKE PTL HT UI FVT BWT BWTLow FVT1 PTL1 LWTkg
hist(TableHosmer$BWT,proba=TRUE,xlab="Poids de naissance (BWT)", ylab="Densite",main="Repartition des poids de naissance", ylim=c(0,0.0006))
BWTordonne<-TableHosmer$BWT[order(TableHosmer$BWT)]
lines(BWTordonne, dnorm(BWTordonne, mean=mean(BWTordonne), sd=sd(BWTordonne)), col="red")
savePlot("C:/Users/LUCION/Documents/Nguefack_Lectures/ISJ/Models Multivaries/HistogrammeBWT.pdf",type="pdf")
%Regression simple
plot(TableHosmer$LWT,TableHosmer$BWT, xlab="Poids de la mere (livre)", ylab="Poids de naissance (gramme)")
RegLin1<-lm(BWT ~ LWT,data=TableHosmer)
summary(RegLin1)
plot(RegLin1$res,ylab="Residus",main="Graphique des residus")
plot(RegLin1$fit,RegLin1$res,xlab="Estimation",ylab="Residus")
abline(h=0)
plot(RegLin1$fit,abs(RegLin1$res),xlab="Estimation",ylab="|Residus|")
summary(lm(abs(RegLin1$res)~ RegLin1$fit))
####Evaluer la normalite####
qqnorm(RegLin1$res,ylab="Raw Residus Brutes")
qqline(RegLin1$res)
###########Model Selection#############
###Model Selection (First put all variables)
RegLinFull <- lm(BWT~ AGE + LWT + factor(RACE) + SMOKE + HT + UI + FVT1 + PTL1, data=TableHosmer)
step(RegLinFull)
sRegLinFull<-step(RegLinFull)
sRegLinFull$anova
sRegLinFull
library(MASS)
stepAIC(RegLinFull)
sbackRegLinFull<-step(RegLinFull, direction = "backward")
sbackRegLinFull$anova
sforwardRegLinFull<-step(RegLinFull, direction = "forward")
sforwardRegLinFull$anova
sbothRegLinFull<-step(RegLinFull, direction = "both")
sbothRegLinFull$anova
####Backward####
RegLinFull <- lm(BWT~ AGE + LWT + factor(RACE) + SMOKE + HT + UI + FVT1 + PTL1, data=TableHosmer)
summary(RegLinFull)
RegLinFull<- update(RegLinFull, . ~ . - FVT1)
summary(RegLinFull)
RegLinFull<- update(RegLinFull, . ~ . - AGE)
summary(RegLinFull)
RegLinFull<- update(RegLinFull, . ~ . - PTL1)
summary(RegLinFull)
plot(TableHosmer$LWT,TableHosmer$BWT,xlab="Poids de la mere (LWT, en livres)", ylab="Poids de naissance (BWT, en grammes)")
abline(RegLin1,lwd=2,lty=2,col="red")
legend("topright",c(expression(BWT==hat(beta)[0] + hat(beta)[1]*LWT), "observations"),lty=c(2,0),pch=c(NA,1),col=c("red","black"))
qqnorm(rstudent(RegLin1),ylab="Studentized residuals")
abline(0,1)
hist(RegLin1$res,10)
boxplot(RegLin1$res,main="Boxplot of savings residuals")
RegLin2<-lm(BWT ~ SMOKE,data=TableHosmer)
summary(RegLin2)
predict(RegLin2,newdata=data.frame(SMOKE<-c(0,1)), interval="confidence")
confint(RegLin2)
Regression multiplr
#################################
RegLin3<-lm(BWT ~ factor(RACE),data=TableHosmer)
summary(RegLin3)
RegLin4<-lm(BWT ~ LWT + UI + HT + factor(RACE),data=TableHosmer)
summary(RegLin4)
RegLin4<-lm(BWT ~ LWT + UI + HT + factor(RACE),data=TableHosmer)
RegLin4bis<-lm(BWT ~ LWT + UI + HT,data=TableHosmer)
anova(RegLin4bis,RegLin4)
#########Exemple de test d'interaction ou de modication d'effet.
RegLin5<-lm(BWT ~ AGE+SMOKE+SMOKE:AGE,data=TableHosmer)
summary(RegLin5)
confint(RegLin5)
SMOKE1<-1-TableHosmer$SMOKE
confint(lm(BWT ~ AGE+SMOKE1+SMOKE1:AGE,data=TableHosmer))
Exemples d'analyse des residus
RegLin6<-lm(BWT ~ SMOKE+AGE+LWT+factor(RACE)+UI+HT+AGE:SMOKE, data=TableHosmer)
summary(RegLin6)
ResidusOrd<-RegLin6$residuals[order(RegLin6$residuals)]
hist(ResidusOrd,proba=TRUE,main="Histogramme des residus", ylab="Densite",xlab="Residus")
lines(ResidusOrd, dnorm(ResidusOrd, sd=sd(ResidusOrd)),col="red")
qqnorm(RegLin6$residuals)
plot(RegLin6$fitted.values,RegLin6$residuals, ylab=expression(e[i]==Y[i]-hat(Y)[i]), xlab=expression(hat(Y)[i]),cex.lab=1.2)
abline(h=0,lty=2,lwd=2,col="red")
#######Nuages de points des residus en fonction de chaque variable explicative:
par(mfrow=c(2,3))
res<-RegLin6$residuals
plot(res ~ TableHosmer$SMOKE,xlab="SMOKE",ylab="residus")
abline(h=0,lty=2,lwd=2,col="red")
plot(res ~ TableHosmer$AGE,xlab="AGE",ylab="residus")
abline(h=0,lty=2,lwd=2,col="red")
plot(res ~ TableHosmer$LWT,xlab="LWT",ylab="residus")
abline(h=0,lty=2,lwd=2,col="red")
plot(res ~ TableHosmer$RACE,xlab="RACE",ylab="residus")
abline(h=0,lty=2,lwd=2,col="red")
plot(res~TableHosmer$UI,xlab="UI",ylab="residus")
abline(h=0,lty=2,lwd=2,col="red")
plot(res TableHosmer$HT,xlab="HT",ylab="residus")
abline(h=0,lty=2,lwd=2,col="red")
|
1906209d80826f43ce89b4a1a1afd25f67953e27
|
44b136efca3a53ebb400f36adb4e999a1e6d33f3
|
/man/migrateConceptSynonym.Rd
|
e393544b9f1553226e245f88a67f5768e4949d92
|
[] |
no_license
|
meerapatelmd/HemOncExt
|
8a947f9ec144f01255a3fc1b6c1aed380ead1bc9
|
226a3a0f0ee4d19d3d0bb07b04a69e2c118f58c3
|
refs/heads/master
| 2023-01-21T14:08:27.570211
| 2020-11-29T21:42:02
| 2020-11-29T21:42:02
| 281,737,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 505
|
rd
|
migrateConceptSynonym.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/migrateConceptSynonym.R
\name{migrateConceptSynonym}
\alias{migrateConceptSynonym}
\title{Migrate HemOnc and RxNorm Vocabularies Synonyms}
\usage{
migrateConceptSynonym(conn, source_schema)
}
\arguments{
\item{source_schema}{The schema where the main OMOP vocabularies are loaded.}
}
\description{
This function executes on the condition that there are zero rows in the concept_ancestor table in the hemonc_extension schema.
}
|
0661bbb75931d4bc495b62adf5e562c58a0df7de
|
aef88baf27939e10ed27c7ee422631273d7d63c0
|
/R/Math.R
|
38657576da4bccfb034e3dd2038af736616205a8
|
[] |
no_license
|
Antoniahg/relectro
|
c7579f5a12d20013d4afc746844725df9d3fcd2e
|
2d8027c28fcd5ce85ac7796d7329f1546227b324
|
refs/heads/master
| 2020-04-15T11:33:29.665312
| 2018-05-25T15:48:11
| 2018-05-25T15:48:11
| 164,635,947
| 1
| 0
| null | 2019-01-08T11:38:33
| 2019-01-08T11:38:33
| null |
UTF-8
|
R
| false
| false
| 7,942
|
r
|
Math.R
|
#' Get the standard error of the mean
#'
#' The standard deviation divided by the square root of n
#'
#' @param x Numeric vector
#' @return Numeric, standard error of the mean
sem<-function(x){sd(x,na.rm=T)/sqrt(length(x))}
#' Get the mode of a distribution
#'
#' This get you the value with the highest frequency in a vector
#'
#' @param x Numeric vector
#' @return The value in the vector with the highest frequency
modeRelectro <- function(x) {
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
#' Returns possible pairs from one or two vectors of values
#'
#' If only one vector of values is given, all unique pairs within these values are returned.
#' For example 2-3 is there but not 3-2.
#'
#' If two vectors of values are given, all possible combinations of the two vectors are returned.
#' For example 2-2 if both vectors contain number 2, 2-3 and 3-2 would be included if 2 and 3 are in both vectors.
#'
#' @param cl1 Numeric vector containing a vector of values
#' @param cl2 Optional argument containing a second vector of values
#' @param excludeOneNumberPair Logical indicating whether to include pairs with the same number when two vectors are used (e.g 2-2).
#' @return A data.frame containing the pairs
#' @examples makePairs(cl1=1:10)
makePairs<-function(cl1="",cl2=NULL,excludeOneNumberPair=TRUE){
if(is.null(cl2)){
m<-combn(cl1,m=2)
df<-data.frame(Var1=m[1,],Var2=m[2,])
}
else{
df<-expand.grid(cl1,cl2)
if(excludeOneNumberPair==TRUE)
{
df<-df[which(df[,1]!=df[,2]),]
}
}
return(df)
}
#' Smooth the values in a numeric vector using a Gaussian kernel
#'
#' The values at -1.0 are considered invalid by default and are not used or changed.
#' Set the value of the argument type to "degrees" or "circular" if the data are circular.
#' For example, if the first and last data of the vector should be considered next to each other.
#'
#' @param x Numeric vector
#' @param sd The standard deviation of the Gaussian kernel used to smooth
#' @param invalid Numeric indicating which value should be treated as NA, by default -1.0.
#' The value should be a numeric.
#' @param type character vector indicating the type of data to smooth
#' Valid values are linear, circular, degrees.
#' circular assumes that the vector is, i.e. the first and last values are adjacent.
#' degrees assumes that the vector contains degrees (0=360)
#' @return Numeric vector of smoothed values, same length as x
#' @examples smoothGaussian(x=c(1:10,9:1),sd=2,invalid=-1.0)
smoothGaussian <- function(x, sd = 2, invalid = -1.0, type = "linear")
{
  # Bug fix: the original used a bare `return`, which in R evaluates to
  # the `return` function itself and does NOT exit; the early exits below
  # now actually return the input unchanged.
  if (length(x) == 0)
    return(x)
  # sd == 0 means no smoothing: the input is already the answer
  if (sd == 0)
    return(x)
  if (sd < 0)
    stop(paste("sd is smaller than 0:", sd))
  # The C wrappers expect doubles
  if (is.integer(x)) {
    x <- as.numeric(x)
  }
  # is.numeric() accepts both integer and double sentinels
  if (!is.numeric(invalid))
    stop(paste("invalid should be a numeric"))
  if (type == "linear") {
    results <- .Call("smooth_double_gaussian_cwrap",
                     x, length(x), sd, invalid)
  } else if (type == "degrees") {
    if (any(x > 360))
      stop(paste("x values larger than 360"))
    results <- .Call("smooth_double_gaussian_degrees_cwrap",
                     x, length(x), sd, invalid)
  } else if (type == "circular") {
    results <- .Call("smooth_double_gaussian_circular_cwrap",
                     x, length(x), sd, invalid)
  } else {
    # Bug fix: unknown types previously left `results` undefined,
    # producing a confusing "object not found" error
    stop(paste("unknown type:", type))
  }
  return(results)
}
#' Shift values in a vector by a certain number of places and in a given direction
#'
#' The vector is wrapped around so that values that would end up after the end
#' of the vector are placed at the beginning.
#'
#' @param v A vector
#' @param places Number of places the values will be moved.
#' @param dir Direction of the shift, values should be right or left (or r or l).
#' @return The rotated vector
#' @examples shift(v=1:10, place=2, dir="r")
shift <- function (v, places, dir = "right")
{
  # Only the first letter of the direction matters; a negative
  # count reverses the requested direction.
  d <- substring(dir, 1, 1)
  if (d == "r" && places < 0) {
    d <- "l"
  } else if (d == "l" && places < 0) {
    d <- "r"
  }
  n <- length(v)
  p <- abs(places)
  if (p == 0) {
    return(v)
  }
  if (d == "r") {
    # Last p elements wrap around to the front
    c(v[(n - p + 1):n], v[1:(n - p)])
  } else {
    # First p elements wrap around to the back
    c(v[(p + 1):n], v[1:p])
  }
}
#' Calculate the autocorrelation function of a vector that is circular
#'
#' The end and beginning of the vector are next to each other like on a circle
#'
#' @param v A vector
#' @return Numeric vector of correlations for lags 0 to length(v) - 1
acf.circ <- function(v) {
  # Correlate v against itself rotated by `lag` positions
  lag_cor <- function(lag, vect) {
    rotated <- shift(v = vect, places = lag)
    cor(rotated, vect)
  }
  sapply(seq(0, length(v) - 1), lag_cor, v)
}
#' Find valleys and peaks in a function
#'
#' @param x A vector
#' @param partial Will detect at the beginning and end
#' @param decreasing FALSE to detect peaks and TRUE to detect troughs
#' @return Integer vector of indices of the peaks (or troughs)
which.peaks <- function(x, partial = TRUE, decreasing = FALSE) {
  if (decreasing) {
    # Troughs: where the series stops falling and starts rising
    if (partial) {
      return(which(diff(c(FALSE, diff(x) > 0, TRUE)) > 0))
    }
    return(which(diff(diff(x) > 0) > 0) + 1)
  }
  # Peaks: where the series stops rising and starts falling
  if (partial) {
    return(which(diff(c(TRUE, diff(x) >= 0, FALSE)) < 0))
  }
  which(diff(diff(x) >= 0) < 0) + 1
}
#' Shift the values of a vector by a random amount that is at least as large as the argument minMvMs
#'
#' @param x A vector
#' @param timePerSampleRes Time in sample values (from the .dat files) between the position sample
#' @param minMvMs Minimum shift in ms
#' @param samplingRate Sampling rate of the .dat files.
#' @return The vector x rotated by the random shift
#' @examples shiftPositionVector(x=1:100, timePerSampleRes=400, minMvMs = 1000, samplingRate=20000)
shiftPositionVector <- function(x,
                                timePerSampleRes,
                                minMvMs,
                                samplingRate) {
  # Minimum shift expressed in number of position samples
  minMv <- minMvMs * (samplingRate / 1000) / timePerSampleRes
  # Bug fix: `minMv:length(x)-minMv` parsed as (minMv:length(x)) - minMv,
  # i.e. 0:(length(x)-minMv), so the shift could be smaller than the
  # requested minimum (even 0). The intended range is minMv..length(x)-minMv.
  mv <- sample(minMv:(length(x) - minMv), 1)
  return(shift(x, mv))
}
#' Shift the values of two vectors by a random amount that is at least as large as the argument minMvMs
#'
#' The two vectors are shifted by the same amount.
#'
#' @param x A vector
#' @param y A second vector
#' @param timePerSampleRes Time in sample values (from the .dat files) between the position sample
#' @param minMvMs Minimum shift in ms
#' @param samplingRate Sampling rate of the .dat files.
#' @return A list with the two rotated vectors, elements x and y
#' @examples shiftPositionVectors(x=1:100,y=201:300, timePerSampleRes=400, minMvMs = 1000, samplingRate=20000)
shiftPositionVectors <- function(x, y,
                                 timePerSampleRes,
                                 minMvMs,
                                 samplingRate) {
  # Minimum shift expressed in number of position samples
  minMv <- minMvMs * (samplingRate / 1000) / timePerSampleRes
  # Bug fix: same operator-precedence bug as shiftPositionVector;
  # the range must be minMv..length(x)-minMv, not 0..length(x)-minMv.
  mv <- sample(minMv:(length(x) - minMv), 1)
  # Both vectors are rotated by the identical amount so that their
  # pairing is preserved
  x <- shift(x, mv)
  y <- shift(y, mv)
  return(list(x = x, y = y))
}
#' Calculate the center of mass of a matrix, numeric or integer
#'
#' The values returned are in indices with first bin being 1 and last being length(x)
#'
#' @param x A matrix, or a numeric/integer vector
#' @return For a matrix, a numeric of length 2 with the row and column
#' coordinate of the center of mass (c(NA, NA) if the matrix sums to 0);
#' for a vector, a single numeric.
centerOfMass <- function(x) {
  # Bug fix: `class(x) == "matrix"` breaks on R >= 4.0 where matrices
  # have class c("matrix", "array"); the length-2 condition is an error
  # from R 4.2 on. is.matrix() is the robust test.
  if (is.matrix(x)) {
    total <- sum(x, na.rm = TRUE)
    if (total == 0)
      return(c(NA, NA))
    # Weighted mean of row/column indices, weighted by mass
    row_moment <- sum(apply(x, 1, sum, na.rm = TRUE) * seq_len(nrow(x)))
    col_moment <- sum(apply(x, 2, sum, na.rm = TRUE) * seq_len(ncol(x)))
    return(c(row_moment / total, col_moment / total))
  }
  if (is.numeric(x)) {
    # NOTE(review): the numerator has no na.rm, so any NA in x yields NA
    # even though the denominator drops NAs — confirm this is intended.
    return(sum(x * seq_along(x)) / sum(x, na.rm = TRUE))
  }
}
#' Perform r-to-Z transform to get significance of difference between two correlation coefficients
#'
#' @param r1 correlation coefficient of the first correlation
#' @param n1 number of observations in the first correlation
#' @param r2 correlation coefficient of the second correlation
#' @param n2 number of observations in the second correlation
#' @return Invisibly, the two-tailed p value (also printed to the console)
cor.diff <- function(r1, n1, r2, n2) {
  if (r1 < -1.0 || r1 > 1.0)
    stop(paste("r1 is out of range:", r1))
  if (r2 < -1.0 || r2 > 1.0)
    stop(paste("r2 is out of range:", r2))
  # Fisher r-to-Z transform of each coefficient
  Z1 <- 0.5 * log((1 + r1) / (1 - r1))
  Z2 <- 0.5 * log((1 + r2) / (1 - r2))
  diff <- Z1 - Z2
  # Standard error of the difference between two independent Z values
  SEdiff <- sqrt(1 / (n1 - 3) + 1 / (n2 - 3))
  diff.Z <- diff / SEdiff
  p <- 2 * pnorm(abs(diff.Z), lower.tail = FALSE)
  cat("Difference between ", r1, "(", n1, ") and ", r2, "(", n2, ")",
      "two-tailed p value:", p, "\n")
  # Improvement: previously the p value was only printed; return it
  # invisibly so callers can use it programmatically.
  invisible(p)
}
|
bd0adc03bd91095fac79ed32eb1e0e171f2dd28e
|
f464a388b87c7c9d0af4e25d693c7fb9879ebd29
|
/PH125.1-RBasics/1.2-Vectors_Sorting/Vectors.R
|
60ff4ecdbd4db69db582947f542270d3e5c96980
|
[] |
no_license
|
alvarobarbera/HarvardX-PH125-DataScience
|
5e57e8d5d36bc57992fbf9a6477a51465ef5fc02
|
1a152f47b20131d71bc61ac94282867843f5a3ae
|
refs/heads/master
| 2022-07-12T09:21:19.496692
| 2020-05-15T08:37:51
| 2020-05-15T08:37:51
| 260,189,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
r
|
Vectors.R
|
# We can create vectors using the function c, which stands for concatenate.
# We use c to concatenate entries in the following way:
codes <- c(380, 124, 818)
codes
countries <- c("USA","Canada","Mexico")
countries
# naming vectors
# each element gets a name; names() retrieves them later
country_codes <- c(USA=43, Canada=4, Mexico=33)
country_codes
class(country_codes)
names(country_codes)
# sequencies
# seq(a, b) builds a sequence from a to b; an optional third argument
# sets the step size
ten_numbers <- seq(1,10)
ten_numbers
cerofive <- seq(-10,10,0.5)
1:10
# cerofive is of class numeric
class(cerofive)
# 1:10 integer
class(1:10)
# subsetting to access specific values of a vector
# single brackets keep the vector type; c(3,4) selects several entries at once
cerofive[3]
cerofive[c(3,4)]
|
c06255d791af2ef51fdcd3db462ef555c2474444
|
75f69ae4eb0fc37bc2fde2d606a1cee493867b2d
|
/man/vignette_results_display.Rd
|
e7757e94520f56ea40c33e54b1f19cd537c59381
|
[] |
no_license
|
cran/docreview
|
8b025ce045ce58900949cb99c0b6e67cde5dbf00
|
e8f986d62977c86cde0136b3f07b4f2ff3a244fb
|
refs/heads/master
| 2023-07-08T16:50:53.256805
| 2021-08-17T06:20:11
| 2021-08-17T06:20:11
| 397,309,259
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 464
|
rd
|
vignette_results_display.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cli.R
\name{vignette_results_display}
\alias{vignette_results_display}
\title{Parse Vignette Results}
\usage{
vignette_results_display(results, config = get_config()$vignettes)
}
\arguments{
\item{results}{Output of calling the vignette analysis function}
\item{config}{Vignette review configuration; defaults to \code{get_config()$vignettes}}
}
\description{
Parse Vignette Results
}
\keyword{internal}
|
a7c3fde0732a498a7fce6be5ebb6c70a2124acbb
|
f9bf6790ebc018bcdcb2a006a108838afb72bf77
|
/R/testes/hexagons_teste.R
|
7eacca4efe93dea31ac0b15301d1bab551694aa8
|
[] |
no_license
|
danpanetta/acesso_oport
|
41cf77d06388cb6e8e51587ef84fc3d21556fb94
|
4b9dcaa2261a986e8e619d3f34abbfed54cfff5f
|
refs/heads/master
| 2022-11-18T04:11:35.596398
| 2020-07-13T14:39:03
| 2020-07-13T14:39:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,361
|
r
|
hexagons_teste.R
|
# info on Uber's H3 hexagonal gird: https://github.com/obrl-soil/h3jsr/blob/master/vignettes/intro-to-h3jsr.Rmd
# https://github.com/uber/h3/issues/87
# devtools::install_github("obrl-soil/h3jsr")
library(h3jsr)
library(sf)
library(ggplot2)
library(tidyverse)
# read shape
# muni <- st_read(dsn = './Shapes_IBGE', layer ='muni')
# Read the Ceara municipalities shapefile (WGS84) and keep only Fortaleza
muni <- read_sf("../data-raw/municipios/ce/municipios_ce.shp", crs = 4326) %>%
filter(NM_MUNICIP == "FORTALEZA")
# # projection
# muni <- st_transform(muni, crs = 4326)
# get the unique h3 ids of the hexagons intersecting your polygon at a given resolution
hex_ids <- h3jsr::polyfill(muni, res = 8, simple = FALSE)
# Available resolutions considerinf length of short diagonal - https://uber.github.io/h3/#/documentation/core-library/resolution-table
# 10 ~136 meters
# 09 ~357
# 08 ~960
# 07 ~2510 meters
# pass the h3 ids to return the hexagonal grid
# (h3_polyfillers presumably holds the H3 cell addresses — TODO confirm
# against the h3jsr::polyfill return structure)
hex_grid <- unlist(hex_ids$h3_polyfillers) %>%
h3jsr::h3_to_polygon(simple = FALSE)
plot(hex_grid)
# # Safe hex grid as sf
# sf::st_write(hex_grid,
# dsn = "data/hex_municipio/fortaleza",
# layer ='hex_fortaleza',
# driver = 'ESRI Shapefile')
# Persist the grid for downstream scripts
readr::write_rds(hex_grid, "../data/hex_municipio/hex_fortaleza.rds")
#' Build an H3 hexagonal grid covering an sf polygon and save it as rds
#'
#' Bug fix: the original function ignored its `shape` argument entirely and
#' always re-read the hard-coded Fortaleza shapefile. It now uses the polygon
#' it is given; resolution and output path are parameterized with defaults
#' matching the original behavior.
#'
#' @param shape An sf polygon to cover with hexagons
#' @param res H3 resolution (8 ~ 960 m short diagonal); see
#'   https://uber.github.io/h3/#/documentation/core-library/resolution-table
#' @param output_path Where to write the resulting grid as rds
#' @return Invisibly, the sf hexagonal grid
shape_para_hex <- function(shape, res = 8,
                           output_path = "../data/hex_municipio/hex_fortaleza.rds") {
  # Unique h3 ids of hexagons intersecting the polygon at this resolution
  hex_ids <- h3jsr::polyfill(shape, res = res, simple = FALSE)
  # Convert the ids back into an sf grid of hexagon polygons
  hex_grid <- unlist(hex_ids$h3_polyfillers) %>%
    h3jsr::h3_to_polygon(simple = FALSE)
  plot(hex_grid)
  # Persist the grid for downstream scripts
  readr::write_rds(hex_grid, output_path)
  invisible(hex_grid)
}
|
c6b3b218c2800540ecc19d9e55e566e51150b412
|
022345c75f68dd4dd0bb202c28290e09784312d6
|
/ClusteringImageObjects.R
|
574f5ab534c4dd541e5b34090873ed09ba418082
|
[] |
no_license
|
gauravsgr/RandomFunProjects
|
1a8b24314f4dd40819f0cdbef1ff144aa9255bb1
|
b239fdf0c57f9688d9023824d4651dc0c4962b94
|
refs/heads/master
| 2021-07-10T12:01:28.680538
| 2021-04-11T03:14:18
| 2021-04-11T03:14:18
| 65,774,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,389
|
r
|
ClusteringImageObjects.R
|
#--------------------------------------------------------------------------#
# Doing the Kmeans on JPEG images                                          #
#--------------------------------------------------------------------------#
# Load the package
# NOTE(review): installing inside a script re-installs on every run;
# consider guarding with requireNamespace().
install.packages('jpeg')
library(jpeg)
url <- "http://www.wall321.com/thumbnails/detail/20120304/colorful%20birds%20tropical%20head%203888x2558%20wallpaper_www.wall321.com_40.jpg"
# Download the file and save it as "Image.jpg" in the directory
dFile <- download.file(url, "Image.jpg")
# Read the image from the working directory downloaded above
img <- readJPEG("Image.jpg")
img1 <- readJPEG("Image.jpg", TRUE)
plot(img)
# Get the image dimensions
imgDm <- dim(img)
str(img)
# Assign the RGB channels to data frame
# x is the pixel column, y the pixel row (flipped so row 1 ends up at
# the top when plotted), R/G/B the three channel planes flattened
imgRGB <- data.frame(
  x = rep(1:imgDm[2], each = imgDm[1]),
  y = rep(imgDm[1]:1, imgDm[2]),
  R = as.vector(img[,,1]),
  G = as.vector(img[,,2]),
  B = as.vector(img[,,3])
)
# NOTE(review): the two lines below look like leftover scratch code —
# as.matrix() does not take x/y/z arguments; consider removing.
m = as.matrix(x = rep(1:5), y = seq(1:5), z = rep(5:3))
dim(m)
head(imgRGB)
library(ggplot2)
# Build the ggplot theme used to present images on a framed canvas:
# white panel with black border, dotted/dashed gray grid, bold axis
# titles and a large bold plot title.
drawImage <- function() {
  theme(
    panel.background = element_rect(size = 3, colour = "black", fill = "white"),
    axis.ticks = element_line(size = 2),
    panel.grid.major = element_line(colour = "gray80", linetype = "dotted"),
    panel.grid.minor = element_line(colour = "gray90", linetype = "dashed"),
    axis.title.x = element_text(size = rel(1.2), face = "bold"),
    axis.title.y = element_text(size = rel(1.2), face = "bold"),
    plot.title = element_text(size = 20, face = "bold", vjust = 1.5)
  )
}
# Plot the image
# Each pixel is drawn as a point colored by its own RGB triple
ggplot(data = imgRGB, aes(x = x, y = y)) +
  geom_point(colour = rgb(imgRGB[c("R", "G", "B")])) +
  labs(title = "Original Image: facebook Profile Pic") +
  xlab("x") +
  ylab("y") +
  drawImage()
# Performing the clustering and plotting again
# k-means in RGB space quantizes the image to kClusters colors;
# each pixel is recolored with its cluster's centroid color
kClusters <- 12
kMeans <- kmeans(imgRGB[, c("R", "G", "B")], centers = kClusters)
kColours <- rgb(kMeans$centers[kMeans$cluster,])
ggplot(data = imgRGB, aes(x = x, y = y)) +
  geom_point(colour = kColours) +
  labs(title = paste("k-Means Clustering of", kClusters, "Colours")) +
  xlab("x") +
  ylab("y") +
  drawImage()
|
57cba7122a3107a98159e31b90871c42f729c03a
|
e8e82f15e40ff16eaee9c434ed63fe19bc0b3610
|
/man/rc_expression.Rd
|
b5cb2f683498ceefd76cdf958c0d6d7f14c447ce
|
[] |
no_license
|
jackowacko/migraR
|
de0bda10d36e5356867a2150257009d10e42deb0
|
b4f2a6177935990ca1eebf2a411d5b5bf052f813
|
refs/heads/master
| 2021-04-29T23:55:20.352850
| 2018-05-10T15:47:42
| 2018-05-10T15:47:42
| 121,565,920
| 0
| 0
| null | 2018-05-09T16:12:51
| 2018-02-14T21:56:47
|
R
|
UTF-8
|
R
| false
| true
| 476
|
rd
|
rc_expression.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rc_expression.R
\name{rc_expression}
\alias{rc_expression}
\title{Rogers and Castro expressions}
\usage{
rc_expression(profile = "eleven")
}
\arguments{
\item{profile}{a integer with the number of parameter for the model}
\item{model.name}{character with the the of migration model}
}
\description{
Are the Rogers and Castro expressions for migration by individual or grouped ages with methods
}
|
02681b3a51e006130afa0e69ce1925b6985cde30
|
e849a778ae0b39c98374f694124534d7bd80aa8d
|
/R/minCell.R
|
7ecb4c6bdd34c9fba116fa173ac71acde30bbe4d
|
[] |
no_license
|
ABS-dev/DiagTestKit
|
5472b88ba23a96f8195cc0d3269b40fefcbecc64
|
fe29db9b8804db5308676959f765b1c7610c46c5
|
refs/heads/master
| 2023-01-12T06:48:01.212622
| 2019-08-05T23:15:35
| 2019-08-05T23:15:35
| 114,177,632
| 0
| 0
| null | 2019-08-05T22:29:42
| 2017-12-13T23:00:13
|
R
|
UTF-8
|
R
| false
| false
| 2,940
|
r
|
minCell.R
|
#' @title minimize cell
#' @description A function used for optimizing the values of sensitivity and specificity (and \eqn{\delta} and \eqn{\gamma} for a 3-state kit).
#' The objective function minimizes the sum of the squared deviations (expected - observed cell counts).
#' @param parm \code{vector} A vector of starting values to be used for the optimization that is passed to \code{minCell}. For a 2-state experimental test, this is a vector of length 2 with entries (\eqn{\pi}, \eqn{\theta})
#' For a 3-state experimental test, this is a vector of length 4 with entries (\eqn{\pi}, \eqn{\delta}, \eqn{\theta}, \eqn{\gamma}). See also \code{\link{estimateSnSp}}.
#' @param SnR \code{data.frame} Each column corresponds to one reference test. Row 1 contains the sensitivity for the reference test(s).
#' Row 2 contains the probability of a suspect result as a fraction of the non-correct test result (0 for a 2-state test).
#' @param SpR \code{data.frame} Each column corresponds to one reference test. Row 1 contains the specificity for each reference test.
#' Row 2 contains the probability of a suspect result as a fraction of the non-correct test result (0 for a 2-state test).
#' @param Prev \code{vector} A named vector containing the prevalence for each population sampled.
#' @param xdat \code{vector} A vector of the observed cell counts.
#' @param N \code{vector} A named vector containing the sample size for each population sampled passed to \code{\link{cellS}}.
#' @param nstates \code{vector} A vector with length one more than the number of reference tests. The first element is the number of states of the experimental test and the remaining entries are the number
#' of states of each reference test (using the same ordering as SnR and SpR).
#' @return The sum of the squared deviations between the expected and observed cell counts.
#' @author \link{DiagTestKit-package}
minCell <- function(parm, SnR, SpR, Prev, xdat, N, nstates) {
  # Unpack the experimental-test parameters:
  # 2-state kit supplies (Sn, Sp); 3-state kit supplies (Sn, delta, Sp, gamma)
  if (length(parm) == 2) {
    SnE <- parm[1]
    SpE <- parm[2]
    sus.perc <- c(0, 0)
  } else if (length(parm) == 4) {
    SnE <- parm[1]
    SpE <- parm[3]
    sus.perc <- c(parm[2], parm[4])
  }
  # Expected cell counts for the candidate parameters
  expected <- cellS(SnR, SpR, Prev, SnE, SpE, sus.perc, N, nstates)
  # Objective: sum of squared deviations, expected vs observed
  sum((expected - xdat)^2)
}
|
da1605cacfe4a3ef344eaacb666dddbc4b956d3c
|
396df2552224ffcb0294fe6e297b231aa2e59e68
|
/_working/0137-dice-games.R
|
6b55353876cd8137880a299cb435b4cab60bb968
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ellisp/blog-source
|
d072bed980a5074d6c7fac03be3635f70ab5f098
|
1227f83df23af06da5280214ac7f2e0182be5707
|
refs/heads/master
| 2023-09-05T07:04:53.114901
| 2023-08-27T21:27:55
| 2023-08-27T21:27:55
| 122,695,494
| 17
| 8
| null | 2023-08-27T21:15:33
| 2018-02-24T02:36:45
|
HTML
|
UTF-8
|
R
| false
| false
| 2,639
|
r
|
0137-dice-games.R
|
library(tidyverse)
library(scales)
n <- 1000000
# Simulate n dice rolls; consecutive rolls between sixes form one game.
# A game's length is the gap between successive sixes; Player A rolls on
# odd turns, so odd game lengths are wins for Player A.
dice <- sample(1:6, n, replace = TRUE)
wins <- which(dice == 6)
results <- data.frame(game_length = diff(c(0, wins))) %>%
  mutate(who_won = ifelse(game_length %% 2 == 1, "Player A", "Player B"))
# Win proportions per player
rs <- results %>%
  group_by(who_won) %>%
  summarise(freq = n()) %>%
  ungroup() %>%
  mutate(prop = freq / sum(freq))
rs
svg("../img/0137-simple-results.svg", 8, 4)
ggplot(results, aes(x = game_length, fill = who_won)) +
  geom_histogram(binwidth = 1) +
  ggtitle("Results of an alternating dice roll game",
          paste0("First to roll a six wins; Starting player wins ",
                 round(rs[1, "prop"], 2), " of the time")) +
  scale_y_continuous(label = comma) +
  labs(x = "Game length", fill = "Winner:",
       y = paste("Number of wins out of", format(n, big.mark = ",", scientific = FALSE)))
dev.off()
#-----Game 2--------
#' Roll a dice and return the game 2 results for one round
#'
#' @param last_roll the previous roll of the dice. If NA, this means we are at the beginning of the game
#' @return a list with two elements: whether it was a win based on the rule of 6, or matching the last roll;
#' and what the latest roll of the dice is
diceroll <- function(last_roll) {
  roll <- sample(1:6, 1)
  # Win on a six, or on repeating the previous roll
  # (an NA last_roll never matches, so only a six wins the first round)
  list(win = roll %in% c(6, last_roll), this_roll = roll)
}
#' Main cycle for playing "Game 2"
#'
#' @return the number of rolls it took to win the game
dicegame <- function() {
  # Improvement: the original appended every roll to a growing vector
  # (O(n^2) copying) although only the previous roll is ever consulted;
  # track just the last roll instead. RNG consumption and the returned
  # count are identical.
  n_rolls <- 0
  last_roll <- NA
  repeat {
    n_rolls <- n_rolls + 1
    dr <- diceroll(last_roll)
    if (dr$win) break
    last_roll <- dr$this_roll
  }
  return(n_rolls)
}
# Play game 2 n times and record how many rolls each game took
game_length <- sapply(1:n, function(x){dicegame()})
results <- data.frame(game_length = game_length) %>%
  mutate(who_won = ifelse(game_length %% 2 == 1, "Player A", "Player B"))
results %>%
  group_by(who_won, game_length) %>%
  summarise(freq = n()) %>%
  arrange(game_length)
# Win proportions per player
rs <- results %>%
  group_by(who_won) %>%
  summarise(freq = n()) %>%
  ungroup() %>%
  mutate(prop = freq / sum(freq))
rs
svg("../img/0137-game2-results.svg", 8, 4)
ggplot(results, aes(x = game_length, fill = who_won)) +
  geom_histogram(binwidth = 1) +
  ggtitle("Results of an alternating dice roll game",
          paste0("First to roll a six or to match the last roll wins; Starting player wins ",
                 round(rs[1, "prop"], 2), " of the time")) +
  scale_y_continuous(label = comma) +
  labs(x = "Game length", fill = "Winner:",
       y = paste("Number of wins out of", format(n, big.mark = ",", scientific = FALSE)))
dev.off()
# convert_pngs is an author-local helper (not defined in this file)
convert_pngs("0137")
|
6507da8abb6760ef714c0a85c23e0dcb1a6aaf82
|
94908a285737843999c5acaaad60199538a5c8d6
|
/R/plotting.devices.R
|
0a62f18fce4ab5fed700c46e0190042a72aeb8de
|
[] |
no_license
|
drmjc/mjcgraphics
|
800716e07757066d992a6eb1ea0470012cb8f698
|
cd9e30472fea14591bc342b24cc8330307fb9b4c
|
refs/heads/master
| 2021-01-19T01:47:00.882419
| 2016-06-07T06:10:21
| 2016-06-07T06:10:21
| 12,447,176
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,574
|
r
|
plotting.devices.R
|
#' Customized plotting devices
#'
#' The \code{jpeg}, \code{png} or \code{pdf} functions are redefined versions
#' of the standard ones implemented in grDevices:
#' - changes the default size rotates the axis labels (las=1)\cr
#' - pushes the label out an extra line\cr
#' - saves the filename to an environment variable, so that dev.off() can
#' find it, and open it.\cr
#' \code{dev.off} closes the current device, and opens the newly created
#' image file. The mechanism behind this is via the setting of an
#' environment variable \code{CURRENT.PLOT.FILENAME}, which gets set
#' by one of the \code{jpeg}, \code{png} or \code{pdf} functions.
#'
#' In addition, there are predefined versions for creating standard sized
#' jpeg / png devices:\cr
#' \tabular{ll}{
#' VGA \tab 640x480 \cr
#' SVGA \tab 800x600 \cr
#' XGA \tab 1024x768 \cr
#' QVGA \tab 1280x960 \cr
#' SXGA+ \tab 1400x1050 \cr
#' UXGA \tab 1600x1200 \cr
#' QXGA \tab 2048x1536 \cr
#' QSXGA+ \tab 2800x2100 \cr
#' QUXGA \tab 3200x2400 \cr
#' WXGA \tab 1280x800 \cr \cr
#' SXGA \tab 1280x1024 \cr
#' WSXGA+ \tab 1680x1050 \cr
#' WUXGA \tab 1920x1200 \cr
#' QSXGA \tab 2560x2048 \cr
#' }
#' See \url{http://www.i386.info/vidres.htm} for more info.
#'
#' In addition there are routines for opening A4 & A5 portrait/landscape pdf
#' & CairoPDF devices.
#'
#' @section Apple27:
#' The \code{\link{png.Apple27}}, \code{\link{jpeg.Apple27}}, \code{\link{pdf.Apple27}}
#' functions are designed to create images that can be opened in Preview, and will be close
#' to the maximum possible size on an Apple 27" LED Cinema/Thunderbolt Display. Note, Preview
#' doesn't seem to want to automatically fill the entire screen from edge to edge, thus these
#' are the largest sizes that preview is comforable opening at using \sQuote{View > Actual Size}.
#'
#' @note i've made the Cairo* etc plots defunct, since i never use them &
#' it's not worth adding the libcairo dependency.
#'
#' @return device opening functions return nothing. \code{dev.off} prints the
#' file path and opens the previously opened file if in an interactive session.
#'
#' @param filename the name of the output file
#' @param width the width of the device in pixels for jpeg or png, and inches for pdf
#' @param height the height of the device in pixels for jpeg or png, and inches for pdf
#' @param \dots passed to \code{\link[grDevices]{png}}, \code{\link[grDevices]{jpeg}},
#' \code{\link[grDevices]{pdf}} from \code{grDevices}
#' @param onefile logical: if \code{TRUE} (the default) allow multiple figures in one
#' file. If \code{FALSE}, generate a file with name containing the
#' page number for each page.
#' @param version a string describing the PDF version that will be required to
#' view the output. This is a minimum, and will be increased
#' (with a warning) if necessary. Defaults to \dQuote{1.4}, but see
#' \sQuote{Details} in \code{\link[grDevices]{pdf}}
#' @param paper the target paper size. The choices are \dQuote{a4}, \dQuote{letter},
#' \dQuote{legal} (or \dQuote{us}) and \dQuote{executive} (and these can be
#' capitalized), or \dQuote{a4r} and \dQuote{USr} for rotated
#' (\sQuote{landscape}). The default is \dQuote{special}, which means that
#' the \sQuote{width} and \sQuote{height} specify the paper size. A further
#' choice is \dQuote{default}; if this is selected, the papersize is
#' taken from the option \dQuote{papersize} if that is set and as
#' \dQuote{a4} if it is unset or empty. Defaults \dQuote{special}.
#' @param do.par logical: setup the plotting parameters?
#' @param bg the background color. default = \dQuote{white}, which overrides the
#' default setting of transparent.
#' @param open logical: open the recent file? Defaults to \code{TRUE}
#' if in an interactive session (see \code{\link{interactive}})
#' @inheritParams png
#'
#' @author Mark Cowley, 2009-06-10
#' @rdname plotting.devices
#' @aliases plotting.devices
#' @name Customized plotting devices
#' @importFrom grDevices png jpeg pdf dev.off
NULL
#' @export
#' @rdname plotting.devices
png <- function(filename = "Rplot%03d.png", width = 1200, height = 800, ...) {
  # Open the device first, then apply the house style: horizontal axis
  # labels (las=1) and axis titles pushed out an extra line.
  grDevices::png(filename = filename, width = width, height = height, ...)
  par(las = 1, mgp = c(4, 1, 0), mar = par()$mar + c(1, 1, 0, 0))
  # Remember the file so the custom dev.off() can open it afterwards
  options(CURRENT.PLOT.FILENAME = filename)
}
#' @export
#' @rdname plotting.devices
jpeg <- function(filename = "Rplot%03d.jpeg", width = 1200, height = 800, ...) {
  # Open the device first, then apply the house style: horizontal axis
  # labels (las=1) and axis titles pushed out an extra line.
  grDevices::jpeg(filename = filename, width = width, height = height, ...)
  par(las = 1, mgp = c(4, 1, 0), mar = par()$mar + c(1, 1, 0, 0))
  # Remember the file so the custom dev.off() can open it afterwards
  options(CURRENT.PLOT.FILENAME = filename)
}
#' @export
#' @rdname plotting.devices
pdf <- function (filename, width=11.69, height=8.27, onefile = TRUE, version = "1.4", paper="special", do.par=TRUE, bg="white", ...) {
  # Open the device (filename is passed positionally to grDevices::pdf's
  # `file` argument), then optionally apply the house par() style.
  grDevices::pdf(filename, width = width, height = height, onefile = onefile,
                 version = version, paper = paper, bg = bg, ...)
  if (do.par) {
    par(las = 1, mgp = c(4, 1, 0), mar = par()$mar + c(1, 1, 0, 0))
  }
  # Remember the file so the custom dev.off() can open it afterwards
  options(CURRENT.PLOT.FILENAME = filename)
}
#' @note \code{dev.off}: default for open is \code{capabilities("aqua")}, since this is
#' only true when running locally on a mac. There must be a more precise way of doing this.
#' Note \code{capabilities("X11")} seems a logical choice, but this is \code{TRUE} with
#' X11 forwarding, when issuing a \code{system("open ...")} command will fail.
#'
#' @export
#' @rdname plotting.devices
dev.off <- function(open=capabilities("aqua")) {
  dv <- dev.cur()
  grDevices::dev.off()
  # Bug fix: dev.cur() returns a *named* integer; the original compared the
  # integer itself against device names ("2" vs "quartz"), which never
  # matched, so on-screen devices were also "opened". Compare the name.
  if( open && (! names(dv) %in% c("quartz", "X11", "X11cairo") ) ) {
    # File recorded by the custom png/jpeg/pdf wrappers above
    f <- getOption("CURRENT.PLOT.FILENAME")
    if( !is.na(f) && !is.null(f) && file.exists(f) ) {
      f <- normalizePath(f)
      # macOS `open` launches the default viewer for the file
      system(paste("open", shQuote(f)))
      cat(paste("# ", f, "\n", sep=""))
    }
    options(CURRENT.PLOT.FILENAME=NA)
  }
}
############# PNG ###################
# Convenience wrappers: open the custom png device at a preset standard
# resolution (see the size table in the roxygen block above).
# NOTE(review): png.QSXGA (2048x1536) and png.QXGA (2800x2100) appear to
# have swapped dimensions relative to the documented table and to the
# jpeg.* family below (jpeg.QXGA = 2048x1536, jpeg.QSXGA = 2800x2100) —
# confirm before relying on these names.
#' @export
#' @rdname plotting.devices
png.VGA <- function(filename="Rplot%03d.png", width=640, height=480,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
png.SVGA <- function(filename="Rplot%03d.png", width=800, height=600,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
png.XGA <- function(filename="Rplot%03d.png", width=1024, height=768,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
png.QVGA <- function(filename="Rplot%03d.png", width=1280, height=960,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
png.SXGA <- function(filename="Rplot%03d.png", width=1400, height=1050,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
png.UXGA <- function(filename="Rplot%03d.png", width=1600, height=1200,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
png.QSXGA <- function(filename="Rplot%03d.png", width=2048, height=1536,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
png.QXGA <- function(filename="Rplot%03d.png", width=2800, height=2100,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
png.QUXGA <- function(filename="Rplot%03d.png", width=3200, height=2400,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
png.Apple27 <- function(filename="Rplot%03d.png", width=2520, height=1340,
	pointsize = 12, bg = "white", res = NA,...) {
	mjcgraphics::png(filename=filename, width=width, height=height, pointsize=pointsize, bg=bg, res=res, ...)
}
################### JPEG #############################
# Convenience wrappers: open the custom jpeg device at a preset standard
# resolution (see the size table in the roxygen block above).
# NOTE(review): jpeg.Apple27 is defined before jpeg.QUXGA, unlike the png
# family ordering — harmless, but inconsistent.
#' @export
#' @rdname plotting.devices
jpeg.VGA <- function(filename="Rplot%03d.jpeg", width=640, height=480,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
jpeg.SVGA <- function(filename="Rplot%03d.jpeg", width=800, height=600,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
jpeg.XGA <- function(filename="Rplot%03d.jpeg", width=1024, height=768,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
jpeg.QVGA <- function(filename="Rplot%03d.jpeg", width=1280, height=960,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
jpeg.SXGA <- function(filename="Rplot%03d.jpeg", width=1400, height=1050,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
jpeg.UXGA <- function(filename="Rplot%03d.jpeg", width=1600, height=1200,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
jpeg.QXGA <- function(filename="Rplot%03d.jpeg", width=2048, height=1536,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
jpeg.QSXGA <- function(filename="Rplot%03d.jpeg", width=2800, height=2100,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
jpeg.Apple27 <- function(filename="Rplot%03d.jpeg", width=2520, height=1340,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
#' @export
#' @rdname plotting.devices
jpeg.QUXGA <- function(filename="Rplot%03d.jpeg", width=3200, height=2400,
	pointsize = 12, quality = 75, bg = "white", res = NA,...) {
	mjcgraphics::jpeg(filename=filename, width=width, height=height, pointsize=pointsize, quality=quality, bg=bg, res=res, ...)
}
################### PDF #############################
# Convenience wrappers: open the custom pdf device at preset paper sizes.
#' @export
#' @rdname plotting.devices
pdf.A4 <- function(file, onefile=TRUE, version="1.4", ...) {
	# A4 landscape via paper="a4r"; width/height fall back to the custom
	# pdf() defaults (11.69 x 8.27).
	# Bug fix: removed a stray empty argument (`pdf(file, , onefile=...)`)
	# that relied on R's missing-argument handling.
	mjcgraphics::pdf(file, onefile=onefile, version=version, paper="a4r", ...)
}
#' @export
#' @rdname plotting.devices
pdf.A4.portrait <- function(file, onefile=TRUE, version="1.4", ...) {
	# A4 portrait: 8.27 x 11.69 inches
	mjcgraphics::pdf(file, width=8.27, height=11.69, onefile=onefile, version=version, paper="a4", ...)
}
#' @export
#' @rdname plotting.devices
pdf.A5 <- function(file, onefile=TRUE, version="1.4", ...) {
	# NOTE(review): 5.845 x 4.135 is A4 halved in *both* dimensions
	# (closer to A6 than A5 landscape, which is 8.27 x 5.83) — kept as-is;
	# confirm intended size.
	mjcgraphics::pdf(file, width=5.845, height=4.135, onefile=onefile, version=version, ...)
}
#' @export
#' @rdname plotting.devices
pdf.Apple27 <- function(file, onefile=TRUE, version="1.4", ...) {
	# Sized for viewing near full-screen on an Apple 27" display
	mjcgraphics::pdf(file, width=23, height=12, onefile=onefile, version=version, ...)
}
# #' @export
# #' @rdname plotting.devices
# CairoPDF.A4 <- function(file, onefile=TRUE, version="1.4", ...) {
# require(Cairo)
# CairoPDF(file, width=11.69, height=8.27, onefile=onefile, version=version, paper="a4r", ...)
# }
#
# #' @export
# #' @rdname plotting.devices
# CairoPDF.A4.portrait <- function(file, onefile=TRUE, version="1.4", ...) {
# require(Cairo)
# CairoPDF(file, width=8.27, height=11.69, onefile=onefile, version=version, paper="a4", ...)
# }
#
# #' @export
# #' @rdname plotting.devices
# CairoPDF.A5 <- function(file, onefile=TRUE, version=version, ...) {
# require(Cairo)
# CairoPDF(file, width=5.845, height=4.135, onefile=onefile, version=version, ...)
# }
|
1a371bd779c95359b388245471047aa660f5831f
|
e15f86312db3109bbda053063557693518af4ead
|
/new_caojd/get_cao_pi_q.R
|
73d7ace0560d27ff0371f59a80ae32399ec072fa
|
[] |
no_license
|
heichiyidui/dev
|
3aecf0f92e4af4184b4eae2b1935f281b7746c86
|
73c20c19928eb94d9aec10f0d307604b147b8088
|
refs/heads/master
| 2020-12-29T01:54:24.236229
| 2016-07-01T14:51:01
| 2016-07-01T14:51:01
| 35,271,765
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,955
|
r
|
get_cao_pi_q.R
|
################################################################################
# Estimate a 400 x 400 substitution rate matrix Q from empirical CAO
# transition matrices P(t) observed at 100 distances, normalise it so that
# I + Q corresponds to PAM1, and derive the implied stationary distribution.
################################################################################
# read matrices
dis <- as.matrix(read.table('cao_sum.dis', header = FALSE))
n_mats <- array(scan('cao_aln.in', what = double(0)))
n_mat <- array(n_mats, dim = c(400, 400, 100))
# Normalise each raw count matrix column-wise into a P(t) matrix
# (columns of p_mat[,,i] sum to 1 after dividing by column totals).
p_mat <- array(dim = c(400, 400, 100))
for (i in 1:100) {
  p1 <- n_mat[, , i]
  pi1 <- colSums(p1)
  p1 <- diag(c(1 / pi1)) %*% p1
  p_mat[, , i] <- p1
}
################################################################################
# Estimate Q. R approximates the Laplace-transform integral
#   R = integral_0^inf exp(-alpha * t) * P(t) dt
# by weighted sums over the observed distances; then Q = alpha*I - R^-1.
alpha <- 0.02
# Contribution of [0, dis[1]]: P(0) = I, evaluated at t = dis[1]/4 with
# weight dis[1]/2.
R <- exp(-alpha * dis[1] * 0.25) * diag(400) * dis[1] * 0.5
R <- R + exp(-alpha * dis[1]) * p_mat[, , 1] * dis[1]
# Interior points use the half-width of the surrounding interval.
for (i in 2:99) {
  t <- dis[i]
  t_range <- (dis[i + 1] - dis[i - 1]) * 0.5
  R <- R + exp(-alpha * t) * p_mat[, , i] * t_range
}
# Last point: one-sided interval width.
t <- dis[100]
t_range <- dis[100] - dis[99]
R <- R + exp(-alpha * t) * p_mat[, , 100] * t_range
inv_R <- solve(R)
Q <- alpha * diag(400) - inv_R
################################################################################
# to normalize Q so that I + Q = PAM1
# sum(abs(Q)) = 9.3
# sum(abs(Q-t(Q))) = 2.4
Pi <- as.matrix(read.table('cao/Pi', header = FALSE))
PiQ <- diag(c(Pi)) %*% Q
# sum(abs(PiQ)) = 0.023
# sum(abs(PiQ-t(PiQ))) = 0.00071
# Symmetrise PiQ and zero its diagonal.
PiQ <- (PiQ + t(PiQ)) * 0.5
PiQ <- PiQ - diag(diag(PiQ))
# PiQ_ij for i != j should always be non-negative; clamp negatives to 0.
# (Vectorised replacement for the original 400x400 double loop -- exact
# same effect. 54706 negative numbers were seen in the first run.)
PiQ[PiQ < 0] <- 0
# Rescale so the total off-diagonal mass is 0.01 (PAM1 convention).
PiQ <- PiQ * (0.01 / sum(PiQ))
Q2 <- diag(1 / c(Pi)) %*% PiQ
# Rows of a rate matrix must sum to zero.
Q2 <- Q2 - diag(rowSums(Q2))
write.table(Q2, 'Q2', col.names = FALSE, row.names = FALSE)
################################################################################
# Derive the stationary distribution: propagate P(t) = S exp(Lambda*t) S^-1
# to a large t (10000) and read off the limiting column distribution.
Q <- Q2
ev <- eigen(Q)
Lambda <- ev$values
S <- ev$vectors
inv_S <- solve(ev$vectors)
p10000 <- S %*% diag(exp(Lambda * 10000)) %*% inv_S
Pi2 <- colSums(p10000) / sum(p10000)
write.table(Pi2, 'Pi2', col.names = FALSE, row.names = FALSE)
|
724d2478100cf4434ed46df227b2651df580cc0e
|
35de14603463a45028bd2aca76fa336c41186577
|
/man/IMPACT.snp_group_boxplot.Rd
|
8db1b3fbbeed38fc701c1cea3483851ffab45c3c
|
[
"MIT"
] |
permissive
|
UKDRI/echolocatoR
|
e3cf1d65cc7113d02b2403960d6793b9249892de
|
0ccf40d2f126f755074e731f82386e4e01d6f6bb
|
refs/heads/master
| 2023-07-14T21:55:27.825635
| 2021-08-28T17:02:33
| 2021-08-28T17:02:33
| 416,442,683
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,462
|
rd
|
IMPACT.snp_group_boxplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IMPACT.R
\name{IMPACT.snp_group_boxplot}
\alias{IMPACT.snp_group_boxplot}
\title{\emph{IMPACT} box plot with \pkg{ggpubr}}
\source{
\href{https://www.r-bloggers.com/add-p-values-and-significance-levels-to-ggplots/}{ggpubr example}
}
\usage{
IMPACT.snp_group_boxplot(
TOP_IMPACT_all,
snp_groups = c("GWAS lead", "UCS", "Consensus"),
method = "wilcox.test",
comparisons_filter = function(x) { if ("Consensus" \%in\% x) return(x) },
show_plot = T,
save_path = F,
title = "IMPACT scores",
xlabel = NULL,
ylabel = NULL,
show_padj = T,
show_signif = T,
vjust_signif = 0.5,
show_xtext = T,
shift_points = T,
height = 10,
width = 10
)
}
\description{
Box plot of \emph{IMPACT} scores from each SNP group.
}
\examples{
\dontrun{
TOP_IMPACT_all <- reshape2::melt(boxplot_mat) \%>\% `colnames<-`(c("SNP_group","max_IMPACT"))
bp <- IMPACT.snp_group_boxplot(TOP_IMPACT_all, method="t.test")
bp <- IMPACT.snp_group_boxplot(TOP_IMPACT_all, method="wilcox.test")
}
}
\seealso{
Other IMPACT:
\code{\link{IMPACT.get_annotation_key}()},
\code{\link{IMPACT.get_annotations}()},
\code{\link{IMPACT.get_top_annotations}()},
\code{\link{IMPACT.iterate_get_annotations}()},
\code{\link{IMPACT.postprocess_annotations}()},
\code{\link{IMPACT_annotation_key}},
\code{\link{IMPACT_heatmap}()},
\code{\link{prepare_mat_meta}()}
}
\concept{IMPACT}
\keyword{internal}
|
b92d56a9f506368b66fb14cffe8bc0ca0f3ddbde
|
a0830531052bd2330932c3a2c9750326cf8304fc
|
/vmstools/man/vmsGridCreate.Rd
|
70ec708e64dcfda9bc564746bbdfa13c9aacb6bf
|
[] |
no_license
|
mcruf/vmstools
|
17d9c8f0c875c2a107cfd21ada94977d532c882d
|
093bf8666cdab26d74da229f1412e93716173970
|
refs/heads/master
| 2021-05-29T20:57:18.053843
| 2015-06-11T09:49:20
| 2015-06-11T09:49:20
| 139,850,057
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,094
|
rd
|
vmsGridCreate.Rd
|
\name{vmsGridCreate}
\alias{vmsGridCreate}
\title{function to create grids from point data by counting points in cells or summing an attribute}
\description{
Accepts an input of points data in a data frame with named columns for X and Y.
Creates a grid of defined cell size in X and Y directions (cells can be unequal).
Either counts points in cells or sums an attribute variable if one is supplied.
Optionally plots a map of the grid and outputs to a gridAscii file and/or an image.
}
\usage{
vmsGridCreate(dF
, nameLon = "Longitude"
, nameLat = "Latitude"
, nameVarToSum = ""
, cellsizeX = 0.5
, cellsizeY = 0.5
, we = ""
, ea = ""
, so = ""
, no = ""
, gridValName="fishing"
, plotMap = TRUE
, plotTitle = ""
, numCats = 5
, paletteCats = "heat.colors"
, addLegend = TRUE
, legendx = "bottomleft"
, legendncol = 1
, legendtitle = "fishing activity"
, plotPoints = TRUE
, legPoints = FALSE
, colPoints = 1
, colLand = "sienna"
, addICESgrid = FALSE
, addScale = TRUE
, outGridFile = ""
, outPlot = ""
, ...)
}
\arguments{
\item{dF}{a dataFrame containing point data}
\item{nameLon}{name of the column in the dataFrame containing Longitude or x values}
\item{nameLat}{name of the column in the dataFrame containing Latitude or y values}
\item{nameVarToSum}{optional name of the column in the dataFrame containing the attribute values to sum in the grid. If set to "" points are counted }
\item{cellsizeX}{length X (horizontal) of desired grid cells, in same units as the coordinates}
\item{cellsizeY}{length Y (vertical) of desired grid cells, in same units as the coordinates}
\item{we}{western bounds of the desired grid}
\item{ea}{eastern bounds of the desired grid}
\item{so}{southern bounds of the desired grid}
\item{no}{northern bounds of the desired grid}
\item{gridValName}{the name to give to the attribute column of the returned \code{SpatialGridDataFrame}, set to 'fishing' by default}
\item{plotMap}{whether to plot a map of the resulting grid}
\item{plotTitle}{optional title to add to the plot}
\item{numCats}{how many categories to classify grid values into for map plot (uses\code{pretty()}) classification)}
\item{paletteCats}{color pallete to use}
\item{addLegend}{whether to add a legend to the plot}
\item{legendx}{position of legend should be one of 'bottomright', 'bottom', 'bottomleft', 'left', 'topleft', 'top', 'topright', 'right', 'center'}
\item{legendncol}{number of columns in the legend}
\item{legendtitle}{legend title}
\item{plotPoints}{whether to add the original points to the plot}
\item{legPoints}{Logical. Points in legend}
\item{colPoints}{color of points to plot}
  \item{colLand}{color of land}
\item{addICESgrid}{Logical. Adding ICES grid on top}
\item{addScale}{Logical. Adding axes}
\item{outGridFile}{optional name for a gridAscii file to be created from the grid}
\item{outPlot}{optional name for a png file to be created from the plot}
\item{\dots}{NOT used yet}
}
\value{ a \code{SpatialGridDataFrame} object of the grid defined in package \code{sp} }
\references{EU VMS tools project}
\author{Andy South}
\seealso{\code{\link{mapGrid}}}
\examples{
#vmsGridCreate(dF, nameLon = "POS_LONGITUDE", nameLat = "POS_LATITUDE",
# cellsizeX = 0.5, cellsizeY = 0.5,legendx='bottomright',plotPoints=TRUE )
#get the example data
data(tacsat)
#subset the first 2000 points to avoid problems with NAs
dFVMS <- tacsat[1:2000,]
#create vms grid minimum call with defaults
vmsGridCreate(dFVMS,nameLat='SI_LATI',nameLon='SI_LONG')
#making the grid finer
vmsGridCreate(dFVMS,nameLat='SI_LATI',nameLon='SI_LONG',
cellsizeX=0.05,cellsizeY=0.05)
}
|
51b69ebd9b486c31757d80d025313297b6f0e2d2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/funtimes/examples/DR.Rd.R
|
57e428b09e4a826fcfae9571a79117fc514f3871
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,564
|
r
|
DR.Rd.R
|
library(funtimes)
### Name: DR
### Title: Downhill Riding (DR) Procedure
### Aliases: DR
### Keywords: trend ts
### ** Examples
## Not run:
##D ## example 1
##D ## use iris data to test DR procedure
##D
##D data(iris)
##D require(clue) # calculate NMI to compare the clustering result with the ground truth
##D require(scatterplot3d)
##D
##D Data <- scale(iris[,-5])
##D ground_truth_label <- iris[,5]
##D
##D # perform DR procedure to select optimal eps for DBSCAN
##D # and save it in variable eps_opt
##D eps_opt <- DR(t(Data), method="DBSCAN", minPts = 5)$P_opt
##D
##D # apply DBSCAN with the optimal eps on iris data
##D # and save the clustering result in variable res
##D res <- dbscan(Data, eps = eps_opt, minPts =5)$cluster
##D
##D # calculate NMI to compare the clustering result with the ground truth label
##D clue::cl_agreement(as.cl_partition(ground_truth_label),
##D as.cl_partition(as.numeric(res)), method = "NMI")
##D # visualize the clustering result and compare it with the ground truth result
##D # 3D visualization of clustering result using variables Sepal.Width, Sepal.Length,
##D # and Petal.Length
##D scatterplot3d(Data[,-4],color = res)
##D # 3D visualization of ground truth result using variables Sepal.Width, Sepal.Length,
##D # and Petal.Length
##D scatterplot3d(Data[,-4],color = as.numeric(ground_truth_label))
##D
##D
##D ## example 2
##D ## use synthetic time series data to test DR procedure
##D
##D require(funtimes)
##D require(clue)
##D require(zoo)
##D
##D # simulate 16 time series for 4 clusters, each cluster contains 4 time series
##D set.seed(114)
##D samp_Ind <- sample(12,replace=F)
##D time_points <- 30
##D X <- matrix(0,nrow=time_points,ncol = 12)
##D cluster1 <- sapply(1:4,function(x) arima.sim(list(order=c(1,0,0),ar=c(0.2)),
##D n=time_points,mean=0,sd=1))
##D cluster2 <- sapply(1:4,function(x) arima.sim(list(order=c(2,0,0),ar=c(0.1,-0.2)),
##D n=time_points,mean=2,sd=1))
##D cluster3 <- sapply(1:4,function(x) arima.sim(list(order=c(1,0,1),ar=c(0.3),ma=c(0.1)),
##D n=time_points,mean=6,sd=1))
##D
##D X[,samp_Ind[1:4]] <- t(round(cluster1,4))
##D X[,samp_Ind[5:8]] <- t(round(cluster2,4))
##D X[,samp_Ind[9:12]] <- t(round(cluster3,4))
##D
##D
##D # create ground truth label of the synthetic data
##D ground_truth_label = matrix(1,nrow=12,ncol=1)
##D for(k in 1:3){
##D ground_truth_label[samp_Ind[(4*k-4+1):(4*k)]] = k
##D }
##D
##D # perform DR procedure to select optimal delta for TRUST
##D # and save it in variable delta_opt
##D delta_opt <- DR(X,method="TRUST")$P_opt
##D
##D # apply TRUST with the optimal delta on the synthetic data
##D # and save the clustering result in variable res
##D res <- CSlideCluster(X,Delta=delta_opt ,Theta=0.9)
##D
##D # calculate NMI to compare the clustering result with the ground truth label
##D clue::cl_agreement(as.cl_partition(as.numeric(ground_truth_label)),
##D as.cl_partition(as.numeric(res)),method = "NMI")
##D
##D # visualize the clustering result and compare it with the ground truth result
##D # visualization of the clustering result obtained by TRUST
##D plot.zoo(X, type = "l",plot.type = "single",col = res, xlab = "Time Index", ylab ="")
##D # visualization of the ground truth result
##D plot.zoo(X, type = "l",plot.type = "single",col = ground_truth_label,
##D xlab = "Time Index", ylab ="")
## End(Not run)
|
0cf203d7f644be969bca2c6ad39a373734dbcdf5
|
79352071fc1b85bd06824aa6ea9503add90a4a65
|
/AIC/rectangleFixations/48.R
|
6fa4ebe1707b501db2bbe52ea1082fdf39381ff7
|
[] |
no_license
|
seminariosuacj/EyeTracking
|
add9f3e38f0e2e19996897d403a925b885112c18
|
d2d2583ac7eeb8791e746a5744159912129b127e
|
refs/heads/main
| 2023-04-26T17:48:30.433834
| 2021-05-06T02:58:24
| 2021-05-06T02:58:24
| 343,905,521
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,006
|
r
|
48.R
|
library(stats)
library(readr)
library(dplyr)
library(ggplot2)
library(saccades)
# Extract fixations for participant 48 on the "rectangle" stimulus from the
# EMIP eye-tracking raw data, plot them, and save them as TSV.
setwd("D:/Mario/Escritorio/dataset/emip_dataset/rawdata")
rawdata_48 <- read_tsv("48_rawdata.tsv", comment = "#")
# Rename columns to the names saccades::detect.fixations expects
# (trial, x, y).
rawdata_48 <- rename(rawdata_48, trial = Trial)
rawdata_48 <- rename(rawdata_48, x = 'L POR X [px]')
rawdata_48 <- rename(rawdata_48, y = 'L POR Y [px]')
rawdata_48 <- rawdata_48 %>%
select('Time','trial','x','y','L Validity')
# Build the rectangle-task subset for participant 48. The Time window is
# hard-coded for this participant -- TODO confirm where these bounds come
# from (stimulus onset/offset markers, presumably).
rectangle48 <- subset(rawdata_48, Time >= 440199928 & Time <= 479709209)
# Replace the raw timestamps with a uniform time axis in 4 ms steps
# (assumes a 250 Hz sampling rate -- verify against the tracker spec).
rectangle48$time <- seq.int(nrow(rectangle48))*4
# Drop the original Time column (column 1).
rectangle48 = rectangle48[,-c(1)]
# Quick visual check of the gaze points against the 1920x1080 screen.
ggplot(rectangle48, aes(x, y)) +
geom_point(alpha = 0.4, color = "blue") +
coord_fixed() +
expand_limits(x = 1920, y = 1080)
# Keep only fixation events detected by the saccades package.
rectangle48_fixations_save <- subset(detect.fixations(rectangle48), event=="fixation")
rectangle48_fixations_save$participant <- 48
# Save the result.
write_tsv(rectangle48_fixations_save,"D:/Mario/Escritorio/datasets/rectangleconsDATA/48.tsv")
|
0b3d81b38ab0ccce7ce5d4e1f413aab0c24541f3
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/road2stat/MEF/get-apro-sim.R
|
97e9a03beef6046cd532753826fa77fee7162421
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,163
|
r
|
get-apro-sim.R
|
# Multiple Evidence Fusion
#
# Compute APro-based ADR-ADR Similarity
#
# Author: Nan Xiao <me@nanx.me>
#
# Date: Oct 1, 2013
adr <- read.csv('ADR.txt', header = FALSE, stringsAsFactors = FALSE)
# ADR UMLS code -- protein ID data that has been curated by removing
# status == 'wrong', 'false positive' and 'opposite phenotype' cases.
# Original data from supplementary material 2 of Kuhn, et al.
# 'Systematic identification of proteins that elicit drug side effects'.
predrel <- read.csv('se-protein.csv', header = TRUE, stringsAsFactors = FALSE)
high <- as.character(unique(predrel$UMLS.code.of.SE))
real <- as.character(unique(adr$V1))
length(intersect(high, real)) # 377 in common
predrelunique <- aggregate(predrel[-1], by = list(predrel$UMLS.code.of.SE), c)
# Collapse each side effect's protein annotations into one character vector.
predrellist <- vector('list', nrow(predrelunique))
for (i in seq_along(predrellist)) {
  predrellist[[i]] <- strsplit(paste(unlist(predrelunique[i, 2]),
                                     collapse = ' '), ' ')[[1]]
}
names(predrellist) <- as.character(predrelunique[, 1])
# Strip the @inh / @act suffixes and deduplicate protein IDs.
predrellist1 <- vector('list', nrow(predrelunique))
for (i in seq_along(predrellist)) {
  predrellist1[[i]] <- unique(gsub('@act', '', gsub('@inh', '', predrellist[[i]])))
}
names(predrellist1) <- as.character(predrelunique[, 1])
# Sanity check: how many annotations the suffix-stripping removed per SE.
actinhdiff <- data.frame(V1 = sapply(predrellist, length),
                         V2 = sapply(predrellist1, length),
                         V3 = sapply(predrellist, length) -
                              sapply(predrellist1, length))
# Pairwise Jaccard similarity between the 817 ADRs' protein sets.
mat <- matrix(0.0, ncol = 817L, nrow = 817L)
for (i in 1:817) {
  tmp1 <- which(adr[i, ] == names(predrellist1))
  for (j in i:817) {
    tmp2 <- which(adr[j, ] == names(predrellist1))
    if (length(tmp1) == 0L || length(tmp2) == 0L) {
      mat[i, j] <- 0
    } else {
      # BUG FIX: the protein lists must be looked up at the matched
      # positions tmp1/tmp2, not at the ADR row indices i/j -- the
      # ordering of names(predrellist1) is not the same as adr$V1,
      # so the original predrellist1[[i]] indexed the wrong SE.
      mat[i, j] <- length(intersect(predrellist1[[tmp1]], predrellist1[[tmp2]])) /
                   length(union(predrellist1[[tmp1]], predrellist1[[tmp2]]))
    }
  }
}
# Mirror the upper triangle and set the diagonal (self-similarity = 1).
mat[lower.tri(mat)] <- t(mat)[lower.tri(t(mat))]
diag(mat) <- 1
mat4digit <- format(round(mat, 4), nsmall = 4)
write.table(mat4digit, 'aprosim.txt', sep = '\t',
            quote = FALSE, row.names = FALSE, col.names = FALSE)
|
ca1f138957d9761d67a54fb44c76240be5535b77
|
78347da8ea6ae7e1adb8537dbfae2d165ee78405
|
/R/is_pairwise_aln_tool.R
|
ba6cb761fca394c39b6dc6b05b2b14c388c2dbfc
|
[] |
no_license
|
drostlab/homologr
|
3a737ddff2df51b39919afbaecfc5f0ab79081dc
|
e3e72fafa182824fcab2483e9b65fb24db393f88
|
refs/heads/master
| 2022-12-19T13:30:14.904573
| 2020-09-27T20:23:57
| 2020-09-27T20:23:57
| 39,505,809
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 240
|
r
|
is_pairwise_aln_tool.R
|
is_pairwise_aln_tool <- function(tool = NULL){
  # Pairwise alignment methods this package supports.
  supported <- c("NW")
  # With no argument, list the supported methods and report FALSE.
  if (is.null(tool)) {
    message("The followig methods are provided: ", paste0(supported, collapse = ", "))
    return(FALSE)
  }
  # Otherwise report whether the requested tool is one of them.
  tool %in% supported
}
|
efdd5238b5d7e0b20704d2a305140ca3fd19c795
|
533902488dd5266c0b406005c9275048d60153ba
|
/Code/ExpressionProcessing/averageExpressionDirect.R
|
d0b673d23e4df17fc84c864ec372b5db7e2d0198
|
[] |
no_license
|
dlampart/csrproject
|
f5d3e5125ff4a5f1957bbc8a9d3b4daf70e7e625
|
92e5e558ad6d77986c02b3910e87490416c355c4
|
refs/heads/master
| 2021-01-13T00:16:29.844940
| 2016-10-17T01:09:31
| 2016-10-17T01:09:31
| 53,728,048
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 702
|
r
|
averageExpressionDirect.R
|
library(data.table)
# Average replicate expression columns (one per CEL file) into a single
# column per cell type, for cell types that have DHS data.
load("interimData/expressionUnaveragedDirect.RDat")  # provides expressionUnaveraged
load("interimData/fileNameTables.RDat")              # provides celNames (data.table -- TODO confirm)
fileIds <- colnames(expressionUnaveraged)
geneNames <- rownames(expressionUnaveraged)
# Keep only cell types flagged as having DHS data.
relCel <- celNames[hasDHS == TRUE, ]
cellIds <- relCel[, unique(cellId)]
# Preallocate the genes x cell-types result matrix.
av <- matrix(0, ncol = length(cellIds), nrow = length(geneNames))
rownames(av) <- geneNames
colnames(av) <- cellIds
for (i in seq_along(cellIds)) {
  print(i)  # progress indicator
  # Logical mask over expression columns belonging to this cell type's
  # CEL files.
  subInd <- relCel[cellIds[i] == cellId, is.element(fileIds, CELfileName)]
  bb <- expressionUnaveraged[, subInd]
  if (sum(subInd) > 1) {
    # Several replicates: average row-wise.
    av[, i] <- rowMeans(bb)
  } else {
    # Single replicate: bb is already a plain vector.
    av[, i] <- bb
  }
}
averagedExpression <- av
save(averagedExpression, file = "interimData/overallAveragedExpressionDirect.RDat")
|
0b609840f517329a3105a1974ead6c02f55ec7f3
|
5204b23cf0f44e9f3164462c47693a61dd3cf7a5
|
/shinyPlots/avg_spd_time/ui.R
|
cb18a4e8c2928cd38ac795a3819edc1042b4abba
|
[] |
no_license
|
salsbury/strava_eda
|
9714f376620f45a630b693a213dd656b92fcc075
|
2e8262826a02e0339811cddf04fac1b9ff1839fd
|
refs/heads/master
| 2021-01-10T03:58:53.511516
| 2015-05-22T20:05:47
| 2015-05-22T20:05:47
| 36,092,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 390
|
r
|
ui.R
|
# Shiny UI: time series of a racer's average running speed.
# NOTE(review): relies on a data frame `runs_racer` (with an `id` column)
# existing in the app's environment (e.g. defined in global.R) -- confirm
# before reusing this file standalone.
shinyUI(fluidPage(
  titlePanel("Time Series of Avg_Speed Run"),
  sidebarLayout(
    sidebarPanel(
      # Racer selector, date range, and a submit button that gates
      # re-rendering until pressed; the data table echoes the selection.
      selectInput("num", "Select ID of Racer", choices = unique(runs_racer$id), selected = 1),
      dateRangeInput("dates", "Choose the range of dates.", start = "2006-01-01"),
      submitButton("Submit"),
      dataTableOutput("tab")
      ),
    mainPanel(plotOutput("plot"))
  )
))
|
9ba19ba21b450dbceefe5942cef4d568e013daaa
|
db6e1efe62ca5ed1c9f529d3300a75577157321d
|
/lipidome/筛选样本.R
|
5d3750f5104594e2a32dbdb16074fa74375aa99b
|
[] |
no_license
|
pomnoob/lipidG
|
2590562bfab9fcd197a69dd96e39203e8ebaf109
|
8fae606103efd0b8755b86e02cfe9bc32c639c9a
|
refs/heads/master
| 2023-02-19T04:10:07.763133
| 2021-01-21T13:10:26
| 2021-01-21T13:10:26
| 320,478,341
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,194
|
r
|
筛选样本.R
|
library(tidyverse)
library(zscorer)
# Import the previously curated lipidomics data.
# Harbin and Zhengzhou are still missing because their sample ids cannot
# be matched one-to-one with the assay data.
ldomic_all <- read.csv("data/ldomic_all.csv", stringsAsFactors = FALSE)
ldomic_all <- ldomic_all %>%
  rename(id = sample)
# Import the questionnaire data.
ques <- read.csv(file = "data/Metadata of breastmilk questionnaire.csv", stringsAsFactors = FALSE)
# Derive maternal and infant ages from the questionnaire dates.
ques$birthMon <- as.Date(paste(ques$A2A, ques$A2B,
                               ques$A2C, sep = "-"))  # maternal date of birth
ques$birthBaby <- as.Date(paste(ques$A5A,
                                ques$A5B, ques$A5C, sep = "-"))
ques$sampleDate <- as.Date(ques$N1,
                           format = "%d-%m-%Y")  # sample collection date
# Age at sampling: mother in years, infant in days (zscorer expects days).
ques <- ques %>%
  mutate(ageMon = (sampleDate - birthMon) / 365,
         ageBaby = (sampleDate - birthBaby))
ques$ageBaby <- as.numeric(ques$ageBaby)
ques$ageMon <- as.numeric(ques$ageMon)
ques$id <- as.numeric(ques$id)
# Meaning of the questionnaire variable codes:
# A4: infant gender; TB3: infant body weight; TB1: infant length
# TB6: infant head circumference; A3: parity; M1: maternal education
# M3: family annual income; M4: prepregnant maternal body weight
# M5: maternal body weight postpartum; M7: delivery mode
# TA1: maternal height; TA3: maternal body weight on site; B401: infant allergy
# B5: maternal allergy history; B6: father allergy history
# B1: birth weight; B2: birth length; B3: preterm or not
quesSel <- ques %>%
  select(id, city, A4, TB3, TB1, TB6, A3, M1, M3, M4,
         M5, M7, TA1, B401, B5, B6, ageBaby, ageMon, B1, B2, B3, TA3) %>%
  rename(sex = A4, babyWeight = TB3, babyLength = TB1,
         babyHead = TB6, parity = A3, edu = M1, income = M3,
         preMonWeight = M4, postMonWeight = M5, delivery = M7, monHeight = TA1,
         monWeight = TA3, allergy = B401, MonAllergy = B5, FatAllergy = B6,
         birthWeight = B1, birthLength = B2, preterm = B3)
# Merge lipidomics and questionnaire data (samples present in both).
ldomic <- inner_join(ldomic_all, quesSel, by = "id")
# Infant data: check for implausible weight/length values.
boxplot(ldomic$babyWeight)
# Two samples weigh more than 60 -- set to NA (units not stated in SOURCE;
# TODO confirm).
ldomic$babyWeight[ldomic$babyWeight > 60] <- NA
# No outliers in infant length.
boxplot(ldomic$babyLength)
# Maternal data: heights below 100 cm are treated as entry errors.
summary(ldomic$monHeight)
boxplot(ldomic$monHeight)
ldomic$monHeight[ldomic$monHeight < 100] <- NA
# Pre-pregnancy weight: inspect distribution, drop values >= 120.
summary(ldomic$preMonWeight)
boxplot(ldomic$preMonWeight)
ldomic$preMonWeight[ldomic$preMonWeight >= 120] <- NA
# Postpartum weight: same cleaning rule.
summary(ldomic$postMonWeight)
boxplot(ldomic$postMonWeight)
ldomic$postMonWeight[ldomic$postMonWeight >= 120] <- NA
# On-site (sampling) weight and derived BMI.
summary(ldomic$monWeight)
boxplot(ldomic$monWeight)
ldomic$monBMI <- ldomic$monWeight / (ldomic$monHeight / 100)^2
summary(ldomic$monBMI)
# Compute WHO growth z-scores: weight-for-age (waz) and height-for-age (haz).
ldomic <- addWGSR(data = ldomic, sex = "sex", firstPart = "babyWeight",
                  secondPart = "ageBaby", index = "wfa", output = "waz")
ldomic <- addWGSR(data = ldomic, sex = "sex", firstPart = "babyLength",
                  secondPart = "ageBaby", index = "hfa", output = "haz")
# Categorise maternal BMI: 1 = obese (>27.5), 2 = normal (18.5-23),
# 3 = overweight (23-27.5); values outside all ranges become NA.
ldomic <- ldomic %>%
  mutate(nBMI = case_when(monBMI > 27.5 ~ 1,
                          monBMI >= 18.5 & monBMI <= 23 ~ 2,
                          monBMI > 23 & monBMI <= 27.5 ~ 3))
table(ldomic$nBMI)
# Sample selection: infants older than 60 days and not preterm.
# NOTE(review): the original comment said "at least 3 months", but 60 days
# is ~2 months -- confirm the intended threshold.
ldomicSel <- ldomic %>%
  filter(ageBaby > 60 & preterm == 1)
# Alternative selection: exclude preterm infants only.
ldomic.p <- ldomic %>%
  filter(preterm == 1)
# Strategy 1: group by weight-for-age z-score (1 = waz > 1, 2 = waz < -1).
ldomic.p <- ldomic.p %>%
  mutate(nwaz = case_when(waz > 1 ~ 1,
                          waz < -1 ~ 2))
table(ldomic.p$nwaz)
# Keep only the high / low waz groups and export with the lipid columns.
waz1.ld <- ldomic.p %>%
  filter(nwaz == 1 | nwaz == 2)
waz1.exp.ld <- waz1.ld %>%
  select(id, nwaz, 3:160) %>%
  rename(group = nwaz)
write.csv(waz1.exp.ld, file = "lipidome/WAZ all lipid no preterm 1 and -1.csv", row.names = FALSE)
# Allergy vs. normal: recode allergy so 1 becomes 0 (no allergy) and
# everything else becomes 1.
table(ldomic.p$allergy)
ldomic.p$allergy <- ifelse(ldomic.p$allergy == 1, 0, 1)
ld.allergy <- ldomic.p %>%
  select(id, allergy, 3:160) %>%
  rename(group = allergy)
# BUG FIX: the original wrote waz1.exp.ld here (copy-paste from the WAZ
# export above); the allergy table is what this file should contain.
write.csv(ld.allergy, file = "lipidome/Allergy all lipid no preterm 1 and -1.csv", row.names = FALSE)
|
79ff82b6c02a498dd83e7ac8ebcd6c14d1f3d661
|
c7ead64743d99c9ad769919e2bcfa743a16c8386
|
/tools/air_pollution/pollutantmean.R
|
445ad6f0856217a47b6852b903ebbbd02329de81
|
[
"MIT"
] |
permissive
|
eniltonangelim/data-science
|
24d862864cc619d37bfd42671360b88accc115d1
|
c39d4f9ac21e2eb0ffdefd7349af08216b7a020c
|
refs/heads/master
| 2021-12-20T19:58:20.529894
| 2021-11-30T22:52:55
| 2021-11-30T22:52:55
| 97,025,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 651
|
r
|
pollutantmean.R
|
DATA_SETS_REPOSITORY <- '../datasets/air_pollution/specdata/'

# Mean of `pollutant` (e.g. "sulfate" or "nitrate") across the monitor CSV
# files identified by `id` in `directory`, ignoring NA readings.
# `directory` is expected to end with a path separator, as in
# DATA_SETS_REPOSITORY above. Returns a single numeric value.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Read every requested monitor file first, then bind once -- avoids the
  # O(n^2) cost of rbind-ing a growing data frame inside the loop.
  frames <- lapply(id, function(monitor) {
    file_data <- paste(directory, sprintf("%03d", monitor), '.csv', sep = '')
    read_csv(file_data, col_types = "cddi")
  })
  airp_data <- do.call(rbind, frames)
  mean(airp_data[[pollutant]], na.rm = TRUE)
}
#print(pollutantmean(DATA_SETS_REPOSITORY, "sulfate", 1:10)) ## 4.064128
#print(pollutantmean(DATA_SETS_REPOSITORY, "nitrate", 70:72)) ## 1.706047
#print(pollutantmean(DATA_SETS_REPOSITORY, "nitrate", 23)) ## 1.280833
|
fb11d55ca6a60c675e0306f8be72035aae517236
|
794863d2e9e26424a04079a91c3a23063bdb4f8e
|
/man/dy2009.Rd
|
d6d070b940af0d99530d8f3343580b63af89d3c3
|
[] |
no_license
|
GabauerDavid/ConnectednessApproach
|
ef768e64e0bc458ad180bac6b667b3fe5662f01d
|
0ca4799a2f5aa68fdd2c4a3e8a2e0e687d0a9b17
|
refs/heads/main
| 2023-08-09T07:23:45.002713
| 2023-07-27T22:57:04
| 2023-07-27T22:57:04
| 474,462,772
| 47
| 20
| null | 2023-03-12T04:22:26
| 2022-03-26T20:47:15
|
R
|
UTF-8
|
R
| false
| false
| 438
|
rd
|
dy2009.Rd
|
\name{dy2009}
\docType{data}
\alias{dy2009}
\title{Dataset of Diebold and Yilmaz (2009)}
\description{
For detailed information see: Diebold, F. X., & Yilmaz, K. (2009). Measuring financial asset return and volatility spillovers, with application to global equity markets. The Economic Journal, 119(534), 158-171.
}
\usage{data(dy2009)}
\format{A zoo data.frame containing 30x1141 observations.}
\source{Yahoo Finance}
\keyword{datasets}
|
478abde394e6866f0e7238e1b2bfcb285b627b8c
|
1522b308afd42bc80bf4b5192c2d1670f8579c26
|
/man/best.layout.Rd
|
9b7cbff16fdfa00c1e7708ee2dc9d7724a1fa247
|
[] |
no_license
|
covaruber/Fragman
|
2c1830036ccd968c1d4df82983c0cb74d7c84651
|
55fd3627d9f6699ad97f1643883ce93387b382c3
|
refs/heads/master
| 2020-04-11T21:44:30.948814
| 2018-12-17T10:45:13
| 2018-12-17T10:45:13
| 162,114,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
rd
|
best.layout.Rd
|
\name{best.layout}
\alias{best.layout}
\title{complementary tools for layout}
\description{
This function just finds the best layout fit for a desired number of plots.
}
\usage{
best.layout(x)
}
\arguments{
\item{x}{A scalar value indicating the number of plots desired}
}
\details{
No major details
}
\value{
Returns the best layout
\describe{
\item{res}{the number of rows and columns giving the best fit}
}
}
\references{
Covarrubias-Pazaran G, Diaz-Garcia L, Schlautman B, Salazar W, Zalapa J. Fragman: An R package for fragment analysis. 2016. BMC Genetics 17(62):1-8.
Robert J. Henry. 2013. Molecular Markers in Plants. Wiley-Blackwell. ISBN 978-0-470-95951-0.
Ben Hui Liu. 1998. Statistical Genomics. CRC Press LLC. ISBN 0-8493-3166-8.
}
\examples{
best.layout(9)
}
|
642f55325fd49f8a003f31a90efb393f503057c1
|
e56262bee9693f61021fea5fc000ebcf46ac34bb
|
/R/Partitions.R
|
82b3a9476313095305d8ac648a850b362f69e59d
|
[] |
no_license
|
nanoquanta/TreeTools
|
d1ed57deb83122366b422117642eb986df1457bf
|
a858cf1c96de19b786b8243ef3d4ddfd6d0d8dd1
|
refs/heads/master
| 2020-08-26T09:35:04.083356
| 2019-10-19T10:59:15
| 2019-10-19T10:59:15
| 216,997,642
| 0
| 1
| null | 2019-10-23T07:41:41
| 2019-10-23T07:41:40
| null |
UTF-8
|
R
| false
| false
| 3,492
|
r
|
Partitions.R
|
#' Tree2Splits
#'
#' Converts a phylogenetic tree to an array of bipartition splits.
#'
#' @param tr A tree of class \code{\link[ape:read.tree]{phylo}}, with tips
#' bearing integer labels (i.e. `tr$tip.label == 1:N`).
#' @return Returns a two-dimensional array. Columns correspond to unique
#' bipartitions, named with the number of a node that denotes the partition.
#' Rows correspond to tips `1:N`.
#'
#' @author Martin R. Smith
#'
#' @examples Tree2Splits(ape::rtree(6, tip.label=1:6, br=NULL))
#'
#' @importFrom ape reorder.phylo
#' @export
Tree2Splits <- function (tr) {
  # Reorder edges into postorder before extracting clades.
  tr <- reorder.phylo(tr, 'postorder')
  tip_label <- tr$tip.label
  n_tip <- as.integer(length(tip_label))
  # In ape's node numbering the root is node n_tip + 1.
  root <- length(tip_label) + 1L
  # phangorn_bipCPP is an internal C++ routine (ported from phangorn's
  # bip) that returns, per node, the tips descended from it -- assumes
  # tips are labelled 1:N, per the roxygen note above.
  bipartitions <- phangorn_bipCPP(tr$edge, n_tip)
  # Drop entries 1..root (the tips themselves plus the root, which split
  # nothing) and convert each remaining clade into a logical membership
  # vector over the tips: ret[t, s] is TRUE iff tip t lies in split s.
  ret <- vapply(bipartitions[-seq_len(root)],
                function (x) seq_len(n_tip) %in% x,
                logical(n_tip))[seq_len(n_tip), , drop=FALSE]
  rownames(ret) <- tip_label
  # Each column is named after the internal node that induces the split.
  colnames(ret) <- seq_len(ncol(ret)) + root
  # Collapse splits that are equivalent up to inverting TRUE/FALSE.
  ret <- UniqueSplits(ret)
  # Return:
  DropSingleSplits(ret)
}
#' @rdname Tree2Splits
#' @export
#' @keywords internal
Tree2Bipartitions <- Tree2Splits
#' Unique Splits
#'
#' Removes equivalent duplicates from a matrix of bipartitions.
#'
#' Two splits are equivalent when one is the complement of the other, so each
#' column is first normalised so that the first taxon falls in the `FALSE`
#' partition before duplicate columns are detected.
#'
#' @param splits A logical matrix containing one named row corresponding to each
#'   terminal leaf of a tree, and one column per bipartition split; each split
#'   divides terminals into two bipartitions, members of one marked `TRUE` and
#'   members of the other marked `FALSE`.
#' @param preserveParity Logical specifying whether to preserve the `TRUE` and
#'   `FALSE` status within each split (which takes marginally longer). If
#'   `FALSE`, each split will be defined such that taxa in the same partition
#'   as the first element are marked `FALSE`, and other taxa marked `TRUE`.
#'
#' @return The splits matrix, with all duplicate splits removed.
#'
#' @examples
#' set.seed(1)
#' splits <- Tree2Splits(ape::rtree(6, br=NULL))
#' UniqueSplits(splits, preserveParity=TRUE)
#'
#' @author Martin R. Smith
#' @export
UniqueSplits <- function (splits, preserveParity = FALSE) {
  # Columns whose first row is TRUE are inverted to normalise polarity.
  flipped <- splits[1, ]
  splits[, flipped] <- !splits[, flipped]
  # With polarity normalised, equivalent splits become literal duplicates.
  keep <- !duplicated(t(splits))
  ret <- splits[, keep, drop = FALSE]
  if (preserveParity) {
    # Undo the normalisation for the columns that survived de-duplication.
    restore <- flipped[keep]
    ret[, restore] <- !ret[, restore]
  }
  ret
}
#' Drop Single Splits
#'
#' Removes splits that pertain only to a single taxon from a splits object.
#'
#' Bipartition splits are divisions, implied by each edge or node of an unrooted
#' tree topology, that divide the taxa into two groups (one of which is a clade).
#'
#' A split that separates a single leaf from all other taxa is present, by
#' definition, in every tree that contains that taxon, so it carries no
#' information when comparing trees.  This function strips such trivial splits
#' from a matrix of bipartitions.
#'
#' @param split A logical matrix in which each column corresponds to a
#'   bipartition split and each row to a taxon.
#'
#' @return The input matrix, retaining only the columns whose two partitions
#'   each contain more than one taxon.
#'
#' @author Martin R. Smith
#'
#' @export
DropSingleSplits <- function (split) {
  # Count taxa on each side of every split; both sides must exceed one.
  inSplit <- colSums(split)
  outSplit <- colSums(!split)
  split[, inSplit > 1 & outSplit > 1, drop = FALSE]
}
|
c1c88b9d416f2c80ed77ba81b9ce0874776a4cf8
|
dc25ff163ba1e37f2565078167fce6dac270ca7e
|
/bestoption.R
|
90a25d81bb0d8a15e85c01dfbd53711b2ebcbe00
|
[] |
no_license
|
ziulcarvalho1/Fastfood-
|
42770af2fa1009e83aa5c060e805b09ef95026b2
|
b94657c4cd47305c35b8db21a775e46ecf6ebacd
|
refs/heads/main
| 2023-03-23T22:32:11.534012
| 2021-03-08T11:47:03
| 2021-03-08T11:47:03
| 345,625,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,518
|
r
|
bestoption.R
|
## This program indicates which weather parameter works best as a predictor of
## demand for each type of product. The best parameter is identified separately
## for each of the 9 combinations of day of week and month of the year.
############################################################################################################
## Load the necessary libraries
library(caTools)
library(stringr)
## Load the database
setwd("/Curso-ML/Assignment-1/")
##data0 <- read.csv("total-sp-11.csv")
##linear1x <- lm(formula=data[[26]] ~ data[[12]])
##linear2x <- cor(data[[26]], data[[12]])
##print(linear1x)
##print(coef(linear1x))
##print(linear2x)
data0 <- read.csv("data-06.csv")
## Treat the database eliminating NULLs, zeros and distorting values
## Separate the data into training data (95%) and test data (5%)
data_sample = sample.split(data0,SplitRatio=0.95) ##here we separate the file into training and test data
dataxx = subset(data0,data_sample==TRUE) ## General training data
data1 = subset(data0,data_sample==FALSE) ##test data
##dataxx <- data0
##data1 <- data0
## Drop unused columns from the test data
##print(data1)
data1$cbmaker <- NULL
data1$combos <- NULL
data1$kit2 <- NULL
data1$kit1 <- NULL
data1$promotion <- NULL
print(data1)
len1 <- nrow(data1)
print(len1)
## Pre-allocate 1 x 27 matrices for the per-product regression parameters,
## for the linear regression Ax + B = Y (ax = slope, bx = intercept).
## Dx holds the column index of the weather parameter selected as predictor -
## the one identified with the biggest correlation.
ax<-matrix(, nrow=1, ncol=27)
bx<-matrix(, nrow=1, ncol=27)
Dx<-matrix(, nrow=1, ncol=27)
print(Dx)
for (ss1 in 1:27){
  ax[1,ss1]<-0
  bx[1,ss1]<-0
  Dx[1,ss1]<-0
}
class(ax)
##print(ax)
##print(bx)
## Main loop: for every row (day) in the test data, fit per-product regressions
## on the matching training subset and record the prediction errors.
for (register in 1:len1){
  print(data1[register,])
  dia <-data1$diasem[register]   # day of week of this test row
  mes1 <-data1$mes[register]     # month of this test row
  print(register)
  print(dia)
  print(mes1)
  ## Segment the training data into the subgroup that shares this test row's
  ## day of week and month.
  data <- subset.data.frame(dataxx, diasem == dia & mes == mes1) ##subset training week and month
  #### Bell curve - tail elimination - Here we remove from the file the outliers
  ####
  print(data)
  ##dim(dataxx)
  ##print(data)
  ##print(data1)
  ##summary(data)
  ##summary(data1)
  len3=nrow(data)
  ##print(len3)
  ## Weather parameters (columns 10-15 of the training subset).
  precipitac <-data[[10]]
  tempmax <-data[[11]]
  tempmin <-data[[12]]
  tempmed <-data[[13]]
  umidade <-data[[14]]
  insolacao <-data[[15]]
  data$cbmaker <- NULL
  data$combos <- NULL
  data$kit2 <- NULL
  data$kit1 <- NULL
  data$promotion <- NULL
  ## Product demand columns (19-27 after dropping the unused columns above).
  desserts <-data[[19]]
  pizzas <-data[[20]]
  beverage <-data[[21]]
  sfiha <-data[[22]]
  snack <-data[[23]]
  pastas <-data[[24]]
  dishes <-data[[25]]
  savory <-data[[26]]
  salads <-data[[27]]
  print(data)
  z1 = 19   # z1 iterates over product columns 19..27
  z2 = 10   # z2 iterates over weather columns 10..15
  while (z1<28){
    maiorrs <- -10000    # best (largest) correlation found so far
    melhoropcao <- 0     # column index of the best weather parameter so far
    while (z2<16){
      var1 <-data[[z1]]
      var2 <-data[[z2]]
      ###print(z1)
      ###print(z2)
      print(var1)
      print(var2)
      ##class(var1)
      ##class(var2)
      linear <- lm(formula=var2 ~ var1)
      summary(linear)
      RS <- cor(var2,var1,method="pearson") ##summary(linear)$r.squared
      print(RS)
      if (RS >= maiorrs) { ###Here we select which one of the six factors works best as a predictor
        maiorrs=RS
        melhoropcao = z2
        ##print(z2)
      }
      ##scatter.smooth(x=var1, y=var2, main="temp min ~ sfiha") # scatterplot
      ##
      ##print(linear)
      ##R-squared (R2) is a statistical measure that represents the proportion of the variance for a
      ##dependent variable that's explained by an independent variable or variables in a regression model.
      ##Whereas correlation explains the strength of the relationship between an independent and dependent
      ##variable, R-squared explains to what extent the variance of one variable explains the variance of the
      ##second variable. So, if the R2 of a model is 0.50, then approximately half of the observed variation
      ##can be explained by the model's inputs.
      ##R-squared indicates the percentage of the samples that fall onto the regression line;
      ##the bigger the R-squared, the better the prediction is.
      ##We are calculating six correlations, and by doing that we identify which one of the six
      ##weather parameters is best suited to predict the demand.
      z2 = z2+1
    }
    print(z1)
    print(melhoropcao)
    ## Fit demand ~ best weather parameter and store slope/intercept/choice.
    linearx <- lm(formula=data[[z1]] ~ data[[melhoropcao]])
    Cx <-coef(linearx)
    Bx <- Cx[[1]]   # intercept
    Ax <- Cx[[2]]   # slope
    print("novos valores")
    print(Cx)
    print(Ax)
    print(Bx)
    ax[1,z1]<-Ax
    bx[1,z1]<-Bx
    Dx[1,z1] <- melhoropcao
    print(z1)
    ## A and B are the parameters of Ax + B = Y where X is the chosen weather
    ## parameter and Y is the demand.
    ## Now apply this to the test dataset for this specific value of z1 (product).
    melhoropcao <- 0
    z2 <- 10
    z1 <- z1+1
    ##print(z1)
  }
  ## quant19 <- (ax[19]*data1[[ax[1,1]])+bx[1,19]
  ## print(ax[1,1])
  ## print(quant19)
  ## quant20 <- (ax[1,20]*data1[[ax[1,1]])+bx[1,20]
  ## quant21 <- (ax[1,21]*data1[[ax[1,1]])+bx[1,21]
  ## quant22 <- (ax[1,22]*data1[[ax[1,1]])+bx[1,22]
  ## quant23 <- (ax[1,23]*data1[[ax[1,1]])+bx[1,23]
  ## quant24 <- (ax[1,24]*data1[[ax[1,1]])+bx[1,24]
  ## quant25 <- (ax[1,25]*data1[[ax[1,1]])+bx[1,25]
  ## quant26 <- (ax[1,26]*data1[[ax[1,1]])+bx[1,26]
  ## quant27 <- (ax[1,27]*data1[[ax[1,1]])+bx[1,27]
  print(ax)
  print(bx)
  print(Dx)
  print(register)
  print(data1[register,])
  ##print(ax[1,19])
  ##print(bx[1,19])
  ##print(Dx[1,19])
  ##print(data1[register,Dx[1,19]])
  ## Predicted demand for each product:
  ## slope * (test row's value of the chosen weather column) + intercept.
  quant19 <- (ax[1,19]*data1[register,Dx[1,19]])+bx[1,19]
  ##print(quant19)
  ##print(data1[register,20])
  ##print(ax[1,20])
  ##print(bx[1,20])
  ##print(Dx[1,20])
  ##print(data1[register,Dx[1,19]])
  quant20 <- (ax[1,20]*data1[register,Dx[1,20]])+bx[1,20]
  ##print(quant20)
  ##print(data1[register,20])
  quant21 <- (ax[1,21]*data1[register,Dx[1,21]])+bx[1,21]
  quant22 <- (ax[1,22]*data1[register,Dx[1,22]])+bx[1,22]
  quant23 <- (ax[1,23]*data1[register,Dx[1,23]])+bx[1,23]
  quant24 <- (ax[1,24]*data1[register,Dx[1,24]])+bx[1,24]
  quant25 <- (ax[1,25]*data1[register,Dx[1,25]])+bx[1,25]
  quant26 <- (ax[1,26]*data1[register,Dx[1,26]])+bx[1,26]
  quant27 <- (ax[1,27]*data1[register,Dx[1,27]])+bx[1,27]
  ## Store the predictions alongside the test data.
  data1$dessert1[register] <-quant19
  data1$pizzas1[register] <-quant20
  data1$beverage1[register]<-quant21
  data1$sfiha1[register] <-quant22
  data1$snack1[register] <-quant23
  data1$pastas1[register] <-quant24
  data1$dishes1[register] <-quant25
  data1$savory1[register] <-quant26
  data1$salads1[register] <-quant27
  print(register)
  ##print(data1$desserts[register])
  ##print((data1$desserts[register]-quant19))
  ##print(abs((data1$desserts[register]-quant19)/data1$desserts[register]))
  ## Absolute percentage error of each prediction against the observed demand.
  data1$desserte[register] <- abs((data1$desserts[register]-quant19)/data1$desserts[register])
  data1$pizzase[register] <- abs((data1$pizzas[register]-quant20)/data1$pizzas[register])
  data1$beveragee[register]<- abs((data1$beverage[register]-quant21)/data1$beverage[register])
  data1$sfihae[register] <- abs((data1$sfiha[register]-quant22)/data1$sfiha[register])
  data1$snacke[register] <- abs((data1$snack[register]-quant23)/data1$snack[register])
  data1$pastase[register] <- abs((data1$pastas[register]-quant24)/data1$pastas[register])
  data1$dishese[register] <- abs((data1$dishes[register]-quant25)/data1$dishes[register])
  data1$savorye[register] <- abs((data1$savory[register]-quant26)/data1$savory[register])
  data1$saladse[register] <- abs((data1$salads[register]-quant27)/data1$salads[register])
  print(data1[register,])
}
## Mean absolute percentage error across all test rows, per product category.
print(mean(data1$desserte))
print(mean(data1$pizzase))
print(mean(data1$beveragee))
print(mean(data1$sfihae))
print(mean(data1$snacke))
print(mean(data1$pastase))
print(mean(data1$dishese))
print(mean(data1$savorye))
print(mean(data1$saladse))
print(data1)
##write.csv(data1,'/Curso-ML/Assignment-1/result-06.csv')
|
e753f9efa1ffb887ff16cdeb74b17a78d0a5f59b
|
27f6aecff55a96ebd8b8e3204d6ed529d5e7d3e0
|
/plot3.R
|
4321d82790c7a230fe0b9dfdfb33108f8a72de5b
|
[] |
no_license
|
hamode23/ExData_Plotting1
|
346600e294b425a5691430db95194d48cfee98a3
|
ab1d244dc87245fa47b9bea5d57e71ae1def8fd2
|
refs/heads/master
| 2021-01-17T22:16:02.085746
| 2015-08-08T07:48:19
| 2015-08-08T07:48:19
| 40,367,281
| 0
| 0
| null | 2015-08-07T15:28:11
| 2015-08-07T15:28:11
| null |
UTF-8
|
R
| false
| false
| 879
|
r
|
plot3.R
|
## Getting full dataset
fdata <- read.csv("household_power_consumption.txt", header = TRUE, sep = ';', na.strings = "?")
fdata$Date <- as.Date(fdata$Date, format = "%d/%m/%Y")

## Subsetting the data to the two days of interest
dat3 <- subset(fdata, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(fdata)  # free the large full dataset

## Convert dates: combine Date and Time into a single POSIXct timestamp.
## BUG FIX: the original used `data$Time`, but no object `data` exists here
## (`data` resolves to base R's data() function, so subsetting it errors);
## the intended column is dat3$Time.
dateTimeTmp <- paste(as.Date(dat3$Date), dat3$Time)
dat3$dateTime <- as.POSIXct(dateTimeTmp)

## Plot data to PNG file (the stray dev.cur() query was removed as a no-op)
png(filename = "plot3.png", width = 480, height = 480)
with(dat3, {
  plot(Sub_metering_1 ~ dateTime, type = "l",
       ylab = "Energy sub metering", xlab = "")  # fixed ylab copy-pasted from another plot
  lines(Sub_metering_2 ~ dateTime, col = 'Red')
  lines(Sub_metering_3 ~ dateTime, col = 'Blue')
})
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
ad7f49036f00214d08c91ad37bdee7e36f635b88
|
cdf3d4455bfbba06427a143e052d73e4f38ec57c
|
/danielLib/R/get_groups_cor_tbl.R
|
c9d12c8177c5eabda8d126cdaac349db8c12cca4
|
[
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
danlooo/DAnIEL
|
c194d695bb74577934fc68e1fa56da22c8470bf8
|
198fcd82546d3020af67020e5c021b1633718ba4
|
refs/heads/main
| 2023-09-06T08:39:28.620621
| 2021-11-11T15:49:59
| 2021-11-11T15:49:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,425
|
r
|
get_groups_cor_tbl.R
|
# Copyright by Daniel Loos
#
# Research Group Systems Biology and Bioinformatics - Head: Assoc. Prof. Dr. Gianni Panagiotou
# https://www.leibniz-hki.de/en/systembiologie-und-bioinformatik.html
# Leibniz Institute for Natural Product Research and Infection Biology - Hans Knöll Institute (HKI)
# Adolf-Reichwein-Straße 23, 07745 Jena, Germany
#
# The project code is licensed under BSD 2-Clause.
# See the LICENSE file provided with the code for the full license.
#' Create nested correlation tibble containing plots and graphs
#'
#' Reads the per-group correlation results produced by the DAnIEL pipeline,
#' filters the edges by p value and absolute correlation, and builds one
#' correlation graph and one network plot per correlation group.
#'
#' @param correlation_dir Path to correlation directory of DAnIEL. Must contain
#'   a file groups.txt and a subdirectory (holding a results.csv) for each group
#' @param nodes_tbl Tibble of feature annotations; must contain a `feature`
#'   column and the column named by `features_grouping_col`
#' @param features_grouping_col Column of `nodes_tbl` used to colour nodes
#' @param n_features Number of most frequent feature groups to colour
#'   individually; all remaining groups are lumped into "other"
#' @param max_p_value Edges with a larger p value are dropped (ignored when the
#'   results carry no p values at all, e.g. BAnOCC output)
#' @param min_abs_cor Edges with a smaller absolute correlation are dropped
#' @return A tibble nested by correlation group with list columns `edges_tbl`,
#'   `graph` and `plt`, or NULL when no edge survives filtering
get_groups_cor_tbl <- function(
    correlation_dir, nodes_tbl, features_grouping_col = "order",
    n_features = 8, max_p_value = 0.05, min_abs_cor = 0.1) {
  correlation_groups <-
    sprintf("%s/groups.txt", correlation_dir) %>%
    readr::read_lines() %>%
    # correlation result must be available
    # NOTE(review): this path omits the "/" between dir and group that the
    # sprintf below inserts - presumably correlation_dir ends with "/"; confirm.
    purrr::keep(~ paste0(correlation_dir, .x, "/results.csv") %>% file.exists())

  # Read every group's results.csv, tag rows with the group, and stack them.
  edges_tbl <-
    tibble::tibble(correlation_group = correlation_groups) %>%
    dplyr::mutate(
      edges_tbl_path = sprintf("%s/%s/results.csv", correlation_dir, correlation_group),
      edges_tbl = purrr::map2(edges_tbl_path, correlation_group, ~ readr::read_csv(.x) %>% dplyr::mutate(correlation_group = .y))
    ) %>%
    dplyr::pull(edges_tbl) %>%
    dplyr::bind_rows() %>%
    dplyr::rename(from = feature_a, to = feature_b) %>%
    # filter by p value if provided. This is needed to pass banocc results
    dplyr::filter(if (p_value %>% is.na() %>% all()) TRUE else p_value <= max_p_value) %>%
    dplyr::filter(abs(cor) >= min_abs_cor)

  if (edges_tbl %>% nrow() == 0) {
    warning("Filtered edge table must not be empty. Return NULL instead.")
    return(NULL)
  }

  # Symmetric colour-scale limits spanning the largest absolute correlation.
  cor_limits <- edges_tbl$cor %>%
    abs() %>%
    max() %>%
    {
      c(-., .)
    }

  # get most frequent node groups (by how often their features occur as
  # endpoints of the filtered edges)
  feature_groups <-
    edges_tbl %>%
    {
      c(.$from, .$to)
    } %>%
    tibble::tibble(feature = .) %>%
    group_by(feature) %>%
    dplyr::count() %>%
    dplyr::inner_join(nodes_tbl, by = "feature") %>%
    dplyr::group_by_at(features_grouping_col) %>%
    dplyr::summarise(n = sum(n)) %>%
    dplyr::arrange(-n) %>%
    utils::head(n_features) %>%
    purrr::pluck(features_grouping_col)

  # One viridis colour per retained group; everything else is grey "other".
  node_colors <-
    viridisLite::viridis(length(feature_groups)) %>%
    magrittr::set_names(feature_groups) %>%
    magrittr::inset("other", "grey")

  # same node tibble for all groups to uniform color coding
  nodes_tbl <-
    nodes_tbl %>%
    mutate_at(features_grouping_col, ~ ifelse(.x %in% feature_groups, .x, "other")) %>%
    dplyr::mutate(
      color = .[[features_grouping_col]] %>% factor(levels = names(node_colors)),
      tooltip = feature
    ) %>%
    dplyr::rename(name = feature)

  # Nest the edges per group and attach a graph and a plot to each group.
  nested_cor_tbl <-
    edges_tbl %>%
    dplyr::group_by(correlation_group) %>%
    tidyr::nest() %>%
    dplyr::rename(edges_tbl = data) %>%
    dplyr::mutate(
      graph = purrr::map(edges_tbl, ~ danielLib::get_correlation_graph(edges_tbl = .x, nodes_tbl = nodes_tbl)),
      plt = purrr::map2(graph, correlation_group, ~ danielLib::plot_correlation_network(
        .x, .y,
        limits = cor_limits, node_colors = node_colors, color_title = features_grouping_col
      ))
    )

  return(nested_cor_tbl)
}
|
f215b69264166173a71ad3efb46318b9da20701f
|
d2e4e8b0fde53e8e331e275f8a8777650381d5fd
|
/plot4.R
|
27b93decbb568adb0287e54342f8559c7bfd4bc9
|
[] |
no_license
|
Shivam-1117/EDA-Course-Project-2
|
3fcd82ca4f6ccf1176cc7113903e3b8cce117550
|
328acaf7262f14b19d2f3d4eeb4f843dacc6e7a1
|
refs/heads/master
| 2022-11-23T19:33:07.705513
| 2020-07-26T14:41:44
| 2020-07-26T14:41:44
| 282,592,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 745
|
r
|
plot4.R
|
## Read the source-classification table and the emissions measurements.
scc <- readRDS("Source_Classification_Code.rds")
nei <- readRDS("summarySCC_PM25.rds")

## Coal-combustion-related sources: Short.Name must mention both coal and
## combustion (matching the capitalisations present in the SCC table).
coal.sources <- grep("COAL|coal|Coal", scc$Short.Name, value = TRUE)
comb.sources <- grep("COMB|comb|Comb", scc$Short.Name, value = TRUE)
s <- intersect(coal.sources, comb.sources)

## Keep the SCC codes for those sources, then restrict the NEI records to them.
id <- scc[scc$Short.Name %in% s, c(1, 3)]
nei <- nei[nei$SCC %in% id$SCC, c("fips", "SCC", "Emissions", "year")]

## Total emissions per year.
library(tidyverse)
emissions <- nei %>%
  group_by(year) %>%
  summarise(emissions = sum(Emissions)) %>%
  data.frame()

## Line chart of yearly totals, copied to a PNG file.
library(ggplot2)
g <- ggplot(emissions, aes(year, emissions)) +
  geom_line() +
  labs(x = "Year", y = "Emission (tons)",
       title = "Emissions from coal combustion related sources in US over 1999-2008") +
  theme(plot.title = element_text(hjust = 0.5))
print(g)
dev.copy(png, "plot4.png")
dev.off()
|
eb7d5d55f280d27e8147b0ecfbb8d263dffdeb3e
|
d961e5b4cd130939ed954839d6a6499d7933e57e
|
/man/summarySEwithin.Rd
|
633b392b2c41bedc36c27971244a163aaca90743
|
[
"MIT"
] |
permissive
|
thomas-hinterecker/Ragbu
|
6c6b013b2b9e306d14ddb7562043aea4839d1cf6
|
df53682065e5e011a332655ed7457aa7b6c31560
|
refs/heads/master
| 2021-01-01T16:55:03.594389
| 2018-05-07T08:42:43
| 2018-05-07T08:42:43
| 97,953,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,096
|
rd
|
summarySEwithin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statistics.R
\name{summarySEwithin}
\alias{summarySEwithin}
\title{Summarize within-subjects data}
\usage{
summarySEwithin(data = NULL, measurevar, betweenvars = NULL,
withinvars = NULL, idvar = NULL, na.rm = TRUE, conf.interval = 0.95)
}
\arguments{
\item{data}{A data frame}
\item{measurevar}{The name of a column that contains the variable to be summariezed}
\item{betweenvars}{A vector containing names of columns that are between-subjects variables}
\item{withinvars}{Vector containing names of columns that are within-subjects variables}
\item{idvar}{The name of a column that identifies each subject (or matched subjects)}
\item{na.rm}{Boolean that indicates whether to ignore NA's}
\item{conf.interval}{The percent range of the confidence interval (default is 95\%)}
}
\value{
a data frame with count, mean, standard deviation, standard error of the mean, and confidence interval (default 95\%).
}
\description{
Summarizes data, handling within-subjects variables by removing inter-subject variability
}
|
8c2c7472eedea292161b33087081e483a49043da
|
3a6b6c9f9c96f40af333797da28251a4445097a3
|
/codigos/o3/o3_jags.r
|
44bc3fc64df77150f47040e65ac04d44f3e0bc13
|
[] |
no_license
|
andrequeiroz/mestrado
|
be9d0c8cdd24a7eab38f1221af94c62c67f76b6a
|
05e649053154a639e646cfb083f720f536bc85fa
|
refs/heads/master
| 2021-09-03T15:13:29.683459
| 2018-01-10T02:26:08
| 2018-01-10T02:26:08
| 111,966,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,629
|
r
|
o3_jags.r
|
## Fit Bayesian models to the O3 (ozone) data with JAGS via R2jags.
library(R2jags)
library(DBI)

## Load the achcar2011 ozone dataset from a local SQLite database.
link <- dbConnect(RSQLite::SQLite(), "../../dados/o3.db")
dados <- dbGetQuery(link, "SELECT * FROM achcar2011")
y <- t(dados)      # transposed data matrix passed to the JAGS models
K <- ncol(dados)
N <- nrow(dados)
dbDisconnect(link)
## jags
## NOTE(review): each `result` assignment below overwrites the previous fit,
## so only the last fit (model4) reaches plot(result). Run only the call for
## the model of interest, or store each fit under its own name.
## 8 chains
result <- jags.parallel(data = list("y", "K", "N"), inits = NULL,
                        parameters.to.save = c("mu", "phi", "sigma"),
                        model.file = "../../modelos/o3/o3.jags.model1",
                        n.chains = 8, n.iter = 15000, n.burnin = 5000,
                        n.thin = 10)
## 1 chain
result <- jags(data = list("y", "K", "N"), inits = NULL,
               parameters.to.save = c("mu", "phi", "sigma"),
               model.file = "../../modelos/o3/o3.jags.model1",
               n.chains = 1, n.iter = 15000, n.burnin = 5000, n.thin = 10)
## model2 additionally monitors sigmaomega
result <- jags(data = list("y", "K", "N"), inits = NULL,
               parameters.to.save = c("mu", "phi", "sigma", "sigmaomega"),
               model.file = "../../modelos/o3/o3.jags.model2",
               n.chains = 1, n.iter = 15000, n.burnin = 5000, n.thin = 10)
result <- jags(data = list("y", "K", "N"), inits = NULL,
               parameters.to.save = c("mu", "phi", "sigma"),
               model.file = "../../modelos/o3/o3.jags.model3",
               n.chains = 1, n.iter = 15000, n.burnin = 5000, n.thin = 10)
## model4 additionally monitors sigmaomega
result <- jags(data = list("y", "K", "N"), inits = NULL,
               parameters.to.save = c("mu", "phi", "sigma", "sigmaomega"),
               model.file = "../../modelos/o3/o3.jags.model4",
               n.chains = 1, n.iter = 15000, n.burnin = 5000, n.thin = 10)
plot(result)
|
c93d008e9e0a0d70e6095c595df7a00c24b34e60
|
366ec1e0e81f9d8c40e2fde01efa44d640c67daa
|
/tests/testthat/test-horn.R
|
c85e09f405ea571af2209360d8a0138bdc39de23
|
[] |
no_license
|
tjfarrar/skedastic
|
20194324833b8f2f20e5666b642cff617159588c
|
050e6a177a28fb0cc2054b506a53b09d6859e3c7
|
refs/heads/master
| 2022-11-17T22:41:43.930246
| 2022-11-06T06:39:13
| 2022-11-06T06:39:13
| 219,455,416
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,310
|
r
|
test-horn.R
|
context("horn works for two lm examples across all argument permutations")
# theargs <- formals(horn)

test_that("linear regression works with all combinations of formals", {
  skip_on_cran()
  # Two example fits: simple regression (cars) and multiple regression
  # (Boston housing).
  carslm <- lm(dist ~ speed, data = cars)
  bostonlm <- lm(medv ~ crim + zn + indus + chas + nox + rm +
    age + dis + rad + tax + ptratio + b + lstat, data = BostonHousing)
  # Cartesian product of argument values with which to exercise horn().
  theargs <- list("deflator" = c(NA, "speed", "crim", "2"),
    "alternative" = c("two.sided",
    "greater", "less"), "restype" = c("ols", "blus"),
    "mainlm" = list(carslm, bostonlm))
  allargs <- expand.grid(theargs, stringsAsFactors = FALSE)
  # Drop combinations whose deflator column is absent from the model matrix.
  # NOTE(review): `x[-which(cond), ]` removes ALL rows when no row matches
  # (which() returns integer(0)); with the current grid some rows always
  # match, but this is fragile if the argument grid changes.
  allargs <- allargs[-which(vapply(1:nrow(allargs), function(i) allargs$deflator[i] == "speed" &
    !("speed" %in% colnames(model.matrix(allargs$mainlm[[i]]))), NA)), ]
  allargs <- allargs[-which(vapply(1:nrow(allargs), function(i) allargs$deflator[i] == "crim" &
    !("crim" %in% colnames(model.matrix(allargs$mainlm[[i]]))), NA)), ]
  # Every remaining permutation must yield a p value; each must lie in [0, 1].
  pvals <- vapply(1:nrow(allargs), function(i) do.call(what = horn,
    args = append(list("statonly" = FALSE, "exact" = FALSE),
    unlist(allargs[i, ], recursive = FALSE)))$p.value, NA_real_)
  lapply(1:length(pvals), function(i) expect_true(is.btwn01(pvals[i])))
})
|
673e39a3313c90d8d67a8e79b5654c661ee52e1d
|
238d68e7f927d37a1bb985c1fbd89198325a8ab3
|
/man/osc.Rd
|
088f928094212f73c6d7bb0f97ec2071c207695b
|
[] |
no_license
|
RajeshKN02/mktgConjoint
|
f8b00e7db34dbc862466d67c05e7bc33155d63e9
|
6ae64547ab1505664ed193fcf548ed6a070cde98
|
refs/heads/master
| 2021-03-23T06:01:21.285553
| 2016-08-09T17:36:54
| 2016-08-09T17:36:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,357
|
rd
|
osc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{osc}
\alias{osc}
\title{Office System Conjoint data set. It is a list with all data sets needed to test the package}
\format{list}
\source{
\url{http://www.decisionpro.biz/}
}
\usage{
osc
}
\description{
Data a conjoint analysis study.
It is fictional data used to illustrate the use of conjoint analysis
to decide the range of products to be offered by an office department store.
The data file osc is a list containing these files:
design: a data frame with as many rows as attributes used in the conjoint analysis and as many variables as levels in each attribute
bundles: a data frame with as many rows as bundles of profiles individuals have rated and as many variables as attributes used in the analysis
ratings: a data frame with as many rows as individuals have rated the bundles displayed in the columns
full: a data frame with a full conjoint design
market.profiles: a data frame with as many rows as products are in the market (competitors particularly) by attributes (in columns)
constrains: a data frame with some constraints to be used in the estimation of optimal products
reveneu: a data frame with attributes' variation in cost
}
\examples{
data(osc)
names(osc)
head(names(osc))
}
\keyword{datasets}
\keyword{file}
|
7b5af8fb090df7b1270c18c0fa7b0619fa707756
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NISTunits/examples/NISTmeterTOmile.Rd.R
|
fb971d25a4c070ff46c68272e543a67d13a45875
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
NISTmeterTOmile.Rd.R
|
## Auto-extracted example script for NISTunits::NISTmeterTOmile.
library(NISTunits)
### Name: NISTmeterTOmile
### Title: Convert meter to mile
### Aliases: NISTmeterTOmile
### Keywords: programming
### ** Examples
NISTmeterTOmile(10)  # 10 metres expressed in miles
|
93d20006771bde898b38d66c2efbef23f62cf769
|
a0c17b0b2ab5425156170f04ceb5686c24ab05ba
|
/manuscript_synthesis/src/process_data_rtm_chla_wt.R
|
f9550d34fcfe4c8250edefc0cd9f33862df05b31
|
[] |
no_license
|
InteragencyEcologicalProgram/ND-FASTR
|
fc23c815b866eaa2c56aec33965f00759e1804f4
|
d8f7ab0c824d7bb05b59b5da5406048ce70c3da7
|
refs/heads/master
| 2023-09-04T09:40:41.789719
| 2023-09-01T17:35:41
| 2023-09-01T17:35:41
| 232,142,766
| 10
| 1
| null | 2021-07-26T21:20:22
| 2020-01-06T16:41:03
|
HTML
|
UTF-8
|
R
| false
| false
| 2,564
|
r
|
process_data_rtm_chla_wt.R
|
# NDFS Synthesis Manuscript
# Purpose: Process the 15-minute continuous chlorophyll and water temperature
# data collected from the Yolo Bypass and downstream at three representative
# stations (RD22, STTD, LIB) during years 2013-2019. Calculate daily averages
# which are used in figures and analysis for the NDFS synthesis manuscript.
# Author: Dave Bosworth
# Contacts: David.Bosworth@water.ca.gov

# Load packages
library(tidyverse)
library(fs)
library(here)
library(conflicted)

# Source functions (defines ndfa_abs_sp_path() used below)
source(here("manuscript_synthesis/src/global_functions.R"))

# Declare package conflict preferences
conflicts_prefer(dplyr::filter())

# Check if we are in the correct working directory
i_am("manuscript_synthesis/src/process_data_rtm_chla_wt.R")


# Import Continuous Data --------------------------------------------------

# Define directory for the continuous WQ data on the NDFA SharePoint
fp_rtm_wq <- ndfa_abs_sp_path("2011-2019 Synthesis Study-FASTR/WQ_Subteam/Processed_Data/Continuous")

# Import QA'ed and cleaned continuous chlorophyll and water temperature data for
# the NDFS period of interest; only the four needed columns are read
df_rtm_wq <- read_csv(
  file = path(fp_rtm_wq, "RTM_INPUT_all_2021-04-20.csv"),
  col_types = cols_only(
    StationCode = "c",
    DateTime = "c",
    WaterTemp = "d",
    Chla = "d"
  )
)


# Calculate Daily Averages ------------------------------------------------

df_wq_daily_avg <- df_rtm_wq %>%
  # parse date-time variable and define tz as PST; add date variable
  # ("Etc/GMT+8" is fixed UTC-8, i.e. PST without daylight-saving shifts)
  mutate(
    DateTime = ymd_hms(DateTime, tz = "Etc/GMT+8"),
    Date = date(DateTime)
  ) %>%
  # filter to years 2013-2019 and only keep three representative stations with a
  # long-term record - RD22, STTD, and LIB
  filter(
    year(Date) %in% 2013:2019,
    StationCode %in% c("RD22", "STTD", "LIB")
  ) %>%
  # calculate daily average chlorophyll and water temperature values
  # (mean of an all-NA day yields NaN, handled below)
  summarize(across(c(WaterTemp, Chla), ~ mean(.x, na.rm = TRUE)), .by = c(StationCode, Date)) %>%
  # remove all NaN chlorophyll values
  drop_na(Chla) %>%
  # convert NaN water temperature values to NA
  mutate(WaterTemp = if_else(is.nan(WaterTemp), NA_real_, WaterTemp)) %>%
  arrange(StationCode, Date)


# Save and Export Data ----------------------------------------------------

# Save daily average chlorophyll and water temperature data as csv and rds files
df_wq_daily_avg %>% write_csv(here("manuscript_synthesis/data/processed/chla_wt_daily_avg_2013-2019.csv"))
df_wq_daily_avg %>% saveRDS(here("manuscript_synthesis/data/processed/chla_wt_daily_avg_2013-2019.rds"))
|
6fe746b85fa6f1b1bb0ae694db654fa44220b112
|
9b691ceaaf1022528218f72f73094f1dc542c909
|
/Structural Feature Selection from Event Logs/testing.r
|
3102e2a4c3bb77b7931ddd2bfc9d105e025b6fd0
|
[] |
no_license
|
mhinkka/articles
|
dddcda36124b96a6fc5529acb593a32cfcd62f94
|
cccf2e4a26d3babe95e86094e24737f9657cc5e5
|
refs/heads/master
| 2020-06-08T17:23:34.561603
| 2019-06-23T14:17:02
| 2019-06-23T14:17:02
| 30,371,933
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58,368
|
r
|
testing.r
|
########################################################################################
# R test framework sources used to perform the tests required by paper: "Structural Feature Selection for Event Logs"
# by Markku Hinkka, Teemu Lehto, Keijo Heljanko and Alexander Jung
# 2.7.2017
########################################################################################
# Configuration
# Root folder holding the input CSV datasets read by loadDataset().
fileLocation <- "D:\\dev\\aalto\\papers\\structural-influence-analysis\\"
# Folder where test-run log files are written.
logFileLocation <- "C:\\Users\\User\\Dropbox\\Aalto\\testing\\testruns\\"
#logFileLocation <- "C:\\Users\\marhink\\Dropbox\\Aalto\\testing\\testruns\\"
# Initialize random seed so that the results are repeatable.
seed <- 1234
########################################################################################
# Load required libraries.
# Fix: use library() instead of require(). require() only returns FALSE when
# a package is missing, deferring the failure to some later undefined-symbol
# error; library() stops immediately with a clear message.
library(caret)
library(caretEnsemble)
library(pROC)
library(caTools)
library(psych)
library(plyr)
library(cluster)
library(data.table)
library(kernlab)
library(fastICA)
library(bnlearn)
library(glmnet)
library(randomForest)
library(infotheo)
library(mRMRe)
library(e1071)
library(flexclust)
########################################################################################
# Function definitions
# Print a timestamped log line to the console. The argument may be a
# character vector; its elements are joined with "," before printing.
writeLogMessage <- function(writeLogMessage) {
  text <- paste(writeLogMessage, collapse=",")
  print(paste(Sys.time(), text))
}
# Read a training dataset (and optionally a case-attribute dataset) from csv
# files under the global `fileLocation`, publishing the results as globals:
#   traindf           -- training data (all columns converted to factors) with
#                        a derived yes/no outcome column prepended under
#                        the name given by `outcomeName`
#   predictorNames    -- all column names except result/outcome/Id
#   caseAttributeData -- case attributes, or NULL when no name is supplied
#   outcomeName, resultColumnName -- echoed back as globals for later use
# NOTE(review): relies on `<<-` global side effects by design; callers read
# these globals rather than a return value.
loadDataset <- function(datasetName, caseAttributesName, resultColumnName, outcomeName, separator) {
  outcomeName <<- outcomeName
  resultColumnName <<- resultColumnName
  trainData <- read.csv(paste(fileLocation, datasetName, ".csv", sep = ""), sep=separator, dec=".", stringsAsFactors=FALSE)
  trainData <- trainData[order(trainData[,1]),] # Order data by first column
  traindf <- data.frame(trainData)
  predictorNames <<- names(traindf)[names(traindf) != resultColumnName & names(traindf) != outcomeName & names(traindf) != "Id"]
  # Derive a two-level outcome: "yes" where the result column equals 1.
  outcome <- ifelse(traindf[,resultColumnName]==1, "yes", "no")
  traindf <- cbind(outcome, traindf)
  names(traindf)[1] <- outcomeName
  # Downstream selection/classification code expects factor columns.
  traindf <- data.frame(lapply(traindf, as.factor))
  traindf <<- traindf
  if (caseAttributesName != "") {
    caseAttributeData <<- read.csv(paste(fileLocation, caseAttributesName, ".csv", sep = ""), sep=separator, dec=".", stringsAsFactors=FALSE)
  } else {
    caseAttributeData <<- NULL
  }
  writeLogMessage(paste("Number of columns in the training dataset: ", ncol(traindf), sep=""))
}
# Build a caret trainControl for bootstrapped two-class training:
# `resamples` bootstrap resamples of the (global) outcome column, final
# predictions kept, class probabilities enabled, ROC-style summary.
initializeClassificationControl <- function(df, resamples) {
  ctrl <- trainControl(
    method="boot",
    number=resamples,
    savePredictions="final",
    classProbs=TRUE,
    index=createResample(df[,outcomeName], resamples),
    summaryFunction=twoClassSummary
  )
  return(ctrl)
}
# Convert every column of `df` to a factor, returning a new data.frame.
convertToFactors <- function(df) {
  factorColumns <- lapply(df, factor)
  data.frame(factorColumns)
}
# Convert every column of `df` to numeric using data.table::set and return it.
# NOTE(review): set() assigns by reference, bypassing copy-on-modify -- the
# caller's object is mutated as well as the returned one. Confirm callers
# rely on (or at least tolerate) this in-place behavior before refactoring.
convertToNumeric <- function(df) {
  for (col in names(df)) set(df, j=col, value=as.numeric(df[[col]]))
  return(df)
}
# Identity feature selector: performs no pruning and wraps the input
# data.frame in the common result structure ($alldf). The initialK and
# outcomeCol parameters exist only for interface compatibility with the
# other getPrunedTraindf* selectors.
getPrunedTraindfNone <- function(df, initialK, outcomeCol) {
  list(alldf = df)
}
# Reduce `df` to `initialK` principal components via caret::preProcess.
# Returns a list with:
#   $alldf            -- transformed training data (PC columns)
#   $pca              -- the fitted preProcess object
#   $featureExtractor -- projects a new data.frame into the same PC space,
#                        zero-filling columns that were present at fit time
#                        but are missing from the new data
# When initialK >= ncol(df) the data is returned unchanged (no $pca).
getPrunedTraindfPCA <- function(df, initialK, outcomeCol) {
  result <- NULL
  if (initialK >= ncol(df)) {
    result$alldf <- df
    return(result)
  }
  # Constant (single-level) columns carry no information and break scaling.
  df <- df[, sapply(df, nlevels) > 1]
  df <- convertToNumeric(df)
  # Tiny jitter avoids exactly-zero variance after conversion.
  df <- addNoise(df)
  result$pca <- preProcess(df, method=c("center", "scale", "pca"), pcaComp=initialK)
  result$alldf <- data.frame(predict(result$pca, df))
  result$featureExtractor <- function(df2) {
    for (col in names(df2)) set(df2, j=col, value=as.numeric(df2[[col]]))
    # Zero-fill training-time columns absent from df2 so predict() sees the
    # full fit-time column set.
    missingCols <- setdiff(names(result$pca$mean), names(df2))
    for (col in missingCols) {
      df2[col] = rep(0, nrow(df2))
    }
    return(predict(result$pca, df2))
  }
  return(result)
}
# Reduce `df` to `initialK` independent components via caret::preProcess.
# Mirrors getPrunedTraindfPCA: returns $alldf, $ica and $featureExtractor.
# NOTE(review): unlike the PCA variant this does not drop single-level
# (constant) columns before conversion -- confirm whether that is intended.
getPrunedTraindfICA <- function(df, initialK, outcomeCol) {
  result <- NULL
  if (initialK >= ncol(df)) {
    result$alldf <- df
    return(result)
  }
  # data.table::set converts columns to numeric by reference.
  for (col in names(df)) set(df, j=col, value=as.numeric(df[[col]]))
  df <- addNoise(df)
  result$ica <- preProcess(df, method=c("center", "scale", "ica"), n.comp=initialK)
  result$alldf <- data.frame(predict(result$ica, df))
  result$featureExtractor <- function(df2) {
    for (col in names(df2)) set(df2, j=col, value=as.numeric(df2[[col]]))
    # Zero-fill fit-time columns missing from df2 before projecting.
    missingCols <- setdiff(names(result$ica$mean), names(df2))
    for (col in missingCols) {
      df2[col] = rep(0, nrow(df2))
    }
    return(predict(result$ica, df2))
  }
  return(result)
}
# Feature selection by k-means clustering of the features themselves.
# The data is transposed so each feature (column) becomes a point; features
# are grouped into clusters starting from k = initialK (k grows until the
# between/total sum-of-squares ratio reaches `threshold`, which is 0 here,
# so the first clustering is accepted), and the actual feature closest to
# each cluster center is selected.
# When removeDuplicates is TRUE, columns whose summary() duplicates an
# earlier column are dropped first.
# Returns $alldf (selected columns) plus clustering details under $clusters.
getPrunedTraindfClusterEx <- function(df, initialK, outcomeCol, removeDuplicates) {
  result <- NULL
  if (initialK >= ncol(df)) {
    result$alldf <- df
    return(result)
  }
  getUniqueFeatures <- function(df, threshold) {
    res <- NULL
    findFeatureClustering <- function(df, threshold) {
      # Transpose: rows become features, columns become cases.
      featuredf <- t(data.frame(lapply(df, function(x) as.numeric(as.character(x)))))
      k <- initialK
      repeat {
        kresult <- kmeans(featuredf, k)
        clusterPercentage <- (kresult$betweenss / kresult$totss) * 100
        writeLogMessage(paste("Cluster size #", k, " percentage:", clusterPercentage))
        if (clusterPercentage >= threshold) {
          res$clusterPercentage <<- clusterPercentage
          res$clusterCount <<- k
          return(kresult)
        }
        k <- k + 1
      }
    }
    clusters <- findFeatureClustering(df, threshold)
    # Initialize clustering maps
    # Builds one data.frame per cluster inside an environment: the cluster
    # center as the first column followed by each member feature. Names are
    # dynamic, hence the eval(parse(...)) construction.
    clusters$maps <- new.env()
    for (i in 1:length(clusters$cluster)) {
      c <- clusters$cluster[i]
      varName <- paste("C", as.numeric(c), sep="")
      if (!exists(varName, envir=clusters$maps)) {
        evalStr <- paste(varName, " <- data.frame(clusters$center[", as.numeric(c),",])", sep="")
        eval(parse(text=evalStr), envir=clusters$maps)
      }
      evalStr <- paste(varName, " <- cbind.data.frame(", varName,", ", names(c), "=df$", names(c), ")", sep="")
      eval(parse(text=evalStr), envir=clusters$maps)
    }
    # Calculate closest features to cluster centers
    clusterCenterFeatureIndexes <- NULL
    for (i in 1:initialK) {
      evalStr <- paste("C", i, sep="")
      cmp <- eval(parse(text=evalStr), envir=clusters$maps)
      d <- data.frame(as.matrix(dist(t(cmp))))
      d <- d[order(d[,1]), ]
      # Row 1 is the center itself, so row 2 is the nearest real feature.
      id <- which(names(df) == rownames(d)[2])
      clusterCenterFeatureIndexes <- c(clusterCenterFeatureIndexes, id)
    }
    res$clusters <- clusters
    res$clusters$clusterCenterFeatureIndexes <- clusterCenterFeatureIndexes
    res$clusters$clusterCenterFeatures <- names(df)[clusterCenterFeatureIndexes]
    res$clusters$clusterCenterFeatureNames <- colnames(df[, clusterCenterFeatureIndexes])
    return(res)
  }
  if (removeDuplicates) {
    # Drop columns whose summary() is identical to an earlier column's.
    df <- df[!duplicated(lapply(df, summary))]
    nc <- ncol(df)
    writeLogMessage(paste("Predictors with duplicates removed:", nc))
  }
  if (initialK >= ncol(df)) {
    result$alldf <- df
    writeLogMessage(paste("Target number of predictors reached before clustering: ", ncol(result$alldf), sep=""))
    return(result)
  }
  result <- getUniqueFeatures(df, 0)
  result$alldf <- df[, result$clusters$clusterCenterFeatureNames]
  writeLogMessage(paste("Predictor names after cluster (", ncol(result$alldf), "):", sep=""))
  writeLogMessage(names(result$alldf))
  return(result)
}
# Variant of the clustering selector that mixes cluster-center features with
# outliers: the features nearest the first 80% of cluster centers are kept,
# and the remaining slots (up to initialK) are filled with the features
# farthest from their own cluster centers.
# Returns $alldf (selected columns) plus clustering details under $clusters.
getPrunedTraindfClusterWithOutliers <- function(df, initialK, outcomeCol) {
  result <- NULL
  if (initialK >= ncol(df)) {
    result$alldf <- df
    return(result)
  }
  getUniqueFeatures <- function(df, threshold) {
    # Transpose: rows become features, columns become cases.
    featuredf <- t(data.frame(lapply(df, function(x) as.numeric(as.character(x)))))
    findFeatureClustering <- function(df, threshold) {
      k <- initialK
      repeat {
        kresult <- kmeans(featuredf, k)
        clusterPercentage <- (kresult$betweenss / kresult$totss) * 100
        writeLogMessage(paste("Cluster size #", k, " percentage:", clusterPercentage))
        if (clusterPercentage >= threshold) {
          result$clusterPercentage <<- clusterPercentage
          result$clusterCount <<- k
          return(kresult)
        }
        k <- k + 1
      }
    }
    clusters <- findFeatureClustering(df, threshold)
    # Initialize clustering maps
    # One data.frame per cluster (center first, then member features) built
    # in an environment; names are dynamic, hence eval(parse(...)).
    clusters$maps <- new.env()
    for (i in 1:length(clusters$cluster)) {
      c <- clusters$cluster[i]
      varName <- paste("C", as.numeric(c), sep="")
      if (!exists(varName, envir=clusters$maps)) {
        evalStr <- paste(varName, " <- data.frame(clusters$center[", as.numeric(c),",])", sep="")
        eval(parse(text=evalStr), envir=clusters$maps)
      }
      evalStr <- paste(varName, " <- cbind.data.frame(", varName,", ", names(c), "=df$", names(c), ")", sep="")
      eval(parse(text=evalStr), envir=clusters$maps)
    }
    # Calculate closest features to cluster centers
    # Only the first 80% of initialK clusters contribute a center feature.
    clusterCenterFeatureIndexes <- NULL
    for (i in 1:(as.integer(0.8 * initialK))) {
      evalStr <- paste("C", i, sep="")
      cmp <- eval(parse(text=evalStr), envir=clusters$maps)
      d <- data.frame(as.matrix(dist(t(cmp))))
      d <- d[order(d[,1]), ]
      # Row 1 is the center itself, so row 2 is the nearest real feature.
      id <- which(names(df) == rownames(d)[2])
      clusterCenterFeatureIndexes <- c(clusterCenterFeatureIndexes, id)
    }
    centers <- clusters$centers[clusters$cluster, ] # "centers" is a data frame of 3 centers but the length of iris dataset so we can canlculate distance difference easily.
    # Per-feature distance to its own cluster center; the farthest features
    # are treated as outliers and fill the remaining selection slots.
    distances <- sqrt(rowSums((featuredf - centers)^2))
    nOutliers <- initialK - length(clusterCenterFeatureIndexes)
    writeLogMessage(paste("Selecting ", nOutliers, " outliers", sep=""))
    outliers <- order(distances, decreasing=T)
    clusterCenterFeatureIndexes <- unique(c(clusterCenterFeatureIndexes, outliers))[1:initialK]
    result$clusters <<- clusters
    result$clusters$clusterCenterFeatureIndexes <<- clusterCenterFeatureIndexes
    result$clusters$clusterCenterFeatures <<- names(df)[clusterCenterFeatureIndexes]
    return(colnames(df[, clusterCenterFeatureIndexes]))
  }
  if (initialK >= ncol(df)) {
    result$alldf <- df
    writeLogMessage(paste("Target number of predictors reached before clustering: ", ncol(result$alldf), sep=""))
    return(result)
  }
  prunedPredictorNames <- getUniqueFeatures(df, 0)
  result$alldf <- df[, prunedPredictorNames]
  writeLogMessage(paste("Predictor names after cluster (", ncol(result$alldf), "):", sep=""))
  writeLogMessage(names(result$alldf))
  return(result)
}
# Cluster-based selector that first removes duplicate predictor columns.
getPrunedTraindfCluster <- function(df, initialK, outcomeCol) {
  return(getPrunedTraindfClusterEx(df, initialK, outcomeCol, TRUE))
}
# Cluster-based selector that keeps duplicate predictor columns
# (removeDuplicates=FALSE).
getPrunedTraindfClusterDuplicates <- function(df, initialK, outcomeCol) {
  return(getPrunedTraindfClusterEx(df, initialK, outcomeCol, FALSE))
}
# Two-stage reduction: cluster predictors down to halfway between the
# original width and initialK, then apply PCA for the final reduction.
# The clustering-stage result is kept under $step1.
# Fix: outcomeCol is now forwarded explicitly to both stages; the original
# omitted it and only worked because R's lazy evaluation meant the missing
# argument was never touched -- it would error the moment either callee
# started using its outcomeCol parameter.
getPrunedTraindfClusterPCA <- function(df, initialK, outcomeCol) {
  n <- (ncol(df) - initialK)
  result1 <- getPrunedTraindfCluster(df, ncol(df) - (n / 2), outcomeCol)
  result <- getPrunedTraindfPCA(result1$alldf, initialK, outcomeCol)
  result$step1 <- result1
  return(result)
}
# Two-stage reduction: cluster predictors down to halfway between the
# original width and initialK, then apply ICA for the final reduction.
# The clustering-stage result is kept under $step1.
# Fix: outcomeCol is now forwarded explicitly to both stages; the original
# omitted it and only worked via lazy evaluation of a missing argument.
getPrunedTraindfClusterICA <- function(df, initialK, outcomeCol) {
  n <- (ncol(df) - initialK)
  result1 <- getPrunedTraindfCluster(df, ncol(df) - (n / 2), outcomeCol)
  result <- getPrunedTraindfICA(result1$alldf, initialK, outcomeCol)
  result$step1 <- result1
  return(result)
}
# Add tiny uniform jitter (+/- 1e-7) to every cell of a matrix-like object.
# Used to break exact ties / zero variance before PCA/ICA preprocessing.
addNoise <- function(mtx) {
  cellCount <- prod(dim(mtx))
  jitter <- matrix(runif(cellCount, min = -0.0000001, max = 0.0000001), nrow = dim(mtx)[1])
  return(jitter + mtx)
}
# Rank predictors by random-forest variable importance and keep the top
# `initialK`. Returns $alldf (selected columns) and $importance.
getPrunedTraindfImportance <- function(df, initialK, outcomeCol) {
  result <- NULL
  # prepare training scheme
  # train the model
  # Binarize predictors: values equal to 1 become 1, everything else 0.
  # NOTE(review): data.table::set assigns by reference and predictorCols
  # aliases df, so the caller's df is binarized too -- confirm this side
  # effect is acceptable to all callers.
  predictorCols <- df
  for (col in names(predictorCols)) set(predictorCols, j=col, value=ifelse(predictorCols[[col]]==1, 1, 0))
  model <- randomForest(predictorCols, as.factor(outcomeCol))
  # estimate variable importance
  result$importance <- varImp(model, scale=FALSE)
  i <- cbind.data.frame(rownames(result$importance), result$importance)
  # Keep the initialK predictors with the highest overall importance.
  prunedPredictorNames <- (i[order(-i$Overall),][1:initialK,])[,1]
  writeLogMessage("Predictor names after importance:")
  writeLogMessage(prunedPredictorNames)
  result$alldf <- df[, prunedPredictorNames]
  return(result)
}
# Rank predictors by variable importance from a gradient-boosting (gbm)
# model trained via caret with a small bootstrap, and keep the top initialK.
# Returns $alldf (selected columns) and $importance.
getPrunedTraindfImportanceGBM <- function(df, initialK, outcomeCol) {
  result <- NULL
  # prepare training scheme
  # train the model
  # Binarize predictors: values equal to 1 become 1, everything else 0.
  # NOTE(review): data.table::set assigns by reference; predictorCols
  # aliases df, so the caller's df is binarized too.
  predictorCols <- df
  for (col in names(predictorCols)) set(predictorCols, j=col, value=ifelse(predictorCols[[col]]==1, 1, 0))
  # Lightweight control: 3 bootstrap resamples, one resample index, ROC summary.
  tc <- trainControl(
    method="boot",
    number=3,
    savePredictions="final",
    classProbs=TRUE,
    index=createResample(outcomeCol, 1),
    summaryFunction=twoClassSummary
  )
  model <- train(
    predictorCols,
    ifelse(outcomeCol==1, 'yes', 'no'),
    method="gbm",
    metric="ROC",
    trControl=tc)
  # estimate variable importance
  result$importance <- varImp(model, scale=FALSE)
  # summarize importance
  i <- cbind.data.frame(rownames(result$importance$importance), result$importance$importance)
  prunedPredictorNames <- (i[order(-i$Overall),][1:initialK,])[,1]
  writeLogMessage("Predictor names after importance:")
  writeLogMessage(prunedPredictorNames)
  result$alldf <- df[, prunedPredictorNames]
  return(result)
}
# Rank predictors by caret-trained random-forest variable importance and
# keep the top initialK. Returns $alldf (selected columns) and $importance.
getPrunedTraindfImportanceCaret <- function(df, initialK, outcomeCol) {
  result <- NULL
  # Binarize predictors: values equal to 1 become 1, everything else 0.
  # NOTE(review): data.table::set assigns by reference; predictorCols
  # aliases df, so the caller's df is binarized too.
  predictorCols <- df
  for (col in names(predictorCols)) set(predictorCols, j=col, value=ifelse(predictorCols[[col]]==1, 1, 0))
  # Fix: removed the unused local `control` (a repeated-cv trainControl that
  # was never passed to train()); behavior is unchanged.
  model <- train(predictorCols, outcomeCol, method="rf", importance = TRUE)
  # Estimate variable importance and keep the initialK highest-ranked names.
  result$importance <- varImp(model, scale=FALSE)
  i <- data.frame(result$importance$importance)
  prunedPredictorNames <- rownames(i[order(-i$X0),][1:initialK,])
  writeLogMessage("Predictor names after importance (caret):")
  writeLogMessage(prunedPredictorNames)
  result$alldf <- df[, prunedPredictorNames]
  return(result)
}
# Build an rfeControl for recursive feature elimination: 3-fold CV, a
# single repeat, keeping only the final resample results, no verbosity.
# `funcs` selects the model-specific helper set (e.g. rfFuncs, caretFuncs).
getRFEControl <- function(funcs) {
  ctrl <- rfeControl(functions=funcs, method="cv", repeats=1, number=3, returnResamp="final", verbose = FALSE)
  return(ctrl)
}
# Build the trainControl used inside RFE model fits: class probabilities
# enabled with a two-class (ROC-style) summary function.
getTrainControlForRF <- function() {
  ctrl <- trainControl(classProbs = TRUE, summaryFunction = twoClassSummary)
  return(ctrl)
}
# Recursive feature elimination (caret::rfe with random-forest functions)
# targeting a single subset size of initialK. Returns $alldf (selected
# columns, in descending order of importance) and the $rfe fit.
getPrunedTraindfRecursive <- function(df, initialK, outcomeCol) {
  result <- NULL
  # RFE needs a two-level character outcome ("yes"/"no").
  outcomedf <- cbind.data.frame(Selected=outcomeCol, SelectedC=ifelse(outcomeCol==1, "yes", "no"))
  # define the control using a random forest selection function
  # run the RFE algorithm
  predictorCols <- df
  result$rfe <- rfe(predictorCols, outcomedf[, "SelectedC"],
                    sizes=c(initialK),
                    rfeControl=getRFEControl(rfFuncs),
                    trControl=getTrainControlForRF())
  # rfe may return more variables than requested; truncate to initialK.
  prunedPredictorNames <- result$rfe$optVariables[1:min(length(result$rfe$optVariables), initialK)]
  writeLogMessage(paste("Features in the descending order of importance: ", paste(prunedPredictorNames, sep=",", collapse = ',')))
  writeLogMessage("Predictor names after recursion:")
  writeLogMessage(prunedPredictorNames)
  result$alldf <- df[, prunedPredictorNames]
  return(result)
}
# Recursive feature elimination (caret::rfe, random forest) with two subset
# sizes: an intermediate size (the larger of 4*initialK and 3/4 of the way
# from the original width down to initialK) and initialK itself.
# Returns $alldf (selected columns) and the $rfe fit.
getPrunedTraindfRecursive2Sizes <- function(df, initialK, outcomeCol) {
  result <- NULL
  # RFE needs a two-level character outcome ("yes"/"no").
  outcomedf <- cbind.data.frame(Selected=outcomeCol, SelectedC=ifelse(outcomeCol==1, "yes", "no"))
  n <- (ncol(df) - initialK)
  mid <- max(initialK * 4, ncol(df) - (3 * n / 4))
  # define the control using a random forest selection function
  # run the RFE algorithm
  predictorCols <- df
  result$rfe <- rfe(predictorCols, outcomedf[, "SelectedC"],
                    sizes=c(mid, initialK),
                    rfeControl=getRFEControl(rfFuncs),
                    trControl=getTrainControlForRF())
  # rfe may return more variables than requested; truncate to initialK.
  prunedPredictorNames <- result$rfe$optVariables[1:min(length(result$rfe$optVariables), initialK)]
  writeLogMessage(paste("Features in the descending order of importance: ", paste(prunedPredictorNames, sep=",", collapse = ',')))
  writeLogMessage("Predictor names after recursion:")
  writeLogMessage(prunedPredictorNames)
  result$alldf <- df[, prunedPredictorNames]
  return(result)
}
# Recursive feature elimination (caret::rfe, random forest) with four
# evenly spaced subset sizes from the original width down to initialK.
# Returns $alldf (selected columns) and the $rfe fit.
getPrunedTraindfRecursive4Sizes <- function(df, initialK, outcomeCol) {
  result <- NULL
  # RFE needs a two-level character outcome ("yes"/"no").
  outcomedf <- cbind.data.frame(Selected=outcomeCol, SelectedC=ifelse(outcomeCol==1, "yes", "no"))
  n <- (ncol(df) - initialK)
  writeLogMessage("Using sizes:")
  # Quarter-step sizes: 25%, 50%, 75% of the way down, then initialK.
  s <- c(as.integer(ncol(df) - (0.25 * n)), as.integer(ncol(df) - (0.5 * n)), as.integer(ncol(df) - (0.75 * n)), initialK)
  writeLogMessage(s)
  # define the control using a random forest selection function
  # run the RFE algorithm
  predictorCols <- df
  result$rfe <- rfe(predictorCols, outcomedf[, "SelectedC"],
                    sizes=s,
                    rfeControl=getRFEControl(rfFuncs),
                    trControl=getTrainControlForRF())
  # rfe may return more variables than requested; truncate to initialK.
  prunedPredictorNames <- result$rfe$optVariables[1:min(length(result$rfe$optVariables), initialK)]
  writeLogMessage(paste("Features in the descending order of importance: ", paste(prunedPredictorNames, sep=",", collapse = ',')))
  writeLogMessage("Predictor names after recursion:")
  writeLogMessage(prunedPredictorNames)
  result$alldf <- df[, prunedPredictorNames]
  return(result)
}
# Recursive feature elimination using an RBF-kernel SVM (caret::rfe with
# generic caretFuncs, method="svmRadial") targeting initialK predictors.
# Returns $alldf (selected columns) and the $rfe fit.
getPrunedTraindfRecursiveSVM <- function(df, initialK, outcomeCol) {
  result <- NULL
  # RFE needs a two-level character outcome ("yes"/"no").
  outcomedf <- cbind.data.frame(Selected=outcomeCol, SelectedC=ifelse(outcomeCol==1, "yes", "no"))
  # NOTE(review): convertToNumeric uses data.table::set, which converts the
  # caller's df by reference as well -- confirm this is acceptable.
  predictorCols <- convertToNumeric(df)
  # run the RFE algorithm
  result$rfe <- rfe(predictorCols, outcomedf[, "SelectedC"],
                    sizes=c(initialK),
                    rfeControl=getRFEControl(caretFuncs),
                    method="svmRadial",
                    metric = "Accuracy",
                    trControl = getTrainControlForRF())
  # rfe may return more variables than requested; truncate to initialK.
  prunedPredictorNames <- result$rfe$optVariables[1:min(length(result$rfe$optVariables), initialK)]
  writeLogMessage(paste("Features in the descending order of importance: ", paste(prunedPredictorNames, sep=",", collapse = ',')))
  writeLogMessage("Predictor names after recursion:")
  writeLogMessage(prunedPredictorNames)
  result$alldf <- df[, prunedPredictorNames]
  return(result)
}
# Select at most initialK predictors from the Markov blanket of the outcome
# in a Bayesian network learned by hill climbing (AIC score, bnlearn).
# Returns $alldf (selected columns incl. structure-learning frame columns),
# the learned $model and the blanket member names under $mb.
getPrunedTraindfBlanket <- function(df, initialK, outcomeCol) {
  result <- NULL
  # Constant columns cannot carry dependencies; drop them, then append the
  # outcome as a "Selected" column for structure learning.
  informative <- df[, sapply(df, nlevels) > 1]
  learningData <- convertToFactors(cbind.data.frame(informative, Selected=outcomeCol))
  result$model <- hc(learningData, score="aic")
  result$mb <- mb(result$model, "Selected")
  keepCount <- min(length(result$mb), initialK)
  prunedPredictorNames <- result$mb[1:keepCount]
  writeLogMessage("Predictor names after applying Markov blanket:")
  writeLogMessage(paste(prunedPredictorNames, collapse=","))
  result$alldf <- learningData[, prunedPredictorNames]
  return(result)
}
# Markov-blanket selection followed by PCA when the blanket is still wider
# than initialK. The blanket-stage result is preserved under $blanket.
getPrunedTraindfBlanketPCA <- function(df, initialK, outcomeCol) {
  blanketStage <- getPrunedTraindfBlanket(df, initialK, outcomeCol)
  if (ncol(blanketStage$alldf) <= initialK) {
    return(blanketStage)
  }
  result <- getPrunedTraindfPCA(df[, blanketStage$mb], initialK, outcomeCol)
  result$blanket <- blanketStage
  return(result)
}
# Markov-blanket selection followed by ICA when the blanket is still wider
# than initialK. The blanket-stage result is preserved under $blanket.
getPrunedTraindfBlanketICA <- function(df, initialK, outcomeCol) {
  blanketStage <- getPrunedTraindfBlanket(df, initialK, outcomeCol)
  if (ncol(blanketStage$alldf) <= initialK) {
    return(blanketStage)
  }
  result <- getPrunedTraindfICA(df[, blanketStage$mb], initialK, outcomeCol)
  result$blanket <- blanketStage
  return(result)
}
# Three-stage pipeline: take the whole Markov blanket (no size limit), then
# random-forest importance down to initialK, then PCA. Intermediate results
# are preserved under $blanket and $importance.
getPrunedTraindfBlanketImpPCA <- function(df, initialK, outcomeCol) {
  blanketStage <- getPrunedTraindfBlanket(df, 1000000, outcomeCol)
  if (initialK >= length(blanketStage$mb)) {
    return(blanketStage)
  }
  importanceStage <- getPrunedTraindfImportance(blanketStage$alldf, initialK, outcomeCol)
  result <- getPrunedTraindfPCA(importanceStage$alldf, initialK, outcomeCol)
  result$blanket <- blanketStage
  result$importance <- importanceStage
  return(result)
}
# Select up to initialK predictors via elastic-net regression (cv.glmnet,
# alpha=0.5): predictors with a non-zero coefficient at lambda.min are kept.
# Because cross-validation folds are random, fewer than initialK predictors
# may survive; the fit is retried up to 10 times, remembering the largest
# predictor set seen. Returns $alldf; the last fit is stored in the global
# `result$glmnet` via `<<-` inside the closure.
getPrunedTraindfLASSO <- function(df, initialK, outcomeCol) {
  getPredictorNames <- function() {
    # Binarize predictors and jitter to avoid degenerate columns.
    # NOTE(review): data.table::set mutates by reference; predictorCols
    # aliases df, so df is binarized as a side effect.
    predictorCols <- df
    for (col in names(predictorCols)) set(predictorCols, j=col, value=ifelse(predictorCols[[col]]==1, 1, 0))
    predictorCols <- addNoise(predictorCols)
    oc <- ifelse(outcomeCol==1, 1, 0)
    '%ni%'<-Negate('%in%')
    result$glmnet <<- cv.glmnet(x=as.matrix(predictorCols),y=oc,type.measure='mse',nfolds=5,alpha=.5)
    # Keep names of predictors with non-zero coefficients (excl. intercept).
    c <- coef(result$glmnet,s='lambda.min')
    inds <- which(c!=0)
    v <- row.names(c)[inds]
    v <- head(v[v != '(Intercept)'], initialK)
    return(v)
  }
  predictorNames <- NULL
  bestPredictorNames <- c()
  result <- NULL
  i <- 1
  repeat {
    predictorNames <- getPredictorNames()
    if (length(predictorNames) >= initialK)
      break;
    writeLogMessage(paste("Got ", length(predictorNames), " predictors from LASSO (trying to get:", initialK, "). Retrying...", sep=""))
    # Track the best (largest) set seen in case no attempt reaches initialK.
    if (length(predictorNames) > length(bestPredictorNames))
      bestPredictorNames <- predictorNames
    if (i > 10) {
      predictorNames <- bestPredictorNames
      break;
    }
    i <- i + 1
  }
  result$alldf <- df[, predictorNames]
  writeLogMessage(paste("Predictor names after LASSO (", ncol(result$alldf), "):", sep=""))
  writeLogMessage(names(result$alldf))
  return(result)
}
# Repeated LASSO selection using the lambda minimizing CV error.
getPrunedTraindfLASSORepeated <- function(df, initialK, outcomeCol) {
  getPrunedTraindfLASSORepeatedEx(df, initialK, outcomeCol, "lambda.min")
}
# Repeated LASSO selection using the one-standard-error lambda rule.
getPrunedTraindfLASSORepeated1se <- function(df, initialK, outcomeCol) {
  getPrunedTraindfLASSORepeatedEx(df, initialK, outcomeCol, "lambda.1se")
}
# Repeatedly call `predictorNamesFunc(i)` for i = 1, 2, ... until it returns
# NULL, counting how often each predictor name appears across calls, and
# return the unique names ordered by descending occurrence count.
# `messageSuffix` is appended to the per-iteration progress log line.
# NOTE(review): assigning c(pn, as.numeric(1)) into a data.frame row coerces
# the whole row to character, so counts are stored as strings; the explicit
# as.numeric() conversions on read/sort compensate for this.
getUniqueOrderedPredictors <- function(predictorNamesFunc, messageSuffix = "") {
  predictorNamesTable <- data.frame(predictorName=character(), count=numeric(), stringsAsFactors=FALSE)
  i <- 1
  repeat {
    # Get next set of predictors and add all the predictors in that into the beginning of the
    # list of predictors
    newPredictorNames <- predictorNamesFunc(i)
    if (is.null(newPredictorNames)) {
      break;
    }
    for (pn in newPredictorNames) {
      ind <- which(predictorNamesTable[,1] == pn)
      if (length(ind) > 0) {
        # Seen before: bump the occurrence count.
        predictorNamesTable[ind[1], 2] <- as.numeric(predictorNamesTable[ind, 2]) + 1
      }
      else {
        # First occurrence: append a new row with count 1.
        predictorNamesTable[nrow(predictorNamesTable) + 1,] <- c(pn, as.numeric(1))
      }
    }
    writeLogMessage(paste("Got ", nrow(predictorNamesTable), " unique predictors in ", i, " iterations", messageSuffix, ".", sep=""))
    i <- i + 1
  }
  return(predictorNamesTable[order(-as.numeric(predictorNamesTable[,2])), ]$predictorName)
}
# Run up to 10 independent cv.glmnet LASSO fits (fold assignment is random,
# so each run can select different predictors), combine the selected names
# by occurrence frequency via getUniqueOrderedPredictors, and keep the top
# initialK. `coefAlgorithm` chooses the lambda rule ("lambda.min" or
# "lambda.1se"). Returns $alldf; the last fit lands in the global
# `result$glmnet` via `<<-` inside the closure.
getPrunedTraindfLASSORepeatedEx <- function(df, initialK, outcomeCol, coefAlgorithm) {
  getPredictorNames <- function(index) {
    # Generator contract: return NULL after 10 iterations to stop the loop.
    if (index > 10) {
      return(NULL)
    }
    # Binarize predictors and jitter to avoid degenerate columns.
    # NOTE(review): data.table::set mutates by reference; predictorCols
    # aliases df, so df is binarized as a side effect.
    predictorCols <- df
    for (col in names(predictorCols)) set(predictorCols, j=col, value=ifelse(predictorCols[[col]]==1, 1, 0))
    predictorCols <- addNoise(predictorCols)
    oc <- ifelse(outcomeCol==1, 1, 0)
    '%ni%'<-Negate('%in%')
    result$glmnet <<- cv.glmnet(x=as.matrix(predictorCols),y=oc)
    # Keep names of predictors with non-zero coefficients (excl. intercept).
    c <- coef(result$glmnet,s=coefAlgorithm)
    inds <- which(c!=0)
    v <- row.names(c)[inds]
    v <- head(v[v != '(Intercept)'], initialK)
    return(v)
  }
  result <- NULL
  predictorNames <- getUniqueOrderedPredictors(getPredictorNames, paste(" using LASSO (trying to get: ", initialK, ")", sep=""))
  predictorNames <- predictorNames[1:min(length(predictorNames), initialK)]
  result$alldf <- df[, predictorNames[1:min(initialK, length(predictorNames))]]
  writeLogMessage(paste("Predictor names after ", 10, " iterations of LASSO (", ncol(result$alldf), "):", sep=""))
  writeLogMessage(names(result$alldf))
  return(result)
}
# Two-stage pipeline: let LASSO keep every relevant predictor (no cap),
# then reduce to initialK by random-forest importance computed on a sample
# of rows. Intermediate result preserved under $LASSO.
getPrunedTraindfLASSOImportance <- function(df, initialK, outcomeCol) {
  result <- getPrunedTraindfLASSO(df, 1000000, outcomeCol);
  if (initialK < ncol(result$alldf)) {
    result1 <- result
    # Fix: cap the sample at the number of available rows. The original
    # hard-coded 100 and would index past the end of datasets with fewer
    # than 100 rows, producing NA rows (sibling
    # getPrunedTraindfClusterImportance already uses min(nrow, ...)).
    importanceSampleSize <- min(nrow(result$alldf), 100)
    result <- getPrunedTraindfImportance(result$alldf[1:importanceSampleSize,], initialK, outcomeCol[1:importanceSampleSize])
    # Selection was computed on the sample; keep full rows of the winners.
    result$alldf <- result1$alldf[,names(result$alldf)]
    result$LASSO <- result1
  }
  return(result)
}
# Two-stage pipeline: uncapped LASSO selection followed by PCA down to
# initialK components. The untouched LASSO-stage result is kept as $LASSO.
getPrunedTraindfLASSOPCA <- function(df, initialK, outcomeCol) {
  lassoStage <- getPrunedTraindfLASSO(df, 1000000, outcomeCol)
  if (ncol(lassoStage$alldf) <= initialK) {
    return(lassoStage)
  }
  # PCA preprocessing expects factor columns.
  factoredColumns <- convertToFactors(lassoStage$alldf)
  result <- getPrunedTraindfPCA(factoredColumns, initialK, outcomeCol)
  result$LASSO <- lassoStage
  return(result)
}
# Two-stage pipeline: uncapped LASSO selection followed by feature
# clustering down to initialK. LASSO-stage result is kept as $LASSO.
# Fix: outcomeCol is now forwarded to getPrunedTraindfCluster; the original
# omitted it and only worked because the callee never evaluates that
# (lazily missing) argument.
getPrunedTraindfLASSOCluster <- function(df, initialK, outcomeCol) {
  result <- getPrunedTraindfLASSO(df, 1000000, outcomeCol);
  if (initialK < ncol(result$alldf)) {
    result1 <- result
    result <- getPrunedTraindfCluster(result$alldf, initialK, outcomeCol)
    result$LASSO <- result1
  }
  return(result)
}
# Two-stage pipeline: cluster predictors down to an intermediate width
# (at least 4*initialK, or 3/4 of the way down), then reduce to initialK by
# random-forest importance on a row sample. Cluster result kept as $cluster.
# Fix: outcomeCol is now forwarded to getPrunedTraindfCluster; the original
# omitted it and only worked via lazy evaluation of the missing argument.
getPrunedTraindfClusterImportance <- function(df, initialK, outcomeCol) {
  n <- (ncol(df) - initialK)
  result <- getPrunedTraindfCluster(df, max(initialK * 4, ncol(df) - (3 * n / 4)), outcomeCol)
  if (initialK < ncol(result$alldf)) {
    result1 <- result
    # Importance is estimated on at most 1000 rows for speed.
    importanceSampleSize <- min(nrow(df), 1000)
    result <- getPrunedTraindfImportance(result$alldf[1:importanceSampleSize,], initialK, outcomeCol[1:importanceSampleSize])
    # Selection was computed on the sample; keep full rows of the winners.
    result$alldf <- result1$alldf[,names(result$alldf)]
    result$cluster <- result1
  }
  return(result)
}
# "Influence"-based selection: for each feature, compare the selection rate
# among cases having the feature (p_sel) to the overall selection rate
# (p_totalSel); the contribution is |p_sel - p_totalSel| scaled by the
# number of cases having the feature. The initialK highest-contribution
# features are kept. Returns $alldf.
getPrunedTraindfInfluence <- function(df, initialK, outcomeCol) {
  result <- NULL
  if (initialK < ncol(df)) {
    # table(outcomeCol)[2] = count of the second outcome level (selected).
    n_totalSel <- table(outcomeCol)[2]
    p_totalSel <- n_totalSel / nrow(df)
    contributions <- rep(0, ncol(df))
    i <- 1
    for (col in names(df)) {
      # data.frame(col = df[col], ...) preserves the original column name,
      # so df_all[col] below indexes by the feature's own name.
      df_all <- data.frame(col = df[col], outcome = outcomeCol)
      df_sel <- df_all[which(df_all$outcome != 0),]
      n_feat <- length(which(df_all[col] != 0))
      n_sel <- length(which(df_sel[col] != 0))
      p_sel <- n_sel / n_feat
      diff <- abs(p_sel - p_totalSel)
      contributions[i] <- diff * n_feat
      i <- i + 1
    }
    tmpdf <- data.frame(col = names(df), contribution = abs(contributions))
    tmpdf <- tmpdf[order(-tmpdf$contribution),]
    featureNames <- tmpdf[1:initialK,1]
    writeLogMessage("Predictor names after influence:")
    writeLogMessage(featureNames)
    result$alldf <- df[,featureNames]
    return(result)
  }
  else {
    # Already at or below the target width: nothing to prune.
    result$alldf <- df
    return(result)
  }
}
# Two-stage pipeline: cluster predictors down to 2*initialK, then keep the
# initialK highest-influence features. Cluster result kept as $cluster.
getPrunedTraindfClusterInfluence <- function(df, initialK, outcomeCol) {
  clusterStage <- getPrunedTraindfCluster(df, initialK * 2, outcomeCol)
  if (ncol(clusterStage$alldf) <= initialK) {
    return(clusterStage)
  }
  result <- getPrunedTraindfInfluence(clusterStage$alldf, initialK, outcomeCol)
  result$cluster <- clusterStage
  return(result)
}
# Fisher-score feature selection: score each feature by between-class mean
# separation divided by within-class spread, and keep the initialK
# highest-scoring features. Returns $alldf.
getPrunedTraindfFisher <- function(df, initialK, outcomeCol) {
  # http://ink.library.smu.edu.sg/cgi/viewcontent.cgi?article=1458&context=sis_research
  # A feature will
  # have a very large Fisher score if it has very similar values
  # within the same class and very different values across different
  # classes. In this case, this feature is very discriminative to
  # differentiate instances from different classes
  result <- NULL
  if (initialK < ncol(df)) {
    # Overall mean of the (0/1-coded) outcome.
    mu <- mean(as.numeric(outcomeCol) - 1)
    scores <- rep(0, ncol(df))
    i <- 1
    for (col in names(df)) {
      # Feature values shifted to a 0-based numeric coding; the original
      # column name is preserved so df_sel[col] indexes by feature name.
      df_all <- data.frame(col = convertToNumeric(df[col]) - 1, outcome = outcomeCol)
      df_sel <- df_all[which(df_all$outcome != 0),]
      df_notSel <- df_all[which(df_all$outcome == 0),]
      # df_feat <- df_all[which(df_all[col] != 0),]
      # df_notFeat <- df_sel[which(df_sel[col] != 0),]
      n_sel <- nrow(df_sel)
      n_notSel <- nrow(df_all) - n_sel
      col_sel <- df_sel[col]
      col_notSel <- df_notSel[col]
      # Per-class mean and standard deviation of the feature.
      mu_sel <- sapply(col_sel, mean, na.rm = TRUE)
      sigma_sel <- sapply(col_sel, sd, na.rm = TRUE)
      mu_notSel <- sapply(col_notSel, mean, na.rm = TRUE)
      sigma_notSel <- sapply(col_notSel, sd, na.rm = TRUE)
      # Fisher score: between-class scatter over within-class scatter.
      a = n_sel * (mu_sel - mu) * (mu_sel - mu) + n_notSel * (mu_notSel - mu) * (mu_notSel - mu)
      b = n_sel * sigma_sel + n_notSel * sigma_notSel
      # Zero spread would divide by zero; such features score 0.
      if (b == 0) {
        score = 0
      }
      else {
        score = a / b
      }
      scores[i] = score
      i <- i + 1
    }
    tmpdf <- data.frame(col = names(df), score = scores)
    tmpdf <- tmpdf[order(-tmpdf$score),]
    featureNames <- tmpdf[1:initialK,1]
    writeLogMessage("Predictor names after Fisher scoring:")
    writeLogMessage(featureNames)
    result$alldf <- df[,featureNames]
    return(result)
  }
  else {
    # Already at or below the target width: nothing to prune.
    result$alldf <- df
    return(result)
  }
}
# Two-stage pipeline: cluster predictors down to 2*initialK, then keep the
# initialK best features by Fisher score. Cluster result kept as $cluster.
# Fix: the second stage now calls getPrunedTraindfFisher. The original
# called getPrunedTraindfInfluence (a copy-paste from
# getPrunedTraindfClusterInfluence), so Fisher scoring never actually ran
# and this selector was an exact duplicate of ClusterInfluence.
getPrunedTraindfClusterFisher <- function(df, initialK, outcomeCol) {
  result <- getPrunedTraindfCluster(df, initialK * 2, outcomeCol)
  if (initialK < ncol(result$alldf)) {
    result1 <- result
    result <- getPrunedTraindfFisher(result$alldf, initialK, outcomeCol)
    result$cluster <- result1
  }
  return(result)
}
# Cluster predictors (keeping duplicates) down to initialK + 2, then score
# each remaining cluster-center feature by the summed influence of ALL
# original features mapped to its cluster, and keep the initialK
# highest-scoring. Returns $alldf; the pre-pruning result is kept as
# $cluster (note: it is assigned a self-reference before $alldf is pruned).
getPrunedTraindfClusterAllInfluence <- function(df, initialK, outcomeCol) {
  n <- (ncol(df) - initialK)
  # result <- getPrunedTraindfClusterDuplicates(df, initialK * 1.5)
  # NOTE(review): outcomeCol is not forwarded here; this only works because
  # the callee never evaluates its (lazily missing) outcomeCol argument.
  result <- getPrunedTraindfClusterDuplicates(df, initialK + 2)
  if (initialK < ncol(result$alldf)) {
    contributions <- rep(0, ncol(result$alldf))
    names(contributions) <- colnames(result$alldf)
    # Map every original feature to the center feature of its cluster.
    featureToClusterCenterFeatureMap <- rep(0, ncol(df))
    names(featureToClusterCenterFeatureMap) <- colnames(df)
    for (col in names(df)) {
      clusterId <- result$clusters$cluster[col]
      featureToClusterCenterFeatureMap[col] <- result$clusters$clusterCenterFeatures[clusterId]
    }
    nTotalNotSel <- table(outcomeCol)[1]
    nTotalSel <- table(outcomeCol)[2]
    pTotalSel <- nTotalSel / nrow(df)
    i <- 1
    for (col in names(df)) {
      # Influence of this feature: deviation of its selection rate from the
      # overall rate, scaled by the selected-case count; accumulated onto
      # the feature's cluster-center representative.
      coldf <- data.frame(col = df[col], outcome = outcomeCol)
      seldf <- coldf[which(coldf$outcome != 0),]
      featseldf <- which(seldf[col] != 0)
      nSel <- length(featseldf)
      if (length(featseldf) != 0) {
        pSel <- nSel / length(which(coldf[col] != 0))
        diff <- pSel - pTotalSel
        contributions[featureToClusterCenterFeatureMap[col]] <- contributions[featureToClusterCenterFeatureMap[col]] + (diff * nTotalSel)
      }
      i <- i + 1
    }
    tmpdf <- data.frame(col = names(result$alldf), contribution = contributions)
    tmpdf <- tmpdf[order(-tmpdf$contribution),]
    featureNames <- tmpdf[1:initialK,1]
    writeLogMessage("Predictor names after influence:")
    writeLogMessage(featureNames)
    result$cluster <- result
    result$alldf <- result$alldf[,featureNames]
  }
  return(result)
}
# Feature selection by k-centroids clustering (flexclust::kcca): features
# (columns) are transposed into points, clustered into exactly initialK
# clusters with the given kcca `family` (and optional `control`), and the
# actual feature closest to each cluster center is kept.
# Returns $alldf plus clustering details ($clusters, $clusterCenterFeature*).
genericKcca <- function(df, initialK, outcomeCol, family, control) {
  result <- NULL
  if (initialK >= ncol(df)) {
    result$alldf <- df
    return(result)
  }
  getUniqueFeatures <- function(df, threshold) {
    res <- NULL
    findFeatureClustering <- function(df, threshold) {
      # Transpose: rows become features, columns become cases.
      featuredf <- t(data.frame(lapply(df, function(x) as.numeric(as.character(x)))))
      if (is.null(control)) {
        kresult <- kcca(featuredf, initialK, family=family)
      }
      else {
        kresult <- kcca(featuredf, initialK, family=family, control=control)
      }
      res$clusterCount <<- initialK
      return(kresult)
    }
    res$clusters <- findFeatureClustering(df, threshold)
    clusters <- res$clusters
    # Initialize clustering maps
    # One data.frame per cluster (center first, then member features), built
    # in an environment; names are dynamic, hence eval(parse(...)). Note the
    # S4 @cluster / @centers slot access for kcca objects.
    res$maps <- new.env()
    for (i in 1:length(clusters@cluster)) {
      c <- clusters@cluster[i]
      varName <- paste("C", as.numeric(c), sep="")
      if (!exists(varName, envir=res$maps)) {
        evalStr <- paste(varName, " <- data.frame(clusters@centers[", as.numeric(c),",])", sep="")
        eval(parse(text=evalStr), envir=res$maps)
      }
      evalStr <- paste(varName, " <- cbind.data.frame(", varName,", ", names(c), "=df$", names(c), ")", sep="")
      eval(parse(text=evalStr), envir=res$maps)
    }
    # Calculate closest features to cluster centers
    clusterCenterFeatureIndexes <- NULL
    for (i in 1:initialK) {
      evalStr <- paste("C", i, sep="")
      cmp <- eval(parse(text=evalStr), envir=res$maps)
      d <- data.frame(as.matrix(dist(t(cmp))))
      d <- d[order(d[,1]), ]
      # Row 1 is the center itself, so row 2 is the nearest real feature.
      id <- which(names(df) == rownames(d)[2])
      clusterCenterFeatureIndexes <- c(clusterCenterFeatureIndexes, id)
    }
    res$clusters <- clusters
    res$clusterCenterFeatureIndexes <- clusterCenterFeatureIndexes
    res$clusterCenterFeatures <- names(df)[clusterCenterFeatureIndexes]
    res$clusterCenterFeatureNames <- colnames(df[, clusterCenterFeatureIndexes])
    return(res)
  }
  # NOTE(review): this guard duplicates the one at the top of the function
  # and can never trigger (the first guard already returned).
  if (initialK >= ncol(df)) {
    result$alldf <- df
    writeLogMessage(paste("Target number of predictors reached before clustering: ", ncol(result$alldf), sep=""))
    return(result)
  }
  result <- getUniqueFeatures(df, 0)
  result$alldf <- df[, result$clusterCenterFeatureNames]
  writeLogMessage(paste("Predictor names after cluster (", ncol(result$alldf), "):", sep=""))
  writeLogMessage(names(result$alldf))
  return(result)
}
# K-centroids selector: k-means family with kmeans++ center initialization.
getPrunedTraindfClusterKccaKMeans <- function (df, initialK, outcomeCol) {
  family <- kccaFamily("kmeans")
  genericKcca(df, initialK, outcomeCol, family, list(initcent="kmeanspp"))
}
# K-centroids selector: k-medians family with kmeans++ center initialization.
getPrunedTraindfClusterKccaKMedians <- function (df, initialK, outcomeCol) {
  family <- kccaFamily("kmedians")
  genericKcca(df, initialK, outcomeCol, family, list(initcent="kmeanspp"))
}
# K-centroids selector: Jaccard-distance family with kmeans++ initialization.
getPrunedTraindfClusterKccaJaccard <- function (df, initialK, outcomeCol) {
  family <- kccaFamily("jaccard")
  genericKcca(df, initialK, outcomeCol, family, list(initcent="kmeanspp"))
}
# K-centroids selector using a custom distance in which each dimension is
# weighted by the feature's "influence": how much the selection rate among
# cases having the feature deviates from the overall selection rate, scaled
# by the selected-case count and normalized by the maximum contribution.
getPrunedTraindfClusterKccaKMeansWeightedDistance <- function (df, initialK, outcomeCol) {
  contributions <- rep(0, ncol(df))
  names(contributions) <- colnames(df)
  nTotalNotSel <- table(outcomeCol)[1]
  nTotalSel <- table(outcomeCol)[2]
  pTotalSel <- nTotalSel / nrow(df)
  # Fix: the original ran this identical loop twice, doubling every
  # contribution, and kept an unused `i` counter. Because the weights below
  # are normalized by the maximum contribution, the uniform doubling
  # cancelled out; removing the duplicate changes nothing in the result.
  for (col in names(df)) {
    coldf <- data.frame(col = df[col], outcome = outcomeCol)
    seldf <- coldf[which(coldf$outcome != 0),]
    featseldf <- which(seldf[col] != 0)
    nSel <- length(featseldf)
    if (length(featseldf) != 0) {
      pSel <- nSel / length(which(coldf[col] != 0))
      diff <- pSel - pTotalSel
      contributions[col] <- contributions[col] + (diff * nTotalSel)
    }
  }
  # Normalize contributions into non-negative weights relative to the max.
  w <- rep(0, ncol(df))
  maxContribution <- max(contributions, na.rm=TRUE)
  for (i in seq_along(contributions)) {
    w[i] <- abs(contributions[i] / maxContribution)
  }
  # Weighted Euclidean distance: higher-influence dimensions weigh more.
  family <- kccaFamily(dist=function (x, centers)
  {
    if (ncol(x) != ncol(centers))
      stop(sQuote("x"), " and ", sQuote("centers"), " must have the same number of columns")
    z <- matrix(0, nrow = nrow(x), ncol = nrow(centers))
    for (k in 1:nrow(centers)) {
      z[, k] <- sqrt(colSums((w*(t(x) - centers[k, ]))^2))
    }
    z
  })
  return(genericKcca(df, initialK, outcomeCol, family, list(initcent="kmeanspp")))
}
# Two-stage pipeline: cluster predictors down to an intermediate width
# (at least 4*initialK, or 3/4 of the way down), then reduce to initialK by
# GBM variable importance on a row sample. Cluster result kept as $cluster.
# Fix: outcomeCol is now forwarded to getPrunedTraindfCluster; the original
# omitted it and only worked via lazy evaluation of the missing argument.
getPrunedTraindfClusterImportanceGBM <- function(df, initialK, outcomeCol) {
  n <- (ncol(df) - initialK)
  result <- getPrunedTraindfCluster(df, max(initialK * 4, ncol(df) - (3 * n / 4)), outcomeCol)
  if (initialK < ncol(result$alldf)) {
    result1 <- result
    # Importance is estimated on at most 1000 rows for speed.
    importanceSampleSize <- min(nrow(df), 1000)
    result <- getPrunedTraindfImportanceGBM(result$alldf[1:importanceSampleSize,], initialK, outcomeCol[1:importanceSampleSize])
    # Selection was computed on the sample; keep full rows of the winners.
    result$alldf <- result1$alldf[,names(result$alldf)]
    result$cluster <- result1
  }
  return(result)
}
# Baseline selector: keep a uniformly random subset of initialK predictors.
getPrunedTraindfRandom <- function(df, initialK, outcomeCol) {
  chosen <- sample(names(df), initialK)
  result <- NULL
  result$alldf <- df[, chosen]
  writeLogMessage(paste("Predictor names after random (", ncol(result$alldf), "):", sep=""))
  writeLogMessage(names(result$alldf))
  return(result)
}
# Selects up to initialK features with minimum-redundancy maximum-relevance
# (mRMRe package). solutionCount > 1 builds an ensemble of candidate solutions
# whose feature lists are merged in order of first appearance.
# NOTE(review): relies on convertToNumeric, getUniqueOrderedPredictors and
# writeLogMessage defined elsewhere in this file, and on the mRMRe package.
getPrunedTraindfMRMR <- function(df, initialK, outcomeCol, solutionCount) {
  result <- NULL
  # mRMR needs numeric data; the outcome is prepended as column 1, hence
  # target_indices = c(1) below.
  datadf <- convertToNumeric(cbind.data.frame(outcomeCol, df))
  dd <- mRMR.data(data = datadf)
  result$mRMR <- mRMR.ensemble(data = dd, target_indices = c(1), solution_count = solutionCount, feature_count = initialK)
  # Maps the index-th mRMR solution back to predictor names; NULL when index
  # exceeds the number of solutions found.
  # NOTE(review): the filter indices refer to datadf, which has the outcome in
  # column 1 - verify there is no off-by-one when indexing names(df).
  getPredictorNames <- function(index) {
    filters <- attr(result$mRMR, "filters")[[1]]
    if (index > ncol(filters)) {
      return(NULL)
    }
    return(names(df)[filters[,index]])
  }
  # Merge the ensemble solutions (order of first appearance), cap at initialK
  # and drop padding NAs.
  predictorNames <- getUniqueOrderedPredictors(getPredictorNames)
  predictorNames <- predictorNames[1:min(length(predictorNames), initialK)]
  predictorNames <- predictorNames[!is.na(predictorNames)]
  result$alldf <- df[, predictorNames]
  writeLogMessage(paste("Predictor names after minimum-redundancy maximum-relevancy (", ncol(result$alldf), "):", sep=""))
  writeLogMessage(names(result$alldf))
  return(result)
}
# Classic mRMR selection: a single mRMR solution (no ensemble).
getPrunedTraindfMRMRClassic <- function(df, initialK, outcomeCol) {
  getPrunedTraindfMRMR(df, initialK, outcomeCol, solutionCount = 1)
}
# Ensemble mRMR selection with five candidate solutions merged together.
getPrunedTraindfMRMREnsemble5 <- function(df, initialK, outcomeCol) {
  getPrunedTraindfMRMR(df, initialK, outcomeCol, solutionCount = 5)
}
# Two-stage selection: cluster the feature space down to 2x the target size,
# then refine to initialK features with ensemble mRMR. The clustering stage
# result is kept in $cluster for inspection.
getPrunedTraindfClusterMRMR <- function(df, initialK, outcomeCol) {
  clusterResult <- getPrunedTraindfCluster(df, initialK * 2, outcomeCol)
  if (ncol(clusterResult$alldf) > initialK) {
    refined <- getPrunedTraindfMRMREnsemble5(clusterResult$alldf, initialK, outcomeCol)
    refined$cluster <- clusterResult
    return(refined)
  }
  clusterResult
}
# Drops factor columns with fewer than two levels (i.e. constant columns),
# which carry no information for classification, and logs how many were removed.
# NOTE(review): nlevels() returns 0 for non-factor columns, so this must be
# called on a data frame whose columns are factors - confirm at call sites.
removeColumnsHavingOneLevel <- function (df) {
  nc <- ncol(df)
  writeLogMessage(paste("Number of predictors before naming constant valued columns:", nc))
  # drop = FALSE keeps the result a data frame even when only one column
  # survives; without it df[, mask] collapses a single column to a vector,
  # which breaks the ncol() call below and every caller expecting a data frame.
  df <- df[, sapply(df, nlevels) > 1, drop = FALSE]
  if (nc > ncol(df)) {
    writeLogMessage(paste((nc - ncol(df)), " predictors having constant value removed in training set", sep=""))
  }
  return(df)
}
# Prepares the training data for model building: filters unwanted feature
# columns, optionally dummifies the predictors, runs the configured feature
# selection function and attaches the derived classification outcome column.
# NOTE(review): relies on writeLogMessage, convertToFactors and
# initializeClassificationControl defined elsewhere in this file, and exports
# d1/d2/d3 debug globals via <<-.
initializePrunedFeatures <- function(traindf, selectionFunc, initialK, outcomeFeature, filteredFeatures, dummyFunc) {
  result <- NULL
  prunedTraindf <- traindf
  if (filteredFeatures != "") {
    # Keep the outcome under the fixed name "Selected", then drop every column
    # whose name matches the filteredFeatures regular expression.
    prunedTraindf$Selected <- traindf[, outcomeFeature]
    prunedTraindf <- prunedTraindf[, !names(traindf) %in% grep(filteredFeatures, names(traindf), perl=TRUE, value=TRUE)]
  }
  # Drop start-state artifacts (X_0 / X0 columns).
  prunedTraindf <- prunedTraindf[, !names(prunedTraindf) %in% grep("X_0|X0", names(prunedTraindf), perl=TRUE, value=TRUE)]
  # Columns 1-4 are case metadata; predictors start at column 5.
  fixedCols <- prunedTraindf[, 1:4]
  predictorCols <- prunedTraindf[, 5:ncol(prunedTraindf)]
  # NOTE(review): return value discarded - constant columns are NOT actually
  # removed here. Probably intended:
  # predictorCols <- removeColumnsHavingOneLevel(predictorCols). Confirm before
  # changing (the columns may not be factors yet at this point).
  removeColumnsHavingOneLevel(predictorCols)
  result$featureNamesBeforeSelection <- colnames(predictorCols)
  result$dummyFunc <- dummyFunc
  if (!is.null(dummyFunc)) {
    writeLogMessage("Dummifying training set.")
    predictorCols <- dummyFunc(predictorCols)
  }
  predictorCols <- convertToFactors(predictorCols)
  # Debug globals for interactive inspection.
  d1 <<- predictorCols
  d2 <<- initialK
  d3 <<- prunedTraindf$Selected
  if (initialK < ncol(predictorCols)) {
    pruneResult <- selectionFunc(predictorCols, initialK, prunedTraindf$Selected)
  }
  else {
    writeLogMessage(paste("Feature selection was not required due to the desired number of selected features being greater than the number of actual features."))
    pruneResult <- NULL
    pruneResult$alldf <- predictorCols
  }
  result$featureNames <- sort(colnames(pruneResult$alldf))
  # NOTE(review): this stores the un-pruned predictor columns; it looks like
  # pruneResult was intended (testClassificationModel reads
  # $pruneResult$featureExtractor from this slot, which is always NULL here) -
  # confirm before changing, as fixing it alters downstream behavior.
  result$pruneResult <- predictorCols
  writeLogMessage(paste("Pruned number of predictors:", ncol(pruneResult$alldf)))
  result$traindf <- cbind(fixedCols, pruneResult$alldf)
  result$predictorNames <- result$featureNames[result$featureNames != "Selected"]
  result$outcomeName <- "Result"
  # Binary classification outcome derived from the numeric Selected column.
  outcome <- ifelse(result$traindf[,"Selected"]!=0, "yes", "no")
  result$traindf[result$outcomeName] <- outcome
  result$trainControl <- initializeClassificationControl(result$traindf, 5)
  writeLogMessage(paste("Predictors and outcome initialized"))
  return(result)
}
# Random 75%/25% train/test split of df. The full data set is also kept in
# $alldf. Uses the current RNG state (callers seed beforehand).
initializeSamples <- function(df) {
  trainIdx <- sample(nrow(df), 0.75 * nrow(df))
  list(
    traindf = df[trainIdx, ],
    testdf  = df[-trainIdx, ],
    alldf   = df
  )
}
# Aligns the raw test data with the columns the model was trained on: keeps the
# pre-selection feature columns, drops constant columns, applies the same
# dummification as in training, zero-fills columns present in training but
# missing in the test set, and derives the classification outcome column.
# NOTE(review): relies on writeLogMessage and convertToFactors defined
# elsewhere in this file.
preprocessTestData <- function(trainingResult, testdf) {
  writeLogMessage("Pre-processing test data started.")
  testPredictorCols <- testdf[, trainingResult$featureSelection$featureNamesBeforeSelection]
  testPredictorCols <- data.frame(as.matrix(testPredictorCols))
  nc <- ncol(testPredictorCols)
  # Drop columns that are constant in the test set.
  testPredictorCols <- testPredictorCols[, sapply(testPredictorCols, nlevels) > 1]
  if (nc > ncol(testPredictorCols)) {
    writeLogMessage(paste((nc - ncol(testPredictorCols)), " predictors with 1 levels removed in test set", sep=""))
  }
  # Training predictors: columns 5..(n-1); the last column is the derived
  # outcome ("Result") appended by initializePrunedFeatures.
  predictorCols <- trainingResult$featureSelection$traindf[, (5:(ncol(trainingResult$featureSelection$traindf) - 1))]
  if (!is.null(trainingResult$featureSelection$dummyFunc)) {
    writeLogMessage("Dummifying.")
    testPredictorCols <- trainingResult$featureSelection$dummyFunc(testPredictorCols)
  }
  # Zero-fill any training column the test set lacks so predict() sees the
  # same schema.
  missingCols <- setdiff(names(predictorCols), names(testPredictorCols))
  for (col in missingCols) {
    testPredictorCols[col] = 0
  }
  testPredictorCols <- convertToFactors(testPredictorCols)
  testdf <- cbind(testdf[, 1:5], testPredictorCols)
  # Same outcome derivation as in training.
  outcome <- ifelse(testdf[,"Selected"]!=0, "yes", "no")
  testdf[trainingResult$featureSelection$outcomeName] <- outcome
  writeLogMessage("Pre-processing test data finished.")
  return(testdf)
}
# Conditional/mutual information (empirical estimator, infotheo package)
# between the full predictor frame and the selected predictor subset.
# Returns 0 when the requested predictor columns cannot be extracted from df.
calculateMutualInformation <- function (df, predictors) {
  subsetdf <- try(df[, predictors])
  if (inherits(subsetdf, "try-error")) {
    return(0)
  }
  condinformation(df, subsetdf, method = "emp")
}
# Conditional/mutual information (empirical estimator, infotheo package)
# between a single feature column and the selected predictor subset.
# Returns 0 when the requested predictor columns cannot be extracted from df.
calculateMutualInformationWithFeature <- function (df, featureCol, predictors) {
  subsetdf <- try(df[, predictors])
  if (inherits(subsetdf, "try-error")) {
    return(0)
  }
  condinformation(featureCol, subsetdf, method = "emp")
}
# Evaluates a trained model on both the held-out test set and the training set:
# accuracy, confusion matrices (caret) and mutual-information diagnostics.
# NOTE(review): exports td0/td1/td2 debug globals via <<-.
testClassificationModel <- function(trainingResult, testdfIn) {
  result <- NULL
  td0 <<- trainingResult
  td1 <<- testdfIn
  testdf <- preprocessTestData(trainingResult, testdfIn)
  predictorNames <- trainingResult$featureSelection$predictorNames
  outcomeName <- trainingResult$featureSelection$outcomeName
  traindf <- trainingResult$featureSelection$traindf
  # If the feature-selection step produced a feature extractor (projection),
  # apply it to the raw test predictors before predicting.
  if (!is.null(trainingResult$featureSelection$pruneResult$featureExtractor)) {
    td1 <<- testdf
    td2 <<- trainingResult
    # NOTE(review): 5:ncol(testdf)-1 parses as (5:ncol(testdf)) - 1, i.e.
    # columns 4..(n-1); 5:(ncol(testdf)-1) may have been intended - confirm.
    testdfNew <- cbind.data.frame(testdf[,1:5], trainingResult$featureSelection$pruneResult$featureExtractor(testdf[,5:ncol(testdf)-1]))
    testdfNew[,outcomeName] = testdf[,outcomeName]
    testdf <- testdfNew
  }
  predTest <- predict(trainingResult$model, testdf[,predictorNames])
  predTrain <- predict(trainingResult$model, traindf[,predictorNames])
  # Plain accuracy on both sets.
  result$correctTestP <- sum(predTest==testdf[,outcomeName])/nrow(testdf)
  result$correctTrainP <- sum(predTrain==traindf[,outcomeName])/nrow(traindf)
  result$testPredictions <- cbind.data.frame(predTest, testdf[,outcomeName])
  result$trainPredictions <- cbind.data.frame(predTrain, traindf[,outcomeName])
  writeLogMessage(paste(trainingResult$model$method, ": Correct in training set %:", 100 * result$correctTrainP, ", correct in test set %:", 100 * result$correctTestP))
  result$testcm <- confusionMatrix(data=predTest, ref=testdf[,outcomeName])
  result$traincm <- confusionMatrix(data=predTrain, ref=traindf[,outcomeName])
  # Mutual-information diagnostics over the full (unsplit) data set;
  # predictors start at column 6 of alldf.
  alldf <- trainingResult$alldf
  predictorsdf <- alldf[,6:ncol(alldf)]
  result$mutualInformation <- calculateMutualInformation(predictorsdf, predictorNames)
  result$mutualInformationWithOutcome <- calculateMutualInformationWithFeature(predictorsdf, alldf$Selected, predictorNames)
  # Kept for quickly disabling the (slow) mutual-information computation:
  # result$mutualInformation <- NULL
  # result$mutualInformationWithOutcome <- NULL
  writeLogMessage(paste("Mutual information factors: all: ", result$mutualInformation, " outcome: ", result$mutualInformationWithOutcome, sep=""))
  return(result)
}
# Runs one end-to-end experiment: train/test split, feature selection, GBM
# model training (caret::train) and evaluation. Each phase is timed with
# system.time(); the phase closures write into `result` via <<-.
# The RNG is re-seeded before each stochastic phase so phases are reproducible
# independently of one another.
performTest <- function(df, selectionFunc, initialK, outcomeFeature, filteredFeatures, dummyFunc, seedValue) {
  set.seed(seedValue)
  result <- initializeSamples(df)
  featureSelectionFunc <- function() {
    result$featureSelection <<- initializePrunedFeatures(result$traindf, selectionFunc, initialK, outcomeFeature, filteredFeatures, dummyFunc)
  }
  result$featureSelectionDurations <- system.time(featureSelectionFunc())
  modelBuildingFunc <- function() {
    result$model <<- train(
      result$featureSelection$traindf[,result$featureSelection$predictorNames],
      result$featureSelection$traindf[,result$featureSelection$outcomeName],
      method="gbm",
      metric="ROC",
      trControl=result$featureSelection$trainControl)
  }
  set.seed(seedValue)
  result$modelBuildingDurations <- system.time(modelBuildingFunc())
  set.seed(seedValue)
  testFunc <- function() {
    result$testResult <<- testClassificationModel(result, result$testdf)
  }
  result$testDurations <- system.time(testFunc())
  return(result)
}
# Flattens a list of test runs (res$runs) into a one-row-per-run report data
# frame. Runs that ended in error get "<error>" placeholders for every
# result column. The Predictors column (comma-separated feature names) is
# dropped unless includePredictors = TRUE.
report <- function(res, includePredictors = FALSE) {
  cols <- c(
    "Phenomenon",
    "DataSetSize",
    "TestName",
    "Repeat #",
    "StartTime",
    "Algorithm",
    "DummyFunc",
    "PredictorSets",
    "NumPredictors",
    "CorrectPTest",
    "CorrectPTrain",
    "ProcTime",
    "ModelBuildTime",
    "TestProcTime",
    "MutualInformation",
    "MutualInformationOutcome",
    "# CM TP",
    "# CM TN",
    "# CM FP",
    "# CM FN",
    "Predictors")
  result <- data.frame(matrix(vector(), 0, length(cols),
                              dimnames=list(c(), cols)),
                       stringsAsFactors=F)
  # seq_along() (not 1:length()) so an empty run list yields an empty report
  # instead of iterating over c(1, 0).
  for (i in seq_along(res$runs)) {
    r <- res$runs[[i]]
    # The first eight fields are identical for successful and failed runs.
    common <- c(
      r$phenomenon,
      r$dataSetSize,
      r$testName,
      r$rpt,
      as.character(r$startTime),
      r$featureSelection$algorithm,
      r$featureSelection$dummyVariableCreationFunction,
      r$featureSelection$initialPredictorSets)
    if (is.null(r$error)) {
      result[nrow(result)+1,] <- c(
        common,
        length(r$featureSelection$featureNames),
        r$testResult$correctTestP,
        r$testResult$correctTrainP,
        r$featureSelectionDurations[3],
        r$modelBuildingDurations[3],
        r$testDurations[3],
        r$testResult$mutualInformation,
        r$testResult$mutualInformationWithOutcome,
        r$testResult$testcm$table[2,2],
        r$testResult$testcm$table[1,1],
        r$testResult$testcm$table[2,1],
        r$testResult$testcm$table[1,2],
        paste(r$featureSelection$featureNames, sep=",", collapse = ',')
      )
    }
    else {
      # Fill every result column with the error marker.
      result[nrow(result)+1,] <- c(common, rep("<error>", length(cols) - length(common)))
    }
  }
  if (!includePredictors)
    result <- subset(result, select = -c(Predictors) )
  return(result)
}
# Re-initializes the global result registry `rr` to an empty run list.
resetResultRegistry <- function() {
  rr <<- list(runs = list())
}
# Builds a data frame with the five fixed metadata columns plus the predictor
# columns belonging to the requested feature sets. featureSets is a
# comma-separated subset of "task", "2gram", "startend" and "order"; a
# predictor's membership is decided by its column-name pattern. Unknown set
# names are silently ignored.
selectFeatureSets <- function(featureSets, df) {
  allFeatures <- names(df)[6:ncol(df)]
  writeLogMessage(paste("Selecting predictor sets: ", featureSets, " out of ", length(allFeatures), " predictors", sep=""))
  # Column-name regex per feature set.
  patterns <- list(
    task     = "^X[0-9]+$",
    "2gram"  = "^X[1-9][0-9]*_[1-9][0-9]*$",
    startend = "^X((0_[0-9]+|([0-9]+_0)))$",
    order    = "^X[1-9][0-9]*\\.[1-9][0-9]*$"
  )
  resultFeatures <- names(df)[1:5]
  for (set in strsplit(featureSets, ",")[[1]]) {
    pattern <- patterns[[set]]
    if (!is.null(pattern)) {
      resultFeatures <- append(resultFeatures, grep(pattern, allFeatures, perl=TRUE, value=TRUE))
    }
  }
  result <- df[,resultFeatures]
  writeLogMessage(paste("Selected total of ", length(resultFeatures), " predictors", sep=""))
  return(result)
}
# Runs the full experiment grid for one data set: every combination of repeat,
# sample size, feature count, feature-selection function, dummification
# function and feature-set specification is trained and evaluated once.
# Results are appended to a CSV report after every iteration, and console
# output is mirrored to a text log via sink().
# phenomenon is either NULL (use the default "duration>7d" outcome already in
# paramdf) or c(attributeName, attributeValue), in which case the Selected
# outcome is re-derived from the matching case attribute.
# NOTE(review): relies on globals logFileLocation, caseAttributeData, seed and
# writeLogMessage defined elsewhere in this file; exports a dbg global.
performTestSuite <- function(testName, paramdf, sampleSizes, numVars, selectionFuncNames, outcomeFeature, filteredFeatures, dummyFuncs, featureSetsToTest, numRepeats = 1, phenomenon = NULL, seedOffset = 0) {
  if (is.null(testName))
    testName <- "test"
  if (is.null(seedOffset))
    seedOffset <- 0
  # One CSV (incremental results) and one TXT (console mirror) per suite run.
  filePrefix <- paste(logFileLocation, testName, "-", as.Date(Sys.time()), "-", format(Sys.time(), "%H%M%S"), sep="")
  logFile <- paste(filePrefix, ".csv", sep="")
  logMessageFile <- paste(filePrefix, ".txt", sep="")
  writeLogMessage(paste("Redirecting output to: ", logMessageFile, sep=""))
  sink(logMessageFile, split=TRUE) # fixed: split expects a logical, not the string "TRUE"
  if (is.null(phenomenon)) {
    phenomenon <- "duration>7d"
  } else {
    # Re-derive the binary Selected outcome from the requested case attribute.
    tmp <- cbind.data.frame(Name=caseAttributeData$Name, Selected2=ifelse(caseAttributeData[,(which(colnames(caseAttributeData)==phenomenon[1]))]==phenomenon[2], 1, 0))
    tmp <- merge(paramdf, tmp, by="Name")
    paramdf$Selected <- tmp$Selected2
    paramdf$SelectedC <- ifelse(paramdf$Selected==1, "yes", "no")
    phenomenon <- paste(phenomenon[1], "=", phenomenon[2], sep="")
  }
  writeLogMessage(paste("Starting test set for outcome feature: ", outcomeFeature, " rows in full test data: ", nrow(paramdf), " filtered features: ", filteredFeatures, " phenomenon: ", phenomenon, sep=""))
  result <- NULL
  result$runs <- list()
  id <- 1
  if (is.null(dummyFuncs)) {
    # Fixed: the original used c(NULL), which is still NULL, so the inner loop
    # iterated over 1:length(NULL) == c(1, 0) and ran every configuration
    # twice. "" selects the no-dummification ("<none>") path below.
    dummyFuncs = c("")
  }
  if (is.null(featureSetsToTest)) {
    featureSetsToTest = c("task,startend,2gram,order")
  }
  reportdf <- data.frame()
  totalIterationCount <- numRepeats * length(sampleSizes) * length(numVars) * length(selectionFuncNames) * length(dummyFuncs) * length(featureSetsToTest)
  for (r in seq_len(numRepeats)) {
    for (s in seq_along(sampleSizes)) {
      df <- paramdf[1:(sampleSizes[s]),]
      writeLogMessage(paste("Starting tests for test data having ", nrow(df), " rows.", sep=""))
      for (v in numVars) {
        for (i in seq_along(selectionFuncNames)) {
          for (d in seq_along(dummyFuncs)) {
            for (f in seq_along(featureSetsToTest)) {
              result$runs <- list() # reset list to avoid excessive memory use
              featureSets <- featureSetsToTest[f]
              dummyFuncName <- dummyFuncs[d]
              if (is.null(dummyFuncName) || dummyFuncName == "") {
                dummyFunc <- NULL
                dummyFuncName <- "<none>"
              }
              else
                dummyFunc <- match.fun(dummyFuncName) # was eval(parse(...)); match.fun is the safe name lookup
              sFuncName <- selectionFuncNames[i]
              sFunc <- match.fun(sFuncName)
              writeLogMessage(paste("Starting test #", id, "/", totalIterationCount ," using ", v, " features, selection function: ", sFuncName, ", dummy function: ", dummyFuncName, ", feature sets: ", featureSets, sep=""))
              res <- NULL
              startTime <- Sys.time()
              tdf <- selectFeatureSets(featureSets, df)
              # try() so one failing configuration does not abort the suite;
              # NOTE(review): `seed` is a global.
              testFunc <- function() {
                res <<- performTest(tdf, sFunc, v, outcomeFeature, filteredFeatures, dummyFunc, seed + r + seedOffset)
              }
              dur <- try(system.time(testFunc()))
              res$id <- id
              res$rpt <- r + seedOffset
              res$startTime <- startTime
              res$phenomenon <- phenomenon
              res$dataSetSize <- nrow(df)
              res$testName <- testName
              res$featureSelection$algorithm <- sFuncName
              res$featureSelection$dummyVariableCreationFunction <- dummyFuncName
              res$featureSelection$initialPredictorSets <- featureSets
              if (inherits(dur, "try-error")) {
                res$error <- dur
                res$stacktrace <- traceback()
              }
              else {
                res$durations <- dur
              }
              result$runs[[1]] <- res
              writeLogMessage(paste("Test finished for function ", sFuncName, ": elapsed=", dur[3], "", sep=""))
              dbg <<- result
              # Persist incrementally so partial results survive a crash.
              reportdf <- rbind.data.frame(reportdf, report(result, TRUE))
              write.csv(reportdf, logFile)
              id <- id + 1
            }
          }
        }
      }
    }
  }
  sink()
  return(result)
}
# Runs performTestSuite for the given phenomenon; when a non-NULL phenomenon is
# supplied, the suite is additionally run first with the default (NULL)
# phenomenon so both outcomes are covered.
performTestSuiteIncludingDefaultPhenomenon <- function(testName, paramdf, sampleSizes, numVars, selectionFuncNames, outcomeFeature, filteredFeatures, dummyFuncs, featureSetsToTest, numRepeats = 1, phenomenon = NULL, seedOffset = 0) {
  runSuite <- function(ph) {
    performTestSuite(testName, paramdf, sampleSizes, numVars, selectionFuncNames,
                     outcomeFeature, filteredFeatures, dummyFuncs, featureSetsToTest,
                     numRepeats, ph, seedOffset)
  }
  if (!is.null(phenomenon)) {
    runSuite(NULL)
  }
  runSuite(phenomenon)
}
# Converts categorical predictor columns to binary indicator ("dummy") columns
# with caret::dummyVars (fullRank = T drops one reference level per factor).
# When createFeaturesForMoreThanNLevels is non-zero, an extra 0/1 indicator
# column (original value != 0) is appended for every original column with at
# least that many distinct values.
# NOTE(review): relies on writeLogMessage and convertToFactors defined
# elsewhere in this file and on the caret package being attached.
dummify <- function (df, createFeaturesForMoreThanNLevels = 0) {
  writeLogMessage(paste("Number of features before dummification: ", ncol(df), sep=""))
  predictorCols <- df
  trsf <- data.frame(predict(dummyVars(" ~ .", data = predictorCols, fullRank = T), newdata = predictorCols))
  writeLogMessage(paste("Number of features with dummies: ", ncol(trsf), sep=""))
  t <- convertToFactors(trsf)
  # Drop dummy columns that ended up constant (single factor level).
  trsf <- trsf[, sapply(t, nlevels) > 1] # using trsf instead of t causes "incorrect number of dimensions"
  result <- convertToFactors(trsf)
  writeLogMessage(paste("Number of features after dummy creation: ", ncol(result), sep=""))
  if (createFeaturesForMoreThanNLevels != 0) {
    colsWithManyLevels <- names(predictorCols[, sapply(predictorCols, function(col) length(unique(col))) >= createFeaturesForMoreThanNLevels])
    for (col in 1:length(colsWithManyLevels)) {
      result <- (cbind.data.frame(result, ifelse(predictorCols[colsWithManyLevels[col]]!=0, 1, 0)))
    }
    writeLogMessage(paste("Number of features after adding multi level indicator cols: ", ncol(result), sep=""))
  }
  return(result);
}
# For each numeric visit-count column, appends 0/1 indicator columns for
# "exactly 1 visit" (.1 suffix), "exactly 2 visits" (.2 suffix) and
# "3 or more visits" (.N suffix), then returns ONLY the indicator columns
# (the originals are removed at the end).
# NOTE(review): the second and third detection passes scan predictorCols after
# earlier indicator columns were appended; this is harmless today only because
# indicators are 0/1 and cannot match "== 2" or "max >= 3" - fragile, confirm
# before reordering.
# NOTE(review): relies on convertToNumeric, convertToFactors,
# removeColumnsHavingOneLevel and writeLogMessage defined elsewhere in this file.
dummyAddSeparateFeatureForMoreThanOneLevel <- function (df) {
  originalCols <- df
  predictorCols <- convertToNumeric(originalCols)
  writeLogMessage(paste("Number of predictors before adding indicators for 1, 2 and >2 visits:", ncol(originalCols)))
  # Pass 1: indicator for columns containing the value 1 at least once.
  colsWithManyRevisits <- names(predictorCols[, sapply(predictorCols, function(col) sum(ifelse(col == 1, 1, 0)) >= 1)])
  if (length(colsWithManyRevisits) > 0) {
    for (col in 1:length(colsWithManyRevisits)) {
      predictorCols <- (cbind.data.frame(predictorCols, ifelse(predictorCols[colsWithManyRevisits[col]]==1, 1, 0)))
      names(predictorCols)[ncol(predictorCols)] <- paste(colsWithManyRevisits[col],".1", sep="")
    }
    writeLogMessage(paste("Indicators added for features with exactly one visits: ", length(colsWithManyRevisits), sep=""))
  }
  # Pass 2: indicator for columns containing the value 2 at least once.
  colsWithManyRevisits <- names(predictorCols[, sapply(predictorCols, function(col) sum(ifelse(col == 2, 1, 0)) >= 1)])
  if (length(colsWithManyRevisits) > 0) {
    for (col in 1:length(colsWithManyRevisits)) {
      predictorCols <- (cbind.data.frame(predictorCols, ifelse(predictorCols[colsWithManyRevisits[col]]==2, 1, 0)))
      names(predictorCols)[ncol(predictorCols)] <- paste(colsWithManyRevisits[col],".2", sep="")
    }
    writeLogMessage(paste("Indicators added for features with exactly two visits: ", length(colsWithManyRevisits), sep=""))
  }
  # Pass 3: indicator for columns whose maximum is 3 or more.
  colsWithManyRevisits <- names(predictorCols[, sapply(predictorCols, function(col) max(col) >= 3)])
  if (length(colsWithManyRevisits) > 0) {
    for (col in 1:length(colsWithManyRevisits)) {
      predictorCols <- (cbind.data.frame(predictorCols, ifelse(predictorCols[colsWithManyRevisits[col]]>=3, 1, 0)))
      names(predictorCols)[ncol(predictorCols)] <- paste(colsWithManyRevisits[col],".N", sep="")
    }
    writeLogMessage(paste("Indicators added for features with more than three visits: ", length(colsWithManyRevisits), sep=""))
  }
  # Keep only the newly created indicator columns, drop constants, factorize.
  predictorCols <- predictorCols[,!(names(predictorCols) %in% names(originalCols))]
  predictorCols <- convertToFactors(predictorCols)
  predictorCols <- removeColumnsHavingOneLevel(predictorCols)
  writeLogMessage(paste("Number of predictors after adding indicators for 1, 2 and >2 visits:", ncol(predictorCols)))
  return(predictorCols);
}
# Plain dummification without multi-level indicator columns.
dummyOnly <- function(df) {
  dummify(df, createFeaturesForMoreThanNLevels = 0)
}
# Dummification plus a 0/1 indicator for every original column (threshold 1,
# i.e. all columns qualify).
dummyAndCreateMultiLevelFeatures <- function(df) {
  dummify(df, createFeaturesForMoreThanNLevels = 1)
}
# Start each session with an empty result registry.
resetResultRegistry()
########################################################################################
# Initialize test data
# Alternative data sets (uncomment one loadDataset call to switch):
#loadDataset("rabobank-all-structural-features", "rabobank-case-attributes", "Selected", "SelectedC", ";")
#loadDataset("hospital-all-features", "", "Selected", "SelectedC", ",")
#loadDataset("BPIC13_incidents-all-features", "", "Selected", "SelectedC", ";")
#loadDataset("BPIC17_morethan5weeks-all-features", "", "Selected", "SelectedC", ";")
#loadDataset("BPIC12_morethan2weeks-all-features", "", "Selected", "SelectedC", ";")
########################################################################################
# Example for running actual tests
# BPIC14 (Rabobank) suite: 40000 rows, 10 and 30 selected features, five
# selection algorithms, all feature-set combinations, one repeat, phenomenon
# Category = "request for information" (plus the default phenomenon).
loadDataset("rabobank-all-structural-features", "rabobank-case-attributes", "Selected", "SelectedC", ";")
r <- performTestSuiteIncludingDefaultPhenomenon(
  "bpic14",
  traindf,
  c(40000),
  c(10,30),
  c(
    "getPrunedTraindfCluster",
    "getPrunedTraindfClusterMRMR",
    "getPrunedTraindfClusterFisher",
    "getPrunedTraindfFisher",
    "getPrunedTraindfMRMREnsemble5"
  ),
  "Selected",
  "",
  c(""),
  c("task", "task,startend", "task,startend,2gram", "task,startend,order", "task,startend,2gram,order", "task,2gram", "task,2gram,order", "task,order", "2gram", "order", "2gram,order"),
  1
  , c("Category", "request for information")
)
# BPIC12 suite: same grid on the full 13087-row data set, default phenomenon.
loadDataset("BPIC12_morethan2weeks-all-features", "", "Selected", "SelectedC", ";")
r <- performTestSuite(
  "bpic12",
  traindf,
  c(13087),
  c(10,30),
  c(
    "getPrunedTraindfCluster",
    "getPrunedTraindfClusterMRMR",
    "getPrunedTraindfClusterFisher",
    "getPrunedTraindfFisher",
    "getPrunedTraindfMRMREnsemble5"
  ),
  "Selected",
  "",
  c(""),
  c("task", "task,startend", "task,startend,2gram", "task,startend,order", "task,startend,2gram,order", "task,2gram", "task,2gram,order", "task,order", "2gram", "order", "2gram,order"),
  1
)
|
b956a12730ffb0001e3a74bbbac69ed308f50e23
|
c7c5813adee3d966baced00501b4f7d15ecc3e4c
|
/man/plot.slm.Rd
|
b4fd5285c2178e709e64948583216922829f0071
|
[] |
no_license
|
E-Caron/slm
|
8f181ce1a03526843f1b4ea1b647186b67145edc
|
a80d9765fda9e29fa3af78c990fea3931199d0f2
|
refs/heads/master
| 2020-06-06T09:43:52.667197
| 2020-01-08T19:08:17
| 2020-01-08T19:08:17
| 192,704,653
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 568
|
rd
|
plot.slm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slm-method.R
\name{plot.slm}
\alias{plot.slm}
\title{Plot.slm}
\usage{
\method{plot}{slm}(x, ...)
}
\arguments{
\item{x}{\code{slm} object.}
\item{...}{other parameters to be passed through to plotting functions.}
}
\value{
This function returns the graphics of \code{plot.lm(x)}.
}
\description{
Same function as the \code{\link[stats:plot.lm]{plot.lm}} function.
}
\examples{
data("shan")
reg = slm(shan$PM_Xuhui ~ . , data = shan, method_cov_st = "fitAR", model_selec = -1)
plot(reg)
}
|
16a2931fffc592f7c7a7b75f0b68c533010874a8
|
556fbe5b5bfec4a57f03d70656132ad36e4703b7
|
/raw_scripts/relatedness-vec-to-mat.R
|
59b77e2d8010b5c30b3b0d2e493ad802fe2a2d4b
|
[] |
no_license
|
mastoffel/scent
|
1cf03da1b41f4a161421d5e98a884877da2ee667
|
cbc1beca6a455f3f2d23ba7c51dbe9e4aa706e61
|
refs/heads/master
| 2021-01-20T05:33:37.163758
| 2015-05-18T16:21:25
| 2015-05-18T16:21:25
| 24,264,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,017
|
r
|
relatedness-vec-to-mat.R
|
# Converts pairwise relatedness values stored as a long (Ind1, Ind2, value)
# table into the matrix layout of the existing `relatedness` matrix.
# NOTE(review): expects a `relatedness` matrix/data frame to already exist in
# the workspace (row and column names are individual IDs).
# load new relatedness values in strange vector form from athinas program
relatedness.raw <- read.csv(".\\csv_files\\relatedness_raw.csv")
# "copy" (old) relatedness matrix, keeping dimensions and dimnames but
# clearing all values
relate_new <- relatedness
relate_new[, ] <- NA
# loop through matrix and fill non-NA values with new ones
for (i in row.names(relatedness)) {
  for (k in names(relatedness)) {
    if (!is.na(relatedness[i,k])) {
      rowind <- which(relatedness.raw$Ind1 == i & relatedness.raw$Ind2 == k)
      # the pair may be stored the other way round (Ind2, Ind1)
      if (length(rowind) == 0) {
        rowind <- which(relatedness.raw$Ind1 == k & relatedness.raw$Ind2 == i)
      }
      relateval <- relatedness.raw[rowind,3] # third column contains values
      relate_new[i,k] <- relateval
    }
  }
}
library(rJava)
library(xlsx)
# Export the rebuilt matrix to Excel.
write.xlsx(relate_new,"relatedness_new.xlsx",row.names=TRUE,col.names=TRUE)
|
d1005b11f7f10cbc8499e53a1fd99cc40c032800
|
178c78e68d91522d4d869bbc765f5fa3bfc97283
|
/11_paris_reu_script.R
|
abefcd87d6bd3ddeef1ea1ba33d09b12ecbafd45
|
[] |
no_license
|
FoRTExperiment/FoRTE-canopy
|
c9284cfbe4461e4ae032cf91a774fb557c095369
|
a8790b62db2a1194c76b17581001ade5cc859e11
|
refs/heads/master
| 2020-03-19T01:57:43.223085
| 2020-03-02T15:28:17
| 2020-03-02T15:28:17
| 135,586,490
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,380
|
r
|
11_paris_reu_script.R
|
#
##### pulling the data
#
# these are required packages to install/load
# NOTE(review): library() is preferred over require() for mandatory
# dependencies (require() only warns on failure).
require(tidyverse)
require(ggplot2)
require(stringr)
# let's import the data
df <- read.csv("./data/ndvi/ndvi_reu_forte_set.csv")
# let's make summary stats: per subplot/date/treatment/severity means and SDs
# of LAI, NDVI and gap fraction
df %>%
  select(SubplotID, treatment, severity, date, lai, ndvi, gf) %>%
  group_by(SubplotID, date, treatment, severity) %>%
  summarise(mean.lai = mean(lai, na.rm = TRUE), sd.lai = sd(lai, na.rm = TRUE),
            mean.ndvi = mean(ndvi, na.rm = TRUE), sd.ndvi = sd(ndvi, na.rm = TRUE),
            mean.gf = mean(gf, na.rm = TRUE), sd.gf = sd(gf, na.rm = TRUE),n = n()) -> df.sums
#make into a data frame
df.sums <- data.frame(df.sums)
##### PLOTS
# BY TREATMENT
# LAI over time, faceted by treatment
x11(width = 4, height = 4)
ggplot(df.sums, aes(x = as.Date(date), y = mean.lai, color = treatment))+
  geom_point(size = 4, alpha = 0.4)+
  theme_bw()+
  xlab("DATE")+
  ylab("LAI")+
  geom_smooth(method = lm, se = FALSE)+
  scale_color_discrete(name="Treatment",
                       breaks=c("C", "B", "T"),
                       labels=c("Control", "Bottom-Up", "Top-Down"))+
  theme(legend.position = "Bottom")+
  facet_grid(rows = vars(treatment))
# NDVI over time, faceted by treatment
x11(width = 4, height = 4)
ggplot(df.sums, aes(x = as.Date(date), y = mean.ndvi, color = treatment))+
  geom_point(size = 4, alpha = 0.4)+
  theme_bw()+
  xlab("DATE")+
  ylab("NDVI")+
  geom_smooth(method = lm, se = FALSE)+
  scale_color_discrete(name="Treatment",
                       breaks=c("C", "B", "T"),
                       labels=c("Control", "Bottom-Up", "Top-Down"))+
  theme(legend.position = "Bottom")+
  facet_grid(rows = vars(treatment))
# Gap fraction over time, faceted by treatment
x11(width = 4, height = 4)
ggplot(df.sums, aes(x = as.Date(date), y = mean.gf, color = treatment))+
  geom_point(size = 4, alpha = 0.4)+
  theme_bw()+
  xlab("DATE")+
  ylab("GF")+
  geom_smooth(method = lm, se = FALSE)+
  scale_color_discrete(name="Treatment",
                       breaks=c("C", "B", "T"),
                       labels=c("Control", "Bottom-Up", "Top-Down"))+
  theme(legend.position = "Bottom")+
  facet_grid(rows = vars(treatment))
# boxplots
# NOTE(review): vars(group) assumes a `group` column exists in df - confirm.
x11()
ggplot(df, aes(x = date, y = ndvi, fill = SubplotID))+
  geom_boxplot()+
  theme_classic()+
  guides(fill=FALSE)+
  xlab("")+
  ylab("NDVI")+
  facet_grid(rows = vars(group))
# an analysis of variance
aov.lai.trt <- aov(lai ~ date * treatment * group, data = df)
summary(aov.lai.trt)
|
dba41807414b929058237402423a84ec26729689
|
0fd6ff59ea66c900a3fc25c9b7de478670570782
|
/Statistical Inference and Modeling for High-Throughput Experiments.R
|
57b7b30bf61d93899fb4632c51e4ed4104252091
|
[] |
no_license
|
Staguado/Data-Analysis-in-the-Health-Sciences
|
b61ee23570829f20fe1ff2a23b87c5d9d142f180
|
176a8637875e4969e08aa00511f34b8036e1db98
|
refs/heads/main
| 2023-07-18T01:15:03.823893
| 2021-08-26T11:13:25
| 2021-08-26T11:13:25
| 392,615,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62,711
|
r
|
Statistical Inference and Modeling for High-Throughput Experiments.R
|
#-----------------------------------------------------------------------------------------------
# By Santiago Taguado Menza
# Statistical Inference & Modeling for High-Throughput Experiments
# Harvard Course
#-----------------------------------------------------------------------------------------------
# Testing P-values without Adjustment
# Using Swirl
# NOTE(review): stray swirl-exercise fragment - `dat` and `k` are undefined
# here, so this line errors if the script is sourced top-to-bottom.
dat[ dat[,3] > k , ]
install.packages("swirl")
library(swirl)
swirl()
# Downloading Training Data
library(devtools)
install_github("genomicsclass/GSE5859Subset")
library(GSE5859Subset)
data(GSE5859Subset) ##this loads the three tables
# How many samples were processed on 2005-06-27?
exams_on_july_27_2005<- sampleInfo %>% filter(date == "2005-06-27")
nrow(exams_on_july_27_2005)
# Alternatively, use the following:
sum(sampleInfo$date=="2005-06-27")
# How many of the genes represented in this particular technology are on chromosome Y?
# Remove features that have NA in the column of interest.
gene_Y <- geneAnnotation %>% filter(CHR == "chrY")
nrow(gene_Y)
# Alternatively, use the following:
sum(geneAnnotation$CHR=="chrY",na.rm=TRUE)
# We need the na.rm=TRUE option because some features are controls and have NA in the CHR column.
# What is the log expression value of the for gene ARPC1A on the one subject that was measured on 2005-06-10?
exams_on_june_10_2005<- sampleInfo %>% filter(date == "2005-06-10")
exams_on_june_10_2005
ARPCIA_gene <- geneAnnotation %>% filter(SYMBOL == "ARPC1A")
ARPCIA_gene
# GSM136727.CEL.gz
# 200950_at
geneExpression["200950_at","GSM136727.CEL.gz"]
# Alternatively, use the following:
i = which(geneAnnotation$SYMBOL=="ARPC1A")
j = which(sampleInfo$date=="2005-06-10")
geneExpression[i,j]
# Median of the per-sample (column-wise) medians of the expression matrix.
median_column <- apply(geneExpression,MARGIN = 2, median)
median(median_column)
# Note that we can also use the colMedians() function from the matrixStats package.
# Per-gene two-sample t-test p-values between the two groups; g is captured
# from the enclosing environment by p_values.
g <- factor(sampleInfo$group)
p_values <- function(e) {t.test(e[g==1], e[g==0])$p.value}
p_values_data <- apply(geneExpression,1, p_values)
min(p_values_data)
# Equivalent formulation passing the group explicitly as an argument.
myttest <- function(e,group){
  x <- e[group==1]
  y <- e[group==0]
  return( t.test(x,y)$p.value )
}
g <- factor(sampleInfo$group)
pvals <- apply(geneExpression,1,myttest, group=g)
min( pvals )
#-----------------------------------------------------------------------------------------------
# Testing P-values without Adjustment Part 2
# NOTE(review): this entire section is a verbatim duplicate of the section
# above ("Testing P-values without Adjustment") - consider removing it.
# Using Swirl
dat[ dat[,3] > k , ]
install.packages("swirl")
library(swirl)
swirl()
# Downloading Training Data
library(devtools)
install_github("genomicsclass/GSE5859Subset")
library(GSE5859Subset)
data(GSE5859Subset) ##this loads the three tables
exams_on_july_27_2005<- sampleInfo %>% filter(date == "2005-06-27")
nrow(exams_on_july_27_2005)
# Alternatively, use the following:
sum(sampleInfo$date=="2005-06-27")
# How many of the genes represented in this particular technology are on chromosome Y?
# Remove features that have NA in the column of interest.
gene_Y <- geneAnnotation %>% filter(CHR == "chrY")
nrow(gene_Y)
# Alternatively, use the following:
sum(geneAnnotation$CHR=="chrY",na.rm=TRUE)
# We need the na.rm=TRUE option because some features are controls and have NA in the CHR column.
# What is the log expression value of the for gene ARPC1A on the one subject that was measured on 2005-06-10?
exams_on_june_10_2005<- sampleInfo %>% filter(date == "2005-06-10")
exams_on_june_10_2005
ARPCIA_gene <- geneAnnotation %>% filter(SYMBOL == "ARPC1A")
ARPCIA_gene
# GSM136727.CEL.gz
# 200950_at
geneExpression["200950_at","GSM136727.CEL.gz"]
# Alternatively, use the following:
i = which(geneAnnotation$SYMBOL=="ARPC1A")
j = which(sampleInfo$date=="2005-06-10")
geneExpression[i,j]
median_column <- apply(geneExpression,MARGIN = 2, median)
median(median_column)
# Note that we can also use the colMedians() function from the matrixStats package.
g <- factor(sampleInfo$group)
p_values <- function(e) {t.test(e[g==1], e[g==0])$p.value}
p_values_data <- apply(geneExpression,1, p_values)
min(p_values_data)
myttest <- function(e,group){
  x <- e[group==1]
  y <- e[group==0]
  return( t.test(x,y)$p.value )
}
g <- factor(sampleInfo$group)
pvals <- apply(geneExpression,1,myttest, group=g)
min( pvals )
#-----------------------------------------------------------------------------------------------
# Computationally Proving P-value are Random Variables
# Note that we will later learn about the rowttests() function from the genefilter package,
# which performs this operation.
# Inference in Practice Exercises #1
# These exercises will help clarify that p-values are random variables and some of the properties
# of these p-values. Note that just like the sample average is a random variable because it is based
# on a random sample, the p-values are based on random variables (sample mean and sample standard
# deviation for example) and thus it is also a random variable.
# To see this, let's see how p-values change when we take different samples.
set.seed(1)
library(downloader)
url = "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/femaleControlsPopulation.csv"
filename = "femaleControlsPopulation.csv"
if (!file.exists(filename)) download(url,destfile=filename)
population = read.csv(filename)
# 1,000 replicates: each draws two samples of 12 from the population and
# records the two-sample t-test p-value. Under the null, these p-values
# should be (approximately) uniform on [0, 1].
pvals <- replicate(1000,{
  control = sample(population[,1],12)
  treatment = sample(population[,1],12)
  t.test(treatment,control)$p.val
})
head(pvals)
hist(pvals)
# What proportion of the p-values is below 0.05?
mean(pvals < 0.05)
# What proportion of the p-values are below 0.01?
mean(pvals < 0.01)
# Assume you are testing the effectiveness of 20 diets on mice weight.
# For each of the 20 diets, you run an experiment with 10 control mice and 10 treated mice.
# Assume the null hypothesis that the diet has no effect is true for all 20 diets and
# that mice weights follow a normal distribution with mean 30 grams and a standard deviation of 2 grams.
# Run a Monte Carlo simulation for one of these studies:
cases = rnorm(10,30,2)
controls = rnorm(10,30,2)
t.test(cases,controls)$p.value
# Now run a Monte Carlo simulation imitating the results for the experiment for all 20 diets.
# If you set the seed at 100, set.seed(100), and use the same code as above inside a call to replicate(),
# how many of the p-values (number not proportion) are below 0.05?
set.seed(100)
# NOTE(review): inside each replicate, p_values is a single scalar, so
# sum(p_values <= 0.05) is just a 0/1 indicator per diet, and the resulting
# vector is never aggregated. This first attempt is superseded by the
# mousePval() loop below, which collects the p-values themselves.
pvals_20_diets <- replicate(20, {cases = rnorm(10,30,2)
controls = rnorm(10,30,2)
p_values = t.test(cases,controls)$p.value
sum(p_values <= 0.05)})
set.seed(100)
# One simulated 10-vs-10 diet study under the null; returns its p-value.
mousePval = function(){
  cases = rnorm(10,30,2)
  controls = rnorm(10,30,2)
  return(t.test(cases,controls)$p.value)
}
# NOTE(review): growing a vector inside a loop; sapply/replicate or
# preallocation would be the idiomatic choice, but results are identical.
pvals = c()
for (i in 1:20){
  pvals[i] = mousePval()
}
length(pvals[pvals < 0.05])
# Now create a simulation to learn about the distribution of the number of p-values that are less than 0.05.
# In the previous question, we ran the 20 diet experiment once.
# Now we will run these 20 experiments 1,000 times and each time save the number of p-values that are
# less than 0.05.
# Set the seed at 100 again, set.seed(100), run the code from the previous question 1,000 times,
# and save the number of times the p-value is less than 0.05 for each of the 1,000 instances.
# What is the average of these 1,000 numbers?
# Note that this is the expected number of tests (out of the 20 we run)
# that we will reject when the null is true.
set.seed(100)
# Outer replicate = 1,000 experiments; inner replicate = 20 diets each.
# Each outer iteration records the count of p-values <= 0.05 among its 20.
pvals_20_diets_1000 <- replicate(1000,{
  pvals = replicate(20,{
    cases = rnorm(10,30,2)
    controls = rnorm(10,30,2)
    t.test(cases,controls)$p.value
  })
  sum(pvals <= 0.05)
})
table(pvals_20_diets_1000)
mean(pvals_20_diets_1000)
# Inference in Practice Exercises #5
# 1 point possible (graded)
# Note that what the answer to question #4 says is that on average,
# we expect some p-value to be 0.05 even when the null is true for all diets.
# Using the same simulation data from the question above, for what proportion of the 1,000 replicates
# do we reject the null hypothesis at least once (more than 0 false positives)?
# FIX: the original referenced an undefined variable `plessthan`; the
# per-replicate false-positive counts were stored in pvals_20_diets_1000.
mean(pvals_20_diets_1000 > 0)
# Quiz 1
# Suppose you plan to run an experiment screening a panel of 30,000 small molecules to determine which
# ones increase expression of a fluorescent reporter gene. In untreated cells, the reporter gene expression
# follows a normal distribution with a mean of 8 units and a standard deviation of 2 units.
# There will be 100 untreated control wells, and each of the 30,000 molecules will be tested in
# 10 technical replicates. You want to simulate the experiment to figure out how many hits would come
# out of your screen if the null hypothesis is true for all 30,000 cases.
# Set the seed to 3, then generate the results of the 100 control wells:
set.seed(3)
ctrl = rnorm(100, 8, 2)
# This example code simulates 10 technical replicates for one compound for which the null hypothesis is true:
expt = rnorm(10, 8, 2)
t.test(ctrl, expt)$p.value
# Now set the seed to 4 and use replicate() to simulate 30,000 tests for which the null hypothesis is true.
# The example code for one compound should go inside your replicate() call. Note that each test will compare
# the same ctrl vector to a new simulated experimental vector.
# Make a histogram of the p-values from this experiment.
# Question 1 - Quiz 1: Which distribution do these p-values follow most closely?
# (Under the null, p-values are approximately uniform on [0, 1].)
set.seed(4)
B <- 30000
pval_distribution <- replicate(B, {
  expt = rnorm(10, 8, 2)
  t.test(ctrl, expt)$p.value})
hist((pval_distribution))
# Question 2 - Quiz 1:
# What proportion of tests have a p-value below 0.05?
sum(pval_distribution < 0.05)/B
# Question 3 - Quiz 1:
# Since this simulation assumes that the null distribution is true for all compounds,
# any results that have a p-value below a given cutoff will be false positives.
# How many compounds have a p-value below 0.05?
sum(pval_distribution < 0.05)
# Question 4 - Quiz 1:
# If the p-value cutoff is lowered to 0.001, how many false positive results are there?
sum(pval_distribution < 0.001)
# Assume you are testing the effectiveness of 30 drugs on the white blood cell count of mice.
# For each of the 30 drugs you run an experiment with 5 control mice and 5 treated mice.
# Assume the null hypothesis that the drug has no effect is true for all 30 drugs and that white blood cell
# counts follow a normal distribution with mean 7.5 units and a standard deviation of 2.5 units.
# We will analyze the number of significant p-values expected by chance under the null distribution.
# Question 5 - Quiz 1:
# Set the seed to 28, then run a Monte Carlo simulation for one of these studies by randomly generating
# white blood cell counts for the cases and controls.
# Use a t-test to compute the p-value for this simulated study.
# What is the p-value for the one simulated study?
set.seed(28)
cases <- rnorm(5,7.5,2.5)
control <- rnorm(5,7.5,2.5)
t.test(cases, control)$p.value
# Question 6 - Quiz 1:
# Now run a Monte Carlo simulation imitating the results for the experiment for all 30 drugs.
# Set the seed to 51, set.seed(51),
# then use your code from the previous question inside of replicate().
# How many of the 30 simulated p-values are below 0.05?
set.seed(51)
# Number of Drugs
B<- 30
# Simulation of the drugs
Monte_carlos_H0_real <- replicate(B, {
  cases <- rnorm(5,7.5,2.5)
  control <- rnorm(5,7.5,2.5)
  t.test(cases, control)$p.value
})
sum(Monte_carlos_H0_real < 0.05)
# Question 7 - Quiz 1:
# Set the seed to 100, then repeat the simulated experiment 1000 times by using your code from the
# previous question inside a second replicate() loop.
# For each experiment, save the number of simulated p-values below 0.05.
# What is the average of the counts of p-values below 0.05?
set.seed(100)
# Outer replicate = 1,000 experiments; inner replicate = B = 30 drugs each.
Monte_carlos_null <- replicate(1000, {
  pvals = replicate(B, {
    cases <- rnorm(5,7.5,2.5)
    control <- rnorm(5,7.5,2.5)
    t.test(cases, control)$p.value})
  sum(pvals < 0.05)})
# Notes: You have to define the variable when doing a replicate within a replicate first. See how
# you first define pvals and then were able to return the values that were less than 0.05. On the
# previous question, you defined the function as Monte_Carlos_H0_real whereas in this case you defined
# it as pvals.
# Question 8 - Quiz 1:
# Make a histogram of the p-value counts from Question 7.
# Which of the following is NOT true about the distribution of p-values?
hist(Monte_carlos_null)
# Question 9 - Quiz 1:
# What proportion of simulated experiments have more than 3 p-values below 0.05?
mean(Monte_carlos_null > 3)
# In the previous assessment we saw how the probability of incorrectly rejecting the null for at
# least one of 20 experiments for which the null is true is well over 5%.
# Now let's consider a case in which we run thousands of tests as
# we would do in a high throughput experiment.
# We previously learned that under the null, the probability of a p-value < p is p.
# If we run 8,793 independent tests,
# what is the probability of incorrectly rejecting at least one of the null hypotheses?
n <- 8793
p_1 <- 0.95   # Pr(no rejection) for one test at alpha = 0.05 under the null
p_2 <- 0.05   # Pr(rejection) for one test under the null
0.95*8793     # expected number of tests NOT rejected under the null
# Let P_1, ..., P_8793 be the p-values (random variables). Then
# Pr(at least one rejection) = 1 - Pr(no rejections)
#                            = 1 - prod_{i=1}^{8793} Pr(P_i > 0.05)
#                            = 1 - 0.95^8793 ~ 1
# FIX: the original computed 1 - 0.5^8793, using the wrong base (0.5 instead
# of the per-test non-rejection probability 0.95).
1 - 0.95^8793
# Or verify with a simulation: each replicate draws 8,793 null p-values
# (uniform on [0, 1]) and records whether the smallest falls below 0.05.
# FIX: the original compared against 0 instead of the 0.05 cutoff, which
# would make every replicate FALSE.
B<-1000
minpval <- replicate(B, min(runif(8793,0,1)) < 0.05)
# Proportion of replicates with at least one false rejection (should be ~1).
mean(minpval)
# Suppose we need to run 8,793 statistical tests and we want to make the probability of a mistake very small,
# say 5%. Using the answer to exercise #2, how small do we have to change the cutoff, previously 0.05,
# to lower our probability of at least one mistake to be 5%.
# Sidak Procedure: 1 - (1 - alpha)^(1/m)
alpha <- 1 - (1 - 0.05)^(1/8793)
##warning this can take several minutes
##and will only give an approximate answer
# Grid-search confirmation: for each candidate cutoff, estimate by simulation
# the probability that at least one of 8,793 null p-values falls below it,
# then pick the cutoff whose estimated FWER is closest to 0.05.
B=10000
cutoffs = 10^seq(-7,-4,0.1) ##we know it has to be small
prob = sapply(cutoffs,function(cutoff){
  minpval =replicate(B, min(runif(8793,0,1))<=cutoff)
  mean(minpval>=1)
})
cutoffs[which.min(abs(prob-0.05))]
#-----------------------------------------------------------------------------------------------
# On optimization of code via vectorization:
library(downloader)
url <-"https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/femaleControlsPopulation.csv"
filename <- "femaleControlsPopulation.csv"
if (!file.exists(filename)) download(url,destfile=filename)
set.seed(1)
(population = unlist( read.csv("femaleControlsPopulation.csv") ))
# To give an example of how we can simulate V (false positives) and S (true
# positives) we constructed a simulation with:
alpha <- 0.05
N <- 12
m <- 10000
p0 <- 0.90 ##10% of diets work, 90% don't
m0 <- m*p0
m1 <- m-m0
(nullHypothesis <- c( rep(TRUE,m0), rep(FALSE,m1)))
delta <- 3
# We then ran a Monte Carlo simulation by repeating a procedure in which 10,000 tests were run one by
# one using sapply().
B <- 10 ##number of simulations
system.time(
  VandS <- replicate(B,{
    calls <- sapply(1:m, function(i){
      control <- sample(population,N)
      treatment <- sample(population,N)
      if(!nullHypothesis[i]) treatment <- treatment + delta
      t.test(treatment,control)$p.val < alpha
    })
    # per-replicate pair: (false positives V, true positives S)
    c(sum(nullHypothesis & calls),sum(!nullHypothesis & calls))
  })
)
# In each iteration we checked if that iteration was associated with the null or alternative hypothesis.
# We did this with the line:
# if(!nullHypothesis[i]) treatment <- treatment + delta
# HOWEVER, in R, operations based on matrices are typically much faster than operations performed within
# loops or sapply(). We can vectorize the code to make it go much faster.
# This means that instead of using sapply() to run m tests, we will create a matrix with all data in one
# call to sample.
# This code runs several times faster than the code above, which is necessary here due to the fact that
# we will be generating several simulations.
# Understanding this chunk of code and how it is equivalent to the code above using sapply() will
# take a you long way in helping you code efficiently in R.
library(genefilter) ##rowttests is here
set.seed(1)
##Define groups to be used with rowttests
(g <- factor( c(rep(0,N),rep(1,N)) ))
B <- 10 ##number of simulations
system.time(
  VandS <- replicate(B,{
    ##matrix with control data (rows are tests, columns are mice)
    (controls <- matrix(sample(population, N*m, replace=TRUE),nrow=m))
    ##matrix with control data (rows are tests, columns are mice)
    (treatments <- matrix(sample(population, N*m, replace=TRUE),nrow=m))
    ##add effect to 10% of them
    (treatments[which(!nullHypothesis),]<-treatments[which(!nullHypothesis),]+delta)
    ##combine to form one matrix
    (dat <- cbind(controls,treatments))
    (calls <- rowttests(dat,g)$p.value < alpha)
    c(sum(nullHypothesis & calls),sum(!nullHypothesis & calls))
  })
)
#-----------------------------------------------------------------------------------------------
# P-value adjustment cutoffs: Bonferroni vs. Sidak.
# For a target FWER alpha over m tests, Bonferroni rejects at alpha / m while
# Sidak rejects at 1 - (1 - alpha)^(1/m). Visualize both cutoffs, then the
# gap between them for several values of m.
library(rafalib)
mypar(1, 2)
m <- 10000
alphas <- seq(0, 0.25, 0.01)
bonferroni <- alphas / m
sidaks <- 1 - (1 - alphas)^(1 / m)
plot(alphas, bonferroni)
plot(alphas, sidaks)
# Difference bonferroni - sidak as a function of alpha, for m = 2, 10, 100,
# and 1000 (one panel per m, from the course notes).
alphas <- seq(0, 0.25, 0.01)
par(mfrow = c(2, 2))
for (m in c(2, 10, 100, 1000)) {
  gap <- alphas / m - (1 - (1 - alphas)^(1 / m))
  plot(alphas, gap, type = "l")
  abline(h = 0, col = 2, lty = 2)
}
# The difference is non-positive everywhere: Bonferroni's cutoff is the
# smaller (more conservative) of the two for every alpha and m shown.
# To simulate the p-value results of, say, 8,793 t-tests for which the null is true,
# we don't actual have to generate the original data. As we learned in class,
# we can generate p-values from a uniform distribution like this:
pvals <- runif(8793,0,1)
# Using what we have learned, set the cutoff using the Bonferroni correction that
# guarantees an FWER lower than 0.05 and report back the FWER.
# Set the seed at 1,set.seed(1), and run 10,000 simulations.
# Report the Monte Carlo estimate of the FWER below.
# Running a Monte Carlo using a bonferroni correction
# NOTE(review): the instructions above say set.seed(1) but this first attempt
# uses set.seed(2) — the result is therefore seed-dependent; the alternative
# version below uses seed 1. Confirm which seed the exercise expects.
set.seed(2)
bonferroni <- 0.05/8793
B<- 10000
# Each replicate counts how many of 8,793 null p-values fall at or below the
# Bonferroni cutoff (0 or more false rejections per replicate).
FWER_Bonferroni <- replicate(B, {
  pvals <- runif(8793,0,1)
  sum(pvals <= bonferroni)
})
mean(FWER_Bonferroni)
# Alternatively, vectorized: one B x m matrix of null p-values, then count
# per-row mistakes and take the proportion of rows with at least one.
set.seed(1)
B <- 10000
m <- 8793
alpha <- 0.05
pvals <- matrix(runif(B*m,0,1),B,m)
k <- alpha/m
mistakes <- rowSums(pvals<k)
mean(mistakes>0)
# Using the same seed repeat the above for Sidak's cutoff.
# Report the FWER below.
# Running a Monte Carlo using Sidak's correction.
set.seed(2)
# Sidak cutoff guaranteeing FWER <= 0.05 over m = 8,793 independent tests.
sidak <- 1 - (1 - 0.05)^(1/8793)
B<- 10000
FWER_Sidak <- replicate(B, {
  pvals <- runif(8793,0,1)
  # FIX: the original compared against the Bonferroni cutoff computed in the
  # previous exercise; this simulation must use the Sidak cutoff.
  sum(pvals <= sidak)
})
# Average number of false rejections per replicate (~ the FWER, since
# replicates almost always have 0 or 1 rejections at this cutoff).
mean(FWER_Sidak)
##if pvals already defined no need to rerun this
# Vectorized version of the Sidak FWER simulation.
# NOTE(review): the B x m matrix below is 10,000 x 8,793 doubles
# (~700 MB of memory) — run only on a machine with enough RAM.
set.seed(2)
B <- 10000
m <- 8793
alpha <- 0.05
pvals <- matrix(runif(B*m,0,1),B,m)
pvals
# Sidak cutoff for target FWER alpha over m independent tests.
k <- (1-(1-alpha)^(1/m))
mistakes <- rowSums(pvals<k)
mean(mistakes>0)
# It must be noted that Dr. Irizarry's code is much better. My answer showed a Bonferroni rate bigger
# than Sidak's. This should not be the case because Bonferroni should have a lower FWER. However,
# he accepted my answer because it is dependent on the seed. His isn't.
#-----------------------------------------------------------------------------------------------
# Using the Qvalue package and Introduction to Key BiocManager Packages:
library(devtools)
library(rafalib)
install_github("genomicsclass/GSE5859Subset")
BiocManager::install(c("genefilter", "qvalue"))
library(GSE5859Subset)
data(GSE5859Subset)
library(genefilter)
library(qvalue)
# Question 1
# Compute a p-value for each gene using the function rowttests() from the genefilter package in
# Bioconductor.
g <- sampleInfo$group
first_rowttest <- rowttests(geneExpression, factor(g))
sum(first_rowttest$p.value<0.05)
# Question 2
# Now applying the bonferroni correction
# Apply the Bonferroni correction to the p-values obtained in question #1 to achieve a FWER of 0.05.
# How many genes are called significant under this procedure?
sum(first_rowttest$p.value< 0.05/8793)
# Question 3
# Note that the FDR is a property of a list of features, not each specific feature.
# The q-value relates FDR to an individual feature. To define the q-value we order features we tested
# by p-value then compute the FDRs for a list with the most significant,
# the two most significant, the three most significant, etc...
# The FDR of the list with the, say, m most significant tests is defined as the q-value of the
# m-th most significant feature.
# In other words, the q-value of a feature, is the FDR of the biggest list that includes that gene.
# In R, we can compute the q-value using the p.adjust function with the FDR option.
# Read the help file for p.adjust and then, for our gene expression dataset,
# compute how many genes achieve an FDR < 0.05
p_adjust <- p.adjust(first_rowttest$p.value, method = "fdr")
sum(p.adjust(first_rowttest$p.value, method = "fdr") < 0.05)
# Question 4
# Now use the qvalue function, in the Bioconductor qvalue package, to estimate q-values using the
# procedure described by Storey.
# Using this estimate how many genes have q-values below 0.05?
mypar(1,1)
qvalue_list <- qvalue(first_rowttest$p.value,fdr.level = 0.05)
sum(qvalue_list$qvalues < 0.05)
hist(qvalue_list$qvalues)
# Question 5
# Read the help file for qvalue and report the estimated proportion of genes for which the null hypothesis
# is true 𝜋0=𝑚0/𝑚
# NOTE(review): the question asks for the proportion pi0, but this line
# prints the estimated COUNT m0 = pi0 * m; qvalue_list$pi0 alone is the
# proportion.
qvalue_list$pi0*8793
# Question 6
# Compare the two q-value estimates: p.adjust(method = "fdr") vs qvalue().
plot(qvalue_list$qvalues,p_adjust)
# Question 7
# Create a Monte Carlo Simulation in which you simulate measurements from 8,793 genes for 24 samples:
# 12 cases and 12 controls.
n <- 24
m <- 8793
mat <- matrix(rnorm(n*m),m,n)
# Now for 500 genes, there is a difference of 2 between cases and controls:
delta <- 2
positives <- 500
mat[1:positives,1:(n/2)] <- mat[1:positives,1:(n/2)]+delta
# So the null hypothesis is true for 8793-500 genes.
# Using the notation from the videos m=8793, m0=8293 and m1=500
# Set the seed at 1, set.seed(1), and run this experiment 1,000 times with a Monte Carlo simulation.
# For each instance compute p-values using a t-test (using rowttests() in the genefilter package) and
# create three lists of genes using:
# Bonferroni correction to achieve an FWER of 0.05,
# p.adjust() estimates of FDR to achieve an FDR of 0.05, and
# qvalue() estimates of FDR to to achieve an FDR of 0.05.
# For each of these three lists compute the number of false positives in the list and the number of
# false negatives: genes not in the list that should have been because the null hypothesis is not true
# (we added 2 to the controls to create the cases).
# What is the false positive rate (false positives divided by m0) if we use Bonferroni?
# NOTE(review): heavy — each of the 1,000 replicates runs 8,793 row t-tests.
set.seed(1)
library(qvalue)
library(genefilter)
n <- 24
m <- 8793
B <- 1000
delta <-2
positives <- 500
g <- factor(rep(c(0,1),each=12))
result <- replicate(B,{
  mat <- matrix(rnorm(n*m),m,n)
  mat[1:positives,1:(n/2)] <- mat[1:positives,1:(n/2)]+delta
  pvals = rowttests(mat,g)$p.val
  ##Bonferroni
  # False positives: null genes (rows after the first 500) passing the cutoff.
  FP1 <- sum(pvals[-(1:positives)]<=0.05/m)
  FP1
})
mean(result/(m-positives))
# From the same Monte Carlo simulation as in the question above, what is the false negative rate if we
# use Bonferroni?
set.seed(1)
library(qvalue)
library(genefilter)
n <- 24
m <- 8793
B <- 1000
delta <-2
positives <- 500
g <- factor(rep(c(0,1),each=12))
result <- replicate(B,{
  mat <- matrix(rnorm(n*m),m,n)
  mat[1:positives,1:(n/2)] <- mat[1:positives,1:(n/2)]+delta
  pvals = rowttests(mat,g)$p.val
  ##Bonferroni
  FP1 <- sum(pvals[-(1:positives)]<=0.05/m)
  # False negatives: truly-different genes (first 500 rows) NOT passing.
  FN1 <- sum(pvals[1:positives]>0.05/m)
  c(FP1,FN1)
})
mean(result[2,]/(positives))
# Same simulation, now calling genes with p.adjust(method = "fdr") at 0.05
# and reporting both the false positive and false negative rates.
set.seed(1)
library(qvalue)
library(genefilter)
n <- 24
m <- 8793
B <- 1000
delta <-2
positives <- 500
g <- factor(rep(c(0,1),each=12))
result <- replicate(B,{
  mat <- matrix(rnorm(n*m),m,n)
  mat[1:positives,1:(n/2)] <- mat[1:positives,1:(n/2)]+delta
  pvals = rowttests(mat,g)$p.val
  ##p.adjust value
  pvals_adjusted = p.adjust(pvals, method ="fdr")
  FP1 <- sum(pvals_adjusted[-(1:positives)]<=0.05)
  FN1 <- sum(pvals_adjusted[1:positives]>0.05)
  c(FP1,FN1)
})
mean(result[1,]/(m-positives))
mean(result[2,]/(positives))
# Same simulation once more, calling genes with Storey's qvalue() at
# FDR level 0.05 and reporting both error rates.
set.seed(1)
library(qvalue)
library(genefilter)
n <- 24
m <- 8793
B <- 1000
delta <-2
positives <- 500
g <- factor(rep(c(0,1),each=12))
result <- replicate(B,{
  mat <- matrix(rnorm(n*m),m,n)
  mat[1:positives,1:(n/2)] <- mat[1:positives,1:(n/2)]+delta
  pvals = rowttests(mat,g)$p.val
  ##Q value formula
  pvals_adjusted = qvalue(pvals, fdr.level = 0.05)
  FP1 <- sum(pvals_adjusted$qvalues[-(1:positives)]<=0.05)
  FN1 <- sum(pvals_adjusted$qvalues[1:positives]>0.05)
  c(FP1,FN1)
})
mean(result[1,]/(m-positives))
mean(result[2,]/(positives))
# Recompute the q-value list for the real dataset (as in Question 4 above).
qvalue_list <- qvalue(first_rowttest$p.value,fdr.level = 0.05)
sum(qvalue_list$qvalues < 0.05)
# Question 1 was answered in the course booklet.
# Question 2: a diagnostic test is trialed on 200 people; the null
# hypothesis is that an individual does NOT have the disease.
#   92 with the disease correctly labeled diseased  (true positives)
#    9 with the disease incorrectly labeled healthy (false negatives)
#   16 healthy incorrectly labeled diseased         (false positives)
#   83 healthy correctly labeled healthy            (true negatives)
# A. Type I errors (false positives): 16
# B. Type II errors (false negatives): 9
# C. False positives as a share of the 99 healthy people: 16/99 ~ 16.16%
# D. False negatives as a share of the 101 diseased people: 9/101 ~ 8.91%
# Question 3: an RNA-seq experiment measures m = 6,319 features. With the
# Bonferroni correction, the p-value cutoff controlling the familywise
# error rate at alpha = 0.05 is alpha / m.
p_value_cutoff <- 0.05 / 6319
# Question 4
# Simulate the results of the RNA-seq experiment from Question 3 assuming the null distribution is true
# for all features. Set the seed to 11. Use runif() to simulate 𝑚 p-values.
# How many p-values are below the cutoff 𝑘 ?
set.seed(11)
FWER_bonferroni <- runif(6319,0,1)
# NOTE(review): this counts p-values <= cutoff; the question says "below",
# but runif() hits the cutoff exactly with probability 0, so the two agree.
sum(FWER_bonferroni<= p_value_cutoff)
# Question 5
# Perform a Monte Carlo simulation of the familywise error rate for the RNA-seq experiment in question 3.
# Set the seed to 12. Use runif() and replicate() to simulate 10,000 sets of 𝑚 p-values. For each set,
# determine how many p-values are below the cutoff 𝑘 . Under the assumption of the null distribution
# ,these are false positives.
# What proportion of simulated experiments have at least one false positive?
set.seed(12)
B <- 10000
FWER_bonferroni <- replicate(B, {
  pvals <- runif(6319,0,1)
  sum(pvals <= p_value_cutoff)
})
# Proportion of replicates with one or more false positives.
sum(FWER_bonferroni > 0)/10000
# Question 6 - 10
# This is a dataset produced by Bottomly et al., performing RNA-sequencing on two strains of mouse
# with many biological replicates.
# download Bottomly et al. data
if (!file.exists("bottomly_eset.RData")) download.file("http://bowtie-bio.sourceforge.net/recount/ExpressionSets/bottomly_eset.RData",
"bottomly_eset.RData")
load("bottomly_eset.RData")
# also load these libraries, which we previously installed from Bioconductor
library(Biobase)
library(genefilter)
library(qvalue)
# These data are stored in an ExpressionSet object. We will learn how to work with these objects in future
# courses, but for now we can manually extract the gene expression and strain information:
dat = exprs(bottomly.eset) # gene expression matrix
strain = pData(bottomly.eset)$strain # strain factor
# dat is a matrix with each row representing a gene, each column representing a sample, and the values
# representing RNA-seq read counts for a given gene in a given sample. strain is a factor representing
# the genetic strain of each sample column.
# Question 6
# Use the rowttests() function from the genefilter library to calculate p-values for every gene (row) in
# dat based on strain
library(genefilter)
results <- rowttests(dat,strain)
pvals <- results$p.value
# rowttests() can yield NA p-values (e.g. zero-variance genes), so drop NAs
# when counting.
# FIX: the original passed `na.rm = pvals` (a numeric vector) in the three
# sum() calls below; sum()'s na.rm argument must be a single logical.
sum(pvals < 0.05, na.rm = TRUE)
# Question 7
# Using the Bonferroni correction, what p-value cutoff would be required to ensure a FWER below 0.05?
(p_value_cutoff <- 0.05/nrow(dat))
# Question 8
# How many genes have a p-value below the cutoff determined by the Bonferroni correction?
sum(pvals < p_value_cutoff, na.rm = TRUE)
# Question 9
# Use p.adjust() with the method="fdr" option to compute q-values in order to determine how many genes
# are significant at an FDR cutoff of 0.05.
# How many genes have significant q-values at an FDR cutoff of 0.05 when using p.adjust()?
pvals_fdr <- p.adjust(pvals, method = "fdr")
sum(pvals_fdr < 0.05, na.rm = TRUE)
# Question 10
# Now try computing q-values with an alternative method, using the qvalue() function from the qvalue package.
# How many genes have significant q-values at an FDR cutoff of 0.05 when using qvalue()?
# You may need to remove NAs from pvals before finding this value.
qvalue_pvals <- qvalue(pvals, fdr.level = 0.05)
qvalue_p_ad <- na.omit(qvalue_pvals$qvalues)
sum(qvalue_p_ad < 0.05)
#----------------------------------------------------------------------------------------------------------
# Binomial and Poisson distributions.
# Setup: an urn with blue and red balls; drawing N balls with replacement
# gives independent Bernoulli(p) outcomes X_1, ..., X_N (p = proportion of
# red), each with mean p and variance p(1 - p). Their sum S_N is binomial:
#   Pr(S_N = k) = choose(N, k) * p^k * (1 - p)^(N - k)
# In R, dbinom() evaluates this mass function and pbinom() gives
# Pr(S_N <= k). Applications below; the probability of conceiving a girl is
# taken to be 0.49.
# Question 1
# Probability that a family with 4 children (no twins) has 2 girls and 2 boys,
# first from the formula directly, then via dbinom() as a check:
(p_x <- choose(4, 2) * 0.49^2 * 0.51^2)
(twogs_twobs <- dbinom(x = 2, size = 4, prob = 0.49))
# Question 2
# Probability that a family with 10 children (no twins) has 4 girls and 6 boys:
(fourgirl_sixboys <- dbinom(x = 4, size = 10, prob = 0.49))
# Question 3
# The genome has 3 billion bases: ~20% C, 20% G, 30% T, 30% A, so a random
# base is G or C with probability 0.4. For a random interval of 20 bases
# (assuming independence), the GC-content is strictly above 0.5 when the
# GC-count exceeds 10, i.e. Pr(S_20 > 10):
dbinom(x = 1, size = 20, prob = 0.4)   # (exploratory: Pr of exactly 1 G/C)
pbinom(q = 10, size = 20, prob = 0.4, lower.tail = FALSE, log.p = FALSE)
# Question 4
# Lottery: each ticket wins with probability 1 in 175,223,510 and
# 189,000,000 tickets are sold (with replacement). Probability that at least
# one winning ticket is sold, via the Poisson approximation with rate
# lambda = N * p, then via the exact binomial:
pr_lottery <- 1 / 175223510
lambda <- pr_lottery * 189000000
(1 - ppois(q = 0, lambda = lambda, lower.tail = TRUE, log.p = FALSE))
1 - dbinom(x = 0, size = 189000000, prob = 1 / 175223510)
# Statistical Models Exercises #5
# Probability that two or more winning tickets are sold (exact binomial):
1 - dbinom(0, 189000000, 1 / 175223510) - dbinom(1, 189000000, 1 / 175223510)
# Question # 6
# The binomial is approximately normal when N is large and p is not too
# close to 0 or 1:
#   (S_N - E(S_N)) / sqrt(Var(S_N)) ~ N(0, 1),
# with E(S_N) = N*p and Var(S_N) = N*p*(1-p).
# GC-content setting (20% C, 20% G, 30% T, 30% A, so Pr(G or C) = 0.4):
# for a random interval of 20 bases, the exact probability that the
# GC-content is greater than 0.35 and at most 0.45 — i.e. the GC-count is
# in (7, 9]:
(pb_between45 <- pbinom(9, 20, 0.4, lower.tail = TRUE, log.p = FALSE) - pbinom(7, 20, 0.4, lower.tail = TRUE, log.p = FALSE))
# Normal approximation: standardize the COUNT endpoints 9 and 7 and take the
# difference of normal CDFs.
# FIX: the original first multiplied the probability itself by 20 and
# standardized that (not a count), then evaluated pnorm at the resulting
# hard-coded z-value; both lines were incorrect scratch work and are removed.
b <- (9 - 20*.4)/sqrt(20*.4*.6)
a <- (7 - 20*.4)/sqrt(20*.4*.6)
pnorm(b)-pnorm(a)
# Statistical Models Exercises #8
# Repeat with an interval of 1000 bases: the absolute difference between the
# normal approximation and the exact binomial probability of the GC-content
# being greater than 0.35 and at most 0.45 (count in (350, 450]).
(pb_between1000 <- pbinom(450, 1000, 0.4, lower.tail = TRUE, log.p = FALSE) - pbinom(350, 1000, 0.4, lower.tail = TRUE, log.p = FALSE))
b <- (450 - 1000*.4)/sqrt(1000*.4*.6)
a <- (350 - 1000*.4)/sqrt(1000*.4*.6)
normal_approximation <- pnorm(b)-pnorm(a)
abs(pb_between1000 - normal_approximation)
# The Cs in our genomes can be methylated or unmethylated. Suppose we have a large (millions) group of
# cells in which a proportion 𝑝 of a C of interest are methylated. We break up the DNA of these cells
# and randomly select pieces and end up with 𝑁 pieces that contain the C we care about. This means
# that the probability of seeing 𝑘 methylated Cs is binomial:
# NOTE(review): the next five code lines are the template from the exercise
# statement; as written they run BEFORE k, Ns and ps are defined (and use the
# vectors Ns/ps where scalars are intended), so evaluating them top-to-bottom
# errors. The nested loop further below is the working version.
exact = dbinom(k,Ns,ps)
# We can approximate this with the normal distribution (continuity-corrected):
a <- (k+0.5 - Ns*ps)/sqrt(Ns*ps*(1-ps))
b <- (k-0.5 - Ns*ps)/sqrt(Ns*ps*(1-ps))
approx = pnorm(a) - pnorm(b)
# Let
Ns <- c(5,10,30,50, 100)
ps <- c(0.01,0.10,0.5,0.9,0.99)
# Question 9
# Compare the normal approximation and exact probability (from binomial) of the proportion of Cs being
# 𝑘=1,…,𝑁−1 . Plot the exact versus approximate probability for each 𝑝 and 𝑁 combination
# Study the plots and tell us which of the following is NOT true.
# NOTE(review): these four exploratory plots pair vectors of different
# lengths and depend on the template lines above; the 4x5 panel loop below
# is the intended comparison.
mypar(2,2)
k <- c(1)
plot(exact, ps)
plot(exact,Ns)
plot(approx,ps)
plot(approx,Ns)
Ns <- c(5,10,30,100)
ps <- c(0.01,0.10,0.5,0.9,0.99)
library(rafalib)
mypar(4,5)
# One panel per (N, p): exact binomial mass at k = 1..N-1 against its
# continuity-corrected normal approximation, with the identity line.
for(N in Ns){
  ks <- 1:(N-1)
  for(p in ps){
    exact = dbinom(ks,N,p)
    a = (ks+0.5 - N*p)/sqrt(N*p*(1-p))
    b = (ks-0.5 - N*p)/sqrt(N*p*(1-p))
    approx = pnorm(a) - pnorm(b)
    LIM <- range(c(approx,exact))
    plot(exact,approx,main=paste("N =",N," p = ",p),xlim=LIM,ylim=LIM,col=1,pch=16)
    abline(0,1)
  }
}
# Question 10
# When p is tiny the normal approximation to the binomial breaks down, but
# if N is very large (and N*p is away from 0) the Poisson approximation
# works well. Lottery setting from earlier: N tickets sold, each winning
# with probability p = 1 in 175,223,510.
N <- 189000000
p <- 1/175223510
# Exact binomial probability of exactly two winners:
dbinom(2, N, p)
# The normal approximation (with continuity correction) overestimates it:
a <- (2 + 0.5 - N*p)/sqrt(N*p*(1 - p))
b <- (2 - 0.5 - N*p)/sqrt(N*p*(1 - p))
pnorm(a) - pnorm(b)
# Poisson approximation with rate mu = N*p — here practically identical to
# the exact binomial, because N is huge and N*p is not 0 (exactly the
# assumptions under which the Poisson limit holds):
mu <- N*p
dpois(2, mu)
# Poisson approximation for the probability of two or more winning tickets:
1 - dpois(1, mu) - dpois(0, mu)
#----------------------------------------------------------------------------------------------------------
# Maximum Likelihood Exercises using the Human Cytomegalovirus Genome:
# In this assessment we are going to try to answer the question:
# is there a section of the human cytomegalovirus genome in which the rate of palindromes is
# higher than expected?
# Make sure you have the latest version of the dagdata library:
library(devtools)
install_github("genomicsclass/dagdata")
# and then load the palindrome data from the Human cytomegalovirus genome:
library(dagdata)
data(hcmv)
# These are the locations of palindromes on the genome of this virus:
library(rafalib)
mypar()
plot(locations,rep(1,length(locations)),ylab="",yaxt="n")
# These palindromes are quite rare, 𝑝 is very small. If we break the genome into bins of 4000 basepairs,
# then we have 𝑁𝑝 not so small and we might be able to use Poisson to model the number of palindromes
# in each bin:
breaks=seq(0,4000*round(max(locations)/4000),4000)
tmp=cut(locations,breaks)
counts=as.numeric(table(tmp))
# So if our model is correct counts should follow a Poisson distribution. The distribution seems
# about right:
hist(counts)
# Let 𝑋1,…,𝑋𝑛 be the random variables representing counts then
# Pr(𝑋𝑖=𝑘)=𝜆𝑘/𝑘!exp(−𝜆)
# So to fully describe this distribution we need 𝜆 . For this we will use MLE.
# To compute the Maximum Likelihood Estimate (MLE) we ask what is the probability of observing our data (which we denote with small caps) for a given 𝜆 :
# 𝐿(𝜆)=Pr(𝑋1=𝑥1 and 𝑋2=𝑥2 and …𝑋𝑛=𝑥𝑛;𝜆)
# We assume that the 𝑋 are independent, thus the probabilities multiply:
# 𝐿(𝜆)=Pr(𝑋1=𝑥1)×Pr(𝑋2=𝑥2)×⋯×Pr(𝑋𝑛=𝑥𝑛)
# Now we can write it in R. For example for 𝜆=4 we have:
probs <- dpois(counts,4)
likelihood <- prod(probs)
likelihood
# Run the code above to note that this is a tiny number. It is usually more convenient to compute
# log-likelihoods
logprobs <- dpois(counts,4,log=TRUE)
loglikelihood <- sum(logprobs)
loglikelihood
# Now write a function that takes 𝜆 and the vector of counts as input, and returns the log-likelihood.
# Compute this log-likelihood for
lambdas = seq(0,15,len=300)
# and make a plot.
# NOTE(review): this function reads `counts` from the global environment;
# the alternative version below takes the data as an argument instead.
loglikelihood_func <- function(L){
  logprobs <- dpois(counts,L,log=TRUE)
  (loglikelihood <- sum(logprobs))
}
values <- sapply(lambdas,loglikelihood_func)
(v <- plot(lambdas, values))
# Continuous maximization over [0, 15]; the MLE of a Poisson rate is the
# sample mean, so this should land near mean(counts).
mle=optimize(loglikelihood_func,c(0,15),maximum=TRUE)
abline(v=mle$maximum)
mle$maximum
# Alternative code:
loglikelihood = function(lambda,x){
sum(dpois(x,lambda,log=TRUE))
}
lambdas = seq(1,15,len=300)
l = sapply(lambdas,function(lambda) loglikelihood(lambda,counts))
plot(lambdas,l)
mle=lambdas[which.max(l)]
abline(v=mle)
print(mle)
# The point of collecting this dataset was to try to determine if there is a region of the genome that
# has higher palindrome rate than expected. We can create a plot and see the counts per location:
# Question 2: What is the center of the bin with the highest count?
breaks=seq(0,4000*round(max(locations)/4000),4000)
tmp=cut(locations,breaks)
counts=as.numeric(table(tmp))
binLocation=(breaks[-1]+breaks[-length(breaks)])/2
plot(binLocation,counts,type="l",xlab=)
binLocation[which.max(counts)]
# Question 3
# For the question above, what is the maximum count?
max(counts)
# Question 4
# Now that we have identified the location with the largest palindrome count, we want to know if by
# chance we could see a value this big.
(lambda = mean(counts[ - which.max(counts) ]))
# If 𝑋 is a Poisson random variable with rate
1 - ppois(13,5)
# MLE Exercises #5
# From the question above, we obtain a p-value smaller than 0.001 for a count of 14.
# Why is it problematic to report this p-value as strong evidence of a location that is different?
# We selected the highest region out of 57 and need to adjust for multiple testing.
# MLE Exercise # 6
# Use the Bonferroni correction to determine the p-value cut-off that guarantees a FWER of 0.05.
# What is this p-value cutoff?
0.05/57
# MLE Exercise # 7
# Create a qq-plot to see if our Poisson model is a good fit:
ps <- (seq(along=counts) - 0.5)/length(counts)
lambda <- mean( counts[ -which.max(counts)])
poisq <- qpois(ps,lambda)
qqplot(poisq,counts)
abline(0,1)
# Poisson is a very good approximation except for one point that we actually think is associated with a
# region of interest.
#----------------------------------------------------------------------------------------------------------
# On Models for Variance:
# Install and load the following data library:
library(devtools)
install_github("genomicsclass/tissuesGeneExpression")
library(tissuesGeneExpression)
# Now load this data and select the columns related to endometrium:
data("tissuesGeneExpression")
library(genefilter)
y = e[,which(tissue=="endometrium")]
dim(y)
# Question # 1
# Compute the across sample variance for the fifteen samples.
# Then make a qq-plot to see if the variances follow a normal distribution.
# Which statement is true? (pick one)
mypar(3,5)
for (i in 1:15){
  qqnorm(y[,i])
  qqline(y[,i])
}
# Rafa's Code
library(genefilter)
(s2 <- rowVars(y))
library(rafalib)
mypar(1,2)
qqnorm(s2)
qqline(s2)
##To see the square root transformation does not help much:
qqnorm(sqrt(s2))
qqline(sqrt(s2))
# Question 2
# Now fit an F-distribution with 14 degrees of freedom using the fitFDist() function in the limma package:
# What is estimated the estimated scale parameter?
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
BiocManager::install("limma")
?fitFDist()
estimates <- fitFDist(s2,14)
# Question 3
# Now create a qq-plot of the observed sample standard deviation versus the quantiles predicted by the
# F-distribution (remember to take square root).
# Which of the following best describes the qq-plot?
theoretical<- sqrt(qf((seq(0,999)+0.5)/1000, 14, estimates$df2)*estimates$scale)
observed <- sqrt(s2)  # compare standard deviations: the theoretical quantiles above are already on the sqrt scale
mypar(1,2)
qqplot(theoretical,observed)
abline(0,1)
# Dr. Irizarry's Code:
ps <- (seq(along=s2)-0.5)/length(s2)
theoretical<- qf(ps,14,estimates$df2)*estimates$scale
LIM <- sqrt( range(c(theoretical,s2)) )
mypar(1,2)
qqplot(sqrt( theoretical ), sqrt( s2 ),ylim=LIM,xlim=LIM)
abline(0,1)
##close up excluding the upper 5%
K <- sqrt( quantile(s2,0.95) )
qqplot( sqrt( theoretical ), sqrt( s2 ),ylim=c(0,K),xlim=c(0,K))
abline(0,1)
#----------------------------------------------------------------------------------------------------------
# Mammograms, RNA seq Data Statistics, & Monte Carlo Simulations in F Distributions
# Mammograms are important breast cancer screening tests that have contributed to an increase in early
# diagnosis and decrease in breast cancer mortality. However, like most screening tests, they have a high
# false positive rate - most women with an abnormal mammogram who are called back for additional testing
# (like breast biopsies) do not have breast cancer.
# The probability that a woman with a positive mammogram has cancer on follow-up testing is around 0.1.
# Suppose you are a pathologist evaluating 30 random breast biopsies from mammogram follow-up tests.
# A. What is the probability that none of the biopsies show cancer?
dbinom(0, 30, 0.1)  # Pr(X = 0); the original "1 - pbinom(9, 30, 0.1)" computes Pr(X >= 10) instead
# B. What is the probability that exactly 3 of the biopsies show cancer?
dbinom(3, 30, 0.1)
# C. What is the probability that at least 10 of the biopsies show cancer?
1 - pbinom(9, 30, 0.1)
# Question 2
# Suppose you are analyzing RNA-seq data and transcript X is expressed at a level such that it represents
# 2 out of every 1,000,000 transcripts. This means the probability of observing transcript X in a random
# read is 0.000002. Now suppose that you evaluate 3,000,000 reads in an experiment.
# A. What is the expected number of reads for transcript X?
(lambda <- (2/1000000)*3000000)
# B. Use the Poisson distribution to calculate the probability of observing exactly 1 read for transcript X.
dpois(1, lambda)  # "exactly 1" is the density; ppois() would give the cumulative probability
# C. What is the probability of observing more than 10 reads for transcript X?
1 - ppois(10, lambda)
# Question 3
# In the human genome, cytosines that are followed by guanines (CpGs) are methylated 80% of the time.
# A. Consider 30 CpG sites. Using the binomial distribution, what is the exact probability that between 70% and 90% of the CpGs are methylated?
.70*30
.9*30
pbinom(27,30,0.8) -pbinom(21,30,0.8)
# B. Using the normal distribution, what is the approximate probability that between 70% and 90% of
# CpGs are methylated?
N <- 30
p <- 0.8
a <- (27 - N*p)/sqrt(N*p*(1-p))
b <- (21 - N*p)/sqrt(N*p*(1-p))
pnorm(a) - pnorm(b)
# C. What is the difference (in absolute value) between the normal approximation and the exact
# probability (using binomial) of observing methylation between 70% and 90%?
abs(0.8290965 - 0.8271703)
# Question 4
# In a previous week, we performed 1000 simulations of a series of 30 mouse experiments under the
# null distribution and, for each simulation, counted the number of p-values under 0.05 to generate a
# vector pval_counts:
set.seed(100)
pval_counts = replicate(1000,{
  pvals = replicate(30, {
    cases = rnorm(5,7.5,2.5)
    controls = rnorm(5,7.5,2.5)
    t.test(cases,controls)$p.value
  })
  sum(pvals < 0.05)
})
mean(pval_counts)
# This random sampling can be modeled as a Poisson process, and the Maximum Likelihood Estimate can
# be used to determine the 𝜆 that best fits this process.
# This function takes a 𝜆 and a vector of counts as inputs and returns the log-likelihood for that 𝜆 :
loglikelihood = function(lambda,x){
  sum(dpois(x,lambda,log=TRUE))
}
# Compute this log-likelihood for:
# A. Which value of 𝜆 maximizes the log likelihood?
lambdas = seq(0,10,len=101)
l = sapply(lambdas,function(lambdas) loglikelihood(lambdas,pval_counts))
plot(lambdas,l)
mle=lambdas[which.max(l)]
abline(v=mle)
max(mle)
# B. Given that value of lambda, what is the probability of observing 3 or more p-values below 0.05.
1 - ppois(2, mle)  # Pr(X >= 3) = 1 - Pr(X <= 2); "1 - ppois(3, 1.3)" computed Pr(X >= 4) with a hard-coded rate
# C. Compare the estimated value of 𝜆 from the simulated experiment to the theoretical expected value
# of 𝜆 . How many p-values are expected to be below 0.05 due to chance given 𝑁=30 tests with
# a probability of success of p = 0.05 ?
lambda <- 0.05*30
# Question 5
# You can generate a set of random variables from an F-distribution with the function rf().
# Set the seed to 25, then generate an F-distributed list of random numbers x using rf(), then
# use fitFDist() from the limma package to fit an F-distribution to x using df1 = 8.
set.seed(25)
x = rf(100,df1=8,df2=16)  # generate AFTER setting the seed so the result is reproducible
library(limma)
(F_fit <- fitFDist(x,8))  # avoid naming the result "F", which masks the FALSE alias
F_fit$df2
# Question 6
# Set the seed to 28, then use replicate() to repeat the previous procedure 1000 times:
# each time, generate 100 F-distributed random numbers with the code provided, then use fitFDist()
# with a known value of df1=8and determine the estimated value of df2.
# A. What is the median value of df2 in this Monte Carlo simulation?
set.seed(28)
F_values_1 <- replicate(1000,{
  x = rf(100,df1=8,df2=16)
  values <- fitFDist(x,8)
  values$df2
})
median(F_values_1)  # original referenced nonexistent object "F_values"
# B. What proportion of estimated df2 values are between 12 and 20 in this Monte Carlo simulation?
sum(F_values_1 > 12 & F_values_1 < 20)/1000
# Question 7
# Set the seed to 28 again, then repeat the previous question except this time increase the number
# of randomly generated values in rf() to 1000, representing a larger sample size.
# Again, use fitFDist() with a known value of df1=8 and determine the estimated value of df2.
# A. What is the median value of df2 in the Monte Carlo simulation with a larger sample size?
set.seed(28)
F_values_2 <- replicate(1000,{
  x = rf(1000,df1=8,df2=16)
  values <- fitFDist(x,8)
  values$df2
})
median(F_values_2)  # original referenced nonexistent object "F_values"
# B. What proportion of estimated df2 values are between 12 and 20 in the Monte Carlo simulation
# with a larger sample size?
sum(F_values_2 > 12 & F_values_2 < 20)/1000
# Question 8
boxplot(F_values_1)
boxplot(F_values_2)
#---------------------------------------------------------------------------------------------------------
# On Hierarchal Models, Limma and Introduction tp Volcano Plots:
# Question 1
p <- 1/4000
(0.99*p)/(0.99*p+(0.01*(1-p)))
# Question 2
tmpfile <- tempfile()
tmpdir <- tempdir()
download.file("http://seanlahman.com/files/database/lahman-csv_2014-02-14.zip",tmpfile)
##this shows us files
filenames <- unzip(tmpfile,list=TRUE)
players <- read.csv(unzip(tmpfile,files="Batting.csv",exdir=tmpdir),as.is=TRUE)
unlink(tmpdir)
file.remove(tmpfile)
# Which of the following dplyr commands gives us the batting averages (AVG) for players with more
# than 500 at bats (AB) in 2012:@
library(dplyr)  # filter/mutate/select below are dplyr verbs; nothing earlier loads dplyr
filter(players,yearID==2012) %>% mutate(AVG=H/AB) %>% filter(AB>=500) %>% select(AVG)
# Question 3
# Edit the command above to obtain all the batting averages from 2010, 2011, 2012 and
# removing rows with AB < 500.
# What is the average of these batting averages?
players_list <- filter(players,yearID>= 2010, yearID <= 2012) %>% mutate(AVG=H/AB) %>% filter(AB>=500) %>% select(AVG)
mean_prior <- mean(players_list$AVG)
# What is the standard deviation of these batting averages?
sd_prior <- sd(players_list$AVG)
# Use exploratory data analysis to decide which of the following distributions approximates the
# distribution of the average across players (hint: this is contained in the AVG component)?
library(rafalib)
mypar(1,2)
hist(players_list$AVG)
qqnorm(players_list$AVG)
qqline(players_list$AVG)
# It is April and after 20 at bats, Jose Iglesias is batting .450 (this is very good). We
# can think of this as a binomial distribution with 20 trials with probability of success 𝑝 .
# Our sample estimate of 𝑝 is .450. What is our estimate of standard deviation?
# Hint: This AVG a sum of Bernoulli trials, that is binomial, divided by 20.
p <- 0.450
n <- 20
sd_sample <- sqrt(((p)*(1-p))/n)
# The sum (numerator of AVG) is binomial so it has SD sqrt(Np(1-p)) .
# The SD of a random variable times a constant is the SD of the random variable times that constant.
# For the AVG we divide by 𝑁 to get 𝑝(1−𝑝)/𝑁‾‾‾‾‾‾‾‾‾‾‾√ . This is
# The Binomial is approximated by normal when the sample size is large, so our sampling distribution
# is approximately normal with mean 𝜃 = 0.45 and SD 𝜎=0.11 . Earlier we used a baseball databas
# e to determine that our prior distribution for 𝜃 is Normal with mean 𝜇=0.275 and SD 𝜏=0.027
# We saw that this is the posterior mean prediction of the batting average.
# What is your estimate of Jose Iglesias' batting average going forward taking into account
# his current batting average?
B <- sd_sample^2/(sd_prior^2 + sd_sample^2)
(E <- mean_prior + (1 - B)*(0.450 - mean_prior))
# Load the following data (you can install it from Bioconductor) and extract the data matrix
# using exprs() (we will discuss this function in detail in a future course):
BiocManager::install("SpikeInSubset")
library(Biobase)
library(SpikeInSubset)
data(rma95)
y <- exprs(rma95)
# This dataset comes from an experiment in which RNA was obtained from the same background pool to
# create six replicate samples. Then RNA from 16 genes were artificially added in different quantities
# to each sample. These quantities (in picoMolars) and gene IDs are stored here:
pData(rma95)
y <- exprs(rma95)
# Note that these quantities were the same in the first three arrays and in the last three arrays.
# So we define two groups like this:
g <- factor(rep(0:1,each=3))
# and create an index of which rows are associated with the artificially added genes:
spike <- rownames(y) %in% colnames(pData(rma95))
# Note that only these 16 genes are differentially expressed since these six samples differ only due
# to random sampling (they all come from the same background pool of RNA).
# Perform a t-test on each gene using the rowttests() function in the genefilter package.
# What proportion of genes with a p-value < 0.01 (no multiple comparison correction) are not part of the artificially added (false positive)?
library(genefilter)
rtt = rowttests(y,g)
index = rtt$p.value < 0.01
print (mean( !spike[index] ))
## We can make a volcano plot to visualize this:
mask <- with(rtt, abs(dm) < .2 & p.value < .01)
cols <- ifelse(mask,"red",ifelse(spike,"dodgerblue","black"))
with(rtt,plot(-dm, -log10(p.value), cex=.8, pch=16,
              xlim=c(-1,1), ylim=c(0,5),
              xlab="difference in means",
              col=cols))
abline(h=2,v=c(-.2,.2), lty=2)
# Now compute the within group sample standard deviation for each gene (you can use group 1).
# Based on the p-value < 0.01 cut-off, split the genes into true positives, false positives, true negatives and false negatives.
# Create a boxplot comparing the sample SDs for each group.
# Which of the following best described the box-plot?
library(genefilter)
sds <- rowSds(y[,g==0])
index <- paste0( as.numeric(spike), as.numeric(rtt$p.value<0.01))
index <- factor(index,levels=c("11","01","00","10"),labels=c("TP","FP","TN","FN"))
boxplot(split(sds,index))
# Question 3
# In the previous two questions we observed results consistent with the fact that the random
# variability associated with the sample standard deviation leads to t-statistics that are large by chance.
# Note that the sample standard deviation we use in the t-test is an estimate and that with just a
# pair of triplicate samples, the variability associated with the denominator in the t-test can be large.
# The following three steps perform the basic limma analysis.
# The eBayes step uses a hierarchical model that provides a new estimate of the gene specific standard error.
library(limma)
fit <- lmFit(y, design=model.matrix(~ g))
colnames(coef(fit))
fit <- eBayes(fit)
# Make a plot of the original new hierarchical models based estimate versus the sample based estimate.
sampleSD = fit$sigma
posteriorSD = sqrt(fit$s2.post)
LIM = range( c(posteriorSD,sampleSD))
plot(sampleSD, posteriorSD,ylim=LIM,xlim=LIM)
abline(0,1)
abline(v=sqrt(fit$s2.prior))
# Use these new estimates (computed in Question 4.6.3) of standard deviation in the denominator
# of the t-test and compute p-values. You can do it like this:
library(limma)
fit = lmFit(y, design=model.matrix(~ g))
fit = eBayes(fit)
##second coefficient relates to diffences between group
pvals = fit$p.value[,2]
# What proportion of genes with a p-value < 0.01 (no multiple comparison correction)
# are not part of the artificially added (false positives)?
# Use the limma moderated p-values here; the original re-assigned index from the ordinary
# t-test p-values, which answered the wrong question:
index <- pvals < 0.01
print (mean( !spike[index] ))
# Compare to the previous volcano plot and notice that we no longer have small p-values for genes with
# small effect sizes.
#---------------------------------------------------------------------------------------------------------
# Explorative Data Analysis: Log Ratio Comparisons
# Download and install the Bioconductor package SpikeInSubset and then load the library and the mas133 data:
data(mas133)
e <- exprs(mas133)
plot(e[,1],e[,2],main=paste0("corr=",signif(cor(e[,1],e[,2]),3)),cex=0.5)
k <- 3000
b <- 1000 #a buffer
polygon(c(-b,k,k,-b),c(-b,-b,k,k),col="red",density=0,border="red")
# What proportion of the points are inside the box?
# A point is inside the box only when BOTH coordinates are <= 3000; the original summed the two
# marginal counts and divided by a hard-coded total, which is not the joint proportion:
mean(e[,1] <= 3000 & e[,2] <= 3000)
#Now make the sample plot with log:
plot(log2(e[,1]),log2(e[,2]),main=paste0("corr=",signif(cor(log2(e[,1]),log2(e[,2])),2)),cex=0.5)
k <- log2(3000)
b <- log2(0.5)
polygon(c(b,k,k,b),c(b,b,k,k),col="red",density=0,border="red")
# When you take the log, 95% of data is no longer in a tiny section of plot.
# The two samples we are plotting are replicates (they random samples from the same batch of RNA).
# The correlation of the data was 0.997 in the original scale, 0.96 in the log-scale.
# High correlations are sometimes confused for evidence of replication. But replication implies
# we get very small difference between the observations, which is better measured with distance or
# differences.
# What is the standard deviation of the log ratios for this comparison?
# Make an MA-plot:
e <- log2(exprs(mas133))
plot((e[,1]+e[,2])/2,e[,2]-e[,1],cex=0.5)
sd(e[,2] - e[,1])
# How many fold changes above 2 do we see? Note that these measures of log (base 2) of expression so a fold
# change of 2 translates into a difference, in absolute value, of 1.
sum(abs(e[,2] - e[,1]) >= 1)
# ------------------------------------------------------------------------------------------------
# Hierarchal Models
# The incidence of prostate cancer in men over the age of 50 is roughly 0.5%.
# A prostate cancer screening test (PSA) exists, but it has recently fallen out of favor for several reasons.
# The PSA test is positive for 51% of men with advanced prostate cancer and negative for 91% of men without
# prostate cancer. These probabilities are summarized below:
# Question # 1
# A.
(p_neg <- (0.49*0.005)+( 0.91*0.995))
(p_pc_neg <- (0.49*0.005)/p_neg)
(p_neg_pc <- (p_pc_neg*p_neg)/0.005)
# B.
(p_pos <- 0.51*0.005 + 0.09*0.995)
# Pr(no PC | positive) = Pr(positive | no PC) * Pr(no PC) / Pr(positive);
# the false-positive rate is 0.09 (the original used 0.49, which is Pr(negative | PC)):
(p_nopc_pos <- (0.09*0.995)/p_pos)
(p_pos_nopc <- (p_pos*p_nopc_pos)/0.995)
# C.
(0.51*0.005)/p_pos
# The ChickWeight dataset included in base R contains weights of chicks on different diets over the first
# 21 days of life.
# Question 2
# Suppose we want to evaluate the weight of a chick at day 21. Filter the ChickWeight data to
# include only weights at day 21:
library(tidyverse)
day21 <- ChickWeight %>% filter(Time == 21) # you don't need to load any packages to access ChickWeight
# A. What is the mean weight of chicks at day 21 (all diets)?
# Hint this will be the u
mu <- mean(day21$weight)
# B. What is the standard deviation of chick weight at day 21 (all diets)?
# Hint this will be your tau
tau <- sd(day21$weight)
# Question 3
# In general, it is fairly uncommon for a 21 day old chick to weigh over 300g.
# However, different diets affect the chick weights. Suppose the chick is on diet 3.
day21_B <- ChickWeight %>% filter(Time == 21 & Diet == 3)
# A. What is the mean weight of chicks on diet 3 at day 21?
Y <- mean(day21_B$weight)
# B. What is the standard deviation of chick weight on diet 3 at day 21?
s <- sd(day21_B$weight)
# C. Assume that chick weights on diet 3 follow a normal distribution.
# What is the probability that a 21 day old chick on diet 3 weighs 300g or more?
1 - pnorm(300, Y, s)  # use the computed mean/SD rather than hard-coded rounded values
# Question 4
# Chicks on diet 3 have a higher probability of weighing over 300g than the general population of chicks
# on all diets. However, note that we have less information about chicks on each individual diet than
# we do about chicks on all diets - there are only 10 weights for chicks on diet 3.
# This means it may be helpful to apply a hierarchical model to chick weights based on diet.
# A. Using a hierarchical model that combines the overall weight data with the diet 3 weight data, what is the expected weight of a chick on diet 3 at day 21?
B <- s^2/(s^2 + tau^2)
(E <- mu + (1 - B)*(Y - mu))
# B. Using a hierarchical model that combines the overall weight data with the diet 3 weight data,
# what is the standard error of chick weights on diet 3 at day 21?
(sterror <- sqrt(1 /(1/s^2 + 1/tau^2)))
# C. Given the expected value and standard error of this hierarchical model, and assuming a normal
# distribution, what is the probability that a 21 day old chick on diet 3 weighs over 300g?
1 - pnorm(300,E,sterror)
# The probability has reduce when taking into account the prior statistics.
# Question 5
# Suppose that you use rowttests() from the genefilter library to compare gene expression across two
# biological contexts and you assign the resulting output to results.
# You want to create a volcano plot of the results.
# Which of these options gives the values for the x-axis of the volcano plot?
# results$dm
# Which of these options gives the values for the y-axis of the volcano plot?
# -log10(results$p.value)
# Question 6 - 8
# In previous exercises, we analyzed an RNA-seq experiment from Bottomly et al. comparing gene expression
# across two different strains of mice:
if (!file.exists("bottomly_eset.RData")) download.file("http://bowtie-bio.sourceforge.net/recount/ExpressionSets/bottomly_eset.RData",
                                                       "bottomly_eset.RData")
load("bottomly_eset.RData")
# also load these libraries, which we previously installed from Bioconductor
library(Biobase)
library(genefilter)
library(qvalue)
dat = exprs(bottomly.eset) # gene expression matrix
strain = pData(bottomly.eset)$strain # strain factor
results <- rowttests(dat,strain)
pvals <- results$p.value
# Set the seed to 1, then permute the strain information:
set.seed(1)
permut <- sample(strain)
results_1 <- rowttests(dat,permut)
pvals_1 <- results_1$p.value
# Question 6
# How many genes have a p-value below .05 in this simulated null distribution?
sum(pvals_1 < 0.05, na.rm = TRUE)
# Question 7
# Create a histogram of p-values for both the original results, pvals, and the permuted p-values.
# Which of the following is NOT true about the distribution of p values?
library(rafalib)
mypar(1,2)
hist(pvals)
hist(pvals_1)
# Because the permuted p-values do not follow a uniform distribution, this suggests unexpected
# correlation between some samples.
# Question 8
# Samples 1 and 4 are both from the mouse strain C57BL/6J. If these biological replicates are
# highly correlated to each other, then an MA-plot of these samples should be symmetrical around the
# line x=y (equivalent to x - y = 0).
# Assign x and y as the log base 2 of samples 1 and 4 respectively. Note that adding 1 before taking the
# log prevents problems related to zeros in the data:
x <- log2(dat[,1] + 1)
y <- log2(dat[,4] + 1)
plot(((x+y)/2),x-y)
abline(0,0)
|
81fdc49ce8903e5bd2a8af5fc411bef117617ea9
|
fef6ba95f4a6a98e26f7f9f81bc457c562e62364
|
/R/thumb.service.R
|
ca8eec357f642246b26be8411ed644370d5b2ad1
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
USGS-R/hazardItems
|
48b6701b082cde4a51edd417a86aa963ed2f9383
|
dcf69e2df7d4b0db5054c8193bcc4aca4d41e859
|
refs/heads/main
| 2023-04-13T20:30:37.493049
| 2020-08-13T20:42:26
| 2020-08-13T20:42:26
| 10,981,467
| 5
| 10
|
NOASSERTION
| 2023-04-07T23:06:59
| 2013-06-26T22:59:23
|
R
|
UTF-8
|
R
| false
| false
| 3,444
|
r
|
thumb.service.R
|
#'@title create thumbnail for an item
#'@description takes json url and creates summary image for item
#'@param json.url a valid JSON url
#'@return A string giving the location of the created png image
#'@importFrom jsonlite fromJSON
#'@import maps mapdata scales png
#'@importFrom httr GET content accept_json write_disk
#'@examples
#'serviceEndpoint <- 'https://marine.usgs.gov/coastalchangehazardsportal/data/item/EuTmYy6a'
#'thumb.service(serviceEndpoint)
#'@export
thumb.service <- function(json.url){
  dim.x <- 150 # px
  dim.y <- 150 # px
  if (dim.y!=dim.x){stop("non-square image not currently supported")}
  # white RGB canvas; child WMS layers are painted into it below
  ima <- array(dim=c(dim.y,dim.x,3),data=1)
  wms.version <- "1.3.0"
  # fetch the item metadata (content/accept_json come from httr, declared above)
  response <- GET(json.url, accept_json())
  item.json <- content(response, as = 'parsed')
  item.id <- item.json$id
  bbox = getSquareBBox(item.json)
  png(filename = paste("thumb_",item.id,".png",sep=''), width = dim.x, height = dim.y, units = "px")
  map("worldHires",xlim=c(bbox[1],bbox[3]), ylim=c(bbox[2],bbox[4]), col="floralwhite",
      lwd = 0.01,fill=TRUE,boundary = TRUE,
      mar=c(0,0,0,0),mai=c(0,0,0,0),oma=c(0,0,0,0),xpd = NA)
  lim <- par() # get limits from map image
  kids <- getVisibleChildren(json.url)
  num.kids <- length(kids$json)
  parent.char.bbox <- paste(as.character(bbox),collapse=',')
  parent.char.x <- as.character(dim.x)
  parent.char.y <- as.character(dim.y)
  # get unique bounding boxes and indexes for the kids
  bbox.idx <- getUniqueBBoxIDs(as.character(kids$json))
  r.c <- vector(length=length(unique(bbox.idx))) # ribbon count
  for (i in 1:num.kids){
    child.json.url <- as.character(kids$json[i])
    child.sld.url <- as.character(kids$sld[i])
    response <- GET(child.json.url, accept_json())
    child.json <- content(response, as = 'parsed')
    child.services <- child.json$services
    # find the first proxy_wms service for this child
    for (k in 1:length(child.services)){
      if (child.services[[k]]$type=="proxy_wms"){
        child.wms <- child.services[[k]]$endpoint
        child.layer <- child.services[[k]]$serviceParameter
        break
      }
    }
    if (!item.json$ribbonable){
      ribbon = "1"
    } else if (item.json$ribbonable & !child.json$ribbonable){
      ribbon = "1"
    } else if (item.json$ribbonable & child.json$ribbonable){
      # this child will be ribboned...
      r.c[bbox.idx[i]] <- r.c[bbox.idx[i]]+1 # only incremented per number of ribboned kids
      ribbon = as.character(r.c[bbox.idx[i]])
    }
    # build the WMS GetMap request for this child layer
    get.layer <- paste(child.wms,"?version=",wms.version,"&service=wms","&request=GetMap","&layers=",child.layer,
                       "&bbox=",parent.char.bbox,
                       "&STYLES=cch",
                       "&TRANSPARENT=FALSE",
                       "&width=",parent.char.x,"&height=",parent.char.y,
                       "&format=image%2Fpng","&SLD=",child.sld.url,"?ribbon=",ribbon,
                       sep='')
    GET(get.layer,write_disk("thumb_temp.png", overwrite = TRUE))
    temp.ima <- readPNG("thumb_temp.png")
    # overlay non-white pixels of the child layer onto the canvas
    ima[temp.ima!=1] = temp.ima[temp.ima!=1] # valid? no need to loop
  }
  rasterImage(ima, lim$usr[1], lim$usr[3], lim$usr[2], lim$usr[4])
  map("worldHires",
      xlim=c(bbox[1],bbox[3]), ylim=c(bbox[2],bbox[4]), col=c(alpha("gray10",0.25),alpha("gray10",0.25)),
      interior=FALSE,fill=TRUE,boundary = TRUE,add=TRUE,lwd = 0.1,
      mar=c(0,0,0,0),mai=c(0,0,0,0),oma=c(0,0,0,0),xpd = NA)
  dev.off()
  unlink(x="thumb_temp.png") # remove temporary png
  return(paste("thumb_",item.id,".png",sep=''))
}
|
fa2f8fba4c78f374afc2f31342aabbb0a204108f
|
ff634f26f8661a7fd640b0a9aa2cabbea477f805
|
/man/ltrim.Rd
|
7ec37cc948c7cdd0167e5da6926eb6c354f54d5c
|
[] |
no_license
|
cmhh/geoserveR
|
0fcc0457153a81ba7358d7f4c4df31914c325d51
|
6b4a13a8c5cd6595f58f3d4f62d7aad0390d8406
|
refs/heads/master
| 2020-07-18T18:57:02.612669
| 2019-09-17T08:22:12
| 2019-09-17T08:22:12
| 206,296,139
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 195
|
rd
|
ltrim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sld.R
\name{ltrim}
\alias{ltrim}
\title{left trim}
\usage{
ltrim(str)
}
\description{
left trim
}
\keyword{internal}
|
f0320a95d6a58962092b45455864a5f1a1f14c6a
|
ec6d7621a54cb3ef71a8f4851f800dc574768617
|
/p4/Ejercicio2.R
|
9ce65400879f9b2333a7419500dea093f8f790ed
|
[] |
no_license
|
carmencitaMartinez/FundamentosCienciaDeDatos
|
1ca92d7828857692aa5c0171ec57b473f24f6c8d
|
d496ff494f7ae4d968eed3e857b0e1388f6a99bf
|
refs/heads/master
| 2023-02-04T19:00:17.355160
| 2020-12-28T10:10:02
| 2020-12-28T10:10:02
| 324,972,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,692
|
r
|
Ejercicio2.R
|
# Clustering exercise: k-means on student grades plus a tour of the LearnClust
# hierarchical-clustering algorithms on a small example matrix.
install.packages("LearnClust")
install.packages("factoextra")
install.packages("xlsx")
install.packages("magick")
library(magick)
library(LearnClust)
library(tidyverse)
library(cluster)
library(factoextra)
library(xlsx)
library(gridExtra)
# We explored representing the data in two ways. For this we added new data and
# extended the categories in the grades.
# Read the grades from the text file
calificaciones<-read.table("calificaciones.txt")
# Drop all NA values, if there are any
calificaciones<-na.omit(calificaciones)
# Since we do not want the clustering algorithm to depend on a single variable,
# we standardize the data with scale()
calificaciones<-scale(calificaciones)
# Compute a distance matrix between the rows
distancia<-get_dist(calificaciones)
# Visualize the distance matrix
fviz_dist(distancia, gradient = list(low = "#FDFEFF", mid = "#84B5E6", high = "#020D6C"))
# Run the different cases, varying the number of clusters between 1 and 4
# (note the object names k2..k5 do not match the k = 1..4 labels below)
k2 <- kmeans(calificaciones, centers = 1, nstart = 25)
k3 <- kmeans(calificaciones, centers = 2, nstart = 25)
k4 <- kmeans(calificaciones, centers = 3, nstart = 25)
k5 <- kmeans(calificaciones, centers = 4, nstart = 25)
# Save the individual cluster plots
p1<-fviz_cluster(k2, data = calificaciones) + ggtitle("k = 1")
p2<-fviz_cluster(k3, data = calificaciones) + ggtitle("k = 2")
p3<-fviz_cluster(k4, data = calificaciones) + ggtitle("k = 3")
p4<-fviz_cluster(k5, data = calificaciones) + ggtitle("k = 4")
# Arrange the 4 plots above in a grid
grid.arrange(p1, p2, p3,p4, nrow = 2)
# Now try the R package LearnClust: Hierarchical Clustering Algorithms
# Define the example matrix (as a vector first)
matrizA<-c(4,4,3,5,1,2,5,5,0,1,2,2,4,5,2,1)
# Shape it into 2 columns
matriz<-matrix(matrizA,ncol=2)
# Convert the matrix to a data frame
matriz<-data.frame(matriz)
# Use the Agglomerative Hierarchical Clusterization Algorithm
agglomerativeHC(matriz,'EUC','MAX')
# To see how the algorithm above proceeds step by step, use the following function
agglomerativeHC.details(matriz,'EUC','MAX')
# Use the Divisive Hierarchical Clusterization Algorithm
divisiveHC(matrizA,'CHE','MIN')
# To see how the algorithm above proceeds step by step, use the following function
divisiveHC.details(matrizA,'CHE','MIN')
# Create the target and weight vectors required by this algorithm
target<- c(2,3)
weight<- c(1,5)
# Use the Hierarchical Correlation Algorithm
correlationHC(matriz,target,weight)
# To see how the algorithm above proceeds step by step, use the following function
correlationHC.details(matriz,target,weight)
|
1b8a617ed1d20ef310b7a46d1d79d1eadee07be3
|
a1241d111c801c927dc800722e82efd2329c1474
|
/man/gemini.Rd
|
b7d771f19150ba90544875167ab8e2910ecd0f23
|
[] |
no_license
|
zkzofn/GEMINI
|
819203296a8e6181aac2e8dfae868ee7a3e6c69b
|
90cea036dc9fe851032c53dd3e85fb922aac7f6f
|
refs/heads/master
| 2020-05-16T15:58:49.945475
| 2019-03-28T01:54:38
| 2019-03-28T01:54:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 196
|
rd
|
gemini.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gemini.R
\name{gemini}
\alias{gemini}
\title{Get data}
\usage{
gemini()
}
\description{
Run GEMINI
}
\keyword{gemini}
|
7170fae1ee0e9f33789590442a2656475fd0dc83
|
c78d381271668ae9fcb74afd00ece39348e349b1
|
/per-poll-simulations/0037-colmar-brunton/config.R
|
9214a3d76183b14434d0be1d83691a3d3dc22476
|
[] |
no_license
|
nzherald/nz-election-prediction
|
b1c1464e2ee0f3cb8bfd109b4ff9f244937f2424
|
5bbafe06a4a1cea09782c4f57210e84a5600b7df
|
refs/heads/master
| 2021-01-01T04:58:12.745919
| 2017-09-21T10:48:16
| 2017-09-21T10:48:16
| 97,279,096
| 0
| 0
| null | 2017-09-16T15:02:30
| 2017-07-14T23:13:56
|
R
|
UTF-8
|
R
| false
| false
| 28
|
r
|
config.R
|
# Per-poll simulation settings for this Colmar Brunton poll.
# NOTE(review): presumably MaxSims is the Monte Carlo simulation count and
# DaysTo the number of days from this poll to election day -- confirm against
# the simulation driver that sources this file.
MaxSims = 5000
DaysTo = 37
|
4b6145910b48841f7ed70c72d912413aaa7dcd7b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/micropan/examples/binomixEstimate.Rd.R
|
4ab930b06567b99b48677ab9bf0bfb117f8f5b76
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 739
|
r
|
binomixEstimate.Rd.R
|
library(micropan)
### Name: binomixEstimate
### Title: Binomial mixture model estimates
### Aliases: binomixEstimate
### ** Examples
# Loading a Panmat object (pan-genome matrix) shipped with the micropan package
data(list="Mpneumoniae.blast.panmat",package="micropan")
# Estimating binomial mixture models; K.range sets how many mixture
# components to try
bino <- binomixEstimate(Mpneumoniae.blast.panmat,K.range=3:8) # using 3,4,...,8 components
print(bino$BIC.table) # minimum BIC at 3 components
# Plotting the optimal model, and printing the summary
plot(bino)
summary(bino)
# Plotting the 8-component model as well
plot(bino,ncomp=8) # clearly overfitted, we do not need this many sectors
# Plotting the distribution in a single genome
plot(bino,type="single") # completely dominated by core genes
|
90bec15aedfb7090d3327353ec7db41d14dae70a
|
5ba437404dc01c953830ab17d304e462ece6b5e1
|
/main.R
|
712a9f6b18c88420efabb5bc7c18537ca5538af1
|
[] |
no_license
|
SonjaiAlena/Proekt
|
a585e16f933b40ade2198c5cec7aa671c1303a94
|
2e2fa34bb2219a52e343e3f6e610d1f8ebdf03a9
|
refs/heads/master
| 2020-11-23T22:22:18.171103
| 2019-12-13T13:25:12
| 2019-12-13T13:25:12
| 227,845,218
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,703
|
r
|
main.R
|
#### Libraries ####
library(rvest)
library(xml2)
library(tidyverse)
library(zoo)
#### 1. Download data from CBR.RU ####
# Scrape daily exchange-rate tables from cbr.ru for every day in
# [date_from, date_to] (dates as "dd.mm.yyyy" strings) and return a list of
# data frames, one per day, each with columns
# Date / NCode / TCode / Units / Name / Rate (all as character).
# NOTE(review): each day's data frame is initialised with one all-NA row
# (row 1) that the HTML-table loop below never overwrites -- callers are
# expected to drop it later (clean_currencies() does this via na.omit).
cbr_currencies_download <- function(date_from = "10.12.2019",
                                    date_to = "10.12.2019") {
  url <- "https://www.cbr.ru/currency_base/daily/?date_req="
  # create a range to add to URL
  daterange <- seq(from = as.Date(date_from, format = "%d.%m.%Y"),
                   to = as.Date(date_to, format = "%d.%m.%Y"),
                   by = "day") %>%
    format("%d.%m.%Y")
  # initialize empty list
  list_cur <- list()
  # start cycle
  for (date in daterange) {
    # for debug - see progress (prints once every 30 days processed)
    if (which(daterange == date) %% 30 == 0) cat("Next month\n")
    # combine URL + DATE to get address, and download page
    page <- read_html(paste0(url, date))
    # get table rows (<tr>) from page
    table_tr <- xml_find_all(page, xpath = ".//tr")
    # initialize empty DF (this NA row survives the loop; see note above)
    table_td <- data.frame(Date = NA,
                           NCode = NA,
                           TCode = NA,
                           Units = NA,
                           Name = NA,
                           Rate = NA)
    # decompose HTML table into dataframe, skipping the header row (tr == 1)
    for (tr in 2:length(table_tr)) {
      row <- xml_find_all(table_tr[tr], xpath = ".//td")
      table_td[tr, ] <- c(date, row %>% map_chr(~xml_text(xml_contents(.x))))
    }
    # add dataframe to list, keyed by the date string
    list_cur[[date]] <- table_td
  }
  return(list_cur)
}
#### 2. Clean Data ####
# Tidy the raw list of per-day currency tables into one clean data frame:
# row-binds the daily tables, drops the all-NA initialisation rows, converts
# decimal-comma Rate strings ("63,7232") to numeric, and parses the dates.
# (The parameter keeps the original's "currecies_list" spelling, since
# callers may pass it by name.)
clean_currencies <- function(currecies_list) {
  # stack the daily tables and remove the NA rows created at initialisation
  currencies_df <- na.omit(do.call(rbind, currecies_list))
  # "63,7232" -> 63.7232 (replace the decimal comma, then convert)
  currencies_df$Rate <- as.numeric(sub(",", ".", currencies_df$Rate, fixed = TRUE))
  # parse "dd.mm.yyyy" strings into Date objects
  currencies_df$Date <- as.Date(currencies_df$Date, format = "%d.%m.%Y")
  currencies_df
}
#### 3. Visualisation ####
# Draw four exploratory plots from a cleaned currency data frame (as produced
# by clean_currencies): a line chart of USD/EUR/JPY rates, a boxplot of rate
# distributions per currency, a correlation heatmap across currencies, and
# log-return series for USD/EUR/JPY. Each plot is print()ed as a side effect;
# nothing is returned.
make_plots <- function(currencies_df) {
  # Simple line plot of three major currencies over time
  gg1 <- currencies_df %>%
    filter(TCode == "USD" | TCode == "EUR" | TCode == "JPY") %>%
    ggplot(aes(x = Date, y = Rate, color = TCode)) +
    geom_line() +
    labs(x = "Date", y = "Exchange Rate",
         title = "FX Rate for USD, EUR and JPY",
         subtitle = paste("From", head(currencies_df$Date, 1),
                          "to", tail(currencies_df$Date, 1)),
         caption = "Data from CBR")
  print(gg1)
  # Boxplots per currency, ordered by descending median rate
  gg2 <- currencies_df %>%
    group_by(TCode) %>%
    mutate(med = median(Rate)) %>%
    ungroup() %>%
    ggplot(aes(x = reorder(TCode, -med), y = Rate,
               fill = TCode)) +
    geom_boxplot() +
    labs(x = "Currency", y = "Rate distribution",
         title = "Distribution of currency rates",
         subtitle = paste("From", head(currencies_df$Date, 1),
                          "to", tail(currencies_df$Date, 1)),
         caption = "Data from CBR")
  print(gg2)
  # Pearson correlation heatmap: spread rates into a wide Date x Currency
  # matrix, correlate columns, then melt back for geom_tile
  gg3 <- currencies_df %>%
    select(Date, TCode, Rate) %>%
    spread(TCode, Rate) %>%
    select(-1) %>%
    cor() %>%
    as.data.frame() %>%
    rownames_to_column("Cur2") %>%
    gather(key = Cur1, value, -Cur2) %>%
    ggplot(aes(x = Cur1, y = Cur2, fill = value)) +
    geom_tile() +
    scale_fill_gradient2(low = "darkblue", high = "darkred", mid = "white",
                         midpoint = 0, limit = c(-1, 1), space = "Lab",
                         name = "Pearson\nCorrelation") +
    labs(x = "Currency 1", y = "Currency 2",
         title = "Correlation matrix for currencies",
         subtitle = paste("From", head(currencies_df$Date, 1),
                          "to", tail(currencies_df$Date, 1)),
         caption = "Data from CBR")
  print(gg3)
  # Daily log returns (first difference of log rate) per currency, faceted
  gg4 <- currencies_df %>%
    mutate(logRate = log(Rate)) %>%
    group_by(TCode) %>%
    mutate(logret = diff(zoo(logRate), na.pad = TRUE)) %>%
    ungroup() %>%
    na.omit() %>%
    filter(TCode == "EUR" | TCode == "USD" | TCode == "JPY") %>%
    ggplot(aes(x = Date, y = logret, color = TCode)) +
    geom_line() +
    facet_grid(TCode~.) +
    labs(x = "Date", y = "Log returns",
         title = "Log returns of EUR, USD and JPY",
         subtitle = paste("From", head(currencies_df$Date, 1),
                          "to", tail(currencies_df$Date, 1)),
         caption = "Data from CBR")
  print(gg4)
}
#### RUN ####
# Download (scrapes one page per day from cbr.ru -- slow for long ranges)
currencies <- cbr_currencies_download(date_from = "01.01.2019",
                                      date_to = "10.12.2019")
# Clean
currencies_df <- clean_currencies(currencies)
# Plots
make_plots(currencies_df)
|
293ea65b3a7a317de0a8992818f469f4b0907edf
|
98dd503577be436fe8892501a137561c5afc18d6
|
/mapping1.R
|
1c6bcdbe67c17e6d41adf6c1db85f8d7b07756e2
|
[] |
no_license
|
xinqiaoz/MA-615-Mapping
|
a4e8dc3456fd82e0bcdcbfa54f5aa3e045e8f79a
|
9d57e07bf5bc252024ffdb3e8932b4f6ec55304c
|
refs/heads/master
| 2020-08-27T00:27:45.963603
| 2019-10-24T04:34:20
| 2019-10-24T04:34:20
| 217,194,243
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 738
|
r
|
mapping1.R
|
# Shiny app: draws a world map and lets the user switch the map projection.
library(shiny)
library(ggmap)
library(maptools)
library(maps)
# Base world polygon layer, reused by every projection
mapWorld <- map_data("world")
mp1 <- ggplot(mapWorld, aes(x=long, y=lat, group=group))+
  geom_polygon(fill="white", color="black")
# Projections offered in the dropdown.
# NOTE(review): coord_map() is called with no projection parameters below;
# some mapproj projections (e.g. "gnomonic") can fail or distort badly over
# the full world extent -- confirm these all render.
projections<- c("cylindrical", "mercator", "sinusoidal", "gnomonic")
ui<- fluidPage(
  titlePanel("World map in different projections"),
  sidebarLayout(
    sidebarPanel(
      selectInput("proj", "Select projection for the map", projections)
    ),
    mainPanel(
      plotOutput(outputId = "mapping")
    )
  )
)
# Server: re-projects the prebuilt ggplot using the selected projection
server<- function(input, output, session){
  output$mapping<- renderPlot({
    mp1 + coord_map(input$proj, xlim=c(-180,180), ylim=c(-60, 90))
  })
}
shinyApp(ui, server)
|
b975a84179a1236b2b0b8cf983f801c674d0ada5
|
eff5c5420e047f576766f26d2fb15b0fcd7deb96
|
/tad_boundary/merge_tads_across_stages.r
|
66269ed6d3f28b6e8fa940db59082c652ef9524a
|
[] |
no_license
|
liuzhe93/MERVL_mESC
|
3323ee742087f54b91161b6778960546482992cf
|
cd0c1b5431e48d3197f416769f8516175350c4b3
|
refs/heads/main
| 2023-05-07T07:53:44.088191
| 2021-05-24T12:50:16
| 2021-05-24T12:50:16
| 370,325,481
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,124
|
r
|
merge_tads_across_stages.r
|
# Greedy merge of TAD (topologically associating domain) calls across stages:
# repeatedly take the first remaining call and collapse every call on the
# same chromosome whose start AND end both lie within 50 kb of it into one
# record, tracking which samples contributed.
# NOTE(review): assumes data.table (for fread) is already loaded by the
# caller -- no library(data.table) appears in this file.
a= data.frame(fread("combined_tads.raw.sorted.txt"))
colnames(a) = c("chr1","x1","x2","sample")
# a = a[order(-a$score),]
combined = list()
while ( nrow(a)>0){
  tad = a[1,]
  #x1 = ifelse(a$x1
  # indices of all calls matching this TAD within the 50 kb tolerance
  idx = which( a$chr1 == tad$chr1 & abs(a$x1-tad$x1)<=50000 & abs(a$x2-tad$x2)<=50000 )
  # record contributing samples; names are truncated to their first 8 chars
  # by stripping the ".TAD.bed" suffix pattern
  tad$samples = paste(sort(sub("(.{8}).TAD.bed","\\1",a[idx,"sample"])),collapse=",")
  len = length(combined)
  print(len)  # progress indicator: one line per merged TAD
  combined[[len+1]] = tad
  a = a[-idx,]
}
tads = do.call(rbind,combined)
library(stringr)
# number of supporting calls = commas in the sample list + 1
tads$num_calls = str_count(tads$samples,",")+1
tads$chr1 = paste0("chr",tads$chr1)
#tads$chr2 = paste0("chr",tads$chr2)
write.table(tads,"combined_tads.uniq.txt",row.names=F,sep='\t',quote=F)
write.table(tads[which(tads$num_calls>1),],"combined_tads.uniq.gt1.txt",row.names=F,sep='\t',quote=F)
# Re-read the merged table and flag which developmental stages (D00..D80)
# contributed to each TAD.
a = read.delim("combined_tads.uniq.txt")
a$D00 = grepl("D00",a$samples)
a$D02 = grepl("D02",a$samples)
a$D05 = grepl("D05",a$samples)
a$D07 = grepl("D07",a$samples)
a$D15 = grepl("D15",a$samples)
a$D80 = grepl("D80",a$samples)
# count of stages per TAD; columns 7:12 are the D00..D80 flags just added
a$num_stages = rowSums(a[,7:12])
write.table(a,"combined_tads.uniq.gt1.rep.txt",row.names=F,sep='\t',quote=F)
|
604976e5ed97251cf2d72f16cc46684f731964c2
|
f422b206a514668a4cfe31533fdf411a33de088d
|
/paper/little_helpers.r
|
78b14005dd92668598f581349e5ab59d1f439394
|
[] |
no_license
|
a-fent/microsim-ipf
|
599ce154c350f18fdc6fa497156661c1cc5a5502
|
d70b37d925f32eb33b79e9d540c391a58f7158ec
|
refs/heads/master
| 2021-01-17T07:41:22.824130
| 2016-07-18T10:07:17
| 2016-07-18T10:07:17
| 41,634,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 699
|
r
|
little_helpers.r
|
# Definitions for boroughs
# Lookup table mapping LA.NAME to short/abbreviated forms; read with readr,
# so this is a tibble (relevant for how columns must be extracted below).
borough.defs <- read_csv("data/inner_outer_london.csv")
# Return the data frame's column names with every dot replaced by a space
# (e.g. "City.of.London" -> "City of London").
clean.la.colnames <- function(df) {
  gsub(".", " ", colnames(df), fixed = TRUE)
}
# Map full local-authority names to their short names via the global
# `borough.defs` lookup table; names with no entry are returned unchanged.
#
# Fix: use [[ ]] column extraction so the lookup always yields a plain
# character vector. `borough.defs` is read with readr::read_csv and is a
# tibble, so the original `borough.defs[idx, "LA.SHORTNAME"]` returned a
# 1-column tibble, breaking the NA-replacement step below.
shorten.la.names <- function(la.names) {
  short <- borough.defs[["LA.SHORTNAME"]][match(la.names, borough.defs[["LA.NAME"]])]
  # fall back to the original name where no short form is defined
  short[is.na(short)] <- as.character(la.names[is.na(short)])
  short
}
# Map full local-authority names to their 3-letter abbreviations via the
# global `borough.defs` lookup table; names with no entry are returned
# unchanged.
#
# Fix: use [[ ]] column extraction so the lookup always yields a plain
# character vector. `borough.defs` is read with readr::read_csv and is a
# tibble, so the original `borough.defs[idx, "LA.ABBR3"]` returned a
# 1-column tibble, breaking the NA-replacement step below.
abbreviate.la.names <- function(la.names) {
  short <- borough.defs[["LA.ABBR3"]][match(la.names, borough.defs[["LA.NAME"]])]
  # fall back to the original name where no abbreviation is defined
  short[is.na(short)] <- as.character(la.names[is.na(short)])
  short
}
library(Hmisc)
# Wrapper for Hmisc's latex: prints the LaTeX for `tbl` to stdout
# (file = "") with numeric dollar-escaping disabled; extra arguments are
# forwarded to Hmisc::latex.
latex.glove <- function(tbl, ...) {
  latex(tbl, file="", numeric.dollar=FALSE, ...)
}
|
840c48846aeb08853e39b691ec402100973ac737
|
249f5d382170830dd953323c5cc7c7dc539dd06d
|
/ROC and AUC.R
|
0ae578e136830170106bd5705caee6f10d758ac8
|
[] |
no_license
|
emuvan/dataset
|
5bbbcc09b7eed69a6650bceec9d7b9c19bbaebda
|
bbd4a6235c287697ccaf1e2ed35be31710b88b8e
|
refs/heads/master
| 2022-04-18T21:40:22.586715
| 2020-04-17T22:55:59
| 2020-04-17T22:55:59
| 256,626,401
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,305
|
r
|
ROC and AUC.R
|
# Fit a logistic-style model on the blood-transfusion data, then evaluate it
# with accuracy/ROC/AUC/precision-recall curves via ROCR.
library(ggplot2)
library(magrittr)
library(ROCR)
library(ISLR)
library(caret)
# NOTE(review): `transfusion` is not provided by any of the packages attached
# above -- confirm where it is loaded from. Also `data1` is assigned but the
# rest of the script uses `transfusion` directly.
data1 <- transfusion
str(data1)
#logistic regression Model show the values
# (multinom on a binary outcome is equivalent to logistic regression)
library(nnet)
mymodel <- multinom(output~., data = transfusion)
#miss classification and the confusionmatrix
p <- predict(mymodel,transfusion)
tab <- table(p, transfusion$output)
tab
#giving out the miss classification (training error rate)
1-sum(diag(tab))/sum(tab)
#giving out the right classification (training accuracy)
sum(diag(tab))/sum(tab)
#overall result who transmitted blood
table(transfusion$output)
#creating prediction: probabilities first, then a ROCR prediction object
# (note: `pred` is reused/overwritten here)
pred <- predict(mymodel,transfusion,type = 'prob')
pred <- prediction(pred,transfusion$output)
eval <- performance(pred,"acc")
plot(eval)
abline(h=0.79, v=0.45)
#identify the best values (cutoff maximising accuracy)
max <- which.max(slot(eval, "y.values")[[1]])
acc <- slot(eval, "y.values")[[1]][max]
cut <- slot(eval, "x.values")[[1]][max]
print(c(Accuracy=acc, Cutoff = cut))
#ROC curve plot: true-positive rate vs false-positive rate
roc <- performance(pred, "tpr","fpr")
plot(roc,
colorize=T,
main = "ROC Curve",
ylab = "Sensitivity",
xlab = "1-Specificity")
abline(a=0, b=1)
#AUC (area under the ROC curve), rounded and shown in a legend
auc <- performance(pred, "auc")
auc <- unlist(slot(auc, "y.values"))
auc <- round(auc, 4)
legend(.7, .4, auc, title = "AUC", cex = 0.5)
#precision vs. recall
RP.perf <- performance(pred, "prec", "rec")
plot (RP.perf)
|
5c05d92a81e571a19555222f43201fe35b2834a9
|
175c00f92df7d5829d3fd09157df3db979f47701
|
/RVisualization/R实战-可视化.R
|
bf568ebd70354bec6220ad0fc9c3a0bdcef50442
|
[] |
no_license
|
lealb/r-project
|
25958f63bab82da0475efdc6196a18bf6418622c
|
c2e919a2198c4b5edc04c1db9271cec607a5b67a
|
refs/heads/master
| 2022-06-23T12:04:31.290286
| 2017-06-18T15:56:55
| 2017-06-18T15:56:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,962
|
r
|
R实战-可视化.R
|
# Base-graphics tutorial script: data entry, a PDF plot, par() customisation,
# custom axes, text annotation, and multi-figure layouts.
#input from key (interactive data entry)
mydata <- data.frame(a=numeric(),b=character());
##1
mydata <- edit(mydata)
##2
mydata <- fix(mydata)
#grathic
pdf("result.pdf")# save as pdf
attach(mtcars)
head(mtcars)
plot(wt,mpg)
abline(lm(mpg~wt))
# NOTE(review): title string has a typo ("0f" for "of") -- left unchanged here
title("Regression 0f MPG ON Weight")
detach(mtcars)
dev.off()
par(no.readonly = T)# snapshot the current graphics parameters as a modifiable list
dose <- c(20,30,40,50,60)
drugA <- c(16,20,27,40,60)
drugB <- c(15,18,25,31,40)
data.drug <- data.frame(dose=dose,drugA=drugA,drugB=drugB)
opar <- par(no.readonly = T)
par(pin=c(2,3))#size
par(lwd=2,cex=1.5)
par(cex.axis=.75,font.axis=3)#font
# NOTE(review): lines()/abline()/minor.tick()/legend() below are called
# before any plot() in this section -- they will fail with
# "plot.new has not yet been called"; run the plot() calls further down first.
lines(dose,drugB,type = "b",pch=17,lty=2,col="blue")
abline(h = c(30),lwd=1.5,lty=2,col="gray")
library(Hmisc)
minor.tick(nx=3,ny=3,tick.ratio=0.5)# add minor tick marks
legend("topleft",inset = 0.05,title = "Drug TYpe",c("A","B"),lty = c(1,2),pch = c(15,17),col = c("red","blue"))
#par(xlab="dose",ylab="druaB")
plot(data.drug$dose,data.drug$drugA,type = "b",pch=19,lty=2,col="red")
plot(data.drug$dose,data.drug$drugB,type = "b",xlab="dose",ylab="druaB",main = "DOSE&DRUUB COMPARE",
sub = "author:Leal", pch=23,lty=6,col="blue",bg="green")
par(opar)
# side argument: 1=bottom, 2=left, 3=top, 4=right
# generate data
x <- c(1:10)
y <- x
z <- 10/x
opar <- par(no.readonly = TRUE)
par(mar=c(5,4,4,8)+0.01)# enlarge the margins
plot(x,y,type = "b",pch=21,col="red",yaxt="n",lty=3,ann = FALSE)# plot x against y
lines(x,z,type = "b",pch=22,col="blue",lty=2)# add the x vs 1/x line
# draw custom axes
axis(2,at = x,labels = x,col.axis="red",las=2)
axis(4,at = z,labels = round(z,digits = 2),col.axis="blue",las=2,cex.axis=0.7,tick = -.01)
mtext("y=1/x",side = 4,line = 3,cex.lab=1,las=2,col = "blue")# add titles and text
title("An example",xlab = "x values",ylab = "y=x")
par(opar)
#text&mtext
attach(mtcars)
plot(wt,mpg,main = "Milege vs Car.Weight",xlab = "Weight",ylab = "Milege",pch=18,col="blue")
text(wt,mpg,row.names(mtcars),cex = 0.6,pos = 4,col = "red")
detach(mtcars)
# par() or layout() make it easy to combine multiple figures
attach(mtcars)
opar <- par(no.readonly = TRUE)
par(mfrow=c(2,2))
plot(wt,mpg,main = "wt~mpg")
plot(wt,disp,main = "wt~disp")
hist(wt,main = "hist of wt")
boxplot(wt,main="boxplot oc wt")
par(opar)
detach(mtcars)
#way 2
#widths = a vector of column widths
#heights = a vector of row heights
# precisely control the size of each figure
attach(mtcars)
opar <- par(no.readonly = TRUE)
#par(mfrow=c(3,1))
layout(matrix(c(1,1,2,3),2,2,byrow = TRUE))
hist(wt)
hist(mpg)
hist(disp)
par(opar)
detach(mtcars)
# Fine-grained control of a multi-figure layout: a scatterplot with marginal
# boxplots, positioned via par(fig = c(x1, x2, y1, y2)) in normalized device
# coordinates. Fix: the first call misspelled the parameter as "flg", which
# par() would reject -- it must be "fig".
opar <- par(no.readonly = TRUE)
par(fig=c(0,0.8,0,0.8)) # main scatterplot region (was par(flg=...))
plot(mtcars$wt,mtcars$mpg,xlab = "Miles per Gallon",ylab = "Car Weight")
par(fig=c(0,0.8,0.55,1),new=TRUE)   # boxplot above the scatterplot
boxplot(mtcars$wt,horizontal = TRUE,axes=FALSE)
par(fig=c(0.65,1,0,0.8),new=TRUE)   # boxplot to the right
boxplot(mtcars$wt,axes=FALSE)
mtext("Enhanced Scatterplot",side = 3,outer = TRUE,line = -3)
par(opar)
#
|
7072d63f1e0bc81342fa551240ba5e882dcf66c4
|
2d6418dc719114e785f716b08478089503bc0ab2
|
/r/library/table/return.r
|
6f339ebe394f55b2bc4922fa901a95c6c1d8269e
|
[] |
no_license
|
jk983294/math
|
9a9343bde5804c8655a897a20ded3e67f528b225
|
f883e281961cd4cf16851de15e64746f59b966c7
|
refs/heads/master
| 2023-09-04T13:04:17.944946
| 2023-09-04T09:25:07
| 2023-09-04T09:25:07
| 139,425,932
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,285
|
r
|
return.r
|
library(data.table)
# Synthetic example: 10 bars per day, 2 days, 2 instruments (ukeys 1 and 2);
# builds every flavour of intraday/overnight return, then market-adjusts them.
index <- 1:10
ticks <- 929000 + (1000 * index)
ticks <- rep(ticks, 4)
cp <- c(1.01^(1:10), (1.01^10) * (1.02^(1:10)), 1.02^(1:20))
pre_cp <- c(rep(1, 10), rep(1.01^10, 10), rep(1, 10), rep(1.02^10, 10))
ii <- c(rep(1, 20), rep(2, 20))
capt <- c(rep(0.5, 20), rep(2, 20))
beta <- c(rep(0.1, 20), rep(0.2, 20))
dates <- rep(c(rep(1, 10), rep(2, 10)), 2)
UNIVERSE <- rep(1, 40)
dt <- data.table(ukey = ii, DataDate = dates, ticktime = ticks, close = cp, vwap = cp,
    pre_close = pre_cp, capt = capt, beta = beta, UNIVERSE = UNIVERSE)
# fill=pre_close[[1L]] fills the leading shift() NA with the group's first pre_close
dt[, `:=`(ret, close/shift(close, fill = pre_close[[1L]]) - 1), by = .(ukey, DataDate)] # per-bar return
# open to now
dt[, `:=`(ret_on, close/close[[1L]] - 1), by = .(ukey, DataDate)]
# pre_close to now
dt[, `:=`(ret_pcn, close/pre_close[[1L]] - 1), by = .(ukey, DataDate)]
# pre_close to open
dt[, `:=`(ret_pco, close[[1L]]/pre_close[[1L]] - 1), by = .(ukey, DataDate)]
# today close to next day open (set on the first bar, then broadcast below)
dt[ticktime == 930000L, `:=`(ret_co, shift(ret_pco, 1L, type = "lead", fill = 0)),
    by = ukey]
dt[, `:=`(ret_co, first(ret_co)), by = .(ukey, DataDate)]
# open to close
dt[, `:=`(ret_oc, last(ret_on)), by = .(ukey, DataDate)]
# now to close
dt[, `:=`(ret_nc, last(close)/close - 1), by = .(ukey, DataDate)]
# now to next day open
dt[, `:=`(ret_no, (1 + ret_nc) * (1 + ret_co) - 1), by = .(ukey, DataDate)]
# previous day now to previous day close
dt[, `:=`(ret_pnc, shift(ret_nc, fill = 0)), by = .(ukey, ticktime)]
# previous day now to today open
dt[, `:=`(ret_pno, (1 + ret_pnc) * (1 + ret_pco) - 1))]
# vret: vwap to vwap ret
dt[, `:=`(vret, vwap/shift(vwap, fill = pre_close[[1L]]) - 1), by = .(ukey, DataDate)]
# derive column-name vectors: m* = beta-adjusted, um* = universe-demeaned
ret_cols <- grep("ret", names(dt), value = TRUE)
mret_cols <- paste0("m", ret_cols)
umret_cols <- paste0("um", ret_cols)
x <- 1:2
cap <- 2:3
weighted.mean(x, cap, na.rm = TRUE)  # 1.6 = 1 * (2/5) + 2 * (3/5)
# beta-adjust: subtract beta * cap-weighted market return per bar
dt[, `:=`((mret_cols), lapply(.SD, function(x) x - beta * weighted.mean(x, capt,
    na.rm = TRUE))), by = .(DataDate, ticktime), .SDcols = ret_cols]
# then demean within each UNIVERSE per bar
dt[, `:=`((umret_cols), lapply(.SD, function(x) x - weighted.mean(x, capt, na.rm = TRUE))),
    by = .(DataDate, ticktime, UNIVERSE), .SDcols = mret_cols]
|
baa41a1fae5b2461ecfc17f9b1c78ee94afa7cdd
|
a5d1975f45ed63de4f6ed733823d1b13fbc6755e
|
/dataUtils/R/uniqueNames.R
|
46a8c17ec299293701acd15a2b6414c4d274c823
|
[
"MIT"
] |
permissive
|
xieguigang/visualbasic.R
|
ce865f314cfca04e556cfc48fa0b50a2ec2c126a
|
d91613a72f722616ec342873b8acfb2d6fd2b7f2
|
refs/heads/master
| 2022-02-27T17:08:53.160878
| 2022-02-07T05:30:30
| 2022-02-07T05:30:30
| 81,850,763
| 0
| 0
| null | 2018-06-26T02:34:49
| 2017-02-13T17:17:08
|
R
|
UTF-8
|
R
| false
| false
| 569
|
r
|
uniqueNames.R
|
#Region "Microsoft.ROpen::1dd9bd372509b65b661f52e0264aabe7, uniqueNames.R"
# Summaries:
# uniqueNames = function(names) {...
#End Region
# Make every element of `names` unique: the first occurrence keeps its name,
# and the n-th repeat becomes "<name>_<n-1>". A warning is raised for each
# duplicate encountered. Returns NULL for empty input (matching append/c()).
uniqueNames = function(names) {
  seen <- list()
  result <- c()
  for (nm in names) {
    count <- seen[[nm]]
    if (is.null(count)) {
      # first time we meet this name: keep it as-is
      seen[[nm]] <- 1
      result <- c(result, nm)
    } else {
      # duplicate: suffix with the running count, then bump it
      seen[[nm]] <- count + 1
      result <- c(result, sprintf("%s_%s", nm, count))
      warning(sprintf("found duplicated name: %s", nm))
    }
  }
  result
}
|
3ded5bafc68d74ea8614d7c2e93761611bf5e6ca
|
4dd03c6cae89837a1c8c86925b8f40fff0df053c
|
/PUT FILES IN HERE/SPADE.R
|
2d4733906ac0236245d632995bc4beb7b57da023
|
[] |
no_license
|
NortonS1/KempkirmanRwork
|
3a945f2e839339a5ff74719dc6a86e57148f58e2
|
14c18c39617e7491c3c58303e278f83054e14266
|
refs/heads/master
| 2020-07-26T00:11:15.706622
| 2017-02-28T20:45:56
| 2017-02-28T20:45:56
| 73,643,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,856
|
r
|
SPADE.R
|
#SPADE!!!
# Load the sample, z-score every column, and set up plotting helpers.
# NOTE(review): this file uses right-assignment (->) throughout.
read.csv("Sample 1-Sam.csv") -> x
apply(x,2,mean) -> xm
apply(x,2,sd) -> xs
# standardize columns using the precomputed means/SDs
scale(x,xm,xs) -> xscaled
as.data.frame(xscaled) -> x
# diverging blue->red palette used by the cluster heatmap
My_Palette <- colorRampPalette(c("navy","aliceblue","bisque","chocolate1","firebrick"))(256)
library(igraph)
library(circlize)
library(ComplexHeatmap)
library(dendextend)
# SPADE-style analysis: hierarchically cluster the rows of `x` into `k`
# clusters, summarise each cluster (mean phenotype + size), write a heatmap
# and per-cluster boxplots to PNG files, and plot a thresholded
# minimum-spanning-tree-like network of clusters.
#
# Args:
#   x: data frame of standardized measurements (rows = cells/observations).
#   k: number of clusters to cut the dendrogram into.
#
# Side effects: writes PNGs under a hard-coded ~/Desktop path, assigns the
# global `SPADEgraph` via <<-, and reads the global `My_Palette`.
# NOTE(review): output paths are machine-specific; layout.forceatlas2 is not
# part of igraph -- confirm which package provides it.
SPADE <- function(x,k){
  #initial clustering and binning
  dist(x, method = "manhattan") -> distx
  hclust(distx) -> clus_x
  cutree(clus_x, k = k) ->cut_x
  datalist = list()
  abundancedatalist = list()
  # per-cluster column means (phenotype) and dim() (abundance = nrow, ncol)
  for(i in 1:k){
    dat = data.frame(colMeans(x[c(cut_x == i),]))
    datalist[[i]] <- dat
    abundancedat = data.frame(dim(x[c(cut_x == i),]))
    abundancedatalist[[i]] <- abundancedat}
  #cleaning data and assigning to data frame
  big_data = do.call(cbind, datalist)
  big_data = t(big_data)
  clus_num <- c(1:k)
  clus_names <- as.character(clus_num)
  as.data.frame(big_data, row.names = c(clus_names)) -> cluster_means
  abd_data = do.call(cbind, abundancedatalist)
  abd_data = t(abd_data)
  clus_num <- c(1:k)
  clus_names <- as.character(clus_num)
  as.data.frame(abd_data, row.names = c(clus_names)) -> cluster_abundance
  # keep only the first dim() component, i.e. the cluster's row count
  cluster_abundance[,1] -> cluster_abundance
  full_data = data.frame(cluster_means, cluster_abundance)
  # View(full_data[,-ncol(full_data)])
  # Heatmapping clusters for easy viewing
  mypath4 <- file.path("~/Desktop","Lab R work","PUT FILES IN HERE",
                       "Images",paste("Heatmap_","cluster_", ".png", sep = ""))
  png(file = mypath4)
  heatmap(as.matrix(cluster_means), Colv = NA, col = My_Palette)
  dev.off()
  # Saving phenotypes as box and whisker graphs (one PNG per cluster)
  for(i in 1:k){
    mypath3 <- file.path("~/Desktop","Lab R work","PUT FILES IN HERE",
                         "Images",paste("phenotype_","cluster_", i, ".png", sep = ""))
    png(file = mypath3, width = 1700, units = "px")
    phedat = data.frame(x[c(cut_x == i),])
    boxplot.matrix(as.matrix(phedat), cex = 0.5, pch = 20, las = 2,
                   main = paste("cluster",i, sep = " "))
    dev.off()}
  #calculating cluster distances and plotting
  dist(full_data[,-ncol(full_data)], method = "manhattan") -> distx1
  adjgraph <- graph.adjacency(as.matrix(distx1),mode="upper",weighted=TRUE)
  # MST saved globally as a side effect; the plot below uses the thresholded
  # graph instead
  SPADEgraph <<-minimum.spanning.tree(adjgraph)
  V(adjgraph)$abundance <- full_data[,ncol(full_data)]
  V(adjgraph)$size <- log10(V(adjgraph)$abundance)*10
  # E(adjgraph)$edge.width <- (E(adjgraph)$weight)
  mypath2 <- file.path("~/Desktop","Lab R work","PUT FILES IN HERE",
                       "Images",paste("Network_", ".png", sep = ""))
  # drop edges below mean + 1 SD of the edge weights
  # NOTE(review): the comparison keeps edges ABOVE the cutoff (deletes
  # `weight < cut.off`) -- confirm that is the intended direction.
  cut.off <- mean(E(adjgraph)$weight)+sd(E(adjgraph)$weight)
  adjgraph.sp <- delete_edges(adjgraph,E(adjgraph)[E(adjgraph)$weight < cut.off])
  print(adjgraph.sp)
  print(E(adjgraph)$weight)
  png(file = mypath2)
  layout.forceatlas2(adjgraph.sp, iterations = 100, linlog = TRUE, k = 100, gravity = 1, ks = 100 ) -> forcedirected
  plot(adjgraph.sp, layout = forcedirected)
  dev.off()}
|
617df4ce82c9893e3365e41f362d388ec5bc58c8
|
0bdea795ba4eacc6703abdb937312189abab426d
|
/man/ttpower.Rd
|
3d65c17fb5e2a7c5dec4704b29f8c2785de6950d
|
[] |
no_license
|
cran/TrendInTrend
|
8bcc66ae3690672ad6b4f478d37b6fa4353b4455
|
4f060f9b8c31a4df6eaabd915df04bb46024c6eb
|
refs/heads/master
| 2021-01-20T19:21:33.170856
| 2020-03-05T16:30:02
| 2020-03-05T16:30:02
| 60,498,774
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,009
|
rd
|
ttpower.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TrendInTrend.R
\name{ttpower}
\alias{ttpower}
\title{Power calculation in trend-in-trend design}
\usage{
ttpower(N, time, G, cstat, alpha_t, beta_0, h1.OR, nrep)
}
\arguments{
\item{N}{Sample Size.}
\item{time}{Number of time points.}
\item{G}{Number of CPE strata.}
\item{cstat}{Value of the c-statistic.}
\item{alpha_t}{A scalar that quantifies the trend in exposure prevalence.}
\item{beta_0}{Intercept of the outcome model.}
\item{h1.OR}{A given odds ratio.}
\item{nrep}{Number of Monte Carlo replicates.}
}
\value{
\item{power}{Power of detecting the given Odds Ratio.}
}
\description{
Monte Carlo power calculation for trend-in-trend design.
}
\examples{
\donttest{
set.seed(123)
ttpower(N=10000,time=10,G=10,cstat=0.75,alpha_t= 0.4,beta_0=-4.3,h1.OR=1.5,nrep=50)
}
}
\references{
Ertefaie A, Small DS, Ji X, Leonard C, Hennessy S (2018). Statistical Power for Trend-in-trend Design. Epidemiology. 29(3), e21–e23.
}
|
152d6fcf86219f4e6abb4c670da4526f03b0b38b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RQEntangle/vignettes/CoupledTwoLevelSystems.R
|
80cedaaa921c540bbba3afa107ed3a0bef84e57a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 658
|
r
|
CoupledTwoLevelSystems.R
|
# Code extracted (purled) from the RQEntangle vignette: Schmidt-decompose a
# bipartite two-level pure state and compute its entanglement entropy.
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----loading library, include=FALSE--------------------------------------
library(RQEntangle)
## ----singlet-------------------------------------------------------------
# Coefficient matrix of the state sqrt(0.7)|01> + sqrt(0.3)|10>
# (squared amplitudes sum to 1, so the state is normalized)
singlet<- matrix(c(0, sqrt(0.7), sqrt(0.3), 0), byrow = TRUE, nrow = 2)
## ----run_decompose, echo=FALSE-------------------------------------------
modes<- schmidt.decompose(singlet)
## ----display_modes-------------------------------------------------------
modes
## ----entropy-------------------------------------------------------------
entanglement.entropy(modes)
|
9c0f010da56a0ef03d5c4dc2b594522fcb2f152a
|
f042fbdf31a2106bfbe298b32dc0aa551bd3ae84
|
/man/spatial_fit_control.Rd
|
fd616f5ab8746244507efca9db14fda27eaded41
|
[] |
no_license
|
danielbonhaure/weather-generator
|
c76969967c3a60500a6d90d5931a88fb44570eba
|
6a207415fb53cca531b4c6be691ff2d7d221167d
|
refs/heads/gamwgen
| 2023-01-21T17:38:46.102213
| 2020-12-04T21:59:05
| 2020-12-04T21:59:05
| 286,565,700
| 0
| 0
| null | 2020-12-01T13:19:05
| 2020-08-10T19:50:16
|
R
|
UTF-8
|
R
| false
| true
| 415
|
rd
|
spatial_fit_control.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_spatial.R
\name{spatial_fit_control}
\alias{spatial_fit_control}
\title{Weather model fit configuration}
\usage{
spatial_fit_control(
prcp_occurrence_threshold = 0.1,
avbl_cores = 2,
planar_crs_in_metric_coords = 22185
)
}
\description{
Provides fine control of different parameters that will be used to fit a weather model.
}
|
fc0b9bb4d9762f562f3b132675e3fc71dca40270
|
b283a855db471c6cf2b6f3395a38a27e45c19da6
|
/ProviderProfiling1/Functions/Functions - Simulation/fun_LRana_raw.R
|
9dce4db770f88a05dd60ca7322c2749a214f3e6c
|
[] |
no_license
|
timobrakenhoff/ProviderProfiling1
|
28431d4456038dad7582fc2e346495e079166e63
|
d21038543dfdf0282ff8a565736fa601d79bd593
|
refs/heads/master
| 2021-08-19T19:48:01.392787
| 2017-11-27T08:54:37
| 2017-11-27T08:54:37
| 112,163,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,480
|
r
|
fun_LRana_raw.R
|
###################################################
# Logistic regression Function with fixed effects #
###################################################
##INPUT:
#dat frame with last col = outcome, first col = indicator, All predictors (centered)
#REMOVE Variable #s that need to be removed before analysis
#SEED Select to set seed
##OUTPUT:
#GLM GLM object with binomial and logit
#BRIER SCORE Calculated Brier Score
# Fit a fixed-effects logistic regression of the (last-column) outcome on the
# centre/provider indicator (first column) via a survey design with unit
# weights, and report centre coefficients, SEs, CIs and the Brier score.
#
# Args:
#   dat:    data frame; first column = centre indicator (factor), last
#           column = binary outcome.
#   remove: column index or vector of indices to drop before analysis
#           (NA = keep all columns).
#   seed:   if TRUE, fix the RNG seed for reproducibility.
#   ref:    reference level of the centre indicator.
#   n.zh:   number of centres; coefficients/SEs for positions 1..n.zh+1
#           (intercept + centres) are extracted.
#
# Returns a list: GLM (fitted svyglm), BS (Brier score), LR.coef, LR.se, LR.ci.
LRana_raw <- function(dat,remove=NA,seed=F,ref="C",n.zh=2,...){
  #set seed
  if (isTRUE(seed)) set.seed(123)
  # Drop requested columns. `any(!is.na(remove))` is vector-safe: the
  # original `if (is.na(remove)==F)` errors in R >= 4.2 whenever `remove`
  # holds more than one column index.
  if (any(!is.na(remove))) {
    dat <- dat[,-remove]
  }
  #Define outcome variable name (last column)
  out <- tail(colnames(dat),1)
  #List predictor names (everything but the outcome)
  preds <- colnames(dat)[-ncol(dat)]
  #Formula: outcome ~ intercept + centre indicator releveled to `ref`
  form <- as.formula(paste0(out,"~1+ relevel(",preds[1],",\"",ref,"\")"))
  # Survey design with unit weights, then survey-weighted logistic regression
  des <- svydesign(ids=~1,weights=rep(1,nrow(dat)),data=dat)
  ml <- svyglm(formula=form, design=des,data=dat,family=binomial())
  # Brier score: mean squared distance between fitted probs and outcomes
  BS <- sum((ml$fitted.values-dat[,ncol(dat)])^2)/nrow(dat)
  #Extract coefficients (intercept + centre effects)
  LR.coef <- ml$coefficients[1:(n.zh+1)]
  #Extract SEs
  LR.se <- summary(ml)$coefficients[1:(n.zh+1),2]
  #Extract CIs for the centre effects (1:n.zh+1 == 2:(n.zh+1), skipping the
  #intercept -- kept as in the original)
  LR.ci <- confint(ml,parm=1:n.zh+1)
  #List GLM model and Brier Score
  list('GLM'=ml,"BS"=BS,"LR.coef"=LR.coef,"LR.se"=LR.se,"LR.ci"=LR.ci)
}
|
0a84560d5d1b9d13f2a0eb91293df9cd74ef041a
|
8059c9c4ba6d57e651b6c65fa22506fc2de735c2
|
/10_04/deploy-many-apps.R
|
0742f8e28f0d0a91b97307509d425ca3873deb18
|
[] |
no_license
|
dmancilla85/r-shiny-essential-training
|
da4367552d6d23ee2db62a6d18f7076f200195bc
|
d41c3e824fda94ee13ffdd5dbe4462bae76b477a
|
refs/heads/master
| 2022-12-22T22:55:07.369389
| 2020-09-27T20:55:51
| 2020-09-27T20:55:51
| 299,117,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
deploy-many-apps.R
|
library("tidyverse")
library("rsconnect")
# NOTE(review): this bare string is a no-op expression -- possibly a leftover
# path that was meant to be assigned; confirm and remove if unused.
"the_shiny_app/"
# Table of apps to deploy: local directory + target app name
apps_tib <- tribble(
  ~appDir, ~appName,
  "the_shiny_app", "foobar-app",
  "the_shiny_app", "fubar-app"
)
# Deploy a single Shiny app to shinyapps.io. Returns TRUE on success and
# FALSE on failure so it can be mapped over a table of apps.
#
# Args:
#   name_of_app:      the appName to deploy under.
#   directory_of_app: the local directory containing the app.
#
# Fix: the original ignored both arguments (hard-coded appDir/appName) and
# passed `finally = TRUE`, which is a no-op expression.
deploy_app <- function(name_of_app,
                       directory_of_app){
  tryCatch({
    deployApp(
      account = "visibledata",
      appDir = directory_of_app,
      appName = name_of_app,
      forceUpdate = TRUE
    )
    TRUE
  },
  error = function(e) {
    FALSE
  })
}
# Deploy each app listed in apps_tib and record success per row.
# Fix: the original `map_lgl(list(appDir, appName), ~deploy_app(.x, .y))`
# iterated a single 2-element list and referenced an undefined `.y`;
# map2_lgl pairs the two columns element-wise instead.
apps_tib %>%
  mutate(deployed = map2_lgl(appName, appDir, ~deploy_app(.x, .y)))
|
ed70b54a059331f32d0f7e8b39b02d300676632b
|
029f297321558c468dc7618756ec72597e2a1876
|
/BlackFriday.R
|
ff5393ef6b72a4d888aeed636f3864df52f5a58d
|
[] |
no_license
|
RakeshKumarA/Project-1
|
2db4270cd95c01b62b182e0e4634eb2f9db293b2
|
73921b246e4c48d633d68afd41c34477a19e2e6b
|
refs/heads/master
| 2020-12-25T14:24:03.760539
| 2016-09-11T12:51:15
| 2016-09-11T12:51:15
| 67,844,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,573
|
r
|
BlackFriday.R
|
##Black Friday Problem using H20
##Setting Environment for H20
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing -- library() would fail fast here.
require(data.table)
##Read Testing and Training data (stringsAsFactors so IDs/categories are factors)
train <- fread("train.csv",stringsAsFactors = T)
test <- fread("test-comb.csv",stringsAsFactors = T)
##Dimensions of train and test data
dim(train)
dim(test)
str(train)
##Baseline submission: predict the global mean purchase for every test row
sub_mean <- data.frame(User_ID = test$User_ID, Product_ID = test$Product_ID, Purchase = mean(train$Purchase))
write.csv(sub_mean,"first_sub.csv",row.names = F)
##Second Approach
##Summary of Data
summary(train)
summary(test)
##Add Purchase column in test data (placeholder value so train/test can be
##row-bound; note this placeholder later leaks into the per-user/per-product
##mean features computed on the combined data)
test$Purchase <- mean(train$Purchase)
head(test)
str(test)
require(dplyr)
test <- select(test,-Comb)
## Combine Train and Test
combin <- rbind(train,test)
##Analyze univariate (proportions of each level)
##Gender Variable
combin[,prop.table(table(Gender))]
##Age
combin[,prop.table(table(Age))]
##City Category Variable
combin[,prop.table(table(City_Category))]
##Stay in City in years
combin[,prop.table(table(Stay_In_Current_City_Years))]
##Unique values of User Id and Product Id
NROW(unique(combin$User_ID))
NROW(unique(combin$Product_ID))
colSums(is.na(combin))
##We need to encode Gender variable into 0 and 1 (good practice).
##We’ll also need to re-code the Age bins.
##Since there are three levels in City_Category, we can do one-hot encoding.
##The “4+” level of Stay_in_Current_Years needs to be revalued.
##The data set does not contain all unique IDs. This gives us enough hint for feature engineering.
##Only 2 variables have missing values. In fact, a lot of missing values, which could be capturing a hidden trend. We’ll need to treat them differently.
##Analyze bivariate Variables
require(ggplot2)
##Age vs Gender
ggplot(combin,aes(Age)) + geom_bar(aes(fill=Gender))
##Age vs City Category
ggplot(combin,aes(Age)) + geom_bar(aes(fill=combin$City_Category))
##We can also create cross tables for analyzing categorical variables
##We use gmodels package for this
require(gmodels)
CrossTable(combin$Occupation,combin$City_Category)
##Data manipulation
## NOTE(review): the `:=` calls below require combin to be a data.table;
## the filtered `$<-` assignments also depend on that -- confirm how combin
## was constructed upstream.
##Record where product categories 2 and 3 were missing: the missingness
##pattern itself may carry a hidden trend worth modeling.
combin$Product_Category_2_NA <- ifelse(is.na(combin$Product_Category_2),1,0)
combin$Product_Category_3_NA <- ifelse(is.na(combin$Product_Category_3),1,0)
##Impute the remaining NAs with an arbitrary sentinel value (-999)
combin[is.na(combin$Product_Category_2),]$Product_Category_2 <- -999
combin[is.na(combin$Product_Category_3),]$Product_Category_3 <- -999
##Collapse the open-ended "4+" tenure level to "4"
combin[combin$Stay_In_Current_City_Years == "4+",]$Stay_In_Current_City_Years <- "4"
##Re-label the age bins with ordinal codes 0..6
levels(combin$Age)[levels(combin$Age) == "0-17"] <- 0
levels(combin$Age)[levels(combin$Age) == "18-25"] <- 1
levels(combin$Age)[levels(combin$Age) == "26-35"] <- 2
levels(combin$Age)[levels(combin$Age) == "36-45"] <- 3
levels(combin$Age)[levels(combin$Age) == "46-50"] <- 4
levels(combin$Age)[levels(combin$Age) == "51-55"] <- 5
levels(combin$Age)[levels(combin$Age) == "55+"] <- 6
## BUG FIX: as.numeric() on a factor returns the internal level codes (1..7),
## not the labels (0..6) assigned above. Convert through character so the
## intended 0..6 coding is preserved.
combin$Age <- as.numeric(as.character(combin$Age))
#convert Gender into numeric 0/1
combin[, Gender := as.numeric(as.factor(Gender)) - 1]
##Feature engineering
##Number of purchases made by each user
combin[, User_Count := .N, by = User_ID]
##Number of times each product has been purchased
combin[, Product_Count := .N, by = Product_ID]
#Mean purchase amount per product
combin[, Mean_Purchase_Product := mean(Purchase), by = Product_ID]
#Mean purchase amount per customer
combin[, Mean_Purchase_User := mean(Purchase), by = User_ID]
##One-hot encode City_Category (creates City_Category_A/B/C columns)
## NOTE(review): the 'dummies' package is archived on CRAN; consider
## stats::model.matrix() as a maintained replacement.
require(dummies)
combin <- dummy.data.frame(combin, names = c("City_Category"), sep = "_")
sapply(combin, class)
#ensure product categories 2 and 3 are integer columns
combin$Product_Category_2 <- as.integer(combin$Product_Category_2)
combin$Product_Category_3 <- as.integer(combin$Product_Category_3)
##Model building with H2O
##Split back into train and test (combin was train stacked on top of test)
c.train <- combin[1:NROW(train),]
c.test <- combin[-(1:NROW(train)),]
##Drop Product_Category_1 levels above 18 from training
## NOTE(review): presumably these levels are absent/rare in test -- confirm.
c.train <- c.train[c.train$Product_Category_1 <=18,]
require(h2o)
#start a local H2O cluster; nthreads = -1 uses all available cores
localH2O <- h2o.init(nthreads = -1)
#push both data frames to the h2o cluster
train.h2o <- as.h2o(c.train)
test.h2o <- as.h2o(c.test)
#column index of the dependent variable (Purchase)
y.dep <- 14
#column indices of the independent variables (ID columns dropped)
x.indep <- c(3:13,15:20)
#Multiple (gaussian) regression in H2O
regression.model <- h2o.glm(y = y.dep, x = x.indep, training_frame = train.h2o, family = "gaussian")
h2o.performance(regression.model)
##Predict on test and write a submission file
predict.reg <- as.data.frame(h2o.predict(regression.model, test.h2o))
sub_reg <- data.frame(User_ID = test$User_ID, Product_ID = test$Product_ID, Purchase = predict.reg$predict)
write.csv(sub_reg, file = "sub_reg.csv", row.names = F)
##Random forest in H2O (system.time reports the training wall time)
system.time(rforest.model <- h2o.randomForest(y=y.dep, x=x.indep, training_frame = train.h2o, ntrees = 1000, mtries = 3, max_depth = 4, seed = 1122))
h2o.performance(rforest.model)
#check variable importance
h2o.varimp(rforest.model)
#predictions on unseen data + submission file
system.time(predict.rforest <- as.data.frame(h2o.predict(rforest.model, test.h2o)))
sub_rf <- data.frame(User_ID = test$User_ID, Product_ID = test$Product_ID, Purchase = predict.rforest$predict)
write.csv(sub_rf, file = "sub_rf.csv", row.names = F)
##Gradient boosting machine in H2O
system.time(
  gbm.model <- h2o.gbm(y=y.dep, x=x.indep, training_frame = train.h2o, ntrees = 1000, max_depth = 4, learn_rate = 0.01, seed = 1122)
)
#predict and write submission file
predict.gbm <- as.data.frame(h2o.predict(gbm.model, test.h2o))
sub_gbm <- data.frame(User_ID = test$User_ID, Product_ID = test$Product_ID, Purchase = predict.gbm$predict)
write.csv(sub_gbm, file = "sub_gbm.csv", row.names = F)
##Deep learning in H2O: two hidden layers of 100 ReLU units, 60 epochs
system.time(
  dlearning.model <- h2o.deeplearning(y = y.dep,
                                      x = x.indep,
                                      training_frame = train.h2o,
                                      epoch = 60,
                                      hidden = c(100,100),
                                      activation = "Rectifier",
                                      seed = 1122
  )
)
h2o.performance(dlearning.model)
#predictions
predict.dl2 <- as.data.frame(h2o.predict(dlearning.model, test.h2o))
#create a data frame and write the submission file
sub_dlearning <- data.frame(User_ID = test$User_ID, Product_ID = test$Product_ID, Purchase = predict.dl2$predict)
write.csv(sub_dlearning, file = "sub_dlearning_new.csv", row.names = F)
|
07c98d21e22e9c22ac29ffaab8710133866511c3
|
767beb025b7bb92ad0fba01fb66f470d3a48b5c6
|
/R/print.GEPhenotype.R
|
fd080eda31d027d656bb7773cd026e96c28ed535
|
[] |
no_license
|
ramcqueary/gramEvol3
|
cf46ba9b88d751899b630c37ca5676e6e4a21327
|
1704cd06723402e38911f37d490da790b48b420a
|
refs/heads/master
| 2021-09-23T18:58:03.400514
| 2021-09-12T03:09:15
| 2021-09-12T03:09:15
| 249,277,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 218
|
r
|
print.GEPhenotype.R
|
function (x, ..., simplify = TRUE)
{
    # Print method body for a GEPhenotype object. A non-terminal
    # sequence (type "NT") is shown with a header followed by the raw
    # expression; anything else is converted to character (optionally
    # simplified) and printed. Extra arguments are forwarded to cat().
    if (x$type != "NT") {
        cat(as.character(x, simplify = simplify), "\n", ...)
    }
    else {
        cat("Non-Terminal Sequence:\n", x$expr, "\n", ...)
    }
}
|
3d508c8dc2f156efeebbe3cad1e485d01878904b
|
a87d2e0c6a27f28ac8d239b6285df3b0d01cad17
|
/r animate.R
|
3e24100fe347ed75043cf073db207256b77d846f
|
[] |
no_license
|
kavyakk42/Covid-19-Insights
|
a33002da84f0fe579c3f6f2253314ace1d592bfe
|
63a03ab333940924a3e319604caa1260b49dcb41
|
refs/heads/master
| 2022-11-06T10:38:06.555907
| 2020-06-20T13:55:13
| 2020-06-20T13:55:13
| 265,772,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,225
|
r
|
r animate.R
|
#ANIMATED TIME SERIES PLOT
library(ggplot2) #for bar and line graphs
library(dplyr)
library(tidyr)
library(lubridate) # for date time and month
library(gganimate) # for animation
library(gifski)
library(av)
library(gapminder) #for bubble plot
# Load the line-list data (one row per confirmed case)
data= read.csv(('latestdata.csv'),header = T)
# Parse the confirmation date (stored as day/month/year text)
datanew <- data %>%
  mutate(date_confirmation = dmy(date_confirmation))
head(datanew$date_confirmation)
# Animated cumulative time series of confirmed cases:
# count cases per day, accumulate, and reveal the curve progressively
datanew %>% group_by(date_confirmation) %>%
  summarise(count=n()) %>%
  mutate(cuml=cumsum(count)) %>%
  ggplot(aes(x=date_confirmation,y=cuml)) +
  geom_line(color='red')+
  geom_point(size=1.5)+
  geom_area(fill='red')+
  theme_bw()+
  ggtitle('Daily Cumulative values')+
  transition_reveal(cuml)
# save the most recently rendered animation to disk
anim_save('cumlplot')
#Daily case counts for 4 countries during March, with missing
#day/country combinations filled in as zero counts
#extract day and month information from the parsed dates
datanew$day <- day(datanew$date_confirmation)
datanew$month <- month(datanew$date_confirmation)
new <- datanew %>%
  filter(month == 3) %>%
  group_by(day, country) %>%
  summarise(count = n())
#complete() inserts the absent (day, country) pairs with count = 0
new <- data.frame(complete(new, day, country,
                           fill = list(count = 0)))
# BUG FIX: the country label is "United Kingdom" (capital K) everywhere else
# in this script; the lowercase "United kingdom" matched no rows and silently
# dropped the UK series from the plot.
new %>% filter(country == 'United States' |
                 country == 'France' |
                 country == 'United Kingdom' |
                 country == 'China') %>%
  ggplot(aes(x = day, y = count, group = country, color = country)) +
  geom_line() +
  geom_point() +
  theme_bw() +
  ggtitle('Animated Daily Line Plot') +
  transition_reveal(day)
#animated bar plots
#case counts per country for February and March
new <- datanew %>%
  filter(country == 'United States' |
           country == 'France' |
           country == 'United Kingdom' |
           country == 'China') %>%
  filter(month == 2 | month == 3) %>%
  group_by(country, month) %>%
  summarise(count = n())
# BUG FIX: guides(Fill = F) was silently ignored -- `Fill` is not a ggplot2
# aesthetic name. guides(fill = "none") actually suppresses the fill legend.
p <- new %>% ggplot(aes(x = country, y = count, fill = country)) +
  geom_bar(stat = 'identity') +
  geom_point(size = 1.5) +
  scale_y_log10() +
  theme_bw() +
  guides(fill = "none")
#animate by month (frames interpolate between February and March)
p + transition_time(as.integer(month)) +
  labs(title = 'Animated bar plot for covd 19 by month',
       subtitle = 'Month time')
#animate by country, keeping previous frames visible
p + transition_states(count) +
  labs(title = 'Animated animated bar plot by country ') +
  shadow_mark() +
  enter_grow()
#case counts per country on the 1st and 30th of the month
# NOTE(review): day==1|day==30 compares only two single days -- confirm this
# is the intended contrast rather than a day range.
new1 <- datanew %>%
  filter(country == 'United States' |
           country == 'France' |
           country == 'United Kingdom' |
           country == 'China') %>%
  filter(day == 1 | day == 30) %>%
  group_by(country, day) %>%
  summarise(count = n())
p1 <- new1 %>% ggplot(aes(x = country, y = count, fill = country)) +
  geom_bar(stat = 'identity') +
  geom_point(size = 1.5) +
  scale_y_log10() +
  theme_bw() +
  guides(fill = "none")
#animated bar plot by day and country
p1 + transition_time(as.integer(day)) +
  labs(title = 'Animated bar plot for covd 19 by month',
       subtitle = 'Month time')
#bubble plot of the gapminder data
head(gapminder)
# BUG FIXES: (1) the gapminder column is `lifeExp` -- the original `lifeexp`
# errors when the plot is rendered; (2) `alpha=0,7` passed alpha = 0 plus a
# stray positional argument 7 -- the intended transparency is alpha = 0.7;
# (3) axis-label typo "Capital" -> "Capita".
p2 <- ggplot(gapminder, aes(x = gdpPercap, y = lifeExp,
                            size = pop, color = country)) +
  geom_point(show.legend = FALSE, alpha = 0.7) +
  scale_x_log10() +
  labs(x = 'GDP per Capita',
       y = 'Life Expectancy')
|
f6af45c856d7801229c3c7627ec61d08efdbc006
|
e9f14b14f1ce16f78adf86e7fda095c57662d27d
|
/R/splitmix.R
|
9917c60e6f46daf70e0d0122988111c1433c4c5e
|
[] |
no_license
|
chavent/PCAmixdata
|
9c224aab5618ed440749b0bac1ac2b7b937d6e6e
|
428226c55bbdf81b3a0a9bb16607727ced179d9b
|
refs/heads/master
| 2023-05-10T19:38:37.558635
| 2022-12-05T16:07:57
| 2022-12-05T16:07:57
| 36,010,690
| 8
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 746
|
r
|
splitmix.R
|
#' Split a data frame into its quantitative and qualitative parts.
#'
#' Columns of first class \code{factor}, \code{character} or \code{ordered}
#' go to \code{X.quali}; \code{numeric}/\code{integer} columns go to
#' \code{X.quanti}. Any other column class raises an error.
#'
#' @param data A data frame (or object coercible to one).
#' @return A list with \code{X.quanti}, \code{X.quali} (data frames or
#'   \code{NULL}) and the matching column indices \code{col.quant},
#'   \code{col.qual}.
#' @export
splitmix <- function(data) {
  data <- data.frame(data, check.names = TRUE)
  X.quanti <- X.quali <- NULL
  col.quant <- c()
  col.qual <- c()
  # seq_along() is safe for zero-column input (1:length(x) would yield
  # c(1, 0)); class(..)[1] keeps the previous behaviour for multi-class
  # columns such as ordered factors, where the first class decides.
  for (j in seq_along(data)) {
    cls <- class(data[[j]])[1]
    if (cls == "factor" || cls == "character" || cls == "ordered") {
      col.qual <- c(col.qual, j)
    } else if (cls == "numeric" || cls == "integer") {
      col.quant <- c(col.quant, j)
    } else {
      stop("Undefined data type")
    }
  }
  # drop = FALSE keeps single-column selections as data frames
  if (length(col.quant) != 0) X.quanti <- data[, col.quant, drop = FALSE]
  if (length(col.qual) != 0) X.quali <- data[, col.qual, drop = FALSE]
  return(list(X.quanti = X.quanti, X.quali = X.quali,
              col.quant = col.quant, col.qual = col.qual))
}
|
997a6cf0e165d87a41cbdeccb2426307ca372846
|
953de0b74445c535e97c5c834ab2a5cca37143eb
|
/R/mixVectors.R
|
d8d3105311b3f66067f16e5577ed85ccf8224464
|
[] |
no_license
|
fischuu/Luke
|
725af9739c1207bc6f425d8bfabacdf63bc3d36b
|
8485e1eabe73df5cf66100d121851a39641ddb36
|
refs/heads/master
| 2022-09-06T06:20:32.057076
| 2022-08-29T04:12:26
| 2022-08-29T04:12:26
| 55,674,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
mixVectors.R
|
# Interleave two vectors element by element: x1, y1, x2, y2, ...
# Stacking the inputs as the two rows of a matrix and then reading the
# matrix column-wise produces exactly the alternating order.
mixVectors <- function(x, y) {
  stacked <- rbind(x, y)
  unlist(c(stacked))
}
|
f1bb3989756ce483bb6f196a8fb8f93338c58b6f
|
4440906c05aab8b4fb8906d2dd2658cc5d00009e
|
/R/conf_bands.R
|
02a2fcc05cf30417d342ca47339d1667b16adb4c
|
[] |
no_license
|
djluckett/svmroc
|
e5508c04a721804e0026cf6b31d0d16d18a5d717
|
457d095ec6c2e59af36eda34cc8022181d7762b3
|
refs/heads/master
| 2020-03-28T04:35:18.089423
| 2019-10-04T21:28:28
| 2019-10-04T21:28:28
| 147,724,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,631
|
r
|
conf_bands.R
|
# conf_bands.R
#' SVM ROC confidence bands
#'
#' \code{conf_band} constructs bootstrap confidence bands for the SVM ROC curve from
#' an object of class \code{svmroc}.
#'
#' @param object An object of class \code{svmroc}.
#' @param num_boot Number of bootstrap replications. Defaults to 1000.
#' @param gamma Complement of confidence level, e.g., \code{gamma = 0.1}
#' will produce 90\% confidence bands. Defaults to 0.1.
#' @param x Values used for interpolation. Defaults to \code{seq(0.01, 0.99, 0.01)}.
#'
#' @return An object of class \code{conf_band}, a list with the following components:
#' \code{lower}, a vector of values for the lower confidence band; \code{upper}, a
#' vector of values for the upper confidence band; \code{y}, values of the ROC curve;
#' and \code{x}, values used for interpolation.
#'
#' @export
conf_band = function(object, num_boot = 1000, gamma = 0.1, x = seq(0.01, 0.99, 0.01)) {
  # NOTE(review): num_boot is assumed even -- num_boot/2 is used as a matrix
  # column index below; an odd value would produce fractional indices.
  # extract estimated sensitivities, specificities and true labels
  sens = object$sens
  spec = object$spec
  actual = object$new_A
  # predicted labels at every weight on the fitted path
  pred = list()
  for (i in 1:length(object$weights)) {
    new_pred = predict(object, object$new_X, object$weights[i])
    pred[[i]] = new_pred
  } # end loop through weights
  # ensure matrices and get n
  sens = as.matrix(sens)
  spec = as.matrix(spec)
  n = length(actual)
  # linearly interpolate the point estimate of the ROC curve onto grid x
  y_hat = approx(x = 1 - spec, y = sens, xout = x, yleft = 0, yright = 1)$y
  # storage for the bootstrap-weighted curves
  sens_tilde = matrix(NA, nrow = length(pred), ncol = num_boot)
  spec_tilde = matrix(NA, nrow = length(pred), ncol = num_boot)
  y_tilde = matrix(NA, nrow = length(x), ncol = num_boot)
  # loop through bootstrap replications
  for (b in 1:num_boot) {
    # exponential (Bayesian-bootstrap-style) weights, normalized to mean 1
    weights = rexp(n, 1)
    weights = weights / mean(weights)
    # weighted sensitivity/specificity at each weight on the path;
    # levels(actual)[2] is treated as the positive class
    for (k in 1:length(pred)) {
      sens_tilde[k, b] = mean(weights * as.numeric(actual == levels(actual)[2]) * as.numeric(pred[[k]] == levels(actual)[2])) / mean(weights * as.numeric(actual == levels(actual)[2]))
      spec_tilde[k, b] = mean(weights * as.numeric(actual == levels(actual)[1]) * as.numeric(pred[[k]] == levels(actual)[1])) / mean(weights * as.numeric(actual == levels(actual)[1]))
    } # end loop through alphas
    # linearly interpolate the bootstrap weighted ROC curve onto grid x
    y_tilde[ , b] = approx(x = 1 - spec_tilde[ , b], y = sens_tilde[ , b], xout = x,
                           yleft = 0, yright = 1)$y
  } # end loop through bootstrap samples
  # for each grid point, sort the bootstrap curve values
  y_tilde_ordered = t(apply(y_tilde, 1, sort))
  # pointwise medians over bootstrap samples (not used below; kept as-is)
  y_check = apply(y_tilde_ordered, 1, median)
  # initialize the candidate lower/upper envelopes
  ell = rep(NA, length(x))
  u = rep(NA, length(x))
  # shrink the band from the widest envelope (s = num_boot/2) toward the
  # median until simultaneous coverage first drops below 1 - gamma
  for (s in (num_boot / 2):1) {
    old_ell = ell
    old_u = u
    ell = y_tilde_ordered[ , num_boot / 2 - s + 1]
    u = y_tilde_ordered[ , num_boot / 2 + s]
    # proportion of bootstrap curves lying entirely inside [ell, u]
    cover = rep(1, num_boot)
    for (b in 1:num_boot) {
      for (k in 1:length(x)) {
        if (y_tilde[k, b] < ell[k] | y_tilde[k, b] > u[k]) {
          cover[b] = 0
          break
        }
      } # end loop through x values
    } # end loop through bootstrap samples
    # check coverage proportion
    cover_prob = mean(cover)
    if (cover_prob < 1 - gamma) {
      break
    }
  } # end loop through steps away from median
  # use the previous (last sufficient) envelope as the band
  # NOTE(review): if coverage fails already at the widest envelope, old_u and
  # old_ell are still the initial NA vectors and the returned band is all NA
  # -- unlikely in practice, but worth a guard.
  up = old_u
  low = old_ell
  upper = up
  lower = low
  # truncate both bands to [0, 1]
  upper = pmin(upper, 1)
  upper = pmax(upper, 0)
  lower = pmin(lower, 1)
  lower = pmax(lower, 0)
  # where the lower band saturates near 1, replace it with a straight-line
  # interpolation up to (1, 1) to avoid a flat artifact
  if (sum(lower >= 0.95) > 0) {
    temp_inds = which(lower >= 0.95)
    lower[temp_inds] = approx(x = c(x[min(temp_inds)], 1), y = c(lower[min(temp_inds)], 1), xout = x[temp_inds], yleft = 0, yright = 1)$y
  }
  # package the band, the point estimate, and the grid
  to_return = list(lower = lower, upper = upper, y = y_hat, x = x)
  class(to_return) = "conf_band"
  return(to_return)
} # end function conf_band
#' Plot SVM ROC curve confidence bands
#'
#' \code{plot.conf_band} produces a plot of the ROC curve with confidence bands
#' from an object of class conf_band.
#'
#' @param object An object of class \code{conf_band}.
#' @param xlab Label for X axis. Defaults to "One minus specificity".
#' @param ylab Label for Y axis. Defaults to "Sensitivity".
#' @param include_opt Logical. If \code{TRUE}, the optimal point on the ROC curve
#' (the closest to (0, 1) in Euclidean distance) is marked on the plot.
#' Defaults to \code{TRUE}.
#'
#' @return A plot as a object of class \code{ggplot}.
#'
#' @export
plot.conf_band = function(object, xlab = "One minus specificity", ylab = "Sensitivity",
                          include_opt = T) {
  # Anchor the curve and band at (0, 0) and (1, 1) for plotting.
  plot_data <- data.frame(
    y = c(0, object$y, 1),
    low = c(0, object$lower, 1),
    up = c(0, object$up, 1)[0] # placeholder removed below
  )
  plot_data <- data.frame(
    y = c(0, object$y, 1),
    low = c(0, object$lower, 1),
    up = c(0, object$upper, 1),
    x = c(0, object$x, 1)
  )
  # ROC curve, shaded confidence band, and the chance diagonal.
  fig <- ggplot(plot_data, aes(x = x, y = y)) +
    geom_line(aes(x = x, y = y, linetype = "solid")) +
    geom_ribbon(aes(x = x, ymin = low, ymax = up), alpha = 0.3) +
    geom_segment(x = 0, y = 0, xend = 1, yend = 1, linetype = 2) +
    theme_classic() +
    theme(legend.position = "none") +
    labs(x = xlab, y = ylab)
  # Optionally mark the point on the curve closest to (0, 1).
  if (include_opt) {
    opt <- opt_weight(object)
    fig <- fig + geom_point(x = 1 - opt$opt_spec, y = opt$opt_sens)
  }
  fig
} # end function plot.conf_band
#' Calculate area between the curves
#'
#' \code{abc} is used to calculate the area between two confidence band curves.
#'
#' @param object An object to calculate area between the curves.
#'
#' @return The numeric area between the curves.
#'
#' @export
abc = function(object) {
  # S3 generic: dispatch on the class of 'object' (see abc.conf_band).
  UseMethod("abc", object)
} # end function abc
# function to compute area between the curve for a conf_band object
# calls the generic directly, so there is no need for documentation
#' @export
abc.conf_band = function(object) {
  # Area between the upper and lower confidence-band curves of a conf_band
  # object, computed as AUC(upper) - AUC(lower). Both curves are anchored
  # at (0, 0) and (1, 1) before integrating.
  # Trapezoidal area under one band curve; factored out because the
  # original duplicated this computation for upper and lower.
  trap_area <- function(band) {
    x <- c(0, object$x, 1)
    y <- c(0, band, 1)
    idx <- 2:length(x)
    abs(as.double((x[idx] - x[idx - 1]) %*% (y[idx] + y[idx - 1])) / 2)
  }
  # return area between curves
  trap_area(object$upper) - trap_area(object$lower)
} # end function abc.conf_band
#' Determine coverage
#'
#' \code{get_coverage} is used to determine if confidence band covers true ROC curve.
#'
#' @param conf_band An object of class \code{conf_band}.
#' @param svmroc An object of class \code{svmroc}.
#' @param new_X New X matrix to determine true ROC curve.
#' @param new_A New class assignments to determine true ROC curve.
#'
#' @return Numeric indicator of coverage: 1 if the confidence band covers the
#' true ROC curve on the interior grid (0.05 < x < 0.95) and 0 if it does not.
#' (Only this indicator is returned; the interpolated true ROC curve is
#' computed internally but not returned.)
#'
#' @export
get_coverage = function(conf_band, svmroc, new_X, new_A) {
  # The new labels must use exactly the levels seen in the original fit.
  if (!(identical(levels(as.factor(new_A)), levels(as.factor(svmroc$new_A))))) {
    stop("'as.factor(new_A)' must have the same levels as the class labels in the original fit.")
  }
  # The new covariates must match the original design dimension.
  if (ncol(new_X) != ncol(svmroc$new_X)) {
    stop("'new_X' must have the same number of columns as the covariate matrix used to obtain 'svmroc'")
  }
  # Estimate the "true" ROC curve on the independent sample and interpolate
  # it onto the same grid the confidence band uses.
  roc_est = fit_roc(object = svmroc, new_X = new_X, new_A = new_A)
  truth = approx(x = 1 - roc_est$spec, y = roc_est$sens, xout = conf_band$x,
                 yleft = 0, yright = 1)$y
  # Coverage is assessed only on the interior of the unit interval.
  interior = which(conf_band$x > 0.05 & conf_band$x < 0.95)
  # 1 if no interior point of the true curve escapes the band, else 0.
  n_outside = sum(truth[interior] > conf_band$upper[interior]) +
    sum(truth[interior] < conf_band$lower[interior])
  coverage = 1 - as.numeric(n_outside > 0)
  return(coverage)
} # end function get_coverage
|
6252dc995fa1ca1a93db0d8089a5f22974c53164
|
3812d02a0b7a2b17ffae371fc2d41076fe2ad868
|
/R/coef.PSM.R
|
3f217e39f13fecd9b98690b407daae4188d9861c
|
[] |
no_license
|
reealpeppe/PSM
|
eef24128396d06bfa1ff1203bc35b6ba99b17463
|
027a8440461c5d3e698ab42620599fdbb2c96d5c
|
refs/heads/main
| 2023-07-24T05:44:06.384114
| 2021-09-06T20:36:13
| 2021-09-06T20:36:13
| 401,394,193
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
coef.PSM.R
|
#' Extract Model Coefficients
#' @description Extracts model coefficients beta and delta
#' @param object an object of class 'PSM' returned by PartialSplines function
#'
#' @return a list containing the coefficients
#' @export
#'
#' @examples
#'
#' x <- data.frame(x = rnorm(100))
#' tx <- rnorm(100)
#' eps <- rnorm(100)
#' y <- x[,1] + tx^4 + eps
#' psm <- PartialSplines(y, x, tx)
#' coef(psm)
#'
coef.PSM <- function(object) {
  # Return the parametric (beta) and spline (delta) coefficient vectors
  # of a fitted PSM object as a named list.
  list(beta = object$beta, delta = object$delta)
}
|
f9ac9b8e0fe4bfb51aafbeacbe6d1171feb84f47
|
30712e0ebb841fb214a73024acdc1ba8b571c82a
|
/gissr/gissr_part2/man/sp_projection.Rd
|
ba6f88187c56a70b50710fd4f655c70dc2e5017c
|
[] |
no_license
|
karaframe/R_useful_packages
|
93bc9fe28f9411d2986c09a4529c92bf5d195583
|
b132d94eb7f54129253fc51ce1e7a01b4ee0692e
|
refs/heads/master
| 2021-01-11T06:31:41.654758
| 2016-09-26T08:12:45
| 2016-09-26T08:12:45
| 69,226,910
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 372
|
rd
|
sp_projection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sp_projection.R
\name{sp_projection}
\alias{sp_projection}
\title{Function to return a spatial object's projection system.}
\usage{
sp_projection(sp)
}
\arguments{
\item{sp}{Spatial object}
}
\description{
Function to return a spatial object's projection system.
}
\author{
Stuart K. Grange
}
|
92b86890512c7deb7a8f2ce7a2789d5a4a46fabf
|
d70039cf9ccbc466a04c9b8cfbfad1c57a35b2bb
|
/man/retrieve_meetings.Rd
|
f9bd4facf8703f1ad366d626de31cd42dd5649a5
|
[] |
no_license
|
jkadcav/meeting
|
a211e3d52ddd63b773d5a3397b4060841671b1c8
|
0048324148a973089fb6548d66ffc800da28bd08
|
refs/heads/master
| 2021-01-13T10:47:03.130629
| 2016-11-01T06:53:24
| 2016-11-01T06:53:24
| 72,383,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 367
|
rd
|
retrieve_meetings.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meeting.R
\name{retrieve_meetings}
\alias{retrieve_meetings}
\title{Retrieve meeting events}
\usage{
retrieve_meetings(date, animal)
}
\arguments{
\item{date}{date of the meetings to retrieve}
\item{animal}{animal/race type used for retrieval}
}
\description{
Retrieve meeting events
}
\examples{
retrieve_meetings()
}
\keyword{events}
|
6d2d32c3f6247e1001ff7d6460f8cb1c37833ced
|
cef9e0516578ddc6fc2f1184b8f4850408887c0d
|
/R/lazyFactorCoding.R
|
01bbc562041dac9184cbef43d139f1565d2df724
|
[] |
no_license
|
tagteam/Publish
|
62cad3b48930743d19b5246ce2f8aeb7343ebe98
|
b027ce49f882ee051118247997f0a931cedcd46e
|
refs/heads/master
| 2023-01-28T04:10:25.237750
| 2023-01-17T14:21:03
| 2023-01-17T14:21:03
| 32,985,684
| 17
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,897
|
r
|
lazyFactorCoding.R
|
##' This function eases the process of generating factor variables
##' with relevant labels. All variables in a data.frame with less than
##' a user set number of levels result in a line which suggests levels and
##' labels. The result can then be modified for use.
##'
##' The code needs to be copy-and-pasted from the R-output
##' buffer into the R-code buffer. This can be customized
##' for the really efficiently working people e.g. in emacs.
##' @title Efficient coding of factor levels
##' @param data Data frame in which to search for categorical variables.
##' @param max.levels Treat non-factor variables only if the number of unique values less than max.levels. Defaults to 10.
##' @return R-code one line for each variable.
##' @author Thomas Alexander Gerds
##' @examples
##' data(Diabetes)
##' lazyFactorCoding(Diabetes)
##'
##' @export
lazyFactorCoding <- function(data,max.levels=10){
  # Accept either a data frame or its name; capture the *name* so the
  # generated code can reference the object textually.
  if (!is.character(data))
    data <- as.character(substitute(data))
  d <- get(data, envir=parent.frame())
  # Non-FALSE (a positive match position) when 'd' is a data.table; chooses
  # the := flavour of generated code. Positive integers are truthy in if().
  isdt <- match("data.table",class(d),nomatch=FALSE)
  out <- lapply(names(d),function(x){
    dx <- d[[x]]
    # Treat a column as categorical when it is a factor or has fewer than
    # max.levels unique values. NOTE(review): the first clause is redundant
    # -- the second length(unique(dx)) < max.levels test covers it too.
    if ((is.factor(dx) && length(unique(dx))<max.levels) || (length(unique(dx))<max.levels)){
      # Existing factor levels are kept in order; other columns get their
      # sorted unique values as levels.
      levs.x <- if (is.factor(unique(dx))) levels(dx) else sort(unique(dx))
      # Quote and comma-join the levels: "a","b","c"
      labels.x <- paste("\"",paste(levs.x,collapse="\",\"",sep=""),"\"",sep="")
      if (isdt){
        # data.table flavour: DT[, col := factor(col, levels=..., labels=...)]
        paste0(data,"[",",",x,":=factor(",x,",levels=c(",as.character(labels.x),"),labels=c(",as.character(labels.x),"))]\n")
      }else{
        # data.frame flavour: df$col <- factor(df$col, levels=..., labels=...)
        obj.x <- paste(data,"$",x,sep="")
        paste(obj.x," <- factor(",obj.x,",levels=c(",as.character(labels.x),"),labels=c(",as.character(labels.x),"))\n",sep="")
      }
    }
    else NULL
  })
  # Drop columns that were not treated as categorical.
  out <- out[!sapply(out,is.null)]
  # Print one generated line per variable; return the lines invisibly so
  # they can also be captured programmatically.
  sapply(unlist(out),cat)
  invisible(out)
}
|
421488e9cbf71c1b750986be73964b8fdb830362
|
15c8bab24090d3d3f9324ad3aa718b0f8957a8b0
|
/week_05/day_3/download_app/app.R
|
2b8f1c99ed17dac83a974c24e1943f6704420513
|
[] |
no_license
|
paddyhudson/codeclan_homework_PaddyHudson
|
1caf14758c60a6df15b7c5c10a92ea586cde73fc
|
09997e0d068a17a6eb50c00f9121fa023114b52e
|
refs/heads/main
| 2023-08-20T11:56:50.493952
| 2021-10-07T08:48:25
| 2021-10-07T08:48:25
| 387,508,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,602
|
r
|
app.R
|
library(shiny)
library(CodeClanData)
library(tidyverse)
library(shinythemes)
library(rmarkdown)
# UI: two tabs -- "Plot" shows the medal chart with season/medal controls,
# "Download" offers the same analysis as a rendered report.
ui <- fluidPage(
  theme = shinytheme("superhero"),
  titlePanel("Five Country Medal Comparison"),
  tabsetPanel(
    tabPanel("Plot",
             plotOutput("plot"),
             fluidRow(
               column(6,
                      # single-choice season selector
                      radioButtons("season", "Summer or Winter Olympics?",
                                   c("Summer", "Winter")),
               ),
               column(6,
                      # zero or more medal types may be selected
                      checkboxGroupInput(
                        "medal",
                        "Medal Type?",
                        c("Gold", "Silver", "Bronze"),
                        inline = TRUE
                      )
               )
             )),
    tabPanel("Download",
             # output format for the rendered report
             radioButtons("format",
                          "Document format",
                          c("PDF", "HTML", "Word"),
                          inline = TRUE
             ),
             downloadButton("download_report")
    )
  )
)
server <- function(input, output){
  # Medal-count bar chart for five fixed teams, filtered by the
  # season and medal-type inputs from the UI.
  output$plot <- renderPlot({
    olympics_overall_medals %>%
      filter(team %in% c("United States",
                         "Soviet Union",
                         "Germany",
                         "Italy",
                         "Great Britain")) %>%
      filter(medal %in% input$medal) %>%
      filter(season == input$season) %>%
      ggplot() +
      aes(x = team, y = count, fill = medal) +
      geom_col(position = "dodge") +
      scale_fill_manual(values = c("Gold" = "gold",
                                   "Silver" = "gray70",
                                   "Bronze" = "darkorange"))
  })
  # Download functionality, triggered when download_report is clicked:
  # renders report.Rmd in the chosen format with the current inputs.
  output$download_report <- downloadHandler(
    # file name my-report.(pdf|html|docx), based on the chosen format
    filename = function() {
      paste('my-report', sep = '.', switch(
        input$format, PDF = 'pdf', HTML = 'html', Word = 'docx'
      ))
    },
    # build the file content
    content = function(file) {
      # absolute path of the report template
      src <- normalizePath('report.Rmd')
      # render inside a temp dir (we may lack write permission here);
      # on.exit restores the working directory afterwards
      owd <- setwd(tempdir())
      on.exit(setwd(owd))
      # copy the template into the temp dir
      file.copy(src, 'report.Rmd', overwrite = TRUE)
      # parameters passed through to the parameterized Rmd
      params <- list(medal = input$medal, season = input$season)
      # render in a fresh environment to avoid name clashes with globals
      out <- render('report.Rmd',
                    output_format = switch(
                      input$format,
                      PDF = pdf_document(),
                      HTML = html_document(),
                      Word = word_document()
                    ),
                    params = params,
                    envir = new.env(parent = globalenv())
      )
      # move the rendered file to the path Shiny will serve
      file.rename(out, file)
    }
  )
}
shinyApp(ui = ui, server = server)
|
f8722904334f7c578828c3d184c465419f9502a8
|
e06965698053952f7f97c60349a590e42d08b633
|
/inst/test_files/test_sketch_2.R
|
41897814ac8960d09dc4cd9b24a26675a319d61c
|
[
"Apache-2.0"
] |
permissive
|
kcf-jackson/sketch
|
a9940c89ed8183627914861a11893856b1c47429
|
b597f01e540f35aab1f5ee2d3744f6f64c70c94d
|
refs/heads/master
| 2022-11-01T03:28:32.088340
| 2022-10-23T14:22:05
| 2022-10-23T14:22:05
| 222,058,097
| 106
| 5
|
NOASSERTION
| 2022-10-23T14:22:07
| 2019-11-16T06:36:59
|
HTML
|
UTF-8
|
R
| false
| false
| 113
|
r
|
test_sketch_2.R
|
# Naive doubly recursive Fibonacci: fib(0) = 0, fib(1) = 1.
# Exponential time -- fine for small n only.
fib <- function(n) {
  if (n >= 2) {
    fib(n - 1) + fib(n - 2)
  } else {
    n
  }
}
67e609490870fe66d024a5537e49e4beda6347cd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/grpreg/examples/cv-grpreg.Rd.R
|
d9d5ce18ebc62738850a154a5ee2f842bb8bcc26
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 379
|
r
|
cv-grpreg.Rd.R
|
library(grpreg)
### Name: cv.grpreg
### Title: Cross-validation for grpreg/grpsurv
### Aliases: cv.grpreg cv.grpsurv
### ** Examples
# Example data: low birth weight study shipped with grpreg
data(Birthwt)
X <- Birthwt$X          # design matrix
y <- Birthwt$bwt        # response: birth weight
group <- Birthwt$group  # group membership of the columns of X
# Cross-validated group regularization (default penalty)
cvfit <- cv.grpreg(X, y, group)
plot(cvfit)
summary(cvfit)
coef(cvfit) ## Beta at minimum CVE
# Repeat with the group exponential lasso ("gel") penalty
cvfit <- cv.grpreg(X, y, group, penalty="gel")
plot(cvfit)
summary(cvfit)
|
31a96695f24bd2b7d25d3d01e867d00e97f13ac0
|
dd83da7973b6d7bddbc6ed9718b45ee1bf8caebe
|
/plot1.R
|
f7fada06bd93b7295a6673d6f8ddbd15b16c1d7f
|
[] |
no_license
|
bulajic/ExData_Plotting1
|
5dde805b230f55d6fea0e2fad2460bd4aa1b9cc8
|
66850836ea940725b1289709288a0cb37426cf51
|
refs/heads/master
| 2021-01-17T14:07:01.272224
| 2015-06-07T14:31:57
| 2015-06-07T14:31:57
| 37,019,401
| 0
| 0
| null | 2015-06-07T14:27:08
| 2015-06-07T14:27:08
| null |
UTF-8
|
R
| false
| false
| 492
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 1-2 Feb 2007
# Download the raw archive only if it is not already cached locally,
# so re-running the script does not re-fetch the file every time.
if (!file.exists("data.zip")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "data.zip")
}
file <- unzip("data.zip")
# Read only the two relevant days; read.csv.sql filters while reading,
# so the full file never has to be loaded into memory.
library(sqldf)
data <- read.csv.sql(file, sql = "select * from file where Date in ('1/2/2007', '2/2/2007')", header = TRUE, sep = ";")
# Plot 1: histogram of global active power, written to a 480x480 PNG
png("plot1.png", width = 480, height = 480)
hist(data$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
8b969c06ef176663ffbc2145806c1dd1b6d8027e
|
b8dbee4b91b48121bff4329ce2f37c89d8836290
|
/seqUtils/man/exportDataForRasqual.Rd
|
f6ad210b91b651262a85e5ab1d7a2af77237d836
|
[
"Apache-2.0"
] |
permissive
|
kauralasoo/macrophage-tuQTLs
|
18cc359c9052bd0eab45bd27f1c333566fb181d8
|
3ca0b9159f3e5d7d1e0a07cdeadbeb492e361dcb
|
refs/heads/master
| 2021-03-27T19:29:12.456109
| 2019-02-19T13:05:26
| 2019-02-19T13:05:26
| 93,025,290
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 680
|
rd
|
exportDataForRasqual.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qtl_rasqual.R
\name{exportDataForRasqual}
\alias{exportDataForRasqual}
\title{Write multiple files for RASQUAL onto disk.}
\usage{
exportDataForRasqual(condition_list, rasqual_input_folder,
max_batch_size = 50)
}
\arguments{
\item{condition_list}{Named list of expression lists; each expression list needs to
contain at least a 'counts' matrix and a 'sample_metadata' data frame.}
\item{rasqual_input_folder}{Path to the RASQUAL input folder.}
\item{max_batch_size}{Maximal number of feaures to be included in a single batch.}
}
\description{
Write multiple files for RASQUAL onto disk.
}
|
d9e65c78a9e7032c3085a4a48649498612999132
|
86a282f2e03d0d8e64127bfe2aa4be6d968d24b4
|
/man/normpostpred.Rd
|
00805ade3cf499510e99b9819ed87b30f68ba9c2
|
[] |
no_license
|
u44027388/LearnBayes
|
fc57e5689c9619de966f4b9e0210bb3aa078ec8f
|
f7722076d01768bb845bfe9bed78c365fcf292df
|
refs/heads/master
| 2021-09-14T19:23:22.283849
| 2018-05-17T21:04:10
| 2018-05-17T21:04:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 949
|
rd
|
normpostpred.Rd
|
\name{normpostpred}
\alias{normpostpred}
\title{Posterior predictive simulation from Bayesian normal sampling model}
\description{
Given simulated draws from the posterior from a normal sampling model, outputs
simulated draws from the posterior predictive distribution of a statistic of interest.
}
\usage{
normpostpred(parameters,sample.size,f=min)
}
\arguments{
\item{parameters}{list of simulated draws from the posterior where mu contains the normal mean
and sigma2 contains the normal variance}
\item{sample.size}{size of the future sample}
\item{f}{function defining the statistic}
}
\value{
simulated sample of the posterior predictive distribution of the statistic}
\author{Jim Albert}
\examples{
# finds posterior predictive distribution of the min statistic of a future sample of size 15
data(darwin)
s=normpostsim(darwin$difference)
sample.size=15
sim.stats=normpostpred(s,sample.size,min)
}
\keyword{models}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.