content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Post-process result files for EWAS catalog study 26244061: add empty
# heterogeneity columns, back-calculate the standard error from beta and
# the two-sided p-value, and rewrite each CSV with a fixed column order.
rm(list = ls())
options(stringsAsFactors = FALSE)
# Hard-coded Windows path to this study's result files.
setwd("O:/Documents/Projects/EWAS catalog/Catalog/Results/26244061")
# Anchor the pattern so only real *.csv files are picked up (the original
# unescaped "." also matched any name merely containing "csv").
for (i in list.files(pattern = "\\.csv$")) {
  data <- read.csv(i)
  # Placeholder columns expected by the catalog upload format.
  data$i2 <- ""
  data$p_het <- ""
  data$details <- ""
  # SE recovered from beta and p assuming a two-sided Wald test:
  # se = |beta / qnorm(p / 2)|, rounded to 4 decimals. NA propagates
  # naturally, so no explicit NA re-assignment is needed.
  data$se <- round(abs(data$beta / qnorm(data$p / 2)), 4)
  # A zero SE is impossible (arises when p rounds to 1); mark it missing.
  data$se[data$se == 0] <- NA
  data <- data[, c("cpg", "beta", "se", "p", "i2", "p_het", "details")]
  # Overwrite the input file in place.
  write.csv(data, i, row.names = FALSE)
}
} | /published-ewas/study-files/26244061/26244061.R | permissive | MRCIEU/ewascatalog | R | false | false | 485 | r | rm(list=ls())
options(stringsAsFactors = F)
setwd("O:/Documents/Projects/EWAS catalog/Catalog/Results/26244061")
for (i in list.files()[grepl(".csv",list.files())]){
data <- read.csv(i)
data$i2 <- ""
data$p_het <- ""
data$details <- ""
data$se <- round(abs(data$beta/qnorm(data$p/2)),4)
data$se[is.na(data$se)] <- NA
data$se[data$se==0] <- NA
data <- data[, c("cpg", "beta", "se", "p", "i2", "p_het", "details")]
write.csv(data, paste0(i), row.names=F)
} |
# Draw a phylogenetic tree from a parent-pointer topology, colouring the
# branch segments by selective regime.
#
# Arguments (types inferred from usage -- confirm against callers):
#   topology: vector of parent indices for each node (<= 0 marks the root).
#   times:    node times, used as x coordinates.
#   names:    optional labels drawn next to the plotted segments.
#   regimes:  optional regime assignment per node; defaults to one regime.
#   cex, lwd: text / line scaling factors (default 1).
#   reg.col:  optional vector of colours indexed by regime level; when NULL
#             a rainbow() palette is used instead.
`slouchtree.plot` <-
function (topology, times, names = NULL, regimes = NULL, cex = NULL, lwd=NULL, reg.col=NULL) {
	if(is.null(cex)) cex<-1;
	if(is.null(lwd)) lwd<-1;
	# Pad the x-axis range by 10% on each side so labels are not clipped.
	rx <- range(times);
	rxd <- 0.1*diff(rx);
	# No regimes supplied: treat the whole tree as a single regime.
	if (is.null(regimes))
		regimes <- factor(rep(1,length(topology)));
	levs <- levels(as.factor(regimes));
	palette <- rainbow(length(levs));
	# One drawing pass per regime level, overlaid on the same device.
	for (r in 1:length(levs)) {
		# y coordinates come from tree.layout(), defined elsewhere.
		y <- tree.layout(topology);
		x <- times;
		# Non-root nodes belonging to the current regime, and their parents.
		f <- which(topology > 0 & regimes == levs[r]);
		pp <- topology[f];
		# Rows of (child, parent, NA): the NA breaks the polyline between
		# segments when plotted with type='l'.
		X <- array(data=c(x[f], x[pp], rep(NA,length(f))),dim=c(length(f),3));
		Y <- array(data=c(y[f], y[pp], rep(NA,length(f))),dim=c(length(f),3));
		# Duplicate every value (kronecker with a 2x1 block of ones), then
		# trim one element from opposite ends so X and Y are offset by one,
		# pairing up as segment endpoints.
		oz <- array(data=1,dim=c(2,1));
		X <- kronecker(t(X),oz);
		Y <- kronecker(t(Y),oz);
		X <- X[2:length(X)];
		Y <- Y[1:(length(Y)-1)];
		# NOTE(review): the two brace groups below are NOT an if/else pair;
		# the second is a free-standing block. The outer !is.null(regimes)
		# test is always TRUE here because regimes was defaulted above.
		if(!is.null(regimes))
		{if(is.null(reg.col))
			C <- rep(palette[r],length(X))
		}
		{if(!is.null(reg.col))
			C <- rep(reg.col[r],length(X))
		}
		# From the second regime onward, draw on top of the existing plot.
		if (r > 1) par(new=TRUE);
		par(yaxt='n')
		par(bty="n")
		par(font="2")
		plot(X,Y,type='l',col=C,lwd=lwd,xlab='time',ylab='',xlim = rx + c(-rxd,rxd),ylim=c(0,1));
		if (!is.null(names))
			# Label every 6th element (one label per drawn segment group).
			text(X[seq(1,length(X),6)],Y[seq(1,length(Y),6)],names[f],pos=4, cex=cex);
	}
	par(yaxt="s") #reset graphic parameter to default
	par(bty="o")
	par(font="1")
}
| /S34_S38_phylogenetic_comparative_methods/scripts/resources/slouch/R/slouchtree.plot.R | no_license | hj1994412/teleost_genomes_immune | R | false | false | 1,375 | r | `slouchtree.plot` <-
function (topology, times, names = NULL, regimes = NULL, cex = NULL, lwd=NULL, reg.col=NULL) {
if(is.null(cex)) cex<-1;
if(is.null(lwd)) lwd<-1;
rx <- range(times);
rxd <- 0.1*diff(rx);
if (is.null(regimes))
regimes <- factor(rep(1,length(topology)));
levs <- levels(as.factor(regimes));
palette <- rainbow(length(levs));
for (r in 1:length(levs)) {
y <- tree.layout(topology);
x <- times;
f <- which(topology > 0 & regimes == levs[r]);
pp <- topology[f];
X <- array(data=c(x[f], x[pp], rep(NA,length(f))),dim=c(length(f),3));
Y <- array(data=c(y[f], y[pp], rep(NA,length(f))),dim=c(length(f),3));
oz <- array(data=1,dim=c(2,1));
X <- kronecker(t(X),oz);
Y <- kronecker(t(Y),oz);
X <- X[2:length(X)];
Y <- Y[1:(length(Y)-1)];
if(!is.null(regimes))
{if(is.null(reg.col))
C <- rep(palette[r],length(X))
}
{if(!is.null(reg.col))
C <- rep(reg.col[r],length(X))
}
if (r > 1) par(new=TRUE);
par(yaxt='n')
par(bty="n")
par(font="2")
plot(X,Y,type='l',col=C,lwd=lwd,xlab='time',ylab='',xlim = rx + c(-rxd,rxd),ylim=c(0,1));
if (!is.null(names))
text(X[seq(1,length(X),6)],Y[seq(1,length(Y),6)],names[f],pos=4, cex=cex);
}
par(yaxt="s") #reset graphic parameter to default
par(bty="o")
par(font="1")
}
|
# ui.R - BMI calculator
#
# AUTHOR
# H. Barrientos
#
# DATE
# 2016-06-24
#
# DESCRIPTION
# This app calculates a person's Body Mass Index, also known as BMI, and visually
# reports it to the user via an attention-catching GoogleVis Gauge. The gauge is
# preset using three colors and BMI value ranges reported in several medical
# websites.
#
# There are two sliders for user input: one for height in centimeters, and another
# one for weight in kilograms. These sliders have been preset with value ranges for
# adult persons. The user just needs to select the desired values, and the app will
# respond immediately. In addition to the colors and BMI index shown by the gauge,
# a documentation table is also presented to the user containing the BMI value ranges
# and the corresponding health condition for each range.
#
# Health condition indicator colors: GREEN - normal weight; AMBER - overweight; RED - obese.
# Height slider range: 100 - 210 cm (matches heightSliderMin/Max below).
# Weight slider range: 40 - 250 kg.
# Load required libraries
library(shiny)
# User interface texts
appTitle <- "BMI Calculator"
appOwner <- "A free service by The Health Corner"
callToAction_1 <- "Stop! Take a minute to check your health risk by calculating your Body Mass Index, or BMI."
callToAction_2 <- "Simply move the sliders to indicate your height and weight, and compare the result against the gauge indicator and the BMI table."
# Slider labels and value ranges
heightSliderLabel <- "Height in cm:"
weightSliderLabel <- "Weight in kg:"
heightSliderMin <- 100
heightSliderMax <- 210
heightSliderPreset <- 170
weightSliderMin <- 40
weightSliderMax <- 250
weightSliderPreset <- 65
# BMI documentation table, rendered as raw HTML in the sidebar.
# (Closing </font> tags fixed; the original emitted invalid </font-color>.)
bmiInfoTable <-
  "<div align='left'>
    <strong><font size='3'>BMI TABLE</font></strong>
    <table width='100%'>
      <TR>
        <TD width='20%'><strong>Value</strong></TD>
        <TD width='35%'><strong>Condition</strong></TD>
        <TD width='45%'><strong>Health Risk</strong></TD>
      </TR>
      <TR>
        <TD width='20%'>19 - 24</TD>
        <TD width='35%'><strong><font color='green'>Normal weight</font></strong></TD>
        <TD width='45%'>Low</TD>
      </TR>
      <TR>
        <TD width='20%'>25 - 29</TD>
        <TD width='35%'><strong><font color='orange'>Overweight</font></strong></TD>
        <TD width='45%'>Medium</TD>
      </TR>
      <TR>
        <TD width='20%'>30 - 50</TD>
        <TD width='35%'><strong><font color='red'>Obese</font></strong></TD>
        <TD width='45%'>High</TD>
      </TR>
    </table>
  </div>"
# Input/output object ids shared with server.R (do not rename).
heightObjectId <- "height"
weightObjectId <- "weight"
outputObjectId <- "bmiGauge"
shinyUI(
  fluidPage(
    # Provide the title for the app, and the "app owner" name
    titlePanel(appTitle),
    h3(appOwner),
    # Create a sidebar with the sliders and the BMI table
    sidebarLayout(
      sidebarPanel(
        div(callToAction_1),
        br(),
        div(callToAction_2),
        br(),
        sliderInput(heightObjectId,
                    heightSliderLabel,
                    min = heightSliderMin,
                    max = heightSliderMax,
                    value = heightSliderPreset),
        sliderInput(weightObjectId,
                    weightSliderLabel,
                    min = weightSliderMin,
                    max = weightSliderMax,
                    value = weightSliderPreset),
        br(),
        HTML(bmiInfoTable)
      ), # END sidebarPanel
      # The gauge itself is rendered server-side into this output slot.
      mainPanel(uiOutput(outputObjectId))
    ) # END sidebarLayout
  ) # END fluidPage
) # END shinyUI
| /ui.R | no_license | hbarrien/DevelopingDataProducts | R | false | false | 3,804 | r | # ui.R - BMI calculator
#
# AUTHOR
# H. Barrientos
#
# DATE
# 2016-06-24
#
# DESCRIPTION
# This app calculates a person's Body Mass Index, also known as BMI, and visually
# reports it to the user via an attention-catching GoogleVis Gauge. The gauge is
# preset with using three colors and BMI value ranges reported in several medical
# websites.
#
# There are two sliders for user input: one for height in centimeters, and another
# one for weight in kilograms. These sliders have been preset with value ranges for
# adult persons. The user just needs to select the desired values, and the app will
# respond immediately. In addition to the colors and BMI index shown by the gauge,
# a documentation table is also presented to the user containing the BMI value ranges
# and the corresponding health condition for each range.
#
# Health condition indicator colors: GREEN - normal weight; AMBER - overweight; RED - obese.
# Height slider range: 100 - 250 cm.
# Weight slider range: 40 - 250 kg.
# Load required libraries
library(shiny)
# User interface texts
appTitle <- "BMI Calculator"
appOwner <- "A free service by The Health Corner"
callToAction_1 <- "Stop! Take a minute to check your health risk by calculating your Body Mass Index, or BMI."
callToAction_2 <- "Simply move the sliders to indicate your height and weight, and compare the result against the gauge indicator and the BMI table."
# Slider values
heightSliderLabel <- "Height in cm:"
weightSliderLabel <- "Weight in kg:"
heightSliderMin <- 100
heightSliderMax <- 210
heightSliderPreset <- 170
weightSliderMin <- 40
weightSliderMax <- 250
weightSliderPreset <- 65
# BMI documentation table
bmiInfoTable <-
"<div align='left'>
<strong><font size='3'>BMI TABLE</font></strong>
<table width='100%'>
<TR>
<TD width='20%'><strong>Value</strong></TD>
<TD width='35%'><strong>Condition</strong></TD>
<TD width='45%'><strong>Health Risk</strong></TD>
</TR>
<TR>
<TD width='20%'>19 - 24</TD>
<TD width='35%'><strong><font color='green'>Normal weight</font-color></strong></TD>
<TD width='45%'>Low</TD>
</TR>
<TR>
<TD width='20%'>25 - 29</TD>
<TD width='35%'><strong><font color='orange'>Overweight</font-color></strong></TD>
<TD width='45%'>Medium</TD>
</TR>
<TR>
<TD width='20%'>30 - 50</TD>
<TD width='35%'><strong><font color='red'>Obese</font-color></strong></TD>
<TD width='45%'>High</TD>
</TR>
</table>
</div>"
# Object ids
heightObjectId <- "height"
weightObjectId <- "weight"
outputObjectId <- "bmiGauge"
shinyUI(
fluidPage(
# Provide the title for the app, and the "app owner" name
titlePanel(appTitle),
h3(appOwner),
# Create a sidebar with the sliders and the BMI table
sidebarLayout(
sidebarPanel(
div(callToAction_1),
br(),
div(callToAction_2),
br(),
sliderInput(heightObjectId,
heightSliderLabel,
min = heightSliderMin,
max = heightSliderMax,
value = heightSliderPreset),
sliderInput(weightObjectId,
weightSliderLabel,
min = weightSliderMin,
max = weightSliderMax,
value = weightSliderPreset),
br(),
HTML(bmiInfoTable)
), # END sidebarPanel
# Call the output function
mainPanel(uiOutput(outputObjectId))
) # END sidebarLayout
) # END fluidPage
) # END shinyUI
|
# Count, for each requested monitor, the number of completely observed rows
# (both sulfate and nitrate present) in that monitor's CSV file.
#
# Args:
#   directory: path to the folder holding monitor files ("001.csv", ...).
#   id: integer vector of monitor IDs (default 1:332).
#
# Returns: a data.frame with columns `id` and `obs` (complete-case counts).
complete <- function(directory, id = 1:332) {
  # sprintf("%03d", ...) zero-pads the ID, replacing the original's
  # str_pad() call (stringr was never loaded, so that errored at runtime);
  # file.path() replaces the non-portable hard-coded "\\" separator.
  obs <- vapply(id, function(monitor) {
    data <- read.csv(file.path(directory, sprintf("%03d.csv", monitor)))
    sum(!is.na(data$sulfate) & !is.na(data$nitrate))
  }, integer(1))
  data.frame(id, obs)
}
} | /r programming/week2/complete.R | no_license | goldenc/datasciencecoursera | R | false | false | 422 | r | complete <- function(directory, id=1:332) {
obs <- c()
i <- 1
for(monitor in id) {
data = read.csv(paste(directory, "\\", str_pad(monitor, 3, pad = "0"), ".csv", sep=""))
hasObservation <- sum(!is.na(data$sulfate) & !is.na(data$nitrate))
obs[i] <- hasObservation
i <- i + 1
}
data.frame(id, obs)
} |
# NOTE(review): this is a fill-in-the-blank teaching exercise -- every "***"
# is a placeholder for the student to complete, so the file intentionally
# does not parse as-is.
####################
## Load libraries ##
####################
library(readr) # read data files
library(dplyr) # data manipulation
library(tidyr) # data reshaping
library(stringr) # string handling
library(ggplot2) # data visualisation
###############
## Load data ##
###############
datosONU <- read_csv("datos/DatosONU_select.csv") %>%
  select(-X1, -`Series Code`)
###############
## Exercises ##
###############
## Wherever you see "***" you must fill something in
# Reshape the data from "wide" to "long". Take the first 36 columns and send
# their names to a new variable "anio" (year) and their corresponding values
# to a column "valor" (value).
datosONU2 <- datosONU %>%
  pivot_***(1:36, ***_to = "anio", ***_to = "valor")
datosONU2
# Rename the columns "Country Name" and "Series Name" to "pais" (country)
# and "indicador" (indicator), respectively.
datosONU3 <- datosONU2 %>%
  ***(
    *** = `Country Name`,
    *** = `Series Name`
  )
datosONU3
# Load complementary data
region <- read_csv("datos/region.csv")
grupo_ingresos <- read_csv("datos/income_group.csv")
# Join "datosONU3" with the "region" and "grupo_ingresos" tables. Make sure
# to check which columns they have in common.
# Reorder the data so the columns appear in this order: pais, region,
# grupo_ingresos, then everything else.
datosONU4 <- datosONU3 %>%
  left_***(***, by = c("pais" = "country_name")) %>%
  left_***(grupo_ingresos, *** = c("pais" = "country_name")) %>%
  ***(grupo_ingresos = income_group) %>%
  select(***, ***, ***, everything())
datosONU4
# Recode the values of the "indicador" variable to a simpler Spanish form.
datosONU5 <- datosONU4 %>%
  ***(
    indicador = ***(
      indicador == "CO2 emissions (metric tons per capita)" ~ "emisiones_co2",
      indicador == "Fertility rate, total (births per woman)" ~ "tasa_fertilidad",
      indicador == "Forest area (% of land area)" ~ "area_bosques",
      indicador == "GDP per capita (constant 2005 US$)" ~ "PIB_percapita",
      indicador == "Health expenditure per capita, PPP (constant 2005 international $)" ~ "gasto_medico_percapita",
      indicador == "Labor force participation rate, female (% of female population ages 15+) (modeled ILO estimate)" ~ "participacion_laboral_femenina",
      indicador == "Life expectancy at birth, total (years)" ~ "expectativa_vida",
      indicador == "Malnutrition prevalence, weight for age (% of children under 5)" ~ "malnutricion",
      indicador == "Population (Total)" ~ "poblacion",
      indicador == "Urban population (% of total)" ~ "poblacion_urbana",
      # NOTE(review): the label string below contains a stray ")" --
      # confirm whether "consumo_combustible_fosil)" is intended.
      indicador == "Fossil fuel energy consumption (% of total)" ~ "consumo_combustible_fosil)",
      indicador == "Poverty headcount ratio at $2 a day (PPP) (% of population)" ~ "pobreza",
      indicador == "Public spending on education, total (% of government expenditure)" ~ "gasto_publico_educacion"))
datosONU5
# Translate the values of "grupo_ingresos" and "region" into Spanish. For
# "grupo_ingresos", merge Lower and Upper Middle Income into one category.
datosONU6 <- datosONU5 %>%
  ***(
    *** = ***(
      grupo_ingresos == "Low Income" ~ "Ingresos Bajos",
      grupo_ingresos *** c("Lower Middle Income", "Upper Middle Income") ~ "Ingresos Medio-Bajo",
      grupo_ingresos == "High Income" ~ "Ingresos Altos"),
    *** = ***(
      region == "East Asia and Pacific" ~ "Asia Oriente y Pacifico",
      region == "Europe and Central Afica" ~ "Europa y Africa Central",
      region == "Latin America and the Caribbean" ~ "Latinoamerica y el Caribe",
      region == "Middle East and North Africa" ~ "Medio Oriente y Africa del Norte",
      region == "North America" ~ "Norte America",
      region == "South Asia" ~ "Asia del sur",
      region == "Sub-saharan Africa" ~ "Africa subsahariana"))
datosONU6
# Spread the values of "indicador" into columns, filling them from "valor".
datosONU7 <- datosONU6 %>%
  pivot_***(***_from = indicador, ***_from = valor)
datosONU7
# Overwrite the "anio" column, extracting only the numeric part. Make sure
# the variable ends up numeric rather than text.
datosONU8 <- datosONU7 %>%
  ***(anio = str_sub(***, 1, 4),
      anio = as.numeric(***))
datosONU8
# Using only data from year 2007, compute the mean of "emisiones_co2" for
# every combination of grupo_ingresos and region.
datosONU9 <- datosONU8 %>%
  filter(anio == ***) %>%
  ***(grupo_ingresos, region) %>%
  summarise(emisiones_co2 = ***(emisiones_co2, na.rm = TRUE))
datosONU9
# Build a table with regions as rows and income groups as columns.
datosONU9 %>%
  pivot_***(***_from = ***, ***_from = ***)
#rm(datosONU2, datosONU3, datosONU4, datosONU5, datosONU6, datosONU7, datosONU8) | /Semana 4 - Manejo de Datos II/Clase04_EjercicioI.R | no_license | pjaguirreh/DataScience_PP | R | false | false | 4,833 | r | ######################
## Cargar librerías ##
######################
library(readr) # Cargar datos
library(dplyr) # Manejo de datos
library(tidyr) # Transformación de datos
library(stringr) # Manejo de datos tipo texto
library(ggplot2) # Visualizar datos
##################
## Cargar datos ##
##################
datosONU <- read_csv("datos/DatosONU_select.csv") %>%
select(-X1, -`Series Code`)
################
## Ejercicios ##
################
## Donde vea "***" es donde debe escribir algo
# Modifique la forma de los datos de "ancho" a "largo". Tome las primeras 36 columnas y asigne los nombres a una
# nueva variable "anio" y sus valores correspondientes a una columna "valor".
datosONU2 <- datosONU %>%
pivot_***(1:36, ***_to = "anio", ***_to = "valor")
datosONU2
# Cambie el nombre de las columnas "Country Name" y "Series Name" a "pais" e "indicador", respectivamente
datosONU3 <- datosONU2 %>%
***(
*** = `Country Name`,
*** = `Series Name`
)
datosONU3
# Cargar datos complementarios
region <- read_csv("datos/region.csv")
grupo_ingresos <- read_csv("datos/income_group.csv")
# Una "datosONU3" a las bases "region" y "grupo_ingresos". Asegurese de ver que columnas tienen en común
# Ordene la base para que queden las columnas en el siguiente orden: pais, region, grupo_ingresos, y el resto
datosONU4 <- datosONU3 %>%
left_***(***, by = c("pais" = "country_name")) %>%
left_***(grupo_ingresos, *** = c("pais" = "country_name")) %>%
***(grupo_ingresos = income_group) %>%
select(***, ***, ***, everything())
datosONU4
# Cambie el nombre de los valores de la variable "indicador" a una forma más simple en español.
datosONU5 <- datosONU4 %>%
***(
indicador = ***(
indicador == "CO2 emissions (metric tons per capita)" ~ "emisiones_co2",
indicador == "Fertility rate, total (births per woman)" ~ "tasa_fertilidad",
indicador == "Forest area (% of land area)" ~ "area_bosques",
indicador == "GDP per capita (constant 2005 US$)" ~ "PIB_percapita",
indicador == "Health expenditure per capita, PPP (constant 2005 international $)" ~ "gasto_medico_percapita",
indicador == "Labor force participation rate, female (% of female population ages 15+) (modeled ILO estimate)" ~ "participacion_laboral_femenina",
indicador == "Life expectancy at birth, total (years)" ~ "expectativa_vida",
indicador == "Malnutrition prevalence, weight for age (% of children under 5)" ~ "malnutricion",
indicador == "Population (Total)" ~ "poblacion",
indicador == "Urban population (% of total)" ~ "poblacion_urbana",
indicador == "Fossil fuel energy consumption (% of total)" ~ "consumo_combustible_fosil)",
indicador == "Poverty headcount ratio at $2 a day (PPP) (% of population)" ~ "pobreza",
indicador == "Public spending on education, total (% of government expenditure)" ~ "gasto_publico_educacion"))
datosONU5
# Cambie los nombres de las variables "grupo_ingresos" y "region" español. En el caso de la variable "grupo ingresos",
# fusione Lower y Upper Middle Income en una sola categoria.
datosONU6 <- datosONU5 %>%
***(
*** = ***(
grupo_ingresos == "Low Income" ~ "Ingresos Bajos",
grupo_ingresos *** c("Lower Middle Income", "Upper Middle Income") ~ "Ingresos Medio-Bajo",
grupo_ingresos == "High Income" ~ "Ingresos Altos"),
*** = ***(
region == "East Asia and Pacific" ~ "Asia Oriente y Pacifico",
region == "Europe and Central Afica" ~ "Europa y Africa Central",
region == "Latin America and the Caribbean" ~ "Latinoamerica y el Caribe",
region == "Middle East and North Africa" ~ "Medio Oriente y Africa del Norte",
region == "North America" ~ "Norte America",
region == "South Asia" ~ "Asia del sur",
region == "Sub-saharan Africa" ~ "Africa subsahariana"))
datosONU6
# Asigne los valores de "indicador" como columnas y complete los valores con la columna "valor"
datosONU7 <- datosONU6 %>%
pivot_***(***_from = indicador, ***_from = valor)
datosONU7
# Sobreescriba la columna "anio" extrayendo solo el valor numérico correspondiente. Asegurese que
# la variable quede como tupo numérico y no texto.
datosONU8 <- datosONU7 %>%
***(anio = str_sub(***, 1, 4),
anio = as.numeric(***))
datosONU8
# Tomando solo datos del año 2007, calcule el promedio de "emisiones_co2" para cada combinación de
# grupo_ingresos y region
datosONU9 <- datosONU8 %>%
filter(anio == ***) %>%
***(grupo_ingresos, region) %>%
summarise(emisiones_co2 = ***(emisiones_co2, na.rm = TRUE))
datosONU9
# Genere una tabla con regiones como filas y grupos de ingreso como columnas.
datosONU9 %>%
pivot_***(***_from = ***, ***_from = ***)
#rm(datosONU2, datosONU3, datosONU4, datosONU5, datosONU6, datosONU7, datosONU8) |
# scRNA-seq analysis of 10x PBMC data with Seurat:
# load -> QC -> normalise -> PCA -> cluster -> UMAP -> markers.
library(dplyr)
library(Seurat)
library(patchwork)
# Load the CellRanger filtered gene-barcode matrix.
pbmc.data <- Read10X(data.dir = "/data/tusers/lixiangr/eRNA/single-cell/hg38/PBMCs/data/filtered_feature_bc_matrix/hg38")
# Initialize the Seurat object with the raw (non-normalized data).
pbmc <- CreateSeuratObject(counts = pbmc.data, project = "pbmc10k", min.cells = 3, min.features = 200)
pbmc
# Mitochondrial read percentage per cell (QC metric; "MT-" gene prefix).
pbmc[["percent.mt"]] <- PercentageFeatureSet(pbmc, pattern = "^MT-")
# QC plots: feature/count/mito violins and count-vs-metric scatters.
plot<-VlnPlot(pbmc, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
plot1 <- FeatureScatter(pbmc, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(pbmc, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
pdf("~/eRNA/pbmcs/read_count_p.pdf")
plot1
plot2
plot
dev.off()
# Filter cells: 200 < nFeature < 6000 and < 20% mitochondrial reads.
pbmc <- subset(pbmc, subset = nFeature_RNA > 200 & nFeature_RNA < 6000 & percent.mt < 20)
# Log-normalise counts.
# NOTE(review): NormalizeData is called twice; the second call with default
# arguments repeats the first -- confirm this is intentional.
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
pbmc <- NormalizeData(pbmc)
# Select the 2000 most variable genes (vst method).
pbmc <- FindVariableFeatures(pbmc, selection.method = "vst", nfeatures = 2000)
# Identify the 10 most highly variable genes
top10 <- head(VariableFeatures(pbmc), 10)
# plot variable features with and without labels
plot1 <- VariableFeaturePlot(pbmc)
plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE)
pdf("~/eRNA/pbmcs/feature selection.pdf")
plot1
plot2
dev.off()
# Scale all genes, then run PCA on the variable features.
all.genes <- rownames(pbmc)
pbmc <- ScaleData(pbmc, features = all.genes)
pbmc <- RunPCA(pbmc, features = VariableFeatures(object = pbmc))
print(pbmc[["pca"]], dims = 1:5, nfeatures = 5)
# Assess PC significance (JackStraw + elbow plot) to choose dimensions.
pbmc <- JackStraw(pbmc, num.replicate = 100)
pbmc <- ScoreJackStraw(pbmc, dims = 1:20)
plot3<-ElbowPlot(pbmc)
pdf("~/eRNA/pbmcs/pca.pdf")
plot3
dev.off()
# Graph-based clustering on the first 10 PCs, then UMAP embedding.
pbmc <- FindNeighbors(pbmc, dims = 1:10)
pbmc <- FindClusters(pbmc, resolution = 0.5)
head(Idents(pbmc), 5)
pbmc <- RunUMAP(pbmc, dims = 1:10)
umap<-DimPlot(pbmc, reduction = "umap")
pdf("~/eRNA/pbmcs/umap.pdf")
umap
dev.off()
# Positive marker genes per cluster; export top-N tables and cluster labels.
pbmc.markers <- FindAllMarkers(pbmc, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
top10 <- pbmc.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_log2FC)
write.table(top10,"~/eRNA/pbmcs/cells/reads/top_10.txt",quote=F)
write.table(Idents(pbmc),"~/eRNA/pbmcs/cells/reads/cluster.txt",quote=F)
top100 <- pbmc.markers %>% group_by(cluster) %>% top_n(n = 100, wt = avg_log2FC)
top1000 <- pbmc.markers %>% group_by(cluster) %>% top_n(n = 1000, wt = avg_log2FC)
# Keep only markers with adjusted p-value below 0.05.
q<-top1000[which(top1000$p_val_adj<0.05),]
write.table(top100,"~/eRNA/pbmcs/cells/reads/top_100.txt",quote=F)
write.table(q,"~/eRNA/pbmcs/cells/reads/top_0.05.txt",quote=F)
# CIBERSORT immune cell-type signature matrix (loads human.immune.CIBERSORT).
load("/data/tusers/lixiangr/eRNA/single-cell/hg38/PBMCs/data/filtered_feature_bc_matrix/MAESTRO/data/human.immune.CIBERSORT.RData")
#load("/data/tusers/lixiangr/eRNA/single-cell/hg38/PBMCs/data/filtered_feature_bc_matrix/.RData")
# Score one cluster's marker genes against per-cell-type gene signatures.
#
# Args:
#   genes: data.frame of cluster markers with columns `cluster`,
#          `avg_log2FC`, and `gene` (FindAllMarkers output shape).
#   signatures: either a two-column data.frame (cell type, gene) or the
#          name of a packaged dataset to load (default CIBERSORT set).
#   cluster: which cluster id in `genes` to score.
#
# Returns: named numeric vector of per-cell-type scores, sorted decreasing;
#   score = sum of the cluster's log2FCs over the signature genes,
#   normalised by log2(signature size).
RNAAnnotateCelltypeCluster <- function(genes, signatures = "human.immune.CIBERSORT", cluster = 0){
  # When given a dataset name, load it and use the loaded object.
  # (is.character() replaces the fragile class(x) == "character" test.)
  if (is.character(signatures)) {
    data(list = signatures)
    signatures <- get(signatures)
  }
  celltypes <- as.character(unique(signatures[, 1]))
  # lapply (not sapply) so the result is always a list: the original sapply
  # silently simplified to a matrix whenever every cell type had the same
  # number of signature genes, breaking the per-cell-type iteration below.
  signature_list <- lapply(celltypes, function(ct) {
    toupper(as.character(signatures[which(signatures[, 1] == ct), 2]))
  })
  names(signature_list) <- celltypes
  # log2 fold changes of this cluster's markers, keyed by upper-case symbol.
  idx <- genes$cluster == cluster
  avglogFC <- genes$avg_log2FC[idx]
  names(avglogFC) <- toupper(genes$gene[idx])
  # Genes absent from the cluster markers index as NA and are dropped by
  # na.rm = TRUE, matching the original behaviour.
  score_cluster <- vapply(signature_list, function(sig_genes) {
    sum(avglogFC[sig_genes], na.rm = TRUE) / log2(length(sig_genes))
  }, numeric(1))
  sort(score_cluster, decreasing = TRUE)
}
# Score every cluster (0-12) against the CIBERSORT immune signatures and keep
# the best-scoring cell type per cluster (replaces thirteen copy-pasted
# celtype.score_<k> assignments with a single loop).
celltype.scores <- lapply(0:12, function(k) {
  RNAAnnotateCelltypeCluster(q, human.immune.CIBERSORT, cluster = k)
})
# Each score vector is sorted decreasing, so its first name is the top type.
names <- vapply(celltype.scores, function(score) names(score)[1], character(1))
cluster <- cbind(names, c(0:12))
write.table(cluster, "~/eRNA/pbmcs/cells/reads/cell_type.txt", quote = FALSE)
# Relabel the Seurat clusters with the inferred cell-type names and redraw
# the annotated UMAP.
new.cluster.ids <- cluster[, 1]
names(new.cluster.ids) <- levels(pbmc)
pbmc <- RenameIdents(pbmc, new.cluster.ids)
plot4 <- DimPlot(pbmc, reduction = "umap", label = TRUE, pt.size = 0.5) + NoLegend()
pdf("~/eRNA/pbmcs/umap selection.pdf")
plot4
dev.off()
| /human/PMBC/PBMC.R | permissive | Xiangruili-seed/eRNA | R | false | false | 5,427 | r | library(dplyr)
library(Seurat)
library(patchwork)
pbmc.data <- Read10X(data.dir = "/data/tusers/lixiangr/eRNA/single-cell/hg38/PBMCs/data/filtered_feature_bc_matrix/hg38")
# Initialize the Seurat object with the raw (non-normalized data).
pbmc <- CreateSeuratObject(counts = pbmc.data, project = "pbmc10k", min.cells = 3, min.features = 200)
pbmc
pbmc[["percent.mt"]] <- PercentageFeatureSet(pbmc, pattern = "^MT-")
plot<-VlnPlot(pbmc, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
plot1 <- FeatureScatter(pbmc, feature1 = "nCount_RNA", feature2 = "percent.mt")
# ---- QC scatter plots -------------------------------------------------
# nCount vs nFeature scatter; informs the subset() thresholds below.
plot2 <- FeatureScatter(pbmc, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
pdf("~/eRNA/pbmcs/read_count_p.pdf")
plot1
plot2
# NOTE(review): bare `plot` prints the base plot() function object to the
# PDF -- this looks like a stray line; confirm and remove.
plot
dev.off()
# Keep cells with 200 < nFeature_RNA < 6000 and percent.mt < 20.
pbmc <- subset(pbmc, subset = nFeature_RNA > 200 & nFeature_RNA < 6000 & percent.mt < 20)
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
# NOTE(review): this second NormalizeData call re-runs normalization with
# default settings right after the explicit call above -- presumably
# redundant; confirm and drop one of the two.
pbmc <- NormalizeData(pbmc)
# Select 2000 variable features via the variance-stabilizing transform.
pbmc <- FindVariableFeatures(pbmc, selection.method = "vst", nfeatures = 2000)
# Identify the 10 most highly variable genes
top10 <- head(VariableFeatures(pbmc), 10)
# plot variable features with and without labels
plot1 <- VariableFeaturePlot(pbmc)
plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE)
pdf("~/eRNA/pbmcs/feature selection.pdf")
plot1
plot2
dev.off()
# ---- Scaling, PCA and dimensionality choice ---------------------------
all.genes <- rownames(pbmc)
pbmc <- ScaleData(pbmc, features = all.genes)
pbmc <- RunPCA(pbmc, features = VariableFeatures(object = pbmc))
print(pbmc[["pca"]], dims = 1:5, nfeatures = 5)
# JackStraw scores plus an elbow plot to judge how many PCs to keep.
pbmc <- JackStraw(pbmc, num.replicate = 100)
pbmc <- ScoreJackStraw(pbmc, dims = 1:20)
plot3<-ElbowPlot(pbmc)
pdf("~/eRNA/pbmcs/pca.pdf")
plot3
dev.off()
# ---- Clustering and UMAP on the first 10 PCs --------------------------
pbmc <- FindNeighbors(pbmc, dims = 1:10)
pbmc <- FindClusters(pbmc, resolution = 0.5)
head(Idents(pbmc), 5)
pbmc <- RunUMAP(pbmc, dims = 1:10)
umap<-DimPlot(pbmc, reduction = "umap")
pdf("~/eRNA/pbmcs/umap.pdf")
umap
dev.off()
# ---- Per-cluster marker genes ----------------------------------------
# Positive markers only (min.pct = 0.25, logfc.threshold = 0.25).
pbmc.markers <- FindAllMarkers(pbmc, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
top10 <- pbmc.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_log2FC)
write.table(top10,"~/eRNA/pbmcs/cells/reads/top_10.txt",quote=F)
write.table(Idents(pbmc),"~/eRNA/pbmcs/cells/reads/cluster.txt",quote=F)
top100 <- pbmc.markers %>% group_by(cluster) %>% top_n(n = 100, wt = avg_log2FC)
top1000 <- pbmc.markers %>% group_by(cluster) %>% top_n(n = 1000, wt = avg_log2FC)
# Markers with adjusted p < 0.05; input to the cell-type scoring below.
q<-top1000[which(top1000$p_val_adj<0.05),]
write.table(top100,"~/eRNA/pbmcs/cells/reads/top_100.txt",quote=F)
write.table(q,"~/eRNA/pbmcs/cells/reads/top_0.05.txt",quote=F)
# Loads the human.immune.CIBERSORT signature table used for annotation.
load("/data/tusers/lixiangr/eRNA/single-cell/hg38/PBMCs/data/filtered_feature_bc_matrix/MAESTRO/data/human.immune.CIBERSORT.RData")
#load("/data/tusers/lixiangr/eRNA/single-cell/hg38/PBMCs/data/filtered_feature_bc_matrix/.RData")
# Score every cell-type signature against one cluster's marker genes.
#
# For each cell type in `signatures`, the score is the sum of the
# cluster's avg_log2FC over the type's signature genes (gene symbols
# matched case-insensitively via toupper), divided by log2 of the
# signature size.
#
# @param genes data.frame of cluster markers with columns `cluster`,
#   `avg_log2FC` and `gene` (e.g. output of Seurat's FindAllMarkers).
# @param signatures either a data.frame whose first column is the cell
#   type and second column a signature gene, or the name of a packaged
#   data set to load via data() (default "human.immune.CIBERSORT").
# @param cluster cluster id to score (default 0).
# @return named numeric vector of per-cell-type scores, sorted decreasing.
RNAAnnotateCelltypeCluster <- function(genes, signatures = "human.immune.CIBERSORT", cluster = 0){
  # is.character() instead of class(x) == "character": robust when an
  # object carries more than one class attribute.
  if (is.character(signatures)) {
    data(list = signatures)
    signatures <- get(signatures)
  }
  celltypes <- as.character(unique(signatures[, 1]))
  # lapply + seq_along replaces sapply(1:length(...)): safe for zero
  # cell types and guaranteed to return a list.
  signature_list <- lapply(seq_along(celltypes), function(i) {
    toupper(as.character(signatures[which(signatures[, 1] == celltypes[i]), 2]))
  })
  names(signature_list) <- celltypes
  # Markers of the requested cluster, keyed by upper-cased gene symbol.
  idx <- genes$cluster == cluster
  avglogFC <- genes$avg_log2FC[idx]
  names(avglogFC) <- toupper(genes$gene[idx])
  # vapply pins the result to one numeric per cell type, where sapply
  # could silently change shape on degenerate input.
  # NOTE(review): a single-gene signature gives log2(1) == 0 and an
  # infinite score -- preserved from the original; confirm intent.
  score_cluster <- vapply(signature_list, function(sig) {
    sum(avglogFC[sig], na.rm = TRUE) / log2(length(sig))
  }, numeric(1))
  sort(score_cluster, decreasing = TRUE)
}
# Score all 13 clusters (0-12) against the CIBERSORT immune signatures
# and keep the top-scoring cell type per cluster.  Replaces the thirteen
# copy-pasted celtype.score_<k> calls with a single loop.
celltype.scores <- lapply(0:12, function(cl) {
  RNAAnnotateCelltypeCluster(q, human.immune.CIBERSORT, cluster = cl)
})
# Scores come back sorted decreasing, so the first name is the winner.
# The variable is deliberately kept as `names` so the column header in
# the written table matches the original output, even though it shadows
# base::names within this script.
names <- vapply(celltype.scores, function(s) names(s)[1], character(1))
cluster <- cbind(names, c(0:12))
write.table(cluster, "~/eRNA/pbmcs/cells/reads/cell_type.txt", quote = F)
# Relabel Seurat cluster identities with the predicted cell types
# (first column of the `cluster` table built above).
new.cluster.ids <- cluster[,1]
names(new.cluster.ids) <- levels(pbmc)
pbmc <- RenameIdents(pbmc, new.cluster.ids)
# UMAP with cell-type labels drawn on the embedding, legend suppressed.
plot4<-DimPlot(pbmc, reduction = "umap", label = TRUE, pt.size = 0.5) + NoLegend()
pdf("~/eRNA/pbmcs/umap selection.pdf")
plot4
dev.off()
|
\name{power.signtest}
\alias{power.signtest}
\title{Compute power of the sign test}
\description{Use the Noether (1987) formula to compute the power of the sign test}
\usage{
power.signtest(n, alpha, p)
}
\arguments{
\item{n}{sample size (scalar)}
\item{alpha}{p-value threshold (scalar)}
\item{p}{Pr (Y>X), as in Noether (JASA 1987)}
}
\value{vector of power estimates for two-sided tests}
\details{In most applications, the null effect size will be designated by p = 0.5 instead of p = 0.
Thus, in the call to fdr.sampsize, we specify null.effect=0.5 in the example below.}
\references{Noether, Gottfried E (1987) Sample size determination for some common nonparametric tests.
Journal of the American Statistical Association, 82:645-647.}
\examples{
power.signtest # show the power function
res=fdr.sampsize(fdr=0.1,
ave.pow=0.8,
pow.func=power.signtest,
eff.size=rep(c(0.8,0.5),c(100,900)),
null.effect=0.5)
res
}
| /man/power.signtest.Rd | no_license | cran/FDRsampsize | R | false | false | 1,063 | rd | \name{power.signtest}
\alias{power.signtest}
\title{Compute power of the sign test}
\description{Use the Noether (1987) formula to compute the power of the sign test}
\usage{
power.signtest (n, alpha, p)
}
\arguments{
\item{n}{sample size (scalar)}
\item{alpha}{p-value threshold (scalar)}
\item{p}{Pr (Y>X), as in Noether (JASA 1987)}
}
\value{vector of power estimates for two-sided tests}
\details{In most applications, the null effect size will be designated by p = 0.5 instead of p = 0.
Thus, in the call to fdr.sampsize, we specify null.effect=0.5 in the example below.}
\references{Noether, Gottfried E (1987) Sample size determination for some common nonparametric tests.
Journal of the American Statistical Association, 82:645-647.}
\examples{
power.signtest # show the power function
res=fdr.sampsize(fdr=0.1,
ave.pow=0.8,
pow.func=power.signtest,
eff.size=rep(c(0.8,0.5),c(100,900)),
null.effect=0.5)
res
}
|
# packages
library(stringr)
suppressPackageStartupMessages(library(lubridate))
suppressPackageStartupMessages(library(tidyverse))
library(purrr)
library(purrrlyr)
suppressPackageStartupMessages(library(twitteR))
library(tidytext)
library(e1071)
# Get response function, if reply is necessary
# Compose a random canned reply tagged with the @realDonaldTrump handle.
#
# Draws one phrase uniformly at random from a fixed list and appends
# " @realDonaldTrump".  Returns a length-1 character vector (the original
# ended in an assignment, so its value was returned invisibly; returning
# the expression directly makes the result visible).
get_response <- function() {
  response_list <- c("Yep, this is me.",
                     "Can you believe I'm president?",
                     "Hold my beer...",
                     "Big league,",
                     "SAD!",
                     "It's really me, I think...",
                     "Me again,",
                     "I'm Donald Trump, and I approved this message.",
                     "Not my staff, I swear.",
                     "Great crowd!",
                     "So presidential!",
                     "Fake News!!")
  # sample() on the vector directly replaces the sample(1:length(...))
  # index round-trip; the sampling distribution is unchanged.
  paste(sample(response_list, 1), "@realDonaldTrump", sep = " ")
}
# Function to convert numerical to categorical
# Collapse a numeric count vector to a two-level factor: "Yes" for any
# strictly positive count, "No" otherwise.  Used to binarise the
# per-tweet sentiment counts before feeding them to the Naive Bayes model.
convert_counts <- function(x){
  flag <- ifelse(x > 0, "Yes", "No")
  as.factor(flag)
}
# Get latest tweet
# twitterauth.R is expected to define the four credential variables below.
source("twitterauth.R")
setup_twitter_oauth(twitter_consumer_key,
twitter_consumer_secret,
twitter_access_token,
twitter_access_token_secret)
new_trump_tweet <- userTimeline("realDonaldTrump", n = 10)
# Flatten the list of status objects into a single tibble.
new_trump_tweet <- tbl_df(map_df(new_trump_tweet, as.data.frame))
# Check if there are new tweets
print(Sys.time())
print("Checking twitter feed... ")
# Loads `trump_tweets`, the archive of previously processed tweets.
load("../data/trump_tweets.Rdata")
# Keep only tweets whose id is not already in the archive.
new_trump_tweet <- new_trump_tweet %>% filter(!id %in% trump_tweets$id)
if(nrow(new_trump_tweet)>0) {
# Create features
# quote: tweet starts with a quotation mark (its text is then blanked);
# picture: text contains a t.co link; hashtag: text contains '#';
# dow/tod: day of week and hour of day in EST.
new_trump_tweet <- new_trump_tweet %>%
mutate(quote = ifelse(str_detect(text, '^"'), TRUE, FALSE)) %>%
mutate(text = ifelse(str_detect(text, '^"'), "", text)) %>%
mutate(picture = ifelse(str_detect(text, "t.co"), TRUE, FALSE)) %>%
mutate(text = str_replace_all(text, "https://t.co/[A-Za-z\\d]+|&", "")) %>%
mutate(hashtag = ifelse(str_detect(text, "#"), TRUE, FALSE)) %>%
mutate(date.time = ymd_hms(created, tz = "EST")) %>%
mutate(dow = wday(date.time, label = TRUE)) %>%
mutate(tod = hour(with_tz(created, tzone = "EST")))
# Get sentiment
# nrc_dummy: per-word sentiment indicator table joined against the tokens.
load("../data/nrc_dummy.Rdata")
# One row per tweet, taking the max of each sentiment indicator over the
# tweet's words; quoted tweets skip tokenization but are re-attached by
# the right_join.
new_trump_sentiment <- new_trump_tweet %>%
filter(!quote) %>%
unnest_tokens(output = word, input = text, token = "words") %>%
inner_join(nrc_dummy) %>%
group_by(id) %>%
summarise_at(vars(starts_with("sentiment")), max) %>%
right_join(new_trump_tweet, by = "id")
# Clean up data
new_trump_sentiment[is.na(new_trump_sentiment)] <- 0
# NOTE(review): factor levels 1:23 omit hour 0, so midnight tweets get
# an NA tod -- confirm this matches how the model was trained.
new_trump_sentiment <- new_trump_sentiment %>%
mutate(tod = factor(tod, c(1:23))) %>%
mutate_at(vars(starts_with("sentiment")), convert_counts) %>%
select(quote, picture, hashtag, dow, tod, starts_with("sentiment"), id, text)
# load Naive Bayes model and make prediction
tweet_nb <- readRDS("../data/tweet_nb.Rds")
# NOTE(review): columns 1:15 are assumed to be exactly the model's
# feature columns, in order -- verify against how tweet_nb was trained.
new_trump_sentiment$prediction <- predict(tweet_nb, newdata = new_trump_sentiment[,1:15])
posterior <- predict(tweet_nb, newdata = new_trump_sentiment[,1:15], type = "raw")
# Column 2 of the posterior matrix is taken as the "trump" probability;
# this depends on the class ordering in the fitted model -- confirm.
new_trump_sentiment$probability <- posterior[,2]
# Reply to tweets if predicted to be trump
# Really hate using a loop, but not sure how to execute the function otherwise
replytweets <- new_trump_sentiment %>%
filter(prediction == "trump")
print("Breakdown of new tweet predictions")
print(table(new_trump_sentiment$prediction))
if(nrow(replytweets) > 0){
for(n in 1:nrow(replytweets)){
text1 <- get_response()
text2 <- paste("Probability of Trump:", round(replytweets$probability[n], digits = 2), sep = " ")
response <- paste(text1, text2, sep = " ")
updateStatus(text = response, inReplyTo = replytweets$id[n])
}
}
#save new tweets to file
trump_tweets <- rbind(trump_tweets, new_trump_sentiment)
save(trump_tweets, file = "../data/trump_tweets.RData")
write_csv(new_trump_sentiment, path = "../data/trump_tweets.csv", append = TRUE)
} else {
print("There were no new tweets")
}
# Clear the workspace so repeated scheduled runs start clean.
rm(list = ls(all=TRUE))
| /R/04-predict-new-tweet.R | no_license | kahultman/trump-tweets | R | false | false | 4,176 | r | # packages
library(stringr)
suppressPackageStartupMessages(library(lubridate))
suppressPackageStartupMessages(library(tidyverse))
library(purrr)
library(purrrlyr)
suppressPackageStartupMessages(library(twitteR))
library(tidytext)
library(e1071)
# Get response function, if reply is necessary
get_response <- function() {
response_list <- c("Yep, this is me.",
"Can you believe I'm president?",
"Hold my beer...",
"Big league,",
"SAD!",
"It's really me, I think...",
"Me again,",
"I'm Donald Trump, and I approved this message.",
"Not my staff, I swear.",
"Great crowd!",
"So presidential!",
"Fake News!!")
randomnum <- sample(1:length(response_list), 1)
response <- paste(response_list[randomnum], "@realDonaldTrump", sep = " ")
}
# Function to convert numerical to categorical
convert_counts <- function(x){
x <- as.factor(ifelse(x > 0, "Yes", "No"))
}
# Get latest tweet
source("twitterauth.R")
setup_twitter_oauth(twitter_consumer_key,
twitter_consumer_secret,
twitter_access_token,
twitter_access_token_secret)
new_trump_tweet <- userTimeline("realDonaldTrump", n = 10)
new_trump_tweet <- tbl_df(map_df(new_trump_tweet, as.data.frame))
# Check if there are new tweets
print(Sys.time())
print("Checking twitter feed... ")
load("../data/trump_tweets.Rdata")
new_trump_tweet <- new_trump_tweet %>% filter(!id %in% trump_tweets$id)
if(nrow(new_trump_tweet)>0) {
# Create features
new_trump_tweet <- new_trump_tweet %>%
mutate(quote = ifelse(str_detect(text, '^"'), TRUE, FALSE)) %>%
mutate(text = ifelse(str_detect(text, '^"'), "", text)) %>%
mutate(picture = ifelse(str_detect(text, "t.co"), TRUE, FALSE)) %>%
mutate(text = str_replace_all(text, "https://t.co/[A-Za-z\\d]+|&", "")) %>%
mutate(hashtag = ifelse(str_detect(text, "#"), TRUE, FALSE)) %>%
mutate(date.time = ymd_hms(created, tz = "EST")) %>%
mutate(dow = wday(date.time, label = TRUE)) %>%
mutate(tod = hour(with_tz(created, tzone = "EST")))
# Get sentiment
load("../data/nrc_dummy.Rdata")
new_trump_sentiment <- new_trump_tweet %>%
filter(!quote) %>%
unnest_tokens(output = word, input = text, token = "words") %>%
inner_join(nrc_dummy) %>%
group_by(id) %>%
summarise_at(vars(starts_with("sentiment")), max) %>%
right_join(new_trump_tweet, by = "id")
# Clean up data
new_trump_sentiment[is.na(new_trump_sentiment)] <- 0
new_trump_sentiment <- new_trump_sentiment %>%
mutate(tod = factor(tod, c(1:23))) %>%
mutate_at(vars(starts_with("sentiment")), convert_counts) %>%
select(quote, picture, hashtag, dow, tod, starts_with("sentiment"), id, text)
# load Naive Bayes model and make prediction
tweet_nb <- readRDS("../data/tweet_nb.Rds")
new_trump_sentiment$prediction <- predict(tweet_nb, newdata = new_trump_sentiment[,1:15])
posterior <- predict(tweet_nb, newdata = new_trump_sentiment[,1:15], type = "raw")
new_trump_sentiment$probability <- posterior[,2]
# Reply to tweets if predicted to be trump
# Really hate using a loop, but not sure how to execute the function otherwise
replytweets <- new_trump_sentiment %>%
filter(prediction == "trump")
print("Breakdown of new tweet predictions")
print(table(new_trump_sentiment$prediction))
if(nrow(replytweets) > 0){
for(n in 1:nrow(replytweets)){
text1 <- get_response()
text2 <- paste("Probability of Trump:", round(replytweets$probability[n], digits = 2), sep = " ")
response <- paste(text1, text2, sep = " ")
updateStatus(text = response, inReplyTo = replytweets$id[n])
}
}
#save new tweets to file
trump_tweets <- rbind(trump_tweets, new_trump_sentiment)
save(trump_tweets, file = "../data/trump_tweets.RData")
write_csv(new_trump_sentiment, path = "../data/trump_tweets.csv", append = TRUE)
} else {
print("There were no new tweets")
}
rm(list = ls(all=TRUE))
|
# Project doc folder (absolute, machine-specific path).
setwd("~/GitHub/Spring2018-Project3-spring2018-project3-group10/doc")
getwd()
# read train dataset
# feature_HOG.RData provides the `hog` feature matrix used below.
load('../output/feature_HOG.RData')
label_train <- read.csv('../data/label_train.csv')
dat_train <- hog
# Keep only the third column of the label file as the label vector.
label_train <- label_train[,3]
dim(dat_train)
#
source("../lib/train.R")
source("../lib/test.R")
source("../lib/cross_validation.R")
# which model to perform cross validation
run.cv = T
cv.svm = T
K = 5
svm_values <- seq(0.01, 0.1, by = 0.02) # gamma for svm
svm_labels = paste("SVM with gamma =", svm_values)
#
if(cv.svm){
# One row per gamma value; the two columns hold what cv.function
# returns (presumably mean CV error and its sd -- confirm in
# ../lib/cross_validation.R).
err_cv <- array(dim=c(length(svm_values), 2))
for(k in 1:length(svm_values)){
cat("k=", k, "\n")
err_cv[k,] <- cv.function(as.data.frame(dat_train), label_train, svm_values[k], K, cv.svm = T)
}
}
# NOTE(review): if cv.svm were FALSE, err_cv would be undefined and this
# save() would error -- consider moving it inside the if block.
save(err_cv, file="../output/err_cv_HOG_svm.RData")
| /lib/cross_validation/HOG+svm.R | no_license | wenyuangu/Spring2018-Project3-Group10 | R | false | false | 793 | r | setwd("~/GitHub/Spring2018-Project3-spring2018-project3-group10/doc")
getwd()
# read train dataset
load('../output/feature_HOG.RData')
label_train <- read.csv('../data/label_train.csv')
dat_train <- hog
label_train <- label_train[,3]
dim(dat_train)
#
source("../lib/train.R")
source("../lib/test.R")
source("../lib/cross_validation.R")
# which model to perform cross validation
run.cv = T
cv.svm = T
K = 5
svm_values <- seq(0.01, 0.1, by = 0.02) # gamma for svm
svm_labels = paste("SVM with gamma =", svm_values)
#
if(cv.svm){
err_cv <- array(dim=c(length(svm_values), 2))
for(k in 1:length(svm_values)){
cat("k=", k, "\n")
err_cv[k,] <- cv.function(as.data.frame(dat_train), label_train, svm_values[k], K, cv.svm = T)
}
}
save(err_cv, file="../output/err_cv_HOG_svm.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readinput.R
\name{readconfig}
\alias{readconfig}
\title{Read the configuration (.para, .calib) file
\code{readconfig}}
\usage{
readconfig(file = shud.filein()["md.para"])
}
\arguments{
\item{file}{full path of file}
}
\value{
.para or .calib
}
\description{
Read the configuration (.para, .calib) file
\code{readconfig}
}
| /man/readconfig.Rd | permissive | SHUD-System/rSHUD | R | false | true | 400 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readinput.R
\name{readconfig}
\alias{readconfig}
\title{Read the configuration (.para, .calib) file
\code{readconfig}}
\usage{
readconfig(file = shud.filein()["md.para"])
}
\arguments{
\item{file}{full path of file}
}
\value{
.para or .calib
}
\description{
Read the configuration (.para, .calib) file
\code{readconfig}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_threeway_between.R
\name{power_threeway_between}
\alias{power_threeway_between}
\title{Analytic power calculation for three-way between designs.}
\usage{
power_threeway_between(design_result, alpha_level = 0.05)
}
\arguments{
\item{design_result}{Output from the ANOVA_design function}
\item{alpha_level}{Alpha level used to determine statistical significance (default to 0.05)}
}
\value{
mu = means
sigma = standard deviation
n = sample size
alpha_level = alpha level
Cohen_f_A = Cohen's f for main effect A
Cohen_f_B = Cohen's f for main effect B
Cohen_f_C = Cohen's f for main effect C
Cohen_f_AB = Cohen's f for the A*B interaction
Cohen_f_AC = Cohen's f for the A*C interaction
Cohen_f_BC = Cohen's f for the B*C interaction
Cohen_f_ABC = Cohen's f for the A*B*C interaction
f_2_A = Cohen's f squared for main effect A
f_2_B = Cohen's f squared for main effect B
f_2_C = Cohen's f squared for main effect C
f_2_AB = Cohen's f squared for A*B interaction
f_2_AC = Cohen's f squared for A*C interaction
f_2_BC = Cohen's f squared for B*C interaction
f_2_ABC = Cohen's f squared for A*B*C interaction
lambda_A = lambda for main effect A
lambda_B = lambda for main effect B
lambda_C = lambda for main effect C
lambda_AB = lambda for A*B interaction
lambda_AC = lambda for A*C interaction
lambda_BC = lambda for B*C interaction
lambda_ABC = lambda for A*B*C interaction
critical_F_A = critical F-value for main effect A
critical_F_B = critical F-value for main effect B
critical_F_C = critical F-value for main effect C
critical_F_AB = critical F-value for A*B interaction
critical_F_AC = critical F-value for A*C interaction
critical_F_BC = critical F-value for B*C interaction
critical_F_ABC = critical F-value for A*B*C interaction
power_A = power for main effect A
power_B = power for main effect B
power_C = power for main effect C
power_AB = power for A*B interaction
power_AC = power for A*C interaction
power_BC = power for B*C interaction
power_ABC = power for A*B*C interaction
df_A = degrees of freedom for main effect A
df_B = degrees of freedom for main effect B
df_C = degrees of freedom for main effect C
df_AB = degrees of freedom for A*B interaction
df_AC = degrees of freedom for A*C interaction
df_BC = degrees of freedom for B*C interaction
df_ABC = degrees of freedom for A*B*C interaction
df_error = degrees of freedom for error term
eta_p_2_A = partial eta-squared for main effect A
eta_p_2_B = partial eta-squared for main effect B
eta_p_2_C = partial eta-squared for main effect C
eta_p_2_AB = partial eta-squared for A*B interaction
eta_p_2_AC = partial eta-squared for A*C interaction
eta_p_2_BC = partial eta-squared for B*C interaction
eta_p_2_ABC = partial eta-squared for A*B*C interaction
mean_mat = matrix of the means
}
\description{
Analytic power calculation for three-way between designs.
}
\section{References}{
to be added
}
\examples{
design_result <- ANOVA_design(design = "2b*2b*2b", n = 40,
mu = c(1, 0, 1, 0, 0, 1, 1, 0), sd = 2,
labelnames = c("condition", "cheerful", "sad",
"voice", "human", "robot", "color", "green", "red"))
power_result <- power_threeway_between(design_result, alpha_level = 0.05)
}
| /man/power_threeway_between.Rd | permissive | arcaldwell49/Superpower | R | false | true | 3,309 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_threeway_between.R
\name{power_threeway_between}
\alias{power_threeway_between}
\title{Analytic power calculation for three-way between designs.}
\usage{
power_threeway_between(design_result, alpha_level = 0.05)
}
\arguments{
\item{design_result}{Output from the ANOVA_design function}
\item{alpha_level}{Alpha level used to determine statistical significance (default to 0.05)}
}
\value{
mu = means
sigma = standard deviation
n = sample size
alpha_level = alpha level
Cohen_f_A = Cohen's f for main effect A
Cohen_f_B = Cohen's f for main effect B
Cohen_f_C = Cohen's f for main effect C
Cohen_f_AB = Cohen's f for the A*B interaction
Cohen_f_AC = Cohen's f for the A*C interaction
Cohen_f_BC = Cohen's f for the B*C interaction
Cohen_f_ABC = Cohen's f for the A*B*C interaction
f_2_A = Cohen's f squared for main effect A
f_2_B = Cohen's f squared for main effect B
f_2_C = Cohen's f squared for main effect C
f_2_AB = Cohen's f squared for A*B interaction
f_2_AC = Cohen's f squared for A*C interaction
f_2_BC = Cohen's f squared for B*C interaction
f_2_ABC = Cohen's f squared for A*B*C interaction
lambda_A = lambda for main effect A
lambda_B = lambda for main effect B
lambda_C = lambda for main effect C
lambda_AB = lambda for A*B interaction
lambda_AC = lambda for A*C interaction
lambda_BC = lambda for B*C interaction
lambda_ABC = lambda for A*B*C interaction
critical_F_A = critical F-value for main effect A
critical_F_B = critical F-value for main effect B
critical_F_C = critical F-value for main effect C
critical_F_AB = critical F-value for A*B interaction
critical_F_AC = critical F-value for A*C interaction
critical_F_BC = critical F-value for B*C interaction
critical_F_ABC = critical F-value for A*B*C interaction
power_A = power for main effect A
power_B = power for main effect B
power_C = power for main effect C
power_AB = power for A*B interaction
power_AC = power for A*C interaction
power_BC = power for B*C interaction
power_ABC = power for A*B*C interaction
df_A = degrees of freedom for main effect A
df_B = degrees of freedom for main effect B
df_C = degrees of freedom for main effect C
df_AB = degrees of freedom for A*B interaction
df_AC = degrees of freedom for A*C interaction
df_BC = degrees of freedom for B*C interaction
df_ABC = degrees of freedom for A*B*C interaction
df_error = degrees of freedom for error term
eta_p_2_A = partial eta-squared for main effect A
eta_p_2_B = partial eta-squared for main effect B
eta_p_2_C = partial eta-squared for main effect C
eta_p_2_AB = partial eta-squared for A*B interaction
eta_p_2_AC = partial eta-squared for A*C interaction
eta_p_2_BC = partial eta-squared for B*C interaction
eta_p_2_ABC = partial eta-squared for A*B*C interaction
mean_mat = matrix of the means
}
\description{
Analytic power calculation for three-way between designs.
}
\section{References}{
to be added
}
\examples{
design_result <- ANOVA_design(design = "2b*2b*2b", n = 40,
mu = c(1, 0, 1, 0, 0, 1, 1, 0), sd = 2,
labelnames = c("condition", "cheerful", "sad",
"voice", "human", "robot", "color", "green", "red"))
power_result <- power_threeway_between(design_result, alpha_level = 0.05)
}
|
seed <- 166
log.wt <- -14.53895858703376
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225598.74039258796
df.resid <- 35402
df <- 165
coefs <- c(6.752096077194391, 5.786286453053833, 5.80793783396487, 5.431187414091852, 5.076796977997395, 4.85852240680021, 4.834342743846192, 4.646813952226384, 4.387690925313685, 4.250840623686072, 4.311535438939833, 4.166412825145408, 4.0012267753212605, 3.9600196551574087, 3.7291163252402852, 3.5284191962561446, 3.2382467441276597, 2.935999846458411, 2.4588392421614547, 2.0568895917876335, 1.6092434480734246, 0.9472390894085613, 1.0744889711312595, 0.1774277496202695, 0.3711913485730184, -0.9760518536603522, -0.11873640177599196, 0.9779481155151475, 1.0374838943552616, -1.0811526961246343, -2.916743757664183, -2.4484807730542886, -0.7405689372929743, 0.8171113598359842, 1.1524724833955362, -0.7974194326242581, -0.5635417504424219, -0.7207134016516252, 0.10297819407493314, -0.5101551018139282, 0.8856716091194293, 0.8650449747304934, -0.8431687128433288, -1.489911417975853, -0.7920424770913734, -0.7019907696147576, -0.6542058233394689, -2.150401696539895e-2, 0.6826082019878433, -0.6809019393452693, 0.40642458096568623, 0.698747470532092, -2.3691441460426637, 1.7280678966763003, 0.9018638981680268, 1.0817378804865607, -1.3102634022037163, -0.6032506178702078, -6.878938394124436e-2, 1.108953780787598, 0.6786399832354917, 0.5860122063428878, -1.7553117650356926, -0.3078489931429703, -0.6559151688367417, -7.764803163183828e-2, 0.7095084724694118, -0.4801427177627841, -1.0662783472488535, -0.6449212925337858, -1.968669720136223, -0.35795283659645916, 0.6141368963479837, 0.8718187932558658, 0.6016500371339472, -0.8165460522735207, -1.2052054645820662, -1.199036425441026, 6.252617174498568e-2, 0.6186314177610178, 1.0437305263167924, 0.21551926128364182, 0.29771381318541046, -1.4555676241551232, -0.18370563933856698, 0.4123434926412146, 1.0923576264086676, 0.3820539679393771, 0.831690967678715, -1.8903037164261443, 0.4320218561377863, 0.9417026667772584, 0.7604618988928783, 0.4087803236633902, -0.3541837056009758, 1.2726391675865931, -0.3654536492557123, 
0.47141354646520983, -0.4103139006942764, -0.5648522181473606, 0.34324854947219086, -0.6606971044632008, 0.9534018036560589, -0.13402447102065165, 0.6978451945946401, 0.9018106076099333, 1.1648200032576108, -0.47336537027015985, -0.4254013027711664, -0.96457238718344, 0.28656956273431144, 0.6896431449428491, 1.5780395940797969, -0.4620992119830079, -0.1955716799511525, -1.06929134082548, 0.6337544136090211, -0.413598129499325, 0.41242078458838893, 0.31452503634305556, -0.552057151968795, -0.5742238783707534, -1.079027378481093, -0.8218849512984338, 0.3413075278395119, 0.8671729994499991, -4.651831510360828e-3, 0.9355650544657175, -0.5297784423232575, -0.29802218111499756, 0.17790381174940575, 0.8175598330519018, 0.5802795271189165, 0.36076018172197455, 8.357995175085481e-2, 1.1237848698523922, -0.3803466060774579, 0.9739502565119037, 0.854456457254048, 0.8586041886305865, 0.6482856375068582, -0.7012505246729229, -1.010711421976936, 0.7824090653469955, 0.22844705583671093, 0.4728137820593607, -0.23430028199567576, -0.7984781298192913, -1.9187739526836582, 1.1647753991768888, 9.814558388356724e-2, 1.1830384058770882, -0.42765570734408703, 7.257287350448675e-2, -0.2232598123127731, -1.3677053521250253, -1.0651435193232963, 0.741301457724867, 1.024129774914286, -5.241212228630579e-2, 1.511943195677635, -0.22807434047587896, -5.178826134499013e-2, 2.5711066407950824e-3, 1.0825096425840197)
| /analysis/boot/boot166.R | no_license | patperry/interaction-proc | R | false | false | 3,765 | r | seed <- 166
log.wt <- -14.53895858703376
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225598.74039258796
df.resid <- 35402
df <- 165
coefs <- c(6.752096077194391, 5.786286453053833, 5.80793783396487, 5.431187414091852, 5.076796977997395, 4.85852240680021, 4.834342743846192, 4.646813952226384, 4.387690925313685, 4.250840623686072, 4.311535438939833, 4.166412825145408, 4.0012267753212605, 3.9600196551574087, 3.7291163252402852, 3.5284191962561446, 3.2382467441276597, 2.935999846458411, 2.4588392421614547, 2.0568895917876335, 1.6092434480734246, 0.9472390894085613, 1.0744889711312595, 0.1774277496202695, 0.3711913485730184, -0.9760518536603522, -0.11873640177599196, 0.9779481155151475, 1.0374838943552616, -1.0811526961246343, -2.916743757664183, -2.4484807730542886, -0.7405689372929743, 0.8171113598359842, 1.1524724833955362, -0.7974194326242581, -0.5635417504424219, -0.7207134016516252, 0.10297819407493314, -0.5101551018139282, 0.8856716091194293, 0.8650449747304934, -0.8431687128433288, -1.489911417975853, -0.7920424770913734, -0.7019907696147576, -0.6542058233394689, -2.150401696539895e-2, 0.6826082019878433, -0.6809019393452693, 0.40642458096568623, 0.698747470532092, -2.3691441460426637, 1.7280678966763003, 0.9018638981680268, 1.0817378804865607, -1.3102634022037163, -0.6032506178702078, -6.878938394124436e-2, 1.108953780787598, 0.6786399832354917, 0.5860122063428878, -1.7553117650356926, -0.3078489931429703, -0.6559151688367417, -7.764803163183828e-2, 0.7095084724694118, -0.4801427177627841, -1.0662783472488535, -0.6449212925337858, -1.968669720136223, -0.35795283659645916, 0.6141368963479837, 0.8718187932558658, 0.6016500371339472, -0.8165460522735207, -1.2052054645820662, -1.199036425441026, 6.252617174498568e-2, 0.6186314177610178, 1.0437305263167924, 0.21551926128364182, 0.29771381318541046, -1.4555676241551232, -0.18370563933856698, 0.4123434926412146, 1.0923576264086676, 0.3820539679393771, 0.831690967678715, -1.8903037164261443, 0.4320218561377863, 0.9417026667772584, 0.7604618988928783, 0.4087803236633902, -0.3541837056009758, 1.2726391675865931, -0.3654536492557123, 
0.47141354646520983, -0.4103139006942764, -0.5648522181473606, 0.34324854947219086, -0.6606971044632008, 0.9534018036560589, -0.13402447102065165, 0.6978451945946401, 0.9018106076099333, 1.1648200032576108, -0.47336537027015985, -0.4254013027711664, -0.96457238718344, 0.28656956273431144, 0.6896431449428491, 1.5780395940797969, -0.4620992119830079, -0.1955716799511525, -1.06929134082548, 0.6337544136090211, -0.413598129499325, 0.41242078458838893, 0.31452503634305556, -0.552057151968795, -0.5742238783707534, -1.079027378481093, -0.8218849512984338, 0.3413075278395119, 0.8671729994499991, -4.651831510360828e-3, 0.9355650544657175, -0.5297784423232575, -0.29802218111499756, 0.17790381174940575, 0.8175598330519018, 0.5802795271189165, 0.36076018172197455, 8.357995175085481e-2, 1.1237848698523922, -0.3803466060774579, 0.9739502565119037, 0.854456457254048, 0.8586041886305865, 0.6482856375068582, -0.7012505246729229, -1.010711421976936, 0.7824090653469955, 0.22844705583671093, 0.4728137820593607, -0.23430028199567576, -0.7984781298192913, -1.9187739526836582, 1.1647753991768888, 9.814558388356724e-2, 1.1830384058770882, -0.42765570734408703, 7.257287350448675e-2, -0.2232598123127731, -1.3677053521250253, -1.0651435193232963, 0.741301457724867, 1.024129774914286, -5.241212228630579e-2, 1.511943195677635, -0.22807434047587896, -5.178826134499013e-2, 2.5711066407950824e-3, 1.0825096425840197)
|
library(dplyr)
# Two-letter codes of the states whose NWIS gages to fetch.
state_cds <- c("FL","GA","AL","SC")
# NWIS parameter code 00065 = gage height (stage).
pCodes = c("00065")
# Storm window start; only sites/data spanning this date are kept.
dates <- list(start = "2018-10-09 12:00:00")
# Output directory for the saved .rds files.
path_to_save <- "vizstorm_sites/michael_data"
# Fetch NWIS stream gages with stage data for a set of states and save
# site metadata plus instantaneous values recorded since the storm start.
#
# @param state_cds character vector of two-letter state codes.
# @param dates list with element `start` ("YYYY-MM-DD HH:MM:SS"); only
#   sites active across this date are kept, and data are pulled from it
#   onward.
# @param pCodes NWIS parameter code(s), e.g. "00065" for gage height.
# @param path_to_save directory (created if needed) that receives
#   all_sites.rds and all_flow.rds.
# @return invisibly NULL; called for its side effects (two .rds files).
fetch_sites_from_states <- function(state_cds, dates, pCodes, path_to_save) {
  # Cast wide net for all NWIS sites with stage data in those states
  sites_df <- dplyr::bind_rows(lapply(state_cds, function(cd) {
    dataRetrieval::whatNWISdata(stateCd = cd, parameterCd = pCodes, service = "uv") %>%
      dplyr::select(site_no, station_nm, dec_lat_va, dec_long_va, site_tp_cd, end_date, begin_date, count_nu)
  }))
  # Get NWS flood stage table
  nws_flood_stage_list <- jsonlite::fromJSON("https://waterwatch.usgs.gov/webservices/floodstage?format=json")
  nws_flood_stage_table <- nws_flood_stage_list[["sites"]]
  # Filtering applied to every storm
  sites_filtered <- sites_df %>%
    # Drop any sites without flood stage data from NWS
    inner_join(nws_flood_stage_table, by = 'site_no') %>%
    dplyr::filter(!is.na(flood_stage)) %>%
    # we only need stream sites
    dplyr::filter(site_tp_cd == "ST") %>%
    # site must have been online before the storm start and still
    # reporting at/after it (also captures gages that went out during
    # the storm, and excludes gages stood up after a historical storm)
    dplyr::filter(end_date >= as.Date(dates$start), begin_date <= as.Date(dates$start))
  sites <- sites_filtered %>%
    distinct()
  # Attach drainage area from the NWIS site service
  more_site_info <- dataRetrieval::readNWISsite(sites$site_no)
  sites <- dplyr::left_join(sites, dplyr::select(more_site_info, site_no, drain_area_va), by = "site_no")
  # dir.create(recursive = TRUE) replaces the hand-rolled loop that
  # split the path and created each parent folder one at a time.
  dir.create(path_to_save, recursive = TRUE, showWarnings = FALSE)
  saveRDS(sites, file.path(path_to_save, "all_sites.rds"))
  # Instantaneous values from the storm start onward for the kept sites
  all_flow <- dataRetrieval::readNWISuv(siteNumbers = sites$site_no,
                                        parameterCd = pCodes,
                                        startDate = as.Date(dates$start))
  saveRDS(all_flow, file.path(path_to_save, "all_flow.rds"))
  invisible(NULL)
}
# Kick off the download with the parameters defined above.
fetch_sites_from_states(state_cds = state_cds,
dates = dates,
path_to_save = path_to_save,
pCodes = pCodes)
| /vizstorm_sites/get_raw_data.R | no_license | ldecicco-USGS/viz-scratch | R | false | false | 2,568 | r | library(dplyr)
state_cds <- c("FL","GA","AL","SC")
pCodes = c("00065")
dates <- list(start = "2018-10-09 12:00:00")
path_to_save <- "vizstorm_sites/michael_data"
#' Find NWIS stream gages with NWS flood stages in the given states and save
#' their site metadata and unit-value records as RDS files.
#'
#' Side effects: creates `path_to_save` (including any missing parent
#' directories) and writes "all_sites.rds" and "all_flow.rds" inside it.
#' Requires network access to NWIS and the NWS waterwatch flood-stage service.
#'
#' @param state_cds character vector of state abbreviations (e.g. "FL")
#' @param dates list with a `start` element coercible with as.Date()
#' @param pCodes NWIS parameter code(s), e.g. "00065" (gage height)
#' @param path_to_save directory where the RDS files are written
fetch_sites_from_states <- function(state_cds, dates, pCodes, path_to_save) {
  # Cast a wide net: every NWIS site in each state with unit-value data for pCodes
  sites_df <- dplyr::bind_rows(lapply(state_cds, function(cd) {
    dataRetrieval::whatNWISdata(stateCd = cd, parameterCd = pCodes, service = "uv") %>%
      dplyr::select(site_no, station_nm, dec_lat_va, dec_long_va, site_tp_cd, end_date, begin_date, count_nu)
  }))
  # Get NWS flood stage table
  nws_flood_stage_list <- jsonlite::fromJSON("https://waterwatch.usgs.gov/webservices/floodstage?format=json")
  nws_flood_stage_table <- nws_flood_stage_list[["sites"]]
  # Hoist the repeated date coercion out of the filter expressions.
  storm_start <- as.Date(dates$start)
  # Filtering applied to every storm
  sites_filtered <- sites_df %>%
    # Filter out any sites that don't have flood stage data from NWS
    inner_join(nws_flood_stage_table, by = 'site_no') %>%
    dplyr::filter(!is.na(flood_stage)) %>%
    # we only need stream sites
    dplyr::filter(site_tp_cd == "ST") %>%
    # Keep sites active over the storm start: still reporting at/after the
    # start (catches gages that went down during the storm) and installed
    # before it (matters when GIF'ing a historical storm).
    dplyr::filter(end_date >= storm_start, begin_date <= storm_start)
  sites <- sites_filtered %>%
    distinct()
  more_site_info <- dataRetrieval::readNWISsite(sites$site_no)
  sites <- dplyr::left_join(sites, dplyr::select(more_site_info, site_no, drain_area_va), by = "site_no")
  # Create the output directory; recursive = TRUE replaces the previous manual
  # walk over each path component with repeated dir.create() calls.
  dir.create(path = path_to_save, showWarnings = FALSE, recursive = TRUE)
  saveRDS(sites, file.path(path_to_save, "all_sites.rds"))
  all_flow <- dataRetrieval::readNWISuv(siteNumbers = sites$site_no,
                                        parameterCd = pCodes,
                                        startDate = storm_start)
  saveRDS(all_flow, file.path(path_to_save, "all_flow.rds"))
}
fetch_sites_from_states(state_cds = state_cds,
dates = dates,
path_to_save = path_to_save,
pCodes = pCodes)
|
-## Put comments here that give an overall description of what your
-## functions do
+## The following Functions that cache the inverse of a matrix
+##
+## Usage example:
+##
+## > source('cachematrix.R')
+## > m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), c(2, 2)))
+## > cacheSolve(m)
+## [,1] [,2]
+## [1,] 0.5 0.0
+## [2,] 0.0 0.5
-## Write a short comment describing this function
+## Create a special "matrix", which is a list containing
+## a function to
+## - set the value of the matrix
+## - get the value of the matrix
+## - set the value of the inverse matrix
+## - get the value of the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
-
+ i <- NULL
+ set <- function(y) {
+ x <<- y
+ i <<- NULL
+ }
+ get <- function() x
+ setinverse <- function(inv) i <<- inv
+ getinverse <- function() i
+ list(
+ set = set,
+ get = get,
+ setinverse = setinverse,
+ getinverse = getinverse
+ )
}
-## Write a short comment describing this function
+## Calculate the inverse of the special "matrix" created with the above
+## function, reusing cached result if it is available
cacheSolve <- function(x, ...) {
- ## Return a matrix that is the inverse of 'x'
-}
+ i <- x$getinverse()
+ if(!is.null(i)) {
+ message("getting cached data")
+ return(i)
+ }
+ m <- x$get()
+ i <- solve(m, ...)
+ x$setinverse(i)
+ i
+}
| /cachematrix.R | no_license | seethapr/ProgrammingAssignment2 | R | false | false | 1,452 | r | -## Put comments here that give an overall description of what your
-## functions do
+## The following Functions that cache the inverse of a matrix
+##
+## Usage example:
+##
+## > source('cachematrix.R')
+## > m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), c(2, 2)))
+## > cacheSolve(m)
+## [,1] [,2]
+## [1,] 0.5 0.0
+## [2,] 0.0 0.5
-## Write a short comment describing this function
+## Create a special "matrix", which is a list containing
+## a function to
+## - set the value of the matrix
+## - get the value of the matrix
+## - set the value of the inverse matrix
+## - get the value of the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
-
+ i <- NULL
+ set <- function(y) {
+ x <<- y
+ i <<- NULL
+ }
+ get <- function() x
+ setinverse <- function(inv) i <<- inv
+ getinverse <- function() i
+ list(
+ set = set,
+ get = get,
+ setinverse = setinverse,
+ getinverse = getinverse
+ )
}
-## Write a short comment describing this function
+## Calculate the inverse of the special "matrix" created with the above
+## function, reusing cached result if it is available
cacheSolve <- function(x, ...) {
- ## Return a matrix that is the inverse of 'x'
-}
+ i <- x$getinverse()
+ if(!is.null(i)) {
+ message("getting cached data")
+ return(i)
+ }
+ m <- x$get()
+ i <- solve(m, ...)
+ x$setinverse(i)
+ i
+}
|
library(caret)
library(tm)
library(SnowballC)
library(arm)
# Training data.
data <- c('BJ Habibie Dikabarkan Meninggal.',
'Dalam postingan tersebut disampaikan bahwa BJ Habibie sudah didampingi anaknya.',
'Sebelumnya BJ Habibie tengah menjalani perawatan di Munich Jerman.',
'Presiden ketiga Indonesia itu didiagnosis mengalami kebocoran pada klep jantungnya.',
'Presiden Joko Widodo sempat menghubungi BJ Habibie secara langsung dan berbincang sejenak.',
'Melalui pembicaraan tersebut, Presiden menyanggupi permintaan Habibie yang menginginkan adanya tim dokter kepresidenan dan Paspampres untuk hadir di Jerman saat dilakukan tindakan medis.',
'Untuk mendampingi Habibie selama dilakukan tindakan medis.',
'Presiden Joko Widodo sudah mengutus Prof. Dr. Lukman Hakim, SpPD-KKV, SpJP, Kger, seorang spesialis jantung dan pembuluh darah dari tim dokter kepresidenan, untuk berangkat ke Jerman, termasuk anggota Paspampres juga diberangkatkan.',
'Penyakit jantung yang membuat presiden ketiga Indonesia yang membuat meninggal.',
'sebelum pergi ke jerman pak habibi berpesan seolah mau meninggal.',
'Kondisi kesehatan Presiden ketiga Republik Indonesia, BJ Habibie semakin membaik.',
'Hal itu terjadi setelah mendapatkan perawatan di rumah sakit di Munchen, Jerman.',
'Eyang Habibie sudah merasa lebih sehat tapi masih menjalankan pemeriksaan dan istirahat di RS di Muchen.',
'Meskipun sudah merasa lebih sehat, The Habibie Center tetap meminta doa dari masyarakat Indonesia untuk kesehatan BJ Habibie.',
'Presiden ketiga Indonesia tersebut sudah di kabarkan dokter kondisinya terus membaik.',
'Melalui Menteri Luar Negeri, Presiden juga telah menginstruksikan kepada Duta Besar Republik Indonesia di Jerman untuk terus memantau kondisi terkini dari Habibie dan melaporkan langsung kepadanya.',
'Selain itu, dirinya memerintahkan Menteri Sekretaris Negara untuk memastikan bahwa pemerintah mampu memberikan pelayanan terbaik dan menanggung seluruh biaya perawatan Presiden RI ke-3 itu sebagaimana diatur dalam Undang-Undang Nomor 7 Tahun 1978 tentang Hak Keuangan/Administratif Presiden dan Wakil Presiden serta Bekas Presiden dan Wakil Presiden Republik Indonesia.',
'Presiden telah memerintahkan untuk memantau dan memberikan pelayanan terbaik kepada Habibie.',
'Presiden sendiri berharap agar B.J. Habibie dapat kembali beraktivitas seperti sedia kala. Melalui sambungan telepon sore ini, ia bersama dengan seluruh rakyat Indonesia juga sekaligus mendoakan kesembuhan beliau.',
'Kita semua di Indonesia, seluruh rakyat Indonesia, mendoakan Bapak. Semoga segera sehat kembali, bisa beraktivitas dan kembali ke Indonesia.')
corpus <- VCorpus(VectorSource(data))
# Create a document term matrix.
tdm <- DocumentTermMatrix(corpus, list(removePunctuation = TRUE, stopwords = TRUE, stemming = TRUE, removeNumbers = TRUE))
# Convert to a data.frame for training and assign a classification (factor) to each document.
train <- as.matrix(tdm)
train <- cbind(train, c(0, 1))
colnames(train)[ncol(train)] <- 'y'
train <- as.data.frame(train)
train$y <- as.factor(train$y)
data
train
# Train.
fit <- train(y ~ ., data = train, method = 'bayesglm')
# Check accuracy on training.
predict(fit, newdata = train)
# Test data.
data2 <- c('Entah siapa yang memulai menyebarkan, namun isu tersebut berkembang dengan cepat.',
'Sejumlah pengguna twitter pun seakan-akan berlomba menyampaikan ucapan belasungkawanya atas meninggalnya Presiden Habibie tersebut.',
'Habibie dikabarkan meninggal setelah sebelumnya kritis di sebuah rumah sakit di Jerman.',
'pusat penelitian yang dibangun oleh Habibie, yakni The Habibie Center, melalui akun twitter resminya, membantah kabar meninggalnya BJ Habibie tersebut.',
'Senada dengan itu, artis Melanie Soebono yang merupakan cucu Presiden Habibie, juga membantah kabar tersebut.',
'alam keterangan yang dituliskan The Habibie Center, disebutkan bahwa B.J. Habibie dalam kondisi sehat walafiat, dan sekarang sedang berada di Jerman.',
'Alhamdulillah Bapak BJ Habibie dalam keadaan sehat walafiat. Beliau masih di Jerman sesudah merayakan Tahun Baru dengan cucu-cucu beliau.',
'amun klarifikasi akun Facebook The Habibie Center sedikit membuat banyak orang terkejut.',
'Pasalnya, saat kabar tersebut berhembus, Habibie malah dikatakan menghadiri sebuah acara penghargaan.',
'Tadi malam beliau sangat senang ngobrol dan tertawa lepas dengan Reza dan Pandji LIVE dari Kediaman di Patra Kuningan di acara Indonesia Box Office Movie Awards di SCTV.')
corpus <- VCorpus(VectorSource(data2))
tdm <- DocumentTermMatrix(corpus, control = list(dictionary = Terms(tdm), removePunctuation = TRUE, stopwords = TRUE, stemming = TRUE, removeNumbers = TRUE))
test <- as.matrix(tdm)
# Check accuracy on test.
predict(fit, newdata = test)
| /TugasHoax.R | no_license | RohmadSung/tugashoax | R | false | false | 5,069 | r | library(caret)
library(tm)
library(SnowballC)
library(arm)
# Training data.
data <- c('BJ Habibie Dikabarkan Meninggal.',
'Dalam postingan tersebut disampaikan bahwa BJ Habibie sudah didampingi anaknya.',
'Sebelumnya BJ Habibie tengah menjalani perawatan di Munich Jerman.',
'Presiden ketiga Indonesia itu didiagnosis mengalami kebocoran pada klep jantungnya.',
'Presiden Joko Widodo sempat menghubungi BJ Habibie secara langsung dan berbincang sejenak.',
'Melalui pembicaraan tersebut, Presiden menyanggupi permintaan Habibie yang menginginkan adanya tim dokter kepresidenan dan Paspampres untuk hadir di Jerman saat dilakukan tindakan medis.',
'Untuk mendampingi Habibie selama dilakukan tindakan medis.',
'Presiden Joko Widodo sudah mengutus Prof. Dr. Lukman Hakim, SpPD-KKV, SpJP, Kger, seorang spesialis jantung dan pembuluh darah dari tim dokter kepresidenan, untuk berangkat ke Jerman, termasuk anggota Paspampres juga diberangkatkan.',
'Penyakit jantung yang membuat presiden ketiga Indonesia yang membuat meninggal.',
'sebelum pergi ke jerman pak habibi berpesan seolah mau meninggal.',
'Kondisi kesehatan Presiden ketiga Republik Indonesia, BJ Habibie semakin membaik.',
'Hal itu terjadi setelah mendapatkan perawatan di rumah sakit di Munchen, Jerman.',
'Eyang Habibie sudah merasa lebih sehat tapi masih menjalankan pemeriksaan dan istirahat di RS di Muchen.',
'Meskipun sudah merasa lebih sehat, The Habibie Center tetap meminta doa dari masyarakat Indonesia untuk kesehatan BJ Habibie.',
'Presiden ketiga Indonesia tersebut sudah di kabarkan dokter kondisinya terus membaik.',
'Melalui Menteri Luar Negeri, Presiden juga telah menginstruksikan kepada Duta Besar Republik Indonesia di Jerman untuk terus memantau kondisi terkini dari Habibie dan melaporkan langsung kepadanya.',
'Selain itu, dirinya memerintahkan Menteri Sekretaris Negara untuk memastikan bahwa pemerintah mampu memberikan pelayanan terbaik dan menanggung seluruh biaya perawatan Presiden RI ke-3 itu sebagaimana diatur dalam Undang-Undang Nomor 7 Tahun 1978 tentang Hak Keuangan/Administratif Presiden dan Wakil Presiden serta Bekas Presiden dan Wakil Presiden Republik Indonesia.',
'Presiden telah memerintahkan untuk memantau dan memberikan pelayanan terbaik kepada Habibie.',
'Presiden sendiri berharap agar B.J. Habibie dapat kembali beraktivitas seperti sedia kala. Melalui sambungan telepon sore ini, ia bersama dengan seluruh rakyat Indonesia juga sekaligus mendoakan kesembuhan beliau.',
'Kita semua di Indonesia, seluruh rakyat Indonesia, mendoakan Bapak. Semoga segera sehat kembali, bisa beraktivitas dan kembali ke Indonesia.')
corpus <- VCorpus(VectorSource(data))
# Create a document term matrix.
tdm <- DocumentTermMatrix(corpus, list(removePunctuation = TRUE, stopwords = TRUE, stemming = TRUE, removeNumbers = TRUE))
# Convert to a data.frame for training and assign a classification (factor) to each document.
train <- as.matrix(tdm)
# NOTE(review): c(0, 1) is recycled across all 20 training documents, so the
# labels alternate 0,1,0,1,... down the rows. Confirm that this matches the
# intended hoax/non-hoax assignment of the sentences above — it does NOT give
# "first half = class 0, second half = class 1".
train <- cbind(train, c(0, 1))
colnames(train)[ncol(train)] <- 'y'
train <- as.data.frame(train)
train$y <- as.factor(train$y)
data
train
# Train.
fit <- train(y ~ ., data = train, method = 'bayesglm')
# Check accuracy on training.
predict(fit, newdata = train)
# Test data.
data2 <- c('Entah siapa yang memulai menyebarkan, namun isu tersebut berkembang dengan cepat.',
'Sejumlah pengguna twitter pun seakan-akan berlomba menyampaikan ucapan belasungkawanya atas meninggalnya Presiden Habibie tersebut.',
'Habibie dikabarkan meninggal setelah sebelumnya kritis di sebuah rumah sakit di Jerman.',
'pusat penelitian yang dibangun oleh Habibie, yakni The Habibie Center, melalui akun twitter resminya, membantah kabar meninggalnya BJ Habibie tersebut.',
'Senada dengan itu, artis Melanie Soebono yang merupakan cucu Presiden Habibie, juga membantah kabar tersebut.',
'alam keterangan yang dituliskan The Habibie Center, disebutkan bahwa B.J. Habibie dalam kondisi sehat walafiat, dan sekarang sedang berada di Jerman.',
'Alhamdulillah Bapak BJ Habibie dalam keadaan sehat walafiat. Beliau masih di Jerman sesudah merayakan Tahun Baru dengan cucu-cucu beliau.',
'amun klarifikasi akun Facebook The Habibie Center sedikit membuat banyak orang terkejut.',
'Pasalnya, saat kabar tersebut berhembus, Habibie malah dikatakan menghadiri sebuah acara penghargaan.',
'Tadi malam beliau sangat senang ngobrol dan tertawa lepas dengan Reza dan Pandji LIVE dari Kediaman di Patra Kuningan di acara Indonesia Box Office Movie Awards di SCTV.')
corpus <- VCorpus(VectorSource(data2))
tdm <- DocumentTermMatrix(corpus, control = list(dictionary = Terms(tdm), removePunctuation = TRUE, stopwords = TRUE, stemming = TRUE, removeNumbers = TRUE))
test <- as.matrix(tdm)
# Check accuracy on test.
predict(fit, newdata = test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_cell_count_matrix}
\alias{get_cell_count_matrix}
\title{Get cell counts from seurat object}
\usage{
get_cell_count_matrix(obj, row_var, col_var)
}
\arguments{
\item{row_var}{meta.data column to group for counts. will
be rows in the output matrix}
\item{col_var}{meta.data column to group for counts. will
be columns in the output matrix}
\item{obj}{A Seurat object.}
}
\description{
Get cell counts from seurat object
}
| /man/get_cell_count_matrix.Rd | no_license | standardgalactic/scbp | R | false | true | 516 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_cell_count_matrix}
\alias{get_cell_count_matrix}
\title{Get cell counts from seurat object}
\usage{
get_cell_count_matrix(obj, row_var, col_var)
}
\arguments{
\item{row_var}{meta.data column to group for counts. will
be rows in the output matrix}
\item{col_var}{meta.data column to group for counts. will
be columns in the output matrix}
\item{obj}{A Seurat object.}
}
\description{
Get cell counts from seurat object
}
|
#!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set the precision to 16 digits:
options( digits = 16L );
#' Run benchmarks.
#'
#' @examples
#' main();
main <- function() {
# Define benchmark parameters:
name <- "log1p";
iterations <- 1000000L;
repeats <- 3L;
#' Print the TAP version.
#'
#' @examples
#' print_version();
print_version <- function() {
cat( "TAP version 13\n" );
}
#' Print the TAP summary.
#'
#' @param total Total number of tests.
#' @param passing Total number of passing tests.
#'
#' @examples
#' print_summary( 3, 3 );
print_summary <- function( total, passing ) {
cat( "#\n" );
cat( paste0( "1..", total, "\n" ) ); # TAP plan
cat( paste0( "# total ", total, "\n" ) );
cat( paste0( "# pass ", passing, "\n" ) );
cat( "#\n" );
cat( "# ok\n" );
}
#' Print benchmark results.
#'
#' @param iterations Number of iterations.
#' @param elapsed Elapsed time in seconds.
#'
#' @examples
#' print_results( 10000L, 0.131009101868 );
print_results <- function( iterations, elapsed ) {
rate <- iterations / elapsed;
cat( " ---\n" );
cat( paste0( " iterations: ", iterations, "\n" ) );
cat( paste0( " elapsed: ", elapsed, "\n" ) );
cat( paste0( " rate: ", rate, "\n" ) );
cat( " ...\n" );
}
#' Run a benchmark.
#'
#' ## Notes
#'
#' * We compute and return a total "elapsed" time, rather than the minimum
#' evaluation time, to match benchmark results in other languages (e.g.,
#' Python).
#'
#'
#' @param iterations Number of Iterations.
#' @return Elapsed time in seconds.
#'
#' @examples
#' elapsed <- benchmark( 10000L );
benchmark <- function( iterations ) {
# Run the benchmarks:
results <- microbenchmark::microbenchmark( log1p( (1000.0*runif(1)) - 0.0 ), times = iterations );
# Sum all the raw timing results to get a total "elapsed" time:
elapsed <- sum( results$time );
# Convert the elapsed time from nanoseconds to seconds:
elapsed <- elapsed / 1.0e9;
return( elapsed );
}
print_version();
for ( i in 1:repeats ) {
cat( paste0( "# r::", name, "\n" ) );
elapsed <- benchmark( iterations );
print_results( iterations, elapsed );
cat( paste0( "ok ", i, " benchmark finished", "\n" ) );
}
print_summary( repeats, repeats );
}
main();
| /lib/node_modules/@stdlib/math/base/special/log1p/benchmark/r/benchmark.R | permissive | stdlib-js/stdlib | R | false | false | 2,854 | r | #!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set the precision to 16 digits:
options( digits = 16L );
#' Run benchmarks.
#'
#' @examples
#' main();
main <- function() {
	# Define benchmark parameters:
	name <- "log1p";
	iterations <- 1000000L;
	repeats <- 3L;

	#' Print the TAP version.
	#'
	#' @examples
	#' print_version();
	print_version <- function() {
		cat( "TAP version 13\n" );
	}

	#' Print the TAP summary.
	#'
	#' @param total Total number of tests.
	#' @param passing Total number of passing tests.
	#'
	#' @examples
	#' print_summary( 3, 3 );
	print_summary <- function( total, passing ) {
		cat( "#\n" );
		cat( paste0( "1..", total, "\n" ) ); # TAP plan
		cat( paste0( "# total ", total, "\n" ) );
		cat( paste0( "# pass ", passing, "\n" ) );
		cat( "#\n" );
		cat( "# ok\n" );
	}

	#' Print benchmark results.
	#'
	#' @param iterations Number of iterations.
	#' @param elapsed Elapsed time in seconds.
	#'
	#' @examples
	#' print_results( 10000L, 0.131009101868 );
	print_results <- function( iterations, elapsed ) {
		rate <- iterations / elapsed;
		cat( "  ---\n" );
		cat( paste0( "  iterations: ", iterations, "\n" ) );
		cat( paste0( "  elapsed: ", elapsed, "\n" ) );
		cat( paste0( "  rate: ", rate, "\n" ) );
		cat( "  ...\n" );
	}

	#' Run a benchmark.
	#'
	#' ## Notes
	#'
	#' * We compute and return a total "elapsed" time, rather than the minimum
	#'   evaluation time, to match benchmark results in other languages (e.g.,
	#'   Python).
	#'
	#'
	#' @param iterations Number of iterations.
	#' @return Elapsed time in seconds.
	#'
	#' @examples
	#' elapsed <- benchmark( 10000L );
	benchmark <- function( iterations ) {
		# Run the benchmarks:
		results <- microbenchmark::microbenchmark( log1p( (1000.0*runif(1)) - 0.0 ), times = iterations );

		# Sum all the raw timing results to get a total "elapsed" time:
		elapsed <- sum( results$time );

		# Convert the elapsed time from nanoseconds to seconds:
		elapsed <- elapsed / 1.0e9;

		return( elapsed );
	}

	print_version();
	# Use seq_len() rather than `1:repeats` so the loop degrades safely to zero
	# iterations if `repeats` were ever set to 0.
	for ( i in seq_len( repeats ) ) {
		cat( paste0( "# r::", name, "\n" ) );
		elapsed <- benchmark( iterations );
		print_results( iterations, elapsed );
		cat( paste0( "ok ", i, " benchmark finished", "\n" ) );
	}
	print_summary( repeats, repeats );
}
main();
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/env.overlap.R
\name{env.overlap}
\alias{env.overlap}
\title{Calculates overlap between models in environment space using latin hypercube sampling}
\usage{
env.overlap(model.1, model.2, env, tolerance = 0.001, max.reps = 10,
cor.method = "spearman")
}
\arguments{
\item{model.1}{An enmtools.model object model object that can be projected using the predict() function}
\item{env}{A raster or raster stack of environmental data.}
\item{tolerance}{How close do successive overlap metrics have to be before we decide we're close enough to the final answer}
\item{max.reps}{Maximum number of attempts that will be made to find suitable starting conditions}
\item{cor.method}{Which method to use for calculating correlations between models}
\item{model.2}{Another enmtools.model object or other model object that can be projected using the predict() function}
}
\description{
Calculates overlap between models in environment space using latin hypercube sampling
}
| /man/env.overlap.Rd | no_license | nmatzke/ENMTools | R | false | true | 1,044 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/env.overlap.R
\name{env.overlap}
\alias{env.overlap}
\title{Calculates overlap between models in environment space using latin hypercube sampling}
\usage{
env.overlap(model.1, model.2, env, tolerance = 0.001, max.reps = 10,
cor.method = "spearman")
}
\arguments{
\item{model.1}{An enmtools.model object model object that can be projected using the predict() function}
\item{env}{A raster or raster stack of environmental data.}
\item{tolerance}{How close do successive overlap metrics have to be before we decide we're close enough to the final answer}
\item{max.reps}{Maximum number of attempts that will be made to find suitable starting conditions}
\item{cor.method}{Which method to use for calculating correlations between models}
\item{model.2}{Another enmtools.model object or other model object that can be projected using the predict() function}
}
\description{
Calculates overlap between models in environment space using latin hypercube sampling
}
|
library("ade4")
library("gdata")
library("lme4")
library("nlme")
library("car")
library("gplots")
library("gdata")
library("made4")
library("clValid")
library("lattice")
library("ggplot2")
library("reshape2")
setwd("~/Documents/2016_02_17 Corps lipidiques MJ/R_analysis")
#****** Spectral Counting analysis of proteins from lipid bodies ****
# import data obtained from X!TandemPipeline
#emPAI<- read.csv2(file="emPAI_lipid_bodies.csv", header = TRUE, sep="\t", dec=",", stringsAsFactors=FALSE) #443 prots
dataSC<- read.csv2(file="SC_lipid_bodies_norm.csv", header = TRUE, sep="\t", dec=",", stringsAsFactors=FALSE) #443 prots
voies<- read.csv2(file="paths.tsv", header = TRUE, sep=",", stringsAsFactors=FALSE)
# generate metadata: each sample column name encodes "<species>_<time>_<replicate>";
# split the names and assemble a per-run design table.
r <- colnames(dataSC[6:17])
a <- strsplit(unique(r), "_", fixed = TRUE)
# Pull the k-th token out of every split name in one vectorized pass instead of
# growing esp/temps/rep with c() inside a loop (which re-copies on every append).
token <- function(k) vapply(a, `[`, character(1), k)
esp <- token(1)
temps <- token(2)
# NB: `rep` deliberately keeps the original variable name (it is reused as a
# column below) even though it shadows base::rep for non-function lookup.
rep <- token(3)
metadata <- cbind.data.frame(msrunfile = unique(r), esp = esp, temps = temps, rep = rep)
# Combined species-time factor used as the per-condition grouping key downstream.
metadata$esp.temps <- as.factor(paste(metadata$esp, metadata$temps, sep = '-'))
# Generate a dataframe containing all the informations
test<- stack(dataSC[,6:17])
names(test)[1] <- "spectra"
names(test)[2] <- "msrunfile"
test=merge(test, metadata, "msrunfile")
tab.sc<-cbind.data.frame(test, protein=rep(dataSC$Top.Protein.ID, 12))
tab.sc<-cbind.data.frame(tab.sc, desc=rep(dataSC$Top.Protein.Description, 12))
head(tab.sc)
# Filter proteins showing a low ratio between conditions: a protein is kept
# when the mean spectral count of its most abundant condition is at least
# `min.ratio` times that of its least abundant one.
drop.low.ratio=data.frame(dataSC$Top.Protein.ID)
names(drop.low.ratio) <- sub("dataSC.Top.Protein.ID", "prot", names(drop.low.ratio))
# Initialize the flag column up front so logical-index assignment below is safe.
drop.low.ratio$ratio <- 0
proteines = levels(tab.sc$protein)
min.ratio = 2
for (i in seq_along(proteines)){
  low.ratio=tab.sc[tab.sc$protein==proteines[i],]
  low.ratio=drop.levels(low.ratio)
  # Mean spectral count per (protein, species-time) condition.
  tab.ratios = aggregate(low.ratio$spectra, list(low.ratio$protein, low.ratio$esp.temps), FUN = mean)
  maxvalue = max(tab.ratios$x)
  minvalue = min(tab.ratios$x)
  ratio = maxvalue/minvalue
  # Guard against a zero minimum with a pseudo-count of 1 in the denominator.
  # is.finite() also covers the 0/0 NaN case, which previously made
  # `if (ratio == Inf)` evaluate to NA and abort the loop.
  if (!is.finite(ratio))
    ratio = maxvalue/(minvalue+1)
  # Index by protein name rather than by position: `proteines` follows the
  # alphabetical factor-level order whereas drop.low.ratio rows follow the
  # input-file order, so drop.low.ratio$ratio[i] could tag the wrong protein.
  drop.low.ratio$ratio[drop.low.ratio$prot == proteines[i]] <-
    if (ratio >= min.ratio) 2 else 0
}
good_spectra=drop.low.ratio$prot[drop.low.ratio$ratio>1]
good_spectra=drop.levels(good_spectra)
SC=tab.sc[tab.sc$protein %in% good_spectra,]
SC = drop.levels(SC)
str(SC)
###################
### 384 prots ###
###################
# GLM model and multiple ANOVA tests: fit a quasi-Poisson GLM per protein and
# test the species (esp), time (temps) and replicate (rep) effects.
proteines = levels(SC$protein)
# Collect one result row per protein in a preallocated list and bind once at
# the end, instead of growing resultglm with rbind inside the loop
# (which copies the whole data frame on every iteration).
res.list = vector("list", length(proteines))
for (i in seq_along(proteines))
{
  sub=SC[SC$protein==proteines[i],]
  sub=drop.levels(sub)
  model=glm(spectra~esp+temps+rep, family="quasipoisson", data=sub)
  test=anova(model, test="Chisq")
  # Column 5 of the ANOVA table holds Pr(>Chi); rows 2-4 correspond to the
  # esp, temps and rep terms (row 1 is the NULL model).
  res.list[[i]] = cbind.data.frame(prot=proteines[i], pesp=test[[5]][2], ptemps=test[[5]][3], prep=test[[5]][4])
}
resultglm = do.call(rbind.data.frame, res.list)
resultglm$fdr.esp=p.adjust(resultglm$pesp,method="fdr")
resultglm$fdr.temps=p.adjust(resultglm$ptemps,method="fdr")
signif.esp=resultglm$prot[resultglm$fdr.esp<0.01]
signif.esp = drop.levels(signif.esp)
# Bug fix: this previously read resultglm$temps, a column that does not exist
# (so signif.temps was always empty). Use the FDR-adjusted time p-values
# computed above, mirroring the species filter.
signif.temps=resultglm$prot[resultglm$fdr.temps<0.01]
signif.temps = drop.levels(signif.temps)
liste_prot_signif = union(signif.esp, signif.temps)
length(liste_prot_signif)
spectral.count.glm.signif = SC[which(SC$protein %in% liste_prot_signif),]
spectral.count.glm.signif = drop.levels(spectral.count.glm.signif)
length(unique(spectral.count.glm.signif$protein)) ## 200 prots
spectral.count.glm.signif_INTACT <- spectral.count.glm.signif
#levels(spectral.count.glm.signif$esp)[levels(spectral.count.glm.signif$esp)=="Coel"] <- "M145"
#levels(spectral.count.glm.signif$esp)[levels(spectral.count.glm.signif$esp)=="Livi"] <- "TK24"
#spectral.count.glm.signif$esp.temps <- as.factor (paste(spectral.count.glm.signif$esp, spectral.count.glm.signif$temps, sep="-" ))
################# Export data Spectral Count
spec_signif=tapply(spectral.count.glm.signif$spectra,list(spectral.count.glm.signif$protein,spectral.count.glm.signif$esp.temps),FUN=mean)
spec_signif = as.data.frame(spec_signif)
# by.y =0 parce que il n'a pas le nom des proteines, donc utilise le nom des lignes
test1 =merge(resultglm, spec_signif, by.x="prot", by.y=0)
test1 = merge (test1, voies [,-2], by=c("prot"), all.x=TRUE)
write.table(test1,"prots_signif.tsv",sep="\t",row.names=F,col.names=T)
colnames(spectral.count.glm.signif)[7]="prot"
spectral.count.glm.signif=merge(spectral.count.glm.signif,voies [,-c(2)], by=c("prot"), all.x=TRUE)
spectral.count.glm.signif <- drop.levels(spectral.count.glm.signif)
spectral.count.glm.signif$Names <- as.factor(spectral.count.glm.signif$Names)
spectral.count.glm.signif$Sub_class <- as.factor(spectral.count.glm.signif$Sub_class)
# boxplots: one PDF page per significant protein, showing spectral counts by
# species (left panel) and by species-time condition (right panel).
formule1=formula("spectra ~ esp")
formule3=formula("spectra~esp.temps")
pdf(file="lipid_bodies_boxplots_signif.pdf", width=10,height=6)
# Iterate over the factor levels themselves. The original indexed
# levels(...)[i] with i running over length(unique(...)), which silently
# mismatches whenever unused levels remain in the factor.
for (p in levels(spectral.count.glm.signif$prot)) {
  subSC= spectral.count.glm.signif[spectral.count.glm.signif$prot==p,]
  par(mfrow=c(1,2))
  boxplot(formule1,subSC,las=2,col=c("blue","red", 'darkgreen'),main=unique(subSC$Names),ylab="Spectral Count")
  boxplot(formule3,subSC,las=2,col=c("blue","blue","red","red","darkgreen","darkgreen"), main=unique(subSC$Sub_class),ylab="Spectral Count")
}
dev.off()
tab.acpSC = tapply (spectral.count.glm.signif$spectra, list(spectral.count.glm.signif$prot,spectral.count.glm.signif$msrunfile), FUN=mean)
####### Principal components analysis + heatmap
quanti_data_acp = na.omit(tab.acpSC)
quanti_data_acp = t(quanti_data_acp)
z <- dudi.pca(quanti_data_acp,center = T, scale = T, scannf = F, nf = 4)
sm = sum(z$ei)
pound = round((z$e/sm*100),digits = 1)
acp = z$li
acp$msrunfile=row.names(acp)
acp = merge(acp,metadata,by= c("msrunfile"),all.x=TRUE,all.y=FALSE)
pdf(file="ACP_signif.pdf", width = 13, height = 7)
par(mfrow=c(1,2))
plot(acp$Axis1, acp$Axis2,type="n", xlab=paste("Axe1(",pound[1],"%)",sep=" "),ylab=paste("Axe2(",pound[2],"%)",sep=" "))
text(acp$Axis1, acp$Axis2, acp$msrunfile, col=c(acp$esp), cex = 0.9)
abline(h=0, v=0)
plot(acp$Axis1,acp$Axis3,type="n", xlab=paste("Axe1(",pound[1],"%)",sep=" "),ylab=paste("Axe3(",pound[3],"%)",sep=" "))
text(acp$Axis1, acp$Axis3, acp$msrunfile, col=c(acp$esp), cex=0.9)
abline(h=0, v=0)
dev.off()
# Class vec contains the values ib Spectral counting plus the metabolic pathways
heatmap_SC<- read.csv2("class_vec.csv", header = TRUE, sep="\t", dec=",")
pdf(file="Heatplot_signif.pdf", width = 7, height = 23)
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$CARBON, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Carbon")
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$CELL_DIVISION_WALL, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Cell division/wall")
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$NITROGEN, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Nitrogen")
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$ENERGY, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Respiratory chain")
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$METABOLITES, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Secondary metabolites")
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$TRANSPORT, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Transport")
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$TRANSLA_TRANSCRIP, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Translation/transcription")
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$SIGNAL, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Signaling")
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$OTHERS, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Other")
heatplot(heatmap_SC[,2:7], margins=c(5,20) ,distfun="euclidean", dend="row", cexRow= 0.6, cex=0.7, classvec=heatmap_SC$UNKNOWN, classvecCol=c("white","darkgreen"),labRow=heatmap_SC$Names, main="Unknown")
dev.off()
| /Script_Lipid_Bodies.R | no_license | AaronMillOro/Lipid_bodies_Streptomyces | R | false | false | 8,812 | r | library("ade4")
# Analysis packages: multivariate stats (made4), mixed/linear models
# (lme4, nlme, car), plotting (gplots, lattice, ggplot2), data utilities
# (gdata, reshape2), cluster validation (clValid).
library("gdata")
library("lme4")
library("nlme")
library("car")
library("gplots")
# NOTE(review): gdata is loaded a second time here; harmless but redundant.
library("gdata")
library("made4")
library("clValid")
library("lattice")
library("ggplot2")
library("reshape2")
setwd("~/Documents/2016_02_17 Corps lipidiques MJ/R_analysis")
#****** Spectral Counting analysis of proteins from lipid bodies ****
# import data obtained from X!TandemPipeline
#emPAI<- read.csv2(file="emPAI_lipid_bodies.csv", header = TRUE, sep="\t", dec=",", stringsAsFactors=FALSE) #443 prots
# Normalised spectral counts (tab-separated file with comma decimal marks).
dataSC<- read.csv2(file="SC_lipid_bodies_norm.csv", header = TRUE, sep="\t", dec=",", stringsAsFactors=FALSE) #443 prots
# Protein -> metabolic-pathway annotation table.
voies<- read.csv2(file="paths.tsv", header = TRUE, sep=",", stringsAsFactors=FALSE)
# Generate sample metadata from the MS run column names (columns 6:17),
# which follow the pattern "<species>_<timepoint>_<replicate>".
r <- colnames(dataSC[6:17])
a <- strsplit(unique(r), "_", fixed=TRUE)
# Vectorized field extraction replaces the original element-by-element loop
# that grew esp/temps/rep with c() (quadratic growth, plus a noisy print(i)
# per iteration).  Output values and order are unchanged.
esp   <- vapply(a, `[`, character(1), 1)  # species
temps <- vapply(a, `[`, character(1), 2)  # timepoint
rep   <- vapply(a, `[`, character(1), 3)  # replicate
metadata <- cbind.data.frame(msrunfile=unique(r), esp=esp, temps=temps, rep=rep)
# Combined species-timepoint factor used later for per-condition means.
metadata$esp.temps <- as.factor(paste(metadata$esp, metadata$temps, sep='-'))
# Generate a dataframe containing all the informations
# Long format: one row per (protein, MS run) with its spectral count.
test<- stack(dataSC[,6:17])
names(test)[1] <- "spectra"
names(test)[2] <- "msrunfile"
# Attach the run-level metadata (species, timepoint, replicate, condition).
test=merge(test, metadata, "msrunfile")
# NOTE(review): rep(..., 12) assumes the rows are still grouped by run with
# proteins in file order after merge(); this holds only because every run
# carries the full, identically ordered protein list -- verify on the data.
tab.sc<-cbind.data.frame(test, protein=rep(dataSC$Top.Protein.ID, 12))
tab.sc<-cbind.data.frame(tab.sc, desc=rep(dataSC$Top.Protein.Description, 12))
head(tab.sc)
# Filter proteins showing low ration between conditions
# Keep only proteins whose max/min mean spectral count across the
# esp.temps conditions is >= min.ratio (i.e. proteins that actually vary).
drop.low.ratio=data.frame(dataSC$Top.Protein.ID)
names(drop.low.ratio) <- sub("dataSC.Top.Protein.ID", "prot", names(drop.low.ratio))
# NOTE(review): levels() assumes tab.sc$protein is a factor; with R >= 4.0
# cbind.data.frame no longer factor-converts strings by default, in which
# case this returns NULL -- confirm against the R version used originally.
proteines = levels(tab.sc$protein) # 2355 prots
min.ratio = 2
for (i in 1:length(proteines)){
  low.ratio=tab.sc[tab.sc$protein==proteines[i],]
  low.ratio=drop.levels(low.ratio)
  # Mean spectral count per (protein, condition); column x holds the means.
  tab.ratios = aggregate(low.ratio$spectra, list(low.ratio$protein, low.ratio$esp.temps), FUN = mean)
  maxvalue = max(tab.ratios$x)
  minvalue = min(tab.ratios$x)
  ratio = maxvalue/minvalue
  # Guard against division by zero when the minimum condition mean is 0.
  if (ratio == Inf)
    ratio = maxvalue/(minvalue+1)
  # Membership flag: 2 = passes the ratio filter, 0 = dropped
  # (the `> 1` test below therefore keeps exactly the 2s).
  if (ratio >= min.ratio)
    drop.low.ratio$ratio[i] <-2
  else
    drop.low.ratio$ratio[i] <-0
  print(i)
}
good_spectra=drop.low.ratio$prot[drop.low.ratio$ratio>1]
good_spectra=drop.levels(good_spectra)
# Restrict the long table to the proteins that passed the filter.
SC=tab.sc[tab.sc$protein %in% good_spectra,]
SC = drop.levels(SC)
str(SC)
###################
### 384 prots ###
###################
# GLM model and multiple ANOVA tests
# Per-protein quasi-Poisson GLM followed by an analysis-of-deviance test for
# the species (esp), timepoint (temps) and replicate (rep) effects.
proteines = levels(SC$protein)
# Collect per-protein rows in a preallocated list and bind once, instead of
# growing resultglm with rbind inside the loop (quadratic in the original).
res_list <- vector("list", length(proteines))
for (i in seq_along(proteines))
{
  sub=SC[SC$protein==proteines[i],]
  sub=drop.levels(sub)
  model=glm(spectra~esp+temps+rep, family="quasipoisson", data=sub)
  test=anova(model, test="Chisq")
  # Column 5 of the anova table holds the Chisq p-values; rows 2:4 are the
  # esp, temps and rep terms (row 1 is the NULL model).
  res_list[[i]] <- cbind.data.frame(prot=proteines[i], pesp=test[[5]][2], ptemps=test[[5]][3], prep=test[[5]][4])
}
resultglm <- do.call(rbind.data.frame, res_list)
# Benjamini-Hochberg FDR correction across proteins, per factor.
resultglm$fdr.esp=p.adjust(resultglm$pesp,method="fdr")
resultglm$fdr.temps=p.adjust(resultglm$ptemps,method="fdr")
signif.esp=resultglm$prot[resultglm$fdr.esp<0.01]
signif.esp = drop.levels(signif.esp)
# BUG FIX: the original tested `resultglm$temps < 0.01`, but resultglm has no
# `temps` column (only ptemps/fdr.temps, which `$` cannot partial-match), so
# the subset was always empty and the timepoint effect was silently ignored.
# Use the FDR-adjusted p-values, mirroring the esp selection above.
signif.temps=resultglm$prot[resultglm$fdr.temps<0.01]
signif.temps = drop.levels(signif.temps)
# Proteins significant for species OR timepoint.
liste_prot_signif = union(signif.esp, signif.temps)
length(liste_prot_signif)
spectral.count.glm.signif = SC[which(SC$protein %in% liste_prot_signif),]
spectral.count.glm.signif = drop.levels(spectral.count.glm.signif)
length(unique(spectral.count.glm.signif$protein)) ## 200 prots in the original run
# Keep an untouched copy of the significant-protein table before the
# renaming/merging below.
spectral.count.glm.signif_INTACT <- spectral.count.glm.signif
#levels(spectral.count.glm.signif$esp)[levels(spectral.count.glm.signif$esp)=="Coel"] <- "M145"
#levels(spectral.count.glm.signif$esp)[levels(spectral.count.glm.signif$esp)=="Livi"] <- "TK24"
#spectral.count.glm.signif$esp.temps <- as.factor (paste(spectral.count.glm.signif$esp, spectral.count.glm.signif$temps, sep="-" ))
################# Export data Spectral Count
# Mean spectral count per (protein, condition) -> wide matrix, then data frame.
spec_signif=tapply(spectral.count.glm.signif$spectra,list(spectral.count.glm.signif$protein,spectral.count.glm.signif$esp.temps),FUN=mean)
spec_signif = as.data.frame(spec_signif)
# by.y = 0: spec_signif has no protein-name column, so merge on its row names.
test1 =merge(resultglm, spec_signif, by.x="prot", by.y=0)
# Add the pathway annotation (column 2 of voies dropped).
test1 = merge (test1, voies [,-2], by=c("prot"), all.x=TRUE)
write.table(test1,"prots_signif.tsv",sep="\t",row.names=F,col.names=T)
# Rename column 7 (the protein ID) to "prot" so it can key the merge below.
colnames(spectral.count.glm.signif)[7]="prot"
spectral.count.glm.signif=merge(spectral.count.glm.signif,voies [,-c(2)], by=c("prot"), all.x=TRUE)
spectral.count.glm.signif <- drop.levels(spectral.count.glm.signif)
spectral.count.glm.signif$Names <- as.factor(spectral.count.glm.signif$Names)
spectral.count.glm.signif$Sub_class <- as.factor(spectral.count.glm.signif$Sub_class)
# boxplots
# One PDF page per significant protein: spectral counts by species (left
# panel) and by species-timepoint condition (right panel).
formule1=formula("spectra ~ esp")
formule3=formula("spectra~esp.temps")
pdf(file="lipid_bodies_boxplots_signif.pdf", width=10,height=6)
for (i in 1:length(unique(spectral.count.glm.signif$prot))) {
  # NOTE(review): the loop counts unique() values but subsets by levels();
  # these coincide only while prot is a factor with exactly those levels.
  subSC= spectral.count.glm.signif[spectral.count.glm.signif$prot==levels(spectral.count.glm.signif$prot)[i],]
  par(mfrow=c(1,2))
  boxplot(formule1,subSC,las=2,col=c("blue","red", 'darkgreen'),main=unique(subSC$Names),ylab="Spectral Count")
  boxplot(formule3,subSC,las=2,col=c("blue","blue","red","red","darkgreen","darkgreen"), main=unique(subSC$Sub_class),ylab="Spectral Count")
}
dev.off()
# Mean spectral count per (protein, MS run): input matrix for the PCA.
tab.acpSC = tapply (spectral.count.glm.signif$spectra, list(spectral.count.glm.signif$prot,spectral.count.glm.signif$msrunfile), FUN=mean)
####### Principal components analysis + heatmap
quanti_data_acp = na.omit(tab.acpSC)
# Transpose: rows = MS runs (observations), columns = proteins (variables).
quanti_data_acp = t(quanti_data_acp)
# Centred/scaled PCA keeping 4 axes, without the interactive scree prompt.
z <- dudi.pca(quanti_data_acp,center = T, scale = T, scannf = F, nf = 4)
# Percentage of variance per axis.
# NOTE(review): z$ei and z$e rely on `$` partial matching of the dudi
# component `eig`; it works, but z$eig would be explicit.
sm = sum(z$ei)
pound = round((z$e/sm*100),digits = 1)
acp = z$li
acp$msrunfile=row.names(acp)
acp = merge(acp,metadata,by= c("msrunfile"),all.x=TRUE,all.y=FALSE)
# Two panels (axis 1 vs 2, axis 1 vs 3), labelled by run, coloured by species.
pdf(file="ACP_signif.pdf", width = 13, height = 7)
par(mfrow=c(1,2))
plot(acp$Axis1, acp$Axis2,type="n", xlab=paste("Axe1(",pound[1],"%)",sep=" "),ylab=paste("Axe2(",pound[2],"%)",sep=" "))
text(acp$Axis1, acp$Axis2, acp$msrunfile, col=c(acp$esp), cex = 0.9)
abline(h=0, v=0)
plot(acp$Axis1,acp$Axis3,type="n", xlab=paste("Axe1(",pound[1],"%)",sep=" "),ylab=paste("Axe3(",pound[3],"%)",sep=" "))
text(acp$Axis1, acp$Axis3, acp$msrunfile, col=c(acp$esp), cex=0.9)
abline(h=0, v=0)
dev.off()
# Class vec contains the values in Spectral counting plus the metabolic pathways
heatmap_SC <- read.csv2("class_vec.csv", header = TRUE, sep="\t", dec=",")
# One heatmap page per functional category.  Each category is a membership
# column in heatmap_SC; the vector below maps column name -> page title.
# A loop replaces the ten copy-pasted heatplot() calls of the original that
# differed only in `classvec` and `main` (same page order preserved).
categories <- c(
  CARBON             = "Carbon",
  CELL_DIVISION_WALL = "Cell division/wall",
  NITROGEN           = "Nitrogen",
  ENERGY             = "Respiratory chain",
  METABOLITES        = "Secondary metabolites",
  TRANSPORT          = "Transport",
  TRANSLA_TRANSCRIP  = "Translation/transcription",
  SIGNAL             = "Signaling",
  OTHERS             = "Other",
  UNKNOWN            = "Unknown"
)
pdf(file="Heatplot_signif.pdf", width = 7, height = 23)
for (categ in names(categories)) {
  heatplot(heatmap_SC[,2:7], margins=c(5,20), distfun="euclidean",
           dend="row", cexRow= 0.6, cex=0.7,
           classvec=heatmap_SC[[categ]],
           classvecCol=c("white","darkgreen"),
           labRow=heatmap_SC$Names, main=categories[[categ]])
}
dev.off()
|
## load the data
# NOTE(review): hard-coded Windows path -- the script only runs from this
# machine/layout as-is.
rm(list = ls())
data <- read.table("C://Users/Owner/datasciencecoursera/household_power_consumption.txt", header = T,
                   sep = ";", na.strings = "?")
# convert the date variable to Date class
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Subset the data
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
# Convert dates and times
data$datetime <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S")
# Plot 2
data$datetime <- as.POSIXct(data$datetime)
# NOTE(review): attach() puts `data` on the search path so the formula below
# can reference columns directly; detach(data) follows after the plot.
attach(data)
# Line plot of global active power over the two-day window.
plot(Global_active_power ~ datetime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
# Copy the on-screen plot to plot2.png, then close the PNG device.
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
detach(data) | /Plot2.R | no_license | pce369/ExData_Plotting1 | R | false | false | 756 | r | ## load the data
## load the data
# NOTE(review): hard-coded Windows path -- the script only runs from this
# machine/layout as-is.
rm(list = ls())
data <- read.table("C://Users/Owner/datasciencecoursera/household_power_consumption.txt", header = TRUE,
                   sep = ";", na.strings = "?")
# convert the date variable to Date class
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Subset the data to the two target days (1-2 Feb 2007)
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
# Convert dates and times into a single POSIXct timestamp column
data$datetime <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S")
# Plot 2
data$datetime <- as.POSIXct(data$datetime)
# Use plot.formula's `data` argument instead of attach()/detach(): the plot
# is identical, but nothing is left on the search path if the script errors.
plot(Global_active_power ~ datetime, data = data, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
# Copy the on-screen plot to plot2.png, then close the PNG device.
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
#' Train an Auxiliary Classifier Generative Adversarial Network (ACGAN) on the
#' MNIST dataset. See https://arxiv.org/abs/1610.09585 for more details.
#'
#' You should start to see reasonable images after ~5 epochs, and good images by
#' ~15 epochs. You should use a GPU, as the convolution-heavy operations are
#' very slow on the CPU. Prefer the TensorFlow backend if you plan on iterating,
#' as the compilation time can be a blocker using Theano.
#'
#' | Hardware | Backend | Time / Epoch |
#' | ---------------- | ------- | ------------------- |
#' |CPU | TF | 3 hrs |
#' |Titan X (maxwell) | TF | 4 min |
#' |Titan X (maxwell) | TH | 7 min |
#'
library(keras)
library(progress)
library(abind)
K <- keras::backend()
K$set_image_data_format('channels_first')
# Functions ---------------------------------------------------------------
#' Build the ACGAN generator.
#'
#' Maps a pair (z, L) -- a latent noise vector z plus an integer class label
#' L -- to a fake image.  Output pixels come from a tanh activation, i.e. lie
#' in [-1, 1], matching the [-1, 1] scaling applied to MNIST further below.
#'
#' @param latent_size Integer, length of the latent noise vector.
#' @return A keras model: list(latent, image_class) -> image tensor of shape
#'   (batch, 1, 28, 28) (channels-first, per the backend setting above).
build_generator <- function(latent_size){
  # We will map a pair of (z, L), where z is a latent vector and L is a
  # label drawn from P_c, to image space (..., 1, 28, 28)
  cnn <- keras_model_sequential()
  cnn %>%
    layer_dense(1024, input_shape = latent_size, activation = "relu") %>%
    layer_dense(128*7*7, activation = "relu") %>%
    layer_reshape(c(128, 7, 7)) %>%
    # Upsample to (..., 14, 14)
    layer_upsampling_2d(size = c(2, 2)) %>%
    layer_conv_2d(
      256, c(5,5), padding = "same", activation = "relu",
      kernel_initializer = "glorot_normal"
    ) %>%
    # Upsample to (..., 28, 28)
    layer_upsampling_2d(size = c(2, 2)) %>%
    layer_conv_2d(
      128, c(5,5), padding = "same", activation = "tanh",
      kernel_initializer = "glorot_normal"
    ) %>%
    # Take a channel axis reduction
    layer_conv_2d(
      1, c(2,2), padding = "same", activation = "tanh",
      kernel_initializer = "glorot_normal"
    )
  # This is the z space commonly refered to in GAN papers
  latent <- layer_input(shape = list(latent_size))
  # This will be our label
  image_class <- layer_input(shape = list(1))
  # 10 classes in MNIST
  cls <- image_class %>%
    layer_embedding(
      input_dim = 10, output_dim = latent_size,
      embeddings_initializer='glorot_normal'
    ) %>%
    layer_flatten()
  # Hadamard product between z-space and a class conditional embedding
  h <- layer_multiply(list(latent, cls))
  fake_image <- cnn(h)
  # Implicit return: the full generator model.
  keras_model(list(latent, image_class), fake_image)
}
#' Build the ACGAN discriminator.
#'
#' A conv net with LeakyReLU activations that produces two outputs for a
#' (1, 28, 28) channels-first input image:
#'   * `generation`: sigmoid probability that the image is real,
#'   * `auxiliary`:  softmax over the 10 MNIST digit classes.
#'
#' @return A keras model: image -> list(fake, aux).
build_discriminator <- function(){
  # Build a relatively standard conv net, with LeakyReLUs as suggested in
  # the reference paper
  cnn <- keras_model_sequential()
  cnn %>%
    layer_conv_2d(
      32, c(3,3), padding = "same", strides = c(2,2),
      input_shape = c(1, 28, 28)
    ) %>%
    layer_activation_leaky_relu() %>%
    layer_dropout(0.3) %>%
    layer_conv_2d(64, c(3, 3), padding = "same", strides = c(1,1)) %>%
    layer_activation_leaky_relu() %>%
    layer_dropout(0.3) %>%
    layer_conv_2d(128, c(3, 3), padding = "same", strides = c(2,2)) %>%
    layer_activation_leaky_relu() %>%
    layer_dropout(0.3) %>%
    layer_conv_2d(256, c(3, 3), padding = "same", strides = c(1,1)) %>%
    layer_activation_leaky_relu() %>%
    layer_dropout(0.3) %>%
    layer_flatten()
  image <- layer_input(shape = c(1, 28, 28))
  features <- cnn(image)
  # First output (name=generation) is whether or not the discriminator
  # thinks the image that is being shown is fake, and the second output
  # (name=auxiliary) is the class that the discriminator thinks the image
  # belongs to.
  fake <- features %>%
    layer_dense(1, activation = "sigmoid", name = "generation")
  aux <- features %>%
    layer_dense(10, activation = "softmax", name = "auxiliary")
  keras_model(image, list(fake, aux))
}
# Parameters --------------------------------------------------------------
# Batch and latent size taken from the paper
epochs <- 50
batch_size <- 100
latent_size <- 100
# Adam parameters suggested in https://arxiv.org/abs/1511.06434
adam_lr <- 0.00005
adam_beta_1 <- 0.5
# Model Definition --------------------------------------------------------
# Build the discriminator
# Two losses, matching its two outputs: real/fake (binary crossentropy)
# and digit class (sparse categorical crossentropy).
discriminator <- build_discriminator()
discriminator %>% compile(
  optimizer = optimizer_adam(lr = adam_lr, beta_1 = adam_beta_1),
  loss = list("binary_crossentropy", "sparse_categorical_crossentropy")
)
# Build the generator
generator <- build_generator(latent_size)
generator %>% compile(
  optimizer = optimizer_adam(lr = adam_lr, beta_1 = adam_beta_1),
  loss = "binary_crossentropy"
)
# Combined model: noise + label -> generator -> (frozen) discriminator.
latent <- layer_input(shape = list(latent_size))
image_class <- layer_input(shape = list(1), dtype = "int32")
fake <- generator(list(latent, image_class))
# Only want to be able to train generation for the combined model
discriminator$trainable <- FALSE
results <- discriminator(fake)
combined <- keras_model(list(latent, image_class), results)
combined %>% compile(
  optimizer = optimizer_adam(lr = adam_lr, beta_1 = adam_beta_1),
  loss = list("binary_crossentropy", "sparse_categorical_crossentropy")
)
# Data Preparation --------------------------------------------------------
# Load mnist data, and force it to be of shape (..., 1, 28, 28) with
# range [-1, 1]
mnist <- dataset_mnist()
# Pixel values 0..255 -> [-1, 1], matching the generator's tanh output.
mnist$train$x <- (mnist$train$x - 127.5)/127.5
mnist$test$x <- (mnist$test$x - 127.5)/127.5
# Insert the channels-first singleton channel dimension.
dim(mnist$train$x) <- c(60000, 1, 28, 28)
dim(mnist$test$x) <- c(10000, 1, 28, 28)
num_train <- dim(mnist$train$x)[1]
num_test <- dim(mnist$test$x)[1]
# Training ----------------------------------------------------------------
# Each epoch alternates discriminator updates (on half-real / half-fake
# batches) with generator updates through the frozen-discriminator
# `combined` model, then reports train/test losses and plots sample digits.
for(epoch in 1:epochs){
  num_batches <- trunc(num_train/batch_size)
  pb <- progress_bar$new(
    total = num_batches,
    format = sprintf("epoch %s/%s :elapsed [:bar] :percent :eta", epoch, epochs),
    clear = FALSE
  )
  epoch_gen_loss <- NULL
  epoch_disc_loss <- NULL
  # Real images are sampled without replacement over the whole epoch.
  possible_indexes <- 1:num_train
  for(index in 1:num_batches){
    pb$tick()
    # Generate a new batch of noise
    noise <- runif(n = batch_size*latent_size, min = -1, max = 1) %>%
      matrix(nrow = batch_size, ncol = latent_size)
    # Get a batch of real images
    batch <- sample(possible_indexes, size = batch_size)
    possible_indexes <- possible_indexes[!possible_indexes %in% batch]
    image_batch <- mnist$train$x[batch,,,,drop = FALSE]
    label_batch <- mnist$train$y[batch]
    # Sample some labels from p_c
    sampled_labels <- sample(0:9, batch_size, replace = TRUE) %>%
      matrix(ncol = 1)
    # Generate a batch of fake images, using the generated labels as a
    # conditioner. We reshape the sampled labels to be
    # (batch_size, 1) so that we can feed them into the embedding
    # layer as a length one sequence
    generated_images <- predict(generator, list(noise, sampled_labels))
    # Stack real + fake images; y marks real (1) vs fake (0), aux_y the digit.
    X <- abind(image_batch, generated_images, along = 1)
    y <- c(rep(1L, batch_size), rep(0L, batch_size)) %>% matrix(ncol = 1)
    aux_y <- c(label_batch, sampled_labels) %>% matrix(ncol = 1)
    # Check if the discriminator can figure itself out
    disc_loss <- train_on_batch(
      discriminator, x = X,
      y = list(y, aux_y)
    )
    epoch_disc_loss <- rbind(epoch_disc_loss, unlist(disc_loss))
    # Make new noise. Generate 2 * batch size here such that
    # the generator optimizes over an identical number of images as the
    # discriminator
    noise <- runif(2*batch_size*latent_size, min = -1, max = 1) %>%
      matrix(nrow = 2*batch_size, ncol = latent_size)
    sampled_labels <- sample(0:9, size = 2*batch_size, replace = TRUE) %>%
      matrix(ncol = 1)
    # Want to train the generator to trick the discriminator
    # For the generator, we want all the {fake, not-fake} labels to say
    # not-fake
    trick <- rep(1, 2*batch_size) %>% matrix(ncol = 1)
    combined_loss <- train_on_batch(
      combined,
      list(noise, sampled_labels),
      list(trick, sampled_labels)
    )
    epoch_gen_loss <- rbind(epoch_gen_loss, unlist(combined_loss))
  }
  cat(sprintf("\nTesting for epoch %02d:", epoch))
  # Evaluate the testing loss here
  # Generate a new batch of noise
  noise <- runif(num_test*latent_size, min = -1, max = 1) %>%
    matrix(nrow = num_test, ncol = latent_size)
  # Sample some labels from p_c and generate images from them
  sampled_labels <- sample(0:9, size = num_test, replace = TRUE) %>%
    matrix(ncol = 1)
  generated_images <- predict(generator, list(noise, sampled_labels))
  X <- abind(mnist$test$x, generated_images, along = 1)
  y <- c(rep(1, num_test), rep(0, num_test)) %>% matrix(ncol = 1)
  aux_y <- c(mnist$test$y, sampled_labels) %>% matrix(ncol = 1)
  # See if the discriminator can figure itself out...
  discriminator_test_loss <- evaluate(
    discriminator, X, list(y, aux_y),
    verbose = FALSE
  ) %>% unlist()
  discriminator_train_loss <- apply(epoch_disc_loss, 2, mean)
  # Make new noise
  noise <- runif(2*num_test*latent_size, min = -1, max = 1) %>%
    matrix(nrow = 2*num_test, ncol = latent_size)
  sampled_labels <- sample(0:9, size = 2*num_test, replace = TRUE) %>%
    matrix(ncol = 1)
  trick <- rep(1, 2*num_test) %>% matrix(ncol = 1)
  generator_test_loss = combined %>% evaluate(
    list(noise, sampled_labels),
    list(trick, sampled_labels),
    verbose = FALSE
  )
  generator_train_loss <- apply(epoch_gen_loss, 2, mean)
  # Generate an epoch report on performance
  row_fmt <- "\n%22s : loss %4.2f | %5.2f | %5.2f"
  cat(sprintf(
    row_fmt,
    "generator (train)",
    generator_train_loss[1],
    generator_train_loss[2],
    generator_train_loss[3]
  ))
  cat(sprintf(
    row_fmt,
    "generator (test)",
    generator_test_loss[1],
    generator_test_loss[2],
    generator_test_loss[3]
  ))
  cat(sprintf(
    row_fmt,
    "discriminator (train)",
    discriminator_train_loss[1],
    discriminator_train_loss[2],
    discriminator_train_loss[3]
  ))
  cat(sprintf(
    row_fmt,
    "discriminator (test)",
    discriminator_test_loss[1],
    discriminator_test_loss[2],
    discriminator_test_loss[3]
  ))
  cat("\n")
  # Generate some digits to display
  noise <- runif(10*latent_size, min = -1, max = 1) %>%
    matrix(nrow = 10, ncol = latent_size)
  sampled_labels <- 0:9 %>%
    matrix(ncol = 1)
  # Get a batch to display
  generated_images <- predict(
    generator,
    list(noise, sampled_labels)
  )
  # Tile one generated digit per class side by side, rescale to [0, 1],
  # and display as a raster image.
  img <- NULL
  for(i in 1:10){
    img <- cbind(img, generated_images[i,,,])
  }
  ((img + 1)/2) %>% as.raster() %>%
    plot()
}
| /website/articles/examples/mnist_acgan.R | no_license | rhalDTU/keras | R | false | false | 10,634 | r | #' Train an Auxiliary Classifier Generative Adversarial Network (ACGAN) on the
#' MNIST dataset. See https://arxiv.org/abs/1610.09585 for more details.
#'
#' You should start to see reasonable images after ~5 epochs, and good images by
#' ~15 epochs. You should use a GPU, as the convolution-heavy operations are
#' very slow on the CPU. Prefer the TensorFlow backend if you plan on iterating,
#' as the compilation time can be a blocker using Theano.
#'
#' | Hardware | Backend | Time / Epoch |
#' | ---------------- | ------- | ------------------- |
#' |CPU | TF | 3 hrs |
#' |Titan X (maxwell) | TF | 4 min |
#' |Titan X (maxwell) | TH | 7 min |
#'
library(keras)
library(progress)
library(abind)
K <- keras::backend()
K$set_image_data_format('channels_first')
# Functions ---------------------------------------------------------------
#' Build the ACGAN generator.
#'
#' Maps a pair (z, L) -- a latent noise vector z plus an integer class label
#' L -- to a fake image.  Output pixels come from a tanh activation, i.e. lie
#' in [-1, 1], matching the [-1, 1] scaling applied to MNIST further below.
#'
#' @param latent_size Integer, length of the latent noise vector.
#' @return A keras model: list(latent, image_class) -> image tensor of shape
#'   (batch, 1, 28, 28) (channels-first, per the backend setting above).
build_generator <- function(latent_size){
  # We will map a pair of (z, L), where z is a latent vector and L is a
  # label drawn from P_c, to image space (..., 1, 28, 28)
  cnn <- keras_model_sequential()
  cnn %>%
    layer_dense(1024, input_shape = latent_size, activation = "relu") %>%
    layer_dense(128*7*7, activation = "relu") %>%
    layer_reshape(c(128, 7, 7)) %>%
    # Upsample to (..., 14, 14)
    layer_upsampling_2d(size = c(2, 2)) %>%
    layer_conv_2d(
      256, c(5,5), padding = "same", activation = "relu",
      kernel_initializer = "glorot_normal"
    ) %>%
    # Upsample to (..., 28, 28)
    layer_upsampling_2d(size = c(2, 2)) %>%
    layer_conv_2d(
      128, c(5,5), padding = "same", activation = "tanh",
      kernel_initializer = "glorot_normal"
    ) %>%
    # Take a channel axis reduction
    layer_conv_2d(
      1, c(2,2), padding = "same", activation = "tanh",
      kernel_initializer = "glorot_normal"
    )
  # This is the z space commonly refered to in GAN papers
  latent <- layer_input(shape = list(latent_size))
  # This will be our label
  image_class <- layer_input(shape = list(1))
  # 10 classes in MNIST
  cls <- image_class %>%
    layer_embedding(
      input_dim = 10, output_dim = latent_size,
      embeddings_initializer='glorot_normal'
    ) %>%
    layer_flatten()
  # Hadamard product between z-space and a class conditional embedding
  h <- layer_multiply(list(latent, cls))
  fake_image <- cnn(h)
  # Implicit return: the full generator model.
  keras_model(list(latent, image_class), fake_image)
}
#' Build the ACGAN discriminator.
#'
#' A conv net with LeakyReLU activations that produces two outputs for a
#' (1, 28, 28) channels-first input image:
#'   * `generation`: sigmoid probability that the image is real,
#'   * `auxiliary`:  softmax over the 10 MNIST digit classes.
#'
#' @return A keras model: image -> list(fake, aux).
build_discriminator <- function(){
  # Build a relatively standard conv net, with LeakyReLUs as suggested in
  # the reference paper
  cnn <- keras_model_sequential()
  cnn %>%
    layer_conv_2d(
      32, c(3,3), padding = "same", strides = c(2,2),
      input_shape = c(1, 28, 28)
    ) %>%
    layer_activation_leaky_relu() %>%
    layer_dropout(0.3) %>%
    layer_conv_2d(64, c(3, 3), padding = "same", strides = c(1,1)) %>%
    layer_activation_leaky_relu() %>%
    layer_dropout(0.3) %>%
    layer_conv_2d(128, c(3, 3), padding = "same", strides = c(2,2)) %>%
    layer_activation_leaky_relu() %>%
    layer_dropout(0.3) %>%
    layer_conv_2d(256, c(3, 3), padding = "same", strides = c(1,1)) %>%
    layer_activation_leaky_relu() %>%
    layer_dropout(0.3) %>%
    layer_flatten()
  image <- layer_input(shape = c(1, 28, 28))
  features <- cnn(image)
  # First output (name=generation) is whether or not the discriminator
  # thinks the image that is being shown is fake, and the second output
  # (name=auxiliary) is the class that the discriminator thinks the image
  # belongs to.
  fake <- features %>%
    layer_dense(1, activation = "sigmoid", name = "generation")
  aux <- features %>%
    layer_dense(10, activation = "softmax", name = "auxiliary")
  keras_model(image, list(fake, aux))
}
# Parameters --------------------------------------------------------------
# Batch and latent size taken from the paper
epochs <- 50
batch_size <- 100
latent_size <- 100
# Adam parameters suggested in https://arxiv.org/abs/1511.06434
adam_lr <- 0.00005
adam_beta_1 <- 0.5
# Model Definition --------------------------------------------------------
# Build the discriminator
discriminator <- build_discriminator()
discriminator %>% compile(
optimizer = optimizer_adam(lr = adam_lr, beta_1 = adam_beta_1),
loss = list("binary_crossentropy", "sparse_categorical_crossentropy")
)
# Build the generator
generator <- build_generator(latent_size)
generator %>% compile(
optimizer = optimizer_adam(lr = adam_lr, beta_1 = adam_beta_1),
loss = "binary_crossentropy"
)
latent <- layer_input(shape = list(latent_size))
image_class <- layer_input(shape = list(1), dtype = "int32")
fake <- generator(list(latent, image_class))
# Only want to be able to train generation for the combined model
discriminator$trainable <- FALSE
results <- discriminator(fake)
combined <- keras_model(list(latent, image_class), results)
combined %>% compile(
optimizer = optimizer_adam(lr = adam_lr, beta_1 = adam_beta_1),
loss = list("binary_crossentropy", "sparse_categorical_crossentropy")
)
# Data Preparation --------------------------------------------------------
# Loade mnist data, and force it to be of shape (..., 1, 28, 28) with
# range [-1, 1]
mnist <- dataset_mnist()
mnist$train$x <- (mnist$train$x - 127.5)/127.5
mnist$test$x <- (mnist$test$x - 127.5)/127.5
dim(mnist$train$x) <- c(60000, 1, 28, 28)
dim(mnist$test$x) <- c(10000, 1, 28, 28)
num_train <- dim(mnist$train$x)[1]
num_test <- dim(mnist$test$x)[1]
# Training ----------------------------------------------------------------
for(epoch in 1:epochs){
num_batches <- trunc(num_train/batch_size)
pb <- progress_bar$new(
total = num_batches,
format = sprintf("epoch %s/%s :elapsed [:bar] :percent :eta", epoch, epochs),
clear = FALSE
)
epoch_gen_loss <- NULL
epoch_disc_loss <- NULL
possible_indexes <- 1:num_train
for(index in 1:num_batches){
pb$tick()
# Generate a new batch of noise
noise <- runif(n = batch_size*latent_size, min = -1, max = 1) %>%
matrix(nrow = batch_size, ncol = latent_size)
# Get a batch of real images
batch <- sample(possible_indexes, size = batch_size)
possible_indexes <- possible_indexes[!possible_indexes %in% batch]
image_batch <- mnist$train$x[batch,,,,drop = FALSE]
label_batch <- mnist$train$y[batch]
# Sample some labels from p_c
sampled_labels <- sample(0:9, batch_size, replace = TRUE) %>%
matrix(ncol = 1)
# Generate a batch of fake images, using the generated labels as a
# conditioner. We reshape the sampled labels to be
# (batch_size, 1) so that we can feed them into the embedding
# layer as a length one sequence
generated_images <- predict(generator, list(noise, sampled_labels))
X <- abind(image_batch, generated_images, along = 1)
y <- c(rep(1L, batch_size), rep(0L, batch_size)) %>% matrix(ncol = 1)
aux_y <- c(label_batch, sampled_labels) %>% matrix(ncol = 1)
# Check if the discriminator can figure itself out
disc_loss <- train_on_batch(
discriminator, x = X,
y = list(y, aux_y)
)
epoch_disc_loss <- rbind(epoch_disc_loss, unlist(disc_loss))
# Make new noise. Generate 2 * batch size here such that
# the generator optimizes over an identical number of images as the
# discriminator
noise <- runif(2*batch_size*latent_size, min = -1, max = 1) %>%
matrix(nrow = 2*batch_size, ncol = latent_size)
sampled_labels <- sample(0:9, size = 2*batch_size, replace = TRUE) %>%
matrix(ncol = 1)
# Want to train the generator to trick the discriminator
# For the generator, we want all the {fake, not-fake} labels to say
# not-fake
trick <- rep(1, 2*batch_size) %>% matrix(ncol = 1)
combined_loss <- train_on_batch(
combined,
list(noise, sampled_labels),
list(trick, sampled_labels)
)
epoch_gen_loss <- rbind(epoch_gen_loss, unlist(combined_loss))
}
cat(sprintf("\nTesting for epoch %02d:", epoch))
# Evaluate the testing loss here
# Generate a new batch of noise
noise <- runif(num_test*latent_size, min = -1, max = 1) %>%
matrix(nrow = num_test, ncol = latent_size)
# Sample some labels from p_c and generate images from them
sampled_labels <- sample(0:9, size = num_test, replace = TRUE) %>%
matrix(ncol = 1)
generated_images <- predict(generator, list(noise, sampled_labels))
X <- abind(mnist$test$x, generated_images, along = 1)
y <- c(rep(1, num_test), rep(0, num_test)) %>% matrix(ncol = 1)
aux_y <- c(mnist$test$y, sampled_labels) %>% matrix(ncol = 1)
# See if the discriminator can figure itself out...
discriminator_test_loss <- evaluate(
discriminator, X, list(y, aux_y),
verbose = FALSE
) %>% unlist()
discriminator_train_loss <- apply(epoch_disc_loss, 2, mean)
# Make new noise
noise <- runif(2*num_test*latent_size, min = -1, max = 1) %>%
matrix(nrow = 2*num_test, ncol = latent_size)
sampled_labels <- sample(0:9, size = 2*num_test, replace = TRUE) %>%
matrix(ncol = 1)
trick <- rep(1, 2*num_test) %>% matrix(ncol = 1)
generator_test_loss = combined %>% evaluate(
list(noise, sampled_labels),
list(trick, sampled_labels),
verbose = FALSE
)
generator_train_loss <- apply(epoch_gen_loss, 2, mean)
# Generate an epoch report on performance
row_fmt <- "\n%22s : loss %4.2f | %5.2f | %5.2f"
cat(sprintf(
row_fmt,
"generator (train)",
generator_train_loss[1],
generator_train_loss[2],
generator_train_loss[3]
))
cat(sprintf(
row_fmt,
"generator (test)",
generator_test_loss[1],
generator_test_loss[2],
generator_test_loss[3]
))
cat(sprintf(
row_fmt,
"discriminator (train)",
discriminator_train_loss[1],
discriminator_train_loss[2],
discriminator_train_loss[3]
))
cat(sprintf(
row_fmt,
"discriminator (test)",
discriminator_test_loss[1],
discriminator_test_loss[2],
discriminator_test_loss[3]
))
cat("\n")
# Generate some digits to display
noise <- runif(10*latent_size, min = -1, max = 1) %>%
matrix(nrow = 10, ncol = latent_size)
sampled_labels <- 0:9 %>%
matrix(ncol = 1)
# Get a batch to display
generated_images <- predict(
generator,
list(noise, sampled_labels)
)
img <- NULL
for(i in 1:10){
img <- cbind(img, generated_images[i,,,])
}
((img + 1)/2) %>% as.raster() %>%
plot()
}
|
#https://rstudio-pubs-static.s3.amazonaws.com/265713_cbef910aee7642dc8b62996e38d2825d.html
#Solve Chinese problem: https://psmethods.postach.io/post/ru-he-geng-gai-rde-yu-she-yu-xi
#Set Language as traditional Chinese
Sys.setlocale(category = "LC_ALL", locale = "cht")
rm(list=ls(all.names = TRUE))
library(NLP) # install.packages("NLP")
library(tm) # install.packages("tm")
library(RColorBrewer)
library(wordcloud) #install.packages("wordcloud")
library(jiebaRD) # install.packages("jiebaRD")
library(jiebaR) # install.packages("jiebaR") Chinese word segmentation
#Read all txt files
# NOTE(review): the quoted block below is a string literal used as a
# commented-out alternative for reading every .txt in the working directory.
'filenames <- list.files(getwd(), pattern="*.txt")
files <- lapply(filenames, readLines)'
file <- readLines("D:/NTU_DataScience (R)/NTU_CSX_DataScience/Week_4/HW/Chinese_downloaded_txt/Chiang's Dairy.txt")
#file
#Cleaning data, online source: http://www.sthda.com/english/wiki/text-mining-and-word-cloud-fundamentals-in-r-5-simple-steps-you-should-know
docs <- Corpus(VectorSource(file))
# Transformer that replaces every match of `pattern` with a single space.
toSpace <- content_transformer(function(x, pattern) {
  gsub(pattern, " ", x)
})
docs <- tm_map(docs, removePunctuation) #remove punctuation
docs <- tm_map(docs, removeNumbers)
# Terms blanked out before segmentation: high-frequency classical-Chinese
# particles/function words plus any Latin letters.  A single loop replaces
# the 34 copy-pasted tm_map() calls of the original (same order preserved;
# each entry is used as a gsub() regular expression by toSpace).
drop_terms <- c("日", "月", "之", "與", "而", "其", "在", "以", "今", "亦",
                "有", "則", "於", "二", "丑", "為", "我", "矣", "此", "㸶",
                "後", "已", "薔", "乃", "是", "皆", "胤", "螻", "的", "但",
                "㸴", "即", "由", "[a-zA-Z]")
for (term in drop_terms) {
  docs <- tm_map(docs, toSpace, term)
}
docs <- tm_map(docs, stripWhitespace)
#docs <- tm_map(docs, PlainTextDocument)
docs
# Initialise the jieba segmenter with default dictionaries.
mixseg = worker()
#Cutter online source: https://www.jianshu.com/p/260c20c7e334
# Register domain terms so the segmenter keeps them as single tokens
# instead of splitting them into characters.
new_user_word(mixseg,c("中國", "中華", "司令", "中央", "對手", "對華", "不可", "不能", "不敵", "不如", "不料", "日本", "本軍", "根本"))
#segment(file,mixseg)
# Tokenise one corpus document: segment its text content into a word vector.
jieba_tokenizer=function(d){
  unlist(segment(d[[1]],mixseg))
}
seg = lapply(docs, jieba_tokenizer)
# Word frequency table over all documents, sorted most-frequent first.
freqFrame = as.data.frame(table(unlist(seg)))
freqFrame = freqFrame[order(freqFrame$Freq,decreasing=TRUE), ]
library(knitr)
# Show the 50 most frequent terms as a markdown table.
kable(head(freqFrame, 50), format = "markdown")
# Word cloud of the segmented terms: only words appearing >= 26 times,
# at most 150 words, font scale 5 -> 0.1, 10% of words rotated,
# colours from the Dark2 palette.
wordcloud(freqFrame$Var1,freqFrame$Freq,
          scale=c(5,0.1),min.freq=26,max.words=150,
          random.order=TRUE, random.color=FALSE,
          rot.per=.1, colors=brewer.pal(8, "Dark2"),
          ordered.colors=FALSE,use.r.layout=FALSE,
          fixed.asp=TRUE)
| /Week_4/HW/Chinese_downloaded_txt/Word cloud- Chiang's Dairy.R | no_license | LouieChen16/NTU_CSX_DataScience | R | false | false | 3,346 | r | #https://rstudio-pubs-static.s3.amazonaws.com/265713_cbef910aee7642dc8b62996e38d2825d.html
#Solve Chinese problem: https://psmethods.postach.io/post/ru-he-geng-gai-rde-yu-she-yu-xi
#Set Language as traditional Chinese
Sys.setlocale(category = "LC_ALL", locale = "cht")
# NOTE(review): clearing the whole workspace inside a script is fine for a
# personal analysis but clobbers anything the caller had loaded.
rm(list=ls(all.names = TRUE))
library(NLP) # install.packages("NLP")
library(tm) # install.packages("tm")
library(RColorBrewer)
library(wordcloud) #install.packages("wordcloud")
library(jiebaRD) # install.packages("jiebaRD")
library(jiebaR) # install.packages("jiebaR") Chinese word segmentation
#Read all txt files
# The quoted block below is a string literal used as a pseudo-comment
# (disabled code for reading every .txt in the working directory).
'filenames <- list.files(getwd(), pattern="*.txt")
files <- lapply(filenames, readLines)'
# Hard-coded absolute Windows path -- only runs on the author's machine.
file <- readLines("D:/NTU_DataScience (R)/NTU_CSX_DataScience/Week_4/HW/Chinese_downloaded_txt/Chiang's Dairy.txt")
#file
#Cleanning data, online source: http://www.sthda.com/english/wiki/text-mining-and-word-cloud-fundamentals-in-r-5-simple-steps-you-should-know
docs <- Corpus(VectorSource(file))
# Transformer that replaces every match of `pattern` with a space.
toSpace <- content_transformer(function(x, pattern) {
  return (gsub(pattern, " ", x))
}
)
docs <- tm_map(docs, removePunctuation) #strip punctuation
docs <- tm_map(docs, removeNumbers)
# Replace each high-frequency Chinese function word / stop character (plus
# any Latin letters, via the final regex entry) with a space so they do not
# dominate the word cloud. A single vector + loop replaces the original run
# of 34 near-identical tm_map() calls; the replacements performed (and
# their order) are unchanged.
stop_patterns <- c("日", "月", "之", "與", "而", "其", "在", "以", "今",
                   "亦", "有", "則", "於", "二", "丑", "為", "我", "矣",
                   "此", "㸶", "後", "已", "薔", "乃", "是", "皆", "胤",
                   "螻", "的", "但", "㸴", "即", "由",
                   "[a-zA-Z]") # last entry is a regex matching Latin letters
for (pat in stop_patterns) {
  docs <- tm_map(docs, toSpace, pat)
}
docs <- tm_map(docs, stripWhitespace)
#docs <- tm_map(docs, PlainTextDocument)
docs
# jiebaR segmenter with default (mixed) model
mixseg = worker()
#Cutter online source: https://www.jianshu.com/p/260c20c7e334
# Register proper nouns / multi-character terms so the segmenter keeps
# them whole instead of splitting them into single characters.
new_user_word(mixseg,c("中國", "中華", "司令", "中央", "對手", "對華", "不可", "不能", "不敵", "不如", "不料", "日本", "本軍", "根本"))
#segment(file,mixseg)
# Tokenizer applied to each tm document: segment its text content.
jieba_tokenizer=function(d){
  unlist(segment(d[[1]],mixseg))
}
seg = lapply(docs, jieba_tokenizer)
# Word frequency table, sorted most-frequent first.
freqFrame = as.data.frame(table(unlist(seg)))
freqFrame = freqFrame[order(freqFrame$Freq,decreasing=TRUE), ]
library(knitr)
# Show the top-50 terms as a markdown table.
kable(head(freqFrame, 50), format = "markdown")
wordcloud(freqFrame$Var1,freqFrame$Freq,
          scale=c(5,0.1),min.freq=26,max.words=150,
          random.order=TRUE, random.color=FALSE,
          rot.per=.1, colors=brewer.pal(8, "Dark2"),
          ordered.colors=FALSE,use.r.layout=FALSE,
          fixed.asp=TRUE)
|
# helper function used by FSR() and getPoly()
# Count distinct values: a single-column object yields one count; a
# multi-column object (data.frame / list) yields one count per column,
# computed recursively.
N_distinct <- function(x) {
  single_column <- ncol(as.matrix(x)) == 1
  if (single_column) {
    length(unique(x))
  } else {
    unlist(lapply(x, N_distinct))
  }
}
#is_continuous <- function(x) if(is.numeric(x)) N_distict(x) > 2 else FALSE
# TRUE per column when the column is numeric AND has more than two distinct
# values (so numeric 0/1 dummies do not count as continuous). Relies on the
# sibling helper N_distinct() being vectorized over columns.
is_continuous <- function(x) unlist(lapply(x, is.numeric)) & N_distinct(x) > 2
# Build the list-element name under which results for model number m are
# stored, e.g. mod(3) == "model3".
mod <- function(m) {
  paste0("model", m)
}
# TRUE when x exists (is non-NULL) and contains no missing values.
complete <- function(x) {
  if (is.null(x)) {
    return(FALSE)
  }
  sum(is.na(x)) == 0
}
# NULL-tolerant wrapper around match.arg(): NULL passes through unchanged,
# anything else is matched (with partial matching) against `choices`.
match_arg <- function(arg, choices) {
  if (is.null(arg)) {
    return(arg)
  }
  match.arg(arg, choices)
}
# Safe wrapper around model.matrix(): returns NULL (optionally printing the
# error when `noisy`) instead of stopping when the design matrix cannot be
# built. With intercept = FALSE the intercept column is dropped.
model_matrix <- function(modelFormula, dataFrame, intercept, noisy=TRUE, ...){
  mm <- try(model.matrix(modelFormula, dataFrame, na.action = "na.omit", ...),
            silent = TRUE)
  if (inherits(mm, "try-error")) {
    if (noisy) {
      cat("model.matrix() reported the following error:\n", mm, "\n\n")
    }
    NULL
  } else if (intercept) {
    mm
  } else {
    mm[, -1]
  }
}
# Extract the polynomial degree from a term label such as "I(x^2)" -> 2.
# Terms without a "^" are degree 1. The degree digits are assumed to sit
# between the "^" and the closing ")" of the term label.
get_degree <- function(combo){
  if(grepl("\\^", combo)){
    # Split into individual characters. BUG FIX: the original
    # strsplit(combo, "^") treated "^" as a zero-width regex anchor, so the
    # string was never split, match("^", ch) was always NA, and the degree
    # came back NA.
    ch <- unlist(strsplit(combo, "", fixed = TRUE))
    start <- match("^", ch) + 1
    end <- match(")", ch) - 1
    return(as.numeric(paste(ch[start:end], collapse="")))
  }else{
    return(1)
  }
}
# Enumerate interaction terms (products of 2..maxInteractDeg features),
# excluding products that repeat any pattern in `may_not_repeat` (e.g. two
# dummy levels of the same factor -- see which_include()) and, when maxDeg
# is given, products whose summed polynomial degrees exceed maxDeg.
# Returns the interaction labels, optionally prefixed by the raw features.
get_interactions <- function(features, maxInteractDeg,
                             may_not_repeat = NULL, maxDeg = NULL,
                             include_features = TRUE){
  interactions <- list()
  if(length(features) > 1 && maxInteractDeg > 1){
    for(i in 2:maxInteractDeg){
      combos <- combn(features, i) # i x choose(n, i) matrix
      # drop = FALSE keeps `combos` a matrix even when only one combination
      # survives (a bare vector would break the apply() calls below)
      combos <- combos[, which_include(combos, may_not_repeat), drop = FALSE]
      if(!is.null(maxDeg)){ # drop combos for which sum of degrees > maxDeg
        too_high <- which(colSums(apply(combos, 1:2, get_degree)) > maxDeg)
        # BUG FIX: x[, -integer(0)] selects ZERO columns, so the original
        # unguarded combos[, -which(...)] wiped out every combination
        # whenever none exceeded maxDeg.
        if(length(too_high) > 0)
          combos <- combos[, -too_high, drop = FALSE]
      }
      interactions[[i]] <- apply(combos, 2, paste, collapse = " * ")
    }
  }
  interactions <- unlist(interactions)
  if(include_features) return(c(features, interactions)) else return(interactions)
}
which_include <- function(combos, may_not_repeat){
  # Return the column indices of `combos` that may become interaction
  # terms. A column is excluded when any pattern in `may_not_repeat`
  # matches more than one of its entries (via grepl()). That filters out:
  #   * products of mutually exclusive dummy levels of one factor, e.g.
  #     (party == 'D') * (party == 'R'), which are identically zero;
  #   * products of different monomials of the same variable, which would
  #     silently raise the polynomial degree past the user's setting.
  combos <- as.matrix(combos)
  keepers <- 1:ncol(combos)
  if (length(may_not_repeat) == 0) {
    return(keepers)
  }
  to_drop <- list()
  for (j in 1:length(may_not_repeat)) {
    hits <- colSums(apply(combos, 2, grepl, pattern = may_not_repeat[j]))
    to_drop[[j]] <- which(hits > 1)
  }
  to_drop <- unique(unlist(to_drop))
  if (length(to_drop) > 0) keepers[-to_drop] else keepers
}
# deprecated
isolate_interaction <- function(elements, degree){
  # Build a formula fragment holding the full product of `elements` minus
  # every lower-order sub-product up to `degree`.
  out <- paste(elements, collapse = " * ")
  for (d in 1:degree) {
    sub_terms <- combn(elements, d)
    if (d > 1) {
      sub_terms <- apply(sub_terms, 2, paste, collapse = "*")
    }
    out <- paste(out, "-", paste(sub_terms, collapse = " - "))
  }
  out
}
classify <- function(probs, as_factor=TRUE, labels=NULL, cutoff = NULL){ # not meant for binary labels...
  # Map predicted probabilities to class labels. A single column is treated
  # as P(second label) and thresholded at `cutoff` (default 0.5); a matrix
  # of class probabilities picks the column with the largest probability in
  # each row, using column names (or generated "labelK" names) as labels.
  if (ncol(as.matrix(probs)) > 1) {
    if (!is.null(labels)) {
      colnames(probs) <- labels
    }
    if (is.null(colnames(probs))) {
      colnames(probs) <- paste0("label", 1:ncol(probs))
    }
    picked <- colnames(probs)[apply(probs, 1, which.max)]
  } else {
    if (is.null(labels)) {
      labels <- c("label1", "label2")
    }
    if (is.null(cutoff)) {
      cutoff <- 0.5
    }
    picked <- labels[(probs > cutoff) + 1]
  }
  if (as_factor) {
    picked <- as.factor(picked)
  }
  picked
}
# Convert a categorical outcome x into empirical log-odds scores so it can
# be fit with linear (OLS) machinery. Binary x maps each observation to
# +/- log(p/(1-p)) of the positive class; a K-level x returns an
# N x (K-1) matrix of log odds of each non-reference level vs. the first
# level. When `split` (training subset) is given, probabilities come from
# the training rows only, but scores are produced for ALL rows. Uses the
# sibling helper N_distinct().
log_odds <- function(x, split = NULL, noisy = TRUE){
  if(N_distinct(x) == 2){
    # binary case: coerce factor to 0/1
    if(is.factor(x))
      x <- as.numeric(x) - 1
    p <- mean(if(is.null(split)) x else x[split], na.rm=TRUE)
    y <- ifelse(x == 1, log(p/(1 - p)), log((1 - p)/p))
  }else{
    if(!is.factor(x))
      x <- as.factor(x)
    if(is.null(split)){
      # first factor level is the reference category
      p_reference <- mean(x == levels(x)[1])
      y <- matrix(nrow = length(x), ncol = (length(levels(x)) - 1))
      colnames(y) <- levels(x)[-1]
      for(i in 1:ncol(y)){
        p_interest <- mean(x == levels(x)[i + 1])
        y[ , i] <- ifelse(x == levels(x)[i + 1],
                          log(p_interest/p_reference),
                          log(p_reference/p_interest))
      }
    }else{ # put whole sample on training scale, so N rows, not N_train
      x_train <- x[split]
      p_reference <- mean(x_train == levels(x)[1])
      y <- matrix(nrow = length(x),
                  ncol = (length(levels(x_train)) - 1))
      colnames(y) <- levels(x_train)[-1]
      for(i in 1:ncol(y)){
        p_interest <- mean(x_train == levels(x_train)[i + 1])
        y[ , i] <- ifelse(x == levels(x_train)[i + 1],
                          log(p_interest/p_reference),
                          log(p_reference/p_interest))
      }
    }
  }
  # NAs can arise when a level has empirical probability 0 or 1
  if(noisy && sum(is.na(y)))
    warning("NAs encountered by log_odds")
  return(y)
}
# if !recursive, divides into blocks based on n and max_block
# if recursive, calls block_solve(), rather than solve(), until n/2 < max_block
# note: matrix inversion and several matrix multiplications must be performed on largest blocks!
# assumes matrices are dense; otherwise, use sparse options...
# max_block chosen by trial-and-error on 2017 MacBook Pro i5 with 16 gigs of RAM
# (too small == too much subsetting, too big == matrix calculations too taxing)
# S, crossprod(X), will be crossprod(X) only at outer call
# Either S or X should be provided, but not both
# S = | A B |
# | C D |
# for full expressions used below: https://en.wikipedia.org/wiki/Invertible_matrix#Blockwise_inversion
# returns NULL if inversion fails either due to collinearity or memory exhaustion
# Blockwise matrix inversion (see header comments above for the layout
# S = [A B; C D] and the Wikipedia reference). Either S (square) or X
# (rectangular; then (X'X)^{-1} is returned without materializing X'X all
# at once) is supplied. When A_inv (inverse of the top-left block) is
# already known, only the new blocks are computed. Returns NULL on
# collinearity/memory failure. Left byte-identical: the blockwise algebra
# is order-sensitive, so only review notes are added here.
block_solve <- function(S = NULL, X = NULL, max_block = 250, A_inv = NULL, recursive=TRUE, noisy=TRUE){
  if(is.null(S) == is.null(X))
    stop("Please provide either rectangular matrix as X or a square matrix as S to be inverted by block_solve(). (If X is provided, (X'X)^{-1} is returned but in a more memory efficient manner than providing S = X'X directly).")
  if(!is.null(A_inv) && is.null(X))
    stop("If A_inv is provided, X must be provided to block_solve() too. (Suppose A_inv has p columns; A must be equal to solve(crossprod(X[,1:p])) or, equivalently, block_solve(X=X[,1:p]).")
  # Plain solve() wrapped to return NULL on failure.
  # NOTE(review): silent = noisy looks inverted -- when noisy=TRUE the
  # try() error is SUPPRESSED and only "." is printed; confirm intent.
  solvable <- function(A, noisy=TRUE){
    tried <- try(solve(A), silent = noisy)
    if(noisy) cat(".")
    if(inherits(tried, "try-error")) return(NULL) else return(tried)
  }
  if(is.null(X)){
    stopifnot(nrow(S) == ncol(S))
    symmetric <- isSymmetric(S)
    n <- ncol(S) # if S is crossprod(X), this is really a p * p matrix
    k <- floor(n/2)
    A <- S[1:k, 1:k]
    B <- S[1:k, (k + 1):n]
    D <- S[(k + 1):n, (k + 1):n]
  }else{
    n <- ncol(X) # n refers to the resulting crossproduct of S as above
    if(is.null(A_inv)){
      k <- floor(n/2)
      A <- crossprod(X[,1:k])
    }else{
      k <- ncol(A_inv)
    }
    B <- crossprod(X[,1:k], X[,(k+1):n])
    D <- crossprod(X[,(k+1):n])
    symmetric <- TRUE # refers to S, not A, B, or D (B in general will be rectangular...)
  }
  # Recurse while the leading block is still bigger than max_block.
  invert <- if(recursive && (k > max_block)) block_solve else solvable
  if(is.null(A_inv)){
    A_inv <- invert(A, noisy=noisy)
    remove(A)
  }
  if(!is.null(A_inv)){
    if(symmetric){
      # S, crossprod(X), will be symmetric at highest level but not at lower levels
      # want memory savings from that symmetry when it applies
      # by symmetry, B == t(C), so C is never constructed
      if(exists("S")) remove(S)
      C.A_inv <- crossprod(B, A_inv) # really C %*% A_inv since C == t(B)
      # NOTE(review): `noisy = noisy` is not forwarded here, unlike the
      # other invert() calls.
      schur_inv <- invert(D - C.A_inv %*% B)
      remove(D)
      if(!is.null(schur_inv)){
        S_inv <- matrix(nrow=n, ncol=n)
        S_inv[1:k, 1:k] <- A_inv + A_inv %*% B %*% schur_inv %*% C.A_inv
        remove(B, A_inv)
        S_inv[(k+1):n, 1:k] <- -schur_inv %*% C.A_inv
        S_inv[(k+1):n, (k+1):n] <- schur_inv
        remove(schur_inv, C.A_inv)
        S_inv[1:k, (k+1):n] <- t(S_inv[(k+1):n, 1:k]) # since symmetric matrices have symm inverses
        return(S_inv)
      }else{
        return(NULL)
      }
    }else{
      C.A_inv <- crossprod(B, A_inv) # S[(k+1):n, 1:k] %*% A_inv # really C %*% A_inv
      # NOTE(review): exists("C.A_inv") is always TRUE (just assigned), and
      # unlike the symmetric branch there is no NULL check on schur_inv
      # before it is used below -- confirm whether a failure here should
      # also return NULL.
      if(exists("C.A_inv")){
        if(exists("S")) remove(S)
        schur_inv <- invert(D - C.A_inv %*% B, noisy=noisy)
        remove(D)
        S_inv <- matrix(nrow=n, ncol=n)
        S_inv[1:k, 1:k] <- A_inv + A_inv %*% B %*% schur_inv %*% C.A_inv
        S_inv[(k+1):n, 1:k] <- -schur_inv %*% C.A_inv
        remove(C.A_inv)
        S_inv[(k+1):n, (k+1):n] <- schur_inv
        S_inv[1:k, (k+1):n] <- -A_inv %*% B %*% schur_inv
        remove(B, A_inv, schur_inv)
        return(S_inv)
      }else{
        return(NULL)
      }
    }
  }else{
    return(NULL)
  }
}
# Fit model m of `object` (an FSR-style state list) by OLS: build the design
# matrix from the stored formula, invert X'X with block_solve() (reusing the
# previously accepted inverse A_inv when available), store coefficients and
# run post_estimation(). Returns the updated object. The outcome is assumed
# to be the LAST column of Xy.
ols <- function(object, Xy, m, train = TRUE, y = NULL, y_test = NULL){
  # design matrix on training rows (train=TRUE) or the full sample
  X <- if(train){
    model_matrix(formula(object$models$formula[m]),
                 Xy[object$split == "train", ],
                 noisy = object$noisy, intercept=TRUE)
  }else{
    model_matrix(formula(object$models$formula[m]),
                 Xy, noisy = object$noisy, intercept=TRUE)
  }
  # NOTE(review): exists("X") is always TRUE (X was just assigned, possibly
  # NULL); the intended guard is probably !is.null(X).
  if(exists("X")){
    if(is.null(y))
      y <- if(train) Xy[object$split == "train", ncol(Xy)] else Xy[, ncol(Xy)]
    # NOTE(review): "&& object$noisy" couples this p >= n early exit to
    # verbosity -- with noisy = FALSE an underdetermined model still falls
    # through to estimation. Confirm the conjunction is intended.
    if(ncol(X) >= length(y) && object$noisy){
      message("There are too few training observations to estimate further models (model == ",
              m, "). Exiting.")
      object$unable_to_estimate <- object$max_fails
    }else{
      XtX_inv <- block_solve(X = X, max_block = object$max_block,
                             A_inv = object$XtX_inv_accepted)
      # initialized to NULL,
      # which block_solve interprets as 'start from scratch'
      if(!is.null(XtX_inv)){
        # beta_hat = (X'X)^{-1} X' y
        object[[mod(m)]][["coeffs"]] <- tcrossprod(XtX_inv, X) %*% y
        if(complete(object[[mod(m)]][["coeffs"]])){
          object$models$estimated[m] <- TRUE
          object <- post_estimation(object, Xy, m, y_test)
          # keep this inverse as the A block for the next (nested) model
          if(object$models$accepted[m])
            object$XtX_inv_accepted <- XtX_inv
          remove(XtX_inv)
        }
      }
    }
  }
  if(!object$models$estimated[m]){
    warning("Unable to estimate model", m, "\n\n")
    object$unable_to_estimate <- object$unable_to_estimate + 1
  }
  if(object$noisy) cat("\n")
  return(object)
}
# Evaluate freshly estimated model m on the test split and update the
# model-tracking fields of `object`: out-of-sample fit statistics, the
# running "best model" bookkeeping, and the accepted flag (accepted when the
# adjusted fit improves on the best so far by more than threshold_include).
post_estimation <- function(object, Xy, m, y_test = NULL){
  # P = number of fitted coefficients (rows per class for multinomial)
  P <- if(object$outcome == "multinomial")
    nrow(object[[mod(m)]][["coeffs"]]) else length(object[[mod(m)]][["coeffs"]])
  object$models$P[m] <- object[[mod(m)]][["p"]] <- P
  if(is.null(y_test))
    y_test <- Xy[object$split == "test", ncol(Xy)]
  if(object$outcome == "continuous"){
    object[[mod(m)]][["y_hat"]] <- predict(object, Xy[object$split=="test", ], m, standardize = FALSE)
    # NOTE(review): despite the name, this is a scaled mean ABSOLUTE error
    # (y_scale * mean|error|), not a mean absolute PERCENTAGE error.
    MAPE <- object$y_scale * mean(abs(object[[mod(m)]][["y_hat"]] - y_test))
    object$models$MAPE[m] <- object[[mod(m)]][["MAPE"]] <- MAPE
  }else{
    pred <- predict(object, Xy[object$split=="test", ], m, standardize = FALSE)
    object[[mod(m)]][["y_hat"]] <- pred$probs
    object[[mod(m)]][["classified"]] <- pred$classified
    object$models$test_accuracy[m] <- mean(as.character(pred$classified) == object$y_test_labels)
    if(!object$linear_estimation){
      # glm stores "aic", other fits store "AIC"
      object$models$AIC[m] <- if(object$outcome == "binary")
        object[[mod(m)]][["fit"]][["aic"]] else
          object[[mod(m)]][["fit"]][["AIC"]]
      # convert AIC penalty (2P) to the BIC penalty (log(N) * P)
      object$models$BIC[m] <- object$models$AIC[m] - 2*P + log(object$N_train)*P
    }
  }
  if(object$outcome != "multinomial"){
    # out-of-sample R^2, shrunk by a degrees-of-freedom factor
    # NOTE(review): this is (n-P-1)/(n-1)*R2, not the textbook
    # 1-(1-R2)(n-1)/(n-P-1) adjusted R^2 -- confirm intended.
    R2 <- cor(object[[mod(m)]][["y_hat"]], as.numeric(y_test))^2
    adjR2 <- (object$N_train - P - 1)/(object$N_train - 1)*R2
    object$models$test_adjR2[m] <- object[[mod(m)]][["adj_R2"]] <- adjR2
    improvement <- adjR2 - object$best_test_adjR2
  }else{
    adj_accuracy <- (object$N_train - P)/(object$N_train - 1)*object$models$test_accuracy[m]
    object$models$test_adj_accuracy[m] <- adj_accuracy
    improvement <- adj_accuracy - object$best_test_adj_accuracy
  }
  object[["improvement"]] <- improvement
  if(object$improvement > object$threshold_include){
    # this model becomes the new incumbent
    object[["best_formula"]] <- object$models$formula[m]
    object[["best_coeffs"]] <- object[[mod(m)]][["coeffs"]]
    if(object$outcome == "multinomial"){
      object[["best_test_adj_accuracy"]] <- adj_accuracy
    }else{
      object[["best_test_adjR2"]] <- adjR2
    }
    object$models$accepted[m] <- TRUE
    if(object$outcome == "continuous")
      object[["best_MAPE"]] <- MAPE
  }
  return(object)
}
# 09/11/18, NM: moved this function out of polyFit(), now standalone,
# for readability
# Dimension-reduce x via PCA. pcaMethod selects full PCA ("prcomp") or a
# truncated eigendecomposition (RSpectra::eigs). pcaPortion is either the
# number of components (>= 1) or, for prcomp only, the cumulative
# proportion of sdev to retain. Returns list(xdata, xy.pca, k).
applyPCA <- function(x,pcaMethod,pcaPortion) {
  if (pcaMethod == "prcomp") { # use prcomp for pca
    tmp <- system.time(
      #xy.pca <- prcomp(x[,-ncol(xy)])
      xy.pca <- prcomp(x)
    )
    cat('PCA time: ',tmp,'\n')
    if (pcaPortion >= 1.0) k <- pcaPortion else {
      # smallest k whose cumulative sdev share reaches pcaPortion
      # NOTE(review): the share is computed from sdev, not variance
      # (sdev^2); the usual "proportion of variance" would square sdev.
      k <- 0
      pcNo = cumsum(xy.pca$sdev)/sum(xy.pca$sdev)
      for (k in 1:length(pcNo)) {
        if (pcNo[k] >= pcaPortion)
          break
      }
    }
    cat(k,' principal comps used\n')
    xdata <- xy.pca$x[,1:k, drop=FALSE]
  } else { # use RSpectra for PCA
    #requireNamespace(RSpectra)
    # here pcaPortion must be an integer component count
    xy.cov <- cov(x)
    k <- pcaPortion
    xy.eig <- eigs(xy.cov,k)
    xy.pca <- xy.eig
    cat(k,' principal comps used\n')
    #xdata <- as.matrix(x[,-ncol(x)]) %*% xy.eig$vectors[,1:k]
    xdata <- as.matrix(x) %*% xy.eig$vectors[,1:k]
  }
  return(list(xdata=xdata,xy.pca=xy.pca,k=k))
}
| /R/helper_functions.R | no_license | radovankavicky/polyreg | R | false | false | 14,101 | r | # helper function used by FSR() and getPoly()
N_distinct <- function(x) if(ncol(as.matrix(x)) == 1) length(unique(x)) else unlist(lapply(x, N_distinct))
#is_continuous <- function(x) if(is.numeric(x)) N_distict(x) > 2 else FALSE
is_continuous <- function(x) unlist(lapply(x, is.numeric)) & N_distinct(x) > 2
mod <- function(m) paste0("model", m)
complete <- function(x) !is.null(x) && sum(is.na(x)) == 0
match_arg <- function(arg, choices){if(is.null(arg)) arg else match.arg(arg, choices)}
model_matrix <- function(modelFormula, dataFrame, intercept, noisy=TRUE, ...){
tried <- try(model.matrix(modelFormula, dataFrame, na.action = "na.omit", ...), silent=TRUE)
if(inherits(tried, "try-error")){
if(noisy) cat("model.matrix() reported the following error:\n", tried, "\n\n")
return(NULL)
} else {
if(intercept) return(tried) else return(tried[,-1])
}
}
# Extract the polynomial degree from a term label such as "I(x^2)" -> 2.
# Terms without a "^" are degree 1. The degree digits are assumed to sit
# between the "^" and the closing ")" of the term label.
get_degree <- function(combo){
  if(grepl("\\^", combo)){
    # Split into individual characters. BUG FIX: the original
    # strsplit(combo, "^") treated "^" as a zero-width regex anchor, so the
    # string was never split, match("^", ch) was always NA, and the degree
    # came back NA.
    ch <- unlist(strsplit(combo, "", fixed = TRUE))
    start <- match("^", ch) + 1
    end <- match(")", ch) - 1
    return(as.numeric(paste(ch[start:end], collapse="")))
  }else{
    return(1)
  }
}
# Enumerate interaction terms (products of 2..maxInteractDeg features),
# excluding products that repeat any pattern in `may_not_repeat` (e.g. two
# dummy levels of the same factor -- see which_include()) and, when maxDeg
# is given, products whose summed polynomial degrees exceed maxDeg.
# Returns the interaction labels, optionally prefixed by the raw features.
get_interactions <- function(features, maxInteractDeg,
                             may_not_repeat = NULL, maxDeg = NULL,
                             include_features = TRUE){
  interactions <- list()
  if(length(features) > 1 && maxInteractDeg > 1){
    for(i in 2:maxInteractDeg){
      combos <- combn(features, i) # i x choose(n, i) matrix
      # drop = FALSE keeps `combos` a matrix even when only one combination
      # survives (a bare vector would break the apply() calls below)
      combos <- combos[, which_include(combos, may_not_repeat), drop = FALSE]
      if(!is.null(maxDeg)){ # drop combos for which sum of degrees > maxDeg
        too_high <- which(colSums(apply(combos, 1:2, get_degree)) > maxDeg)
        # BUG FIX: x[, -integer(0)] selects ZERO columns, so the original
        # unguarded combos[, -which(...)] wiped out every combination
        # whenever none exceeded maxDeg.
        if(length(too_high) > 0)
          combos <- combos[, -too_high, drop = FALSE]
      }
      interactions[[i]] <- apply(combos, 2, paste, collapse = " * ")
    }
  }
  interactions <- unlist(interactions)
  if(include_features) return(c(features, interactions)) else return(interactions)
}
which_include <- function(combos, may_not_repeat){
# prevents multiplication of mutually exclusive categorical variables' levels
# suppose you have a factor variable, party with levels D, R, I
# at this point, factor features are strings formatted
# (party == 'D') and (party == 'R')
# but identical((party == 'D') * (party == 'R'), rep(0, N)) == TRUE
# this function uses grepl() to prevent such 0 columns from entering
# the formula subsequently...
#
# also, different monomials of the same variable should not interact
# raising the polynomial degree beyond user specification
combos <- as.matrix(combos)
keepers <- 1:ncol(combos)
if(length(may_not_repeat) == 0){
return(keepers)
}else{
to_drop <- list()
for(i in 1:length(may_not_repeat)){
to_drop[[i]] <- which(colSums(apply(combos, 2, grepl, pattern = may_not_repeat[i])) > 1)
}
to_drop <- unique(unlist(to_drop))
if(length(to_drop)) return(keepers[-to_drop]) else return(keepers)
}
}
# depracated
isolate_interaction <- function(elements, degree){
f <- paste(elements, collapse = " * ")
for(i in 1:degree){
tmp <- combn(elements, i)
if(i > 1)
tmp <- apply(tmp, 2, paste, collapse="*")
f <- paste(f, "-", paste(tmp, collapse=" - "))
}
return(f)
}
classify <- function(probs, as_factor=TRUE, labels=NULL, cutoff = NULL){ # not meant for binary labels...
if(ncol(as.matrix(probs)) == 1){
if(is.null(labels))
labels <- c("label1", "label2")
if(is.null(cutoff))
cutoff <- 0.5
classified <- labels[(probs > cutoff) + 1]
}else{
if(!is.null(labels))
colnames(probs) <- labels
if(is.null(colnames(probs)))
colnames(probs) <- paste0("label", 1:ncol(probs))
classified <- colnames(probs)[apply(probs, 1, which.max)]
}
if(as_factor)
classified <- as.factor(classified)
return(classified)
}
log_odds <- function(x, split = NULL, noisy = TRUE){
if(N_distinct(x) == 2){
if(is.factor(x))
x <- as.numeric(x) - 1
p <- mean(if(is.null(split)) x else x[split], na.rm=TRUE)
y <- ifelse(x == 1, log(p/(1 - p)), log((1 - p)/p))
}else{
if(!is.factor(x))
x <- as.factor(x)
if(is.null(split)){
p_reference <- mean(x == levels(x)[1])
y <- matrix(nrow = length(x), ncol = (length(levels(x)) - 1))
colnames(y) <- levels(x)[-1]
for(i in 1:ncol(y)){
p_interest <- mean(x == levels(x)[i + 1])
y[ , i] <- ifelse(x == levels(x)[i + 1],
log(p_interest/p_reference),
log(p_reference/p_interest))
}
}else{ # put whole sample on training scale, so N rows, not N_train
x_train <- x[split]
p_reference <- mean(x_train == levels(x)[1])
y <- matrix(nrow = length(x),
ncol = (length(levels(x_train)) - 1))
colnames(y) <- levels(x_train)[-1]
for(i in 1:ncol(y)){
p_interest <- mean(x_train == levels(x_train)[i + 1])
y[ , i] <- ifelse(x == levels(x_train)[i + 1],
log(p_interest/p_reference),
log(p_reference/p_interest))
}
}
}
if(noisy && sum(is.na(y)))
warning("NAs encountered by log_odds")
return(y)
}
# if !recursive, divides into blocks based on n and max_block
# if recursive, calls block_solve(), rather than solve(), until n/2 < max_block
# note: matrix inversion and several matrix multiplications must be performed on largest blocks!
# assumes matrices are dense; otherwise, use sparse options...
# max_block chosen by trial-and-error on 2017 MacBook Pro i5 with 16 gigs of RAM
# (too small == too much subsetting, too big == matrix calculations too taxing)
# S, crossprod(X), will be crossprod(X) only at outer call
# Either S or X should be provided, but not both
# S = | A B |
# | C D |
# for full expressions used below: https://en.wikipedia.org/wiki/Invertible_matrix#Blockwise_inversion
# returns NULL if inversion fails either due to collinearity or memory exhaustion
block_solve <- function(S = NULL, X = NULL, max_block = 250, A_inv = NULL, recursive=TRUE, noisy=TRUE){
if(is.null(S) == is.null(X))
stop("Please provide either rectangular matrix as X or a square matrix as S to be inverted by block_solve(). (If X is provided, (X'X)^{-1} is returned but in a more memory efficient manner than providing S = X'X directly).")
if(!is.null(A_inv) && is.null(X))
stop("If A_inv is provided, X must be provided to block_solve() too. (Suppose A_inv has p columns; A must be equal to solve(crossprod(X[,1:p])) or, equivalently, block_solve(X=X[,1:p]).")
solvable <- function(A, noisy=TRUE){
tried <- try(solve(A), silent = noisy)
if(noisy) cat(".")
if(inherits(tried, "try-error")) return(NULL) else return(tried)
}
if(is.null(X)){
stopifnot(nrow(S) == ncol(S))
symmetric <- isSymmetric(S)
n <- ncol(S) # if S is crossprod(X), this is really a p * p matrix
k <- floor(n/2)
A <- S[1:k, 1:k]
B <- S[1:k, (k + 1):n]
D <- S[(k + 1):n, (k + 1):n]
}else{
n <- ncol(X) # n refers to the resulting crossproduct of S as above
if(is.null(A_inv)){
k <- floor(n/2)
A <- crossprod(X[,1:k])
}else{
k <- ncol(A_inv)
}
B <- crossprod(X[,1:k], X[,(k+1):n])
D <- crossprod(X[,(k+1):n])
symmetric <- TRUE # refers to S, not A, B, or D (B in general will be rectangular...)
}
invert <- if(recursive && (k > max_block)) block_solve else solvable
if(is.null(A_inv)){
A_inv <- invert(A, noisy=noisy)
remove(A)
}
if(!is.null(A_inv)){
if(symmetric){
# S, crossprod(X), will be symmetric at highest level but not at lower levels
# want memory savings from that symmetry when it applies
# by symmetry, B == t(C), so C is never constructed
if(exists("S")) remove(S)
C.A_inv <- crossprod(B, A_inv) # really C %*% A_inv since C == t(B)
schur_inv <- invert(D - C.A_inv %*% B)
remove(D)
if(!is.null(schur_inv)){
S_inv <- matrix(nrow=n, ncol=n)
S_inv[1:k, 1:k] <- A_inv + A_inv %*% B %*% schur_inv %*% C.A_inv
remove(B, A_inv)
S_inv[(k+1):n, 1:k] <- -schur_inv %*% C.A_inv
S_inv[(k+1):n, (k+1):n] <- schur_inv
remove(schur_inv, C.A_inv)
S_inv[1:k, (k+1):n] <- t(S_inv[(k+1):n, 1:k]) # since symmetric matrices have symm inverses
return(S_inv)
}else{
return(NULL)
}
}else{
C.A_inv <- crossprod(B, A_inv) # S[(k+1):n, 1:k] %*% A_inv # really C %*% A_inv
if(exists("C.A_inv")){
if(exists("S")) remove(S)
schur_inv <- invert(D - C.A_inv %*% B, noisy=noisy)
remove(D)
S_inv <- matrix(nrow=n, ncol=n)
S_inv[1:k, 1:k] <- A_inv + A_inv %*% B %*% schur_inv %*% C.A_inv
S_inv[(k+1):n, 1:k] <- -schur_inv %*% C.A_inv
remove(C.A_inv)
S_inv[(k+1):n, (k+1):n] <- schur_inv
S_inv[1:k, (k+1):n] <- -A_inv %*% B %*% schur_inv
remove(B, A_inv, schur_inv)
return(S_inv)
}else{
return(NULL)
}
}
}else{
return(NULL)
}
}
ols <- function(object, Xy, m, train = TRUE, y = NULL, y_test = NULL){
X <- if(train){
model_matrix(formula(object$models$formula[m]),
Xy[object$split == "train", ],
noisy = object$noisy, intercept=TRUE)
}else{
model_matrix(formula(object$models$formula[m]),
Xy, noisy = object$noisy, intercept=TRUE)
}
if(exists("X")){
if(is.null(y))
y <- if(train) Xy[object$split == "train", ncol(Xy)] else Xy[, ncol(Xy)]
if(ncol(X) >= length(y) && object$noisy){
message("There are too few training observations to estimate further models (model == ",
m, "). Exiting.")
object$unable_to_estimate <- object$max_fails
}else{
XtX_inv <- block_solve(X = X, max_block = object$max_block,
A_inv = object$XtX_inv_accepted)
# initialized to NULL,
# which block_solve interprets as 'start from scratch'
if(!is.null(XtX_inv)){
object[[mod(m)]][["coeffs"]] <- tcrossprod(XtX_inv, X) %*% y
if(complete(object[[mod(m)]][["coeffs"]])){
object$models$estimated[m] <- TRUE
object <- post_estimation(object, Xy, m, y_test)
if(object$models$accepted[m])
object$XtX_inv_accepted <- XtX_inv
remove(XtX_inv)
}
}
}
}
if(!object$models$estimated[m]){
warning("Unable to estimate model", m, "\n\n")
object$unable_to_estimate <- object$unable_to_estimate + 1
}
if(object$noisy) cat("\n")
return(object)
}
post_estimation <- function(object, Xy, m, y_test = NULL){
P <- if(object$outcome == "multinomial")
nrow(object[[mod(m)]][["coeffs"]]) else length(object[[mod(m)]][["coeffs"]])
object$models$P[m] <- object[[mod(m)]][["p"]] <- P
if(is.null(y_test))
y_test <- Xy[object$split == "test", ncol(Xy)]
if(object$outcome == "continuous"){
object[[mod(m)]][["y_hat"]] <- predict(object, Xy[object$split=="test", ], m, standardize = FALSE)
MAPE <- object$y_scale * mean(abs(object[[mod(m)]][["y_hat"]] - y_test))
object$models$MAPE[m] <- object[[mod(m)]][["MAPE"]] <- MAPE
}else{
pred <- predict(object, Xy[object$split=="test", ], m, standardize = FALSE)
object[[mod(m)]][["y_hat"]] <- pred$probs
object[[mod(m)]][["classified"]] <- pred$classified
object$models$test_accuracy[m] <- mean(as.character(pred$classified) == object$y_test_labels)
if(!object$linear_estimation){
object$models$AIC[m] <- if(object$outcome == "binary")
object[[mod(m)]][["fit"]][["aic"]] else
object[[mod(m)]][["fit"]][["AIC"]]
object$models$BIC[m] <- object$models$AIC[m] - 2*P + log(object$N_train)*P
}
}
if(object$outcome != "multinomial"){
R2 <- cor(object[[mod(m)]][["y_hat"]], as.numeric(y_test))^2
adjR2 <- (object$N_train - P - 1)/(object$N_train - 1)*R2
object$models$test_adjR2[m] <- object[[mod(m)]][["adj_R2"]] <- adjR2
improvement <- adjR2 - object$best_test_adjR2
}else{
adj_accuracy <- (object$N_train - P)/(object$N_train - 1)*object$models$test_accuracy[m]
object$models$test_adj_accuracy[m] <- adj_accuracy
improvement <- adj_accuracy - object$best_test_adj_accuracy
}
object[["improvement"]] <- improvement
if(object$improvement > object$threshold_include){
object[["best_formula"]] <- object$models$formula[m]
object[["best_coeffs"]] <- object[[mod(m)]][["coeffs"]]
if(object$outcome == "multinomial"){
object[["best_test_adj_accuracy"]] <- adj_accuracy
}else{
object[["best_test_adjR2"]] <- adjR2
}
object$models$accepted[m] <- TRUE
if(object$outcome == "continuous")
object[["best_MAPE"]] <- MAPE
}
return(object)
}
# 09/11/18, NM: moved this function out of polyFit(), now standalone,
# for readability
applyPCA <- function(x,pcaMethod,pcaPortion) {
if (pcaMethod == "prcomp") { # use prcomp for pca
tmp <- system.time(
#xy.pca <- prcomp(x[,-ncol(xy)])
xy.pca <- prcomp(x)
)
cat('PCA time: ',tmp,'\n')
if (pcaPortion >= 1.0) k <- pcaPortion else {
k <- 0
pcNo = cumsum(xy.pca$sdev)/sum(xy.pca$sdev)
for (k in 1:length(pcNo)) {
if (pcNo[k] >= pcaPortion)
break
}
}
cat(k,' principal comps used\n')
xdata <- xy.pca$x[,1:k, drop=FALSE]
} else { # use RSpectra for PCA
#requireNamespace(RSpectra)
xy.cov <- cov(x)
k <- pcaPortion
xy.eig <- eigs(xy.cov,k)
xy.pca <- xy.eig
cat(k,' principal comps used\n')
#xdata <- as.matrix(x[,-ncol(x)]) %*% xy.eig$vectors[,1:k]
xdata <- as.matrix(x) %*% xy.eig$vectors[,1:k]
}
return(list(xdata=xdata,xy.pca=xy.pca,k=k))
}
|
# Script for generating the list masks, using specific games as guides
# JSONS won't work without making individuals for every repeated form
# That is -- a boxscore has an unknown number of players, thus,
# we can't do a json level relist, we'd have to one per player (so the bottom of
# each list structure, where no more lists occur.)
# Instead, we can make parsed-level masks.
# Field names retained when flattening a player-profile record into a
# parsed data.frame (see header comments: these are parsed-level masks).
# Dotted names (e.g. "currentTeam.id") address nested fields after parsing.
player_mask <- c("id",
                 "firstName",
                 "lastName",
                 "primaryNumber",
                 "birthDate",
                 "birthCity",
                 "birthStateProvince",
                 "birthCountry",
                 "nationality",
                 "height",
                 "weight",
                 "active",
                 "captain",
                 "alternateCaptain",
                 "rookie",
                 "shootsCatches",
                 "rosterStatus",
                 "currentTeam.id",
                 "primaryPosition.abbreviation",
                 "primaryPosition.name",
                 "primaryPosition.type",
                 "primaryPosition.code")
#Skater Template
# Per-game skater stat fields; "id" (player) and "gameID" are join keys.
# NOTE(review): "faceOffWins" vs "faceoffTaken" casing is inconsistent --
# presumably it mirrors the upstream API field names; confirm before
# normalizing.
skater_template<-c(
  "id",
  "gameID",
  "goals",
  "assists",
  "shots",
  "hits",
  "powerPlayGoals",
  "powerPlayAssists",
  "penaltyMinutes",
  "faceOffWins",
  "faceoffTaken",
  "faceOffPct",
  "takeaways",
  "giveaways",
  "shortHandedGoals",
  "shortHandedAssists",
  "blocked",
  "plusMinus",
  "timeOnIce",
  "evenTimeOnIce",
  "powerPlayTimeOnIce",
  "shortHandedTimeOnIce"
)
#Goalie Template
# Per-game goalie stat fields; "id" (player) and "gameID" are join keys.
# NOTE(review): "shots" appears twice in this vector -- likely one entry is
# a mistake (duplicate). Left unchanged here because downstream code may
# rely on the template's length/ordering; confirm against the API response.
# Also note goalies use "pim" while skaters use "penaltyMinutes".
goalie_template<-c(
  "id",
  "gameID",
  "goals",
  "timeOnIce",
  "assists",
  "shots",
  "pim",
  "saves",
  "shots",
  "powerPlaySaves",
  "powerPlayShotsAgainst",
  "shortHandedSaves",
  "shortHandedShotsAgainst",
  "evenSaves",
  "evenShotsAgainst",
  "decision",
  "savePercentage",
  "powerPlaySavePercentage",
  "shortHandedSavePercentage",
  "evenStrengthSavePercentage"
)
# Persist all three masks as internal package data (R/sysdata.rda).
usethis::use_data(player_mask, goalie_template, skater_template, internal = TRUE, overwrite = TRUE)
| /inst/mask_maker.R | no_license | anthonyshook/nhldata | R | false | false | 1,996 | r |
# Script for generating the list masks, using specific games as guides
# JSONS won't work without making individuals for every repeated form
# That is -- a boxscore has an unknown number of players, thus,
# we can't do a json level relist, we'd have to one per player (so the bottom of
# each list structure, where no more lists occur.)
# Instead, we can make parsed-level masks.
player_mask <- c("id",
"firstName",
"lastName",
"primaryNumber",
"birthDate",
"birthCity",
"birthStateProvince",
"birthCountry",
"nationality",
"height",
"weight",
"active",
"captain",
"alternateCaptain",
"rookie",
"shootsCatches",
"rosterStatus",
"currentTeam.id",
"primaryPosition.abbreviation",
"primaryPosition.name",
"primaryPosition.type",
"primaryPosition.code")
#Skater Template
skater_template<-c(
"id",
"gameID",
"goals",
"assists",
"shots",
"hits",
"powerPlayGoals",
"powerPlayAssists",
"penaltyMinutes",
"faceOffWins",
"faceoffTaken",
"faceOffPct",
"takeaways",
"giveaways",
"shortHandedGoals",
"shortHandedAssists",
"blocked",
"plusMinus",
"timeOnIce",
"evenTimeOnIce",
"powerPlayTimeOnIce",
"shortHandedTimeOnIce"
)
#Goalie Template
goalie_template<-c(
"id",
"gameID",
"goals",
"timeOnIce",
"assists",
"shots",
"pim",
"saves",
"shots",
"powerPlaySaves",
"powerPlayShotsAgainst",
"shortHandedSaves",
"shortHandedShotsAgainst",
"evenSaves",
"evenShotsAgainst",
"decision",
"savePercentage",
"powerPlaySavePercentage",
"shortHandedSavePercentage",
"evenStrengthSavePercentage"
)
usethis::use_data(player_mask, goalie_template, skater_template, internal = TRUE, overwrite = TRUE)
|
# NOTE(review): rm(list=ls()) and setwd() in a script clobber the caller's
# workspace/working directory; acceptable for a one-off analysis only.
rm(list=ls(all=TRUE))
setwd("~/Downloads/2018_Fall/682/proj")
library(R2jags)
# PBC (primary biliary cirrhosis) survival dataset
pbc<-read.csv("https://raw.githubusercontent.com/MLSurvival/ESP/master/ESP_TKDE2016/Dataset/pbc.csv")
# recode selected covariates to 0/1 dummies
pbc$drug <- 1*(pbc$treatment==1)
pbc$female <- 1*(pbc$sex == 1)
pbc$stage4 <- 1*(pbc$stage == 4)
pbc$edema1 <- 1*((pbc$edema == 1)|(pbc$edema == 0.5))
Y = pbc$time
X = pbc
# NOTE(review): the next two lines build an "intercept" column (assuming
# "time" is column 1), but the selection below drops it again -- and keeps
# raw "sex" rather than the freshly derived "female". Confirm intended.
X$time = rep(1, nrow(X))
colnames(X)[1] = "intercept"
X <- X[,c("drug","sex","ascites","hepatom","spiders","edema1","age","bili","chol","albumin","copper","alk","sgot","trig","platelet","prothrombin","stage4")]
#split data into train and test (80/20), reproducible via the seed
set.seed(8102)
train_int <- sample(nrow(pbc),floor(nrow(pbc)*0.8))
Y_train = Y[train_int ]
Y_test = Y[-train_int]
X_train = X[train_int, ]
X_test = X[-train_int,]
# --- JAGS_AFT function: Spike & Slab prior --- #
# Fit a lognormal AFT model with spike-and-slab priors on the regression
# coefficients via JAGS, and generate posterior predictions for X_test.
# Returns posterior mean predictions, 95% credible bounds, the coefficient
# draws, and DIC/pD. NOTE(review): the number of predictors is hard-coded
# as 17 in `inits` (rnorm(17), gamma length 17) and in the beta_res column
# names, so this only works for designs with exactly 17 columns -- confirm
# or replace 17 with ncol(X_train).
JAGS_SpikeSlab = function(Y_train,X_train,X_test,n.iter=10000,n.burnin=1000){
  JAGS_AFT = function() {
    # Likelihood: log-normal survival time, shared latent shift sigma*W
    for (i in 1:n_train) {
      Y_train[i] ~ dlnorm(mu[i],inv_sigma2)
      mu[i] <- beta0 + inprod(X_train[i,],beta) + sigma*W
    }
    #prior for beta: spike-and-slab via inclusion indicator gamma[l]
    # gamma = 0 -> precision 1000 (spike at 0); gamma = 1 -> precision 0.01 (slab)
    for(l in 1:p){
      beta[l] ~ dnorm(0,inv_tau2[l])
      inv_tau2[l] <- (1-gamma[l])*1000+gamma[l]*0.01
      gamma[l] ~ dbern(0.5)
    }
    #prior for beta0
    beta0 ~ dnorm(0, 0.0001)
    # Prior for the inverse variance
    inv_sigma2 ~ dgamma(0.0001, 0.0001)
    sigma <- sqrt(1.0/inv_sigma2)
    #prior for W
    W ~ dnorm(0,1)
    #prediction: posterior predictive draws for the test design
    for (i in 1:n_pred) {
      Y_pred[i] ~ dlnorm(mu_pred[i],inv_sigma2)
      mu_pred[i] <- beta0 + inprod(X_test[i,],beta) + sigma*W
    }
  }
  AFT.data = list(Y_train=Y_train,
                  X_train=X_train,
                  X_test =X_test ,
                  n_train=as.integer(nrow(X_train)),
                  n_pred =as.integer(nrow(X_test)),
                  p=ncol(X_train)
  )
  #set parameters to simulate (single chain)
  fit_JAGS_AFT = jags(
    data = AFT.data,
    inits = list(list(inv_sigma2=1,
                      beta=rnorm(17),
                      beta0=rnorm(1),
                      gamma=rep(1,length=17),
                      W=0)),
    parameters.to.save = c("Y_pred", "beta0", "beta"),
    n.chains=1,
    n.iter=n.iter,
    n.burnin=n.burnin,
    model.file=JAGS_AFT
  )
  mcmc_fit = as.mcmc(fit_JAGS_AFT)
  #predicted Y: posterior mean and 95% credible interval per test row
  Y_pred_sample = mcmc_fit[[1]][,paste("Y_pred[",1:nrow(X_test),"]",sep="")]
  Y_pred=apply(Y_pred_sample,2,mean)
  Y_pred_CI = apply(Y_pred_sample,2,quantile,prob=c(0.025,0.975))
  #beta draws (plus intercept), for diagnostics plots
  beta_res = mcmc_fit[[1]][,c(paste0("beta[",1:17,"]"), "beta0")]
  return(list(Y_pred=Y_pred,
              beta_res=beta_res,
              Y_pred_lcl = Y_pred_CI[1,],
              Y_pred_ucl = Y_pred_CI[2,],
              DIC = fit_JAGS_AFT$BUGSoutput$DIC,
              pD = fit_JAGS_AFT$BUGSoutput$pD,#check model complexity(if model has extra parameter, pD is small)
              fit.JAGS=fit_JAGS_AFT))
}
# Plot posterior-predictive upper/lower 95% bounds (black lines) and the
# posterior mean prediction (blue) against age, with observed test outcomes
# overlaid in red.
# x / y_obs default to the globals X_test$age / Y_test so existing calls
# plot_res(res, main=...) keep working; pass them explicitly to avoid the
# dependence on the global environment.
plot_res = function(res, main = "", x = X_test$age, y_obs = Y_test) {
  plot(x, res$Y_pred_ucl, type = "l",
       ylim = c(0, 5000), xlab = "age/day", ylab = "log(Time)",
       cex.lab = 1.5, cex.axis = 1.5, main = main)
  lines(x, res$Y_pred_lcl)
  lines(x, res$Y_pred, col = "blue")
  points(x, y_obs, col = "red")
  invisible(NULL)
}
# Summarize out-of-sample fit: prediction mean squared error and the
# empirical coverage of the 95% posterior predictive interval.
# res must contain Y_pred, Y_pred_lcl and Y_pred_ucl (as built by
# JAGS_SpikeSlab); Y_test are the observed held-out outcomes.
summary_res = function(res, Y_test) {
  pred_err <- res$Y_pred - Y_test
  pmse <- mean(pred_err^2)
  # Strict inequalities: an observation exactly on a bound does not count.
  inside <- (Y_test > res$Y_pred_lcl) & (Y_test < res$Y_pred_ucl)
  c(PMSE = pmse, coverage = mean(inside))
}
# Fit the spike-and-slab AFT model on the training split.
res_aft = JAGS_SpikeSlab(Y_train=Y_train,
                         X_train=X_train,
                         X_test=X_test)
# check output
#par(mfcol=c(2,2))
plot_res(res_aft,main=sprintf("Spike and Slab, DIC = %.2f",res_aft$DIC))
summary_res(res_aft, Y_test)
# MCMC diagnostic plots for the coefficient draws.
# NOTE(review): xyplot/densityplot are lattice generics; this relies on the
# lattice package being attached -- confirm it is loaded on the search path.
xyplot(res_aft$beta_res[,1:3])
densityplot(res_aft$beta_res)
traceplot(res_aft$beta_res)
autocorr.plot(res_aft$beta_res)
#Update MCMC:
# NOTE(review): this trailing block references objects that are never defined
# anywhere in this script (pbcjags, x.test, Y_pred_new, t.test.new) and will
# error if executed -- it looks like a leftover from an earlier version of
# the analysis and should be removed or wired to real objects.
mcmc_fit = as.mcmc(pbcjags)
#predicted Y
Y_pred_sample = mcmc_fit[[1]][,paste("t.pred[",1:nrow(x.test),"]",sep="")]
Y_pred=apply(Y_pred_sample,2,mean)
Y_pred_CI = apply(Y_pred_sample,2,quantile,prob=c(0.025,0.975))
Y_pred_lcl = Y_pred_CI[1,]
Y_pred_ucl = Y_pred_CI[2,]
PMSE = mean((Y_pred_new-t.test.new)^2)
coverage = mean((t.test.new>Y_pred_lcl)&(t.test.new<Y_pred_ucl))
| /SpikeSlab.R | no_license | discmagnet/biostat.682.final.project | R | false | false | 4,952 | r | rm(list=ls(all=TRUE))
# NOTE(review): hard-coded personal working directory -- not portable.
setwd("~/Downloads/2018_Fall/682/proj")
library(R2jags)
# Primary biliary cirrhosis (PBC) data, fetched from GitHub.
pbc<-read.csv("https://raw.githubusercontent.com/MLSurvival/ESP/master/ESP_TKDE2016/Dataset/pbc.csv")
# Recode covariates as 0/1 indicators.
pbc$drug <- 1*(pbc$treatment==1)
pbc$female <- 1*(pbc$sex == 1) # NOTE(review): derived but never used; X below keeps the raw "sex" column
pbc$stage4 <- 1*(pbc$stage == 4)
pbc$edema1 <- 1*((pbc$edema == 1)|(pbc$edema == 0.5)) # edema present, whether 0.5 or 1
# Response: survival time; X: covariate matrix (first column repurposed as an intercept).
Y = pbc$time
X = pbc
X$time = rep(1, nrow(X))
colnames(X)[1] = "intercept"
X <- X[,c("drug","sex","ascites","hepatom","spiders","edema1","age","bili","chol","albumin","copper","alk","sgot","trig","platelet","prothrombin","stage4")]
#split data into train and test (80/20)
set.seed(8102)
train_int <- sample(nrow(pbc),floor(nrow(pbc)*0.8))
Y_train = Y[train_int ]
Y_test = Y[-train_int]
X_train = X[train_int, ]
X_test = X[-train_int,]
# --- JAGS_AFT function: Spike & Slab prior --- #
# Fits a Bayesian log-normal AFT model with spike-and-slab variable selection
# via JAGS (R2jags::jags) and returns posterior predictions for X_test.
#
# Args:
#   Y_train  - numeric vector of survival times for the training rows.
#   X_train  - numeric design matrix / data frame of training covariates.
#   X_test   - covariates for the rows to predict (same columns as X_train).
#   n.iter   - total MCMC iterations; n.burnin - burn-in iterations.
# Returns a list with posterior-mean predictions, 95% prediction interval
# bounds, the beta/beta0 posterior draws, DIC and pD, and the raw jags fit.
JAGS_SpikeSlab = function(Y_train,X_train,X_test,n.iter=10000,n.burnin=1000){
  # Number of covariates. Previously hard-coded as 17 in the initial values
  # and in the beta extraction below, which silently broke the function
  # whenever the design matrix changed.
  p <- ncol(X_train)
  # BUGS model definition (interpreted by JAGS, not evaluated as R).
  JAGS_AFT = function() {
    # Likelihood: log-normal AFT
    for (i in 1:n_train) {
      Y_train[i] ~ dlnorm(mu[i],inv_sigma2)
      mu[i] <- beta0 + inprod(X_train[i,],beta) + sigma*W
    }
    # Spike-and-slab prior on each coefficient: gamma[l]=1 selects the
    # diffuse "slab" (precision 0.01), gamma[l]=0 the narrow "spike"
    # (precision 1000, i.e. coefficient effectively zero).
    for(l in 1:p){
      beta[l] ~ dnorm(0,inv_tau2[l])
      inv_tau2[l] <- (1-gamma[l])*1000+gamma[l]*0.01
      gamma[l] ~ dbern(0.5)
    }
    # Vague prior for the intercept
    beta0 ~ dnorm(0, 0.0001)
    # Vague prior for the inverse variance
    inv_sigma2 ~ dgamma(0.0001, 0.0001)
    sigma <- sqrt(1.0/inv_sigma2)
    # Shared latent term entering every linear predictor
    W ~ dnorm(0,1)
    # Posterior predictive draws for the test rows
    for (i in 1:n_pred) {
      Y_pred[i] ~ dlnorm(mu_pred[i],inv_sigma2)
      mu_pred[i] <- beta0 + inprod(X_test[i,],beta) + sigma*W
    }
  }
  AFT.data = list(Y_train=Y_train,
                  X_train=X_train,
                  X_test =X_test ,
                  n_train=as.integer(nrow(X_train)),
                  n_pred =as.integer(nrow(X_test)),
                  p=p
  )
  # Run a single MCMC chain, monitoring predictions and coefficients.
  fit_JAGS_AFT = jags(
    data = AFT.data,
    inits = list(list(inv_sigma2=1,
                      beta=rnorm(p),
                      beta0=rnorm(1),
                      gamma=rep(1,length=p),
                      W=0)),
    parameters.to.save = c("Y_pred", "beta0", "beta"),
    n.chains=1,
    n.iter=n.iter,
    n.burnin=n.burnin,
    model.file=JAGS_AFT
  )
  mcmc_fit = as.mcmc(fit_JAGS_AFT)
  # Posterior mean and central 95% interval of the predictive draws
  Y_pred_sample = mcmc_fit[[1]][,paste("Y_pred[",seq_len(nrow(X_test)),"]",sep="")]
  Y_pred=apply(Y_pred_sample,2,mean)
  Y_pred_CI = apply(Y_pred_sample,2,quantile,prob=c(0.025,0.975))
  # Posterior draws of the regression coefficients and the intercept
  beta_res = mcmc_fit[[1]][,c(paste0("beta[",1:p,"]"), "beta0")]
  return(list(Y_pred=Y_pred,
              beta_res=beta_res,
              Y_pred_lcl = Y_pred_CI[1,],
              Y_pred_ucl = Y_pred_CI[2,],
              DIC = fit_JAGS_AFT$BUGSoutput$DIC,
              pD = fit_JAGS_AFT$BUGSoutput$pD, # effective number of parameters (model complexity)
              fit.JAGS=fit_JAGS_AFT))
}
# Plot posterior-predictive upper/lower 95% bounds (black lines) and the
# posterior mean prediction (blue) against age, with observed test outcomes
# overlaid in red.
# x / y_obs default to the globals X_test$age / Y_test so existing calls
# plot_res(res, main=...) keep working; pass them explicitly to avoid the
# dependence on the global environment.
plot_res = function(res, main = "", x = X_test$age, y_obs = Y_test) {
  plot(x, res$Y_pred_ucl, type = "l",
       ylim = c(0, 5000), xlab = "age/day", ylab = "log(Time)",
       cex.lab = 1.5, cex.axis = 1.5, main = main)
  lines(x, res$Y_pred_lcl)
  lines(x, res$Y_pred, col = "blue")
  points(x, y_obs, col = "red")
  invisible(NULL)
}
# Summarize out-of-sample fit: prediction mean squared error and the
# empirical coverage of the 95% posterior predictive interval.
# res must contain Y_pred, Y_pred_lcl and Y_pred_ucl (as built by
# JAGS_SpikeSlab); Y_test are the observed held-out outcomes.
summary_res = function(res, Y_test) {
  pred_err <- res$Y_pred - Y_test
  pmse <- mean(pred_err^2)
  # Strict inequalities: an observation exactly on a bound does not count.
  inside <- (Y_test > res$Y_pred_lcl) & (Y_test < res$Y_pred_ucl)
  c(PMSE = pmse, coverage = mean(inside))
}
# Fit the spike-and-slab AFT model on the training split.
res_aft = JAGS_SpikeSlab(Y_train=Y_train,
                         X_train=X_train,
                         X_test=X_test)
# check output
#par(mfcol=c(2,2))
plot_res(res_aft,main=sprintf("Spike and Slab, DIC = %.2f",res_aft$DIC))
summary_res(res_aft, Y_test)
# MCMC diagnostic plots for the coefficient draws.
# NOTE(review): xyplot/densityplot are lattice generics; this relies on the
# lattice package being attached -- confirm it is loaded on the search path.
xyplot(res_aft$beta_res[,1:3])
densityplot(res_aft$beta_res)
traceplot(res_aft$beta_res)
autocorr.plot(res_aft$beta_res)
#Update MCMC:
# NOTE(review): this trailing block references objects that are never defined
# anywhere in this script (pbcjags, x.test, Y_pred_new, t.test.new) and will
# error if executed -- it looks like a leftover from an earlier version of
# the analysis and should be removed or wired to real objects.
mcmc_fit = as.mcmc(pbcjags)
#predicted Y
Y_pred_sample = mcmc_fit[[1]][,paste("t.pred[",1:nrow(x.test),"]",sep="")]
Y_pred=apply(Y_pred_sample,2,mean)
Y_pred_CI = apply(Y_pred_sample,2,quantile,prob=c(0.025,0.975))
Y_pred_lcl = Y_pred_CI[1,]
Y_pred_ucl = Y_pred_CI[2,]
PMSE = mean((Y_pred_new-t.test.new)^2)
coverage = mean((t.test.new>Y_pred_lcl)&(t.test.new<Y_pred_ucl))
|
library(dplyr)
library(reshape2)
library(stringr)
# Post-secondary enrollment by institution: one row per institution, one
# "EF<year>..." column per survey year.
df<-read.csv(file = "O:/Data Dashboard/All Raw Data/Postsecondary Indicators/Enrollment.csv", stringsAsFactors = FALSE)
# Institutions classified as 2-year colleges; everything else is 4-year.
TwoYear<- c("ATA College", "Elizabethtown Community & Technical College", "Jefferson Community and Technical College", "Ivy Tech Community College")
# Wide -> long: one row per institution/year column (drops the first column).
df <- melt(df[,2:length(df)], id=c("Institution.Name"), direction = "long")
# Extract the year: keep the text between "EF" and "All" in each column
# name, then strip any punctuation.
df$variable<-sapply(strsplit(as.character(df$variable), "EF"), "[", 2)
df$variable<-sapply(strsplit(as.character(df$variable), "All"), "[", 1)
df$variable<-gsub("[[:punct:]]", "", df$variable)
df$Classification<- ifelse(df$Institution.Name %in% TwoYear, "2-Year", "4-Year" )
# Reorder columns to Year, Classification, Institution, Enrolled.
# NOTE(review): positional indices c(2,4,1,3) are brittle; prefer column names.
df<-df[,c(2,4,1,3)]
colnames(df)<-c("Year", "Classification","Institution","Enrolled")
write.csv(df, file = "O:/Data Dashboard/Dashboard Data/Jefferson County Area College Enrollment.csv")
| /Enrollment.R | no_license | kristopherdelane/55000-Degrees-Dashboard | R | false | false | 883 | r | library(dplyr)
library(reshape2)
library(stringr)
# Post-secondary enrollment by institution: one row per institution, one
# "EF<year>..." column per survey year.
df<-read.csv(file = "O:/Data Dashboard/All Raw Data/Postsecondary Indicators/Enrollment.csv", stringsAsFactors = FALSE)
# Institutions classified as 2-year colleges; everything else is 4-year.
TwoYear<- c("ATA College", "Elizabethtown Community & Technical College", "Jefferson Community and Technical College", "Ivy Tech Community College")
# Wide -> long: one row per institution/year column (drops the first column).
df <- melt(df[,2:length(df)], id=c("Institution.Name"), direction = "long")
# Extract the year: keep the text between "EF" and "All" in each column
# name, then strip any punctuation.
df$variable<-sapply(strsplit(as.character(df$variable), "EF"), "[", 2)
df$variable<-sapply(strsplit(as.character(df$variable), "All"), "[", 1)
df$variable<-gsub("[[:punct:]]", "", df$variable)
df$Classification<- ifelse(df$Institution.Name %in% TwoYear, "2-Year", "4-Year" )
# Reorder columns to Year, Classification, Institution, Enrolled.
# NOTE(review): positional indices c(2,4,1,3) are brittle; prefer column names.
df<-df[,c(2,4,1,3)]
colnames(df)<-c("Year", "Classification","Institution","Enrolled")
write.csv(df, file = "O:/Data Dashboard/Dashboard Data/Jefferson County Area College Enrollment.csv")
|
#Unzip file and upload the data.
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
# ";"-separated data; "?" marks missing values.
data <- read.table(unz(temp, "household_power_consumption.txt"), header = TRUE, sep=";", na.strings="?")
unlink(temp)
# Subset the data from the dates 2007-02-01 and 2007-02-02.
# NOTE(review): == with NA dates would yield NA rows; %in% would be safer.
subdata <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007",]
# Convert the Date and Time variables to Date/Time classes.
subdata$datetime <- paste(subdata$Date, subdata$Time)
subdata$datetime <- strptime(subdata$datetime, "%d/%m/%Y %H:%M:%S")
# Plot the Global Active Power per days clustered by sub_metering.
par(mar=c(4, 4, 2, 1))
# NOTE(review): the with(subdata, ...) wrapper is redundant here -- every
# variable inside is already fully qualified with subdata$.
with(subdata, plot(subdata$datetime, subdata$Sub_metering_1, type="n", ylab="Energy sub metering", xlab=" ", cex.lab=.8, col="black"))
lines(subdata$datetime, subdata$Sub_metering_1, type="l", col="black")
lines(subdata$datetime, subdata$Sub_metering_2, type="l", col="red")
lines(subdata$datetime, subdata$Sub_metering_3, type="l", col="blue")
legend("topright", col = c("black", "red", "blue"), lty=c(1,1,1), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Save plot as PNG (dev.copy re-renders the current screen device).
dev.copy(png,"plot3.png",width=480,height=480,units="px")
dev.off() | /plot3.R | no_license | RMBATCHO/ExData_Plotting1 | R | false | false | 1,280 | r | #Unzip file and upload the data.
# Download the household power consumption archive into a temp file, read the
# semicolon-separated data ("?" marks missing values), then delete the
# temp file again.
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", temp)
data <- read.table(unz(temp, "household_power_consumption.txt"), header = TRUE, sep = ";", na.strings = "?")
unlink(temp)
# Keep only the observations from 2007-02-01 and 2007-02-02.
# %in% (rather than ==) cannot introduce NA into the row mask.
subdata <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Combine Date and Time into a single date-time column.
subdata$datetime <- paste(subdata$Date, subdata$Time)
subdata$datetime <- strptime(subdata$datetime, "%d/%m/%Y %H:%M:%S")
# Plot the three sub-metering series over time on one set of axes.
# (The original wrapped the first call in a redundant with(subdata, ...)
# although every variable was already fully qualified.)
par(mar = c(4, 4, 2, 1))
plot(subdata$datetime, subdata$Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = " ", cex.lab = .8, col = "black")
lines(subdata$datetime, subdata$Sub_metering_1, type = "l", col = "black")
lines(subdata$datetime, subdata$Sub_metering_2, type = "l", col = "red")
lines(subdata$datetime, subdata$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), lty = c(1, 1, 1), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the screen device to a 480x480 PNG and close it.
dev.copy(png, "plot3.png", width = 480, height = 480, units = "px")
dev.off()
library(XML)
# Yahoo Finance historical-prices page for the DJIA index.
url = "https://au.finance.yahoo.com/q/hp?s=DJIA"
# extract all tables on the page
# NOTE(review): stringsAsFactors = F uses the reassignable shortcut F;
# prefer the literal FALSE.
tabs = readHTMLTable(url, stringsAsFactors = F)
# locate tables containing call and put information
# NOTE(review): positional indices (11, 15) are brittle -- they break whenever
# the page layout changes; also confirm this page actually carries call/put
# (option) tables, since it is a historical-prices URL.
call_tab = tabs[[11]]
put_tab = tabs[[15]]
# parse url into html tree
doc = htmlTreeParse(url, useInternalNodes = T) | /web-scrape-yahoo-4.R | no_license | triadicaxis/quickr | R | false | false | 316 | r | library(XML)
# Yahoo Finance historical-prices page for the DJIA index.
url = "https://au.finance.yahoo.com/q/hp?s=DJIA"
# Extract all tables on the page.
# Fix: use the literal FALSE/TRUE instead of the reassignable shortcuts F/T.
tabs = readHTMLTable(url, stringsAsFactors = FALSE)
# Locate tables containing call and put information.
# NOTE(review): positional indices (11, 15) are brittle -- they break whenever
# the page layout changes; confirm they still hold before use.
call_tab = tabs[[11]]
put_tab = tabs[[15]]
# Parse the page into an HTML tree for further XPath work.
doc = htmlTreeParse(url, useInternalNodes = TRUE)
# Required packages
library(tree) # classification trees
library(randomForest) # random forests
library(gbm) # boosting
library(rpart) # recursive partitioning
# Read the data.
# NOTE(review): hard-coded personal working directory -- not portable.
setwd("D:/Dropbox/Privat/KIT/05_Master/Seminare/Daten")
#data.train <- read.csv(file="train.csv") # the unmodified training set
#data.train <- read.csv(file="data.train.low.csv") # transformed data set
#data.train <- read.csv(file="data.train.mid.csv") # transformed data set
#data.train <- read.csv(file="data.train.high.csv") # transformed data set
#data.train <- read.csv(file="Summen_Spalten.csv") # transformed data set
#data.train <- read.csv(file="Summen_Zeilen.csv") # transformed data set
data.train <- read.csv(file="maxima.csv") # transformed data set
data.train[,1] <- ifelse(data.train[,1]==7,1,0) # recode as a binary problem -> "7 vs. not 7"
data.train[,1] <- as.factor(data.train[,1]) # convert the labels to factors
set.seed(1)
training <- sample(1:nrow(data.train),size=nrow(data.train)/2) # 50:50 split into training and test indices
# Fit, display and label an ordinary classification tree on the training data
set.seed(1)
time.start <- Sys.time()
tree.MNIST <- tree(label~.,data=data.train,subset=training) # fit the decision tree
time.end <- Sys.time()
tree.duration <- time.end - time.start # elapsed fitting time
plot(tree.MNIST); text(tree.MNIST,pretty=1,cex=0.8) # plot the decision tree
tree.pred <- predict(tree.MNIST,type="class",newdata=data.train[-training,]) # classify the test data with the model
tree.tabelle <- table(pred=tree.pred,true=data.train[-training,1]) # and following: compute the test accuracy
# Column-normalize the confusion matrix (per-true-class rates, 2 decimals).
tree.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(tree.genauigkeit)){
  spaltensumme <- sum(tree.tabelle[,i])
  for(j in 1:nrow(tree.genauigkeit)){
    tree.genauigkeit[j,i] <- round(tree.tabelle[j,i]/spaltensumme,2)
  }
}
tree.prozent <- sum(diag(tree.tabelle))/sum(tree.tabelle) # overall test accuracy
# Ordinary classification tree on the training data, as above but with rpart instead of tree.
set.seed(1)
time.start <- Sys.time()
rpart.MNIST <- rpart(label~.,data=data.train,subset=training,method="class")
time.end <- Sys.time()
rpart.duration <- time.end - time.start # elapsed fitting time
plot(rpart.MNIST); text(rpart.MNIST,use.n=TRUE,all=TRUE,cex=.8) # plot the decision tree
rpart.pred <- predict(rpart.MNIST,type="class",newdata=data.train[-training,]) # classify the test data with the model
rpart.tabelle <- table(pred=rpart.pred,true=data.train[-training,1]) # and following: compute the test accuracy
rpart.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(rpart.genauigkeit)){
  spaltensumme <- sum(rpart.tabelle[,i])
  for(j in 1:nrow(rpart.genauigkeit)){
    rpart.genauigkeit[j,i] <- round(rpart.tabelle[j,i]/spaltensumme,2)
  }
}
rpart.prozent <- sum(diag(rpart.tabelle))/sum(rpart.tabelle) # overall test accuracy
# Random forests
set.seed(1)
time.start <- Sys.time()
randomForest.MNIST <- randomForest(label~.,data=data.train,subset=training)
time.end <- Sys.time()
randomForest.duration <- time.end - time.start # elapsed fitting time
randomForest.pred <- predict(randomForest.MNIST,type="class",newdata=data.train[-training,]) # classify the test data with the model
randomForest.tabelle <- table(pred=randomForest.pred,true=data.train[-training,1]) # and following: compute the test accuracy
randomForest.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(randomForest.genauigkeit)){
  spaltensumme <- sum(randomForest.tabelle[,i])
  for(j in 1:nrow(randomForest.genauigkeit)){
    randomForest.genauigkeit[j,i] <- round(randomForest.tabelle[j,i]/spaltensumme,2)
  }
}
randomForest.prozent <- sum(diag(randomForest.tabelle))/sum(randomForest.tabelle)
# Boosting (labels converted back to numeric 0/1 for gbm)
set.seed(1)
data.train.boost <- data.train
data.train.boost[,1] <- as.numeric(data.train.boost[,1])-1
time.start <- Sys.time()
boosting.MNIST <- gbm(label~.,data=data.train.boost[training,],distribution="bernoulli",n.trees=500,interaction.depth=32,shrinkage=0.5,n.cores=4)
time.end <- Sys.time()
boosting.duration <- time.end - time.start # elapsed fitting time
boosting.pred <- predict(boosting.MNIST,type="response",newdata=data.train.boost[-training,],n.trees=500) # classify the test data with the model
# NOTE(review): 0.1 is an unusually low decision threshold for converting
# probabilities to 0/1 (0.5 would be the neutral choice) -- confirm intended.
boosting.pred <- ifelse(boosting.pred>0.1,1,0) # convert the probabilities to 0/1
boosting.tabelle <- table(pred=boosting.pred,true=data.train.boost[-training,1]) # and following: compute the test accuracy
boosting.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(boosting.genauigkeit)){
  spaltensumme <- sum(boosting.tabelle[,i])
  for(j in 1:nrow(boosting.genauigkeit)){
    boosting.genauigkeit[j,i] <- round(boosting.tabelle[j,i]/spaltensumme,2)
  }
}
boosting.prozent <- sum(diag(boosting.tabelle))/sum(boosting.tabelle)
# Bagging (random forests with m = p, i.e. all predictors tried at each split)
set.seed(1)
time.start <- Sys.time()
bagging.MNIST <- randomForest(label~.,data=data.train,subset=training,mtry=ncol(data.train)-1)
time.end <- Sys.time()
bagging.duration <- time.end - time.start # elapsed fitting time
bagging.pred <- predict(bagging.MNIST,type="class",newdata=data.train[-training,]) # classify the test data with the model
bagging.tabelle <- table(pred=bagging.pred,true=data.train[-training,1]) # and following: compute the test accuracy
bagging.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(bagging.genauigkeit)){
  spaltensumme <- sum(bagging.tabelle[,i])
  for(j in 1:nrow(bagging.genauigkeit)){
    bagging.genauigkeit[j,i] <- round(bagging.tabelle[j,i]/spaltensumme,2)
  }
}
bagging.prozent <- sum(diag(bagging.tabelle))/sum(bagging.tabelle)
#### Remove all-zero predictor columns (the +1 offset skips the label column)
data.train.pure <- data.train[,-(which(colSums(data.train[,-1])==0)+1)]
# Fit, display and label an ordinary classification tree, now on the data without all-zero columns
set.seed(1)
time.start <- Sys.time()
tree.MNIST.pure <- tree(label~.,data=data.train.pure,subset=training)
time.end <- Sys.time()
# NOTE(review): overwrites tree.duration from the non-"pure" run above
# (the other "pure" blocks use a *.pure suffix for the duration).
tree.duration <- time.end - time.start # elapsed fitting time
plot(tree.MNIST.pure);text(tree.MNIST.pure,pretty=1,cex=0.8) # plot the decision tree
tree.pred.pure <- predict(tree.MNIST.pure,type="class",newdata=data.train.pure[-training,]) # classify the test data with the model
tree.tabelle.pure <- table(pred=tree.pred.pure,true=data.train.pure[-training,1]) # and following: compute the test accuracy
tree.genauigkeit.pure <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(tree.genauigkeit.pure)){
  spaltensumme <- sum(tree.tabelle.pure[,i])
  for(j in 1:nrow(tree.genauigkeit.pure)){
    tree.genauigkeit.pure[j,i] <- round(tree.tabelle.pure[j,i]/spaltensumme,2)
  }
}
tree.prozent.pure <- sum(diag(tree.tabelle.pure))/sum(tree.tabelle.pure)
# Ordinary classification tree as above, this time with rpart instead of tree.
set.seed(1)
time.start <- Sys.time()
rpart.MNIST.pure <- rpart(label~.,data=data.train.pure,subset=training,method="class")
time.end <- Sys.time()
# NOTE(review): overwrites rpart.duration from the non-"pure" run above.
rpart.duration <- time.end - time.start # elapsed fitting time
plot(rpart.MNIST.pure); text(rpart.MNIST.pure,use.n=TRUE,all=TRUE,cex=.8) # plot the decision tree
rpart.pred.pure <- predict(rpart.MNIST.pure,type="class",newdata=data.train.pure[-training,]) # classify the test data with the model
rpart.tabelle.pure <- table(pred=rpart.pred.pure,true=data.train.pure[-training,1]) # and following: compute the test accuracy
rpart.genauigkeit.pure <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(rpart.genauigkeit.pure)){
  spaltensumme <- sum(rpart.tabelle.pure[,i])
  for(j in 1:nrow(rpart.genauigkeit.pure)){
    rpart.genauigkeit.pure[j,i] <- round(rpart.tabelle.pure[j,i]/spaltensumme,2)
  }
}
rpart.prozent.pure <- sum(diag(rpart.tabelle.pure))/sum(rpart.tabelle.pure)
# Random forests - pure (data without all-zero columns)
set.seed(1)
time.start <- Sys.time()
randomForest.MNIST.pure <- randomForest(label~.,data=data.train.pure,subset=training)
time.end <- Sys.time()
randomForest.duration.pure <- time.end - time.start # elapsed fitting time
randomForest.pred.pure <- predict(randomForest.MNIST.pure,type="class",newdata=data.train.pure[-training,]) # classify the test data with the model
randomForest.tabelle.pure <- table(pred=randomForest.pred.pure,true=data.train.pure[-training,1]) # and following: compute the test accuracy
randomForest.genauigkeit.pure <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(randomForest.genauigkeit.pure)){
  spaltensumme <- sum(randomForest.tabelle.pure[,i])
  for(j in 1:nrow(randomForest.genauigkeit.pure)){
    randomForest.genauigkeit.pure[j,i] <- round(randomForest.tabelle.pure[j,i]/spaltensumme,2)
  }
}
randomForest.prozent.pure <- sum(diag(randomForest.tabelle.pure))/sum(randomForest.tabelle.pure)
# Boosting - pure (labels converted back to numeric 0/1 for gbm)
set.seed(1)
data.train.boost.pure <- data.train.pure
data.train.boost.pure[,1] <- as.numeric(data.train.boost.pure[,1])-1
time.start <- Sys.time()
boosting.MNIST.pure <- gbm(label~.,data=data.train.boost.pure[training,],distribution="bernoulli",n.trees=500,interaction.depth=32,shrinkage=0.5,n.cores=4)
time.end <- Sys.time()
boosting.duration.pure <- time.end - time.start # elapsed fitting time
boosting.pred.pure <- predict(boosting.MNIST.pure,type="response",newdata=data.train.boost.pure[-training,],n.trees=500) # classify the test data with the model
# NOTE(review): same unusually low 0.1 threshold as the non-"pure" run.
boosting.pred.pure <- ifelse(boosting.pred.pure>0.1,1,0) # convert the probabilities to 0/1
boosting.tabelle.pure <- table(pred=boosting.pred.pure,true=data.train.boost.pure[-training,1]) # and following: compute the test accuracy
boosting.genauigkeit.pure <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(boosting.genauigkeit.pure)){
  spaltensumme <- sum(boosting.tabelle.pure[,i])
  for(j in 1:nrow(boosting.genauigkeit.pure)){
    boosting.genauigkeit.pure[j,i] <- round(boosting.tabelle.pure[j,i]/spaltensumme,2)
  }
}
boosting.prozent.pure <- sum(diag(boosting.tabelle.pure))/sum(boosting.tabelle.pure)
# Bagging - pure (random forests with m = p)
# NOTE(review): copy-paste defect -- unlike every other "pure" block this one
# reuses the bagging.* names (no .pure suffix) and therefore overwrites the
# results of the non-"pure" bagging run above, making them impossible to
# compare afterwards.
set.seed(1)
time.start <- Sys.time()
bagging.MNIST <- randomForest(label~.,data=data.train.pure,subset=training,mtry=ncol(data.train.pure)-1)
time.end <- Sys.time()
bagging.duration <- time.end - time.start # elapsed fitting time
bagging.pred <- predict(bagging.MNIST,type="class",newdata=data.train.pure[-training,]) # classify the test data with the model
bagging.tabelle <- table(pred=bagging.pred,true=data.train.pure[-training,1]) # and following: compute the test accuracy
bagging.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(bagging.genauigkeit)){
  spaltensumme <- sum(bagging.tabelle[,i])
  for(j in 1:nrow(bagging.genauigkeit)){
    bagging.genauigkeit[j,i] <- round(bagging.tabelle[j,i]/spaltensumme,2)
  }
}
bagging.prozent <- sum(diag(bagging.tabelle))/sum(bagging.tabelle) | /Trees_Binär.R | no_license | steffens93/machinelearning | R | false | false | 11,066 | r | # Benötigte Pakete
library(tree) # Klassifizierungsbäume
library(randomForest) # Random Forests
library(gbm) # Boosting
library(rpart) # Recursive Partitioning
# Daten einlesen.
setwd("D:/Dropbox/Privat/KIT/05_Master/Seminare/Daten")
#data.train <- read.csv(file="train.csv") # Der unveränderte Trainingsdatensatz
#data.train <- read.csv(file="data.train.low.csv") # Transformierter Datensatz
#data.train <- read.csv(file="data.train.mid.csv") # Transformierter Datensatz
#data.train <- read.csv(file="data.train.high.csv") # Transformierter Datensatz
#data.train <- read.csv(file="Summen_Spalten.csv") # Transformierter Datensatz
#data.train <- read.csv(file="Summen_Zeilen.csv") # Transformierter Datensatz
data.train <- read.csv(file="maxima.csv") # Transformierter Datensatz
data.train[,1] <- ifelse(data.train[,1]==7,1,0) # Umwandlung in ein binäres Problem -> "7 oder nicht 7"
data.train[,1] <- as.factor(data.train[,1]) # Umwandlung der labels in Faktoren
set.seed(1)
training <- sample(1:nrow(data.train),size=nrow(data.train)/2) # Aufteilung des Datensatzes in 50:50 Trainings- und Testdaten
# Gewöhnlichen Klassifizierungsbaum (Classification Tree) über alle Trainingsdaten erstellen, anzeigen und beschriften
set.seed(1)
time.start <- Sys.time()
tree.MNIST <- tree(label~.,data=data.train,subset=training) # Anpassung des Entscheidungsbaums
time.end <- Sys.time()
tree.duration <- time.end - time.start # Messung der Zeitdauer der Berechnung
plot(tree.MNIST); text(tree.MNIST,pretty=1,cex=0.8) # Plotten des Entscheidungsbaums
tree.pred <- predict(tree.MNIST,type="class",newdata=data.train[-training,]) # Einordnung der Testdaten anhand des Modells
tree.tabelle <- table(pred=tree.pred,true=data.train[-training,1]) # Und folgende: Berechnung der Testgenauigkeit
tree.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(tree.genauigkeit)){
spaltensumme <- sum(tree.tabelle[,i])
for(j in 1:nrow(tree.genauigkeit)){
tree.genauigkeit[j,i] <- round(tree.tabelle[j,i]/spaltensumme,2)
}
}
tree.prozent <- sum(diag(tree.tabelle))/sum(tree.tabelle)
# Gewöhnlichen Klassifizierungsbaum über alle Trainingsdaten erstellen, analog zu oben, diesmal mit rpart statt tree.
set.seed(1)
time.start <- Sys.time()
rpart.MNIST <- rpart(label~.,data=data.train,subset=training,method="class")
time.end <- Sys.time()
rpart.duration <- time.end - time.start # Messung der Zeitdauer der Berechnung
plot(rpart.MNIST); text(rpart.MNIST,use.n=TRUE,all=TRUE,cex=.8) # Plotten des Entscheidungsbaums
rpart.pred <- predict(rpart.MNIST,type="class",newdata=data.train[-training,]) # Einordnung der Testdaten anhand des Modells
rpart.tabelle <- table(pred=rpart.pred,true=data.train[-training,1]) # Und folgende: Berechnung der Testgenauigkeit
rpart.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(rpart.genauigkeit)){
spaltensumme <- sum(rpart.tabelle[,i])
for(j in 1:nrow(rpart.genauigkeit)){
rpart.genauigkeit[j,i] <- round(rpart.tabelle[j,i]/spaltensumme,2)
}
}
rpart.prozent <- sum(diag(rpart.tabelle))/sum(rpart.tabelle)
# Random forests
set.seed(1)
time.start <- Sys.time()
randomForest.MNIST <- randomForest(label~.,data=data.train,subset=training)
time.end <- Sys.time()
randomForest.duration <- time.end - time.start # elapsed fitting time
randomForest.pred <- predict(randomForest.MNIST,type="class",newdata=data.train[-training,]) # classify the test data with the model
randomForest.tabelle <- table(pred=randomForest.pred,true=data.train[-training,1]) # and following: compute the test accuracy
randomForest.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(randomForest.genauigkeit)){
  spaltensumme <- sum(randomForest.tabelle[,i])
  for(j in 1:nrow(randomForest.genauigkeit)){
    randomForest.genauigkeit[j,i] <- round(randomForest.tabelle[j,i]/spaltensumme,2)
  }
}
randomForest.prozent <- sum(diag(randomForest.tabelle))/sum(randomForest.tabelle)
# Boosting (labels converted back to numeric 0/1 for gbm)
set.seed(1)
data.train.boost <- data.train
data.train.boost[,1] <- as.numeric(data.train.boost[,1])-1
time.start <- Sys.time()
boosting.MNIST <- gbm(label~.,data=data.train.boost[training,],distribution="bernoulli",n.trees=500,interaction.depth=32,shrinkage=0.5,n.cores=4)
time.end <- Sys.time()
boosting.duration <- time.end - time.start # elapsed fitting time
boosting.pred <- predict(boosting.MNIST,type="response",newdata=data.train.boost[-training,],n.trees=500) # classify the test data with the model
# NOTE(review): 0.1 is an unusually low decision threshold for converting
# probabilities to 0/1 (0.5 would be the neutral choice) -- confirm intended.
boosting.pred <- ifelse(boosting.pred>0.1,1,0) # convert the probabilities to 0/1
boosting.tabelle <- table(pred=boosting.pred,true=data.train.boost[-training,1]) # and following: compute the test accuracy
boosting.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(boosting.genauigkeit)){
  spaltensumme <- sum(boosting.tabelle[,i])
  for(j in 1:nrow(boosting.genauigkeit)){
    boosting.genauigkeit[j,i] <- round(boosting.tabelle[j,i]/spaltensumme,2)
  }
}
boosting.prozent <- sum(diag(boosting.tabelle))/sum(boosting.tabelle)
# Bagging (random forests with m = p, i.e. all predictors tried at each split)
set.seed(1)
time.start <- Sys.time()
bagging.MNIST <- randomForest(label~.,data=data.train,subset=training,mtry=ncol(data.train)-1)
time.end <- Sys.time()
bagging.duration <- time.end - time.start # elapsed fitting time
bagging.pred <- predict(bagging.MNIST,type="class",newdata=data.train[-training,]) # classify the test data with the model
bagging.tabelle <- table(pred=bagging.pred,true=data.train[-training,1]) # and following: compute the test accuracy
bagging.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(bagging.genauigkeit)){
  spaltensumme <- sum(bagging.tabelle[,i])
  for(j in 1:nrow(bagging.genauigkeit)){
    bagging.genauigkeit[j,i] <- round(bagging.tabelle[j,i]/spaltensumme,2)
  }
}
bagging.prozent <- sum(diag(bagging.tabelle))/sum(bagging.tabelle)
#### Remove all-zero predictor columns (the +1 offset skips the label column)
data.train.pure <- data.train[,-(which(colSums(data.train[,-1])==0)+1)]
# Fit, display and label an ordinary classification tree, now on the data without all-zero columns
set.seed(1)
time.start <- Sys.time()
tree.MNIST.pure <- tree(label~.,data=data.train.pure,subset=training)
time.end <- Sys.time()
# NOTE(review): overwrites tree.duration from the non-"pure" run above
# (the other "pure" blocks use a *.pure suffix for the duration).
tree.duration <- time.end - time.start # elapsed fitting time
plot(tree.MNIST.pure);text(tree.MNIST.pure,pretty=1,cex=0.8) # plot the decision tree
tree.pred.pure <- predict(tree.MNIST.pure,type="class",newdata=data.train.pure[-training,]) # classify the test data with the model
tree.tabelle.pure <- table(pred=tree.pred.pure,true=data.train.pure[-training,1]) # and following: compute the test accuracy
tree.genauigkeit.pure <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(tree.genauigkeit.pure)){
  spaltensumme <- sum(tree.tabelle.pure[,i])
  for(j in 1:nrow(tree.genauigkeit.pure)){
    tree.genauigkeit.pure[j,i] <- round(tree.tabelle.pure[j,i]/spaltensumme,2)
  }
}
tree.prozent.pure <- sum(diag(tree.tabelle.pure))/sum(tree.tabelle.pure)
# Ordinary classification tree as above, this time with rpart instead of tree.
set.seed(1)
time.start <- Sys.time()
rpart.MNIST.pure <- rpart(label~.,data=data.train.pure,subset=training,method="class")
time.end <- Sys.time()
# NOTE(review): overwrites rpart.duration from the non-"pure" run above.
rpart.duration <- time.end - time.start # elapsed fitting time
plot(rpart.MNIST.pure); text(rpart.MNIST.pure,use.n=TRUE,all=TRUE,cex=.8) # plot the decision tree
rpart.pred.pure <- predict(rpart.MNIST.pure,type="class",newdata=data.train.pure[-training,]) # classify the test data with the model
rpart.tabelle.pure <- table(pred=rpart.pred.pure,true=data.train.pure[-training,1]) # and following: compute the test accuracy
rpart.genauigkeit.pure <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(rpart.genauigkeit.pure)){
  spaltensumme <- sum(rpart.tabelle.pure[,i])
  for(j in 1:nrow(rpart.genauigkeit.pure)){
    rpart.genauigkeit.pure[j,i] <- round(rpart.tabelle.pure[j,i]/spaltensumme,2)
  }
}
rpart.prozent.pure <- sum(diag(rpart.tabelle.pure))/sum(rpart.tabelle.pure)
# Random forests - pure (data without all-zero columns)
set.seed(1)
time.start <- Sys.time()
randomForest.MNIST.pure <- randomForest(label~.,data=data.train.pure,subset=training)
time.end <- Sys.time()
randomForest.duration.pure <- time.end - time.start # elapsed fitting time
randomForest.pred.pure <- predict(randomForest.MNIST.pure,type="class",newdata=data.train.pure[-training,]) # classify the test data with the model
randomForest.tabelle.pure <- table(pred=randomForest.pred.pure,true=data.train.pure[-training,1]) # and following: compute the test accuracy
randomForest.genauigkeit.pure <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(randomForest.genauigkeit.pure)){
  spaltensumme <- sum(randomForest.tabelle.pure[,i])
  for(j in 1:nrow(randomForest.genauigkeit.pure)){
    randomForest.genauigkeit.pure[j,i] <- round(randomForest.tabelle.pure[j,i]/spaltensumme,2)
  }
}
randomForest.prozent.pure <- sum(diag(randomForest.tabelle.pure))/sum(randomForest.tabelle.pure)
# Boosting - pure (labels converted back to numeric 0/1 for gbm)
set.seed(1)
data.train.boost.pure <- data.train.pure
data.train.boost.pure[,1] <- as.numeric(data.train.boost.pure[,1])-1
time.start <- Sys.time()
boosting.MNIST.pure <- gbm(label~.,data=data.train.boost.pure[training,],distribution="bernoulli",n.trees=500,interaction.depth=32,shrinkage=0.5,n.cores=4)
time.end <- Sys.time()
boosting.duration.pure <- time.end - time.start # elapsed fitting time
boosting.pred.pure <- predict(boosting.MNIST.pure,type="response",newdata=data.train.boost.pure[-training,],n.trees=500) # classify the test data with the model
# NOTE(review): same unusually low 0.1 threshold as the non-"pure" run.
boosting.pred.pure <- ifelse(boosting.pred.pure>0.1,1,0) # convert the probabilities to 0/1
boosting.tabelle.pure <- table(pred=boosting.pred.pure,true=data.train.boost.pure[-training,1]) # and following: compute the test accuracy
boosting.genauigkeit.pure <- matrix(0,nrow=2,ncol=2)
for(i in 1:ncol(boosting.genauigkeit.pure)){
  spaltensumme <- sum(boosting.tabelle.pure[,i])
  for(j in 1:nrow(boosting.genauigkeit.pure)){
    boosting.genauigkeit.pure[j,i] <- round(boosting.tabelle.pure[j,i]/spaltensumme,2)
  }
}
boosting.prozent.pure <- sum(diag(boosting.tabelle.pure))/sum(boosting.tabelle.pure)
# Bagging - pure (random forest with mtry = p, i.e. all predictors tried at each split)
set.seed(1)
time.start <- Sys.time()
bagging.MNIST <- randomForest(label~.,data=data.train.pure,subset=training,mtry=ncol(data.train.pure)-1)
time.end <- Sys.time()
bagging.duration <- time.end - time.start # wall-clock time of the model fit
# Classify the held-out test rows with the fitted model
bagging.pred <- predict(bagging.MNIST,type="class",newdata=data.train.pure[-training,])
# Confusion matrix (predicted vs. true labels) for the test accuracy
bagging.tabelle <- table(pred=bagging.pred,true=data.train.pure[-training,1])
# Per-true-class accuracy: column-wise proportions, rounded to 2 digits
bagging.genauigkeit <- matrix(0,nrow=2,ncol=2)
for(i in seq_len(ncol(bagging.genauigkeit))){
  spaltensumme <- sum(bagging.tabelle[,i]) # column total = test cases of class i
  for(j in seq_len(nrow(bagging.genauigkeit))){
    bagging.genauigkeit[j,i] <- round(bagging.tabelle[j,i]/spaltensumme,2)
  }
}
# Overall test accuracy: share of correctly classified test cases
bagging.prozent <- sum(diag(bagging.tabelle))/sum(bagging.tabelle)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mapCountries}
\alias{mapCountries}
\title{World Map of Countries}
\format{
A SpatialPolygonsDataFrame
}
\source{
Made with Natural Earth. \url{http://www5.statcan.gc.ca/cansim/}
}
\usage{
mapCountries
}
\description{
World Map of Countries
}
\examples{
\dontrun{
library(sp); library(rmapdata)
sp::plot(mapCountries)
head(mapCountries@data)
}
}
\keyword{datasets}
| /man/mapCountries.Rd | no_license | JGCRI/rmapdata | R | false | true | 473 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mapCountries}
\alias{mapCountries}
\title{World Map of Countries}
\format{
A SpatialPolygonsDataFrame
}
\source{
Made with Natural Earth. \url{http://www5.statcan.gc.ca/cansim/}
}
\usage{
mapCountries
}
\description{
World Map of Countries
}
\examples{
\dontrun{
library(sp); library(rmapdata)
sp::plot(mapCountries)
head(mapCountries@data)
}
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractCoef.r
\name{extract.coef.rxLogit}
\alias{extract.coef.rxLogit}
\title{extract.coef.rxLogit}
\usage{
\method{extract.coef}{rxLogit}(model, ...)
}
\arguments{
\item{model}{Model object to extract information from.}
\item{...}{Further arguments}
}
\value{
A \code{\link{data.frame}} containing the coefficient, the standard error and the variable name.
}
\description{
Extract Coefficient Information from rxLogit Models
}
\details{
Gets the coefficient values and standard errors, and variable names from an rxLogit model.
}
\examples{
\dontrun{
require(ggplot2)
data(diamonds)
mod6 <- rxLogit(price > 10000 ~ carat + cut + x, data=diamonds)
extract.coef(mod6)
}
}
\author{
Jared P. Lander
}
| /man/extract.coef.rxLogit.Rd | no_license | xfim/coefplot | R | false | true | 779 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractCoef.r
\name{extract.coef.rxLogit}
\alias{extract.coef.rxLogit}
\title{extract.coef.rxLogit}
\usage{
\method{extract.coef}{rxLogit}(model, ...)
}
\arguments{
\item{model}{Model object to extract information from.}
\item{...}{Further arguments}
}
\value{
A \code{\link{data.frame}} containing the coefficient, the standard error and the variable name.
}
\description{
Extract Coefficient Information from rxLogit Models
}
\details{
Gets the coefficient values and standard errors, and variable names from an rxLogit model.
}
\examples{
\dontrun{
require(ggplot2)
data(diamonds)
mod6 <- rxLogit(price > 10000 ~ carat + cut + x, data=diamonds)
extract.coef(mod6)
}
}
\author{
Jared P. Lander
}
|
skip_on_cran()
# Input validation -----------------------------------------------------------
# Supplying only one of `row`/`col` must error; supplying both must not.
test_that("tbl_cross- throws error if both `col` and `row`` are not specified", {
  expect_error(
    tbl_cross(trial, col = trt),
    NULL
  )
  expect_error(
    tbl_cross(trial, row = trt),
    NULL
  )
})
# NOTE(review): despite the description, this test passes BOTH `col` and
# `row`; confirm whether the description should read "works if both
# `col` and `row` are specified".
test_that("tbl_cross- works if no `col` or `row` specified", {
  expect_error(
    tbl_cross(trial, col = trt, row = response),
    NA
  )
})
# Character (string) column names must be accepted as well as bare names.
test_that("tbl_cross- works in character inputs for `col` and `row", {
  col_variable <- "trt"
  row_variable <- "response"
  expect_error(
    tbl_cross(trial, col = col_variable,
              row = row_variable),
    NA
  )
})
test_that("tbl_cross- creates output without error with continuous args", {
  expect_error(
    tbl_cross(mtcars, row = gear, col = am),
    NA
  )
})
# Degenerate inputs (empty data, zero rows, unknown variable) must error.
test_that("tbl_cross- returns errors with bad inputs", {
  expect_error(
    tbl_cross(tibble::tibble()),
    NULL
  )
  expect_error(
    tbl_cross(tibble::tibble(t = integer())),
    NULL
  )
  expect_error(
    tbl_cross(trial, col = THIS_IS_NOT_A_VARIABLE),
    NULL
  )
})
# Labels Argument ------------------------------------------------------------
# `label` accepts both a named list and a formula specification.
test_that("tbl_cross- labels work", {
  expect_error(
    tbl_cross(mtcars, row = am, col = cyl, label = list(am = "AM LABEL",
                                                        cyl = "New cyl")),
    NA
  )
  expect_error(
    tbl_cross(mtcars, row = am, col = cyl,
              label = vars(am) ~ "AM LABEL"),
    NA
  )
})
# Stats and Percent Argument ---------------------------------------------------
test_that("tbl_cross- statistics argument works", {
  expect_error(
    tbl_cross(trial, statistic = "{p}"),
    NA
  )
  expect_error(
    tbl_cross(trial, percent = "cell"),
    NA
  )
})
# When `percent` is given without `statistic`, percent signs should still
# appear in the rendered statistics column.
test_that("tbl_cross- passing percent without stat works and produces %", {
  expect_error(
    tbl_cross(trial, percent = "cell"),
    NA
  )
  x <- tbl_cross(trial, percent = "cell")
  expect_equal(sum(str_detect(x$table_body$stat_1, "%"), na.rm = TRUE) > 1,
               TRUE)
})
# Missing Argument -------------------------------------------------------------
# missing = "no" drops NA rows silently except for an informational message.
test_that("tbl_cross- test 'no' missing throws message", {
  expect_message(
    x <- tbl_cross(trial,
                   row = trt,
                   col = response,
                   missing = "no"),
    NULL
  )
})
# missing = "no" must not produce an "Unknown" row/column.
test_that("tbl_cross- test no missing omits all NAs", {
  x <- tbl_cross(trial,
                 row = trt,
                 col = response,
                 missing = "no")
  expect_equal(
    "Unknown" %in% x$table_body$label,
    FALSE
  )
})
# missing = "ifany": show "Unknown" only because `response` has NAs.
test_that("tbl_cross- test ifany missing returns Unknown when missing", {
  x <- tbl_cross(trial,
                 row = response,
                 col = trt,
                 missing = "ifany")
  expect_equal(
    "Unknown" %in% x$table_body$label,
    TRUE
  )
})
# missing = "always": show "Unknown" even when the variables have no NAs.
test_that("tbl_cross- test 'always' missing returns Unknown even when none", {
  x <- tbl_cross(trial,
                 row = trt,
                 col = grade,
                 missing = "always")
  expect_equal(
    "Unknown" %in% x$table_body$label,
    TRUE
  )
})
test_that("tbl_cross- works with grouped data (it ungroups it first)", {
  expect_error(
    trial %>% dplyr::group_by(response) %>% tbl_cross(death, trt),
    NA
  )
})
# Test Dichotomous -> Categorical -------------------------------------------
# A yes/no variable would normally be summarized as dichotomous; tbl_cross
# must force it to a full categorical cross-tabulation instead.
# (Description fixed: it was a copy-paste of the unrelated missing="no" test.)
test_that("tbl_cross- dichotomous variables are summarized as categorical", {
  data <- data.frame( X = rep(c("Yes", "No"), 3),
                      Y = rep(c("Yes", "No"), each = 3))
  table <- data %>% tbl_cross(row = X, col = Y)
  type <- table$meta_data %>%
    filter(variable == "X") %>%
    pull(summary_type)
  expect_equal(type, "categorical")
})
# Margin Argument -------------------------------------------
# margin = NULL must suppress both the total row ("..total.." variable)
# and the total column ("stat_0").
test_that("tbl_cross- test NULL margin argument", {
  margins <- tbl_cross(trial,
                       row = trt,
                       col = response
  )
  no_margins <- tbl_cross(trial,
                          row = trt,
                          col = response,
                          margin = NULL
  )
  # test row margins ------
  expect_equal(
    "..total.." %in% margins$table_body$variable,
    TRUE
  )
  expect_equal(
    "..total.." %in% no_margins$table_body$variable,
    FALSE
  )
  # test col margins ------
  expect_equal(
    "stat_0" %in% names(margins$table_body),
    TRUE
  )
  expect_equal(
    "stat_0" %in% names(no_margins$table_body),
    FALSE
  )
})
| /tests/testthat/test-tbl_cross.R | permissive | mtysar/gtsummary | R | false | false | 4,318 | r | skip_on_cran()
test_that("tbl_cross- throws error if both `col` and `row`` are not specified", {
expect_error(
tbl_cross(trial, col = trt),
NULL
)
expect_error(
tbl_cross(trial, row = trt),
NULL
)
})
test_that("tbl_cross- works if no `col` or `row` specified", {
expect_error(
tbl_cross(trial, col = trt, row = response),
NA
)
})
test_that("tbl_cross- works in character inputs for `col` and `row", {
col_variable <- "trt"
row_variable <- "response"
expect_error(
tbl_cross(trial, col = col_variable,
row = row_variable),
NA
)
})
test_that("tbl_cross- creates output without error with continuous args", {
expect_error(
tbl_cross(mtcars, row = gear, col = am),
NA
)
})
test_that("tbl_cross- returns errors with bad inputs", {
expect_error(
tbl_cross(tibble::tibble()),
NULL
)
expect_error(
tbl_cross(tibble::tibble(t = integer())),
NULL
)
expect_error(
tbl_cross(trial, col = THIS_IS_NOT_A_VARIABLE),
NULL
)
})
# Labels Argument ------------------------------------------------------------
test_that("tbl_cross- labels work", {
expect_error(
tbl_cross(mtcars, row = am, col = cyl, label = list(am = "AM LABEL",
cyl = "New cyl")),
NA
)
expect_error(
tbl_cross(mtcars, row = am, col = cyl,
label = vars(am) ~ "AM LABEL"),
NA
)
})
# Stats and Percent Argument ---------------------------------------------------
test_that("tbl_cross- statistics argument works", {
expect_error(
tbl_cross(trial, statistic = "{p}"),
NA
)
expect_error(
tbl_cross(trial, percent = "cell"),
NA
)
})
test_that("tbl_cross- passing percent without stat works and produces %", {
expect_error(
tbl_cross(trial, percent = "cell"),
NA
)
x <- tbl_cross(trial, percent = "cell")
expect_equal(sum(str_detect(x$table_body$stat_1, "%"), na.rm = TRUE) > 1,
TRUE)
})
# Missing Argument -------------------------------------------------------------
test_that("tbl_cross- test 'no' missing throws message", {
expect_message(
x <- tbl_cross(trial,
row = trt,
col = response,
missing = "no"),
NULL
)
})
test_that("tbl_cross- test no missing omits all NAs", {
x <- tbl_cross(trial,
row = trt,
col = response,
missing = "no")
expect_equal(
"Unknown" %in% x$table_body$label,
FALSE
)
})
test_that("tbl_cross- test ifany missing returns Unknown when missing", {
x <- tbl_cross(trial,
row = response,
col = trt,
missing = "ifany")
expect_equal(
"Unknown" %in% x$table_body$label,
TRUE
)
})
test_that("tbl_cross- test 'always' missing returns Unknown even when none", {
x <- tbl_cross(trial,
row = trt,
col = grade,
missing = "always")
expect_equal(
"Unknown" %in% x$table_body$label,
TRUE
)
})
test_that("tbl_cross- works with grouped data (it ungroups it first)", {
expect_error(
trial %>% dplyr::group_by(response) %>% tbl_cross(death, trt),
NA
)
})
# Test Dichotomous -> Categorical -------------------------------------------
test_that("tbl_cross- test 'no' missing throws message", {
data <- data.frame( X = rep(c("Yes", "No"), 3),
Y = rep(c("Yes", "No"), each = 3))
table <- data %>% tbl_cross(row = X, col = Y)
type <- table$meta_data %>%
filter(variable == "X") %>%
pull(summary_type)
expect_equal(type, "categorical")
})
# Margin Argument -------------------------------------------
test_that("tbl_cross- test NULL margin argument", {
margins <- tbl_cross(trial,
row = trt,
col = response
)
no_margins <- tbl_cross(trial,
row = trt,
col = response,
margin = NULL
)
# test row margins ------
expect_equal(
"..total.." %in% margins$table_body$variable,
TRUE
)
expect_equal(
"..total.." %in% no_margins$table_body$variable,
FALSE
)
# test col margins ------
expect_equal(
"stat_0" %in% names(margins$table_body),
TRUE
)
expect_equal(
"stat_0" %in% names(no_margins$table_body),
FALSE
)
})
|
###################################################################
### METHYLKIT METHREAD AND FILTERING
### R script to create tabix files of filtered cytosine methylation
### by sequence context
###################################################################
# set up environment
# NOTE: paths are hard-coded to /scratch/nia/manuFinal; the script resumes
# from a previously saved workspace (.RData) and saves back into it at the end.
setwd("/scratch/nia/manuFinal")
library(methylKit)
load(file=".RData")
# read in methylation proportion files and filter
# For each context (CpG/CHG/CHH): read 3 mutant + 3 wild-type samples
# (treatment 1 = mutant, 0 = WT) into a tabix-backed methylRawList at
# minimum coverage 1, then keep sites with coverage >= 3 and below the
# 99.9th coverage percentile (PCR-duplicate guard).
print("CpG ----------------------------------------------------------------")
CpG.raw=methRead(list("/scratch/nia/manuFinal/aMut_CpG.txt", "/scratch/nia/manuFinal/bMut_CpG.txt",
                      "/scratch/nia/manuFinal/cMut_CpG.txt", "/scratch/nia/manuFinal/aWT_CpG.txt",
                      "/scratch/nia/manuFinal/bWT_CpG.txt", "/scratch/nia/manuFinal/cWT_CpG.txt"),
                 sample.id=list("aMutCpG","bMutCpG","cMutCpG","aWTCpG", "bWTCpG", "cWTCpG"),
                 assembly="b73", treatment=c(1,1,1,0,0,0), context="CpG", dbtype = "tabix", dbdir = "methylDB", mincov=1)
CpG.3x=filterByCoverage(CpG.raw,lo.count=3,lo.perc=NULL,hi.count=NULL,hi.perc=99.9, suffix="3x", dbdir="methylDB")
print("CHG ----------------------------------------------------------------")
CHG.raw=methRead(list("/scratch/nia/manuFinal/aMut_CHG.txt", "/scratch/nia/manuFinal/bMut_CHG.txt",
                      "/scratch/nia/manuFinal/cMut_CHG.txt", "/scratch/nia/manuFinal/aWT_CHG.txt",
                      "/scratch/nia/manuFinal/bWT_CHG.txt", "/scratch/nia/manuFinal/cWT_CHG.txt"),
                 sample.id=list("aMutCHG","bMutCHG","cMutCHG","aWTCHG", "bWTCHG", "cWTCHG"),
                 assembly="b73", treatment=c(1,1,1,0,0,0), context="CHG", dbtype = "tabix", dbdir = "methylDB", mincov=1)
CHG.3x=filterByCoverage(CHG.raw,lo.count=3,lo.perc=NULL,hi.count=NULL,hi.perc=99.9, suffix="3x", dbdir="methylDB")
print("CHH ----------------------------------------------------------------")
CHH.raw=methRead(list("/scratch/nia/manuFinal/aMut_CHH.txt", "/scratch/nia/manuFinal/bMut_CHH.txt",
                      "/scratch/nia/manuFinal/cMut_CHH.txt", "/scratch/nia/manuFinal/aWT_CHH.txt",
                      "/scratch/nia/manuFinal/bWT_CHH.txt", "/scratch/nia/manuFinal/cWT_CHH.txt"),
                 sample.id=list("aMutCHH","bMutCHH","cMutCHH","aWTCHH", "bWTCHH", "cWTCHH"),
                 assembly="b73", treatment=c(1,1,1,0,0,0), context="CHH", dbtype = "tabix", dbdir = "methylDB", mincov=1)
CHH.3x=filterByCoverage(CHH.raw,lo.count=3,lo.perc=NULL,hi.count=NULL,hi.perc=99.9, suffix="3x", dbdir="methylDB")
# save workspace image for later loading (primary copy plus a backup)
save.image(file=".RData")
save.image(file="backupRData/2.1-backup.RData")
q(save="yes")
q(save="yes") | /2-methylCalling/2.1-methReadandFilter.R | no_license | niahughes/maizemethylation | R | false | false | 2,662 | r | ###################################################################
### METHYLKIT METHREAD AND FILTERING
### R script to create tabix files of filtered cytosine methylation
### by sequence context
###################################################################
# set up environment
setwd("/scratch/nia/manuFinal")
library(methylKit)
load(file=".RData")
# read in methylation proportion files and filter
print("CpG ----------------------------------------------------------------")
CpG.raw=methRead(list("/scratch/nia/manuFinal/aMut_CpG.txt", "/scratch/nia/manuFinal/bMut_CpG.txt",
"/scratch/nia/manuFinal/cMut_CpG.txt", "/scratch/nia/manuFinal/aWT_CpG.txt",
"/scratch/nia/manuFinal/bWT_CpG.txt", "/scratch/nia/manuFinal/cWT_CpG.txt"),
sample.id=list("aMutCpG","bMutCpG","cMutCpG","aWTCpG", "bWTCpG", "cWTCpG"),
assembly="b73", treatment=c(1,1,1,0,0,0), context="CpG", dbtype = "tabix", dbdir = "methylDB", mincov=1)
CpG.3x=filterByCoverage(CpG.raw,lo.count=3,lo.perc=NULL,hi.count=NULL,hi.perc=99.9, suffix="3x", dbdir="methylDB")
print("CHG ----------------------------------------------------------------")
CHG.raw=methRead(list("/scratch/nia/manuFinal/aMut_CHG.txt", "/scratch/nia/manuFinal/bMut_CHG.txt",
"/scratch/nia/manuFinal/cMut_CHG.txt", "/scratch/nia/manuFinal/aWT_CHG.txt",
"/scratch/nia/manuFinal/bWT_CHG.txt", "/scratch/nia/manuFinal/cWT_CHG.txt"),
sample.id=list("aMutCHG","bMutCHG","cMutCHG","aWTCHG", "bWTCHG", "cWTCHG"),
assembly="b73", treatment=c(1,1,1,0,0,0), context="CHG", dbtype = "tabix", dbdir = "methylDB", mincov=1)
CHG.3x=filterByCoverage(CHG.raw,lo.count=3,lo.perc=NULL,hi.count=NULL,hi.perc=99.9, suffix="3x", dbdir="methylDB")
print("CHH ----------------------------------------------------------------")
CHH.raw=methRead(list("/scratch/nia/manuFinal/aMut_CHH.txt", "/scratch/nia/manuFinal/bMut_CHH.txt",
"/scratch/nia/manuFinal/cMut_CHH.txt", "/scratch/nia/manuFinal/aWT_CHH.txt",
"/scratch/nia/manuFinal/bWT_CHH.txt", "/scratch/nia/manuFinal/cWT_CHH.txt"),
sample.id=list("aMutCHH","bMutCHH","cMutCHH","aWTCHH", "bWTCHH", "cWTCHH"),
assembly="b73", treatment=c(1,1,1,0,0,0), context="CHH", dbtype = "tabix", dbdir = "methylDB", mincov=1)
CHH.3x=filterByCoverage(CHH.raw,lo.count=3,lo.perc=NULL,hi.count=NULL,hi.perc=99.9, suffix="3x", dbdir="methylDB")
# save workspace image for later loading
save.image(file=".RData")
save.image(file="backupRData/2.1-backup.RData")
q(save="yes") |
# Monte Carlo integration
#
# Estimate the centre of gravity (expectation) of a two-component
# Gaussian mixture:
#   p(x) : N([1,1],  [1  0.4;  0.4 0.7]) + N([3,-1], [1 -0.7; -0.7 0.9])
#   h(x) : x
#   E[h(x)] : the expectation of x = the centre of gravity
library(ggplot2)
library(mvtnorm)

N=1000
# Draw N/2 samples from each mixture component (N rows total)
sample <- rbind( rmvnorm( N/2, c(1,1), matrix(c(1, 0.4, 0.4,0.7),2,2) ),
                 rmvnorm( N/2, c(3,-1), matrix(c(1,-0.7,-0.7,0.9),2,2) ) )
h_x <- sample
# Monte Carlo estimate of E[h(x)]: the per-column mean of the N samples
# (colMeans replaces the hand-rolled apply(h_x, 2, sum) / N)
E_x <- colMeans(h_x)
print( E_x )

# Scatter plot of the samples with the estimated centre of gravity in red
plot.data = data.frame( x=sample[,1], y=sample[,2] )
gp = ggplot( plot.data )
gp = gp + geom_point( aes( x=x, y=y ) )
gp = gp + annotate( "point", x=E_x[1], y=E_x[2], col="red", size=5 )
print( gp )
| /TimeSeries/ex2_center_of_gravity.R | no_license | takechu/study | R | false | false | 608 | r | # モンテカルロ積分
#
# 重心を求める
# p(x) : N([1,1],[1 0.4 0.4 0.7]) + N([3,-1],[1 -0.7 -0.7 0.9])
# h(x) : x
# E[h(x)] : xの期待値=重心?
library(ggplot2)
library(mvtnorm)
N=1000
sample <- rbind( rmvnorm( N/2, c(1,1), matrix(c(1, 0.4, 0.4,0.7),2,2) ),
rmvnorm( N/2, c(3,-1), matrix(c(1,-0.7,-0.7,0.9),2,2) ) )
h_x <- sample
E_x <- apply(h_x,2,sum)/N
print( E_x )
plot.data = data.frame( x=sample[,1], y=sample[,2] )
gp = ggplot( plot.data )
gp = gp + geom_point( aes( x=x, y=y ) )
gp = gp + annotate( "point", x=E_x[1], y=E_x[2], col="red", size=5 )
print( gp )
|
# Read a delimited text file into a data.frame via read.table.
# `params` is a named list of options; only the entries that read.table
# actually accepts are kept (via the project helper subset_by_function)
# before being forwarded with do.call.
read_table <- function(filename, params) {
  params <- subset_by_function(read.table, params)
  do.call(read.table, c(list(filename), params))
}
# Write a data.frame to a delimited text file via write.table.
# `params` is a named list of options; only the entries that write.table
# actually accepts are forwarded. Errors if `obj` is not a data.frame.
write_table <- function(obj, filename, params) {
  if (!is.data.frame(obj)) {
    stop("Object provided is not a dataframe, cannot write to table format.")
  }
  # BUG FIX: params were filtered against read.table's formals and then
  # stringsAsFactors was injected -- but write.table accepts neither that
  # argument nor several read-only ones, so do.call would fail with
  # "unused argument". Filter against write.table itself instead.
  params <- subset_by_function(write.table, params)
  do.call(write.table, c(list(obj, filename), params))
}
# Register the read/write pair as the disk interface for the table format.
table_interface <- DiskInterface$new(read_table, write_table)
| /R/table.R | permissive | abelcastilloavant/csmpi | R | false | false | 530 | r | read_table <- function(filename, params) {
params <- subset_by_function(read.table, params)
do.call(read.table, c(list(filename), params))
}
write_table <- function(obj, filename, params) {
if (!is.data.frame(obj)) {
stop("Object provided is not a dataframe, cannot write to table format.")
}
params <- subset_by_function(read.table, params)
params$stringsAsFactors <- FALSE
do.call(write.table, c(list(obj, filename), params))
}
table_interface <- DiskInterface$new(read_table, write_table)
|
# Read a Newick tree, remove its root, and save the unrooted tree.
library(ape)

rooted_tree <- read.tree("11179_0.txt")
# unroot() collapses the basal dichotomy; write the result to a new file
write.tree(unroot(rooted_tree), file = "11179_0_unrooted.txt")
testtree <- read.tree("11179_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11179_0_unrooted.txt") |
# Loading in package.
library(dplyr)
#Read CSV file
Mechacar_df <- read.csv("MechaCar_mpg.csv")
# looking at dataframe.
head(Mechacar_df)
# Performing linear regression: mpg as a function of all five design variables
Mecha_regression <- lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle +
                         ground_clearance + AWD, data=Mechacar_df)
## Determining the P-Value (and R-squared) of the fitted model
summary(Mecha_regression)
# Creating Visualizations for the Trip Data.
suspension_df <- read.csv("Suspension_Coil.csv")
#Analyzing the df
head(suspension_df)
# Creating a df of summary statistics across all lots.
total_summary <- suspension_df %>% summarize(Mean=mean(PSI), Median=median(PSI),
                                             Variance=var(PSI), SD=sd(PSI))
# Creating a summary by lot (values formatted as strings for reporting)
lot_summary <- suspension_df %>% group_by(Manufacturing_Lot) %>%
  summarize(Mean=format(round(mean(PSI),2),2), Median=format(round(median(PSI),1),1),
            Variance=format(round(var(PSI),7),7), SD=format(round(sd(PSI),7),7),
            .groups = 'keep')
## Performing t-test of PSI against the population mean of 1500
# NOTE(review): the overall test uses log10(PSI) while the per-lot tests
# below use raw PSI -- log10(PSI) is ~3.18, so comparing it to mu=1500 is
# almost certainly unintended; confirm and align with the per-lot tests.
t.test(log10(suspension_df$PSI),mu=1500)
### t-test() for lot 1.
t.test(subset(suspension_df, Manufacturing_Lot=='Lot1')$PSI,mu=1500)
### t-test for lot 2
t.test(subset(suspension_df, Manufacturing_Lot=='Lot2')$PSI,mu=1500)
### t-test for lot 3
t.test(subset(suspension_df, Manufacturing_Lot=='Lot3')$PSI,mu=1500)
| /MechaCarChallenge.RScript.R | no_license | EBelizor/MechaCar_Statistical_Analysis | R | false | false | 1,341 | r | # Loading in package.
library(dplyr)
#Read CSV file
Mechacar_df <- read.csv("MechaCar_mpg.csv")
# looking at dataframe.
head(Mechacar_df)
# Performing linear regression
Mecha_regression <- lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle +
ground_clearance + AWD, data=Mechacar_df)
## Determining the P-Value
summary(Mecha_regression)
# Creating Visualizations for the Trip Data.
suspension_df <- read.csv("Suspension_Coil.csv")
#Analyzing the df
head(suspension_df)
# Creating a df of summary statistics.
total_summary <- suspension_df %>% summarize(Mean=mean(PSI), Median=median(PSI),
Variance=var(PSI), SD=sd(PSI))
# Creating a summary by lot
lot_summary <- suspension_df %>% group_by(Manufacturing_Lot) %>%
summarize(Mean=format(round(mean(PSI),2),2), Median=format(round(median(PSI),1),1),
Variance=format(round(var(PSI),7),7), SD=format(round(sd(PSI),7),7),
.groups = 'keep')
## Performing t-test
t.test(log10(suspension_df$PSI),mu=1500)
### t-test() for lot 1.
t.test(subset(suspension_df, Manufacturing_Lot=='Lot1')$PSI,mu=1500)
### t-test for lot 2
t.test(subset(suspension_df, Manufacturing_Lot=='Lot2')$PSI,mu=1500)
### t-test for lot 3
t.test(subset(suspension_df, Manufacturing_Lot=='Lot3')$PSI,mu=1500)
|
#### Phasing with Eagle2, wrapped for batchtools
# data           path to the dataset to phase (vcf.gz), supplied by batchtools
# job            batchtools job descriptor (required by the API, unused here)
# exec           path to the Eagle2 executable
# outpref        path + prefix for the output files; the function appends
#                "_mit_ref" / "_ohne_ref" depending on ref.on
# map.with.chr   path to the genetic-map file including a chromosome column
# reference.vcf  path to the reference panel in vcf form
# ref.on         TRUE = phase against the reference panel, FALSE = without
# chr.flag       chromosome the imputation is carried out on
# returns c(<path+prefix of phased data>, <phasing flag>); later functions
# add the needed suffix for the data type themselves
eagle_wrap <- function(data,
                       job,
                       exec,
                       outpref,
                       map.with.chr,
                       reference.vcf,
                       ref.on,
                       chr.flag, ...) {
  # The two branches only differ in the Eagle2 invocation and the output
  # prefix/flag; the post-processing is shared below (the original
  # duplicated it in both branches).
  if (ref.on) {
    # phasing with the reference panel included
    out  <- paste(outpref, "_mit_ref", sep = "")
    flag <- "phased_eagle_ref_on"
    system2(exec, c("--vcfTarget", data,
                    "--vcfRef", reference.vcf,
                    "--outPrefix", out,
                    "--vcfOutFormat z", "--geneticMapFile", map.with.chr,
                    "2>&1 | tee ", paste(out, "log", sep = ".")))
  } else {
    # phasing without the reference panel
    out  <- paste(outpref, "_ohne_ref", sep = "")
    flag <- "phased_eagle_ref_off"
    # NOTE(review): the log name here is "<outpref>.log" (without the
    # "_ohne_ref" suffix), unlike the reference branch; kept as-is to
    # preserve existing output file names -- confirm whether intended.
    system2(exec, c("--vcf", data, "--outPrefix", out,
                    "--geneticMapFile", map.with.chr,
                    "2>&1 | tee ", paste(outpref, "log", sep = ".")))
  }
  # create a hap-format file for imputation with IMPUTE2 and IMPUTE4
  vcf_hap(out)
  # rename for consistency with the rest of the pipeline
  file.rename(paste(out, ".hap.gz", sep = ""), paste(out, ".haps.gz", sep = ""))
  # create the input file for imputation with PBWT
  pbwt_prep(out, reference.vcf, flag, chr.flag)
  # location/prefix of the phased dataset and the flag for the phasing used
  c(out, flag)
}
| /functions/eagle.R | no_license | StahlKt/ImputationComparisonPaper2021 | R | false | false | 3,238 | r | ####phasing with eagle
# input: data= path to data to phase in vcf,gz job exec points to eagle2 map= path that points to txt file with genetic map (4 columns) out= path pointing to the
# not-yet extistint outputfile reference.vcf= path pointing to referencepanel ref.on= 0 or 1 binary, 0 phases without refpanel, 1 phases with refpanel
# ouput: path to now existing outputfile in vcf.gz
##wrap function to phase with eagle for batchtools
#data as defined by batchtools call, which is the data to be imputed
#jobe is defined by batchtools call
#exec expexts the path to the eagle Executable
#outpref expects a path with a prefix of how the output files should be named.
# the full name will be generated by the function including the phasing process suffix for data type
#map.with.chr expects the path to the map file inlcuding a chromosome column
#chr.flag needs to be set according to which chromosome is the imputation carried out on
#reference.vcf expects the path to the reference panel in vcf form
#ref.on is a flag, TRUE means the phasing will be carried out with the reference panel
eagle_wrap <- function(data,
job,
exec,
outpref,
map.with.chr,
reference.vcf,
ref.on,
chr.flag, ...) {
#phasing, if reference panel should be included
if (ref.on) {
system2(exec, c("--vcfTarget", data,
"--vcfRef", reference.vcf,
"--outPrefix", paste(outpref, "_mit_ref", sep = ""),
"--vcfOutFormat z", "--geneticMapFile", map.with.chr,
"2>&1 | tee ", paste(paste(outpref, "_mit_ref", sep = ""), "log", sep = ".")))
#create new file in hap format for imputation with IMPUTE2 and IMPUTE4
vcf_hap(paste(outpref, "_mit_ref", sep = ""))
#rename for consistency
file.rename(paste(outpref, "_mit_ref", ".hap.gz", sep = ""), paste(outpref, "_mit_ref", ".haps.gz", sep = ""))
#create new file for imputation with PBWT
pbwt_prep(paste(outpref, "_mit_ref", sep = ""), reference.vcf, "phased_eagle_ref_on", chr.flag)
#return location and prefix of phased dataset and flag for which phasing was used.
#Later functions will add the needed suffix for the data type themselves
return(c(paste(outpref, "_mit_ref", sep = ""), "phased_eagle_ref_on"))
} else {
#phasing, if reference panel is not included, step description analogous to description above
system2(exec, c("--vcf", data, "--outPrefix", paste(outpref, "_ohne_ref", sep = ""),
"--geneticMapFile", map.with.chr, "2>&1 | tee ", paste(outpref, "log", sep = ".")))
vcf_hap(paste(outpref, "_ohne_ref", sep = ""))
file.rename(paste(outpref, "_ohne_ref", ".hap.gz", sep = ""), paste(outpref, "_ohne_ref", ".haps.gz", sep = ""))
pbwt_prep(paste(outpref, "_ohne_ref", sep = ""), reference.vcf, "phased_eagle_ref_off",chr.flag)
return(c(paste(outpref, "_ohne_ref", sep = ""), "phased_eagle_ref_off"))
}
}
|
# short_term_forecast.R
# Copyright 2013 Finlay Scott and Chato Osio
# Maintainer: Finlay Scott, JRC, finlay.scott@jrc.ec.europa.eu
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#--------------------------------------------------------------------------
# Generic script for running short-term forecasts (STF).
# This script assumes that have already run your assessment and that you have a fully specified age-structured FLStock object
### Running on:
# R version 3.0.1 (2013-05-16)
# Platform: i386-w64-mingw32/i386 (32-bit)
#------------------------------------------------------------------
# Libraries and data
# NOTE(review): rm(list=ls()) in a sourced script wipes the caller's
# workspace -- kept as-is, but consider removing.
rm(list=ls())
options(stringsAsFactors = F)
library(FLCore)
library(FLAssess)
library(FLash)
library(ggplotFL)
library(FLBRP)
#library(plyr)
#library(reshape2)
# Example data set - use your own
# You need a full specified FLStock object
#DATpath <- file.path(getwd(), "SS_3.0.1/FOR_ASSESSMENT/BaseCase/")
#load(paste0(DATpath, "WHOM_SS3results.RData"))
#RESpath <- paste0(DATpath, "forecast")
# Load your own data, probably using the load() function
#stk <- data
# 0.Setup.R is expected to define the assessed stock object WG18
source(file=file.path(getwd(),"Scripts","0.Setup.R"))
stk <- WG18
# Quick check that the stock object is correct
summary(stk)
plot(stk)
# For the STF we would like to run a F0.1 scenario
# Use FLBRP to get F0.1
# stk_brp <- brp(FLBRP(window(data,start=1983,end=2017)))
# refpts(stk_brp)
# f01 <- c(refpts(stk_brp)["f0.1","harvest"])
# f01
# Is this number sensible?
# ========================================
# F AND M BEFORE SPAWNING!!
# stk@harvest.spwn <- FLQuant(0.21, dimnames=list(age=0:20, year=1982:2017), units='diff')
# stk@m.spwn <- FLQuant(0.21, dimnames=list(age=0:20, year=1982:2017), units='diff')
# ========================================
#stk@m.spwn <- FLQuant(0.21, dim=c(21,36))
# We also need F status quo. Despite the original comment mentioning a
# 3-year geometric mean, the code below uses the Fbar of the FINAL data
# year only (no_fbar_years is set to 1 and is not actually used).
no_stk_years <- dim(rec(stk))[2]   # number of years in the stock object
no_fbar_years <- 1                 # NOTE(review): declared but unused below
fbars <- fbar(stk)[,no_stk_years]  # Fbar of the final data year
fbar_status_quo <- an(fbars)       # an() is FLCore shorthand for as.numeric()
#--------------------------------------------------------------------------
# STF
# Here we run the STF for 3 years, 2013, 2014, 2015
# You can change these as appropriate
# The first year of the STF should be the next one after the final year in your stock data
# For example, the final year in the dummy stk object is 2012 so the first year of the STF is 2013
# Number of forecast years; first STF year = final data year + 1
stf_nyears <- 3
final_year <- max(as.numeric(dimnames(stock.n(stk))[[2]]))
stf_years <- (final_year+1):(final_year+stf_nyears)
no_stf_years <- length(stf_years)
# Set up the future stock object.
# Here we use the default assumptions about what happens to weights, maturity and selection pattern in the future
# except that biological means are taken over the last 10 years (wts.nyears = 10)
# NOTE: You may want to change some of these assumptions by hand
# See the help page for stf: ?stf for more details
stf_stk <- stf(stk, nyears = no_stf_years, wts.nyears = 10)
# Set up future recruitment as the geometric mean of recruitment
# over 1983..final_year (NOT just the last 3 years)
#no_rec_years <- 3 # Change number of years as appropriate
recs <- window(rec(stk), 1983, final_year)
#recs <- rec(stk)[,(no_stk_years - no_rec_years + 1):no_stk_years]
#mean_rec <- exp(mean(log(c(rec(stk)[,ac(myy),]))))
mean_rec <- exp(mean(log(c(recs))))
# We are going to run several F scenarios for the STF
# The scenarios are based on 'F status quo', which we calculated above as the mean F of the last X years
# An STF is for three years - you could change this but if you do you will have to hack the code below
# For a three year STF the F pattern is:
# year 1: fbar_status_quo
# year 2: fbar_status_quo * fbar_multiplier
# year 3: fbar_status_quo * fbar_multiplier
# The fbar_multiplier is the same for years 2 and 3
# We are going to run several STFs with different values for the fbar_multiplier
# The fbar_multiplier ranges from 0.1 to 2 by 0.1
#fbar_multiplier <- seq(1.68, 1.7, 0.0001)
# F multipliers applied in STF years 2 and 3: 0, 0.01, ..., 2 (201 scenarios)
fbar_multiplier <- seq(0, 2, 0.01)
# Loop over candidate intermediate-year (ImY) catch assumptions; one results
# table (csv) is written per assumption
for (ii in seq(121000,200000,by=1000)) {
# We are going to build a data.frame that builds these scenarios
# Each column in the dataframe is a year
# Each row is a scenario
# Set up the fbar scenarios - note that if you project for more than 3 years you will need to add more columns / years to the matrix
fbar_scenarios <- cbind(rep(fbar_status_quo,length(fbar_multiplier)),
                        fbar_multiplier*fbar_status_quo,
                        fbar_multiplier*fbar_status_quo)
# Add the F0.1 scenario as a final scenario
#fbar_scenarios <- rbind(fbar_scenarios, c(fbar_status_quo,f01,f01))
#fbar_scenarios <- rbind(fbar_scenarios, c(fbar_status_quo,fbar_status_quo,fbar_status_quo))
# There are various results we want to extract from the STF
# Make an empty matrix in which to store the results
stf_results <- matrix(NA,nrow = nrow(fbar_scenarios),ncol = 11)
# Update column names
colnames(stf_results) <- c('Ffactor',
                           'Fbar',
                           paste('Catch',final_year,sep="_"),
                           paste('Catch',final_year+1,sep="_"),
                           paste('Catch',final_year+2,sep="_"),
                           paste('Catch',final_year+3,sep="_"),
                           paste('SSB',final_year+1,sep="_"),
                           paste('SSB',final_year+2,sep="_"),
                           paste('SSB',final_year+3,sep="_"),
                           paste('Change_SSB_',final_year+2,'-',final_year+3,'(%)',sep=""),
                           paste('Change_Catch_',final_year+1,'-',final_year+2,'(%)',sep=""))
# Store the FLStock each time
stk_stf <- FLStocks()
# set FMSY
FMSY <- 0.1079
#Intermediate year catch assumption
#ImY <- 95500 #WGWIDE2017 assumption for 2017 ImY catch
#ImY <- 115470 #WGWIDE2018 assumption for 2018 ImY catch
#ImY <- 104370 #WGWIDE2018 with updated ImY based on new 2017 advice with relative RPs
#ImY <- 94987 #based on 2017 advice from relative RPs in contemporary period
#ImY <- 100000
ImY <- ii
# Loop over the scenarios
for (scenario in 1:nrow(fbar_scenarios)) {
cat("Scenario: ", scenario, "\n")
# Make a target object with the F values for that scenario
# ctrl_target <- data.frame(year = stf_years,
#                           quantity = "f",
#                           val = fbar_scenarios[scenario,])
# NOTE: year recycles over the 6 rows, giving a catch target in year 1 and
# F targets in years 2-3; the NA entries are the unused quantity per year
ctrl_target <- data.frame(year = stf_years,
                          quantity = c(rep("catch",3),rep("f",3)),
                          val = c(c(ImY,NA,NA),c(NA,fbar_scenarios[scenario,2:3]))) # TAC 2018
# Set the control object - year, quantity and value for the moment
ctrl_f <- fwdControl(ctrl_target)
# Run the forward projection. We include an additional argument, maxF.
# By default the value of maxF is 2.0
# Here we increase it to 10.0 so that F is not limited
stk_stf_fwd <- fwd(stf_stk, ctrl = ctrl_f, sr = list(model="mean", params=FLPar(a = mean_rec)), maxF = 10.0)
## Check it has worked - uncomment out to check scenario by scenario
#plot(stk_stf_fwd)
# Store the result - if you want to, comment out if unnecessary
stk_stf[[as.character(scenario)]] <- stk_stf_fwd
# Fill results table
stf_results[scenario,1] <- fbar_scenarios[scenario,2] / fbar_scenarios[scenario,1] # fbar status quo ratio
stf_results[scenario,2] <- fbar(stk_stf_fwd)[,ac(stf_years[stf_nyears])] # final stf year
stf_results[scenario,3] <- catch(stk_stf_fwd)[,ac(final_year)] # last 'true' year
stf_results[scenario,4] <- catch(stk_stf_fwd)[,ac(final_year+1)] # 1st stf year
stf_results[scenario,5] <- catch(stk_stf_fwd)[,ac(final_year+2)] # 2nd stf year
stf_results[scenario,6] <- catch(stk_stf_fwd)[,ac(final_year+3)] # final stf year
stf_results[scenario,7] <- ssb(stk_stf_fwd)[,ac(final_year+1)] # 1st stf year
stf_results[scenario,8] <- ssb(stk_stf_fwd)[,ac(final_year+2)] # 2nd stf year
stf_results[scenario,9] <- ssb(stk_stf_fwd)[,ac(final_year+3)] # final stf year
# Change in SSB
stf_results[scenario,10] <- (ssb(stk_stf_fwd)[,ac(final_year+3)]-ssb(stk_stf_fwd)[,ac(final_year+2)])/ssb(stk_stf_fwd)[,ac(final_year+2)]*100 # change in ssb in last two stf years
stf_results[scenario,11] <- (catch(stk_stf_fwd)[,ac(final_year+2)]-catch(stk_stf_fwd)[,ac(final_year+1)])/catch(stk_stf_fwd)[,ac(final_year+1)]*100 # change in catch between the first two stf years
}
# Look at the table of results
stf_results
write.csv(stf_results, file=paste0("STF_WGWIDE2018_IMY",ImY,".csv"), quote=F, row.names = F)
# export this if necessary
#write.csv(stf_results, file="stf_results.csv")
}
# Plotting
# Plotting is not necessary for the report but here is a crude one anyway
# NOTE: stk_stf only holds the scenarios from the LAST iteration of the ImY
# loop above (it is re-created inside the loop each time)
plot(window(stk_stf, start=2001, end=final_year+3))
stf_results
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#### Catch scenario including 15% area 9
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Intermediate year catch = 107803.2 t
# Coarser multiplier grid (steps of 0.1) than the ImY loop above
fbar_multiplier <- seq(0, 2, 0.1)
# We are going to build a data.frame that builds these scenarios
# Each column in the dataframe is a year
# Each row is a scenario
# Set up the fbar scenarios - note that if you project for more than 3 years you will need to add more columns / years to the matrix
fbar_scenarios <- cbind(rep(fbar_status_quo,length(fbar_multiplier)),
                        fbar_multiplier*fbar_status_quo,
                        fbar_multiplier*fbar_status_quo)
# Add the F status quo scenario as a final (extra) row
#fbar_scenarios <- rbind(fbar_scenarios, c(fbar_status_quo,f01,f01))
fbar_scenarios <- rbind(fbar_scenarios, c(fbar_status_quo,fbar_status_quo,fbar_status_quo))
# There are various results we want to extract from the STF
# Make an empty matrix in which to store the results
stf_results <- matrix(NA,nrow = nrow(fbar_scenarios),ncol = 11)
# Update column names
colnames(stf_results) <- c('Ffactor',
                           'Fbar',
                           paste('Catch',final_year,sep="_"),
                           paste('Catch',final_year+1,sep="_"),
                           paste('Catch',final_year+2,sep="_"),
                           paste('Catch',final_year+3,sep="_"),
                           paste('SSB',final_year+1,sep="_"),
                           paste('SSB',final_year+2,sep="_"),
                           paste('SSB',final_year+3,sep="_"),
                           paste('Change_SSB_',final_year+2,'-',final_year+3,'(%)',sep=""),
                           paste('Change_Catch_',final_year+1,'-',final_year+2,'(%)',sep=""))
# Store the FLStock each time
stk_stf <- FLStocks()
# set FMSY
FMSY <- 0.1079
# Loop over the scenarios
# Loop over the catch/F scenarios and fill the results table.
# Year 1 of the forecast is constrained by the intermediate-year catch (TAC);
# years 2 and 3 are constrained by the scenario F values.
for (scenario in 1:nrow(fbar_scenarios)) {
  cat("Scenario: ", scenario, "\n")
  # Build the projection target: catch in the intermediate year, F thereafter.
  # year recycles over the 6 rows; the NAs are the unused quantity per year.
  ctrl_target <- data.frame(year = stf_years,
                            quantity = c(rep("catch", 3), rep("f", 3)),
                            val = c(c(107803.2, NA, NA), c(NA, fbar_scenarios[scenario, 2:3]))) # TAC 2017
  # Set the control object - year, quantity and value for the moment
  ctrl_f <- fwdControl(ctrl_target)
  # maxF is raised from the default 2.0 to 10.0 so F is effectively unconstrained
  stk_stf_fwd <- fwd(stf_stk, ctrl = ctrl_f, sr = list(model = "mean", params = FLPar(a = mean_rec)), maxF = 10.0)
  ## Check it has worked - uncomment out to check scenario by scenario
  #plot(stk_stf_fwd)
  # Keep the projected stock for later inspection/plotting
  stk_stf[[as.character(scenario)]] <- stk_stf_fwd
  # Fill results table (columns must match the colnames set above)
  stf_results[scenario, 1] <- fbar_scenarios[scenario, 2] / fbar_scenarios[scenario, 1] # F multiplier relative to status quo
  stf_results[scenario, 2] <- fbar(stk_stf_fwd)[, ac(stf_years[stf_nyears])] # final stf year
  stf_results[scenario, 3] <- catch(stk_stf_fwd)[, ac(final_year)]     # last 'true' year
  stf_results[scenario, 4] <- catch(stk_stf_fwd)[, ac(final_year + 1)] # 1st stf year
  stf_results[scenario, 5] <- catch(stk_stf_fwd)[, ac(final_year + 2)] # 2nd stf year
  stf_results[scenario, 6] <- catch(stk_stf_fwd)[, ac(final_year + 3)] # final stf year
  # BUG FIX: column 7 is labelled SSB_{final_year+1} but was filled with
  # ssb(final_year+2), duplicating column 8. Use final_year+1, consistent
  # with the first scenario loop earlier in this script.
  stf_results[scenario, 7] <- ssb(stk_stf_fwd)[, ac(final_year + 1)]   # 1st stf year
  stf_results[scenario, 8] <- ssb(stk_stf_fwd)[, ac(final_year + 2)]   # 2nd stf year
  stf_results[scenario, 9] <- ssb(stk_stf_fwd)[, ac(final_year + 3)]   # final stf year
  # Percentage change in SSB over the last two forecast years
  stf_results[scenario, 10] <- (ssb(stk_stf_fwd)[, ac(final_year + 3)] - ssb(stk_stf_fwd)[, ac(final_year + 2)]) / ssb(stk_stf_fwd)[, ac(final_year + 2)] * 100
  # Percentage change in catch between the 1st and 2nd forecast years
  stf_results[scenario, 11] <- (catch(stk_stf_fwd)[, ac(final_year + 2)] - catch(stk_stf_fwd)[, ac(final_year + 1)]) / catch(stk_stf_fwd)[, ac(final_year + 1)] * 100
}
# Look at the table of results
stf_results
# Export the results table.
# FIX: write.csv() fixes sep="," itself; passing sep explicitly is ignored
# with a warning, so the argument is dropped.
write.csv(stf_results, file = paste(RESpath, "WHOM_STF_IncreasedCatch.csv", sep = "/"), quote = F, row.names = F)
# export this if necessary
| /RefPts_IBP_2019/Scripts/YPR_and_Forecast_New.R | no_license | ices-eg/wk_WKREBUILD | R | false | false | 13,729 | r | # short_term_forecast.R
# Copyright 2013 Finlay Scott and Chato Osio
# Maintainer: Finlay Scott, JRC, finlay.scott@jrc.ec.europa.eu
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#--------------------------------------------------------------------------
# Generic script for running short-term forecasts (STF).
# This script assumes that have already run your assessment and that you have a fully specified age-structured FLStock object
### Running on:
# R version 3.0.1 (2013-05-16)
# Platform: i386-w64-mingw32/i386 (32-bit)
#------------------------------------------------------------------
# Libraries and data
# NOTE(review): rm(list=ls()) wipes the calling workspace; acceptable for a
# standalone script but do not source() this file from other code
rm(list=ls())
library(FLCore)
library(FLAssess)
library(FLash)
library(ggplotFL)
library(FLBRP)
#library(plyr)
#library(reshape2)
# Example data set - use your own
# You need a full specified FLStock object
#DATpath <- file.path(getwd(), "SS_3.0.1/FOR_ASSESSMENT/BaseCase/")
#load(paste0(DATpath, "WHOM_SS3results.RData"))
#RESpath <- paste0(DATpath, "forecast")
# NOTE(review): RESpath is used by write.csv() near the end of this script
# but is only defined in the commented-out line above - confirm 0.Setup.R
# defines it, otherwise the final export will fail
# Load your own data, probably using the load() function
#stk <- data
# 0.Setup.R is expected to create the WG18 FLStock object used below
source(file=file.path(getwd(),"Scripts","0.Setup.R"))
stk <- WG18
# Quick check that the stock object is correct
summary(stk)
plot(stk)
# For the STF we would like to run a F0.1 scenario
# Use FLBRP to get F0.1
# stk_brp <- brp(FLBRP(window(data,start=1983,end=2017)))
# refpts(stk_brp)
# f01 <- c(refpts(stk_brp)["f0.1","harvest"])
# f01
# Is this number sensible?
# ========================================
# F AND M BEFORE SPAWNING!!
# stk@harvest.spwn <- FLQuant(0.21, dimnames=list(age=0:20, year=1982:2017), units='diff')
# stk@m.spwn <- FLQuant(0.21, dimnames=list(age=0:20, year=1982:2017), units='diff')
# ========================================
#stk@m.spwn <- FLQuant(0.21, dim=c(21,36))
# We also need F status quo.
# NOTE: despite older comments about a geometric mean of the last 3 years,
# F status quo here is simply the Fbar of the FINAL data year
# (no_fbar_years is set but unused)
no_stk_years <- dim(rec(stk))[2]
no_fbar_years <- 1
fbars <- fbar(stk)[,no_stk_years]
# an() converts the single-year FLQuant to a plain numeric
fbar_status_quo <- an(fbars)
#--------------------------------------------------------------------------
# STF
# Here we run the STF for the 3 years following the final data year
# You can change these as appropriate
# The first year of the STF should be the next one after the final year in your stock data
stf_nyears <- 3
final_year <- max(as.numeric(dimnames(stock.n(stk))[[2]]))
stf_years <- (final_year+1):(final_year+stf_nyears)
no_stf_years <- length(stf_years)
# Set up the future stock object.
# Here we use the default assumptions about what happens to weights, maturity and selection pattern in the future
# NOTE: wts.nyears = 10 overrides the stf() default, so future weights are
# means of the last 10 years rather than the usual 3
# NOTE: You may want to change some of these assumptions by hand
# See the help page for stf: ?stf for more details
stf_stk <- stf(stk, nyears = no_stf_years, wts.nyears = 10)
# Set up future recruitment as the geometric mean of recruitment over
# 1983 to the final data year (NOT the last 3 years as the commented-out
# alternatives below would do)
#no_rec_years <- 3 # Change number of years as appropriate
recs <- window(rec(stk), 1983, final_year)
#recs <- rec(stk)[,(no_stk_years - no_rec_years + 1):no_stk_years]
#mean_rec <- exp(mean(log(c(rec(stk)[,ac(myy),]))))
mean_rec <- exp(mean(log(c(recs))))
# We are going to run several F scenarios for the STF
# The scenarios are based on 'F status quo', which we calculated above
# An STF is for three years - you could change this but if you do you will have to hack the code below
# For a three year STF the F pattern is:
# year 1: fbar_status_quo
# year 2: fbar_status_quo * fbar_multiplier
# year 3: fbar_status_quo * fbar_multiplier
# The fbar_multiplier is the same for years 2 and 3
# We are going to run several STFs with different values for the fbar_multiplier
# The fbar_multiplier ranges from 0 to 2 in steps of 0.01
#fbar_multiplier <- seq(1.68, 1.7, 0.0001)
fbar_multiplier <- seq(0, 2, 0.01)
# Loop over candidate intermediate-year (ImY) catch assumptions; one results
# table (csv) is written per assumption
for (ii in seq(121000,200000,by=1000)) {
# We are going to build a data.frame that builds these scenarios
# Each column in the dataframe is a year
# Each row is a scenario
# Set up the fbar scenarios - note that if you project for more than 3 years you will need to add more columns / years to the matrix
fbar_scenarios <- cbind(rep(fbar_status_quo,length(fbar_multiplier)),
                        fbar_multiplier*fbar_status_quo,
                        fbar_multiplier*fbar_status_quo)
# Add the F0.1 scenario as a final scenario
#fbar_scenarios <- rbind(fbar_scenarios, c(fbar_status_quo,f01,f01))
#fbar_scenarios <- rbind(fbar_scenarios, c(fbar_status_quo,fbar_status_quo,fbar_status_quo))
# There are various results we want to extract from the STF
# Make an empty matrix in which to store the results
stf_results <- matrix(NA,nrow = nrow(fbar_scenarios),ncol = 11)
# Update column names
colnames(stf_results) <- c('Ffactor',
                           'Fbar',
                           paste('Catch',final_year,sep="_"),
                           paste('Catch',final_year+1,sep="_"),
                           paste('Catch',final_year+2,sep="_"),
                           paste('Catch',final_year+3,sep="_"),
                           paste('SSB',final_year+1,sep="_"),
                           paste('SSB',final_year+2,sep="_"),
                           paste('SSB',final_year+3,sep="_"),
                           paste('Change_SSB_',final_year+2,'-',final_year+3,'(%)',sep=""),
                           paste('Change_Catch_',final_year+1,'-',final_year+2,'(%)',sep=""))
# Store the FLStock each time
stk_stf <- FLStocks()
# set FMSY
FMSY <- 0.1079
#Intermediate year catch assumption
#ImY <- 95500 #WGWIDE2017 assumption for 2017 ImY catch
#ImY <- 115470 #WGWIDE2018 assumption for 2018 ImY catch
#ImY <- 104370 #WGWIDE2018 with updated ImY based on new 2017 advice with relative RPs
#ImY <- 94987 #based on 2017 advice from relative RPs in contemporary period
#ImY <- 100000
ImY <- ii
# Loop over the scenarios
for (scenario in 1:nrow(fbar_scenarios)) {
cat("Scenario: ", scenario, "\n")
# Make a target object with the F values for that scenario
# ctrl_target <- data.frame(year = stf_years,
#                           quantity = "f",
#                           val = fbar_scenarios[scenario,])
# NOTE: year recycles over the 6 rows, giving a catch target in year 1 and
# F targets in years 2-3; the NA entries are the unused quantity per year
ctrl_target <- data.frame(year = stf_years,
                          quantity = c(rep("catch",3),rep("f",3)),
                          val = c(c(ImY,NA,NA),c(NA,fbar_scenarios[scenario,2:3]))) # TAC 2018
# Set the control object - year, quantity and value for the moment
ctrl_f <- fwdControl(ctrl_target)
# Run the forward projection. We include an additional argument, maxF.
# By default the value of maxF is 2.0
# Here we increase it to 10.0 so that F is not limited
stk_stf_fwd <- fwd(stf_stk, ctrl = ctrl_f, sr = list(model="mean", params=FLPar(a = mean_rec)), maxF = 10.0)
## Check it has worked - uncomment out to check scenario by scenario
#plot(stk_stf_fwd)
# Store the result - if you want to, comment out if unnecessary
stk_stf[[as.character(scenario)]] <- stk_stf_fwd
# Fill results table
stf_results[scenario,1] <- fbar_scenarios[scenario,2] / fbar_scenarios[scenario,1] # fbar status quo ratio
stf_results[scenario,2] <- fbar(stk_stf_fwd)[,ac(stf_years[stf_nyears])] # final stf year
stf_results[scenario,3] <- catch(stk_stf_fwd)[,ac(final_year)] # last 'true' year
stf_results[scenario,4] <- catch(stk_stf_fwd)[,ac(final_year+1)] # 1st stf year
stf_results[scenario,5] <- catch(stk_stf_fwd)[,ac(final_year+2)] # 2nd stf year
stf_results[scenario,6] <- catch(stk_stf_fwd)[,ac(final_year+3)] # final stf year
stf_results[scenario,7] <- ssb(stk_stf_fwd)[,ac(final_year+1)] # 1st stf year
stf_results[scenario,8] <- ssb(stk_stf_fwd)[,ac(final_year+2)] # 2nd stf year
stf_results[scenario,9] <- ssb(stk_stf_fwd)[,ac(final_year+3)] # final stf year
# Change in SSB
stf_results[scenario,10] <- (ssb(stk_stf_fwd)[,ac(final_year+3)]-ssb(stk_stf_fwd)[,ac(final_year+2)])/ssb(stk_stf_fwd)[,ac(final_year+2)]*100 # change in ssb in last two stf years
stf_results[scenario,11] <- (catch(stk_stf_fwd)[,ac(final_year+2)]-catch(stk_stf_fwd)[,ac(final_year+1)])/catch(stk_stf_fwd)[,ac(final_year+1)]*100 # change in catch between the first two stf years
}
# Look at the table of results
stf_results
write.csv(stf_results, file=paste0("STF_WGWIDE2018_IMY",ImY,".csv"), quote=F, row.names = F)
# export this if necessary
#write.csv(stf_results, file="stf_results.csv")
}
# Plotting
# Plotting is not necessary for the report but here is a crude one anyway
# NOTE: stk_stf only holds the scenarios from the LAST iteration of the ImY
# loop above (it is re-created inside the loop each time)
plot(window(stk_stf, start=2001, end=final_year+3))
stf_results
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#### Catch scenario including 15% area 9
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Intermediate year catch = 107803.2 t
# Coarser multiplier grid (steps of 0.1) than the ImY loop above
fbar_multiplier <- seq(0, 2, 0.1)
# We are going to build a data.frame that builds these scenarios
# Each column in the dataframe is a year
# Each row is a scenario
# Set up the fbar scenarios - note that if you project for more than 3 years you will need to add more columns / years to the matrix
fbar_scenarios <- cbind(rep(fbar_status_quo,length(fbar_multiplier)),
                        fbar_multiplier*fbar_status_quo,
                        fbar_multiplier*fbar_status_quo)
# Add the F status quo scenario as a final (extra) row
#fbar_scenarios <- rbind(fbar_scenarios, c(fbar_status_quo,f01,f01))
fbar_scenarios <- rbind(fbar_scenarios, c(fbar_status_quo,fbar_status_quo,fbar_status_quo))
# There are various results we want to extract from the STF
# Make an empty matrix in which to store the results
stf_results <- matrix(NA,nrow = nrow(fbar_scenarios),ncol = 11)
# Update column names
colnames(stf_results) <- c('Ffactor',
                           'Fbar',
                           paste('Catch',final_year,sep="_"),
                           paste('Catch',final_year+1,sep="_"),
                           paste('Catch',final_year+2,sep="_"),
                           paste('Catch',final_year+3,sep="_"),
                           paste('SSB',final_year+1,sep="_"),
                           paste('SSB',final_year+2,sep="_"),
                           paste('SSB',final_year+3,sep="_"),
                           paste('Change_SSB_',final_year+2,'-',final_year+3,'(%)',sep=""),
                           paste('Change_Catch_',final_year+1,'-',final_year+2,'(%)',sep=""))
# Store the FLStock each time
stk_stf <- FLStocks()
# set FMSY
FMSY <- 0.1079
# Loop over the scenarios
# Loop over the catch/F scenarios and fill the results table.
# Year 1 of the forecast is constrained by the intermediate-year catch (TAC);
# years 2 and 3 are constrained by the scenario F values.
for (scenario in 1:nrow(fbar_scenarios)) {
  cat("Scenario: ", scenario, "\n")
  # Build the projection target: catch in the intermediate year, F thereafter.
  # year recycles over the 6 rows; the NAs are the unused quantity per year.
  ctrl_target <- data.frame(year = stf_years,
                            quantity = c(rep("catch", 3), rep("f", 3)),
                            val = c(c(107803.2, NA, NA), c(NA, fbar_scenarios[scenario, 2:3]))) # TAC 2017
  # Set the control object - year, quantity and value for the moment
  ctrl_f <- fwdControl(ctrl_target)
  # maxF is raised from the default 2.0 to 10.0 so F is effectively unconstrained
  stk_stf_fwd <- fwd(stf_stk, ctrl = ctrl_f, sr = list(model = "mean", params = FLPar(a = mean_rec)), maxF = 10.0)
  ## Check it has worked - uncomment out to check scenario by scenario
  #plot(stk_stf_fwd)
  # Keep the projected stock for later inspection/plotting
  stk_stf[[as.character(scenario)]] <- stk_stf_fwd
  # Fill results table (columns must match the colnames set above)
  stf_results[scenario, 1] <- fbar_scenarios[scenario, 2] / fbar_scenarios[scenario, 1] # F multiplier relative to status quo
  stf_results[scenario, 2] <- fbar(stk_stf_fwd)[, ac(stf_years[stf_nyears])] # final stf year
  stf_results[scenario, 3] <- catch(stk_stf_fwd)[, ac(final_year)]     # last 'true' year
  stf_results[scenario, 4] <- catch(stk_stf_fwd)[, ac(final_year + 1)] # 1st stf year
  stf_results[scenario, 5] <- catch(stk_stf_fwd)[, ac(final_year + 2)] # 2nd stf year
  stf_results[scenario, 6] <- catch(stk_stf_fwd)[, ac(final_year + 3)] # final stf year
  # BUG FIX: column 7 is labelled SSB_{final_year+1} but was filled with
  # ssb(final_year+2), duplicating column 8. Use final_year+1, consistent
  # with the first scenario loop earlier in this script.
  stf_results[scenario, 7] <- ssb(stk_stf_fwd)[, ac(final_year + 1)]   # 1st stf year
  stf_results[scenario, 8] <- ssb(stk_stf_fwd)[, ac(final_year + 2)]   # 2nd stf year
  stf_results[scenario, 9] <- ssb(stk_stf_fwd)[, ac(final_year + 3)]   # final stf year
  # Percentage change in SSB over the last two forecast years
  stf_results[scenario, 10] <- (ssb(stk_stf_fwd)[, ac(final_year + 3)] - ssb(stk_stf_fwd)[, ac(final_year + 2)]) / ssb(stk_stf_fwd)[, ac(final_year + 2)] * 100
  # Percentage change in catch between the 1st and 2nd forecast years
  stf_results[scenario, 11] <- (catch(stk_stf_fwd)[, ac(final_year + 2)] - catch(stk_stf_fwd)[, ac(final_year + 1)]) / catch(stk_stf_fwd)[, ac(final_year + 1)] * 100
}
# Look at the table of results
stf_results
# Export the results table.
# FIX: write.csv() fixes sep="," itself; passing sep explicitly is ignored
# with a warning, so the argument is dropped.
write.csv(stf_results, file = paste(RESpath, "WHOM_STF_IncreasedCatch.csv", sep = "/"), quote = F, row.names = F)
# export this if necessary
|
open.highlighted <- function(){
  # Open the text currently highlighted in the RStudio source editor with the
  # macOS `open` command, falling back to the working directory "." when the
  # selection is empty. The command is echoed to and executed in the console.
  #
  # Fixes: the editor context was previously fetched twice and the first
  # result (`ctx`) was never used; `T` replaced with `TRUE`.
  slct <- rstudioapi::getSourceEditorContext()$selection[[1]]
  a <- slct$text
  if (a == "") {
    a <- "."
  }
  # NOTE: paths containing a single quote or shell metacharacters will break
  # this command string
  rstudioapi::sendToConsole(paste0("system('open ", a, "')"), execute = TRUE)
}
| /R/open_highlighted.R | no_license | YutongWangUMich/labnotedown | R | false | false | 273 | r | open.highlighted <- function(){
ctx = rstudioapi::getSourceEditorContext()
slct = rstudioapi::getSourceEditorContext()$selection[[1]]
# print()
a <- slct$text
if(a == ""){
a <- "."
}
rstudioapi::sendToConsole(paste0("system('open ",a,"')"),execute = T)
}
|
# Load per-variable JAGS slope summaries (one RDS per water-quality variable)
doc<-readRDS(file="Datasets/JAGS_DOC_july18.rds")
col<-readRDS(file="Datasets/JAGS_Color_july18.rds")
tn<-readRDS(file="Datasets/JAGS_TN_july18.rds")
tp<-readRDS(file="Datasets/JAGS_TP_july18.rds")
no3<-readRDS(file="Datasets/JAGS_NO3_july18.rds")
chl<-readRDS(file="Datasets/JAGS_Chla_july18.rds")
# Tag each table with its variable name before stacking
doc$var="doc"
col$var="color"
tn$var="tn"
tp$var="tp"
no3$var="no3"
chl$var="chl"
# NOTE(review): chl is loaded and tagged but excluded from results.all
# below - confirm this is intended
results.all=rbind(doc, col, tn, tp, no3)
# slopemean is a proportional change; convert to percent
results.all$pctchg<-results.all$slopemean*100
# Carbon-related variables only (DOC and color)
results.c<-rbind(doc, col)
results.c$pctchg<-results.c$slopemean*100
##save results all to combine w geo data for JF
saveRDS(results.all, file="Datasets/SlopesAllVars.rds")
saveRDS(results.c, file="Datasets/SlopesCVars.rds")
boxplot(pctchg~var, data=results.all)
# NOTE(review): axis() has no `log` argument for switching to a log scale;
# this call looks like a leftover and likely has no effect (or warns)
axis(1, log="y")
#do some magic to get this on a log scale even though some values are neg
# Signed log transform so positive and negative slopes can share a log-like
# axis: logslope = sign(slope) * log(1 + |slope|).
pos <- results.all[results.all$slopeSign == 1, ]
neg <- results.all[results.all$slopeSign == 0, ]
# log1p(x) computes log(1 + x) and is numerically accurate for slopes near 0;
# the negation is folded into a single expression
pos$logslope <- log1p(pos$slopemean)
neg$logslope <- -log1p(abs(neg$slopemean))
combologged <- rbind(pos, neg)
boxplot(logslope ~ var, data = combologged, yaxt = 'n')
| /Code/Model/summarize_slopes.R | no_license | limnoliver/LAGOS_DOC | R | false | false | 1,098 | r | doc<-readRDS(file="Datasets/JAGS_DOC_july18.rds")
# Load the remaining per-variable JAGS slope summaries (doc is read above)
col<-readRDS(file="Datasets/JAGS_Color_july18.rds")
tn<-readRDS(file="Datasets/JAGS_TN_july18.rds")
tp<-readRDS(file="Datasets/JAGS_TP_july18.rds")
no3<-readRDS(file="Datasets/JAGS_NO3_july18.rds")
chl<-readRDS(file="Datasets/JAGS_Chla_july18.rds")
# Tag each table with its variable name before stacking
doc$var="doc"
col$var="color"
tn$var="tn"
tp$var="tp"
no3$var="no3"
chl$var="chl"
# NOTE(review): chl is loaded and tagged but excluded from results.all
# below - confirm this is intended
results.all=rbind(doc, col, tn, tp, no3)
# slopemean is a proportional change; convert to percent
results.all$pctchg<-results.all$slopemean*100
# Carbon-related variables only (DOC and color)
results.c<-rbind(doc, col)
results.c$pctchg<-results.c$slopemean*100
##save results all to combine w geo data for JF
saveRDS(results.all, file="Datasets/SlopesAllVars.rds")
saveRDS(results.c, file="Datasets/SlopesCVars.rds")
boxplot(pctchg~var, data=results.all)
# NOTE(review): axis() has no `log` argument for switching to a log scale;
# this call looks like a leftover and likely has no effect (or warns)
axis(1, log="y")
#do some magic to get this on a log scale even though some values are neg
neg<-results.all[results.all$slopeSign==0,]
pos$logslope=log(1+pos$slopemean)
neg$logslope=log(1+abs(neg$slopemean))
neg$logslope=-1*neg$logslope
combologged<-rbind(pos,neg)
boxplot(logslope~var, data=combologged, yaxt='n')
|
### inc
library(plyr)
### par
# Root directory holding per-region imputation folders named like `c4.12001.12500`
dir <- "/home/datasets/GAIT1/GWAS/SFBR/Impute"
### list dir. like `c4.12001.12500`
stopifnot(file.exists(dir))
dirs <- list.dirs(dir, full.names = FALSE, recursive = FALSE)
# NOTE(review): `\\.*` matches zero or more literal dots, so this effectively
# filters on the `^c[1-9]` prefix only (which still covers c10-c22 via the
# leading "c1"/"c2"); `^c[0-9]+\\.` would be stricter
dirs <- grep("^c[1-9]\\.*",dirs, value = TRUE)
stopifnot(length(dirs) > 0)
### extract infro. from `dirs`
# Split "c<chr>.<start>.<end>" into its three fields
out <- strsplit(dirs, "\\.")
# Count genotyped SNPs per region (one line per SNP in snp.geno-list)
num.snps <- laply(dirs, function(x)
  length(readLines(file.path(dir, x, "snp.geno-list"))))
### tab: one row per region with chromosome, bp range and SNP count
tab <- data.frame(dir = dirs,
  chr = as.integer(laply(out, function(x) gsub("c", "", x[1]))),
  start = as.integer(laply(out, function(x) x[2])),
  end = as.integer(laply(out, function(x) x[3])),
  num.snps = num.snps)
# order by chromosome then region start
ord <- with(tab, order(chr, start))
tab <- tab[ord, ]
### print
print(head(tab))
| /projects/01-gait1/R/02-gait1-snps.R | no_license | ugcd/solarius | R | false | false | 757 | r | ### inc
library(plyr)
### par
# Root directory holding per-region imputation folders named like `c4.12001.12500`
dir <- "/home/datasets/GAIT1/GWAS/SFBR/Impute"
### list dir. like `c4.12001.12500`
stopifnot(file.exists(dir))
dirs <- list.dirs(dir, full.names = FALSE, recursive = FALSE)
# NOTE(review): `\\.*` matches zero or more literal dots, so this effectively
# filters on the `^c[1-9]` prefix only (which still covers c10-c22 via the
# leading "c1"/"c2"); `^c[0-9]+\\.` would be stricter
dirs <- grep("^c[1-9]\\.*",dirs, value = TRUE)
stopifnot(length(dirs) > 0)
### extract infro. from `dirs`
# Split "c<chr>.<start>.<end>" into its three fields
out <- strsplit(dirs, "\\.")
# Count genotyped SNPs per region (one line per SNP in snp.geno-list)
num.snps <- laply(dirs, function(x)
  length(readLines(file.path(dir, x, "snp.geno-list"))))
### tab: one row per region with chromosome, bp range and SNP count
tab <- data.frame(dir = dirs,
  chr = as.integer(laply(out, function(x) gsub("c", "", x[1]))),
  start = as.integer(laply(out, function(x) x[2])),
  end = as.integer(laply(out, function(x) x[3])),
  num.snps = num.snps)
# order by chromosome then region start
ord <- with(tab, order(chr, start))
tab <- tab[ord, ]
### print
print(head(tab))
|
#!/usr/bin/Rscript
# This script was written by Oliver Pain whilst at King's College London University.
start.time <- Sys.time()
# Parse command-line options describing the reference data, software
# binaries and output location for the SBLUP score-file pipeline
suppressMessages(library("optparse"))
option_list = list(
make_option("--ref_plink", action="store", default=NA, type='character',
		help="Path to per chromosome reference PLINK files [required]"),
make_option("--ref_keep", action="store", default=NA, type='character',
		help="Keep file to subset individuals in reference for clumping [required]"),
make_option("--ref_freq_chr", action="store", default=NA, type='character',
		help="Path to per chromosome reference PLINK .frq files [required]"),
make_option("--ref_pop_scale", action="store", default=NA, type='character',
		help="File containing the population code and location of the keep file [required]"),
make_option("--plink", action="store", default='plink', type='character',
		help="Path PLINK software binary [required]"),
make_option("--output", action="store", default='./Output', type='character',
		help="Path for output files [required]"),
make_option("--memory", action="store", default=5000, type='numeric',
		help="Memory limit [optional]"),
make_option("--n_cores", action="store", default=1, type='numeric',
		help="Number of cores for parallel computing [optional]"),
make_option("--sumstats", action="store", default=NA, type='character',
		help="GWAS summary statistics in LDSC format [required]"),
make_option("--gcta", action="store", default=NA, type='character',
		help="Path to GCTA binary [required]"),
make_option("--ldsc", action="store", default=NA, type='character',
		help="Path to LD-score regression binary [required]"),
make_option("--ldsc_ref", action="store", default=NA, type='character',
		help="Path to LD-score regression reference data 'eur_w_ld_chr' [required]"),
make_option("--prune_hla", action="store", default=T, type='logical',
		help="Retain only top assocaited variant in HLA region [optional]")
)
opt = parse_args(OptionParser(option_list=option_list))
library(data.table)
library(foreach)
library(doMC)
registerDoMC(opt$n_cores)
# Derive the output directory by stripping the trailing file-name component
# from --output.
# NOTE(review): the pattern paste0(tmp,'*.') treats tmp's last character as
# quantified by '*' - it happens to work for typical names but a fixed
# substring removal would be safer; confirm against unusual output prefixes
tmp<-sub('.*/','',opt$output)
opt$output_dir<-sub(paste0(tmp,'*.'),'',opt$output)
system(paste0('mkdir -p ',opt$output_dir))
# Write a banner plus the parsed options to the run log
sink(file = paste(opt$output,'.log',sep=''), append = F)
cat(
'#################################################################
# polygenic_score_file_creator_SBLUP.R V1.0
# For questions contact Oliver Pain (oliver.pain@kcl.ac.uk)
#################################################################
Analysis started at',as.character(start.time),'
Options are:\n')
cat('Options are:\n')
print(opt)
cat('Analysis started at',as.character(start.time),'\n')
sink()
#####
# Estimate the SNP-heritability
#####
# Run LD-score regression; the h2 estimate is parsed back out of the .log
# file that LDSC writes
system(paste0(opt$ldsc,' --h2 ',opt$sumstats,' --ref-ld-chr ',opt$ldsc_ref,'/ --w-ld-chr ',opt$ldsc_ref,'/ --out ', opt$output_dir,'ldsc_snp_h2_temp'))
# sep='&' should not occur in the log, so each line is read as one field
ldsc_log<-read.table(paste0(opt$output_dir,'ldsc_snp_h2_temp.log'), header=F, sep='&')
ldsc_h2<-ldsc_log[grepl('Total Observed scale h2', ldsc_log$V1),]
ldsc_h2<-gsub('Total Observed scale h2: ','', ldsc_h2)
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('SNP-heritability estimate = ',ldsc_h2,'.\n',sep='')
sink()
# Keep only the point estimate (drop the trailing standard error in brackets)
ldsc_h2<-as.numeric(gsub(' .*','', ldsc_h2))
#####
# Read in sumstats and insert p-values
#####
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('Reading in GWAS and harmonising with reference.\n')
sink()
# Munged sumstats are gzipped; stream them through zcat
GWAS<-fread(cmd=paste0('zcat ',opt$sumstats))
GWAS<-GWAS[complete.cases(GWAS),]
# Two-sided p-value recovered from the Z score
GWAS$P<-2*pnorm(-abs(GWAS$Z))
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('GWAS contains',dim(GWAS)[1],'variants.\n')
sink()
# Encode each allele pair as its IUPAC ambiguity code so the pair can be
# compared against the reference irrespective of allele order
GWAS$IUPAC[GWAS$A1 == 'A' & GWAS$A2 =='T' | GWAS$A1 == 'T' & GWAS$A2 =='A']<-'W'
GWAS$IUPAC[GWAS$A1 == 'C' & GWAS$A2 =='G' | GWAS$A1 == 'G' & GWAS$A2 =='C']<-'S'
GWAS$IUPAC[GWAS$A1 == 'A' & GWAS$A2 =='G' | GWAS$A1 == 'G' & GWAS$A2 =='A']<-'R'
GWAS$IUPAC[GWAS$A1 == 'C' & GWAS$A2 =='T' | GWAS$A1 == 'T' & GWAS$A2 =='C']<-'Y'
GWAS$IUPAC[GWAS$A1 == 'G' & GWAS$A2 =='T' | GWAS$A1 == 'T' & GWAS$A2 =='G']<-'K'
GWAS$IUPAC[GWAS$A1 == 'A' & GWAS$A2 =='C' | GWAS$A1 == 'C' & GWAS$A2 =='A']<-'M'
# Extract SNPs that match the reference
bim<-fread(paste0(opt$ref_plink,'.bim'))
bim$IUPAC[bim$V5 == 'A' & bim$V6 =='T' | bim$V5 == 'T' & bim$V6 =='A']<-'W'
bim$IUPAC[bim$V5 == 'C' & bim$V6 =='G' | bim$V5 == 'G' & bim$V6 =='C']<-'S'
bim$IUPAC[bim$V5 == 'A' & bim$V6 =='G' | bim$V5 == 'G' & bim$V6 =='A']<-'R'
bim$IUPAC[bim$V5 == 'C' & bim$V6 =='T' | bim$V5 == 'T' & bim$V6 =='C']<-'Y'
bim$IUPAC[bim$V5 == 'G' & bim$V6 =='T' | bim$V5 == 'T' & bim$V6 =='G']<-'K'
bim$IUPAC[bim$V5 == 'A' & bim$V6 =='C' | bim$V5 == 'C' & bim$V6 =='A']<-'M'
# Merge on RSID (bim column V2), then keep only variants whose allele pair
# agrees with the reference
bim_GWAS<-merge(bim,GWAS, by.x='V2', by.y='SNP')
GWAS_clean<-bim_GWAS[bim_GWAS$IUPAC.x == bim_GWAS$IUPAC.y,]
GWAS_clean<-GWAS_clean[,c('V2','A1','A2','Z','P','N')]
names(GWAS_clean)<-c('SNP','A1','A2','Z','P','N')
nsnp<-dim(GWAS_clean)[1]
###
# Change to COJO format
###
# Insert frq of each variant based on reference data (per-chromosome .frq files)
freq<-NULL
for(i in 1:22){
	freq_tmp<-fread(paste0(opt$ref_freq_chr,i,'.frq'))
	freq<-rbind(freq, freq_tmp)
}
GWAS_clean_frq_match<-merge(GWAS_clean, freq, by=c('SNP','A1','A2'))
# Variants whose alleles are swapped relative to the reference: flip the frequency
GWAS_clean_frq_switch<-merge(GWAS_clean, freq, by.x=c('SNP','A1','A2'), by.y=c('SNP','A2','A1'))
GWAS_clean_frq_switch$MAF<-1-GWAS_clean_frq_switch$MAF
GWAS_clean<-rbind(GWAS_clean_frq_match, GWAS_clean_frq_switch)
GWAS_clean<-GWAS_clean[,c('SNP','A1','A2','Z','P','N','MAF')]
# Remove invariant SNPs
GWAS_clean<-GWAS_clean[GWAS_clean$MAF != 0,]
GWAS_clean<-GWAS_clean[GWAS_clean$MAF != 1,]
# Transform Z score to beta and se using the formula from
# https://www.ncbi.nlm.nih.gov/pubmed/27019110 (Zhu et al. 2016):
#   b = z / sqrt(2p(1-p)(n + z^2))
# BUG FIX: the denominator previously used sqrt(abs(z)) instead of z^2,
# which does not match the cited formula.
GWAS_clean$beta<-GWAS_clean$Z/sqrt((2*GWAS_clean$MAF)*(1-GWAS_clean$MAF)*(GWAS_clean$N+GWAS_clean$Z^2))
# NOTE(review): variants with Z == 0 would give se = NaN here - confirm the
# munged sumstats cannot contain exact-zero Z scores
GWAS_clean$se<-abs(GWAS_clean$beta)/abs(GWAS_clean$Z)
GWAS_clean<-GWAS_clean[,c('SNP','A1','A2','MAF','beta','se','P','N'),with=F]
names(GWAS_clean)<-c('SNP','A1','A2','freq','b','se','p','N')
fwrite(GWAS_clean, paste0(opt$output_dir,'GWAS_sumstats_COJO.txt'), sep=' ', na = "NA", quote=F)
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('After harmonisation with the reference,',dim(GWAS_clean)[1],'variants remain.\n')
sink()
#####
# Run GCTA SBLUP
#####
# Shrinkage parameter lambda = m * (1/h2 - 1), where m = number of SNPs and
# h2 = the LDSC SNP-heritability estimated above; window of 1000 kb
system(paste0(opt$gcta,' --bfile ',opt$ref_plink,' --keep ',opt$ref_keep,' --cojo-file ',opt$output_dir,'GWAS_sumstats_COJO.txt --cojo-sblup ',nsnp*(1/ldsc_h2-1),' --cojo-wind 1000 --thread-num ',opt$n_cores,' --out ',opt$output_dir,'GWAS_sumstats_SBLUP'))
####
# Calculate mean and sd of polygenic scores at each threshold
####
# Calculate polygenic scores for reference individuals
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('Calculating polygenic scores in reference...')
sink()
# Score every reference individual with the SBLUP-adjusted effects
# (columns of the .sblup.cojo file: 1 = SNP ID, 2 = effect allele, 4 = effect)
system(paste0(opt$plink, ' --bfile ',opt$ref_plink,' --score ',opt$output_dir,'GWAS_sumstats_SBLUP.sblup.cojo 1 2 4 sum --out ',opt$output_dir,'ref.profiles --memory ',floor(opt$memory*0.7)))
# Read in the reference scores
scores<-fread(paste0(opt$output_dir,'ref.profiles.profile'))
# Calculate the mean and sd of scores for each population specified in pop_scale
pop_keep_files<-read.table(opt$ref_pop_scale, header=F, stringsAsFactors=F)
# seq_len() is safe when the file has zero rows (1:nrow() would give c(1, 0))
for(k in seq_len(nrow(pop_keep_files))){
	pop<-pop_keep_files$V1[k]
	keep<-fread(pop_keep_files$V2[k], header=F)
	# Restrict to individuals in this population's keep file
	scores_keep<-scores[(scores$FID %in% keep$V1),]
	ref_scale<-data.frame(Mean=round(mean(scores_keep$SCORESUM),3),
	                      SD=round(sd(scores_keep$SCORESUM),3))
	fwrite(ref_scale, paste0(opt$output,'.',pop,'.scale'), sep=' ')
}
###
# Clean up temporary files
###
system(paste0('rm ',opt$output_dir,'ref.profiles.*'))
system(paste0('rm ',opt$output_dir,'ldsc_snp_h2_temp.log'))
system(paste0('rm ',opt$output_dir,'GWAS_sumstats_COJO.txt'))
end.time <- Sys.time()
time.taken <- end.time - start.time
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('Analysis finished at',as.character(end.time),'\n')
cat('Analysis duration was',as.character(round(time.taken,2)),attr(time.taken, 'units'),'\n')
sink()
| /Scripts/polygenic_score_file_creator_SBLUP/polygenic_score_file_creator_SBLUP.R | no_license | applyfun/GenoPred | R | false | false | 8,317 | r | #!/usr/bin/Rscript
# This script was written by Oliver Pain whilst at King's College London University.
start.time <- Sys.time()
suppressMessages(library("optparse"))
option_list = list(
make_option("--ref_plink", action="store", default=NA, type='character',
help="Path to per chromosome reference PLINK files [required]"),
make_option("--ref_keep", action="store", default=NA, type='character',
help="Keep file to subset individuals in reference for clumping [required]"),
make_option("--ref_freq_chr", action="store", default=NA, type='character',
help="Path to per chromosome reference PLINK .frq files [required]"),
make_option("--ref_pop_scale", action="store", default=NA, type='character',
help="File containing the population code and location of the keep file [required]"),
make_option("--plink", action="store", default='plink', type='character',
help="Path PLINK software binary [required]"),
make_option("--output", action="store", default='./Output', type='character',
help="Path for output files [required]"),
make_option("--memory", action="store", default=5000, type='numeric',
help="Memory limit [optional]"),
make_option("--n_cores", action="store", default=1, type='numeric',
help="Number of cores for parallel computing [optional]"),
make_option("--sumstats", action="store", default=NA, type='character',
help="GWAS summary statistics in LDSC format [required]"),
make_option("--gcta", action="store", default=NA, type='character',
help="Path to GCTA binary [required]"),
make_option("--ldsc", action="store", default=NA, type='character',
help="Path to LD-score regression binary [required]"),
make_option("--ldsc_ref", action="store", default=NA, type='character',
help="Path to LD-score regression reference data 'eur_w_ld_chr' [required]"),
make_option("--prune_hla", action="store", default=T, type='logical',
help="Retain only top assocaited variant in HLA region [optional]")
)
opt = parse_args(OptionParser(option_list=option_list))
library(data.table)
library(foreach)
library(doMC)
registerDoMC(opt$n_cores)
tmp<-sub('.*/','',opt$output)
opt$output_dir<-sub(paste0(tmp,'*.'),'',opt$output)
system(paste0('mkdir -p ',opt$output_dir))
sink(file = paste(opt$output,'.log',sep=''), append = F)
cat(
'#################################################################
# polygenic_score_file_creator_SBLUP.R V1.0
# For questions contact Oliver Pain (oliver.pain@kcl.ac.uk)
#################################################################
Analysis started at',as.character(start.time),'
Options are:\n')
cat('Options are:\n')
print(opt)
cat('Analysis started at',as.character(start.time),'\n')
sink()
#####
# Estimate the SNP-heritability
#####
system(paste0(opt$ldsc,' --h2 ',opt$sumstats,' --ref-ld-chr ',opt$ldsc_ref,'/ --w-ld-chr ',opt$ldsc_ref,'/ --out ', opt$output_dir,'ldsc_snp_h2_temp'))
ldsc_log<-read.table(paste0(opt$output_dir,'ldsc_snp_h2_temp.log'), header=F, sep='&')
ldsc_h2<-ldsc_log[grepl('Total Observed scale h2', ldsc_log$V1),]
ldsc_h2<-gsub('Total Observed scale h2: ','', ldsc_h2)
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('SNP-heritability estimate = ',ldsc_h2,'.\n',sep='')
sink()
ldsc_h2<-as.numeric(gsub(' .*','', ldsc_h2))
#####
# Read in sumstats and insert p-values
#####
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('Reading in GWAS and harmonising with reference.\n')
sink()
GWAS<-fread(cmd=paste0('zcat ',opt$sumstats))
GWAS<-GWAS[complete.cases(GWAS),]
GWAS$P<-2*pnorm(-abs(GWAS$Z))
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('GWAS contains',dim(GWAS)[1],'variants.\n')
sink()
GWAS$IUPAC[GWAS$A1 == 'A' & GWAS$A2 =='T' | GWAS$A1 == 'T' & GWAS$A2 =='A']<-'W'
GWAS$IUPAC[GWAS$A1 == 'C' & GWAS$A2 =='G' | GWAS$A1 == 'G' & GWAS$A2 =='C']<-'S'
GWAS$IUPAC[GWAS$A1 == 'A' & GWAS$A2 =='G' | GWAS$A1 == 'G' & GWAS$A2 =='A']<-'R'
GWAS$IUPAC[GWAS$A1 == 'C' & GWAS$A2 =='T' | GWAS$A1 == 'T' & GWAS$A2 =='C']<-'Y'
GWAS$IUPAC[GWAS$A1 == 'G' & GWAS$A2 =='T' | GWAS$A1 == 'T' & GWAS$A2 =='G']<-'K'
GWAS$IUPAC[GWAS$A1 == 'A' & GWAS$A2 =='C' | GWAS$A1 == 'C' & GWAS$A2 =='A']<-'M'
# Extract SNPs that match the reference
bim<-fread(paste0(opt$ref_plink,'.bim'))
bim$IUPAC[bim$V5 == 'A' & bim$V6 =='T' | bim$V5 == 'T' & bim$V6 =='A']<-'W'
bim$IUPAC[bim$V5 == 'C' & bim$V6 =='G' | bim$V5 == 'G' & bim$V6 =='C']<-'S'
bim$IUPAC[bim$V5 == 'A' & bim$V6 =='G' | bim$V5 == 'G' & bim$V6 =='A']<-'R'
bim$IUPAC[bim$V5 == 'C' & bim$V6 =='T' | bim$V5 == 'T' & bim$V6 =='C']<-'Y'
bim$IUPAC[bim$V5 == 'G' & bim$V6 =='T' | bim$V5 == 'T' & bim$V6 =='G']<-'K'
bim$IUPAC[bim$V5 == 'A' & bim$V6 =='C' | bim$V5 == 'C' & bim$V6 =='A']<-'M'
bim_GWAS<-merge(bim,GWAS, by.x='V2', by.y='SNP')
GWAS_clean<-bim_GWAS[bim_GWAS$IUPAC.x == bim_GWAS$IUPAC.y,]
GWAS_clean<-GWAS_clean[,c('V2','A1','A2','Z','P','N')]
names(GWAS_clean)<-c('SNP','A1','A2','Z','P','N')
nsnp<-dim(GWAS_clean)[1]
###
# Change to COJO format
###
# Insert frq of each variant based on reference data
freq<-NULL
for(i in 1:22){
freq_tmp<-fread(paste0(opt$ref_freq_chr,i,'.frq'))
freq<-rbind(freq, freq_tmp)
}
GWAS_clean_frq_match<-merge(GWAS_clean, freq, by=c('SNP','A1','A2'))
GWAS_clean_frq_switch<-merge(GWAS_clean, freq, by.x=c('SNP','A1','A2'), by.y=c('SNP','A2','A1'))
GWAS_clean_frq_switch$MAF<-1-GWAS_clean_frq_switch$MAF
GWAS_clean<-rbind(GWAS_clean_frq_match, GWAS_clean_frq_switch)
GWAS_clean<-GWAS_clean[,c('SNP','A1','A2','Z','P','N','MAF')]
# Remove invariant SNPs
GWAS_clean<-GWAS_clean[GWAS_clean$MAF != 0,]
GWAS_clean<-GWAS_clean[GWAS_clean$MAF != 1,]
# Transform Z score to beta and se using formula from https://www.ncbi.nlm.nih.gov/pubmed/27019110
# Note, we could use full sumstats rather than munged which would contain more accurate beta and se.
GWAS_clean$beta<-GWAS_clean$Z/sqrt((2*GWAS_clean$MAF)*(1-GWAS_clean$MAF)*(GWAS_clean$N+sqrt(abs(GWAS_clean$Z))))
GWAS_clean$se<-abs(GWAS_clean$beta)/abs(GWAS_clean$Z)
GWAS_clean<-GWAS_clean[,c('SNP','A1','A2','MAF','beta','se','P','N'),with=F]
names(GWAS_clean)<-c('SNP','A1','A2','freq','b','se','p','N')
fwrite(GWAS_clean, paste0(opt$output_dir,'GWAS_sumstats_COJO.txt'), sep=' ', na = "NA", quote=F)
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('After harmonisation with the reference,',dim(GWAS_clean)[1],'variants remain.\n')
sink()
#####
# Run GCTA SBLUP
#####
system(paste0(opt$gcta,' --bfile ',opt$ref_plink,' --keep ',opt$ref_keep,' --cojo-file ',opt$output_dir,'GWAS_sumstats_COJO.txt --cojo-sblup ',nsnp*(1/ldsc_h2-1),' --cojo-wind 1000 --thread-num ',opt$n_cores,' --out ',opt$output_dir,'GWAS_sumstats_SBLUP'))
####
# Calculate mean and sd of polygenic scores at each threshold
####
# Calculate polygenic scores for reference individuals
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('Calculating polygenic scores in reference...')
sink()
system(paste0(opt$plink, ' --bfile ',opt$ref_plink,' --score ',opt$output_dir,'GWAS_sumstats_SBLUP.sblup.cojo 1 2 4 sum --out ',opt$output_dir,'ref.profiles --memory ',floor(opt$memory*0.7)))
# Read in the reference scores
scores<-fread(paste0(opt$output_dir,'ref.profiles.profile'))
# Calculate the mean and sd of scores for each population specified in pop_scale
pop_keep_files<-read.table(opt$ref_pop_scale, header=F, stringsAsFactors=F)
for(k in 1:dim(pop_keep_files)[1]){
pop<-pop_keep_files$V1[k]
keep<-fread(pop_keep_files$V2[k], header=F)
scores_keep<-scores[(scores$FID %in% keep$V1),]
ref_scale<-data.frame( Mean=round(mean(scores_keep$SCORESUM),3),
SD=round(sd(scores_keep$SCORESUM),3))
fwrite(ref_scale, paste0(opt$output,'.',pop,'.scale'), sep=' ')
}
###
# Clean up temporary files
###
system(paste0('rm ',opt$output_dir,'ref.profiles.*'))
system(paste0('rm ',opt$output_dir,'ldsc_snp_h2_temp.log'))
system(paste0('rm ',opt$output_dir,'GWAS_sumstats_COJO.txt'))
end.time <- Sys.time()
time.taken <- end.time - start.time
sink(file = paste(opt$output,'.log',sep=''), append = T)
cat('Analysis finished at',as.character(end.time),'\n')
cat('Analysis duration was',as.character(round(time.taken,2)),attr(time.taken, 'units'),'\n')
sink()
|
# The conceptual figures use random draws from SADs.
# Therefore they can look slightly different every time you run this script
##########################
# load packages and define some function
library(tidyverse)
library(vegan)
library(cowplot)
library(mobsim)
library(betaC)
rarefy_long <- function(x) {
if(is.matrix(x)==F) x=matrix(x,nrow = 1, byrow =T, dimnames= list("x", names(x)))
alphas <-
lapply(row.names(x), function(i)
return(as.numeric(vegan::rarefy(
x[i, ], sample = 1:sum(x[i, ])
)))) %>%
lapply(function(x)
return(data.frame(
S_n = as.numeric(x), N = 1:length(x)
)))
names(alphas) <- rownames(x)
alphas <- alphas %>% plyr::ldply(.id = "Curve")
alphas$type = "minor"
mean_alpha <-
data.frame(
Curve = "mean_alpha",
S_n = colMeans(as.matrix(vegan::rarefy(
x, 1:min(rowSums(x))
))),
N = 1:min(rowSums(x)),
type = "major"
)
gamma <-
data.frame(
Curve = "gamma",
S_n = as.numeric(vegan::rarefy(colSums(x), 1:sum(x))),
N = 1:sum(x),
type = "major"
)
out = alphas %>% full_join(mean_alpha, by = c("Curve", "S_n", "N", "type")) %>% full_join(gamma, by = c("Curve", "S_n", "N", "type"))
return(out)
}
splitgamma <-
function(x,
type = c("distinct", "random", "on_curve"),
n = round(sum(x) / 2),
iter = 150) {
if (type == "distinct") {
alpha1 = x
alpha2 = x
#index1=sample(1:length(x),length(x)/2)
#index2=setdiff(1:length(x),index1)
#alpha1[index1] = 0
#alpha2[index2] = 0
# alpha1[seq(1, length(x), 2)] = 0
# alpha2[seq(2, length(x), 2)] = 0
alpha2[1]=0
for (i in 2: length(x)){
if(sum(alpha1) > sum(alpha2)){
alpha1[i]=0
}else{
alpha2[i]=0
}
}
}
if (type == "random") {
alpha1 = sample_sad_N(x,N = n, replace = F)
alpha2 = x - alpha1
}
if (type == "on_curve") {
cases = lapply(1:iter, function(i)
sample_sad_N(x = x, N = n))
curves = lapply(cases, function(m)
rarefy(m, 1:n))
gamma_curve = rarefy(x, 1:n)
SS = sapply(curves, function(Sn) {
return(sum((gamma_curve - Sn) ^ 2))
})
alpha1 = cases[[order(SS)[1]]]
alpha2 = x - alpha1
}
return(rbind(alpha1, alpha2))
}
# Take subsamples of (Meta-)Community abundance vectors (individual based)
sample_sad_N<-function(x,N, replace=F){
sample_fun<-function(x,N, replace){
index=1:length(x)
y=rep(index,x)
samp<-data.frame(Species=sample(y, size = N, replace = replace)) %>%
group_by_all() %>%
count()
missing=data.frame(Species=setdiff(index, samp$Species))
samp=samp %>% full_join(missing, by = "Species") %>% arrange(Species) %>% pull(var = n)
samp[is.na(samp)]<-0
return(samp)
}
if(is.data.frame(x))x<- as.matrix(x)
if(is.vector(x)){
names=names(x)
x<-matrix(x, byrow = T, nrow = 1)
} else{
names<- dimnames(x)
}
if(any(rowSums(x)==0)) stop("Remove sites without individuals!")
out<-apply(x,1,sample_fun, replace = replace, N= N)
out<-t(out)
if(dim(out)[1]==1){
out= out[1,,drop=T]
names(out)= names
}else{
dimnames(out)<-names
}
return(out)
}
########################################################################################
# Styling
theme_set(theme_cowplot())
mytheme= theme(legend.position = "none",
axis.text=element_text(size=8),
axis.title=element_text(size=10),
plot.title = element_text(size=8,face = "bold"))
text_size= 8*5/ 14
#########################################################################
# Figure 1
# reference meta-community
# color palette
pal2<-viridisLite::magma(5)[c(1,4)]
base = as.integer(sim_sad(s_pool = 450, n_sim = 1000, sad_coef = list(cv_abund =2)) )
base_m = splitgamma(base, type = "on_curve",iter =300 )
base_curve <- rarefy_long(base_m)
base_curve <-base_curve %>% mutate(Curve = relevel(Curve, "gamma"))
base_plot <-
base_curve %>% filter(type == "major") %>% ggplot() +
geom_abline(intercept = specnumber(base), slope = 0, linetype=5, col=pal2[1])+
geom_abline(intercept = mean(specnumber(base_m)), slope = 0, linetype=5, col=pal2[2])+
geom_vline(xintercept = 250, linetype= "dashed", color ="grey")+
geom_line(aes(N, S_n, col = Curve),size = 1) +
annotate("text", size= text_size, x= 1000, y= 25,col=1, label=paste0("beta == ", round(specnumber(base)/mean(specnumber(base_m)),2)),parse = T ,hjust="right",vjust="center")+
annotate("text", size= text_size,x= 600, y= 25,col=1, label="beta[S[n]] == 1", nudge_y = -30,parse = T ,hjust="right", vjust="center")+
coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300),expand = F)+
labs(title = "reference", x= "Individuals", y= "Rarefied richness")+
mytheme + scale_color_manual(values = pal2)
# fewer individuals
individuals = splitgamma(base, type = "on_curve", iter = 300,n = 500)[1, ]
individuals_m = splitgamma(individuals, type = "on_curve", iter = 200)
individuals_curve <- rarefy_long(individuals_m)
individuals_curve <-individuals_curve %>% mutate(Curve = relevel(Curve, "gamma"))
individuals_plot <-
individuals_curve %>% filter(type == "major") %>% ggplot(aes(N, S_n, col = Curve)) +
geom_abline(intercept = specnumber(individuals), slope = 0, linetype=5, col=pal2[1])+
geom_abline(intercept = mean(specnumber(individuals_m)), slope = 0, linetype=5,, col=pal2[2])+
geom_vline(xintercept = 250, linetype= "dashed", color ="grey")+
geom_line(data=base_curve %>% filter(type=="major", Curve=="gamma"), linetype= "dotted", size=1, col= "grey") +
geom_line(size = 1) +
annotate("text", size= text_size,x= 1000, y= 25,col=1, label=paste0("beta == ", round(specnumber(individuals)/mean(specnumber(individuals_m)),2)), nudge_y = -30,parse = T ,hjust="right", vjust="center")+
annotate("text", size= text_size,x= 600, y= 25,col=1, label="beta[S[n]] == 1", nudge_y = -30,parse = T ,hjust="right",vjust="center")+
coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300),expand = F) +
labs(title = "fewer individuals", x= "Individuals", y= "Rarefied richness")+
mytheme + scale_color_manual(values = pal2)
# SAD change
pool = as.integer(sim_sad(s_pool = 80, n_sim = 1000, sad_coef = list(cv_abund = 2)) )# sim_ENS(30, 85, 1000)
pool_m = splitgamma(pool, type = "on_curve")
pool_curve <- rarefy_long(pool_m)
pool_curve <-pool_curve %>% mutate(Curve = relevel(Curve, "gamma"))
pool_plot <-
pool_curve %>% filter(type == "major") %>% ggplot(aes(N, S_n, col = Curve)) +
geom_abline(intercept = specnumber(pool), slope = 0, linetype=5, col=pal2[1])+
geom_abline(intercept = mean(specnumber(pool_m)), slope = 0, linetype=5, col=pal2[2])+
geom_vline(xintercept = 250, linetype= "dashed", color ="grey")+
geom_line(data=base_curve %>% filter(type=="major", Curve=="gamma"), linetype= "dotted", size=1, col= "grey") +
geom_line(size = 1) +
annotate("text", size= text_size,x= 1000, y=25,col=1, label=paste0("beta == ", round(specnumber(pool)/mean(specnumber(pool_m)),2)), nudge_y = -30,parse = T ,hjust="right", vjust="center")+
annotate("text", size= text_size,x= 600, y= 25,col=1, label="beta[S[n]] == 1", nudge_y = -30,parse = T ,hjust="right", vjust="center")+
coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300),expand = F) +
labs(title = "smaller species pool", x= "Individuals", y= "Rarefied richness")+
mytheme + scale_color_manual(values = pal2)
# aggregation
space_m = splitgamma(base, type = "distinct")
space_curve <- rarefy_long(space_m)
space_curve <-space_curve %>% mutate(Curve = relevel(Curve, "gamma"))
space_plot <-
space_curve %>% filter(type == "major") %>% ggplot(aes(N, S_n, col = Curve)) +
geom_abline(intercept = specnumber(base), slope = 0, linetype=5, col=pal2[1])+
geom_abline(intercept = mean(rarefy(space_m, min(rowSums(space_m)))), slope = 0, linetype=5, col=pal2[2])+
geom_vline(xintercept = 250, linetype= "dashed", color ="grey")+
geom_line(size = 1) +
annotate("text", size= text_size,x= 1000, y= 25,col=1, label=paste0("beta == ", round(specnumber(base)/mean(specnumber(space_m)),2)), nudge_y = -30,parse = T ,hjust="right", vjust="center")+
annotate("text",size= text_size,x= 690, y= 25,col=1, label="beta[S[n]] == 1.38", nudge_y = -30,parse = T ,hjust="right",vjust="center")+
labs(title = "intraspecific aggregation", x= "Individuals", y= "Rarefied richness")+
coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300),expand = F) +
mytheme + scale_color_manual(values = pal2)
Figure1 <- plot_grid(
NULL,
base_plot,
NULL,
pool_plot,
individuals_plot,
space_plot,
ncol = 3,
labels = c(NA, "A", NA, "B", "C", "D"),
align= "hv"
)
pdf("Figures/Figure1a.pdf",width = 15.6*0.393701,height = 10.4*0.393701,useDingbats = F)
Figure1
dev.off()
save_plot("Figures/Figure1.pdf",plot = Figure1, ncol = 3,nrow = 2,base_height = 5.2,base_asp = 1, units="cm")
ggsave("conceptual figures/Figure1.jpg",Figure1, width = 18, height = 12, units="cm")
########################################################################################################
# Figures 2 and 3
library(mobsim)
# color palette
pal <- viridisLite::viridis(10)[c(1,8)]
names(pal) <- c("large", "small")
base = sim_sad(s_pool = 100, n_sim = 1000, sad_coef = list(cv_abund = 2))
space_m = splitgamma(base, type = "distinct")
space_curve <- rarefy_long(space_m)
pool2 = sim_sad(s_pool = 500, n_sim = 1000, sad_coef = list(cv_abund =2))
space2_m = splitgamma(pool2, type = "distinct")
space2_curve <- rarefy_long(space2_m)
N1<- min(rowSums(space_m))
gamma_Sn1<-rarefy(base,N1)
alpha_Sn1<- mean(rarefy(space_m,N1))
cov_value= Chat(pool2,min(rowSums(space2_m)))
cov_value_small= Chat(base,min(rowSums(space_m)))
N_low<-round(invChat(base, cov_value))
SnC_gamma <-D0.hat(base, N_low)
SnC_alpha <- mean(apply(space_m,1,D0.hat,m=N_low))
betaC = SnC_gamma/SnC_alpha
beta_C_small<-beta_C(space_m, cov_value)
beta_C_large<-beta_C(space2_m, cov_value)
space_curve$Curve<-relevel(space_curve$Curve, "gamma")
space2_curve$Curve<-relevel(space2_curve$Curve, "gamma")
small_plot_C <-
ggplot() +
geom_line(size = 1) +
#geom_hline(yintercept = specnumber(base), linetype=5)+
geom_hline(yintercept =SnC_gamma, linetype=5, col= "darkgrey")+
geom_vline(xintercept = N_low, linetype = "dashed" , col= "darkgrey")+
geom_hline(yintercept =SnC_alpha, linetype=5, col= "darkgrey")+
geom_abline(slope = 1-cov_value, intercept = SnC_gamma - ((1-cov_value)*N_low), size=1, col= "darkgrey")+
geom_line(aes(N, S_n, linetype = Curve), data= filter(space_curve,type == "major"), size = 1,col= pal[2]) +
#geom_text(aes(x= N_low, y= 0), label=paste0("n = ", round(N_low,2)),nudge_x = 20, nudge_y = 10,parse = F ,hjust="left")+
geom_text(size= text_size,aes(x= 1000, y= SnC_alpha+4), label=paste0("beta[C] == ", round(beta_C_small,3)),nudge_x = , nudge_y = -25,parse = T ,hjust="right", vjust="bottom")+
labs(title="Small species pool\n(100 spp.)", x= "Individuals", y= "Rarefied richness")+ #"small pool - beta\nstandardised by coverage\nof large pool"
coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300),expand = F) +
mytheme + theme(plot.title = element_text(colour = pal[2]))
small_plot_N <-
ggplot(data =NULL) +
geom_hline(yintercept =alpha_Sn1, linetype=5, col= "darkgrey")+
geom_vline(xintercept = min(rowSums(space_m)), linetype = "dashed", col= "darkgrey")+
geom_hline(yintercept = gamma_Sn1, linetype=5, col= "darkgrey")+
geom_abline(slope = 1-cov_value_small, intercept = gamma_Sn1 - ((1-cov_value_small)*N1), size=1, col= "darkgrey")+
geom_line(aes(N, S_n, linetype = Curve), data= filter(space_curve,type == "major"), size = 1, col= pal[2]) +
labs(title="Small species pool\n(100 spp.)", x= "Individuals", y= "Rarefied richness")+
#geom_text(aes(x= N1,y= 0), label=paste0("n = ", round(N1,2)),nudge_x = 20, nudge_y = 10, parse = F, hjust="left" )+
geom_text(size= text_size,aes(x= 1000, y= alpha_Sn1), label=paste0("beta[s[n]] == ", round(gamma_Sn1/alpha_Sn1,3)),nudge_x = , nudge_y = -25,parse = T ,hjust="right", vjust="bottom")+
coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300), expand = F) +
mytheme + theme(plot.title = element_text(colour = pal[2]))
N_low2 <-min(rowSums(space2_m))
gamma_Sn<-D0.hat(pool2,N_low2)
alpha_Sn<- mean(apply(space2_m,1, D0.hat,N_low2))
large_plot_N <-
ggplot(data =NULL) +
geom_hline(yintercept =alpha_Sn, linetype=5, col= "darkgrey")+
geom_vline(xintercept = min(rowSums(space2_m)), linetype = "dashed", col= "darkgrey")+
geom_hline(yintercept = gamma_Sn, linetype=5, col= "darkgrey")+
geom_abline(slope = 1-cov_value, intercept = gamma_Sn - ((1-cov_value)*N_low2), size=1, col= "darkgrey")+
geom_line(aes(N, S_n, linetype = Curve), data= filter(space2_curve,type == "major"), size = 1, col= pal[1]) +
labs(title="Large species pool\n(500 spp.)", x= "Individuals", y= "Rarefied richness")+
#geom_text(aes(x= N_low2,y= 0), label=paste0("n = ", round(N_low2,2)),nudge_x = 20, nudge_y = 10, parse = F, hjust="left" )+
geom_text(size= text_size, aes(x= 1000, y= alpha_Sn), label=paste0("beta[s[n]] == ", round(gamma_Sn/alpha_Sn,3)), nudge_y = -25,parse = T ,hjust="right", vjust="bottom")+
coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300), expand = F) +
mytheme + theme(plot.title = element_text(colour = pal[1]))
large_plot_C <-
ggplot() +
geom_hline(yintercept =alpha_Sn, linetype=5, col= "darkgrey")+
geom_vline(xintercept = min(rowSums(space2_m)), linetype = "dashed", col= "darkgrey")+
geom_hline(yintercept = gamma_Sn, linetype=5, col= "darkgrey")+
geom_abline(slope = 1-cov_value, intercept = gamma_Sn - ((1-cov_value)*N_low2), size=1, col= "darkgrey")+
geom_line(aes(N, S_n, linetype = Curve), data= filter(space2_curve,type == "major"), size = 1, col= pal[1]) +
labs(title="Large species pool\n(500 spp.)", x= "Individuals", y= "Rarefied richness")+
geom_text(size= text_size,aes(x= 1000, y= alpha_Sn), label=paste0("beta[C] == ", round(beta_C_large,3)), nudge_y = -25,parse = T ,hjust="right", vjust="bottom")+
coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300), expand = F) +
mytheme +
theme(plot.title = element_text(colour = pal[1]))
# scaling relationship
dat1=tibble(N=1:(2*min(rowSums(space_m))),
C=map_dbl(N,function(N) Chat(colSums(space_m), N)),
beta_Sn = map_dbl(N,function(N)beta_SN(space_m, N)),
Species_pool= "small"
)
dat2=tibble(N=1:(2*min(rowSums(space2_m))),
C=map_dbl(N,function(N) Chat(colSums(space2_m), N)),
beta_Sn = map_dbl(N,function(N)beta_SN(space2_m, N)),
Species_pool= "large"
)
dat=bind_rows(dat1,dat2)
N_plot<-ggplot(data=dat)+
geom_line(aes(x=N, y=beta_Sn, col= Species_pool), size=1)+
theme(legend.position = "bottom")+
labs( x= "Individuals", y= expression(beta[s[n]]))+scale_color_manual(values = pal)+mytheme
C_plot<-ggplot(data=dat)+
geom_line(aes(x=C, y=beta_Sn, col= Species_pool),size=1)+
theme(legend.position = "bottom")+
labs( x= "Estimated coverage", y= expression(beta[C]))+
scale_color_manual(values = pal)+
geom_vline(xintercept = cov_value, linetype = "dashed", col= "darkgrey")+
mytheme
Figure2<-plot_grid(large_plot_N,small_plot_N, N_plot,ncol = 3, labels = "AUTO")
Figure3<-plot_grid(large_plot_C,small_plot_C, C_plot, ncol = 3, labels = "AUTO")
Figure2
Figure3
save_plot("Figures/Figure2.pdf",Figure2,ncol = 3,nrow = 1,base_height = 9,base_width = 5.2, units="cm" )
save_plot("Figures/Figure3.pdf",Figure3,ncol = 3,nrow = 1,base_height = 9,base_width = 5.2, units="cm" )
ggsave("conceptual figures/Figure2.jpg",Figure2, width = 18, height = 10, units="cm")
ggsave("conceptual figures/Figure3.jpg",Figure3, width = 18, height = 10, units="cm")
| /conceptual figures/conceptual figures.R | no_license | T-Engel/betaC | R | false | false | 16,503 | r | # The conceptual figures use random draws from SADs.
# Therefore they can look slightly different every time you run this script
##########################
# load packages and define some function
library(tidyverse)
library(vegan)
library(cowplot)
library(mobsim)
library(betaC)
# Build a long-format table of individual-based rarefaction curves for a
# site-by-species abundance matrix `x` (a bare abundance vector is treated as
# a single site). The result has one row per (Curve, N) with columns:
#   Curve - site name, "mean_alpha" (average of site curves) or "gamma" (pooled)
#   S_n   - rarefied richness at N individuals
#   N     - number of individuals subsampled
#   type  - "minor" for individual site curves, "major" for mean_alpha / gamma
# Requires vegan (rarefy) and plyr (ldply); consumed by the plotting code below.
rarefy_long <- function(x) {
  # Coerce a single abundance vector to a 1-row matrix so row-wise code works.
  if(is.matrix(x)==F) x=matrix(x,nrow = 1, byrow =T, dimnames= list("x", names(x)))
  # One rarefaction curve per site, evaluated at every sample size 1..N_site,
  # then reshaped to a data frame with columns S_n and N.
  alphas <-
    lapply(row.names(x), function(i)
      return(as.numeric(vegan::rarefy(
        x[i, ], sample = 1:sum(x[i, ])
      )))) %>%
    lapply(function(x)
      return(data.frame(
        S_n = as.numeric(x), N = 1:length(x)
      )))
  names(alphas) <- rownames(x)
  # Stack the per-site curves; list names become the "Curve" column.
  alphas <- alphas %>% plyr::ldply(.id = "Curve")
  alphas$type = "minor"
  # Mean alpha curve: average the site curves up to the smallest site total,
  # so every site contributes at every N.
  mean_alpha <-
    data.frame(
      Curve = "mean_alpha",
      S_n = colMeans(as.matrix(vegan::rarefy(
        x, 1:min(rowSums(x))
      ))),
      N = 1:min(rowSums(x)),
      type = "major"
    )
  # Gamma curve: rarefaction of the pooled (column-summed) community.
  gamma <-
    data.frame(
      Curve = "gamma",
      S_n = as.numeric(vegan::rarefy(colSums(x), 1:sum(x))),
      N = 1:sum(x),
      type = "major"
    )
  # full_join on all columns: with disjoint Curve values this simply appends
  # the mean_alpha and gamma rows to the per-site rows.
  out = alphas %>% full_join(mean_alpha, by = c("Curve", "S_n", "N", "type")) %>% full_join(gamma, by = c("Curve", "S_n", "N", "type"))
  return(out)
}
# Split a gamma-scale SAD `x` (species abundance vector) into two alpha
# communities, returned as a 2-row matrix rbind(alpha1, alpha2).
#
# type:
#   "distinct" - each species is assigned wholly to one community, greedily
#                balancing the two community sizes (maximal species turnover).
#   "random"   - alpha1 is a random draw of `n` individuals without
#                replacement; alpha2 is the remainder (no aggregation).
#   "on_curve" - among `iter` random n-individual draws, keep the one whose
#                rarefaction curve is closest (least squares) to the gamma
#                curve, so the split sits "on" the gamma rarefaction curve.
# n    - individuals in alpha1 for "random"/"on_curve" (default: half of x).
# iter - number of candidate draws for "on_curve".
# "random"/"on_curve" use sample_sad_N() (defined below) and vegan's rarefy().
splitgamma <-
  function(x,
           type = c("distinct", "random", "on_curve"),
           n = round(sum(x) / 2),
           iter = 150) {
    # Validate `type` and resolve the unsupplied default to "distinct";
    # without this, the length-3 default vector errors in `if` (R >= 4.2).
    type <- match.arg(type)
    if (type == "distinct") {
      # Both communities start with every species; then each species is
      # zeroed out of whichever community currently holds more individuals,
      # keeping the two totals as balanced as possible.
      alpha1 = x
      alpha2 = x
      alpha2[1] = 0
      # seq_len(...)[-1] is empty for a single species (2:length(x) is not).
      for (i in seq_len(length(x))[-1]) {
        if (sum(alpha1) > sum(alpha2)) {
          alpha1[i] = 0
        } else {
          alpha2[i] = 0
        }
      }
    }
    if (type == "random") {
      alpha1 = sample_sad_N(x, N = n, replace = F)
      alpha2 = x - alpha1
    }
    if (type == "on_curve") {
      # Draw `iter` candidate subsamples and keep the one whose rarefaction
      # curve deviates least (sum of squares) from the gamma curve.
      cases = lapply(1:iter, function(i)
        sample_sad_N(x = x, N = n))
      curves = lapply(cases, function(m)
        rarefy(m, 1:n))
      gamma_curve = rarefy(x, 1:n)
      SS = sapply(curves, function(Sn) {
        return(sum((gamma_curve - Sn) ^ 2))
      })
      alpha1 = cases[[order(SS)[1]]]
      alpha2 = x - alpha1
    }
    return(rbind(alpha1, alpha2))
  }
# Take subsamples of (Meta-)Community abundance vectors (individual based).
#
# Draws N individuals from each row of an abundance table and returns the
# resulting per-species counts (zeros retained for species not drawn).
#   x       - abundance vector, matrix, or data.frame (sites in rows,
#             species in columns); every row must contain >= 1 individual.
#   N       - number of individuals to draw per row.
#   replace - sample with replacement? (default FALSE, in which case N must
#             not exceed each row total or sample.int() errors).
# Returns an object shaped like the input: a named vector for vector input,
# otherwise a matrix carrying the input's dimnames.
sample_sad_N<-function(x,N, replace=F){
  # Subsample one abundance vector: expand to one entry per individual
  # (species ids), draw N of them, and tabulate back to per-species counts.
  # sample.int() on the index set avoids sample()'s scalar-expansion gotcha
  # when only one individual exists; tabulate() keeps zero counts, replacing
  # the original dplyr count/join/NA-patch pipeline with identical results.
  sample_fun<-function(x,N, replace){
    individuals <- rep(seq_along(x), x)
    drawn <- individuals[sample.int(length(individuals), size = N, replace = replace)]
    tabulate(drawn, nbins = length(x))
  }
  if(is.data.frame(x))x<- as.matrix(x)
  if(is.vector(x)){
    # Remember the species names; matrix() below drops them.
    names=names(x)
    x<-matrix(x, byrow = T, nrow = 1)
  } else{
    names<- dimnames(x)
  }
  if(any(rowSums(x)==0)) stop("Remove sites without individuals!")
  # One subsample per row; apply() returns species-by-site, so transpose back.
  out<-apply(x,1,sample_fun, replace = replace, N= N)
  out<-t(out)
  if(dim(out)[1]==1){
    # Collapse the single-site case back to a plain named vector.
    out= out[1,,drop=T]
    names(out)= names
  }else{
    dimnames(out)<-names
  }
  return(out)
}
########################################################################################
# Styling
# Global ggplot theme plus a shared per-panel tweak used by every figure below.
theme_set(theme_cowplot())
mytheme= theme(legend.position = "none",
               axis.text=element_text(size=8),
               axis.title=element_text(size=10),
               plot.title = element_text(size=8,face = "bold"))
# annotate()/geom_text() sizes are in mm; 5/14 is the approximate pt-to-mm
# conversion, so this renders text at roughly 8 pt to match the axis text.
text_size= 8*5/ 14
#########################################################################
# Figure 1
# reference meta-community
# color palette
# Two colors from magma: pal2[1] for the gamma curve, pal2[2] for mean alpha.
pal2<-viridisLite::magma(5)[c(1,4)]
# Reference gamma SAD: 450-species pool, 1000 individuals, uneven abundances.
base = as.integer(sim_sad(s_pool = 450, n_sim = 1000, sad_coef = list(cv_abund =2)) )
# Split into two alpha communities lying on the gamma rarefaction curve,
# so the reference panel shows beta_Sn = 1 (no aggregation).
base_m = splitgamma(base, type = "on_curve",iter =300 )
base_curve <- rarefy_long(base_m)
# Put "gamma" first so it gets the first color/linetype in the legend order.
base_curve <-base_curve %>% mutate(Curve = relevel(Curve, "gamma"))
base_plot <-
  base_curve %>% filter(type == "major") %>% ggplot() +
  # Horizontal guides at observed gamma and mean-alpha richness.
  geom_abline(intercept = specnumber(base), slope = 0, linetype=5, col=pal2[1])+
  geom_abline(intercept = mean(specnumber(base_m)), slope = 0, linetype=5, col=pal2[2])+
  # Vertical guide at n = 250 individuals, shared across all four panels.
  geom_vline(xintercept = 250, linetype= "dashed", color ="grey")+
  geom_line(aes(N, S_n, col = Curve),size = 1) +
  # Whittaker beta (gamma/alpha richness) and the rarefied beta_Sn labels;
  # parse = T renders them as plotmath expressions.
  annotate("text", size= text_size, x= 1000, y= 25,col=1, label=paste0("beta == ", round(specnumber(base)/mean(specnumber(base_m)),2)),parse = T ,hjust="right",vjust="center")+
  # NOTE(review): `nudge_y` is not an annotate() argument and appears to be
  # ignored (with a warning) — confirm and consider removing.
  annotate("text", size= text_size,x= 600, y= 25,col=1, label="beta[S[n]] == 1", nudge_y = -30,parse = T ,hjust="right", vjust="center")+
  coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300),expand = F)+
  labs(title = "reference", x= "Individuals", y= "Rarefied richness")+
  mytheme + scale_color_manual(values = pal2)
# fewer individuals: take half the individuals (n = 500) of the reference
# community while staying on the gamma curve, so only N differs from panel A.
individuals = splitgamma(base, type = "on_curve", iter = 300, n = 500)[1, ]
individuals_m = splitgamma(individuals, type = "on_curve", iter = 200)
individuals_curve <- rarefy_long(individuals_m)
# Put "gamma" first so colors match the reference panel.
individuals_curve <- individuals_curve %>% mutate(Curve = relevel(Curve, "gamma"))
individuals_plot <-
  individuals_curve %>% filter(type == "major") %>% ggplot(aes(N, S_n, col = Curve)) +
  # Horizontal guides at observed gamma and mean-alpha richness.
  # (Fixed a stray double comma here — `linetype=5,,` passed an empty
  # argument to geom_abline.)
  geom_abline(intercept = specnumber(individuals), slope = 0, linetype = 5, col = pal2[1]) +
  geom_abline(intercept = mean(specnumber(individuals_m)), slope = 0, linetype = 5, col = pal2[2]) +
  geom_vline(xintercept = 250, linetype = "dashed", color = "grey") +
  # Reference gamma curve (dotted grey) for comparison with panel A.
  geom_line(data = base_curve %>% filter(type == "major", Curve == "gamma"), linetype = "dotted", size = 1, col = "grey") +
  geom_line(size = 1) +
  # Whittaker beta and rarefied beta_Sn labels as plotmath expressions.
  # (Dropped `nudge_y = -30`, which annotate() does not accept and ignored.)
  annotate("text", size = text_size, x = 1000, y = 25, col = 1, label = paste0("beta == ", round(specnumber(individuals) / mean(specnumber(individuals_m)), 2)), parse = T, hjust = "right", vjust = "center") +
  annotate("text", size = text_size, x = 600, y = 25, col = 1, label = "beta[S[n]] == 1", parse = T, hjust = "right", vjust = "center") +
  coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300), expand = F) +
  labs(title = "fewer individuals", x = "Individuals", y = "Rarefied richness") +
  mytheme + scale_color_manual(values = pal2)
# SAD change
# Smaller species pool: 80 species instead of 450, same 1000 individuals.
pool = as.integer(sim_sad(s_pool = 80, n_sim = 1000, sad_coef = list(cv_abund = 2)) )# sim_ENS(30, 85, 1000)
pool_m = splitgamma(pool, type = "on_curve")
pool_curve <- rarefy_long(pool_m)
# Put "gamma" first so colors match the reference panel.
pool_curve <-pool_curve %>% mutate(Curve = relevel(Curve, "gamma"))
pool_plot <-
  pool_curve %>% filter(type == "major") %>% ggplot(aes(N, S_n, col = Curve)) +
  # Horizontal guides at observed gamma and mean-alpha richness.
  geom_abline(intercept = specnumber(pool), slope = 0, linetype=5, col=pal2[1])+
  geom_abline(intercept = mean(specnumber(pool_m)), slope = 0, linetype=5, col=pal2[2])+
  geom_vline(xintercept = 250, linetype= "dashed", color ="grey")+
  # Reference gamma curve (dotted grey) for comparison with panel A.
  geom_line(data=base_curve %>% filter(type=="major", Curve=="gamma"), linetype= "dotted", size=1, col= "grey") +
  geom_line(size = 1) +
  # Whittaker beta and rarefied beta_Sn labels (plotmath via parse = T).
  # NOTE(review): `nudge_y` is not an annotate() argument and appears to be
  # ignored (with a warning) — confirm and consider removing.
  annotate("text", size= text_size,x= 1000, y=25,col=1, label=paste0("beta == ", round(specnumber(pool)/mean(specnumber(pool_m)),2)), nudge_y = -30,parse = T ,hjust="right", vjust="center")+
  annotate("text", size= text_size,x= 600, y= 25,col=1, label="beta[S[n]] == 1", nudge_y = -30,parse = T ,hjust="right", vjust="center")+
  coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300),expand = F) +
  labs(title = "smaller species pool", x= "Individuals", y= "Rarefied richness")+
  mytheme + scale_color_manual(values = pal2)
# aggregation
# Intraspecific aggregation: species split *distinctly* between the two alpha
# communities, so beta and beta_Sn both exceed 1.
space_m = splitgamma(base, type = "distinct")
space_curve <- rarefy_long(space_m)
# Put "gamma" first so colors match the reference panel.
space_curve <-space_curve %>% mutate(Curve = relevel(Curve, "gamma"))
space_plot <-
  space_curve %>% filter(type == "major") %>% ggplot(aes(N, S_n, col = Curve)) +
  # Horizontal guides: gamma richness and mean rarefied alpha richness at the
  # smallest community size.
  geom_abline(intercept = specnumber(base), slope = 0, linetype=5, col=pal2[1])+
  geom_abline(intercept = mean(rarefy(space_m, min(rowSums(space_m)))), slope = 0, linetype=5, col=pal2[2])+
  geom_vline(xintercept = 250, linetype= "dashed", color ="grey")+
  geom_line(size = 1) +
  annotate("text", size= text_size,x= 1000, y= 25,col=1, label=paste0("beta == ", round(specnumber(base)/mean(specnumber(space_m)),2)), nudge_y = -30,parse = T ,hjust="right", vjust="center")+
  # NOTE(review): the 1.38 here is hard-coded while the other labels are
  # computed from the data; since `base` is a fresh random draw each run, the
  # displayed value may not match the actual beta_Sn — confirm.
  annotate("text",size= text_size,x= 690, y= 25,col=1, label="beta[S[n]] == 1.38", nudge_y = -30,parse = T ,hjust="right",vjust="center")+
  labs(title = "intraspecific aggregation", x= "Individuals", y= "Rarefied richness")+
  coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300),expand = F) +
  mytheme + scale_color_manual(values = pal2)
# Assemble the four panels into a 3x2 grid; the NULL entries leave the first
# and third cells of the top row empty so panel A sits centered above B-D.
Figure1 <- plot_grid(
  NULL,
  base_plot,
  NULL,
  pool_plot,
  individuals_plot,
  space_plot,
  ncol = 3,
  labels = c(NA, "A", NA, "B", "C", "D"),
  align= "hv"
)
# pdf() takes inches; 0.393701 converts the cm dimensions (15.6 x 10.4 cm).
# useDingbats = F avoids font-substitution artifacts in some PDF viewers.
pdf("Figures/Figure1a.pdf",width = 15.6*0.393701,height = 10.4*0.393701,useDingbats = F)
# Top-level auto-printing draws the plot onto the open device.
Figure1
dev.off()
save_plot("Figures/Figure1.pdf",plot = Figure1, ncol = 3,nrow = 2,base_height = 5.2,base_asp = 1, units="cm")
# NOTE(review): this writes to "conceptual figures/" while the PDFs go to
# "Figures/" — confirm both directories exist before running.
ggsave("conceptual figures/Figure1.jpg",Figure1, width = 18, height = 12, units="cm")
########################################################################################################
# Figures 2 and 3
library(mobsim)
# color palette
# Named colors: "large" species pool vs "small" species pool.
pal <- viridisLite::viridis(10)[c(1,8)]
names(pal) <- c("large", "small")
# NOTE(review): `base`, `space_m` and `space_curve` are reassigned here,
# overwriting the Figure-1 objects of the same names.
# Small pool: 100 species; large pool: 500 species; both 1000 individuals.
# Each is split distinctly (maximal turnover) into two alpha communities.
base = sim_sad(s_pool = 100, n_sim = 1000, sad_coef = list(cv_abund = 2))
space_m = splitgamma(base, type = "distinct")
space_curve <- rarefy_long(space_m)
pool2 = sim_sad(s_pool = 500, n_sim = 1000, sad_coef = list(cv_abund =2))
space2_m = splitgamma(pool2, type = "distinct")
space2_curve <- rarefy_long(space2_m)
# Sample-size-standardised quantities for the small pool: rarefy gamma and
# mean alpha at the smallest community size N1.
N1<- min(rowSums(space_m))
gamma_Sn1<-rarefy(base,N1)
alpha_Sn1<- mean(rarefy(space_m,N1))
# Coverage-standardised quantities (helpers from the betaC package):
# Chat() presumably estimates sample coverage at n individuals — the target
# coverage is taken from the LARGE pool so both pools are compared at the
# same coverage; invChat() inverts it to the matching sample size for the
# small pool, and D0.hat() is the coverage-based richness estimate.
cov_value= Chat(pool2,min(rowSums(space2_m)))
cov_value_small= Chat(base,min(rowSums(space_m)))
N_low<-round(invChat(base, cov_value))
SnC_gamma <-D0.hat(base, N_low)
SnC_alpha <- mean(apply(space_m,1,D0.hat,m=N_low))
# NOTE(review): this variable shadows the betaC package name and appears
# unused below (the plots use beta_C_small/beta_C_large) — confirm.
betaC = SnC_gamma/SnC_alpha
beta_C_small<-beta_C(space_m, cov_value)
beta_C_large<-beta_C(space2_m, cov_value)
# Put "gamma" first so linetypes are assigned consistently in both panels.
space_curve$Curve<-relevel(space_curve$Curve, "gamma")
space2_curve$Curve<-relevel(space2_curve$Curve, "gamma")
# Small pool, coverage-standardised (Figure 3 left panel): the same
# small-pool curves, but beta is standardised by the coverage of the LARGE
# pool (cov_value) so both Figure-3 panels share a common coverage target.
# (Removed a stray `geom_line(size = 1)` layer that had no data and no x/y
# aesthetics — a leftover that would fail when the plot is rendered, or at
# best a dead layer. Also dropped an empty `nudge_x = ,` argument.)
small_plot_C <-
  ggplot() +
  # Guides: gamma and mean-alpha coverage-based richness, the coverage-
  # matched sample size N_low, and the tangent with slope 1 - C at S_n.
  geom_hline(yintercept = SnC_gamma, linetype = 5, col = "darkgrey") +
  geom_vline(xintercept = N_low, linetype = "dashed", col = "darkgrey") +
  geom_hline(yintercept = SnC_alpha, linetype = 5, col = "darkgrey") +
  geom_abline(slope = 1 - cov_value, intercept = SnC_gamma - ((1 - cov_value) * N_low), size = 1, col = "darkgrey") +
  # Gamma (solid) and mean-alpha (dashed) rarefaction curves, small-pool color.
  geom_line(aes(N, S_n, linetype = Curve), data = filter(space_curve, type == "major"), size = 1, col = pal[2]) +
  # beta_C label rendered as a plotmath expression.
  geom_text(size = text_size, aes(x = 1000, y = SnC_alpha + 4), label = paste0("beta[C] == ", round(beta_C_small, 3)), nudge_y = -25, parse = T, hjust = "right", vjust = "bottom") +
  labs(title = "Small species pool\n(100 spp.)", x = "Individuals", y = "Rarefied richness") +
  coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300), expand = F) +
  mytheme + theme(plot.title = element_text(colour = pal[2]))
small_plot_N <-
ggplot(data =NULL) +
geom_hline(yintercept =alpha_Sn1, linetype=5, col= "darkgrey")+
geom_vline(xintercept = min(rowSums(space_m)), linetype = "dashed", col= "darkgrey")+
geom_hline(yintercept = gamma_Sn1, linetype=5, col= "darkgrey")+
geom_abline(slope = 1-cov_value_small, intercept = gamma_Sn1 - ((1-cov_value_small)*N1), size=1, col= "darkgrey")+
geom_line(aes(N, S_n, linetype = Curve), data= filter(space_curve,type == "major"), size = 1, col= pal[2]) +
labs(title="Small species pool\n(100 spp.)", x= "Individuals", y= "Rarefied richness")+
#geom_text(aes(x= N1,y= 0), label=paste0("n = ", round(N1,2)),nudge_x = 20, nudge_y = 10, parse = F, hjust="left" )+
geom_text(size= text_size,aes(x= 1000, y= alpha_Sn1), label=paste0("beta[s[n]] == ", round(gamma_Sn1/alpha_Sn1,3)),nudge_x = , nudge_y = -25,parse = T ,hjust="right", vjust="bottom")+
coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300), expand = F) +
mytheme + theme(plot.title = element_text(colour = pal[2]))
# Large pool: rarefied gamma and mean alpha richness at the smallest local
# sample size N_low2 (same quantities as the small-pool panel above).
N_low2 <-min(rowSums(space2_m))
gamma_Sn<-D0.hat(pool2,N_low2)
alpha_Sn<- mean(apply(space2_m,1, D0.hat,N_low2))
# Large-pool panel, sample-size-standardised.
large_plot_N <-
  ggplot(data =NULL) +
  geom_hline(yintercept =alpha_Sn, linetype=5, col= "darkgrey")+
  geom_vline(xintercept = min(rowSums(space2_m)), linetype = "dashed", col= "darkgrey")+
  geom_hline(yintercept = gamma_Sn, linetype=5, col= "darkgrey")+
  geom_abline(slope = 1-cov_value, intercept = gamma_Sn - ((1-cov_value)*N_low2), size=1, col= "darkgrey")+
  geom_line(aes(N, S_n, linetype = Curve), data= filter(space2_curve,type == "major"), size = 1, col= pal[1]) +
  labs(title="Large species pool\n(500 spp.)", x= "Individuals", y= "Rarefied richness")+
  #geom_text(aes(x= N_low2,y= 0), label=paste0("n = ", round(N_low2,2)),nudge_x = 20, nudge_y = 10, parse = F, hjust="left" )+
  geom_text(size= text_size, aes(x= 1000, y= alpha_Sn), label=paste0("beta[s[n]] == ", round(gamma_Sn/alpha_Sn,3)), nudge_y = -25,parse = T ,hjust="right", vjust="bottom")+
  coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300), expand = F) +
  mytheme + theme(plot.title = element_text(colour = pal[1]))
# Large-pool panel, coverage-standardised (same reference lines; only the
# beta annotation differs: beta_C_large instead of the Sn ratio).
large_plot_C <-
  ggplot() +
  geom_hline(yintercept =alpha_Sn, linetype=5, col= "darkgrey")+
  geom_vline(xintercept = min(rowSums(space2_m)), linetype = "dashed", col= "darkgrey")+
  geom_hline(yintercept = gamma_Sn, linetype=5, col= "darkgrey")+
  geom_abline(slope = 1-cov_value, intercept = gamma_Sn - ((1-cov_value)*N_low2), size=1, col= "darkgrey")+
  geom_line(aes(N, S_n, linetype = Curve), data= filter(space2_curve,type == "major"), size = 1, col= pal[1]) +
  labs(title="Large species pool\n(500 spp.)", x= "Individuals", y= "Rarefied richness")+
  geom_text(size= text_size,aes(x= 1000, y= alpha_Sn), label=paste0("beta[C] == ", round(beta_C_large,3)), nudge_y = -25,parse = T ,hjust="right", vjust="bottom")+
  coord_cartesian(xlim = c(0, 1050), ylim = c(0, 300), expand = F) +
  mytheme +
  theme(plot.title = element_text(colour = pal[1]))
# scaling relationship
# beta_Sn and estimated coverage C as functions of sample size N, for both
# pools, up to twice the smallest local sample size.
# NOTE(review): inside map_dbl() the anonymous function's parameter `N`
# shadows the tibble column `N`; it works (each element is passed in) but
# is confusing to read.
dat1=tibble(N=1:(2*min(rowSums(space_m))),
            C=map_dbl(N,function(N) Chat(colSums(space_m), N)),
            beta_Sn = map_dbl(N,function(N)beta_SN(space_m, N)),
            Species_pool= "small"
)
dat2=tibble(N=1:(2*min(rowSums(space2_m))),
            C=map_dbl(N,function(N) Chat(colSums(space2_m), N)),
            beta_Sn = map_dbl(N,function(N)beta_SN(space2_m, N)),
            Species_pool= "large"
)
dat=bind_rows(dat1,dat2)
# beta_Sn against sample size (Figure 2, panel C).
N_plot<-ggplot(data=dat)+
  geom_line(aes(x=N, y=beta_Sn, col= Species_pool), size=1)+
  theme(legend.position = "bottom")+
  labs( x= "Individuals", y= expression(beta[s[n]]))+scale_color_manual(values = pal)+mytheme
# beta against estimated coverage (Figure 3, panel C), with the target
# coverage marked by a dashed vertical line.
C_plot<-ggplot(data=dat)+
  geom_line(aes(x=C, y=beta_Sn, col= Species_pool),size=1)+
  theme(legend.position = "bottom")+
  labs( x= "Estimated coverage", y= expression(beta[C]))+
  scale_color_manual(values = pal)+
  geom_vline(xintercept = cov_value, linetype = "dashed", col= "darkgrey")+
  mytheme
# Assemble and export Figures 2 and 3 (PDF via cowplot, JPG via ggsave).
Figure2<-plot_grid(large_plot_N,small_plot_N, N_plot,ncol = 3, labels = "AUTO")
Figure3<-plot_grid(large_plot_C,small_plot_C, C_plot, ncol = 3, labels = "AUTO")
Figure2
Figure3
save_plot("Figures/Figure2.pdf",Figure2,ncol = 3,nrow = 1,base_height = 9,base_width = 5.2, units="cm" )
save_plot("Figures/Figure3.pdf",Figure3,ncol = 3,nrow = 1,base_height = 9,base_width = 5.2, units="cm" )
ggsave("conceptual figures/Figure2.jpg",Figure2, width = 18, height = 10, units="cm")
ggsave("conceptual figures/Figure3.jpg",Figure3, width = 18, height = 10, units="cm")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hr.R
\name{repairHR}
\alias{repairHR}
\title{clean up HR data for a track}
\usage{
repairHR(trackdf, fixHR = TRUE, HRMax = 220, loud = FALSE, ...)
}
\arguments{
\item{trackdf}{data frame or tibble with gps track data}
\item{fixHR}{repair excessive HR values by setting them to NA}
\item{HRMax}{max credible HR value, larger values are errors set to NA}
\item{loud}{display actions taken}
\item{...}{parameters for \code{\link{processSegments}},
\code{\link{repairSensorDropOut}},
\code{\link{repairCadence}},
\code{\link{repairPower}},
\code{\link{statsHeartRate}},
\code{\link{statsCadence}},
\code{\link{statsPower}},
\code{\link{statsGearing}},
\code{\link{statsGrade}},
\code{\link{statsSession}},
\code{\link{statsStops}},
\code{\link{statsTemp}}}
}
\value{
dataframe with HR data repaired
}
\description{
\code{repairHR} processes a gps track file to correct HR data
}
\seealso{
\code{\link{read_ride}},
\code{\link{repairSensorDropOut}},
\code{\link{repairCadence}},
\code{\link{repairPower}},
\code{\link{statsHeartRate}},
\code{\link{statsCadence}},
\code{\link{statsPower}},
\code{\link{statsGearing}},
\code{\link{statsGrade}},
\code{\link{statsSession}},
\code{\link{statsStops}},
\code{\link{statsTemp}}
}
| /man/repairHR.Rd | no_license | CraigMohn/rideReadGPS | R | false | true | 1,335 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hr.R
\name{repairHR}
\alias{repairHR}
\title{clean up HR data for a track}
\usage{
repairHR(trackdf, fixHR = TRUE, HRMax = 220, loud = FALSE, ...)
}
\arguments{
\item{trackdf}{data frame or tibble with gps track data}
\item{fixHR}{repair excessive HR values by setting them to NA}
\item{HRMax}{max credible HR value, larger values are errors set to NA}
\item{loud}{display actions taken}
\item{...}{parameters for \code{\link{processSegments}},
\code{\link{repairSensorDropOut}},
\code{\link{repairCadence}},
\code{\link{repairPower}},
\code{\link{statsHeartRate}},
\code{\link{statsCadence}},
\code{\link{statsPower}},
\code{\link{statsGearing}},
\code{\link{statsGrade}},
\code{\link{statsSession}},
\code{\link{statsStops}},
\code{\link{statsTemp}}}
}
\value{
dataframe with HR data repaired
}
\description{
\code{repairHR} processes a gps track file to correct HR data
}
\seealso{
\code{\link{read_ride}},
\code{\link{repairSensorDropOut}},
\code{\link{repairCadence}},
\code{\link{repairPower}},
\code{\link{statsHeartRate}},
\code{\link{statsCadence}},
\code{\link{statsPower}},
\code{\link{statsGearing}},
\code{\link{statsGrade}},
\code{\link{statsSession}},
\code{\link{statsStops}},
\code{\link{statsTemp}}
}
|
# Launch the Shiny app stored in the "camino" directory.
# library() fails fast with an error if shiny is not installed, whereas
# require() only returns FALSE and would let runApp() fail later with a
# less helpful message.
library(shiny)
runApp("camino")
| /sessions/R/session_202_shiny/Run.R | no_license | arrpak/Master-in-Data-Science-1 | R | false | false | 35 | r |
# Launch the Shiny app stored in the "camino" directory.
# library() fails fast with an error if shiny is not installed, whereas
# require() only returns FALSE and would let runApp() fail later with a
# less helpful message.
library(shiny)
runApp("camino")
|
# Read the Arabic stopword list from disk (one stopword per line,
# tab-delimited fields); the resulting character vector `stopw` is used
# below to filter tweet tokens.
con <- file("arabic_stop_words.txt", open = "r")
lines <- readLines(con, encoding = "UTF-8")
close(con)
# Split every line on tabs and flatten into a single character vector.
# The original element-wise loop (stopw[i] = unlist(strsplit(...)))
# silently truncated any line holding more than one tab-separated token
# ("number of items to replace" warning); vectorised strsplit() keeps
# every token and also avoids the unsafe 1:length(lines) idiom.
stopw <- unlist(strsplit(lines, "\t"))
#since the corpus clean functions don't work on Arabic letters, we need
# to use our own custom clean function defined in "file_level_functions".
# NOTE(review): `reddata` and cleanTweets() are defined elsewhere in the
# project; cleanTweets() presumably strips URLs/punctuation -- confirm
# against "file_level_functions".
reddata$Tweet = sapply(reddata$Tweet, cleanTweets)
# stopwords were taken from http://www.ranks.nl/stopwords/arabic
# Remove stopwords token by token: split each tweet on single spaces,
# drop tokens present in `stopw`, and re-join with spaces.
reddata$Tweet = sapply(reddata$Tweet, function(tw)
{
tw = strsplit(tw, " ")
tw = unlist(tw)
tw = tw[!tw %in% stopw]
tw = paste(tw, collapse = " ")
})
# sapply() over a character vector already yields a character vector;
# as.vector() additionally drops the names attribute.
reddata$Tweet = as.vector(reddata$Tweet)
# convert to vector corpus representation
# Build a document-term matrix of unigram counts from tweet texts.
#
# Args:
#   reddata: data frame with a character column `Tweet` (already cleaned
#            and stopword-filtered upstream).
# Returns:
#   A dense matrix (documents x terms). Very sparse terms are dropped
#   first so the dense conversion stays tractable: at ~200,000 rows a
#   maximum of roughly 4000-5000 columns is ok.
get_ar_onegram_mat = function(reddata){
  # Arabic stopword list (used upstream) from:
  # https://github.com/mohataher/arabic-stop-words/blob/master/list.txt
  tweet_corpus <- Corpus(VectorSource(reddata$Tweet))
  # tm needs plain-text documents on macOS ("Darwin") and Linux,
  # otherwise DocumentTermMatrix() fails on this corpus -- TODO confirm
  # this is still required with current tm versions.
  if (Sys.info()["sysname"] == "Darwin" || Sys.info()["sysname"] == "Linux") {
    tweet_corpus <- tm_map(tweet_corpus, PlainTextDocument)
  }
  tdm <- DocumentTermMatrix(tweet_corpus)
  # Drop very sparse terms before densifying. The original also called
  # nTerms() and discarded the result (dead debug code) -- removed.
  redtdm <- removeSparseTerms(tdm, 0.999)
  as.matrix(redtdm)
}
# Build a document-term matrix of word-bigram counts from tweet texts.
#
# Args:
#   reddata: data frame with a character column `Tweet`.
# Returns:
#   A dense matrix (documents x bigrams) with very sparse terms removed
#   (at ~200,000 rows a maximum of roughly 4000-5000 columns is ok).
get_ar_bigram_mat = function(reddata){
  # VCorpus (not Corpus) is required for the RWeka tokenizer below.
  tweet_corpus <- VCorpus(VectorSource(reddata$Tweet))
  # Tokenizer yielding word bigrams only.
  BiTok <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
  # RWeka tokenizers fail when the parallel library uses multiple
  # threads (notably on macOS), so force a single worker. Note this is a
  # global side effect, kept for parity with the original code.
  options(mc.cores = 1)
  tdm <- DocumentTermMatrix(tweet_corpus, control = list(tokenize = BiTok))
  # Drop very sparse terms before densifying. The original also called
  # nTerms() and discarded the result (dead debug code) -- removed.
  redtdm <- removeSparseTerms(tdm, 0.999)
  as.matrix(redtdm)
}
# Build a document-term matrix of word-trigram counts from `reddata$Tweet`;
# returns a dense matrix with very sparse terms removed. (The closing brace
# of this function follows on the next line of the file.)
get_trigram_mat = function(reddata){
# convert to a VCorpus; VCorpus (not Corpus) is required by the RWeka tokenizer
myCorpus <- VCorpus(VectorSource(reddata$Tweet))
# NOTE(review): despite the original "remove urls and punctuation" comment,
# no such cleaning happens here; it appears to be done upstream -- confirm.
#create tokenizer function yielding word trigrams only
TriTok<- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
#need to specify the number of threads in the parallel library when
# running on a Mac (RWeka tokenizers fail with multiple workers)
options(mc.cores=1)
#
tdm <- DocumentTermMatrix(myCorpus,control=list(tokenize=TriTok))
# need to reduce matrix as R throws an error otherwise
# at 200.000 rows a max of 4000-5000 cols is ok.
redtdm = removeSparseTerms(tdm, 0.999)
nTerms(redtdm)  # NOTE(review): value discarded -- leftover debug/inspection
ar.trigram.matrix = as.matrix(redtdm)
return(ar.trigram.matrix)
} | /functions/transform-arabic_functions.R | no_license | SebastianKirsch123/ensemble_sentiment_classification | R | false | false | 2,996 | r | # read stopwords from file
con = file("arabic_stop_words.txt", open = "r")
lines = readLines(con, encoding = "UTF-8")
stopw = rep(NA, length(lines))
for (i in 1:length(lines)){
stopw[i] = unlist(strsplit(lines[i], "\t"))
}
close(con)
#since the corpus clean functions don't work on Arabic letters, we need
# to use our own custom clean function defined in "file_level_functions".
reddata$Tweet = sapply(reddata$Tweet, cleanTweets)
# stopwords were taken from http://www.ranks.nl/stopwords/arabic
reddata$Tweet = sapply(reddata$Tweet, function(tw)
{
tw = strsplit(tw, " ")
tw = unlist(tw)
tw = tw[!tw %in% stopw]
tw = paste(tw, collapse = " ")
})
reddata$Tweet = as.vector(reddata$Tweet)
# convert tp vector corpus representation
get_ar_onegram_mat = function(reddata){
Tweets = reddata$Tweet
myCorpus <- Corpus(VectorSource(Tweets))
#Arabic stopwords list from: https://github.com/mohataher/arabic-stop-words/blob/master/list.txt
#myCorpus = tm_map(myCorpus, stripWhitespace)
# following step is required for the function to run on a MAC OS
if(Sys.info()["sysname"] == "Darwin" | Sys.info()["sysname"] == "Linux") {myCorpus = tm_map(myCorpus, PlainTextDocument)}
# myCorpus = tm_map(myCorpus, stemDocument)
tdm = DocumentTermMatrix(myCorpus)
# need to reduce matrix as R throws an error otherwise
# at 200.000 rows a max of 4000-5000 cols is ok.
redtdm = removeSparseTerms(tdm, 0.999)
nTerms(redtdm)
ar.onegram.matrix = as.matrix(redtdm)
return(ar.onegram.matrix)
}
get_ar_bigram_mat = function(reddata){
# convert tp vector corpus representation
myCorpus <- VCorpus(VectorSource(reddata$Tweet))
#create tokenizer function
BiTok<- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
#need to specify the number of threads in the parallel library when
# running on a Mac
options(mc.cores=1)
#
tdm <- DocumentTermMatrix(myCorpus,control=list(tokenize=BiTok))
# need to reduce matrix as R throws an error otherwise
# at 200.000 rows a max of 4000-5000 cols is ok.
redtdm = removeSparseTerms(tdm, 0.999)
nTerms(redtdm)
ar.bigram.matrix = as.matrix(redtdm)
return(ar.bigram.matrix)
}
get_trigram_mat = function(reddata){
# convert tp vector corpus representation
myCorpus <- VCorpus(VectorSource(reddata$Tweet))
# remove urls and punctuation
#create tokenizer function
TriTok<- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
#need to specify the number of threads in the parallel library when
# running on a Mac
options(mc.cores=1)
#
tdm <- DocumentTermMatrix(myCorpus,control=list(tokenize=TriTok))
# need to reduce matrix as R throws an error otherwise
# at 200.000 rows a max of 4000-5000 cols is ok.
redtdm = removeSparseTerms(tdm, 0.999)
nTerms(redtdm)
ar.trigram.matrix = as.matrix(redtdm)
return(ar.trigram.matrix)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_autodiff.R
\name{get_sensitivity}
\alias{get_sensitivity}
\title{Retrieve a sensitivity from autodiff output}
\usage{
get_sensitivity(res, numerator, denominator, reshape = T)
}
\arguments{
\item{res}{Output from *_AD function.}
\item{numerator}{Character string; the numerator from `available_sensitivity`.}
\item{denominator}{Character string; the denominator from `available_sensitivity`.}
\item{reshape}{T or F; if T, reshape the result into an array.}
}
\description{
Retrieve a sensitivity from autodiff output
}
| /man/get_sensitivity.Rd | no_license | ZhuDanCode/BayesSens | R | false | true | 609 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_autodiff.R
\name{get_sensitivity}
\alias{get_sensitivity}
\title{Retrieve a sensitivity from autodiff output}
\usage{
get_sensitivity(res, numerator, denominator, reshape = T)
}
\arguments{
\item{res}{Output from *_AD function.}
\item{numerator}{Character string; the numerator from `available_sensitivity`.}
\item{denominator}{Character string; the denominator from `available_sensitivity`.}
\item{reshape}{T or F; if T, reshape the result into an array.}
}
\description{
Retrieve a sensitivity from autodiff output
}
|
#' Fitting semi-parametric shared frailty models with the EM algorithm
#'
#' @importFrom survival Surv coxph cox.zph
#' @importFrom stats approx coef model.frame model.matrix pchisq printCoefmat nlm uniroot cor optimize
#' @importFrom magrittr "%>%"
#' @importFrom Rcpp evalCpp
#' @importFrom Matrix bdiag
#' @importFrom numDeriv hessian
#' @useDynLib frailtyEM, .registration=TRUE
#' @include em_fit.R
#' @include emfrail_aux.R
#'
#' @param formula A formula that contains on the left hand side an object of the type \code{Surv}
#' and on the right hand side a \code{+cluster(id)} statement. Two special statements may also be used:
#' \code{+strata()} for specifying a grouping column that will represent different strata and
#' \code{+terminal()}
#' @param data A \code{data.frame} in which the formula argument can be evaluated
#' @param distribution An object as created by \code{\link{emfrail_dist}}
#' @param control An object as created by \code{\link{emfrail_control}}
#' @param model Logical. Should the model frame be returned?
#' @param model.matrix Logical. Should the model matrix be returned?
#' @param ... Other arguments, currently used to warn about deprecated argument names
#' @export
#'
#' @details The \code{emfrail} function fits shared frailty models for processes which have intensity
#' \deqn{\lambda(t) = z \lambda_0(t) \exp(\beta' \mathbf{x})}
#' with a non-parametric (Breslow) baseline intensity \eqn{\lambda_0(t)}. The outcome
#' (left hand side of the \code{formula}) must be a \code{Surv} object.
#'
#' If the object is \code{Surv(tstop, status)} then the usual failure time data is represented.
#' Gap-times between recurrent events are represented in the same way.
#' If the left hand side of the formula is created as \code{Surv(tstart, tstop, status)}, this may represent a number of things:
#' (a) recurrent events episodes in calendar time where a recurrent event episode starts at \code{tstart} and ends at \code{tstop}
#' (b) failure time data with time-dependent covariates where \code{tstop} is the time of a change in covariates or censoring
#' (\code{status = 0}) or an event time (\code{status = 1}) or (c) clustered failure time with left truncation, where
#' \code{tstart} is the individual's left truncation time. Unlike regular Cox models, a major distinction is that in case (c) the
#' distribution of the frailty must be considered conditional on survival up to the left truncation time.
#'
#' The \code{+cluster()} statement specified the column that determines the grouping (the observations that share the same frailty).
#' The \code{+strata()} statement specifies a column that determines different strata, for which different baseline hazards are calculated.
#' The \code{+terminal} specifies a column that contains an indicator for dependent censoring, and then performs a score test
#'
#' The \code{distribution} argument must be generated by a call to \code{\link{emfrail_dist}}. This determines the
#' frailty distribution, which may be one of gamma, positive stable or PVF (power-variance-function), and the starting
#' value for the maximum likelihood estimation. The PVF family
#' also includes a tuning parameter that differentiates between inverse Gaussian and compound Poisson distributions.
#' Note that, with univariate data (at most one event per individual, no clusters), only distributions with finite expectation
#' are identifiable. This means that the positive stable distribution should have a maximum likelihood on the edge of the parameter
#' space (\eqn{theta = +\inf}, corresponding to a Cox model for independent observations).
#'
#' The \code{control} argument must be generated by a call to \code{\link{emfrail_control}}. Several parameters
#' may be adjusted that control the precision of the convergence criteria or suppress the calculation of different
#' quantities.
#'
#' @return An object of class \code{emfrail} that contains the following fields:
#' \item{coefficients}{A named vector of the estimated regression coefficients}
#' \item{hazard}{The breslow estimate of the baseline hazard at each event time point, in chronological order}
#' \item{var}{The variance-covariance matrix corresponding to the coefficients and hazard, assuming \eqn{\theta} constant}
#' \item{var_adj}{The variance-covariance matrix corresponding to the
#' coefficients and hazard, adjusted for the estimation of theta}
#' \item{logtheta}{The logarithm of the point estimate of \eqn{\theta}. For the gamma and
#' PVF family of distributions, this is the inverse of the estimated frailty variance.}
#' \item{var_logtheta}{The variance of the estimated logarithm of \eqn{\theta}}
#' \item{ci_logtheta}{The likelihood-based 95\% confidence interval for the logarithm of \eqn{\theta}}
#' \item{frail}{The posterior (empirical Bayes) estimates of the frailty for each cluster}
#' \item{residuals}{A list with two elements, cluster which is a vector that the sum of the
#' cumulative hazards from each cluster for a frailty value of 1, and
#' individual, which is a vector that contains the cumulative hazard corresponding to each row of the data,
#' multiplied by the corresponding frailty estimate}
#' \item{tev}{The time points of the events in the data set, this is the same length as hazard}
#' \item{nevents_id}{The number of events for each cluster}
#' \item{loglik}{A vector of length two with the log-likelihood of the starting Cox model
#' and the maximized log-likelihood}
#' \item{ca_test}{The results of the Commenges-Andersen test for heterogeneity}
#' \item{cens_test}{The results of the test for dependence between a recurrent event and a terminal event,
#' if the \code{+terminal()} statement is specified and the frailty distribution is gamma}
#' \item{zph}{The result of \code{cox.zph} called on a model with the estimated log-frailties as offset}
#' \item{formula, distribution, control}{The original arguments}
#' \item{nobs, fitted}{Number of observations and fitted values (i.e. \eqn{z \exp(\beta^T x)})}
#' \item{mf}{The \code{model.frame}, if \code{model = TRUE}}
#' \item{mm}{The \code{model.matrix}, if \code{model.matrix = TRUE}}
#'
#' @md
#' @note Several options in the \code{control} argument shorten the running time for \code{emfrail} significantly.
#' These are disabling the adjustment of the standard errors (\code{se_adj = FALSE}), disabling the likelihood-based confidence intervals (\code{lik_ci = FALSE}) or
#' disabling the score test for heterogeneity (\code{ca_test = FALSE}).
#'
#' The algorithm is detailed in the package vignette. For the gamma frailty,
#' the results should be identical with those from \code{coxph} with \code{ties = "breslow"}.
#'
#' @seealso \code{\link{plot.emfrail}} and \code{\link{autoplot.emfrail}} for plot functions directly available, \code{\link{emfrail_pll}} for calculating \eqn{\widehat{L}(\theta)} at specific values of \eqn{\theta},
#' \code{\link{summary.emfrail}} for transforming the \code{emfrail} object into a more human-readable format and for
#' visualizing the frailty (empirical Bayes) estimates,
#' \code{\link{predict.emfrail}} for calculating and visualizing conditional and marginal survival and cumulative
#' hazard curves. \code{\link{residuals.emfrail}} for extracting martingale residuals and \code{\link{logLik.emfrail}} for extracting
#' the log-likelihood of the fitted model.
#'
#' @examples
#'
#' m_gamma <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats)
#'
#' # Inverse Gaussian distribution
#' m_ig <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats,
#' distribution = emfrail_dist(dist = "pvf"))
#'
#' # for the PVF distribution with m = 0.75
#' m_pvf <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats,
#' distribution = emfrail_dist(dist = "pvf", pvfm = 0.75))
#'
#' # for the positive stable distribution
#' m_ps <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats,
#' distribution = emfrail_dist(dist = "stable"))
#' \dontrun{
#' # Compare marginal log-likelihoods
#' models <- list(m_gamma, m_ig, m_pvf, m_ps)
#'
#' models
#' logliks <- lapply(models, logLik)
#'
#' names(logliks) <- lapply(models,
#' function(x) with(x$distribution,
#' ifelse(dist == "pvf",
#' paste(dist, "/", pvfm),
#' dist))
#' )
#'
#' logliks
#' }
#'
#' # Stratified analysis
#' \dontrun{
#' m_strat <- emfrail(formula = Surv(time, status) ~ rx + strata(sex) + cluster(litter),
#' data = rats)
#' }
#'
#'
#' # Test for conditional proportional hazards (log-frailty as offset)
#' \dontrun{
#' m_gamma <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats, control = emfrail_control(zph = TRUE))
#' par(mfrow = c(1,2))
#' plot(m_gamma$zph)
#' }
#'
#' # Draw the profile log-likelihood
#' \dontrun{
#' fr_var <- seq(from = 0.01, to = 1.4, length.out = 20)
#'
#' # For gamma the variance is 1/theta (see parametrizations)
#' pll_gamma <- emfrail_pll(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats,
#' values = 1/fr_var )
#' plot(fr_var, pll_gamma,
#' type = "l",
#' xlab = "Frailty variance",
#' ylab = "Profile log-likelihood")
#'
#'
#' # Recurrent events
#' mod_rec <- emfrail(Surv(start, stop, status) ~ treatment + cluster(id), bladder1)
#' # The warnings appear from the Surv object, they also appear in coxph.
#'
#' plot(mod_rec, type = "hist")
#' }
#'
#' # Left truncation
#' \dontrun{
#' # We simulate some data with truncation times
#' set.seed(2018)
#' nclus <- 300
#' nind <- 5
#' x <- sample(c(0,1), nind * nclus, TRUE)
#' u <- rep(rgamma(nclus,1,1), each = 3)
#'
#' stime <- rexp(nind * nclus, rate = u * exp(0.5 * x))
#'
#' status <- ifelse(stime > 5, 0, 1)
#' stime[status == 0] <- 5
#'
#' # truncate uniform between 0 and 2
#' ltime <- runif(nind * nclus, min = 0, max = 2)
#'
#' d <- data.frame(id = rep(1:nclus, each = nind),
#' x = x,
#' stime = stime,
#' u = u,
#' ltime = ltime,
#' status = status)
#' d_left <- d[d$stime > d$ltime,]
#'
#' mod <- emfrail(Surv(stime, status)~ x + cluster(id), d)
#' # This model ignores the left truncation, 0.378 frailty variance:
#' mod_1 <- emfrail(Surv(stime, status)~ x + cluster(id), d_left)
#'
#' # This model takes left truncation into account,
#' # but it considers the distribution of the frailty unconditional on the truncation
#' mod_2 <- emfrail(Surv(ltime, stime, status)~ x + cluster(id), d_left)
#'
#' # This is identical with:
#' mod_cox <- coxph(Surv(ltime, stime, status)~ x + frailty(id), data = d_left)
#'
#'
#' # The correct thing is to consider the distribution of the frailty given the truncation
#' mod_3 <- emfrail(Surv(ltime, stime, status)~ x + cluster(id), d_left,
#' distribution = emfrail_dist(left_truncation = TRUE))
#'
#' summary(mod_1)
#' summary(mod_2)
#' summary(mod_3)
#' }
emfrail <- function(formula,
                    data,
                    distribution = emfrail_dist(),
                    control = emfrail_control(),
                    model = FALSE, model.matrix = FALSE,
                    ...) {

  # --- Argument checks --------------------------------------------------
  # The argument order changed in an earlier release (it used to be
  # emfrail(data, formula, ...)); detect swapped arguments and hint at the fix.
  extraargs <- list(...)

  if(!inherits(formula, "formula")) {
    if(inherits(formula, "data.frame")) warning("You gave a data.frame instead of a formula.
                                              Argument order has changed; now it's emfrail(formula, data, etc..).")
    stop("formula is not an object of type formula")
  }

  if(!inherits(data, "data.frame")) {
    if(inherits(data, "formula")) warning("You gave a formula instead of a data.frame.
                                        Argument order has changed; now it's emfrail(formula, data, etc..).")
    stop("data is not an object of type data.frame")
  }

  if(!inherits(distribution, "emfrail_dist"))
    stop("distribution argument misspecified; see ?emfrail_dist()")

  if(!inherits(control, "emfrail_control"))
    stop("control argument misspecified; see ?emfrail_control()")

  # The fast E-step is only implemented for the gamma and PVF families.
  if(isTRUE(control$em_control$fast_fit)) {
    if(!(distribution$dist %in% c("gamma", "pvf"))) {
      #message("fast_fit option only available for gamma and pvf with m=-1/2 distributions")
      control$em_control$fast_fit <- FALSE
    }
    # version 0.5.6: the IG fast fit gets very sensitive at small frailty
    # variance, so it is disabled for pvf as well.
    if(distribution$dist == "pvf")
      control$em_control$fast_fit <- FALSE
  }

  Call <- match.call()
  if(missing(formula) || missing(data)) stop("Missing arguments")

  # Identity functions so that cluster()/terminal()/strata() terms in the
  # formula evaluate to their argument inside model.frame().
  cluster <- function(x) x
  terminal <- function(x) x
  strata <- function(x) x

  # --- Model frame / design matrix --------------------------------------
  mf <- model.frame(formula, data)

  # Identify the cluster and the ID column
  pos_cluster <- grep("cluster", names(mf))
  if(length(pos_cluster) != 1) stop("misspecified or non-specified cluster")
  id <- mf[[pos_cluster]]

  pos_terminal <- grep("terminal", names(mf))
  if(length(pos_terminal) > 1) stop("misspecified terminal()")

  pos_strata <- grep("strata", names(mf))
  if(length(pos_strata) > 0) {
    if(length(pos_strata) > 1) stop("only one strata() variable allowed")
    strats <- as.numeric(mf[[pos_strata]])
    label_strats <- levels(mf[[pos_strata]])
  } else {
    # else, everyone is in the same strata
    strats <- NULL
    label_strats <- "1"
  }

  Y <- mf[[1]]
  if(!inherits(Y, "Surv")) stop("left hand side not a survival object")
  if(ncol(Y) != 3) {
    # making it all in (tstart, tstop) format
    Y <- Surv(rep(0, nrow(Y)), Y[,1], Y[,2])
  }

  X1 <- model.matrix(formula, data)
  pos_cluster_X1 <- grep("cluster", colnames(X1))
  pos_terminal_X1 <- grep("terminal", colnames(X1))
  pos_strata_X1 <- grep("strata", colnames(X1))
  # Drop intercept and the special columns; keep a matrix even with 1 column.
  X <- X1[,-c(1, pos_cluster_X1, pos_terminal_X1, pos_strata_X1), drop=FALSE]

  # --- Initial (no-frailty) Cox fit -------------------------------------
  # note: X has no attributes, in coxph it does.
  # mcox also works with empty matrices, but also with NULL as x.
  mcox <- survival::agreg.fit(x = X, y = Y, strata = strats, offset = NULL, init = NULL,
                              control = survival::coxph.control(),
                              weights = NULL, method = "breslow", rownames = NULL)

  # the "baseline" case // this will stay constant
  if(length(X) == 0) {
    newrisk <- 1
    exp_g_x <- matrix(rep(1, length(mcox$linear.predictors)), nrow = 1)
    g <- 0
    g_x <- t(matrix(rep(0, length(mcox$linear.predictors)), nrow = 1))
  } else {
    x2 <- matrix(rep(0, ncol(X)), nrow = 1, dimnames = list(123, dimnames(X)[[2]]))
    x2 <- scale(x2, center = mcox$means, scale = FALSE)
    # risk score of the "reference" subject (all covariates 0), needed because
    # agreg.fit centers the covariates internally
    newrisk <- exp(c(x2 %*% mcox$coefficients) + 0)
    exp_g_x <- exp(mcox$coefficients %*% t(X))
    g <- mcox$coefficients
    g_x <- t(mcox$coefficients %*% t(X))
  }

  explp <- exp(mcox$linear.predictors) # these are with centered covariates

  # --- Risk-set bookkeeping ---------------------------------------------
  # order_id keeps track of which row belongs to which cluster so that we
  # don't have to re-derive it at every EM iteration.
  order_id <- match(id, unique(id))
  nev_id <- as.numeric(rowsum(Y[,3], order_id, reorder = FALSE)) # nevent per cluster
  names(nev_id) <- unique(id)

  # nrisk has the sum with every tstop and the sum of elp at risk at that tstop
  # esum has the sum of elp who enter at every tstart
  # indx groups which esum is right after each nrisk;
  # the difference between the two is the sum of elp really at risk at that time point.
  if(!is.null(strats)) {

    explp_str <- split(explp, strats)
    tstop_str <- split(Y[,2], strats)
    tstart_str <- split(Y[,1], strats)

    ord_tstop_str <- lapply(tstop_str, function(x) match(x, sort(unique(x))))
    ord_tstart_str <- lapply(tstart_str, function(x) match(x, sort(unique(x))))

    nrisk <- mapply(FUN = function(explp, y) rowsum_vec(explp, y, max(y)),
                    explp_str,
                    ord_tstop_str,
                    SIMPLIFY = FALSE)
    # nrisk <- mapply(FUN = function(explp, y) rev(cumsum(rev(rowsum(explp, y[,2])))),
    #                 split(explp, strats),
    #                 split.data.frame(Y, strats),
    #                 SIMPLIFY = FALSE)

    esum <- mapply(FUN = function(explp, y) rowsum_vec(explp, y, max(y)),
                   explp_str,
                   ord_tstart_str,
                   SIMPLIFY = FALSE)
    # esum <- mapply(FUN = function(explp, y) rev(cumsum(rev(rowsum(explp, y[,1])))),
    #                split(explp, strats),
    #                split.data.frame(Y, strats),
    #                SIMPLIFY = FALSE)

    death <- lapply(
      X = split.default(Y[,3], strats),
      FUN = function(y) (y == 1)
    )

    nevent <- mapply(
      FUN = function(y, d)
        as.vector(rowsum(1 * d, y)),
      tstop_str,
      death,
      SIMPLIFY = FALSE
    )

    time_str <- lapply(
      X = tstop_str,
      FUN = function(y) sort(unique(y))
    )

    # half the smallest gap between event times; used to make the last
    # interval in etime strictly larger than any entry time
    delta <- min(diff(sort(unique(Y[,2]))))/2
    time <- sort(unique(Y[,2])) # unique tstops

    etime <- lapply(
      X = tstart_str,
      FUN = function(y) c(0, sort(unique(y)), max(y) + delta)
    )

    indx <-
      mapply(FUN = function(time, etime) findInterval(time, etime, left.open = TRUE),
             time_str,
             etime,
             SIMPLIFY = FALSE
      )

    indx2 <-
      mapply(FUN = function(y, time) findInterval(y, time),
             tstart_str,
             time_str,
             SIMPLIFY = FALSE
      )

    time_to_stop <-
      mapply(FUN = function(y, time) match(y, time),
             tstop_str,
             time_str,
             SIMPLIFY = FALSE
      )

    # permutation that maps the strata-blocked order back to row order
    positions_strata <- do.call(c, split(seq_len(nrow(Y)), strats))

    atrisk <- list(death = death, nevent = nevent, nev_id = nev_id,
                   order_id = order_id, time = time, indx = indx, indx2 = indx2,
                   time_to_stop = time_to_stop,
                   ord_tstart_str = ord_tstart_str,
                   ord_tstop_str = ord_tstop_str,
                   positions_strata = positions_strata,
                   strats = strats)

    nrisk <- mapply(FUN = function(nrisk, esum, indx) nrisk - c(esum, 0,0)[indx],
                    nrisk,
                    esum,
                    indx,
                    SIMPLIFY = FALSE)

    if(newrisk == 0) warning("Hazard ratio very extreme; please check (and/or rescale) your data")

    haz <- mapply(FUN = function(nevent, nrisk) nevent/nrisk * newrisk,
                  nevent,
                  nrisk,
                  SIMPLIFY = FALSE)

    basehaz_line <- mapply(FUN = function(haz, time_to_stop) haz[time_to_stop],
                           haz,
                           time_to_stop,
                           SIMPLIFY = FALSE)

    cumhaz <- lapply(haz, cumsum)

    cumhaz_0_line <- mapply(FUN = function(cumhaz, time_to_stop) cumhaz[time_to_stop],
                            cumhaz,
                            time_to_stop,
                            SIMPLIFY = FALSE)

    cumhaz_tstart <- mapply(FUN = function(cumhaz, indx2) c(0, cumhaz)[indx2 + 1],
                            cumhaz,
                            indx2,
                            SIMPLIFY = FALSE)

    cumhaz_line <- mapply(FUN = function(cumhaz_0_line, cumhaz_tstart, explp)
      (cumhaz_0_line - cumhaz_tstart) * explp / newrisk,
      cumhaz_0_line,
      cumhaz_tstart,
      split(explp, strats),
      SIMPLIFY = FALSE)

    cumhaz_line <- do.call(c, cumhaz_line)[order(positions_strata)]

  } else {

    ord_tstop <- match(Y[,2], sort(unique(Y[,2])))
    ord_tstart <- match(Y[,1], sort(unique(Y[,1])))

    nrisk <- rowsum_vec(explp, ord_tstop, max(ord_tstop))
    # nrisk <- rev(cumsum(rev(rowsum(explp, Y[, ncol(Y) - 1]))))
    esum <- rowsum_vec(explp, ord_tstart, max(ord_tstart))
    # esum <- rev(cumsum(rev(rowsum(explp, Y[, 1]))))

    death <- (Y[, 3] == 1)
    nevent <- as.vector(rowsum(1 * death, Y[, ncol(Y) - 1])) # per time point

    time <- sort(unique(Y[,2])) # unique tstops

    etime <- c(0, sort(unique(Y[, 1])), max(Y[, 1]) + min(diff(time))/2)

    indx <- findInterval(time, etime, left.open = TRUE) # left.open = TRUE is very important

    # this gives for every tstart (line variable), after which event time did it come
    indx2 <- findInterval(Y[,1], time)
    time_to_stop <- match(Y[,2], time)

    atrisk <- list(death = death, nevent = nevent, nev_id = nev_id,
                   order_id = order_id,
                   time = time, indx = indx, indx2 = indx2,
                   time_to_stop = time_to_stop,
                   ord_tstart = ord_tstart, ord_tstop = ord_tstop,
                   strats = NULL)

    nrisk <- nrisk - c(esum, 0,0)[indx]

    if(newrisk == 0) warning("Hazard ratio very extreme; please check (and/or rescale) your data")

    haz <- nevent/nrisk * newrisk
    basehaz_line <- haz[atrisk$time_to_stop]

    cumhaz <- cumsum(haz)
    cumhaz_0_line <- cumhaz[atrisk$time_to_stop]
    cumhaz_tstart <- c(0, cumhaz)[atrisk$indx2 + 1]
    # cumulative hazard accumulated by each row over its own (tstart, tstop]
    cumhaz_line <- (cumhaz_0_line - cumhaz_tstart) * explp / newrisk
  }

  # per-cluster sum of cumulative hazards (with frailty 1)
  Cvec <- rowsum(cumhaz_line, order_id, reorder = FALSE)

  ca_test <- NULL
  # ca_test_fit does not know strata ?!?
  if(isTRUE(control$ca_test)) {
    if(!is.null(strats)) ca_test <- NULL else
      ca_test <- ca_test_fit(mcox, X, atrisk, exp_g_x, cumhaz)
  }

  # Under left truncation the frailty distribution must be taken conditional
  # on survival up to the truncation time; Cvec_lt holds the per-cluster
  # cumulative hazard accumulated before entry.
  if(isTRUE(distribution$left_truncation)) {
    if(!is.null(strats))
      cumhaz_tstart <- do.call(c, cumhaz_tstart)[order(atrisk$positions_strata)]
    Cvec_lt <- rowsum(cumhaz_tstart, atrisk$order_id, reorder = FALSE)
  } else Cvec_lt <- 0 * Cvec

  # a fit just for the log-likelihood;
  if(!isTRUE(control$opt_fit)) {
    return(
      em_fit(logfrailtypar = log(distribution$theta),
             dist = distribution$dist, pvfm = distribution$pvfm,
             Y = Y, Xmat = X, atrisk = atrisk,
             basehaz_line = basehaz_line,
             mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
             Cvec = Cvec, lt = distribution$left_truncation,
             Cvec_lt = Cvec_lt, se = FALSE,
             em_control = control$em_control)
    )
  }

  # --- Outer maximization over log(theta) -------------------------------
  if(distribution$dist == "stable") {
    # thing is: with stable small values of theta mean high dependence
    # I have yet to see a very high dependence there; furthermore,
    # the likelihood is pretty flat there.
    # therefore I would rather drag this towards "no dependence".
    distribution$theta <- distribution$theta + 1
  }

  outer_m <- do.call(nlm, args = c(list(f = em_fit,
                                        p = log(distribution$theta),
                                        hessian = TRUE,
                                        dist = distribution$dist,
                                        pvfm = distribution$pvfm,
                                        Y = Y, Xmat = X,
                                        atrisk = atrisk,
                                        basehaz_line = basehaz_line,
                                        mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                        Cvec = Cvec,
                                        lt = distribution$left_truncation,
                                        Cvec_lt = Cvec_lt, se = FALSE,
                                        em_control = control$em_control), control$nlm_control))

  # If nlm lands on a flat region (tiny curvature), try a bounded
  # one-dimensional search as a fallback and keep the better optimum.
  if(outer_m$hessian < 1) {
    outer_m_opt <- do.call(optimize,
                           args = c(list(f = em_fit,
                                         dist = distribution$dist,
                                         pvfm = distribution$pvfm,
                                         Y = Y, Xmat = X,
                                         atrisk = atrisk,
                                         basehaz_line = basehaz_line,
                                         mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                         Cvec = Cvec,
                                         lt = distribution$left_truncation,
                                         Cvec_lt = Cvec_lt, se = FALSE,
                                         em_control = control$em_control), lower = log(control$lik_interval)[1],
                                    upper = log(control$lik_interval)[2]))

    if(outer_m_opt$objective < outer_m$minimum) {
      hess <- numDeriv::hessian(func = em_fit, x = outer_m_opt$minimum,
                                dist = distribution$dist,
                                pvfm = distribution$pvfm,
                                Y = Y, Xmat = X,
                                atrisk = atrisk,
                                basehaz_line = basehaz_line,
                                mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                Cvec = Cvec,
                                lt = distribution$left_truncation,
                                Cvec_lt = Cvec_lt, se = FALSE,
                                em_control = control$em_control)

      outer_m <- list(minimum = outer_m_opt$objective,
                      estimate = outer_m_opt$minimum,
                      hessian = hess)
    }
  }

  if(outer_m$hessian == 0) warning("Hessian virtually 0; frailty variance might be at the edge of the parameter space.")
  if(outer_m$hessian <= 0) hessian <- NA else hessian <- outer_m$hessian

  # --- Likelihood-based confidence interval for log(theta) --------------
  theta_low <- theta_high <- NULL

  if(isTRUE(control$lik_ci)) {
    # With the stable distribution, a problem pops up for small values, i.e. very large association (tau large)
    # So there I use another interval for this
    if(distribution$dist == "stable") {
      control$lik_interval <- control$lik_interval_stable
    }

    skip_ci <- FALSE

    lower_llik <- try(em_fit(log(control$lik_interval[1]),
                             dist = distribution$dist,
                             pvfm = distribution$pvfm,
                             Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                             mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                             Cvec = Cvec, lt = distribution$left_truncation,
                             Cvec_lt = Cvec_lt, se = FALSE,
                             em_control = control$em_control), silent = TRUE)

    if(inherits(lower_llik, "try-error")) {
      warning("likelihood-based CI could not be calculated; disable or change lik_interval[1] in emfrail_control")
      lower_llik <- NA
      log_theta_low <- log_theta_high <- NA
      skip_ci <- TRUE
    }

    upper_llik <- try(em_fit(log(control$lik_interval[2]),
                             dist = distribution$dist,
                             pvfm = distribution$pvfm,
                             Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                             mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                             Cvec = Cvec, lt = distribution$left_truncation,
                             Cvec_lt = Cvec_lt, se = FALSE,
                             em_control = control$em_control), silent = TRUE)

    if(inherits(upper_llik, "try-error")) {
      warning("likelihood-based CI could not be calculated; disable or change lik_interval[2] in emfrail_control")
      upper_llik <- NA
      log_theta_low <- log_theta_high <- NA
      skip_ci <- TRUE
    }

    if(!isTRUE(skip_ci)) {
      # 1.92 = qchisq(0.95, 1) / 2: profile-likelihood 95% CI cutoff
      if(lower_llik - outer_m$minimum < 1.92) {
        log_theta_low <- log(control$lik_interval[1])
        warning("Likelihood-based confidence interval lower limit reached, probably 0;
                You can try a lower value for control$lik_interval[1].")
      } else
        log_theta_low <- uniroot(function(x, ...) outer_m$minimum - em_fit(x, ...) + 1.92,
                                 interval = c(log(control$lik_interval[1]), outer_m$estimate),
                                 f.lower = outer_m$minimum - lower_llik + 1.92, f.upper = 1.92,
                                 tol = .Machine$double.eps^0.1,
                                 dist = distribution$dist,
                                 pvfm = distribution$pvfm,
                                 Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                                 mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                 Cvec = Cvec, lt = distribution$left_truncation,
                                 Cvec_lt = Cvec_lt, se = FALSE,
                                 em_control = control$em_control,
                                 maxiter = 100)$root

      # this says that if I can't get a significant difference on the right side, then it's infinity
      if(upper_llik - outer_m$minimum < 1.92) log_theta_high <- Inf else
        log_theta_high <- uniroot(function(x, ...) outer_m$minimum - em_fit(x, ...) + 1.92,
                                  interval = c(outer_m$estimate, log(control$lik_interval[2])),
                                  f.lower = 1.92, f.upper = outer_m$minimum - upper_llik + 1.92,
                                  extendInt = c("downX"),
                                  dist = distribution$dist,
                                  pvfm = distribution$pvfm,
                                  Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                                  mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                  Cvec = Cvec, lt = distribution$left_truncation,
                                  Cvec_lt = Cvec_lt, se = FALSE,
                                  em_control = control$em_control)$root
    }
  } else
    log_theta_low <- log_theta_high <- NA

  # --- Final EM fit at the optimum --------------------------------------
  if(isTRUE(control$se)) {
    inner_m <- em_fit(logfrailtypar = outer_m$estimate,
                      dist = distribution$dist, pvfm = distribution$pvfm,
                      Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                      mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                      Cvec = Cvec, lt = distribution$left_truncation,
                      Cvec_lt = Cvec_lt, se = TRUE,
                      em_control = control$em_control,
                      return_loglik = FALSE)
  } else
    inner_m <- em_fit(logfrailtypar = outer_m$estimate,
                      dist = distribution$dist, pvfm = distribution$pvfm,
                      Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                      mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                      Cvec = Cvec, lt = distribution$left_truncation,
                      Cvec_lt = Cvec_lt, se = FALSE,
                      em_control = control$em_control,
                      return_loglik = FALSE)

  # Cox.ZPH stuff
  if(isTRUE(control$zph)) {
    # Here just fit a Cox model with the log-frailty as offset
    if(!is.null(strats))
      zph <- cox.zph(coxph(Y ~ X + strata(strats) + offset(inner_m$logz), ties = "breslow"),
                     transform = control$zph_transform) else
        zph <- cox.zph(coxph(Y ~ X + offset(inner_m$logz), ties = "breslow"),
                       transform = control$zph_transform)

    # fix the names for nice output
    # if there is only one covariate there is not "GLOBAL" test
    attr(zph$table, "dimnames")[[1]][1:length(inner_m$coef)] <- names(inner_m$coef)
    attr(zph$y, "dimnames")[[2]] <- names(mcox$coef)
  } else zph <- NULL

  # --- Standard errors adjusted for the estimation of theta -------------
  if(isTRUE(control$se) && inherits(inner_m$Vcov, "try-error")) {
    # NOTE(review): this builds a 1-column matrix of NAs; downstream only
    # checks all(is.na(...)), but a square matrix may have been intended.
    inner_m$Vcov <- matrix(NA, length(inner_m$coef) + length(inner_m$haz))
    warning("Information matrix is singular")
  }

  # adjusted SE: only go on if requested and if Vcov was calculated
  if(isTRUE(control$se) &&
     isTRUE(control$se_adj) &&
     !all(is.na(inner_m$Vcov))) {

    # absolute value should be redundant. but sometimes the "hessian" might be 0.
    # in that case it might appear negative; this happened only on Linux...
    # h <- as.numeric(sqrt(abs(1/(attr(outer_m, "details")[[3]])))/2)
    h <- as.numeric(sqrt(abs(1/hessian))/2)

    # perturb log(theta) by +/- h (capped at +/- 5) for a central difference
    lfp_minus <- max(outer_m$estimate - h , outer_m$estimate - 5, na.rm = TRUE)
    lfp_plus <- min(outer_m$estimate + h , outer_m$estimate + 5, na.rm = TRUE)

    final_fit_minus <- em_fit(logfrailtypar = lfp_minus,
                              dist = distribution$dist, pvfm = distribution$pvfm,
                              Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                              mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                              Cvec = Cvec, lt = distribution$left_truncation,
                              Cvec_lt = Cvec_lt, se = FALSE,
                              em_control = control$em_control,
                              return_loglik = FALSE)

    final_fit_plus <- em_fit(logfrailtypar = lfp_plus,
                             dist = distribution$dist, pvfm = distribution$pvfm,
                             Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                             mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                             Cvec = Cvec, lt = distribution$left_truncation,
                             Cvec_lt = Cvec_lt, se = FALSE,
                             em_control = control$em_control, return_loglik = FALSE)

    # instructional: this should be more or less equal to the
    # -(final_fit_plus$loglik + final_fit_minus$loglik - 2 * inner_m$loglik)/h^2
    # se_logtheta^2 / (2 * (final_fit$loglik -final_fit_plus$loglik ))
    if(!is.null(atrisk$strats))
      deta_dtheta <- (c(final_fit_plus$coef, do.call(c, final_fit_plus$haz)) -
                        c(final_fit_minus$coef, do.call(c, final_fit_minus$haz))) / (2*h) else
        deta_dtheta <- (c(final_fit_plus$coef, final_fit_plus$haz) -
                          c(final_fit_minus$coef, final_fit_minus$haz)) / (2*h)

    #adj_se <- sqrt(diag(deta_dtheta %*% (1/(attr(opt_object, "details")[[3]])) %*% t(deta_dtheta)))
    # vcov_adj = inner_m$Vcov + deta_dtheta %*% (1/(attr(outer_m, "details")[[3]])) %*% t(deta_dtheta)
    vcov_adj <- inner_m$Vcov + deta_dtheta %*% (1/outer_m$hessian) %*% t(deta_dtheta)
  } else
    if(all(is.na(inner_m$Vcov)))
      vcov_adj <- inner_m$Vcov else
        vcov_adj <- matrix(NA, nrow(inner_m$Vcov), nrow(inner_m$Vcov))

  # --- Score test for dependent censoring (terminal events) -------------
  # Only derived for the gamma frailty: correlate the empirical Bayes
  # log-frailties with the martingale residuals of the terminal event.
  if(length(pos_terminal_X1) > 0 && distribution$dist == "gamma") {
    Y[,3] <- X1[,pos_terminal_X1]
    Mres <- survival::agreg.fit(x = X, y = Y, strata = atrisk$strats, offset = NULL, init = NULL,
                                control = survival::coxph.control(),
                                weights = NULL, method = "breslow", rownames = NULL)$residuals

    Mres_id <- rowsum(Mres, atrisk$order_id, reorder = FALSE)

    theta <- exp(outer_m$estimate)
    fr <- with(inner_m, estep[,1] / estep[,2])
    numerator <- theta + inner_m$nev_id
    denominator <- numerator / fr
    # expected log-frailty under the gamma posterior
    lfr <- digamma(numerator) - log(denominator)

    r <- cor(lfr, Mres_id)
    tr <- r * sqrt((length(fr) - 2) / (1 - r^2))
    p.cor <- pchisq(tr^2, df = 1, lower.tail = FALSE)

    cens_test <- c(tstat = tr, pval = p.cor)
  } else cens_test <- NULL

  # --- Output assembly --------------------------------------------------
  if(!isTRUE(model)) model_frame <- NULL else
    model_frame <- mf

  if(!isTRUE(model.matrix)) X <- NULL

  frail <- inner_m$frail
  names(frail) <- unique(id)

  haz <- inner_m$haz
  tev <- inner_m$tev
  if(!is.null(atrisk$strats)) {
    names(haz) <- label_strats
    names(tev) <- label_strats
  }

  res <- list(coefficients = inner_m$coef, #
              hazard = haz,
              var = inner_m$Vcov,
              var_adj = vcov_adj,
              logtheta = outer_m$estimate,
              var_logtheta = 1/hessian,
              ci_logtheta = c(log_theta_low, log_theta_high),
              frail = frail,
              residuals = list(group = inner_m$Cvec,
                               individual = inner_m$cumhaz_line * inner_m$fitted),
              tev = tev,
              nevents_id = inner_m$nev_id,
              loglik = c(mcox$loglik[length(mcox$loglik)], -outer_m$minimum),
              ca_test = ca_test,
              cens_test = cens_test,
              zph = zph,
              formula = formula,
              distribution = distribution,
              control = control,
              nobs = nrow(mf),
              fitted = as.numeric(inner_m$fitted),
              mf = model_frame,
              mm = X)

  # these are things that make the predict work and other methods
  terms_2 <- delete.response(attr(mf, "terms"))
  pos_cluster_2 <- grep("cluster", attr(terms_2, "term.labels"))
  if(!is.null(mcox$coefficients)) {
    terms <- drop.terms(terms_2, pos_cluster_2)
    myxlev <- .getXlevels(terms, mf)
    attr(res, "metadata") <- list(terms, myxlev)
  }

  attr(res, "call") <- Call
  attr(res, "class") <- "emfrail"
  res
}
| /R/emfrail.R | no_license | AMeddis/frailtyEM | R | false | false | 37,158 | r | #' Fitting semi-parametric shared frailty models with the EM algorithm
#'
#' @importFrom survival Surv coxph cox.zph
#' @importFrom stats approx coef model.frame model.matrix pchisq printCoefmat nlm uniroot cor optimize
#' @importFrom magrittr "%>%"
#' @importFrom Rcpp evalCpp
#' @importFrom Matrix bdiag
#' @importFrom numDeriv hessian
#' @useDynLib frailtyEM, .registration=TRUE
#' @include em_fit.R
#' @include emfrail_aux.R
#'
#' @param formula A formula that contains on the left hand side an object of the type \code{Surv}
#' and on the right hand side a \code{+cluster(id)} statement. Two special statments may also be used:
#' \code{+strata()} for specifying a grouping column that will represent different strata and
#' \code{+terminal()}
#' @param data A \code{data.frame} in which the formula argument can be evaluated
#' @param distribution An object as created by \code{\link{emfrail_dist}}
#' @param control An object as created by \code{\link{emfrail_control}}
#' @param model Logical. Should the model frame be returned?
#' @param model.matrix Logical. Should the model matrix be returned?
#' @param ... Other arguments, currently used to warn about deprecated argument names
#' @export
#'
#' @details The \code{emfrail} function fits shared frailty models for processes which have intensity
#' \deqn{\lambda(t) = z \lambda_0(t) \exp(\beta' \mathbf{x})}
#' with a non-parametric (Breslow) baseline intensity \eqn{\lambda_0(t)}. The outcome
#' (left hand side of the \code{formula}) must be a \code{Surv} object.
#'
#' If the object is \code{Surv(tstop, status)} then the usual failure time data is represented.
#' Gap-times between recurrent events are represented in the same way.
#' If the left hand side of the formula is created as \code{Surv(tstart, tstop, status)}, this may represent a number of things:
#' (a) recurrent events episodes in calendar time where a recurrent event episode starts at \code{tstart} and ends at \code{tstop}
#' (b) failure time data with time-dependent covariates where \code{tstop} is the time of a change in covariates or censoring
#' (\code{status = 0}) or an event time (\code{status = 1}) or (c) clustered failure time with left truncation, where
#' \code{tstart} is the individual's left truncation time. Unlike regular Cox models, a major distinction is that in case (c) the
#' distribution of the frailty must be considered conditional on survival up to the left truncation time.
#'
#' The \code{+cluster()} statement specified the column that determines the grouping (the observations that share the same frailty).
#' The \code{+strata()} statement specifies a column that determines different strata, for which different baseline hazards are calculated.
#' The \code{+terminal} specifies a column that contains an indicator for dependent censoring, and then performs a score test
#'
#' The \code{distribution} argument must be generated by a call to \code{\link{emfrail_dist}}. This determines the
#' frailty distribution, which may be one of gamma, positive stable or PVF (power-variance-function), and the starting
#' value for the maximum likelihood estimation. The PVF family
#' also includes a tuning parameter that differentiates between inverse Gaussian and compound Poisson distributions.
#' Note that, with univariate data (at most one event per individual, no clusters), only distributions with finite expectation
#' are identifiable. This means that the positive stable distribution should have a maximum likelihood on the edge of the parameter
#' space (\eqn{theta = +\inf}, corresponding to a Cox model for independent observations).
#'
#' The \code{control} argument must be generated by a call to \code{\link{emfrail_control}}. Several parameters
#' may be adjusted that control the precision of the convergence criteria or suppress the calculation of different
#' quantities.
#'
#' @return An object of class \code{emfrail} that contains the following fields:
#' \item{coefficients}{A named vector of the estimated regression coefficients}
#' \item{hazard}{The breslow estimate of the baseline hazard at each event time point, in chronological order}
#' \item{var}{The variance-covariance matrix corresponding to the coefficients and hazard, assuming \eqn{\theta} constant}
#' \item{var_adj}{The variance-covariance matrix corresponding to the
#' coefficients and hazard, adjusted for the estimation of theta}
#' \item{logtheta}{The logarithm of the point estimate of \eqn{\theta}. For the gamma and
#' PVF family of distributions, this is the inverse of the estimated frailty variance.}
#' \item{var_logtheta}{The variance of the estimated logarithm of \eqn{\theta}}
#' \item{ci_logtheta}{The likelihood-based 95\% confidence interval for the logarithm of \eqn{\theta}}
#' \item{frail}{The posterior (empirical Bayes) estimates of the frailty for each cluster}
#' \item{residuals}{A list with two elements, cluster which is a vector that the sum of the
#' cumulative hazards from each cluster for a frailty value of 1, and
#' individual, which is a vector that contains the cumulative hazard corresponding to each row of the data,
#' multiplied by the corresponding frailty estimate}
#' \item{tev}{The time points of the events in the data set, this is the same length as hazard}
#' \item{nevents_id}{The number of events for each cluster}
#' \item{loglik}{A vector of length two with the log-likelihood of the starting Cox model
#' and the maximized log-likelihood}
#' \item{ca_test}{The results of the Commenges-Andersen test for heterogeneity}
#' \item{cens_test}{The results of the test for dependence between a recurrent event and a terminal event,
#' if the \code{+terminal()} statement is specified and the frailty distribution is gamma}
#' \item{zph}{The result of \code{cox.zph} called on a model with the estimated log-frailties as offset}
#' \item{formula, distribution, control}{The original arguments}
#' \item{nobs, fitted}{Number of observations and fitted values (i.e. \eqn{z \exp(\beta^T x)})}
#' \item{mf}{The \code{model.frame}, if \code{model = TRUE}}
#' \item{mm}{The \code{model.matrix}, if \code{model.matrix = TRUE}}
#'
#' @md
#' @note Several options in the \code{control} arguemnt shorten the running time for \code{emfrail} significantly.
#' These are disabling the adjustment of the standard errors (\code{se_adj = FALSE}), disabling the likelihood-based confidence intervals (\code{lik_ci = FALSE}) or
#' disabling the score test for heterogeneity (\code{ca_test = FALSE}).
#'
#' The algorithm is detailed in the package vignette. For the gamma frailty,
#' the results should be identical with those from \code{coxph} with \code{ties = "breslow"}.
#'
#' @seealso \code{\link{plot.emfrail}} and \code{\link{autoplot.emfrail}} for plot functions directly available, \code{\link{emfrail_pll}} for calculating \eqn{\widehat{L}(\theta)} at specific values of \eqn{\theta},
#' \code{\link{summary.emfrail}} for transforming the \code{emfrail} object into a more human-readable format and for
#' visualizing the frailty (empirical Bayes) estimates,
#' \code{\link{predict.emfrail}} for calculating and visualizing conditional and marginal survival and cumulative
#' hazard curves. \code{\link{residuals.emfrail}} for extracting martingale residuals and \code{\link{logLik.emfrail}} for extracting
#' the log-likelihood of the fitted model.
#'
#' @examples
#'
#' m_gamma <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats)
#'
#' # Inverse Gaussian distribution
#' m_ig <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats,
#' distribution = emfrail_dist(dist = "pvf"))
#'
#' # for the PVF distribution with m = 0.75
#' m_pvf <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats,
#' distribution = emfrail_dist(dist = "pvf", pvfm = 0.75))
#'
#' # for the positive stable distribution
#' m_ps <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats,
#' distribution = emfrail_dist(dist = "stable"))
#' \dontrun{
#' # Compare marginal log-likelihoods
#' models <- list(m_gamma, m_ig, m_pvf, m_ps)
#'
#' models
#' logliks <- lapply(models, logLik)
#'
#' names(logliks) <- lapply(models,
#' function(x) with(x$distribution,
#' ifelse(dist == "pvf",
#' paste(dist, "/", pvfm),
#' dist))
#' )
#'
#' logliks
#' }
#'
#' # Stratified analysis
#' \dontrun{
#' m_strat <- emfrail(formula = Surv(time, status) ~ rx + strata(sex) + cluster(litter),
#' data = rats)
#' }
#'
#'
#' # Test for conditional proportional hazards (log-frailty as offset)
#' \dontrun{
#' m_gamma <- emfrail(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats, control = emfrail_control(zph = TRUE))
#' par(mfrow = c(1,2))
#' plot(m_gamma$zph)
#' }
#'
#' # Draw the profile log-likelihood
#' \dontrun{
#' fr_var <- seq(from = 0.01, to = 1.4, length.out = 20)
#'
#' # For gamma the variance is 1/theta (see parametrizations)
#' pll_gamma <- emfrail_pll(formula = Surv(time, status) ~ rx + sex + cluster(litter),
#' data = rats,
#' values = 1/fr_var )
#' plot(fr_var, pll_gamma,
#' type = "l",
#' xlab = "Frailty variance",
#' ylab = "Profile log-likelihood")
#'
#'
#' # Recurrent events
#' mod_rec <- emfrail(Surv(start, stop, status) ~ treatment + cluster(id), bladder1)
#' # The warnings appear from the Surv object, they also appear in coxph.
#'
#' plot(mod_rec, type = "hist")
#' }
#'
#' # Left truncation
#' \dontrun{
#' # We simulate some data with truncation times
#' set.seed(2018)
#' nclus <- 300
#' nind <- 5
#' x <- sample(c(0,1), nind * nclus, TRUE)
#' u <- rep(rgamma(nclus,1,1), each = 3)
#'
#' stime <- rexp(nind * nclus, rate = u * exp(0.5 * x))
#'
#' status <- ifelse(stime > 5, 0, 1)
#' stime[status == 0] <- 5
#'
#' # truncate uniform between 0 and 2
#' ltime <- runif(nind * nclus, min = 0, max = 2)
#'
#' d <- data.frame(id = rep(1:nclus, each = nind),
#' x = x,
#' stime = stime,
#' u = u,
#' ltime = ltime,
#' status = status)
#' d_left <- d[d$stime > d$ltime,]
#'
#' mod <- emfrail(Surv(stime, status)~ x + cluster(id), d)
#' # This model ignores the left truncation, 0.378 frailty variance:
#' mod_1 <- emfrail(Surv(stime, status)~ x + cluster(id), d_left)
#'
#' # This model takes left truncation into account,
#' # but it considers the distribution of the frailty unconditional on the truncation
#' mod_2 <- emfrail(Surv(ltime, stime, status)~ x + cluster(id), d_left)
#'
#' # This is identical with:
#' mod_cox <- coxph(Surv(ltime, stime, status)~ x + frailty(id), data = d_left)
#'
#'
#' # The correct thing is to consider the distribution of the frailty given the truncation
#' mod_3 <- emfrail(Surv(ltime, stime, status)~ x + cluster(id), d_left,
#' distribution = emfrail_dist(left_truncation = TRUE))
#'
#' summary(mod_1)
#' summary(mod_2)
#' summary(mod_3)
#' }
emfrail <- function(formula,
                    data,
                    distribution = emfrail_dist(),
                    control = emfrail_control(),
                    model = FALSE, model.matrix = FALSE,
                    ...) {
  # Overall flow: (1) validate arguments; (2) build the model frame and an
  # ordinary Cox fit (no frailty) via survival::agreg.fit; (3) precompute
  # risk-set bookkeeping (possibly per stratum); (4) profile the objective
  # over log(theta), with em_fit() running the inner EM at each candidate;
  # (5) assemble and class the "emfrail" result object.
  # browser()
  # This part is because the update breaks old code
  extraargs <- list(...)
  if(!inherits(formula, "formula")) {
    if(inherits(formula, "data.frame")) warning("You gave a data.frame instead of a formula.
                                                Argument order has changed; now it's emfrail(formula, data, etc..).")
    stop("formula is not an object of type formula")
  }
  if(!inherits(data, "data.frame")) {
    if(inherits(data, "formula")) warning("You gave a formula instead of a data.frame.
                                          Argument order has changed; now it's emfrail(formula, data, etc..).")
    stop("data is not an object of type data.frame")
  }
  if(!inherits(distribution, "emfrail_dist"))
    stop("distribution argument misspecified; see ?emfrail_dist()")
  if(!inherits(control, "emfrail_control"))
    stop("control argument misspecified; see ?emfrail_control()")
  if(isTRUE(control$em_control$fast_fit)) {
    # fast_fit is only implemented for some distributions; silently fall
    # back to the general fit otherwise.
    if(!(distribution$dist %in% c("gamma", "pvf"))) {
      #message("fast_fit option only available for gamma and pvf with m=-1/2 distributions")
      control$em_control$fast_fit <- FALSE
    }
    # version 0.5.6, the IG fast fit gets super sensitive at small frailty variance...
    if(distribution$dist == "pvf")
      control$em_control$fast_fit <- FALSE
  }
  Call <- match.call()
  if(missing(formula) | missing(data)) stop("Missing arguments")
  # Dummy identity functions so that cluster()/terminal()/strata() terms in
  # the formula evaluate cleanly inside model.frame(); the columns are then
  # located by name below.
  cluster <- function(x) x
  terminal <- function(x) x
  strata <- function(x) x
  mf <- model.frame(formula, data)
  # Identify the cluster and the ID column
  pos_cluster <- grep("cluster", names(mf))
  if(length(pos_cluster) != 1) stop("misspecified or non-specified cluster")
  id <- mf[[pos_cluster]]
  pos_terminal <- grep("terminal", names(mf))
  if(length(pos_terminal) > 1) stop("misspecified terminal()")
  pos_strata <- grep("strata", names(mf))
  if(length(pos_strata) > 0) {
    if(length(pos_strata) > 1) stop("only one strata() variable allowed")
    strats <- as.numeric(mf[[pos_strata]])
    label_strats <- levels(mf[[pos_strata]])
  } else {
    # else, everyone is in the same strata
    strats <- NULL
    label_strats <- "1"
  }
  Y <- mf[[1]]
  if(!inherits(Y, "Surv")) stop("left hand side not a survival object")
  if(ncol(Y) != 3) {
    # making it all in (tstart, tstop) format
    Y <- Surv(rep(0, nrow(Y)), Y[,1], Y[,2])
  }
  # Design matrix without the intercept and the special columns.
  X1 <- model.matrix(formula, data)
  pos_cluster_X1 <- grep("cluster", colnames(X1))
  pos_terminal_X1 <- grep("terminal", colnames(X1))
  pos_strata_X1 <- grep("strata", colnames(X1))
  X <- X1[,-c(1, pos_cluster_X1, pos_terminal_X1, pos_strata_X1), drop=FALSE]
  # note: X has no attributes, in coxph it does.
  # mcox also works with empty matrices, but also with NULL as x.
  mcox <- survival::agreg.fit(x = X, y = Y, strata = strats, offset = NULL, init = NULL,
                              control = survival::coxph.control(),
                              weights = NULL, method = "breslow", rownames = NULL)
  # order(strat, -Y[,2])
  # the "baseline" case // this will stay constant
  if(length(X) == 0) {
    newrisk <- 1
    exp_g_x <- matrix(rep(1, length(mcox$linear.predictors)), nrow = 1)
    g <- 0
    g_x <- t(matrix(rep(0, length(mcox$linear.predictors)), nrow = 1))
  } else {
    # newrisk rescales hazards from the centered-covariate scale back to
    # the scale of a reference subject with all covariates equal to 0.
    x2 <- matrix(rep(0, ncol(X)), nrow = 1, dimnames = list(123, dimnames(X)[[2]]))
    x2 <- scale(x2, center = mcox$means, scale = FALSE)
    newrisk <- exp(c(x2 %*% mcox$coefficients) + 0)
    exp_g_x <- exp(mcox$coefficients %*% t(X))
    g <- mcox$coefficients
    g_x <- t(mcox$coefficients %*% t(X))
  }
  explp <- exp(mcox$linear.predictors) # these are with centered covariates
  # now thing is that maybe this is not very necessary,
  # but it keeps track of which row belongs to which cluster
  # and then we don't have to keep on doing this
  order_id <- match(id, unique(id))
  nev_id <- as.numeric(rowsum(Y[,3], order_id, reorder = FALSE)) # nevent per cluster
  names(nev_id) <- unique(id)
  # nrisk has the sum with every tstop and the sum of elp at risk at that tstop
  # esum has the sum of elp who enter at every tstart
  # indx groups which esum is right after each nrisk;
  # the difference between the two is the sum of elp really at risk at that time point.
  if(!is.null(strats)) {
    # Stratified case: every quantity below is a per-stratum list, computed
    # with the same formulas as the unstratified branch further down.
    explp_str <- split(explp, strats)
    tstop_str <- split(Y[,2], strats)
    tstart_str <- split(Y[,1], strats)
    ord_tstop_str <- lapply(tstop_str, function(x) match(x, sort(unique(x))))
    ord_tstart_str <- lapply(tstart_str, function(x) match(x, sort(unique(x))))
    nrisk <- mapply(FUN = function(explp, y) rowsum_vec(explp, y, max(y)),
                    explp_str,
                    ord_tstop_str,
                    SIMPLIFY = FALSE)
    # nrisk <- mapply(FUN = function(explp, y) rev(cumsum(rev(rowsum(explp, y[,2])))),
    #                 split(explp, strats),
    #                 split.data.frame(Y, strats),
    #                 SIMPLIFY = FALSE)
    esum <- mapply(FUN = function(explp, y) rowsum_vec(explp, y, max(y)),
                   explp_str,
                   ord_tstart_str,
                   SIMPLIFY = FALSE)
    # esum <- mapply(FUN = function(explp, y) rev(cumsum(rev(rowsum(explp, y[,1])))),
    #                split(explp, strats),
    #                split.data.frame(Y, strats),
    #                SIMPLIFY = FALSE)
    death <- lapply(
      X = split.default(Y[,3], strats),
      FUN = function(y) (y == 1)
    )
    nevent <- mapply(
      FUN = function(y, d)
        as.vector(rowsum(1 * d, y)),
      tstop_str,
      death,
      SIMPLIFY = FALSE
    )
    time_str <- lapply(
      X = tstop_str,
      FUN = function(y) sort(unique(y))
    )
    # half the smallest gap between event times; used to close the last
    # entry interval strictly after the last entry time
    delta <- min(diff(sort(unique(Y[,2]))))/2
    time <- sort(unique(Y[,2])) # unique tstops
    etime <- lapply(
      X = tstart_str,
      FUN = function(y) c(0, sort(unique(y)), max(y) + delta)
    )
    indx <-
      mapply(FUN = function(time, etime) findInterval(time, etime, left.open = TRUE),
             time_str,
             etime,
             SIMPLIFY = FALSE
      )
    indx2 <-
      mapply(FUN = function(y, time) findInterval(y, time),
             tstart_str,
             time_str,
             SIMPLIFY = FALSE
      )
    time_to_stop <-
      mapply(FUN = function(y, time) match(y, time),
             tstop_str,
             time_str,
             SIMPLIFY = FALSE
      )
    # positions_strata maps the strata-split ordering back to row order
    positions_strata <- do.call(c,split(1:nrow(Y), strats))
    atrisk <- list(death = death, nevent = nevent, nev_id = nev_id,
                   order_id = order_id, time = time, indx = indx, indx2 = indx2,
                   time_to_stop = time_to_stop,
                   ord_tstart_str = ord_tstart_str,
                   ord_tstop_str = ord_tstop_str,
                   positions_strata = positions_strata,
                   strats = strats)
    nrisk <- mapply(FUN = function(nrisk, esum, indx) nrisk - c(esum, 0,0)[indx],
                    nrisk,
                    esum,
                    indx,
                    SIMPLIFY = FALSE)
    if(newrisk == 0) warning("Hazard ratio very extreme; please check (and/or rescale) your data")
    # Breslow-type hazard increments per stratum, rescaled by newrisk
    haz <- mapply(FUN = function(nevent, nrisk) nevent/nrisk * newrisk,
                  nevent,
                  nrisk,
                  SIMPLIFY = FALSE)
    basehaz_line <- mapply(FUN = function(haz, time_to_stop) haz[time_to_stop],
                           haz,
                           time_to_stop,
                           SIMPLIFY = FALSE)
    cumhaz <- lapply(haz, cumsum)
    cumhaz_0_line <- mapply(FUN = function(cumhaz, time_to_stop) cumhaz[time_to_stop],
                            cumhaz,
                            time_to_stop,
                            SIMPLIFY = FALSE)
    cumhaz_tstart <- mapply(FUN = function(cumhaz, indx2) c(0, cumhaz)[indx2 + 1],
                            cumhaz,
                            indx2,
                            SIMPLIFY = FALSE)
    cumhaz_line <- mapply(FUN = function(cumhaz_0_line, cumhaz_tstart, explp)
      (cumhaz_0_line - cumhaz_tstart) * explp / newrisk,
      cumhaz_0_line,
      cumhaz_tstart,
      split(explp, strats),
      SIMPLIFY = FALSE)
    # flatten back to the original row order
    cumhaz_line <- do.call(c, cumhaz_line)[order(positions_strata)]
  } else {
    ord_tstop <- match(Y[,2], sort(unique(Y[,2])))
    ord_tstart <- match(Y[,1], sort(unique(Y[,1])))
    nrisk <- rowsum_vec(explp, ord_tstop, max(ord_tstop))
    # nrisk <- rev(cumsum(rev(rowsum(explp, Y[, ncol(Y) - 1]))))
    esum <- rowsum_vec(explp, ord_tstart, max(ord_tstart))
    # esum <- rev(cumsum(rev(rowsum(explp, Y[, 1]))))
    death <- (Y[, 3] == 1)
    nevent <- as.vector(rowsum(1 * death, Y[, ncol(Y) - 1])) # per time point
    time <- sort(unique(Y[,2])) # unique tstops
    etime <- c(0, sort(unique(Y[, 1])), max(Y[, 1]) + min(diff(time))/2)
    indx <- findInterval(time, etime, left.open = TRUE) # left.open = TRUE is very important
    # this gives for every tstart (line variable), after which event time did it come
    indx2 <- findInterval(Y[,1], time)
    time_to_stop <- match(Y[,2], time)
    atrisk <- list(death = death, nevent = nevent, nev_id = nev_id,
                   order_id = order_id,
                   time = time, indx = indx, indx2 = indx2,
                   time_to_stop = time_to_stop,
                   ord_tstart = ord_tstart, ord_tstop = ord_tstop,
                   strats = NULL)
    nrisk <- nrisk - c(esum, 0,0)[indx]
    if(newrisk == 0) warning("Hazard ratio very extreme; please check (and/or rescale) your data")
    haz <- nevent/nrisk * newrisk
    basehaz_line <- haz[atrisk$time_to_stop]
    cumhaz <- cumsum(haz)
    cumhaz_0_line <- cumhaz[atrisk$time_to_stop]
    cumhaz_tstart <- c(0, cumhaz)[atrisk$indx2 + 1]
    # per-row cumulative hazard over (tstart, tstop], on the uncentered scale
    cumhaz_line <- (cumhaz[atrisk$time_to_stop] - c(0, cumhaz)[atrisk$indx2 + 1]) * explp / newrisk
  }
  # Cvec: total cumulative hazard per cluster, passed on to em_fit()
  Cvec <- rowsum(cumhaz_line, order_id, reorder = FALSE)
  ca_test <- NULL
  # ca_test_fit does not know strata ?!?
  if(isTRUE(control$ca_test)) {
    if(!is.null(strats)) ca_test <- NULL else
      ca_test <- ca_test_fit(mcox, X, atrisk, exp_g_x, cumhaz)
  }
  # Cvec_lt: per-cluster cumulative hazard accumulated before entry, used
  # when the frailty distribution is conditioned on left truncation
  if(isTRUE(distribution$left_truncation)) {
    if(!is.null(strats))
      cumhaz_tstart <- do.call(c, cumhaz_tstart)[order(atrisk$positions_strata)]
    Cvec_lt <- rowsum(cumhaz_tstart, atrisk$order_id, reorder = FALSE)
  } else Cvec_lt <- 0 * Cvec
  # a fit just for the log-likelihood;
  if(!isTRUE(control$opt_fit)) {
    return(
      em_fit(logfrailtypar = log(distribution$theta),
             dist = distribution$dist, pvfm = distribution$pvfm,
             Y = Y, Xmat = X, atrisk = atrisk,
             basehaz_line = basehaz_line,
             mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
             Cvec = Cvec, lt = distribution$left_truncation,
             Cvec_lt = Cvec_lt, se = FALSE,
             em_control = control$em_control)
    )
  }
  # browser()
  if(distribution$dist == "stable") {
    # thing is: with stable small values of theta mean high dependence
    # I have yet to see a very high dependence there; furthermore,
    # the likelihood is pretty flat there.
    # therefore I would rather drag this towards "no dependence".
    distribution$theta <- distribution$theta + 1
  }
  # Outer optimization over log(theta): nlm() minimizes the objective
  # returned by em_fit() (its negative is reported as the log-likelihood
  # in the result below).
  outer_m <- do.call(nlm, args = c(list(f = em_fit,
                                        p = log(distribution$theta),
                                        hessian = TRUE,
                                        dist = distribution$dist,
                                        pvfm = distribution$pvfm,
                                        Y = Y, Xmat = X,
                                        atrisk = atrisk,
                                        basehaz_line = basehaz_line,
                                        mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                        Cvec = Cvec,
                                        lt = distribution$left_truncation,
                                        Cvec_lt = Cvec_lt, se = FALSE,
                                        em_control = control$em_control), control$nlm_control))
  # control$lik_interval_stable
  # If nlm's hessian is suspiciously small, retry with a bracketed 1-D
  # optimize() over lik_interval and keep whichever optimum is better
  # (the hessian is then recomputed numerically).
  if(outer_m$hessian < 1) {
    outer_m_opt <- do.call(optimize,
                           args = c(list(f = em_fit,
                                         dist = distribution$dist,
                                         pvfm = distribution$pvfm,
                                         Y = Y, Xmat = X,
                                         atrisk = atrisk,
                                         basehaz_line = basehaz_line,
                                         mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                         Cvec = Cvec,
                                         lt = distribution$left_truncation,
                                         Cvec_lt = Cvec_lt, se = FALSE,
                                         em_control = control$em_control), lower = log(control$lik_interval)[1],
                                    upper = log(control$lik_interval)[2]))
    if(outer_m_opt$objective < outer_m$minimum) {
      hess <- numDeriv::hessian(func = em_fit, x = outer_m_opt$minimum,
                                dist = distribution$dist,
                                pvfm = distribution$pvfm,
                                Y = Y, Xmat = X,
                                atrisk = atrisk,
                                basehaz_line = basehaz_line,
                                mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                Cvec = Cvec,
                                lt = distribution$left_truncation,
                                Cvec_lt = Cvec_lt, se = FALSE,
                                em_control = control$em_control)
      outer_m <- list(minimum = outer_m_opt$objective,
                      estimate = outer_m_opt$minimum,
                      hessian = hess)
    }
  }
  if(outer_m$hessian == 0) warning("Hessian virtually 0; frailty variance might be at the edge of the parameter space.")
  if(outer_m$hessian <= 0) hessian <- NA else hessian <- outer_m$hessian
  # likelihood-based confidence intervals
  theta_low <- theta_high <- NULL
  if(isTRUE(control$lik_ci)) {
    # With the stable distribution, a problem pops up for small values, i.e. very large association (tau large)
    # So there I use another interval for this
    if(distribution$dist == "stable") {
      control$lik_interval <- control$lik_interval_stable
    }
    skip_ci <- FALSE
    # Evaluate the profile objective at both ends of lik_interval; a failure
    # at either end disables the likelihood-based CI entirely.
    lower_llik <- try(em_fit(log(control$lik_interval[1]),
                             dist = distribution$dist,
                             pvfm = distribution$pvfm,
                             Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                             mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                             Cvec = Cvec, lt = distribution$left_truncation,
                             Cvec_lt = Cvec_lt, se = FALSE,
                             em_control = control$em_control), silent = TRUE)
    # NOTE(review): inherits(lower_llik, "try-error") would be the more
    # robust idiom here (and for upper_llik below).
    if(class(lower_llik) == "try-error") {
      warning("likelihood-based CI could not be calcuated; disable or change lik_interval[1] in emfrail_control")
      lower_llik <- NA
      log_theta_low <- log_theta_high <- NA
      skip_ci <- TRUE
    }
    upper_llik <- try(em_fit(log(control$lik_interval[2]),
                             dist = distribution$dist,
                             pvfm = distribution$pvfm,
                             Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                             mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                             Cvec = Cvec, lt = distribution$left_truncation,
                             Cvec_lt = Cvec_lt, se = FALSE,
                             em_control = control$em_control), silent = TRUE)
    if(class(upper_llik) == "try-error") {
      warning("likelihood-based CI could not be calcuated; disable or lik_interval[2] in emfrail_control")
      upper_llik <- NA
      log_theta_low <- log_theta_high <- NA
      skip_ci <- TRUE
    }
    if(!isTRUE(skip_ci)) {
      # 1.92 = qchisq(0.95, 1) / 2, the usual profile-likelihood cutoff
      if(lower_llik - outer_m$minimum < 1.92) {
        log_theta_low <- log(control$lik_interval[1])
        warning("Likelihood-based confidence interval lower limit reached, probably 0;
                You can try a lower value for control$lik_interval[1].")
      } else
        log_theta_low <- uniroot(function(x, ...) outer_m$minimum - em_fit(x, ...) + 1.92,
                                 interval = c(log(control$lik_interval[1]), outer_m$estimate),
                                 f.lower = outer_m$minimum - lower_llik + 1.92, f.upper = 1.92,
                                 tol = .Machine$double.eps^0.1,
                                 dist = distribution$dist,
                                 pvfm = distribution$pvfm,
                                 Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                                 mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                 Cvec = Cvec, lt = distribution$left_truncation,
                                 Cvec_lt = Cvec_lt, se = FALSE,
                                 em_control = control$em_control,
                                 maxiter = 100)$root
      # this says that if I can't get a significant difference on the right side, then it's infinity
      if(upper_llik - outer_m$minimum < 1.92) log_theta_high <- Inf else
        log_theta_high <- uniroot(function(x, ...) outer_m$minimum - em_fit(x, ...) + 1.92,
                                  interval = c(outer_m$estimate, log(control$lik_interval[2])),
                                  f.lower = 1.92, f.upper = outer_m$minimum - upper_llik + 1.92,
                                  extendInt = c("downX"),
                                  dist = distribution$dist,
                                  pvfm = distribution$pvfm,
                                  Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                                  mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                                  Cvec = Cvec, lt = distribution$left_truncation,
                                  Cvec_lt = Cvec_lt, se = FALSE,
                                  em_control = control$em_control)$root
    }
  } else
    log_theta_low <- log_theta_high <- NA
  # Final inner EM at the estimated log(theta); se = TRUE also asks em_fit
  # for the variance-covariance matrix.
  if(isTRUE(control$se)) {
    inner_m <- em_fit(logfrailtypar = outer_m$estimate,
                      dist = distribution$dist, pvfm = distribution$pvfm,
                      Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                      mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                      Cvec = Cvec, lt = distribution$left_truncation,
                      Cvec_lt = Cvec_lt, se = TRUE,
                      em_control = control$em_control,
                      return_loglik = FALSE)
  } else
    inner_m <- em_fit(logfrailtypar = outer_m$estimate,
                      dist = distribution$dist, pvfm = distribution$pvfm,
                      Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                      mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                      Cvec = Cvec, lt = distribution$left_truncation,
                      Cvec_lt = Cvec_lt, se = FALSE,
                      em_control = control$em_control,
                      return_loglik = FALSE)
  # Cox.ZPH stuff
  if(isTRUE(control$zph)) {
    # Here just fit a Cox model with the log-frailty as offset
    if(!is.null(strats))
      zph <- cox.zph(coxph(Y ~ X + strata(strats) + offset(inner_m$logz), ties = "breslow"),
                     transform = control$zph_transform) else
        zph <- cox.zph(coxph(Y ~ X + offset(inner_m$logz), ties = "breslow"),
                       transform = control$zph_transform)
    # fix the names for nice output
    # if there is only one covariate there is not "GLOBAL" test
    attr(zph$table, "dimnames")[[1]][1:length(inner_m$coef)] <- names(inner_m$coef)
    attr(zph$y, "dimnames")[[2]] <- names(mcox$coef)
  } else zph <- NULL
  # adjusted standard error
  if(isTRUE(control$se) & isTRUE(attr(inner_m$Vcov, "class") == "try-error")) {
    inner_m$Vcov <- matrix(NA, length(inner_m$coef) + length(inner_m$haz))
    warning("Information matrix is singular")
  }
  # adjusted SE: only go on if requested and if Vcov was calculated
  if(isTRUE(control$se) &
     isTRUE(control$se_adj) &
     !all(is.na(inner_m$Vcov))) {
    # absolute value should be redundant. but sometimes the "hessian" might be 0.
    # in that case it might appear negative; this happened only on Linux...
    # h <- as.numeric(sqrt(abs(1/(attr(outer_m, "details")[[3]])))/2)
    # h: finite-difference step for d(eta)/d(theta), half an SE of
    # log(theta) but capped at 5 on either side of the estimate
    h<- as.numeric(sqrt(abs(1/hessian))/2)
    lfp_minus <- max(outer_m$estimate - h , outer_m$estimate - 5, na.rm = TRUE)
    lfp_plus <- min(outer_m$estimate + h , outer_m$estimate + 5, na.rm = TRUE)
    final_fit_minus <- em_fit(logfrailtypar = lfp_minus,
                              dist = distribution$dist, pvfm = distribution$pvfm,
                              Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                              mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                              Cvec = Cvec, lt = distribution$left_truncation,
                              Cvec_lt = Cvec_lt, se = FALSE,
                              em_control = control$em_control,
                              return_loglik = FALSE)
    final_fit_plus <- em_fit(logfrailtypar = lfp_plus,
                             dist = distribution$dist, pvfm = distribution$pvfm,
                             Y = Y, Xmat = X, atrisk = atrisk, basehaz_line = basehaz_line,
                             mcox = list(coefficients = g, loglik = mcox$loglik), # a "fake" cox model
                             Cvec = Cvec, lt = distribution$left_truncation,
                             Cvec_lt = Cvec_lt, se = FALSE,
                             em_control = control$em_control, return_loglik = FALSE)
    # instructional: this should be more or less equal to the
    # -(final_fit_plus$loglik + final_fit_minus$loglik - 2 * inner_m$loglik)/h^2
    # se_logtheta^2 / (2 * (final_fit$loglik -final_fit_plus$loglik ))
    if(!is.null(atrisk$strats))
      deta_dtheta <- (c(final_fit_plus$coef, do.call(c, final_fit_plus$haz)) -
                        c(final_fit_minus$coef, do.call(c, final_fit_minus$haz))) / (2*h) else
        deta_dtheta <- (c(final_fit_plus$coef, final_fit_plus$haz) -
                          c(final_fit_minus$coef, final_fit_minus$haz)) / (2*h)
    #adj_se <- sqrt(diag(deta_dtheta %*% (1/(attr(opt_object, "details")[[3]])) %*% t(deta_dtheta)))
    # vcov_adj = inner_m$Vcov + deta_dtheta %*% (1/(attr(outer_m, "details")[[3]])) %*% t(deta_dtheta)
    vcov_adj = inner_m$Vcov + deta_dtheta %*% (1/outer_m$hessian) %*% t(deta_dtheta)
  } else
    if(all(is.na(inner_m$Vcov)))
      vcov_adj <- inner_m$Vcov else
        vcov_adj = matrix(NA, nrow(inner_m$Vcov), nrow(inner_m$Vcov))
  # cens_test (gamma frailty with a terminal() column only): correlation
  # between a transform of the estimated frailties and the per-cluster
  # martingale residuals of a Cox model for the terminal event.
  if(length(pos_terminal_X1) > 0 & distribution$dist == "gamma") {
    Y[,3] <- X1[,pos_terminal_X1]
    Mres <- survival::agreg.fit(x = X, y = Y, strata = atrisk$strats, offset = NULL, init = NULL,
                                control = survival::coxph.control(),
                                weights = NULL, method = "breslow", rownames = NULL)$residuals
    Mres_id <- rowsum(Mres, atrisk$order_id, reorder = FALSE)
    theta <- exp(outer_m$estimate)
    fr <- with(inner_m, estep[,1] / estep[,2])
    numerator <- theta + inner_m$nev_id
    denominator <- numerator / fr
    lfr <- digamma(numerator) - log(denominator)
    lfr2 <- (digamma(numerator))^2 + trigamma(numerator) - (log(denominator))^2 - 2 * log(denominator) * lfr
    r <- cor(lfr, Mres_id)
    tr <- r* sqrt((length(fr) - 2) / (1 - r^2))
    p.cor <- pchisq(tr^2, df = 1, lower.tail = F)
    cens_test = c(tstat = tr, pval = p.cor)
  } else cens_test = NULL
  if(!isTRUE(model)) model_frame <- NULL else
    model_frame <- mf
  if(!isTRUE(model.matrix)) X <- NULL
  frail <- inner_m$frail
  names(frail) <- unique(id)
  haz <- inner_m$haz
  tev <- inner_m$tev
  if(!is.null(atrisk$strats)) {
    names(haz) <- label_strats
    names(tev) <- label_strats
  }
  # Assemble the result object returned to the user.
  res <- list(coefficients = inner_m$coef, #
              hazard = haz,
              var = inner_m$Vcov,
              var_adj = vcov_adj,
              logtheta = outer_m$estimate,
              var_logtheta = 1/hessian,
              ci_logtheta = c(log_theta_low, log_theta_high),
              frail = frail,
              residuals = list(group = inner_m$Cvec,
                               individual = inner_m$cumhaz_line * inner_m$fitted),
              tev = tev,
              nevents_id = inner_m$nev_id,
              loglik = c(mcox$loglik[length(mcox$loglik)], -outer_m$minimum),
              ca_test = ca_test,
              cens_test = cens_test,
              zph = zph,
              formula = formula,
              distribution = distribution,
              control = control,
              nobs = nrow(mf),
              fitted = as.numeric(inner_m$fitted),
              mf = model_frame,
              mm = X)
  # these are things that make the predict work and other methods
  terms_2 <- delete.response(attr(mf, "terms"))
  pos_cluster_2 <- grep("cluster", attr(terms_2, "term.labels"))
  if(!is.null(mcox$coefficients)) {
    terms <- drop.terms(terms_2, pos_cluster_2)
    myxlev <- .getXlevels(terms, mf)
    attr(res, "metadata") <- list(terms, myxlev)
  }
  attr(res, "call") <- Call
  attr(res, "class") <- "emfrail"
  res
}
|
# context("test-subset_cells")
#
# cds <- load_a549()
#
# test_that("test subset_along_path error messages work", {
# expect_error(cds <- subset_along_path(cds),
# "No dimensionality reduction for UMAP calculated. Please run reduce_dimension with reduction_method = UMAP and partition_cells before running learn_graph.")
# cds <- preprocess_cds(cds)
# expect_error(cds <- subset_along_path(cds),
# "No dimensionality reduction for UMAP calculated. Please run reduce_dimension with reduction_method = UMAP and partition_cells before running learn_graph.")
# cds <- reduce_dimension(cds)
# expect_error(cds <- subset_along_path(cds),
# "No cell partition for UMAP calculated. Please run partition_cells with reduction_method = UMAP before running learn_graph.")
# cds <- partition_cells(cds)
# expect_error(cds <- subset_along_path(cds),
# "No principal_graph for UMAP calculated. Please run learn_graph with reduction_method = UMAP before running subset_along_path")
#
# #expect_error(cds <- learn_graph(cds, learn_graph_control = list(FALSE)), "")
# #expect_error(cds <- subset_along_path(cds, learn_graph_control = list(prune = FALSE)), "Unknown variable in learn_graph_control")
# })
#
# cds <- preprocess_cds(cds)
# cds <- reduce_dimension(cds)
# cds <- reduce_dimension(cds)
# cds <- partition_cells(cds)
# cds <- learn_graph(cds)
#
# # This is a helper function to find the pr graph node that has the highest concentration of vehicle cells
# find_vehicle_pr_node = function(cds){
# cell_ids <- which(colData(cds)[, "vehicle"])
#
# closest_vertex <-
# cds@principal_graph_aux[["UMAP"]]$pr_graph_cell_proj_closest_vertex
# closest_vertex <- as.matrix(closest_vertex[colnames(cds), ])
# root_pr_nodes <-
# igraph::V(principal_graph(cds)[["UMAP"]])$name[as.numeric(names
# (which.max(table(closest_vertex[cell_ids,]))))]
#
# root_pr_nodes
# }
#
# cds = order_cells(cds, root_pr_nodes = find_vehicle_pr_node(cds))
#
# plot_cell_trajectory(cds)
| /tests/testthat/test-subset_cells.R | permissive | bioturing/monocle3 | R | false | false | 2,102 | r | # context("test-subset_cells")
#
# cds <- load_a549()
#
# test_that("test subset_along_path error messages work", {
# expect_error(cds <- subset_along_path(cds),
# "No dimensionality reduction for UMAP calculated. Please run reduce_dimension with reduction_method = UMAP and partition_cells before running learn_graph.")
# cds <- preprocess_cds(cds)
# expect_error(cds <- subset_along_path(cds),
# "No dimensionality reduction for UMAP calculated. Please run reduce_dimension with reduction_method = UMAP and partition_cells before running learn_graph.")
# cds <- reduce_dimension(cds)
# expect_error(cds <- subset_along_path(cds),
# "No cell partition for UMAP calculated. Please run partition_cells with reduction_method = UMAP before running learn_graph.")
# cds <- partition_cells(cds)
# expect_error(cds <- subset_along_path(cds),
# "No principal_graph for UMAP calculated. Please run learn_graph with reduction_method = UMAP before running subset_along_path")
#
# #expect_error(cds <- learn_graph(cds, learn_graph_control = list(FALSE)), "")
# #expect_error(cds <- subset_along_path(cds, learn_graph_control = list(prune = FALSE)), "Unknown variable in learn_graph_control")
# })
#
# cds <- preprocess_cds(cds)
# cds <- reduce_dimension(cds)
# cds <- reduce_dimension(cds)
# cds <- partition_cells(cds)
# cds <- learn_graph(cds)
#
# # This is a helper function to find the pr graph node that has the highest concentration of vehicle cells
# find_vehicle_pr_node = function(cds){
# cell_ids <- which(colData(cds)[, "vehicle"])
#
# closest_vertex <-
# cds@principal_graph_aux[["UMAP"]]$pr_graph_cell_proj_closest_vertex
# closest_vertex <- as.matrix(closest_vertex[colnames(cds), ])
# root_pr_nodes <-
# igraph::V(principal_graph(cds)[["UMAP"]])$name[as.numeric(names
# (which.max(table(closest_vertex[cell_ids,]))))]
#
# root_pr_nodes
# }
#
# cds = order_cells(cds, root_pr_nodes = find_vehicle_pr_node(cds))
#
# plot_cell_trajectory(cds)
|
\name{estMargProb}
\alias{estMargProb}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Estimated Marginal Probabilities
}
\description{
Estimates the marginal probability P(T=t|x) based on estimated hazard rates. The hazard rates may or may not depend on covariates. The covariates have to be equal across all estimated hazard rates. Therefore the given hazard rates should only vary over time.
}
\usage{
estMargProb(haz)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{haz}{
Numeric vector of estimated hazard rates.
}
}
\details{
The argument *haz* must be given for all intervals [a_0, a_1), [a_1, a_2), ..., [a_{q-1}, a_q), [a_{q}, Inf).
}
\value{
Named vector of estimated marginal probabilities.
}
\references{
Gerhard Tutz and Matthias Schmid, (2016), \emph{Modeling discrete time-to-event data}, Springer series in statistics, Doi: 10.1007/978-3-319-28158-2
}
\author{
Thomas Welchowski \email{welchow@imbie.meb.uni-bonn.de}
}
\note{
It is assumed that all time points up to the last interval [a_q, Inf) are available. If not already present, these can be added manually.
}
\seealso{
\code{\link{estSurv}}
}
\examples{
# Example unemployment data
library(Ecdat)
data(UnempDur)
# Select subsample
subUnempDur <- UnempDur [1:100, ]
# Convert to long format
UnempLong <- dataLong (dataSet=subUnempDur, timeColumn="spell", censColumn="censor1")
head(UnempLong)
# Estimate binomial model with logit link
Fit <- glm(formula=y ~ timeInt + age + logwage, data=UnempLong, family=binomial())
# Estimate discrete survival function given age, logwage of first person
hazard <- predict(Fit, newdata=subset(UnempLong, obj==1), type="response")
# Estimate marginal probabilities given age, logwage of first person
MarginalProbCondX <- estMargProb (c(hazard, 1))
MarginalProbCondX
sum(MarginalProbCondX)==1 # TRUE: Marginal probabilities must sum to 1!
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ survival }
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line | /man/estMargProb.Rd | no_license | imstatsbee/discSurv | R | false | false | 2,140 | rd | \name{estMargProb}
\alias{estMargProb}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Estimated Marginal Probabilities
}
\description{
Estimates the marginal probability P(T=t|x) based on estimated hazard rates. The hazard rates may or may not depend on covariates. The covariates have to be equal across all estimated hazard rates. Therefore the given hazard rates should only vary over time.
}
\usage{
estMargProb(haz)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{haz}{
Numeric vector of estimated hazard rates.
}
}
\details{
The argument *haz* must be given for all intervals [a_0, a_1), [a_1, a_2), ..., [a_{q-1}, a_q), [a_{q}, Inf).
}
\value{
Named vector of estimated marginal probabilities.
}
\references{
Gerhard Tutz and Matthias Schmid, (2016), \emph{Modeling discrete time-to-event data}, Springer series in statistics, Doi: 10.1007/978-3-319-28158-2
}
\author{
Thomas Welchowski \email{welchow@imbie.meb.uni-bonn.de}
}
\note{
It is assumed that all time points up to the last interval [a_q, Inf) are available. If not already present, these can be added manually.
}
\seealso{
\code{\link{estSurv}}
}
\examples{
# Example unemployment data
library(Ecdat)
data(UnempDur)
# Select subsample
subUnempDur <- UnempDur [1:100, ]
# Convert to long format
UnempLong <- dataLong (dataSet=subUnempDur, timeColumn="spell", censColumn="censor1")
head(UnempLong)
# Estimate binomial model with logit link
Fit <- glm(formula=y ~ timeInt + age + logwage, data=UnempLong, family=binomial())
# Estimate discrete survival function given age, logwage of first person
hazard <- predict(Fit, newdata=subset(UnempLong, obj==1), type="response")
# Estimate marginal probabilities given age, logwage of first person
MarginalProbCondX <- estMargProb (c(hazard, 1))
MarginalProbCondX
sum(MarginalProbCondX)==1 # TRUE: Marginal probabilities must sum to 1!
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ survival }
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line |
library(shiny)
library(tidyverse)
library(lubridate)
library(plotly)
# load the soccer prediction data
# (FiveThirtyEight club SPI match forecast CSV, fetched over the network at startup)
file <- "https://projects.fivethirtyeight.com/soccer-api/club/spi_matches.csv"
football <- read_csv(file = file)
# define variables used in the app
leagues <- unique(football$league) %>% sort()  # league choices for the dropdowns
date_max <- max(football$date)                 # latest match date in the data
# Define UI for application that shows football match predictions
ui <- fluidPage(
  # Application title
  titlePanel("538 Football Predictions"),
  tabsetPanel(
    # Tab 1: a filterable table of upcoming games and their outcome probabilities
    tabPanel("Games",
             # Sidebar with filtering options
             sidebarLayout(
               sidebarPanel(
                 # Wellpanel for filtering options
                 wellPanel(
                   # Wellpanel header
                   h2("Filters"),
                   # Select league
                   selectizeInput(
                     inputId = "league_g", label = "Choose a League",
                     choices = c("ALL", leagues), selected = "ALL"
                   ),
                   # Select team (choices rendered server-side; depend on the league)
                   uiOutput("teams_g"),
                   # Select starting date to be plotted
                   dateInput(
                     inputId = "start_date_g", label = "Starting Date",
                     min = today() - 1, max = max(football$date)
                   ),
                   # Select end date to be plotted (rendered server-side)
                   uiOutput("end_date_g"),
                   # Select minimum probability
                   # NOTE(review): the slider value is a percentage (0-100), while
                   # the data's `prob1` column is a 0-1 proportion; divide by 100
                   # before comparing the two.
                   sliderInput(inputId = "prob", label = "Minimum Probability", min = 0,
                               max = 100, value = 0, step = 1, post = "%")
                 )
               ),
               mainPanel(
                 DT::dataTableOutput("table")
               )
             )
    ),
    # Tab 2: an interactive probability-over-time plot for one team
    tabPanel("Team",
             # Sidebar with filtering options
             sidebarLayout(
               sidebarPanel(
                 # Wellpanel for filtering options
                 wellPanel(
                   # Wellpanel header
                   h2("Filters"),
                   # Select league
                   selectizeInput(
                     inputId = "league", label = "Choose a League",
                     choices = c("ALL", leagues), selected = "ALL"
                   ),
                   # Select team (choices rendered server-side; depend on the league)
                   uiOutput("teams"),
                   # Select starting date to be plotted
                   dateInput(
                     inputId = "start_date", label = "Starting Date",
                     min = today() - 1, max = max(football$date)
                   ),
                   # Select end date to be plotted (rendered server-side)
                   uiOutput("end_date"),
                   tags$small("* can be max 60 days from start date")
                 )
               ),
               # Show a plot of the generated distribution
               mainPanel(
                 plotlyOutput("plot")
               )
             )
    )
  )
)
# Define server logic required to draw a histogram
server <- function(input, output, session) {

  ## ---- "Games" (table) tab ------------------------------------------------

  # Team picker for the table tab, restricted to the chosen league.
  # Fix: the original concatenated `team1` twice, so teams that only appear
  # as the away side were missing from the dropdown; use team1 AND team2.
  output$teams_g <- renderUI({
    football_league <- football
    if (input$league_g != "ALL") {
      football_league <- football_league %>%
        filter(league == input$league_g)
    }
    teams <- c("ALL", unique(c(football_league$team1, football_league$team2))) %>% sort()
    selectizeInput(
      inputId = "team_g", label = "Choose a Team",
      choices = teams, selected = "ALL"
    )
  })

  # End-date picker for the table tab: any date between the chosen start and
  # the last date available in the data (defaults to the last date).
  output$end_date_g <- renderUI({
    min_end_date <- input$start_date_g
    max_end_date <- date_max
    current_selection <- date_max
    dateInput(
      inputId = "end_date_g", label = "End Date",
      value = current_selection, min = min_end_date, max = max_end_date
    )
  })

  # Data backing the table. Fixes relative to the original:
  #  * team "ALL" used to match zero rows (no team is literally named "ALL");
  #    it now means "do not filter by team";
  #  * the league selector was silently ignored; it is now applied;
  #  * the probability slider is in percent (0-100) while `prob1` is a 0-1
  #    proportion, so the threshold is divided by 100 before comparing.
  # req() guards against the transient NULLs of the server-rendered inputs.
  football_games <- reactive({
    req(input$team_g, input$start_date_g, input$end_date_g)
    out <- football
    if (input$league_g != "ALL") {
      out <- filter(out, league == input$league_g)
    }
    if (input$team_g != "ALL") {
      out <- filter(out, team1 == input$team_g | team2 == input$team_g)
    }
    out %>%
      select(date, league, team1, team2, prob1:probtie) %>%
      arrange(date) %>%
      filter(date >= input$start_date_g, date <= input$end_date_g) %>%
      filter(prob1 >= input$prob / 100)
  })

  # Render the probability table; columns 5-7 formatted as percentages.
  # Use DT::renderDataTable to match DT::dataTableOutput in the UI.
  output$table <- DT::renderDataTable({
    DT::datatable(data = football_games(), caption = "Outcome probabilities",
                  colnames = c("Date", "League", "Home Team", "Away Team",
                               "Home Win", "Away Win", "Tie")
    ) %>%
      DT::formatPercentage(5:7, 2)
  })

  ## ---- "Team" (plot) tab --------------------------------------------------

  # Team picker for the plot tab (same team1/team2 fix as above; no "ALL"
  # option here since the plot always shows a single team).
  output$teams <- renderUI({
    football_league <- football
    if (input$league != "ALL") {
      football_league <- football_league %>%
        filter(league == input$league)
    }
    teams <- unique(c(football_league$team1, football_league$team2)) %>% sort()
    selectizeInput(
      inputId = "team", label = "Choose a Team",
      choices = teams, selected = "Ajax"
    )
  })

  # End-date picker for the plot tab: at most 60 days after the chosen start
  # (matching the note in the UI), defaulting to start + 14 days.
  output$end_date <- renderUI({
    min_end_date <- input$start_date
    max_end_date <- input$start_date + 60
    current_selection <- input$start_date + 14
    dateInput(
      inputId = "end_date", label = "End Date",
      value = current_selection, min = min_end_date, max = max_end_date
    )
  })

  # Long-format data for the selected team: one row per game and outcome
  # (Win / Lose / Draw) with the probability from the team's point of view.
  football_filtered <- reactive({
    req(input$team, input$start_date, input$end_date)
    football %>%
      filter(team1 == input$team | team2 == input$team) %>%
      select(date, league, team1, team2, prob1:probtie) %>%
      mutate(site = ifelse(input$team == team1, "Home", "Away"),
             opponent = ifelse(input$team == team1, team2, team1),
             Win = ifelse(site == "Home", prob1, prob2),
             Lose = ifelse(site == "Home", prob2, prob1),
             Draw = probtie,
             team = input$team) %>%
      select(-c(team1:probtie)) %>%
      gather(key = "outcome", value = "probability", Win, Lose, Draw) %>%
      arrange(date) %>%
      filter(date >= input$start_date, date <= input$end_date)
  })

  # Interactive probability-over-time plot for the selected team.
  output$plot <- renderPlotly({
    plot_ly(data = football_filtered(),
            x = ~date,
            y = ~probability,
            hoverinfo = "text",
            text = ~paste("P:", round(probability, 2), "<br>",
                          "Opponent:", opponent, "<br>",
                          "Site:", site, "<br>",
                          league)) %>%
      add_markers(symbol = ~factor(site), hoverinfo = "none") %>%
      add_lines(color = ~fct_rev(outcome), colors = c("#66DF90", "#D15656", "#6692DF")) %>%
      layout(xaxis = list(title = "Date", tickangle = 45, type = "date",
                          tickformat = "%d %B (%a)<br>%Y"),
             yaxis = list(title = "Probability"),
             title = input$team,
             hovermode = "compare")
  })
}
# Run the application
shinyApp(ui = ui, server = server) | /predictify/app1.R | no_license | lukasklima/shiny_apps | R | false | false | 7,315 | r | library(shiny)
library(tidyverse)
library(lubridate)
library(plotly)
# Load FiveThirtyEight's club soccer (SPI) match predictions straight from
# their public API; one row per match with win/tie probabilities.
file <- "https://projects.fivethirtyeight.com/soccer-api/club/spi_matches.csv"
football <- read_csv(file = file)
# App-wide constants: sorted league names for the selectors and the last
# date present in the data (upper bound for the Games tab date pickers).
leagues <- unique(football$league) %>% sort()
date_max <- max(football$date)
# UI: two tabs -- "Games" (filterable table of match predictions) and
# "Team" (per-team probability plot).  The team and end-date controls are
# uiOutput placeholders filled in reactively by the server.
ui <- fluidPage(
  # Application title
  titlePanel("538 Football Predictions"),
  tabsetPanel(
    # Define tabpanel for the table
    tabPanel("Games",
      # Sidebar with filtering options
      sidebarLayout(
        sidebarPanel(
          # Wellpanel for filtering options
          wellPanel(
            # Wellpanel header
            h2("Filters"),
            # Select league
            selectizeInput(
              inputId = "league_g", label = "Choose a League",
              choices = c("ALL", leagues), selected = "ALL"
            ),
            # Select team (rendered server-side, depends on league choice)
            uiOutput("teams_g"),
            # Select starting date to be plotted
            dateInput(
              inputId = "start_date_g", label = "Starting Date",
              min = today() - 1, max = max(football$date)
            ),
            # Select end date (rendered server-side, depends on start date)
            uiOutput("end_date_g"),
            # Minimum home-win probability threshold, shown in percent
            sliderInput(inputId = "prob", label = "Minimum Probability", min = 0,
                        max = 100, value = 0, step = 1, post = "%")
          )
        ),
        mainPanel(
          DT::dataTableOutput("table")
        )
      )
    ),
    tabPanel("Team",
      # Sidebar with filtering options
      sidebarLayout(
        sidebarPanel(
          # Wellpanel for filtering options
          wellPanel(
            # Wellpanel header
            h2("Filters"),
            # Select league
            selectizeInput(
              inputId = "league", label = "Choose a League",
              choices = c("ALL", leagues), selected = "ALL"
            ),
            # Select team (rendered server-side)
            uiOutput("teams"),
            # Select starting date to be plotted
            dateInput(
              inputId = "start_date", label = "Starting Date",
              min = today() - 1, max = max(football$date)
            ),
            # Select end date (rendered server-side)
            uiOutput("end_date"),
            tags$small("* can be max 60 days from start date")
          )
        ),
        # Show the interactive probability plot
        mainPanel(
          plotlyOutput("plot")
        )
      )
    )
  )
)
# Server logic: builds the reactive pipeline for the Games table and the
# per-team probability plot.
# Fixes relative to the original:
#  * c(team1, team1) duplicated team1 and omitted team2 (away-only teams
#    were missing from both team selectors);
#  * the Games tab league selector was offered but never applied;
#  * the "ALL" team choice matched zero rows;
#  * the probability slider is in percent (0-100) while prob1 is 0-1;
#  * DT::renderDataTable is used for the DT widget (shiny::renderDataTable
#    expects a plain data frame);
#  * req() guards inputs produced by renderUI, which are NULL at startup.
server <- function(input, output, session) {
  ## Table tab
  # Team selector for the Games tab, restricted to the chosen league.
  output$teams_g <- renderUI({
    football_league <- football
    if (input$league_g != "ALL") {
      football_league <- football_league %>%
        filter(league == input$league_g)
    }
    # include both home (team1) and away (team2) sides
    teams <- c("ALL", unique(c(football_league$team1, football_league$team2))) %>% sort()
    selectizeInput(
      inputId = "team_g", label = "Choose a Team",
      choices = teams, selected = "ALL"
    )
  })
  # End-date picker for the Games tab; defaults to the last available date.
  output$end_date_g <- renderUI({
    min_end_date <- input$start_date_g
    max_end_date <- date_max
    current_selection <- date_max
    dateInput(
      inputId = "end_date_g", label = "End Date",
      value = current_selection, min = min_end_date, max = max_end_date
    )
  })
  # Reactive data for the games table.
  football_games <- reactive({
    req(input$team_g, input$end_date_g)  # renderUI inputs are NULL at startup
    games <- football
    if (input$league_g != "ALL") {
      games <- games %>% filter(league == input$league_g)
    }
    # "ALL" means no team restriction
    if (input$team_g != "ALL") {
      games <- games %>% filter(team1 == input$team_g | team2 == input$team_g)
    }
    games %>%
      select(date, league, team1, team2, prob1:probtie) %>%
      arrange(date) %>%
      filter(date >= input$start_date_g, date <= input$end_date_g) %>%
      filter(prob1 >= input$prob / 100)  # slider value is a percentage
  })
  # Render the games table as a DT widget with percent-formatted columns.
  output$table <- DT::renderDataTable({
    DT::datatable(data = football_games(), caption = "Outcome probabilities",
                  colnames = c("Date", "League", "Home Team", "Away Team", "Home Win", "Away Win", "Tie")
    ) %>%
      DT::formatPercentage(5:7, 2)
  })
  ## Plot tab
  # Team selector for the plot tab, restricted to the chosen league.
  output$teams <- renderUI({
    football_league <- football
    if (input$league != "ALL") {
      football_league <- football_league %>%
        filter(league == input$league)
    }
    # include both home (team1) and away (team2) sides
    teams <- unique(c(football_league$team1, football_league$team2)) %>% sort()
    selectizeInput(
      inputId = "team", label = "Choose a Team",
      choices = teams, selected = "Ajax"
    )
  })
  # End-date picker: defaults to start + 14 days, capped at start + 60.
  output$end_date <- renderUI({
    min_end_date <- input$start_date
    max_end_date <- input$start_date + 60
    current_selection <- input$start_date + 14
    dateInput(
      inputId = "end_date", label = "End Date",
      value = current_selection, min = min_end_date, max = max_end_date
    )
  })
  # Long-format per-outcome probabilities for the selected team.
  football_filtered <- reactive({
    req(input$team, input$end_date)
    football %>%
      filter(team1 == input$team | team2 == input$team) %>%
      select(date, league, team1, team2, prob1:probtie) %>%
      # re-express home/away probabilities from the selected team's view
      mutate(site = ifelse(input$team == team1, "Home", "Away"),
             opponent = ifelse(input$team == team1, team2, team1),
             Win = ifelse(site == "Home", prob1, prob2),
             Lose = ifelse(site == "Home", prob2, prob1),
             Draw = probtie,
             team = input$team) %>%
      select(-c(team1:probtie)) %>%
      gather(key = "outcome", value = "probability", Win, Lose, Draw) %>%
      arrange(date) %>%
      filter(date >= input$start_date, date <= input$end_date)
  })
  # Interactive probability plot: markers by site, lines by outcome.
  output$plot <- renderPlotly({
    plot_ly(data = football_filtered(),
            x = ~date,
            y = ~probability,
            hoverinfo = "text",
            text = ~paste("P:", round(probability, 2), "<br>",
                          "Opponent:", opponent, "<br>",
                          "Site:", site, "<br>",
                          league)) %>%
      add_markers(symbol = ~factor(site), hoverinfo = "none") %>%
      add_lines(color = ~fct_rev(outcome), colors = c("#66DF90", "#D15656", "#6692DF")) %>%
      layout(xaxis = list(title = "Date", tickangle = 45, type = "date",
                          tickformat = "%d %B (%a)<br>%Y"),
             yaxis = list(title = "Probability"),
             title = input$team,
             hovermode = "compare")
  })
}
# Launch the Shiny app defined by `ui` and `server` above.
shinyApp(ui = ui, server = server)
/Logistic Regression University Dropouts (average marginal effects).R | no_license | ManuelaKochRogge/Docs-and-things | R | false | false | 2,766 | r | ||
library(tidyverse)
library(keras)
require(gbm)
require(data.table)
library(pROC)
library(rpart)
library(ROSE)
library(DMwR) # Loading DMwr to balance the unbalanced class
# Load the pre-processed insurance-claims dataset (one row per claim, with
# FraudFound as the response).  The commented-out lines below are earlier
# experiments on the raw carclaims.csv kept for reference.
# data <- read.csv('./data/carclaims.csv')
# glimpse(data)
data <- read.csv('./data/Pre-Processed.csv')
str(data)
# data$MakeGLM <- as.integer(data$Make)
# data$AccidentAreaGLM <- as.integer(data$AccidentArea)
# data$SexGLM <- as.integer(data$Sex)
# data$MaritalStatusGLM <- as.integer(data$MaritalStatus)
# data$FraudFound <- ifelse(data$FraudFound == "Yes", 1, 0)
#
# levels(data$MaritalStatus)
# levels(data$PastNumberOfClaims)
# data$PastNumberOfClaims <- ordered(data$PastNumberOfClaims, levels = c( "none", "1", "2 to 4", "more than 4"))
# levels(data$Days.Policy.Accident)
# data$Days.Policy.Accident <- ordered(data$Days.Policy.Accident)
# levels(data$Days.Policy.Claim)
# data$Days.Policy.Claim <- ordered(data$Days.Policy.Claim, levels = c("8 to 15", "15 to 30", "more than 30"))
# levels(data$AgeOfVehicle)
# data$AgeOfVehicle <- ordered(data$AgeOfVehicle, levels = c("less than 4 years", "4 to 6 years", "more than 7"))
# levels(data$NumberOfSuppliments)
# data$NumberOfSuppliments <- ordered(data$NumberOfSuppliments, levels = c("none", "1 to 2", "3 to 5", "more than 5"))
# levels(data$AddressChange.Claim)
# data$AddressChange.Claim <- ordered(data$AddressChange.Claim, levels = c("no change", "0 to 3 years", "4 to 8 years"))
# levels(data$NumberOfCars)
# data$NumberOfCars <- ordered(data$NumberOfCars)
# Re-declare the categorical columns as (unordered) factors with explicitly
# ordered level vectors, so the integer codes produced later by
# as.numeric(factor) follow the natural ordering of the categories.
levels(data$MaritalStatus)
levels(data$PastNumberOfClaims)
data$PastNumberOfClaims <- factor(data$PastNumberOfClaims, levels = c( "none", "1", "2 to 4", "more than 4"))
levels(data$Days.Policy.Accident)
data$Days.Policy.Accident <- factor(data$Days.Policy.Accident)
levels(data$Days.Policy.Claim)
data$Days.Policy.Claim <- factor(data$Days.Policy.Claim, levels = c("8 to 15", "15 to 30", "more than 30"))
levels(data$AgeOfVehicle)
data$AgeOfVehicle <- factor(data$AgeOfVehicle, levels = c("less than 4 years", "4 to 6 years", "more than 7"))
levels(data$NumberOfSuppliments)
data$NumberOfSuppliments <- factor(data$NumberOfSuppliments, levels = c("none", "1 to 2", "3 to 5", "more than 5"))
levels(data$AddressChange.Claim)
data$AddressChange.Claim <- factor(data$AddressChange.Claim, levels = c("no change", "0 to 3 years", "4 to 8 years"))
levels(data$NumberOfCars)
data$NumberOfCars <- factor(data$NumberOfCars)
# Response as a factor ("No"/"Yes") for the ROSE undersampling step below.
data$FraudFound <- as.factor(data$FraudFound)
str(data)
###############################################
######### choosing learning and test sample
###############################################
## Smote : Synthetic Minority Oversampling Technique To Handle Class Imbalancy In Binary Classification
# balanced.data <- SMOTE(FraudFound ~., data, perc.over = (14000/923)*100, k = 5, perc.under = 105)
# table(balanced.data$FraudFound)
# Undersample the majority (non-fraud) class with ROSE so the balanced set
# has N = 3 * 923 rows (923 = number of fraud cases), i.e. roughly 1:2
# fraud:non-fraud.  seed fixed for reproducibility.
data_balanced_under <- ovun.sample(FraudFound ~ ., data = data, method = "under", N = 923*3, seed = 1)$data
table(data_balanced_under$FraudFound)
data <- data_balanced_under
# data <- balanced.data
# 80/20 train ("learn") / test split on the balanced data; `ll` holds the
# training row indices and is reused by the GLM and NN sections below.
set.seed(100)
ll <- sample(c(1:nrow(data)), round(0.8*nrow(data)), replace = FALSE)
learn <- data[ll,]
test <- data[-ll,]
(n_l <- nrow(learn))
(n_t <- nrow(test))
# sum(learn$ClaimNb)/sum(learn$Exposure)
##############################################
############### GLM analysis ###############
##############################################
# Baseline model: logistic regression (binomial GLM) of fraud on the claim
# features, fitted on the training split and evaluated by ROC/AUC on test.
dataGLM <- data
# 0/1 numeric response for glm() and for the NN targets later on.
dataGLM$FraudFound <- ifelse(dataGLM$FraudFound == "Yes", 1, 0)
learnGLM <- dataGLM[ll,]
testGLM <- dataGLM[-ll,]
(n_l <- nrow(learnGLM))
(n_t <- nrow(testGLM))
# braces only time the fit with proc.time()
{t1 <- proc.time()
d.glm <- glm(FraudFound ~ daysDiff + Deductible + Age + Fault + PastNumberOfClaims +
               VehiclePrice + AddressChange.Claim + Make + DriverRating + VehicleCategory +
               NumberOfSuppliments + MaritalStatus + BasePolicy + AccidentArea + PoliceReportFiled,
             data=learnGLM, family=binomial())
(proc.time()-t1)}
summary(d.glm)
# Fitted fraud probabilities on train/test/full data.
learnGLM$fitGLM <- fitted(d.glm)
testGLM$fitGLM <- predict(d.glm, newdata=testGLM, type="response")
dataGLM$fitGLM <- predict(d.glm, newdata=dataGLM, type="response")
# Out-of-sample ROC and AUC (pROC).
result.roc <- roc(testGLM$FraudFound, testGLM$fitGLM)
auc(result.roc)
# plot(result.roc, print.thres="best", print.thres.best.method="closest.topleft")
# Best threshold by the closest-to-top-left criterion.
result.coords <- coords(
  result.roc, "best", best.method="closest.topleft", ret=c("threshold", "accuracy"))
print(result.coords)
# NOTE(review): prediction()/performance() are ROCR functions, but ROCR is
# not in the library() calls above -- confirm it is attached elsewhere or
# add library(ROCR).
pred<-prediction(testGLM$fitGLM,testGLM$FraudFound)
perf <- performance(pred,"tpr","fpr")
plot(perf)
abline(a=0,b=1, col="red", lty=2)
# Make prediction using the best top-left cutoff.
# (assumes coords() returns the threshold in its first column -- confirm)
result.predicted.label <- ifelse(testGLM$fitGLM > result.coords[1,1], 1, 0)
xtabs(~ result.predicted.label + testGLM$FraudFound)
accuracy.meas(testGLM$FraudFound, result.predicted.label)
######################################################
######### feature pre-processing for (CA)NN Embedding
######################################################
PreProcess.Continuous <- function(var1, dat2) {
  # Min-max scale column `var1` of `dat2` onto [-1, 1] and append the result
  # as a new column named "<var1>X"; the original column is left untouched.
  # Factor columns are scaled via their integer level codes (as.numeric).
  vals <- as.numeric(dat2[[var1]])
  rng <- range(vals)
  dat2[[paste(var1, "X", sep = "")]] <- 2 * (vals - rng[1]) / (rng[2] - rng[1]) - 1
  dat2
}
Features.PreProcess <- function(dat2) {
  # Apply PreProcess.Continuous to every model feature, adding one scaled
  # "<name>X" column per feature (same feature set and order as before).
  feature_names <- c(
    "daysDiff", "Deductible", "Age", "Fault", "PastNumberOfClaims",
    "VehiclePrice", "AddressChange.Claim", "Make", "DriverRating",
    "VehicleCategory", "NumberOfSuppliments", "MaritalStatus",
    "BasePolicy", "AccidentArea", "PoliceReportFiled"
  )
  for (nm in feature_names) {
    dat2 <- PreProcess.Continuous(nm, dat2)
  }
  dat2
}
# Scaled feature columns (one "<name>X" per feature in [-1, 1]) for the NN;
# dataGLM already carries the 0/1 response and the GLM fitted values.
dataNN <- Features.PreProcess(dataGLM)
###############################################
######### choosing learning and test sample
###############################################
table(dataNN$FraudFound)
# dataNN$FraudFound <- ifelse(dataNN$FraudFound == "Yes", 1, 0)
# data_balanced_under <- ovun.sample(FraudFound ~ ., data = dataNN, method = "under", N = 923*2, seed = 1)$data
# table(data_balanced_under$FraudFound)
# set.seed(100)
# ll <- sample(c(1:nrow(data_balanced_under)), round(0.8*nrow(data_balanced_under)), replace = FALSE)
# Reuse the same train/test indices `ll` as the GLM so both models are
# evaluated on identical splits.
learnNN <- dataNN[ll,]
testNN <- dataNN[-ll,]
(n_l <- nrow(learnNN))
(n_t <- nrow(testNN))
#######################################################
######### neural network definitions for model (3.11)
#######################################################
# Keras inputs: a single 15-column matrix of the scaled features, wrapped in
# a list (the model takes one input tensor).  The commented fitGLM lines are
# leftovers from a CANN variant that also fed the GLM prediction.
learnNN.x <- list(as.matrix(learnNN[,c("VehiclePriceX", "MakeX", "VehicleCategoryX","AgeX", "FaultX", "DriverRatingX",
                                       "MaritalStatusX", "PoliceReportFiledX", "daysDiffX", "DeductibleX",
                                       "PastNumberOfClaimsX", "AddressChange.ClaimX", "NumberOfSupplimentsX",
                                       "BasePolicyX", "AccidentAreaX")]))
# as.matrix(learnNN$fitGLM) )
testNN.x <- list(as.matrix(testNN[,c("VehiclePriceX", "MakeX", "VehicleCategoryX","AgeX", "FaultX", "DriverRatingX",
                                     "MaritalStatusX", "PoliceReportFiledX", "daysDiffX", "DeductibleX",
                                     "PastNumberOfClaimsX", "AddressChange.ClaimX", "NumberOfSupplimentsX",
                                     "BasePolicyX", "AccidentAreaX")]))
# as.matrix(testNN$fitGLM) )
# Hidden layer sizes for all sub-networks below.
neurons <- c(20,15,10)
# No.Labels <- length(unique(learn$VehBrandX))
###############################################
######### definition of neural network (3.11)
###############################################
# Build and compile the Keras classifier: a plain feed-forward net
# (15 inputs -> 20 -> 15 -> 10 -> 1 sigmoid) trained with binary
# cross-entropy.  NOTE(review): Cont2/Cont3 and NNetwork2/NNetwork3 are
# constructed but NOT part of the returned model (inputs = Cont1, outputs =
# NNetwork1); they appear to be leftovers from the fuller CANN architecture
# -- confirm before deleting.
model.2IA <- function(){
  # Input tensors; only Cont1 (the 15 scaled features) is actually used.
  Cont1 <- layer_input(shape = c(15), dtype = 'float32', name='Cont1')
  Cont2 <- layer_input(shape = c(5), dtype = 'float32', name='Cont2')
  Cont3 <- layer_input(shape = c(7), dtype = 'float32', name='Cont3')
  # GLM <- layer_input(shape = c(1), dtype = 'float32', name = 'GLM')
  x.input <- c(Cont1)
  #
  # Cat1_embed = Cat1 %>%
  # layer_embedding(input_dim = No.Labels, output_dim = 2, trainable=TRUE,
  # input_length = 1, name = 'Cat1_embed') %>%
  # layer_flatten(name='Cat1_flat')
  #
  # NNetwork1 = list(Cont1, Cat1_embed) %>% layer_concatenate(name='cont') %>%
  # layer_dense(units=neurons[1], activation='tanh', name='hidden1') %>%
  # layer_dense(units=neurons[2], activation='tanh', name='hidden2') %>%
  # layer_dense(units=neurons[3], activation='tanh', name='hidden3') %>%
  # layer_dense(units=1, activation='linear', name='NNetwork1',
  # weights=list(array(0, dim=c(neurons[3],1)), array(0, dim=c(1))))
  # Main branch: 3 tanh hidden layers, sigmoid output = fraud probability.
  NNetwork1 = Cont1 %>%
    layer_dense(units=neurons[1], activation='tanh', name='hidden1') %>%
    layer_dense(units=neurons[2], activation='tanh', name='hidden2') %>%
    layer_dense(units=neurons[3], activation='tanh', name='hidden3') %>%
    layer_dense(units=1, activation='sigmoid', name='NNetwork1')
  # weights=list(array(0, dim=c(neurons[3],1)), array(0, dim=c(1))))
  #
  # Dead branch (not connected to the returned model's output).
  NNetwork2 = Cont2 %>%
    layer_dense(units=neurons[1], activation='tanh', name='hidden4') %>%
    layer_dense(units=neurons[2], activation='tanh', name='hidden5') %>%
    layer_dense(units=neurons[3], activation='tanh', name='hidden6') %>%
    layer_dense(units=1, activation='tanh', name='NNetwork2')
  # weights=list(array(0, dim=c(neurons[3],1)), array(0, dim=c(1))))
  #
  # Dead branch (not connected to the returned model's output).
  NNetwork3 = Cont3 %>%
    layer_dense(units=neurons[1], activation='tanh', name='hidden7') %>%
    layer_dense(units=neurons[2], activation='tanh', name='hidden8') %>%
    layer_dense(units=neurons[3], activation='tanh', name='hidden9') %>%
    layer_dense(units=1, activation='tanh', name='NNetwork3')
  # weights=list(array(0, dim=c(neurons[3],1)), array(0, dim=c(1))))
  #
  # NNoutput = list(NNetwork1) %>% layer_add(name='Add') %>%
  # layer_dense(units=1, activation='sigmoid', name = 'NNoutput')
  # # trainable=TRUE, weights=list(array(c(1), dim=c(1,1)), array(0, dim=c(1))))
  model <- keras_model(inputs = x.input, outputs = c(NNetwork1))
  model %>% compile(optimizer = optimizer_nadam(), loss = 'binary_crossentropy')
  model
}
# Fit the network on the training features/0-1 labels and evaluate it on the
# test split with ROC/AUC, mirroring the GLM evaluation above.
model <- model.2IA()
summary(model)
# may take a couple of minutes if epochs is more than 100
{t1 <- proc.time()
fit <- model %>% fit(learnNN.x, as.matrix(learnNN$FraudFound), epochs=400, batch_size=500, verbose=0,
                     validation_data=list(testNN.x, as.matrix(testNN$FraudFound)))
(proc.time()-t1)}
# This plot should not be studied because in a thorough analyis one should not track
# out-of-sample losses on the epochs, however, it is quite illustrative, here.
# oos <- 200* fit[[2]]$val_loss + 200*(-mean(test$ClaimNb)+mean(log(test$ClaimNb^test$ClaimNb)))
# plot(oos, type='l', ylim=c(31.5,32.1), xlab="epochs", ylab="out-of-sample loss", cex=1.5, cex.lab=1.5, main=list(paste("Model GAM+ calibration", sep=""), cex=1.5) )
# abline(h=c(32.07597, 31.50136), col="orange", lty=2)
# Predicted fraud probabilities on train and test.
learn0 <- learnNN
learn0$fitGANPlus <- as.vector(model %>% predict(learnNN.x))
test0 <- testNN
test0$fitGANPlus <- as.vector(model %>% predict(testNN.x))
# NOTE(review): prediction()/performance() are ROCR functions; ROCR is not
# in the library() calls above -- confirm it is attached or add library(ROCR).
pred<-prediction(test0$fitGANPlus,test0$FraudFound)
perf <- performance(pred,"tpr","fpr")
plot(perf)
abline(a=0,b=1, col="red", lty=2)
# Draw ROC curve.
result.roc <- roc(test0$FraudFound, test0$fitGANPlus)
auc(result.roc)
# plot(result.roc, print.thres="best", print.thres.best.method="closest.topleft")
# Best threshold by the closest-to-top-left criterion.
result.coords <- coords(
  result.roc, "best", best.method="closest.topleft", ret=c("threshold", "accuracy"))
print(result.coords)
# Make prediction using the best top-left cutoff.
result.predicted.label <- ifelse(test0$fitGANPlus > result.coords[1,1], 1, 0)
xtabs(~ result.predicted.label + test0$FraudFound)
accuracy.meas(test0$FraudFound, result.predicted.label)
| /Health Insurance/03_Undersampled/under_SimpleNN.R | no_license | RohanYashraj/CANN-for-Fraud-Detection | R | false | false | 12,325 | r | library(tidyverse)
library(keras)
require(gbm)
require(data.table)
library(pROC)
library(rpart)
library(ROSE)
library(DMwR) # Loading DMwr to balance the unbalanced class
# Load the pre-processed insurance-claims dataset (one row per claim, with
# FraudFound as the response).  The commented-out lines below are earlier
# experiments on the raw carclaims.csv kept for reference.
# data <- read.csv('./data/carclaims.csv')
# glimpse(data)
data <- read.csv('./data/Pre-Processed.csv')
str(data)
# data$MakeGLM <- as.integer(data$Make)
# data$AccidentAreaGLM <- as.integer(data$AccidentArea)
# data$SexGLM <- as.integer(data$Sex)
# data$MaritalStatusGLM <- as.integer(data$MaritalStatus)
# data$FraudFound <- ifelse(data$FraudFound == "Yes", 1, 0)
#
# levels(data$MaritalStatus)
# levels(data$PastNumberOfClaims)
# data$PastNumberOfClaims <- ordered(data$PastNumberOfClaims, levels = c( "none", "1", "2 to 4", "more than 4"))
# levels(data$Days.Policy.Accident)
# data$Days.Policy.Accident <- ordered(data$Days.Policy.Accident)
# levels(data$Days.Policy.Claim)
# data$Days.Policy.Claim <- ordered(data$Days.Policy.Claim, levels = c("8 to 15", "15 to 30", "more than 30"))
# levels(data$AgeOfVehicle)
# data$AgeOfVehicle <- ordered(data$AgeOfVehicle, levels = c("less than 4 years", "4 to 6 years", "more than 7"))
# levels(data$NumberOfSuppliments)
# data$NumberOfSuppliments <- ordered(data$NumberOfSuppliments, levels = c("none", "1 to 2", "3 to 5", "more than 5"))
# levels(data$AddressChange.Claim)
# data$AddressChange.Claim <- ordered(data$AddressChange.Claim, levels = c("no change", "0 to 3 years", "4 to 8 years"))
# levels(data$NumberOfCars)
# data$NumberOfCars <- ordered(data$NumberOfCars)
# Re-declare the categorical columns as (unordered) factors with explicitly
# ordered level vectors, so the integer codes produced later by
# as.numeric(factor) follow the natural ordering of the categories.
levels(data$MaritalStatus)
levels(data$PastNumberOfClaims)
data$PastNumberOfClaims <- factor(data$PastNumberOfClaims, levels = c( "none", "1", "2 to 4", "more than 4"))
levels(data$Days.Policy.Accident)
data$Days.Policy.Accident <- factor(data$Days.Policy.Accident)
levels(data$Days.Policy.Claim)
data$Days.Policy.Claim <- factor(data$Days.Policy.Claim, levels = c("8 to 15", "15 to 30", "more than 30"))
levels(data$AgeOfVehicle)
data$AgeOfVehicle <- factor(data$AgeOfVehicle, levels = c("less than 4 years", "4 to 6 years", "more than 7"))
levels(data$NumberOfSuppliments)
data$NumberOfSuppliments <- factor(data$NumberOfSuppliments, levels = c("none", "1 to 2", "3 to 5", "more than 5"))
levels(data$AddressChange.Claim)
data$AddressChange.Claim <- factor(data$AddressChange.Claim, levels = c("no change", "0 to 3 years", "4 to 8 years"))
levels(data$NumberOfCars)
data$NumberOfCars <- factor(data$NumberOfCars)
# Response as a factor ("No"/"Yes") for the ROSE undersampling step below.
data$FraudFound <- as.factor(data$FraudFound)
str(data)
###############################################
######### choosing learning and test sample
###############################################
## Smote : Synthetic Minority Oversampling Technique To Handle Class Imbalancy In Binary Classification
# balanced.data <- SMOTE(FraudFound ~., data, perc.over = (14000/923)*100, k = 5, perc.under = 105)
# table(balanced.data$FraudFound)
# Undersample the majority (non-fraud) class with ROSE so the balanced set
# has N = 3 * 923 rows (923 = number of fraud cases), i.e. roughly 1:2
# fraud:non-fraud.  seed fixed for reproducibility.
data_balanced_under <- ovun.sample(FraudFound ~ ., data = data, method = "under", N = 923*3, seed = 1)$data
table(data_balanced_under$FraudFound)
data <- data_balanced_under
# data <- balanced.data
# 80/20 train ("learn") / test split on the balanced data; `ll` holds the
# training row indices and is reused by the GLM and NN sections below.
set.seed(100)
ll <- sample(c(1:nrow(data)), round(0.8*nrow(data)), replace = FALSE)
learn <- data[ll,]
test <- data[-ll,]
(n_l <- nrow(learn))
(n_t <- nrow(test))
# sum(learn$ClaimNb)/sum(learn$Exposure)
##############################################
############### GLM analysis ###############
##############################################
# Baseline model: logistic regression (binomial GLM) of fraud on the claim
# features, fitted on the training split and evaluated by ROC/AUC on test.
dataGLM <- data
# 0/1 numeric response for glm() and for the NN targets later on.
dataGLM$FraudFound <- ifelse(dataGLM$FraudFound == "Yes", 1, 0)
learnGLM <- dataGLM[ll,]
testGLM <- dataGLM[-ll,]
(n_l <- nrow(learnGLM))
(n_t <- nrow(testGLM))
# braces only time the fit with proc.time()
{t1 <- proc.time()
d.glm <- glm(FraudFound ~ daysDiff + Deductible + Age + Fault + PastNumberOfClaims +
               VehiclePrice + AddressChange.Claim + Make + DriverRating + VehicleCategory +
               NumberOfSuppliments + MaritalStatus + BasePolicy + AccidentArea + PoliceReportFiled,
             data=learnGLM, family=binomial())
(proc.time()-t1)}
summary(d.glm)
# Fitted fraud probabilities on train/test/full data.
learnGLM$fitGLM <- fitted(d.glm)
testGLM$fitGLM <- predict(d.glm, newdata=testGLM, type="response")
dataGLM$fitGLM <- predict(d.glm, newdata=dataGLM, type="response")
# Out-of-sample ROC and AUC (pROC).
result.roc <- roc(testGLM$FraudFound, testGLM$fitGLM)
auc(result.roc)
# plot(result.roc, print.thres="best", print.thres.best.method="closest.topleft")
# Best threshold by the closest-to-top-left criterion.
result.coords <- coords(
  result.roc, "best", best.method="closest.topleft", ret=c("threshold", "accuracy"))
print(result.coords)
# NOTE(review): prediction()/performance() are ROCR functions, but ROCR is
# not in the library() calls above -- confirm it is attached elsewhere or
# add library(ROCR).
pred<-prediction(testGLM$fitGLM,testGLM$FraudFound)
perf <- performance(pred,"tpr","fpr")
plot(perf)
abline(a=0,b=1, col="red", lty=2)
# Make prediction using the best top-left cutoff.
# (assumes coords() returns the threshold in its first column -- confirm)
result.predicted.label <- ifelse(testGLM$fitGLM > result.coords[1,1], 1, 0)
xtabs(~ result.predicted.label + testGLM$FraudFound)
accuracy.meas(testGLM$FraudFound, result.predicted.label)
######################################################
######### feature pre-processing for (CA)NN Embedding
######################################################
PreProcess.Continuous <- function(var1, dat2) {
  # Min-max scale column `var1` of `dat2` onto [-1, 1] and append the result
  # as a new column named "<var1>X"; the original column is left untouched.
  # Factor columns are scaled via their integer level codes (as.numeric).
  vals <- as.numeric(dat2[[var1]])
  rng <- range(vals)
  dat2[[paste(var1, "X", sep = "")]] <- 2 * (vals - rng[1]) / (rng[2] - rng[1]) - 1
  dat2
}
Features.PreProcess <- function(dat2) {
  # Apply PreProcess.Continuous to every model feature, adding one scaled
  # "<name>X" column per feature (same feature set and order as before).
  feature_names <- c(
    "daysDiff", "Deductible", "Age", "Fault", "PastNumberOfClaims",
    "VehiclePrice", "AddressChange.Claim", "Make", "DriverRating",
    "VehicleCategory", "NumberOfSuppliments", "MaritalStatus",
    "BasePolicy", "AccidentArea", "PoliceReportFiled"
  )
  for (nm in feature_names) {
    dat2 <- PreProcess.Continuous(nm, dat2)
  }
  dat2
}
# Scaled feature columns (one "<name>X" per feature in [-1, 1]) for the NN;
# dataGLM already carries the 0/1 response and the GLM fitted values.
dataNN <- Features.PreProcess(dataGLM)
###############################################
######### choosing learning and test sample
###############################################
table(dataNN$FraudFound)
# dataNN$FraudFound <- ifelse(dataNN$FraudFound == "Yes", 1, 0)
# data_balanced_under <- ovun.sample(FraudFound ~ ., data = dataNN, method = "under", N = 923*2, seed = 1)$data
# table(data_balanced_under$FraudFound)
# set.seed(100)
# ll <- sample(c(1:nrow(data_balanced_under)), round(0.8*nrow(data_balanced_under)), replace = FALSE)
# Reuse the same train/test indices `ll` as the GLM so both models are
# evaluated on identical splits.
learnNN <- dataNN[ll,]
testNN <- dataNN[-ll,]
(n_l <- nrow(learnNN))
(n_t <- nrow(testNN))
#######################################################
######### neural network definitions for model (3.11)
#######################################################
# Keras inputs: a single 15-column matrix of the scaled features, wrapped in
# a list (the model takes one input tensor).  The commented fitGLM lines are
# leftovers from a CANN variant that also fed the GLM prediction.
learnNN.x <- list(as.matrix(learnNN[,c("VehiclePriceX", "MakeX", "VehicleCategoryX","AgeX", "FaultX", "DriverRatingX",
                                       "MaritalStatusX", "PoliceReportFiledX", "daysDiffX", "DeductibleX",
                                       "PastNumberOfClaimsX", "AddressChange.ClaimX", "NumberOfSupplimentsX",
                                       "BasePolicyX", "AccidentAreaX")]))
# as.matrix(learnNN$fitGLM) )
testNN.x <- list(as.matrix(testNN[,c("VehiclePriceX", "MakeX", "VehicleCategoryX","AgeX", "FaultX", "DriverRatingX",
                                     "MaritalStatusX", "PoliceReportFiledX", "daysDiffX", "DeductibleX",
                                     "PastNumberOfClaimsX", "AddressChange.ClaimX", "NumberOfSupplimentsX",
                                     "BasePolicyX", "AccidentAreaX")]))
# as.matrix(testNN$fitGLM) )
# Hidden layer sizes for all sub-networks below.
neurons <- c(20,15,10)
# No.Labels <- length(unique(learn$VehBrandX))
###############################################
######### definition of neural network (3.11)
###############################################
# Build and compile the Keras classifier: a plain feed-forward net
# (15 inputs -> 20 -> 15 -> 10 -> 1 sigmoid) trained with binary
# cross-entropy.  NOTE(review): Cont2/Cont3 and NNetwork2/NNetwork3 are
# constructed but NOT part of the returned model (inputs = Cont1, outputs =
# NNetwork1); they appear to be leftovers from the fuller CANN architecture
# -- confirm before deleting.
model.2IA <- function(){
  # Input tensors; only Cont1 (the 15 scaled features) is actually used.
  Cont1 <- layer_input(shape = c(15), dtype = 'float32', name='Cont1')
  Cont2 <- layer_input(shape = c(5), dtype = 'float32', name='Cont2')
  Cont3 <- layer_input(shape = c(7), dtype = 'float32', name='Cont3')
  # GLM <- layer_input(shape = c(1), dtype = 'float32', name = 'GLM')
  x.input <- c(Cont1)
  #
  # Cat1_embed = Cat1 %>%
  # layer_embedding(input_dim = No.Labels, output_dim = 2, trainable=TRUE,
  # input_length = 1, name = 'Cat1_embed') %>%
  # layer_flatten(name='Cat1_flat')
  #
  # NNetwork1 = list(Cont1, Cat1_embed) %>% layer_concatenate(name='cont') %>%
  # layer_dense(units=neurons[1], activation='tanh', name='hidden1') %>%
  # layer_dense(units=neurons[2], activation='tanh', name='hidden2') %>%
  # layer_dense(units=neurons[3], activation='tanh', name='hidden3') %>%
  # layer_dense(units=1, activation='linear', name='NNetwork1',
  # weights=list(array(0, dim=c(neurons[3],1)), array(0, dim=c(1))))
  # Main branch: 3 tanh hidden layers, sigmoid output = fraud probability.
  NNetwork1 = Cont1 %>%
    layer_dense(units=neurons[1], activation='tanh', name='hidden1') %>%
    layer_dense(units=neurons[2], activation='tanh', name='hidden2') %>%
    layer_dense(units=neurons[3], activation='tanh', name='hidden3') %>%
    layer_dense(units=1, activation='sigmoid', name='NNetwork1')
  # weights=list(array(0, dim=c(neurons[3],1)), array(0, dim=c(1))))
  #
  # Dead branch (not connected to the returned model's output).
  NNetwork2 = Cont2 %>%
    layer_dense(units=neurons[1], activation='tanh', name='hidden4') %>%
    layer_dense(units=neurons[2], activation='tanh', name='hidden5') %>%
    layer_dense(units=neurons[3], activation='tanh', name='hidden6') %>%
    layer_dense(units=1, activation='tanh', name='NNetwork2')
  # weights=list(array(0, dim=c(neurons[3],1)), array(0, dim=c(1))))
  #
  # Dead branch (not connected to the returned model's output).
  NNetwork3 = Cont3 %>%
    layer_dense(units=neurons[1], activation='tanh', name='hidden7') %>%
    layer_dense(units=neurons[2], activation='tanh', name='hidden8') %>%
    layer_dense(units=neurons[3], activation='tanh', name='hidden9') %>%
    layer_dense(units=1, activation='tanh', name='NNetwork3')
  # weights=list(array(0, dim=c(neurons[3],1)), array(0, dim=c(1))))
  #
  # NNoutput = list(NNetwork1) %>% layer_add(name='Add') %>%
  # layer_dense(units=1, activation='sigmoid', name = 'NNoutput')
  # # trainable=TRUE, weights=list(array(c(1), dim=c(1,1)), array(0, dim=c(1))))
  model <- keras_model(inputs = x.input, outputs = c(NNetwork1))
  model %>% compile(optimizer = optimizer_nadam(), loss = 'binary_crossentropy')
  model
}
# Fit the network on the training features/0-1 labels and evaluate it on the
# test split with ROC/AUC, mirroring the GLM evaluation above.
model <- model.2IA()
summary(model)
# may take a couple of minutes if epochs is more than 100
{t1 <- proc.time()
fit <- model %>% fit(learnNN.x, as.matrix(learnNN$FraudFound), epochs=400, batch_size=500, verbose=0,
                     validation_data=list(testNN.x, as.matrix(testNN$FraudFound)))
(proc.time()-t1)}
# This plot should not be studied because in a thorough analyis one should not track
# out-of-sample losses on the epochs, however, it is quite illustrative, here.
# oos <- 200* fit[[2]]$val_loss + 200*(-mean(test$ClaimNb)+mean(log(test$ClaimNb^test$ClaimNb)))
# plot(oos, type='l', ylim=c(31.5,32.1), xlab="epochs", ylab="out-of-sample loss", cex=1.5, cex.lab=1.5, main=list(paste("Model GAM+ calibration", sep=""), cex=1.5) )
# abline(h=c(32.07597, 31.50136), col="orange", lty=2)
# Predicted fraud probabilities on train and test.
learn0 <- learnNN
learn0$fitGANPlus <- as.vector(model %>% predict(learnNN.x))
test0 <- testNN
test0$fitGANPlus <- as.vector(model %>% predict(testNN.x))
# NOTE(review): prediction()/performance() are ROCR functions; ROCR is not
# in the library() calls above -- confirm it is attached or add library(ROCR).
pred<-prediction(test0$fitGANPlus,test0$FraudFound)
perf <- performance(pred,"tpr","fpr")
plot(perf)
abline(a=0,b=1, col="red", lty=2)
# Draw ROC curve.
result.roc <- roc(test0$FraudFound, test0$fitGANPlus)
auc(result.roc)
# plot(result.roc, print.thres="best", print.thres.best.method="closest.topleft")
# Best threshold by the closest-to-top-left criterion.
result.coords <- coords(
  result.roc, "best", best.method="closest.topleft", ret=c("threshold", "accuracy"))
print(result.coords)
# Make prediction using the best top-left cutoff.
result.predicted.label <- ifelse(test0$fitGANPlus > result.coords[1,1], 1, 0)
xtabs(~ result.predicted.label + test0$FraudFound)
accuracy.meas(test0$FraudFound, result.predicted.label)
|
#!/share/nas2/genome/biosoft/R/2.15.1/lib64/R/bin/Rscript
#####################################################################
# Copyright 2015, BMK
#
# Author:tengh <tengh@biomarker.com.cn>
#
# Function: draw genomewide cytosine coverage distribution map
#
# Modify date: 20150819
# Note: delete group label
# reset opt$color="#263C8B,#4E74A6,#BDBF78,#BFA524"
#####################################################################
library("grid")
library("RColorBrewer")
library("scales")
library("gtable")
# Compute the overall gtable layout for the heatmap.
# Measures every panel (column/row trees, annotation tracks, matrix body,
# row/col labels, legends, title) and returns:
#   gt     - an empty gtable whose row heights / column widths encode the layout
#            (cols: row tree | row annot | matrix | row names | legend | annot legend;
#             rows: title | col tree | col annot | matrix | col names)
#   mindim - smallest cell dimension in bigpts; the caller uses it to decide
#            whether per-cell borders are worth drawing.
# NOTE(review): measuring with strwidth() assumes a graphics device is open —
# confirm callers always draw afterwards.
lo = function(rown, coln, nrow, ncol, cellheight = NA, cellwidth = NA, treeheight_col, treeheight_row, legend, annotation_row, annotation_col, annotation_colors, annotation_legend, main, fontsize, fontsize_row, fontsize_col, gaps_row, gaps_col, ...){
# Get height of colnames and length of rownames (reserve room for the longest label)
if(!is.null(coln[1])){
t = c(coln, colnames(annotation_row))
longest_coln = which.max(strwidth(t, units = 'in'))
gp = list(fontsize = fontsize_col, ...)
coln_height = unit(1, "grobheight", textGrob(t[longest_coln], rot = 90, gp = do.call(gpar, gp))) + unit(10, "bigpts")
}else{
coln_height = unit(5, "bigpts")
}
if(!is.null(rown[1])){
#t = c(rown, colnames(annotation_col))
t = c(rown, "") #20150819: annotation column names measured separately in this fork
longest_rown = which.max(strwidth(t, units = 'in'))
gp = list(fontsize = fontsize_row, ...)
rown_width = unit(1, "grobwidth", textGrob(t[longest_rown], gp = do.call(gpar, gp))) + unit(10, "bigpts")
}else{
rown_width = unit(5, "bigpts")
}
gp = list(fontsize = fontsize, ...)
# Legend position: width driven by the widest break label (or the "Scale" title)
if(!is.na(legend[1])){
longest_break = which.max(nchar(names(legend)))
longest_break = unit(1.1, "grobwidth", textGrob(as.character(names(legend))[longest_break], gp = do.call(gpar, gp)))
title_length = unit(1.1, "grobwidth", textGrob("Scale", gp = gpar(fontface = "bold", ...)))
legend_width = unit(12, "bigpts") + longest_break * 1.2
legend_width = max(title_length, legend_width)
}else{
legend_width = unit(0, "bigpts")
}
# Set main title height
if(is.na(main)){
main_height = unit(0, "npc")
}else{
main_height = unit(1.5, "grobheight", textGrob(main, gp = gpar(fontsize = 1.3 * fontsize, ...)))
}
# Column annotations: one fontsize-tall track per annotation variable
textheight = unit(fontsize, "bigpts")
if(!is.na(annotation_col[[1]][1])){
# Column annotation height
annot_col_height = ncol(annotation_col) * (textheight + unit(2, "bigpts")) + unit(2, "bigpts")
# Width of the corresponding legend
#t = c(as.vector(as.matrix(annotation_col)), colnames(annotation_col))
t = c(as.vector(as.matrix(annotation_col)),"") #20150819
annot_col_legend_width = unit(1.2, "grobwidth", textGrob(t[which.max(nchar(t))], gp = gpar(...))) + unit(12, "bigpts")
if(!annotation_legend){
annot_col_legend_width = unit(0, "npc")
}
}else{
annot_col_height = unit(0, "bigpts")
annot_col_legend_width = unit(0, "bigpts")
}
# Row annotations (mirror of the column-annotation sizing above)
if(!is.na(annotation_row[[1]][1])){
# Row annotation width
annot_row_width = ncol(annotation_row) * (textheight + unit(2, "bigpts")) + unit(2, "bigpts")
# Width of the corresponding legend
t = c(as.vector(as.matrix(annotation_row)), colnames(annotation_row))
annot_row_legend_width = unit(1.2, "grobwidth", textGrob(t[which.max(nchar(t))], gp = gpar(...))) + unit(12, "bigpts")
if(!annotation_legend){
annot_row_legend_width = unit(0, "npc")
}
}else{
annot_row_width = unit(0, "bigpts")
annot_row_legend_width = unit(0, "bigpts")
}
annot_legend_width = max(annot_row_legend_width, annot_col_legend_width)
# Tree height
treeheight_col = unit(treeheight_col, "bigpts") + unit(5, "bigpts")
treeheight_row = unit(treeheight_row, "bigpts") + unit(5, "bigpts")
# Set cell sizes: fixed cell size wins; otherwise the matrix takes whatever
# space remains after all the fixed-size panels are subtracted.
if(is.na(cellwidth)){
mat_width = unit(1, "npc") - rown_width - legend_width - treeheight_row - annot_row_width - annot_legend_width
}else{
mat_width = unit(cellwidth * ncol, "bigpts") + length(gaps_col) * unit(4, "bigpts")
}
if(is.na(cellheight)){
mat_height = unit(1, "npc") - main_height - coln_height - treeheight_col - annot_col_height
}else{
mat_height = unit(cellheight * nrow, "bigpts") + length(gaps_row) * unit(4, "bigpts")
}
# Produce gtable
gt = gtable(widths = unit.c(treeheight_row, annot_row_width, mat_width, rown_width, legend_width, annot_legend_width), heights = unit.c(main_height, treeheight_col, annot_col_height, mat_height, coln_height), vp = viewport(gp = do.call(gpar, gp)))
cw = convertWidth(mat_width - (length(gaps_col) * unit(4, "bigpts")), "bigpts", valueOnly = T) / ncol
ch = convertHeight(mat_height - (length(gaps_row) * unit(4, "bigpts")), "bigpts", valueOnly = T) / nrow
# Return minimal cell dimension in bigpts to decide if borders are drawn
mindim = min(cw, ch)
res = list(gt = gt, mindim = mindim)
return(res)
}
find_coordinates = function(n, gaps, m = 1:n){
  # Layout bookkeeping for n cells along one axis, with a 4-bigpt visual gap
  # inserted after each index listed in `gaps`.
  #   n    - number of cells on this axis
  #   gaps - indices after which a gap is drawn (empty vector for none)
  #   m    - positions to report (defaults to every cell)
  # Returns list(coord = right/bottom edge of each requested cell as a unit,
  #              size = the per-cell size as a unit).
  if(length(gaps) == 0){
    return(list(coord = unit(m / n, "npc"), size = unit(1 / n, "npc") ))
  }
  if(max(gaps) > n){
    stop("Gaps do not match with matrix size")
  }
  # FIX: unit() requires a numeric first argument; the original passed the
  # string "4", which errors on modern versions of grid.
  # Each gap steals 4 bigpts from the npc span shared by the n cells.
  size = (1 / n) * (unit(1, "npc") - length(gaps) * unit(4, "bigpts"))
  # For each position, count how many gaps precede it and shift by that many gap widths.
  gaps2 = apply(sapply(gaps, function(gap, x){x > gap}, m), 1, sum)
  coord = m * size + (gaps2 * unit(4, "bigpts"))
  return(list(coord = coord, size = size))
}
# Build a polyline grob for an hclust dendrogram, aligned with the heatmap
# cells (gap-aware via find_coordinates). horizontal = TRUE draws the column
# tree on top; FALSE transposes the geometry for the row tree on the left.
draw_dendrogram = function(hc, gaps, horizontal = T){
# Heights normalised to (0, 1/1.05] so the tallest merge stays inside the panel.
h = hc$height / max(hc$height) / 1.05
m = hc$merge
o = hc$order
n = length(o)
# Re-index hclust's merge matrix: negative entries are leaves (1..n),
# positive entries are earlier merges (stored at rows n+1 .. 2n-1 below).
m[m > 0] = n + m[m > 0]
m[m < 0] = abs(m[m < 0])
# NOTE(review): `dist` here is a plain coordinate matrix shadowing stats::dist
# within this function; x = leaf/branch position, y = merge height.
dist = matrix(0, nrow = 2 * n - 1, ncol = 2, dimnames = list(NULL, c("x", "y")))
# Leaves sit at cell centres in display (hc$order) order.
dist[1:n, 1] = 1 / n / 2 + (1 / n) * (match(1:n, o) - 1)
for(i in 1:nrow(m)){
dist[n + i, 1] = (dist[m[i, 1], 1] + dist[m[i, 2], 1]) / 2
dist[n + i, 2] = h[i]
}
# One U-shaped connector (4 points) per merge.
draw_connection = function(x1, x2, y1, y2, y){
res = list(
x = c(x1, x1, x2, x2),
y = c(y1, y, y, y2)
)
return(res)
}
x = rep(NA, nrow(m) * 4)
y = rep(NA, nrow(m) * 4)
id = rep(1:nrow(m), rep(4, nrow(m)))
for(i in 1:nrow(m)){
# NOTE(review): local `c` shadows base::c inside this loop body.
c = draw_connection(dist[m[i, 1], 1], dist[m[i, 2], 1], dist[m[i, 1], 2], dist[m[i, 2], 2], h[i])
k = (i - 1) * 4 + 1
x[k : (k + 3)] = c$x
y[k : (k + 3)] = c$y
}
# Rescale x through the gap-aware coordinate system (x * n converts the
# 0..1 fractions back to cell indices expected by find_coordinates).
x = find_coordinates(n, gaps, x * n)$coord
y = unit(y, "npc")
if(!horizontal){
# Transpose for the row dendrogram: swap axes and flip both.
a = x
x = unit(1, "npc") - y
y = unit(1, "npc") - a
}
res = polylineGrob(x = x, y = y, id = id)
return(res)
}
draw_matrix = function(matrix, border_color, gaps_rows, gaps_cols, fmat, fontsize_number, number_color){
  # Render the heatmap body: one coloured rectangle per cell (`matrix` already
  # holds colour strings), plus optional in-cell number labels taken from
  # `fmat` when attr(fmat, "draw") is TRUE.
  n_rows = nrow(matrix)
  n_cols = ncol(matrix)
  along_x = find_coordinates(n_cols, gaps_cols)
  along_y = find_coordinates(n_rows, gaps_rows)
  # Cell centres; y is flipped so row 1 sits at the top of the panel.
  center_x = along_x$coord - 0.5 * along_x$size
  center_y = unit(1, "npc") - (along_y$coord - 0.5 * along_y$size)
  cells = expand.grid(y = center_y, x = center_x)
  children = gList()
  children[["rect"]] = rectGrob(x = cells$x, y = cells$y, width = along_x$size, height = along_y$size, gp = gpar(fill = matrix, col = border_color))
  if(attr(fmat, "draw")){
    children[["text"]] = textGrob(x = cells$x, y = cells$y, label = fmat, gp = gpar(col = number_color, fontsize = fontsize_number))
  }
  return(gTree(children = children))
}
draw_colnames = function(coln, gaps, ...){
  # Column labels under the heatmap, rotated 270 degrees and centred on each
  # column; positions account for any column gaps.
  pos = find_coordinates(length(coln), gaps)
  label_x = pos$coord - 0.5 * pos$size
  grob = textGrob(coln, x = label_x, y = unit(1, "npc") - unit(3, "bigpts"), vjust = 0.5, hjust = 0, rot = 270, gp = gpar(...))
  return(grob)
}
draw_rownames = function(rown, gaps, ...){
  # Row labels to the right of the heatmap, vertically centred on each row;
  # the npc flip makes row 1 appear at the top.
  pos = find_coordinates(length(rown), gaps)
  label_y = unit(1, "npc") - (pos$coord - 0.5 * pos$size)
  grob = textGrob(rown, x = unit(3, "bigpts"), y = label_y, vjust = 0.5, hjust = 0, gp = gpar(...))
  return(grob)
}
draw_legend = function(color, breaks, legend, ...){
  # Vertical colour-scale legend anchored to the top of its panel: a stack of
  # rectangles (one per break interval) plus tick labels at the positions in
  # `legend` (a named numeric vector; names become the labels).
  bar_height = min(unit(1, "npc"), unit(150, "bigpts"))
  # Rescale data values to [0, 1] over the break range.
  rel = function(v) (v - min(breaks)) / (max(breaks) - min(breaks))
  tick_y = bar_height * rel(legend) + (unit(1, "npc") - bar_height)
  break_y = bar_height * rel(breaks) + (unit(1, "npc") - bar_height)
  segment_h = break_y[-1] - break_y[-length(break_y)]
  colour_bar = rectGrob(x = 0, y = break_y[-length(break_y)], width = unit(10, "bigpts"), height = segment_h, hjust = 0, vjust = 0, gp = gpar(fill = color, col = "#FFFFFF00"))
  tick_labels = textGrob(names(legend), x = unit(14, "bigpts"), y = tick_y, hjust = 0, gp = gpar(...))
  return(grobTree(colour_bar, tick_labels))
}
convert_annotations = function(annotation, annotation_colors){
  # Translate an annotation data.frame into a same-shaped character matrix of
  # colour strings, using annotation_colors (a named list keyed by column name).
  # Discrete columns (character/factor) are looked up by level name and every
  # level must be covered by the supplied palette; continuous columns are
  # binned into 100 intervals and mapped through a colour ramp.
  # Raises an error when a discrete level has no colour assigned.
  new = annotation
  for(i in 1:ncol(annotation)){
    a = annotation[, i]
    b = annotation_colors[[colnames(annotation)[i]]]
    # FIX: use scalar || inside if(); the elementwise | works here only by
    # accident of both operands being length 1.
    if(is.character(a) || is.factor(a)){
      a = as.character(a)
      if(length(setdiff(a, names(b))) > 0){
        stop(sprintf("Factor levels on variable %s do not match with annotation_colors", colnames(annotation)[i]))
      }
      new[, i] = b[a]
    }else{
      # cut() yields a factor; indexing the 100-colour ramp by it uses the
      # underlying integer bin codes.
      a = cut(a, breaks = 100)
      new[, i] = colorRampPalette(b)(100)[a]
    }
  }
  return(as.matrix(new))
}
draw_annotations = function(converted_annotations, border_color, gaps, fontsize, horizontal){
  # Draw the annotation colour tracks: one strip of cells per annotation
  # variable. `converted_annotations` is the colour matrix produced by
  # convert_annotations(). horizontal = TRUE lays tracks above the matrix;
  # FALSE transposes everything so they run down the left side.
  n_tracks = ncol(converted_annotations)
  n_items = nrow(converted_annotations)
  along = find_coordinates(n_items, gaps)
  item_centers = along$coord - 0.5 * along$size
  # Each track is `fontsize` tall with 2-bigpt spacing; offset to track centres.
  track_centers = cumsum(rep(fontsize, n_tracks)) + cumsum(rep(2, n_tracks)) - fontsize / 2 + 1
  track_centers = unit(track_centers, "bigpts")
  if(horizontal){
    cells = expand.grid(x = item_centers, y = track_centers)
    res = rectGrob(x = cells$x, y = cells$y, width = along$size, height = unit(fontsize, "bigpts"), gp = gpar(fill = converted_annotations, col = border_color))
  }else{
    flipped_x = unit(1, "npc") - track_centers
    flipped_y = unit(1, "npc") - item_centers
    cells = expand.grid(y = flipped_y, x = flipped_x)
    res = rectGrob(x = cells$x, y = cells$y, width = unit(fontsize, "bigpts"), height = along$size, gp = gpar(fill = converted_annotations, col = border_color))
  }
  return(res)
}
draw_annotation_names = function(annotations, fontsize, horizontal){
  # Labels for the annotation tracks (the data.frame column names), positioned
  # to line up with the strips drawn by draw_annotations().
  n_tracks = ncol(annotations)
  near_edge = unit(3, "bigpts")
  # Same per-track centre formula as draw_annotations() so labels align.
  track_pos = cumsum(rep(fontsize, n_tracks)) + cumsum(rep(2, n_tracks)) - fontsize / 2 + 1
  track_pos = unit(track_pos, "bigpts")
  if(horizontal){
    res = textGrob(colnames(annotations), x = near_edge, y = track_pos, hjust = 0, gp = gpar(fontsize = fontsize, fontface = 2))
  }else{
    rotated_x = unit(1, "npc") - track_pos
    rotated_y = unit(1, "npc") - near_edge
    res = textGrob(colnames(annotations), x = rotated_x, y = rotated_y, vjust = 0.5, hjust = 0, rot = 270, gp = gpar(fontsize = fontsize, fontface = 2))
  }
  return(res)
}
# Build the legend for the annotation tracks. `y` walks downward from the top
# of the panel as each variable's entry is emitted. Discrete variables get one
# labelled colour box per level; continuous variables get a 4-step colour ramp
# with the pretty min/max printed beside it.
draw_annotation_legend = function(annotation, annotation_colors, border_color, ...){
y = unit(1, "npc")
# "FGH" is just a representative string used to measure one line of text.
text_height = unit(1, "grobheight", textGrob("FGH", gp = gpar(...)))
res = gList()
for(i in names(annotation)){
# Variable name as a bold heading.
res[[i]] = textGrob(i, x = 0, y = y, vjust = 1, hjust = 0, gp = gpar(fontface = "bold", ...))
y = y - 1.5 * text_height
if(is.character(annotation[[i]]) | is.factor(annotation[[i]])){
# Discrete: one 2-line-tall swatch + label per colour.
n = length(annotation_colors[[i]])
yy = y - (1:n - 1) * 2 * text_height
res[[paste(i, "r")]] = rectGrob(x = unit(0, "npc"), y = yy, hjust = 0, vjust = 1, height = 2 * text_height, width = 2 * text_height, gp = gpar(col = border_color, fill = annotation_colors[[i]]))
res[[paste(i, "t")]] = textGrob(names(annotation_colors[[i]]), x = text_height * 2.4, y = yy - text_height, hjust = 0, vjust = 0.5, gp = gpar(...))
y = y - n * 2 * text_height
}else{
# Continuous: an 8-line-tall ramp split into 4 blocks, outlined by a
# single border rectangle, labelled with the pretty value range.
yy = y - 8 * text_height + seq(0, 1, 0.25)[-1] * 8 * text_height
h = 8 * text_height * 0.25
res[[paste(i, "r")]] = rectGrob(x = unit(0, "npc"), y = yy, hjust = 0, vjust = 1, height = h, width = 2 * text_height, gp = gpar(col = NA, fill = colorRampPalette(annotation_colors[[i]])(4)))
res[[paste(i, "r2")]] = rectGrob(x = unit(0, "npc"), y = y, hjust = 0, vjust = 1, height = 8 * text_height, width = 2 * text_height, gp = gpar(col = border_color))
txt = rev(range(grid.pretty(range(annotation[[i]], na.rm = TRUE))))
yy = y - c(1, 7) * text_height
res[[paste(i, "t")]] = textGrob(txt, x = text_height * 2.4, y = yy, hjust = 0, vjust = 0.5, gp = gpar(...))
y = y - 8 * text_height
}
# Gap before the next variable's heading.
y = y - 1.5 * text_height
}
res = gTree(children = res)
return(res)
}
draw_main = function(text, ...){
  # Bold title grob for the heatmap; extra gpar settings pass through `...`.
  return(textGrob(text, gp = gpar(fontface = "bold", ...)))
}
vplayout = function(x, y){
  # Viewport addressing cell (row x, column y) of the current grid layout.
  return(viewport(layout.pos.row = x, layout.pos.col = y))
}
# Assemble the full heatmap gtable from pre-computed pieces (colour matrix,
# hclust trees, annotation frames). If `filename` is set, it computes the
# device size, opens the right graphics device, recurses with filename = NA to
# build the plot, draws it, closes the device and returns NULL; otherwise it
# returns the assembled gtable for the caller to draw.
heatmap_motor = function(matrix, border_color, cellwidth, cellheight, tree_col, tree_row, treeheight_col, treeheight_row, filename, width, height, breaks, color, legend, annotation_row, annotation_col, annotation_colors, annotation_legend, main, fontsize, fontsize_row, fontsize_col, fmat, fontsize_number, number_color, gaps_col, gaps_row, labels_row, labels_col, ...){
# Set layout (NOTE: local `lo` shadows the lo() function after this call)
lo = lo(coln = labels_col, rown = labels_row, nrow = nrow(matrix), ncol = ncol(matrix), cellwidth = cellwidth, cellheight = cellheight, treeheight_col = treeheight_col, treeheight_row = treeheight_row, legend = legend, annotation_col = annotation_col, annotation_row = annotation_row, annotation_colors = annotation_colors, annotation_legend = annotation_legend, main = main, fontsize = fontsize, fontsize_row = fontsize_row, fontsize_col = fontsize_col, gaps_row = gaps_row, gaps_col = gaps_col, ...)
res = lo$gt
mindim = lo$mindim
if(!is.na(filename)){
# File output: size defaults to the natural size of the laid-out gtable.
if(is.na(height)){
height = convertHeight(gtable_height(res), "inches", valueOnly = T)
}
if(is.na(width)){
width = convertWidth(gtable_width(res), "inches", valueOnly = T)
}
# Get file type from the filename extension
r = regexpr("\\.[a-zA-Z]*$", filename)
if(r == -1) stop("Improper filename")
ending = substr(filename, r + 1, r + attr(r, "match.length"))
f = switch(ending,
pdf = function(x, ...) pdf(x, ...),
png = function(x, ...) png(x, units = "in", res = 500, ...),
jpeg = function(x, ...) jpeg(x, units = "in", res = 500, ...),
jpg = function(x, ...) jpeg(x, units = "in", res = 500, ...),
tiff = function(x, ...) tiff(x, units = "in", res = 500, compression = "lzw", ...),
bmp = function(x, ...) bmp(x, units = "in", res = 500, ...),
stop("File type should be: pdf, png, bmp, jpg, tiff")
)
# print(sprintf("height:%f width:%f", height, width))
# gt = heatmap_motor(matrix, cellwidth = cellwidth, cellheight = cellheight, border_color = border_color, tree_col = tree_col, tree_row = tree_row, treeheight_col = treeheight_col, treeheight_row = treeheight_row, breaks = breaks, color = color, legend = legend, annotation_col = annotation_col, annotation_row = annotation_row, annotation_colors = annotation_colors, annotation_legend = annotation_legend, filename = NA, main = main, fontsize = fontsize, fontsize_row = fontsize_row, fontsize_col = fontsize_col, fmat = fmat, fontsize_number = fontsize_number, number_color = number_color, labels_row = labels_row, labels_col = labels_col, gaps_col = gaps_col, gaps_row = gaps_row, ...)
# Open the device, then recurse with filename = NA to build the gtable.
f(filename, height = height, width = width)
gt = heatmap_motor(matrix, cellwidth = cellwidth, cellheight = cellheight, border_color = border_color, tree_col = tree_col, tree_row = tree_row, treeheight_col = treeheight_col, treeheight_row = treeheight_row, breaks = breaks, color = color, legend = legend, annotation_col = annotation_col, annotation_row = annotation_row, annotation_colors = annotation_colors, annotation_legend = annotation_legend, filename = NA, main = main, fontsize = fontsize, fontsize_row = fontsize_row, fontsize_col = fontsize_col, fmat = fmat, fontsize_number = fontsize_number, number_color = number_color, labels_row = labels_row, labels_col = labels_col, gaps_col = gaps_col, gaps_row = gaps_row, ...)
grid.draw(gt)
dev.off()
return(NULL)
}
# Omit border color if cell size is too small (< 3 bigpts)
if(mindim < 3) border_color = NA
# Draw title
if(!is.na(main)){
elem = draw_main(main, fontsize = 1.3 * fontsize, ...)
res = gtable_add_grob(res, elem, t = 1, l = 3, name = "main")
}
# Draw tree for the columns
if(!is.na(tree_col[[1]][1]) & treeheight_col != 0){
elem = draw_dendrogram(tree_col, gaps_col, horizontal = T)
res = gtable_add_grob(res, elem, t = 2, l = 3, name = "col_tree")
}
# Draw tree for the rows
if(!is.na(tree_row[[1]][1]) & treeheight_row != 0){
elem = draw_dendrogram(tree_row, gaps_row, horizontal = F)
res = gtable_add_grob(res, elem, t = 4, l = 1, name = "row_tree")
}
# Draw matrix
elem = draw_matrix(matrix, border_color, gaps_row, gaps_col, fmat, fontsize_number, number_color)
res = gtable_add_grob(res, elem, t = 4, l = 3, clip = "off", name = "matrix")
# Draw colnames
if(length(labels_col) != 0){
pars = list(labels_col, gaps = gaps_col, fontsize = fontsize_col, ...)
elem = do.call(draw_colnames, pars)
res = gtable_add_grob(res, elem, t = 5, l = 3, clip = "off", name = "col_names")
}
# Draw rownames
if(length(labels_row) != 0){
pars = list(labels_row, gaps = gaps_row, fontsize = fontsize_row, ...)
elem = do.call(draw_rownames, pars)
res = gtable_add_grob(res, elem, t = 4, l = 4, clip = "off", name = "row_names")
}
# Draw annotation tracks on cols
if(!is.na(annotation_col[[1]][1])){
# Draw tracks
converted_annotation = convert_annotations(annotation_col, annotation_colors)
elem = draw_annotations(converted_annotation, border_color, gaps_col, fontsize, horizontal = T)
res = gtable_add_grob(res, elem, t = 3, l = 3, clip = "off", name = "col_annotation")
# Draw names (blanked out in this fork: column annotation names suppressed)
annotation_col.tmp<-annotation_col
colnames(annotation_col.tmp)<-""
elem = draw_annotation_names(annotation_col.tmp, fontsize, horizontal = T)
res = gtable_add_grob(res, elem, t = 3, l = 4, clip = "off", name = "row_annotation_names")
}
# Draw annotation tracks on rows
if(!is.na(annotation_row[[1]][1])){
# Draw tracks
converted_annotation = convert_annotations(annotation_row, annotation_colors)
elem = draw_annotations(converted_annotation, border_color, gaps_row, fontsize, horizontal = F)
res = gtable_add_grob(res, elem, t = 4, l = 2, clip = "off", name = "row_annotation")
# Draw names
elem = draw_annotation_names(annotation_row, fontsize, horizontal = F)
res = gtable_add_grob(res, elem, t = 5, l = 2, clip = "off", name = "row_annotation_names")
}
# Draw annotation legend (row + col annotations, reversed so the last appears first)
annotation = c(annotation_col[length(annotation_col):1], annotation_row[length(annotation_row):1])
annotation = annotation[unlist(lapply(annotation, function(x) !is.na(x[1])))]
if(length(annotation) > 0 & annotation_legend){
elem = draw_annotation_legend(annotation, annotation_colors, border_color, fontsize = fontsize, ...)
t = ifelse(is.null(labels_row), 4, 3)
res = gtable_add_grob(res, elem, t = t, l = 6, b = 5, clip = "off", name = "annotation_legend")
}
# Draw legend
if(!is.na(legend[1])){
elem = draw_legend(color, breaks, legend, fontsize = fontsize, ...)
t = ifelse(is.null(labels_row), 4, 3)
res = gtable_add_grob(res, elem, t = t, l = 5, b = 5, clip = "off", name = "legend")
}
return(res)
}
generate_breaks = function(x, n, center = F){
  # n + 1 equally spaced break points spanning the data range. With center =
  # TRUE the range is made symmetric around zero so diverging palettes map
  # zero to the middle colour.
  if(center){
    extreme = max(abs(c(min(x, na.rm = T), max(x, na.rm = T))))
    breaks = seq(-extreme, extreme, length.out = n + 1)
  }else{
    lims = range(x, na.rm = T)
    breaks = seq(lims[1], lims[2], length.out = n + 1)
  }
  return(breaks)
}
scale_vec_colours = function(x, col = rainbow(10), breaks = NA){
  # Bin each value of x by `breaks` (lowest value included) and return the
  # colour assigned to its bin.
  bin = as.numeric(cut(x, breaks = breaks, include.lowest = T))
  return(col[bin])
}
scale_colours = function(mat, col = rainbow(10), breaks = NA){
  # Colour-map an entire matrix, preserving its dimensions and dimnames.
  mat = as.matrix(mat)
  mapped = scale_vec_colours(as.vector(mat), col = col, breaks = breaks)
  return(matrix(mapped, nrow(mat), ncol(mat), dimnames = list(rownames(mat), colnames(mat))))
}
cluster_mat = function(mat, distance, method){
  # Hierarchically cluster the rows of `mat`.
  #   distance - a dissimilarity name understood by dist(), the special value
  #              "correlation" (1 - Pearson correlation between rows), or a
  #              precomputed "dist" object
  #   method   - agglomeration method, passed to hclust()
  # Returns the hclust object. Errors on an unknown method or distance.
  if(!(method %in% c("ward.D2", "ward", "single", "complete", "average", "mcquitty", "median", "centroid"))){
    stop("clustering method has to one form the list: 'ward', 'ward.D2', 'single', 'complete', 'average', 'mcquitty', 'median' or 'centroid'.")
  }
  # FIX: use inherits() instead of comparing class(distance) == "dist" — the
  # class() comparison breaks for objects carrying more than one class and is
  # not the idiomatic test. Also use scalar && inside if().
  if(!(distance[1] %in% c("correlation", "euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski")) && !inherits(distance, "dist")){
    stop("distance has to be a dissimilarity structure as produced by dist or one measure form the list: 'correlation', 'euclidean', 'maximum', 'manhattan', 'canberra', 'binary', 'minkowski'")
  }
  if(inherits(distance, "dist")){
    # Precomputed dissimilarity: use it as-is.
    d = distance
  }else if(distance[1] == "correlation"){
    d = as.dist(1 - cor(t(mat)))
  }else{
    d = dist(mat, method = distance)
  }
  return(hclust(d, method = method))
}
scale_rows = function(x){
  # Centre and scale every row to mean 0 / sd 1, ignoring NAs. The length-nrow
  # vectors recycle down the columns, so they line up with rows of x.
  row_means = apply(x, 1, mean, na.rm = T)
  row_sds = apply(x, 1, sd, na.rm = T)
  return((x - row_means) / row_sds)
}
scale_mat = function(mat, scale){
  # Standardise the matrix as requested: "none" returns it untouched, "row"
  # z-scores each row, "column" z-scores each column (via a double transpose).
  # Errors on any other value of `scale`.
  if(!(scale %in% c("none", "row", "column"))){
    # FIX: corrected typo "shoud" -> "should" in the error message.
    stop("scale argument should take values: 'none', 'row' or 'column'")
  }
  mat = switch(scale, none = mat, row = scale_rows(mat), column = t(scale_rows(t(mat))))
  return(mat)
}
# Fill in colours for every annotation variable that the user did not supply
# in annotation_colors. Discrete variables draw from a shared hue palette
# (without replacement across variables); continuous variables each get a
# 4-step sequential Brewer ramp.
# NOTE(review): calls set.seed() and sample(), which mutate the global RNG
# state as a side effect; dscale()/hue_pal()/brewer_pal() come from the
# `scales` package loaded at the top of the file.
generate_annotation_colours = function(annotation, annotation_colors, drop){
if(is.na(annotation_colors)[[1]][1]){
annotation_colors = list()
}
# Count the total number of discrete levels so the shared palette is big enough.
count = 0
for(i in 1:length(annotation)){
if(is.character(annotation[[i]]) | is.factor(annotation[[i]])){
if (is.factor(annotation[[i]]) & !drop){
# Keep unused factor levels when drop is FALSE.
count = count + length(levels(annotation[[i]]))
}else{
count = count + length(unique(annotation[[i]]))
}
}
}
factor_colors = dscale(factor(1:count), hue_pal(l = 75))
# Fixed seed makes the sampled colour assignment reproducible across calls.
set.seed(3453)
cont_counter = 2
for(i in 1:length(annotation)){
if(!(names(annotation)[i] %in% names(annotation_colors))){
if(is.character(annotation[[i]]) | is.factor(annotation[[i]])){
n = length(unique(annotation[[i]]))
if (is.factor(annotation[[i]]) & !drop){
n = length(levels(annotation[[i]]))
}
# Draw n colours from the shared pool, then remove them from the pool.
ind = sample(1:length(factor_colors), n)
annotation_colors[[names(annotation)[i]]] = factor_colors[ind]
l = levels(as.factor(annotation[[i]]))
l = l[l %in% unique(annotation[[i]])]
if (is.factor(annotation[[i]]) & !drop){
l = levels(annotation[[i]])
}
names(annotation_colors[[names(annotation)[i]]]) = l
factor_colors = factor_colors[-ind]
}else{
# Continuous: next sequential Brewer palette, first 4 of 5 colours.
annotation_colors[[names(annotation)[i]]] = brewer_pal("seq", cont_counter)(5)[1:4]
cont_counter = cont_counter + 1
}
}
}
return(annotation_colors)
}
kmeans_pheatmap = function(mat, k = min(nrow(mat), 150), sd_limit = NA, ...){
  # Aggregate rows with k-means and heatmap the cluster centres — useful when
  # the matrix has too many rows for hierarchical clustering. Rows may first
  # be filtered to those whose sd exceeds sd_limit. `...` goes to pheatmap2().
  if(!is.na(sd_limit)){
    row_sd = apply(mat, 1, sd)
    mat = mat[row_sd > sd_limit, ]
  }
  # Fixed seed keeps the clustering reproducible between calls.
  set.seed(1245678)
  fit = kmeans(mat, k, iter.max = 100)
  centers = fit$centers
  # Row labels encode cluster id and member count, e.g. "cl1_size_42".
  cluster_sizes = table(fit$cluster)
  rownames(centers) = sprintf("cl%s_size_%d", names(cluster_sizes), cluster_sizes)
  pheatmap2(centers, ...)
}
find_gaps = function(tree, cutree_n){
  # Positions (in dendrogram display order) at which the cluster id changes
  # when `tree` is cut into cutree_n groups — i.e. where visual gaps should be
  # inserted between clusters.
  v = cutree(tree, cutree_n)[tree$order]
  gaps = which((v[-1] - v[-length(v)]) != 0)
  # FIX: the original ended on a bare assignment, returning its value only
  # invisibly; return the result explicitly.
  return(gaps)
}
#' A function to draw clustered heatmaps.
#'
#' A function to draw clustered heatmaps where one has better control over some graphical
#' parameters such as cell size, etc.
#'
#' The function also allows to aggregate the rows using kmeans clustering. This is
#' advisable if number of rows is so big that R cannot handle their hierarchical
#' clustering anymore, roughly more than 1000. Instead of showing all the rows
#' separately one can cluster the rows in advance and show only the cluster centers.
#' The number of clusters can be tuned with parameter kmeans_k.
#'
#' @param mat numeric matrix of the values to be plotted.
#' @param color vector of colors used in heatmap.
#' @param kmeans_k the number of kmeans clusters to make, if we want to aggregate the
#' rows before drawing heatmap. If NA then the rows are not aggregated.
#' @param breaks a sequence of numbers that covers the range of values in mat and is one
#' element longer than color vector. Used for mapping values to colors. Useful, if needed
#' to map certain values to certain colors, to certain values. If value is NA then the
#' breaks are calculated automatically.
#' @param border_color color of cell borders on heatmap, use NA if no border should be
#' drawn.
#' @param cellwidth individual cell width in points. If left as NA, then the values
#' depend on the size of plotting window.
#' @param cellheight individual cell height in points. If left as NA,
#' then the values depend on the size of plotting window.
#' @param scale character indicating if the values should be centered and scaled in
#' either the row direction or the column direction, or none. Corresponding values are
#' \code{"row"}, \code{"column"} and \code{"none"}
#' @param cluster_rows boolean values determining if rows should be clustered,
#' @param cluster_cols boolean values determining if columns should be clustered.
#' @param clustering_distance_rows distance measure used in clustering rows. Possible
#' values are \code{"correlation"} for Pearson correlation and all the distances
#' supported by \code{\link{dist}}, such as \code{"euclidean"}, etc. If the value is none
#' of the above it is assumed that a distance matrix is provided.
#' @param clustering_distance_cols distance measure used in clustering columns. Possible
#' values the same as for clustering_distance_rows.
#' @param clustering_method clustering method used. Accepts the same values as
#' \code{\link{hclust}}.
#' @param cutree_rows number of clusters the rows are divided into, based on the
#' hierarchical clustering (using cutree), if rows are not clustered, the
#' argument is ignored
#' @param cutree_cols similar to \code{cutree_rows}, but for columns
#' @param treeheight_row the height of a tree for rows, if these are clustered.
#' Default value 50 points.
#' @param treeheight_col the height of a tree for columns, if these are clustered.
#' Default value 50 points.
#' @param legend logical to determine if legend should be drawn or not.
#' @param legend_breaks vector of breakpoints for the legend.
#' @param legend_labels vector of labels for the \code{legend_breaks}.
#' @param annotation_row data frame that specifies the annotations shown on left
#' side of the heatmap. Each row defines the features for a specific row. The
#' rows in the data and in the annotation are matched using corresponding row
#' names. Note that color schemes takes into account if variable is continuous
#' or discrete.
#' @param annotation_col similar to annotation_row, but for columns.
#' @param annotation deprecated parameter that currently sets the annotation_col if it is missing
#' @param annotation_colors list for specifying annotation_row and
#' annotation_col track colors manually. It is possible to define the colors
#' for only some of the features. Check examples for details.
#' @param annotation_legend boolean value showing if the legend for annotation
#' tracks should be drawn.
#' @param drop_levels logical to determine if unused levels are also shown in
#' the legend
#' @param show_rownames boolean specifying if row names should be shown.
#' @param show_colnames boolean specifying if column names should be shown.
#' @param main the title of the plot
#' @param fontsize base fontsize for the plot
#' @param fontsize_row fontsize for rownames (Default: fontsize)
#' @param fontsize_col fontsize for colnames (Default: fontsize)
#' @param display_numbers logical determining if the numeric values are also printed to
#' the cells. If this is a matrix (with same dimensions as original matrix), the contents
#' of the matrix are shown instead of original values.
#' @param number_format format strings (C printf style) of the numbers shown in cells.
#' For example "\code{\%.2f}" shows 2 decimal places and "\code{\%.1e}" shows exponential
#' notation (see more in \code{\link{sprintf}}).
#' @param number_color color of the text
#' @param fontsize_number fontsize of the numbers displayed in cells
#' @param gaps_row vector of row indices that show where to put gaps into
#' heatmap. Used only if the rows are not clustered. See \code{cutree_rows}
#' to see how to introduce gaps to clustered rows.
#' @param gaps_col similar to gaps_row, but for columns.
#' @param labels_row custom labels for rows that are used instead of rownames.
#' @param labels_col similar to labels_row, but for columns.
#' @param filename file path where to save the picture. Filetype is decided by
#' the extension in the path. Currently following formats are supported: png, pdf, tiff,
#' bmp, jpeg. Even if the plot does not fit into the plotting window, the file size is
#' calculated so that the plot would fit there, unless specified otherwise.
#' @param width manual option for determining the output file width in inches.
#' @param height manual option for determining the output file height in inches.
#' @param silent do not draw the plot (useful when using the gtable output)
#' @param \dots graphical parameters for the text used in plot. Parameters passed to
#' \code{\link{grid.text}}, see \code{\link{gpar}}.
#'
#' @return
#' Invisibly a list of components
#' \itemize{
#' \item \code{tree_row} the clustering of rows as \code{\link{hclust}} object
#' \item \code{tree_col} the clustering of columns as \code{\link{hclust}} object
#' \item \code{kmeans} the kmeans clustering of rows if parameter \code{kmeans_k} was
#' specified
#' }
#'
#' @author Raivo Kolde <rkolde@@gmail.com>
#' @examples
#' # Create test matrix
#' test = matrix(rnorm(200), 20, 10)
#' test[1:10, seq(1, 10, 2)] = test[1:10, seq(1, 10, 2)] + 3
#' test[11:20, seq(2, 10, 2)] = test[11:20, seq(2, 10, 2)] + 2
#' test[15:20, seq(2, 10, 2)] = test[15:20, seq(2, 10, 2)] + 4
#' colnames(test) = paste("Test", 1:10, sep = "")
#' rownames(test) = paste("Gene", 1:20, sep = "")
#'
#' # Draw heatmaps
#' pheatmap2(test)
#' pheatmap2(test, kmeans_k = 2)
#' pheatmap2(test, scale = "row", clustering_distance_rows = "correlation")
#' pheatmap2(test, color = colorRampPalette(c("navy", "white", "firebrick3"))(50))
#' pheatmap2(test, cluster_row = FALSE)
#' pheatmap2(test, legend = FALSE)
#'
#' # Show text within cells
#' pheatmap2(test, display_numbers = TRUE)
#' pheatmap2(test, display_numbers = TRUE, number_format = "\%.1e")
#' pheatmap2(test, display_numbers = matrix(ifelse(test > 5, "*", ""), nrow(test)))
#' pheatmap2(test, cluster_row = FALSE, legend_breaks = -1:4, legend_labels = c("0",
#' "1e-4", "1e-3", "1e-2", "1e-1", "1"))
#'
#' # Fix cell sizes and save to file with correct size
#' pheatmap2(test, cellwidth = 15, cellheight = 12, main = "Example heatmap")
#' pheatmap2(test, cellwidth = 15, cellheight = 12, fontsize = 8, filename = "test.pdf")
#'
#' # Generate annotations for rows and columns
#' annotation_col = data.frame(
#' CellType = factor(rep(c("CT1", "CT2"), 5)),
#' Time = 1:5
#' )
#' rownames(annotation_col) = paste("Test", 1:10, sep = "")
#'
#' annotation_row = data.frame(
#' GeneClass = factor(rep(c("Path1", "Path2", "Path3"), c(10, 4, 6)))
#' )
#' rownames(annotation_row) = paste("Gene", 1:20, sep = "")
#'
#' # Display row and color annotations
#' pheatmap2(test, annotation_col = annotation_col)
#' pheatmap2(test, annotation_col = annotation_col, annotation_legend = FALSE)
#' pheatmap2(test, annotation_col = annotation_col, annotation_row = annotation_row)
#'
#'
#' # Specify colors
#' ann_colors = list(
#' Time = c("white", "firebrick"),
#' CellType = c(CT1 = "#1B9E77", CT2 = "#D95F02"),
#' GeneClass = c(Path1 = "#7570B3", Path2 = "#E7298A", Path3 = "#66A61E")
#' )
#'
#' pheatmap2(test, annotation_col = annotation_col, annotation_colors = ann_colors, main = "Title")
#' pheatmap2(test, annotation_col = annotation_col, annotation_row = annotation_row,
#' annotation_colors = ann_colors)
#' pheatmap2(test, annotation_col = annotation_col, annotation_colors = ann_colors[2])
#'
#' # Gaps in heatmaps
#' pheatmap2(test, annotation_col = annotation_col, cluster_rows = FALSE, gaps_row = c(10, 14))
#' pheatmap2(test, annotation_col = annotation_col, cluster_rows = FALSE, gaps_row = c(10, 14),
#' cutree_col = 2)
#'
#' # Show custom strings as row/col names
#' labels_row = c("", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
#' "", "", "Il10", "Il15", "Il1b")
#'
#' pheatmap2(test, annotation_col = annotation_col, labels_row = labels_row)
#'
#' # Specifying clustering from distance matrix
#' drows = dist(test, method = "minkowski")
#' dcols = dist(t(test), method = "minkowski")
#' pheatmap2(test, clustering_distance_rows = drows, clustering_distance_cols = dcols)
#'
#' @export
pheatmap2 = function(mat,color = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(100), kmeans_k = NA, breaks = NA, border_color = "grey60", cellwidth = NA, cellheight = NA, scale = "none", cluster_rows = TRUE, cluster_cols = TRUE, clustering_distance_rows = "euclidean", clustering_distance_cols = "euclidean", clustering_method = "complete", cutree_rows = NA, cutree_cols = NA, treeheight_row = ifelse(cluster_rows, 50, 0), treeheight_col = ifelse(cluster_cols, 50, 0), legend = TRUE, legend_breaks = NA, legend_labels = NA, annotation_row = NA, annotation_col = NA, annotation = NA, annotation_colors = NA, annotation_legend =FALSE, drop_levels = TRUE, show_rownames = T, show_colnames = T, main = NA, fontsize = 10, fontsize_row = fontsize, fontsize_col = fontsize, display_numbers = F, number_format = "%.2f", number_color = "grey30", fontsize_number = 0.8 * fontsize, gaps_row = NULL, gaps_col = NULL, labels_row = NULL, labels_col = NULL, filename = NA, width = NA, height = NA, silent = FALSE, ...){
# Main entry point (see roxygen block above for parameter docs): prepares the
# matrix (labels, scaling, optional kmeans aggregation, number formatting),
# clusters rows/columns, resolves breaks/legend/annotation colours, then
# delegates all drawing to heatmap_motor().
# Set labels
if(is.null(labels_row)){
labels_row = rownames(mat)
}
if(is.null(labels_col)){
labels_col = colnames(mat)
}
# Preprocess matrix
mat = as.matrix(mat)
if(scale != "none"){
mat = scale_mat(mat, scale)
# FIX: is.na(breaks) returns a vector when the user supplied breaks, which
# makes if() error on R >= 4.2 ("condition has length > 1"); test only the
# first element, matching the checks done later in this function.
if(is.na(breaks[1])){
breaks = generate_breaks(mat, length(color), center = T)
}
}
# Kmeans: optionally collapse rows to cluster centres before plotting.
if(!is.na(kmeans_k)){
# Cluster data
km = kmeans(mat, kmeans_k, iter.max = 100)
mat = km$centers
# Compose rownames
t = table(km$cluster)
labels_row = sprintf("Cluster: %s Size: %d", names(t), t)
}else{
km = NA
}
# Format numbers to be displayed in cells; attr(fmat, "draw") records whether
# anything should actually be drawn.
if(is.matrix(display_numbers) | is.data.frame(display_numbers)){
if(nrow(display_numbers) != nrow(mat) | ncol(display_numbers) != ncol(mat)){
stop("If display_numbers provided as matrix, its dimensions have to match with mat")
}
display_numbers = as.matrix(display_numbers)
fmat = matrix(as.character(display_numbers), nrow = nrow(display_numbers), ncol = ncol(display_numbers))
fmat_draw = TRUE
}else{
if(display_numbers){
fmat = matrix(sprintf(number_format, mat), nrow = nrow(mat), ncol = ncol(mat))
fmat_draw = TRUE
}else{
fmat = matrix(NA, nrow = nrow(mat), ncol = ncol(mat))
fmat_draw = FALSE
}
}
# Do clustering; matrix, numbers and labels are all reordered together.
if(cluster_rows){
tree_row = cluster_mat(mat, distance = clustering_distance_rows, method = clustering_method)
mat = mat[tree_row$order, , drop = FALSE]
fmat = fmat[tree_row$order, , drop = FALSE]
labels_row = labels_row[tree_row$order]
if(!is.na(cutree_rows)){
gaps_row = find_gaps(tree_row, cutree_rows)
}else{
gaps_row = NULL
}
}else{
tree_row = NA
treeheight_row = 0
}
if(cluster_cols){
tree_col = cluster_mat(t(mat), distance = clustering_distance_cols, method = clustering_method)
mat = mat[, tree_col$order, drop = FALSE]
fmat = fmat[, tree_col$order, drop = FALSE]
labels_col = labels_col[tree_col$order]
if(!is.na(cutree_cols)){
gaps_col = find_gaps(tree_col, cutree_cols)
} else{
gaps_col = NULL
}
}else{
tree_col = NA
treeheight_col = 0
}
attr(fmat, "draw") = fmat_draw
# Colors and scales
if(!is.na(legend_breaks[1]) & !is.na(legend_labels[1])){
if(length(legend_breaks) != length(legend_labels)){
stop("Lengths of legend_breaks and legend_labels must be the same")
}
}
if(is.na(breaks[1])){
breaks = generate_breaks(as.vector(mat), length(color))
}
# `legend` becomes a named numeric vector of tick positions, or NA if hidden.
if (legend & is.na(legend_breaks[1])) {
legend = grid.pretty(range(as.vector(breaks)))
names(legend) = legend
}else if(legend & !is.na(legend_breaks[1])){
legend = legend_breaks[legend_breaks >= min(breaks) & legend_breaks <= max(breaks)]
if(!is.na(legend_labels[1])){
legend_labels = legend_labels[legend_breaks >= min(breaks) & legend_breaks <= max(breaks)]
names(legend) = legend_labels
}else{
names(legend) = legend
}
}else {
legend = NA
}
# From here on `mat` holds colour strings, not data values.
mat = scale_colours(mat, col = color, breaks = breaks)
# Preparing annotations: the deprecated `annotation` argument backfills annotation_col.
if(is.na(annotation_col[[1]][1]) & !is.na(annotation[[1]][1])){
annotation_col = annotation
}
# Select only the ones present in the matrix
if(!is.na(annotation_col[[1]][1])){
annotation_col = annotation_col[colnames(mat), , drop = F]
}
if(!is.na(annotation_row[[1]][1])){
annotation_row = annotation_row[rownames(mat), , drop = F]
}
annotation = c(annotation_row, annotation_col)
annotation = annotation[unlist(lapply(annotation, function(x) !is.na(x[1])))]
if(length(annotation) != 0){
annotation_colors = generate_annotation_colours(annotation, annotation_colors, drop = drop_levels)
} else{
annotation_colors = NA
}
if(!show_rownames){
labels_row = NULL
}
if(!show_colnames){
labels_col = NULL
}
# Draw heatmap
gt = heatmap_motor(mat, border_color = border_color, cellwidth = cellwidth, cellheight = cellheight, treeheight_col = treeheight_col, treeheight_row = treeheight_row, tree_col = tree_col, tree_row = tree_row, filename = filename, width = width, height = height, breaks = breaks, color = color, legend = legend, annotation_row = annotation_row, annotation_col = annotation_col, annotation_colors = annotation_colors, annotation_legend = annotation_legend, main = main, fontsize = fontsize, fontsize_row = fontsize_row, fontsize_col = fontsize_col, fmat = fmat, fontsize_number = fontsize_number, number_color = number_color, gaps_row = gaps_row, gaps_col = gaps_col, labels_row = labels_row, labels_col = labels_col)
if(is.na(filename) & !silent){
grid.newpage()
grid.draw(gt)
}
invisible(list(tree_row = tree_row, tree_col = tree_col, kmeans = km, gtable = gt))
}
# load library
library('getopt');
#opt<-data.frame(infile="E:/R_workplace/20150626heatmap/T1_T2_vs_T3_T4.DEG.final.cluster",groupfile="E:/R_workplace/20150626heatmap/groupfile.heatmap")
# Path to shared R sources; only used by the (commented-out) source() below.
.sourcePath<-"/share/nas1/tengh/research/Rsource/"
#-----------------------------------------------------------------
# getting parameters
#-----------------------------------------------------------------
#get options, using the spec as defined by the enclosed list.
#we read the options from the default: commandArgs(TRUE).
# getopt spec columns: long option name, short flag,
# argument flag (0 = no argument, 1 = required, 2 = optional), value type.
spec = matrix(c(
	'help' , 'h', 0, "logical",
	'infile' , 'i', 1, "character",
	'groupfile' , 'G', 2, "character",
	'outfile','o',2,"character",
	'cell.width' , 'w', 2, "double",
	'cell.height','e',2,"double",
	'title','t',2,"character",
	'width','W',2,"integer",
	'height','H',2,"integer",
	'size','s',2,"double",
	'rowname','R',2,"logical",
	'colname','C',2,"logical",
	'color','c',2,"character" ,
	'zero','z',2,"double",
	'log','l',2,"character",
	'scale','S',2,"character"
	), byrow=TRUE, ncol=4);
opt = getopt(spec);
# (translated) collinearity analysis between genetic map and genome
# NOTE(review): the line above looks copy-pasted from an unrelated script.
# define usage function
# Prints the getopt-generated synopsis plus a hand-written help text, then
# quits R with a non-zero status so wrapper pipelines detect the failure.
print_usage <- function(spec=NULL){
	cat(getopt(spec, usage=TRUE));
	cat("Usage example: \n")
	# NOTE(review): "--heigth" below is a typo for "--height" in the help text.
	cat("
Usage example: 
1) Rscript heatmap.R --infile in.heatmap --outfile heatmap --color BrBG
2) Rscript heatmap.R --infile in.heatmap --outfile heatmap --groupfile group.heatmap --title heatmap --size 10 --rownames F
3) Rscript heatmap.R --infile in.heatmap --outfile heatmap --title heatmap --size 10 --cell.width 7 --cell.height 7
Options: 
--help		-h 	NULL 		get this help
--infile 	-i 	character 	the tab delimited input file saving numeric matrix of the values to be plotted.[forced]
--outfile 	-o 	character 	file path where to save the picture. Filetype is decided by the extension in the path. [optional,heatmap in current working directory]
--groupfile 	-G 	character 	the tab delimited input file saving data frame that specifies the annotations shown on top side of the heatmap [optional, default:NA]
--cell.width 	-w 	double 	individual cell width in points[optional, default: 7]
--cell.height 	-e 	double 	individual cell height in points[optional, default: 7]
--size 		-s 	double 	base fontsize for the plot[optional, default: 10]
--width 	-W 	double 	manual option for determining the output file width in pixel.[optional, default: NA]
--heigth 	-H 	double 	manual option for determining the output file height in pixel.[optional, default:NA]
--title 	-t 	character 	a title for the plot[optional, default: ]
--rowname 	-R 	logical 	boolean specifying if row names are be shown.[optional, default: TRUE]
--colname 	-C 	logical 	boolean specifying if column names are be shown.[optional, default:NA]
--color		-c	character	choose the colour set(redgreen BrBG PiYG PRGn PuOr RdBu RdGy RdYlBu RdYlGn Spectral)or set colour splited by , .[optional, default: BrBG]
--zero		-z	double	Set the minima vlaue: set mat values less than minima to minima.[optional, default:1]
--log		-l	character	a logarithmic log scale is in use.[optional, default:log2]
--scale		-S	character	character indicating if the values should be centered and scaled in either the row direction or the column direction, or none..[optional, default:none]
\n")
	# Non-zero exit: help/usage is only printed on error or explicit --help.
	q(status=1);
}
#if(file.exists(paste(.sourcePath,"heatmap/pheatmap2.r",sep="")))source(paste(.sourcePath,"heatmap/pheatmap2.r",sep=""))else stop(paste(.sourcePath,"heatmap/pheatmap2.r does not exist!",sep=""))
# if help was asked for print a friendly message
# and exit with a non-zero error code
if (!is.null(opt$help)) { print_usage(spec) }
# check non-null args
# --infile is mandatory; also normalise Windows "\" path separators to "/".
if ( is.null(opt$infile) ) { print_usage(spec) }else {opt$infile<-gsub("\\\\",replacement = "/",x = opt$infile)}
#set some reasonable defaults for the options that are needed,
#but were not specified.
if ( is.null(opt$groupfile) ) { opt$groupfile=NA }else {opt$groupfile<-gsub("\\\\",replacement = "/",x = opt$groupfile)}
if( is.null(opt$outfile))opt$outfile="heatmap"
if(is.null(opt$title))opt$title=""
# --width/--height are given in pixels; divide by 500 to obtain inches,
# because the raster devices in heatmap_motor() are opened with res = 500.
if(is.null(opt$width)){
  opt$width=NA
}else if(!(is.numeric(opt$width)&&opt$width>0)){
  stop("Parameter Error:outfile width must be positive integer")
}else{
  opt$width=opt$width/500
}
if(is.null(opt$height)){
  opt$height=NA
}else if(!(is.numeric(opt$height)&&opt$height>0)){
  stop("Parameter Error:outfile height must be positive integer")
}else{
  opt$height=opt$height/500
}
# Cell sizes default to 7 bigpts, but only when the corresponding overall
# figure dimension was NOT fixed by the user; otherwise cells auto-scale
# (NA cellwidth/cellheight makes lo() stretch the matrix to fill the page).
if(is.null(opt$cell.width)){
  opt$cell.width=ifelse(is.na(opt$width),7,NA)
}else if(!(is.numeric(opt$cell.width)&&opt$cell.width>0)){
  stop("Parameter Error:cell width must be positive integer")
}
if(is.null(opt$cell.height)){
  opt$cell.height=ifelse(is.na(opt$height),7,NA )
}else if(!(is.numeric(opt$cell.height)&&opt$cell.height>0)){
  stop("Parameter Error:cell height must be positive integer")
}
# NOTE(review): T is used for TRUE; safe only while T is never reassigned.
if(is.null(opt$rowname))opt$rowname=T
if(is.null(opt$colname))opt$colname=T
#if(is.null(opt$color))opt$color="RdYlGn"
# Default colour ramp: a comma-separated custom 4-colour gradient.
if(is.null(opt$color))opt$color="#263C8B,#4E74A6,#BDBF78,#BFA524"
# Pseudo-count added to the matrix before the log transform (see below).
if(is.null(opt$zero))opt$zero=1
if(is.null(opt$size))opt$size=10
if(is.null(opt$log))opt$log="log2"
if(is.null(opt$scale))opt$scale="none"
##import data --------------------------------------------------------------
# Tab-delimited table: column 1 = row (gene) IDs, remaining columns = samples.
rawdat<-read.table(opt$infile,head=T,sep="\t",comment.char = "",check.names =F)
message(nrow(rawdat))
# Keep at most the first 30 rows so every row label stays legible.
# (The original "else if (nrow > 0 && nrow < 30) rawdat <- rawdat" branch
# was a no-op and has been removed.)
if(nrow(rawdat)>30){
  rawdat <- rawdat[1:30, , drop = FALSE]
}
# First column supplies row names, then is dropped from the numeric matrix.
rownames(rawdat)<-as.matrix(rawdat)[,1]
rawdat<-as.matrix(rawdat[,-1])
# Pseudo-count (--zero, default 1) keeps the log transform defined at zero.
rawdat<-rawdat+opt$zero
# Optional log transform. The usage text documents NA/log10/log2; a literal
# "NA" typed on the command line arrives as the string "NA", and a genuine NA
# would previously have crashed the first "==" comparison, so both are
# checked up front (BUG FIX: the NA branch was unreachable before).
if(is.na(opt$log) || opt$log=="NA"){
  # no transform requested
}else if(opt$log=="log2"){
  rawdat<-log2(rawdat)
}else if(opt$log=="log10"){
  rawdat<-log10(rawdat)
}else{
  stop("Paramter error: a logarithmic scale parameter log can only be NA log10 or log2!")
}
# Sample grouping / column annotation ---------------------------------------
if(is.na(opt$groupfile)){
  # No group file: plot the heatmap without a column annotation track.
  anColor = NA
  colGroup =NA
  heat.dat=rawdat
}else{
  # Two-column file: sample name <TAB> group label.
  groupdat<-read.table(as.vector(opt$groupfile),header=F,sep="\t",comment.char = "")
  group<-as.vector(groupdat[,2])
  names(group)<-as.vector(groupdat[,1])
  # Every sample named in the group file must exist in the matrix.
  # (collapse="\n" fixes paste(..., sep="\n") which was a no-op on a vector,
  # so the message lines previously ran together.)
  if(sum(!is.element(names(group),colnames(rawdat)))>0){
    stop(paste(c("the following samples in group file not exist:",setdiff(names(group),colnames(rawdat)),"please check your groupfile!"),collapse="\n"))
  }
  # Samples present in the matrix but absent from the group file are dropped.
  # BUG FIX: the warning previously listed setdiff(names(group), colnames(rawdat)),
  # the wrong direction; it now reports the samples that are really skipped.
  if(sum(!is.element(colnames(rawdat),names(group)))>0){
    warning(paste(c("the following samples in infile will not be ploted:",setdiff(colnames(rawdat),names(group))),collapse="\n"))
  }
  # (translated) add a classification colour bar for multi-group samples:
  # reorder matrix columns to the group file's order and build the annotation.
  heat.dat<-rawdat[,names(group)]
  colGroup<-data.frame(Group=group)
  colGroup$Group= factor(colGroup$Group, levels = c(unique(group), "other"))
  row.names(colGroup)<-names(group)  # (translated) set sample colour classes
  # Fixed qualitative palette for the group colour bar (up to 16 groups).
  gColor<-c( "#7FC97F","#BEAED4","#FDC086","#FFFF99","#386CB0","#F0027F","#BF5B17","#666666", "#B3E2CD","#FDCDAC","#CBD5E8","#F4CAE4","#E6F5C9","#FFF2AE","#F1E2CC","#CCCCCC")
  gColor=gColor[1:length(unique(group))]
  names(gColor)<-unique(group)
  anColor<-list(Group=gColor)
}
# Heat-map colour gradient --------------------------------------------------
# Three ways to specify --color: an RColorBrewer palette name, the gplots
# "redgreen" scheme, or a comma-separated list of custom colours.
if(length(opt$color)==1&&is.element(opt$color,c("BrBG","PiYG","PRGn","PuOr","RdBu","RdGy","RdYlBu","RdYlGn","Spectral"))){
  # library() instead of require(): the return value was ignored, so a
  # missing package should fail fast here rather than at brewer.pal().
  library(RColorBrewer)
  # Reverse the 7-step diverging palette and interpolate to 100 colours.
  hmColors=colorRampPalette(rev(brewer.pal(n = 7, name = opt$color)))(100)
}else if(length(opt$color)==1&&(opt$color=="redgreen")){
  library(gplots)
  message(paste("color=",opt$color,sep=""))
  hmColors=redgreen(255)
}else{
  # Comma-separated custom colours, interpolated to a 256-step gradient.
  hmColors<-strsplit(opt$color,split = ",")[[1]]
  hmColors=colorRampPalette(hmColors)(256)
}
# Cluster rows and render output --------------------------------------------
# Row dendrogram, computed once and reused for the text dump (the original
# re-ran hclust(dist(heat.dat)) a second time for the same result).
hl<-hclust(dist(heat.dat))
capture.output(str(as.dendrogram(hl)),file =paste(c(opt$outfile,".txt"),collapse =""))
#message(c("width",opt$width,"height",opt$height))
# Render the identical heatmap to PNG and PDF. pheatmap2() opens and closes
# its own device when `filename` is supplied (see heatmap_motor), so dev.off()
# is only issued when a device really is still open: the original
# unconditional dev.off() could abort the script on the null device.
draw_heatmap_file <- function(ext){
  pheatmap2(filename =paste(c(opt$outfile,ext),collapse =""),width = opt$width,height = opt$height,mat=heat.dat,cellwidth=opt$cell.width,color = hmColors,cellheight=opt$cell.height,main=opt$title,cluster_rows=T,cluster_cols=T,annotation_col = colGroup,annotation = colGroup,annotation_colors = anColor,fontsize=opt$size,col=hmColors,show_rownames=opt$rowname,show_colnames=opt$colname,fontsize_col=ifelse(is.na(opt$cell.width),opt$size,min(opt$size,opt$cell.width)),fontsize_row=ifelse(is.na(opt$cell.height),opt$size,min(opt$size,opt$cell.height)),scale=opt$scale)
  if(dev.cur() > 1) dev.off()
}
draw_heatmap_file(".png")
draw_heatmap_file(".pdf")
| /bin/lnc_diff/v3.4/bin/draw_anno_cluster/anno_cluster_heatmap2.r | no_license | baibaijingjing/LncRNA | R | false | false | 50,463 | r | #!/share/nas2/genome/biosoft/R/2.15.1/lib64/R/bin/Rscript
#####################################################################
# Copyright 2015, BMK
#
# Author:tengh <tengh@biomarker.com.cn>
#
# Function: draw genomewide cytosine coverage distribution map
#
# Modify date: 20150819
# Note: delete group label
# reset opt$color="#263C8B,#4E74A6,#BDBF78,#BFA524"
#####################################################################
library("grid")
library("RColorBrewer")
library("scales")
library("gtable")
lo = function(rown, coln, nrow, ncol, cellheight = NA, cellwidth = NA, treeheight_col, treeheight_row, legend, annotation_row, annotation_col, annotation_colors, annotation_legend, main, fontsize, fontsize_row, fontsize_col, gaps_row, gaps_col, ...){
# Get height of colnames and length of rownames
if(!is.null(coln[1])){
t = c(coln, colnames(annotation_row))
longest_coln = which.max(strwidth(t, units = 'in'))
gp = list(fontsize = fontsize_col, ...)
coln_height = unit(1, "grobheight", textGrob(t[longest_coln], rot = 90, gp = do.call(gpar, gp))) + unit(10, "bigpts")
}else{
coln_height = unit(5, "bigpts")
}
if(!is.null(rown[1])){
#t = c(rown, colnames(annotation_col))
t = c(rown, "") #20150819
longest_rown = which.max(strwidth(t, units = 'in'))
gp = list(fontsize = fontsize_row, ...)
rown_width = unit(1, "grobwidth", textGrob(t[longest_rown], gp = do.call(gpar, gp))) + unit(10, "bigpts")
}else{
rown_width = unit(5, "bigpts")
}
gp = list(fontsize = fontsize, ...)
# Legend position
if(!is.na(legend[1])){
longest_break = which.max(nchar(names(legend)))
longest_break = unit(1.1, "grobwidth", textGrob(as.character(names(legend))[longest_break], gp = do.call(gpar, gp)))
title_length = unit(1.1, "grobwidth", textGrob("Scale", gp = gpar(fontface = "bold", ...)))
legend_width = unit(12, "bigpts") + longest_break * 1.2
legend_width = max(title_length, legend_width)
}else{
legend_width = unit(0, "bigpts")
}
# Set main title height
if(is.na(main)){
main_height = unit(0, "npc")
}else{
main_height = unit(1.5, "grobheight", textGrob(main, gp = gpar(fontsize = 1.3 * fontsize, ...)))
}
# Column annotations
textheight = unit(fontsize, "bigpts")
if(!is.na(annotation_col[[1]][1])){
# Column annotation height
annot_col_height = ncol(annotation_col) * (textheight + unit(2, "bigpts")) + unit(2, "bigpts")
# Width of the correponding legend
#t = c(as.vector(as.matrix(annotation_col)), colnames(annotation_col))
t = c(as.vector(as.matrix(annotation_col)),"") #20150819
annot_col_legend_width = unit(1.2, "grobwidth", textGrob(t[which.max(nchar(t))], gp = gpar(...))) + unit(12, "bigpts")
if(!annotation_legend){
annot_col_legend_width = unit(0, "npc")
}
}else{
annot_col_height = unit(0, "bigpts")
annot_col_legend_width = unit(0, "bigpts")
}
# Row annotations
if(!is.na(annotation_row[[1]][1])){
# Row annotation width
annot_row_width = ncol(annotation_row) * (textheight + unit(2, "bigpts")) + unit(2, "bigpts")
# Width of the correponding legend
t = c(as.vector(as.matrix(annotation_row)), colnames(annotation_row))
annot_row_legend_width = unit(1.2, "grobwidth", textGrob(t[which.max(nchar(t))], gp = gpar(...))) + unit(12, "bigpts")
if(!annotation_legend){
annot_row_legend_width = unit(0, "npc")
}
}else{
annot_row_width = unit(0, "bigpts")
annot_row_legend_width = unit(0, "bigpts")
}
annot_legend_width = max(annot_row_legend_width, annot_col_legend_width)
# Tree height
treeheight_col = unit(treeheight_col, "bigpts") + unit(5, "bigpts")
treeheight_row = unit(treeheight_row, "bigpts") + unit(5, "bigpts")
# Set cell sizes
if(is.na(cellwidth)){
mat_width = unit(1, "npc") - rown_width - legend_width - treeheight_row - annot_row_width - annot_legend_width
}else{
mat_width = unit(cellwidth * ncol, "bigpts") + length(gaps_col) * unit(4, "bigpts")
}
if(is.na(cellheight)){
mat_height = unit(1, "npc") - main_height - coln_height - treeheight_col - annot_col_height
}else{
mat_height = unit(cellheight * nrow, "bigpts") + length(gaps_row) * unit(4, "bigpts")
}
# Produce gtable
gt = gtable(widths = unit.c(treeheight_row, annot_row_width, mat_width, rown_width, legend_width, annot_legend_width), heights = unit.c(main_height, treeheight_col, annot_col_height, mat_height, coln_height), vp = viewport(gp = do.call(gpar, gp)))
cw = convertWidth(mat_width - (length(gaps_col) * unit(4, "bigpts")), "bigpts", valueOnly = T) / ncol
ch = convertHeight(mat_height - (length(gaps_row) * unit(4, "bigpts")), "bigpts", valueOnly = T) / nrow
# Return minimal cell dimension in bigpts to decide if borders are drawn
mindim = min(cw, ch)
res = list(gt = gt, mindim = mindim)
return(res)
}
find_coordinates = function(n, gaps, m = 1:n){
if(length(gaps) == 0){
return(list(coord = unit(m / n, "npc"), size = unit(1 / n, "npc") ))
}
if(max(gaps) > n){
stop("Gaps do not match with matrix size")
}
size = (1 / n) * (unit(1, "npc") - length(gaps) * unit("4", "bigpts"))
gaps2 = apply(sapply(gaps, function(gap, x){x > gap}, m), 1, sum)
coord = m * size + (gaps2 * unit("4", "bigpts"))
return(list(coord = coord, size = size))
}
draw_dendrogram = function(hc, gaps, horizontal = T){
h = hc$height / max(hc$height) / 1.05
m = hc$merge
o = hc$order
n = length(o)
m[m > 0] = n + m[m > 0]
m[m < 0] = abs(m[m < 0])
dist = matrix(0, nrow = 2 * n - 1, ncol = 2, dimnames = list(NULL, c("x", "y")))
dist[1:n, 1] = 1 / n / 2 + (1 / n) * (match(1:n, o) - 1)
for(i in 1:nrow(m)){
dist[n + i, 1] = (dist[m[i, 1], 1] + dist[m[i, 2], 1]) / 2
dist[n + i, 2] = h[i]
}
draw_connection = function(x1, x2, y1, y2, y){
res = list(
x = c(x1, x1, x2, x2),
y = c(y1, y, y, y2)
)
return(res)
}
x = rep(NA, nrow(m) * 4)
y = rep(NA, nrow(m) * 4)
id = rep(1:nrow(m), rep(4, nrow(m)))
for(i in 1:nrow(m)){
c = draw_connection(dist[m[i, 1], 1], dist[m[i, 2], 1], dist[m[i, 1], 2], dist[m[i, 2], 2], h[i])
k = (i - 1) * 4 + 1
x[k : (k + 3)] = c$x
y[k : (k + 3)] = c$y
}
x = find_coordinates(n, gaps, x * n)$coord
y = unit(y, "npc")
if(!horizontal){
a = x
x = unit(1, "npc") - y
y = unit(1, "npc") - a
}
res = polylineGrob(x = x, y = y, id = id)
return(res)
}
draw_matrix = function(matrix, border_color, gaps_rows, gaps_cols, fmat, fontsize_number, number_color){
n = nrow(matrix)
m = ncol(matrix)
coord_x = find_coordinates(m, gaps_cols)
coord_y = find_coordinates(n, gaps_rows)
x = coord_x$coord - 0.5 * coord_x$size
y = unit(1, "npc") - (coord_y$coord - 0.5 * coord_y$size)
coord = expand.grid(y = y, x = x)
res = gList()
res[["rect"]] = rectGrob(x = coord$x, y = coord$y, width = coord_x$size, height = coord_y$size, gp = gpar(fill = matrix, col = border_color))
if(attr(fmat, "draw")){
res[["text"]] = textGrob(x = coord$x, y = coord$y, label = fmat, gp = gpar(col = number_color, fontsize = fontsize_number))
}
res = gTree(children = res)
return(res)
}
draw_colnames = function(coln, gaps, ...){
coord = find_coordinates(length(coln), gaps)
x = coord$coord - 0.5 * coord$size
res = textGrob(coln, x = x, y = unit(1, "npc") - unit(3, "bigpts"), vjust = 0.5, hjust = 0, rot = 270, gp = gpar(...))
return(res)
}
draw_rownames = function(rown, gaps, ...){
coord = find_coordinates(length(rown), gaps)
y = unit(1, "npc") - (coord$coord - 0.5 * coord$size)
res = textGrob(rown, x = unit(3, "bigpts"), y = y, vjust = 0.5, hjust = 0, gp = gpar(...))
return(res)
}
draw_legend = function(color, breaks, legend, ...){
height = min(unit(1, "npc"), unit(150, "bigpts"))
#message(paste(c("legend=",legend),collapse = "\t"))
#message(paste(c("min(breaks)=",min(breaks)),collapse = "\t"))
legend_pos = (legend - min(breaks)) / (max(breaks) - min(breaks))
legend_pos = height * legend_pos + (unit(1, "npc") - height)
breaks = (breaks - min(breaks)) / (max(breaks) - min(breaks))
breaks = height * breaks + (unit(1, "npc") - height)
h = breaks[-1] - breaks[-length(breaks)]
rect = rectGrob(x = 0, y = breaks[-length(breaks)], width = unit(10, "bigpts"), height = h, hjust = 0, vjust = 0, gp = gpar(fill = color, col = "#FFFFFF00"))
text = textGrob(names(legend), x = unit(14, "bigpts"), y = legend_pos, hjust = 0, gp = gpar(...))
res = grobTree(rect, text)
return(res)
}
convert_annotations = function(annotation, annotation_colors){
new = annotation
for(i in 1:ncol(annotation)){
a = annotation[, i]
b = annotation_colors[[colnames(annotation)[i]]]
if(is.character(a) | is.factor(a)){
a = as.character(a)
if(length(setdiff(a, names(b))) > 0){
stop(sprintf("Factor levels on variable %s do not match with annotation_colors", colnames(annotation)[i]))
}
new[, i] = b[a]
}else{
a = cut(a, breaks = 100)
new[, i] = colorRampPalette(b)(100)[a]
}
}
return(as.matrix(new))
}
draw_annotations = function(converted_annotations, border_color, gaps, fontsize, horizontal){
n = ncol(converted_annotations)
m = nrow(converted_annotations)
coord_x = find_coordinates(m, gaps)
x = coord_x$coord - 0.5 * coord_x$size
# y = cumsum(rep(fontsize, n)) - 4 + cumsum(rep(2, n))
y = cumsum(rep(fontsize, n)) + cumsum(rep(2, n)) - fontsize / 2 + 1
y = unit(y, "bigpts")
if(horizontal){
coord = expand.grid(x = x, y = y)
res = rectGrob(x = coord$x, y = coord$y, width = coord_x$size, height = unit(fontsize, "bigpts"), gp = gpar(fill = converted_annotations, col = border_color))
}else{
a = x
x = unit(1, "npc") - y
y = unit(1, "npc") - a
coord = expand.grid(y = y, x = x)
res = rectGrob(x = coord$x, y = coord$y, width = unit(fontsize, "bigpts"), height = coord_x$size, gp = gpar(fill = converted_annotations, col = border_color))
}
return(res)
}
draw_annotation_names = function(annotations, fontsize, horizontal){
n = ncol(annotations)
x = unit(3, "bigpts")
y = cumsum(rep(fontsize, n)) + cumsum(rep(2, n)) - fontsize / 2 + 1
y = unit(y, "bigpts")
if(horizontal){
res = textGrob(colnames(annotations), x = x, y = y, hjust = 0, gp = gpar(fontsize = fontsize, fontface = 2))
}else{
a = x
x = unit(1, "npc") - y
y = unit(1, "npc") - a
res = textGrob(colnames(annotations), x = x, y = y, vjust = 0.5, hjust = 0, rot = 270, gp = gpar(fontsize = fontsize, fontface = 2))
}
return(res)
}
draw_annotation_legend = function(annotation, annotation_colors, border_color, ...){
y = unit(1, "npc")
text_height = unit(1, "grobheight", textGrob("FGH", gp = gpar(...)))
res = gList()
for(i in names(annotation)){
res[[i]] = textGrob(i, x = 0, y = y, vjust = 1, hjust = 0, gp = gpar(fontface = "bold", ...))
y = y - 1.5 * text_height
if(is.character(annotation[[i]]) | is.factor(annotation[[i]])){
n = length(annotation_colors[[i]])
yy = y - (1:n - 1) * 2 * text_height
res[[paste(i, "r")]] = rectGrob(x = unit(0, "npc"), y = yy, hjust = 0, vjust = 1, height = 2 * text_height, width = 2 * text_height, gp = gpar(col = border_color, fill = annotation_colors[[i]]))
res[[paste(i, "t")]] = textGrob(names(annotation_colors[[i]]), x = text_height * 2.4, y = yy - text_height, hjust = 0, vjust = 0.5, gp = gpar(...))
y = y - n * 2 * text_height
}else{
yy = y - 8 * text_height + seq(0, 1, 0.25)[-1] * 8 * text_height
h = 8 * text_height * 0.25
res[[paste(i, "r")]] = rectGrob(x = unit(0, "npc"), y = yy, hjust = 0, vjust = 1, height = h, width = 2 * text_height, gp = gpar(col = NA, fill = colorRampPalette(annotation_colors[[i]])(4)))
res[[paste(i, "r2")]] = rectGrob(x = unit(0, "npc"), y = y, hjust = 0, vjust = 1, height = 8 * text_height, width = 2 * text_height, gp = gpar(col = border_color))
txt = rev(range(grid.pretty(range(annotation[[i]], na.rm = TRUE))))
yy = y - c(1, 7) * text_height
res[[paste(i, "t")]] = textGrob(txt, x = text_height * 2.4, y = yy, hjust = 0, vjust = 0.5, gp = gpar(...))
y = y - 8 * text_height
}
y = y - 1.5 * text_height
}
res = gTree(children = res)
return(res)
}
draw_main = function(text, ...){
res = textGrob(text, gp = gpar(fontface = "bold", ...))
return(res)
}
vplayout = function(x, y){
return(viewport(layout.pos.row = x, layout.pos.col = y))
}
heatmap_motor = function(matrix, border_color, cellwidth, cellheight, tree_col, tree_row, treeheight_col, treeheight_row, filename, width, height, breaks, color, legend, annotation_row, annotation_col, annotation_colors, annotation_legend, main, fontsize, fontsize_row, fontsize_col, fmat, fontsize_number, number_color, gaps_col, gaps_row, labels_row, labels_col, ...){
# Set layout
lo = lo(coln = labels_col, rown = labels_row, nrow = nrow(matrix), ncol = ncol(matrix), cellwidth = cellwidth, cellheight = cellheight, treeheight_col = treeheight_col, treeheight_row = treeheight_row, legend = legend, annotation_col = annotation_col, annotation_row = annotation_row, annotation_colors = annotation_colors, annotation_legend = annotation_legend, main = main, fontsize = fontsize, fontsize_row = fontsize_row, fontsize_col = fontsize_col, gaps_row = gaps_row, gaps_col = gaps_col, ...)
res = lo$gt
mindim = lo$mindim
if(!is.na(filename)){
if(is.na(height)){
height = convertHeight(gtable_height(res), "inches", valueOnly = T)
}
if(is.na(width)){
width = convertWidth(gtable_width(res), "inches", valueOnly = T)
}
# Get file type
r = regexpr("\\.[a-zA-Z]*$", filename)
if(r == -1) stop("Improper filename")
ending = substr(filename, r + 1, r + attr(r, "match.length"))
f = switch(ending,
pdf = function(x, ...) pdf(x, ...),
png = function(x, ...) png(x, units = "in", res = 500, ...),
jpeg = function(x, ...) jpeg(x, units = "in", res = 500, ...),
jpg = function(x, ...) jpeg(x, units = "in", res = 500, ...),
tiff = function(x, ...) tiff(x, units = "in", res = 500, compression = "lzw", ...),
bmp = function(x, ...) bmp(x, units = "in", res = 500, ...),
stop("File type should be: pdf, png, bmp, jpg, tiff")
)
# print(sprintf("height:%f width:%f", height, width))
# gt = heatmap_motor(matrix, cellwidth = cellwidth, cellheight = cellheight, border_color = border_color, tree_col = tree_col, tree_row = tree_row, treeheight_col = treeheight_col, treeheight_row = treeheight_row, breaks = breaks, color = color, legend = legend, annotation_col = annotation_col, annotation_row = annotation_row, annotation_colors = annotation_colors, annotation_legend = annotation_legend, filename = NA, main = main, fontsize = fontsize, fontsize_row = fontsize_row, fontsize_col = fontsize_col, fmat = fmat, fontsize_number = fontsize_number, number_color = number_color, labels_row = labels_row, labels_col = labels_col, gaps_col = gaps_col, gaps_row = gaps_row, ...)
f(filename, height = height, width = width)
gt = heatmap_motor(matrix, cellwidth = cellwidth, cellheight = cellheight, border_color = border_color, tree_col = tree_col, tree_row = tree_row, treeheight_col = treeheight_col, treeheight_row = treeheight_row, breaks = breaks, color = color, legend = legend, annotation_col = annotation_col, annotation_row = annotation_row, annotation_colors = annotation_colors, annotation_legend = annotation_legend, filename = NA, main = main, fontsize = fontsize, fontsize_row = fontsize_row, fontsize_col = fontsize_col, fmat = fmat, fontsize_number = fontsize_number, number_color = number_color, labels_row = labels_row, labels_col = labels_col, gaps_col = gaps_col, gaps_row = gaps_row, ...)
grid.draw(gt)
dev.off()
return(NULL)
}
# Omit border color if cell size is too small
if(mindim < 3) border_color = NA
# Draw title
if(!is.na(main)){
elem = draw_main(main, fontsize = 1.3 * fontsize, ...)
res = gtable_add_grob(res, elem, t = 1, l = 3, name = "main")
}
# Draw tree for the columns
if(!is.na(tree_col[[1]][1]) & treeheight_col != 0){
elem = draw_dendrogram(tree_col, gaps_col, horizontal = T)
res = gtable_add_grob(res, elem, t = 2, l = 3, name = "col_tree")
}
# Draw tree for the rows
if(!is.na(tree_row[[1]][1]) & treeheight_row != 0){
elem = draw_dendrogram(tree_row, gaps_row, horizontal = F)
res = gtable_add_grob(res, elem, t = 4, l = 1, name = "row_tree")
}
# Draw matrix
elem = draw_matrix(matrix, border_color, gaps_row, gaps_col, fmat, fontsize_number, number_color)
res = gtable_add_grob(res, elem, t = 4, l = 3, clip = "off", name = "matrix")
# Draw colnames
if(length(labels_col) != 0){
pars = list(labels_col, gaps = gaps_col, fontsize = fontsize_col, ...)
elem = do.call(draw_colnames, pars)
res = gtable_add_grob(res, elem, t = 5, l = 3, clip = "off", name = "col_names")
}
# Draw rownames
if(length(labels_row) != 0){
pars = list(labels_row, gaps = gaps_row, fontsize = fontsize_row, ...)
elem = do.call(draw_rownames, pars)
res = gtable_add_grob(res, elem, t = 4, l = 4, clip = "off", name = "row_names")
}
# Draw annotation tracks on cols
if(!is.na(annotation_col[[1]][1])){
# Draw tracks
converted_annotation = convert_annotations(annotation_col, annotation_colors)
elem = draw_annotations(converted_annotation, border_color, gaps_col, fontsize, horizontal = T)
res = gtable_add_grob(res, elem, t = 3, l = 3, clip = "off", name = "col_annotation")
# Draw names
annotation_col.tmp<-annotation_col
colnames(annotation_col.tmp)<-""
elem = draw_annotation_names(annotation_col.tmp, fontsize, horizontal = T)
res = gtable_add_grob(res, elem, t = 3, l = 4, clip = "off", name = "row_annotation_names")
}
# Draw annotation tracks on rows
if(!is.na(annotation_row[[1]][1])){
# Draw tracks
converted_annotation = convert_annotations(annotation_row, annotation_colors)
elem = draw_annotations(converted_annotation, border_color, gaps_row, fontsize, horizontal = F)
res = gtable_add_grob(res, elem, t = 4, l = 2, clip = "off", name = "row_annotation")
# Draw names
elem = draw_annotation_names(annotation_row, fontsize, horizontal = F)
res = gtable_add_grob(res, elem, t = 5, l = 2, clip = "off", name = "row_annotation_names")
}
# Draw annotation legend
annotation = c(annotation_col[length(annotation_col):1], annotation_row[length(annotation_row):1])
annotation = annotation[unlist(lapply(annotation, function(x) !is.na(x[1])))]
if(length(annotation) > 0 & annotation_legend){
elem = draw_annotation_legend(annotation, annotation_colors, border_color, fontsize = fontsize, ...)
t = ifelse(is.null(labels_row), 4, 3)
res = gtable_add_grob(res, elem, t = t, l = 6, b = 5, clip = "off", name = "annotation_legend")
}
# Draw legend
if(!is.na(legend[1])){
elem = draw_legend(color, breaks, legend, fontsize = fontsize, ...)
t = ifelse(is.null(labels_row), 4, 3)
res = gtable_add_grob(res, elem, t = t, l = 5, b = 5, clip = "off", name = "legend")
}
return(res)
}
generate_breaks = function(x, n, center = F){
if(center){
m = max(abs(c(min(x, na.rm = T), max(x, na.rm = T))))
res = seq(-m, m, length.out = n + 1)
}else{
res = seq(min(x, na.rm = T), max(x, na.rm = T), length.out = n + 1)
}
return(res)
}
scale_vec_colours = function(x, col = rainbow(10), breaks = NA){
return(col[as.numeric(cut(x, breaks = breaks, include.lowest = T))])
}
scale_colours = function(mat, col = rainbow(10), breaks = NA){
mat = as.matrix(mat)
return(matrix(scale_vec_colours(as.vector(mat), col = col, breaks = breaks), nrow(mat), ncol(mat), dimnames = list(rownames(mat), colnames(mat))))
}
cluster_mat = function(mat, distance, method){
if(!(method %in% c("ward.D2", "ward", "single", "complete", "average", "mcquitty", "median", "centroid"))){
stop("clustering method has to one form the list: 'ward', 'ward.D2', 'single', 'complete', 'average', 'mcquitty', 'median' or 'centroid'.")
}
if(!(distance[1] %in% c("correlation", "euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski")) & class(distance) != "dist"){
stop("distance has to be a dissimilarity structure as produced by dist or one measure form the list: 'correlation', 'euclidean', 'maximum', 'manhattan', 'canberra', 'binary', 'minkowski'")
}
if(distance[1] == "correlation"){
d = as.dist(1 - cor(t(mat)))
}else{
if(class(distance) == "dist"){
d = distance
}else{
d = dist(mat, method = distance)
}
}
return(hclust(d, method = method))
}
scale_rows = function(x){
m = apply(x, 1, mean, na.rm = T)
s = apply(x, 1, sd, na.rm = T)
return((x - m) / s)
}
scale_mat = function(mat, scale){
if(!(scale %in% c("none", "row", "column"))){
stop("scale argument shoud take values: 'none', 'row' or 'column'")
}
mat = switch(scale, none = mat, row = scale_rows(mat), column = t(scale_rows(t(mat))))
return(mat)
}
generate_annotation_colours = function(annotation, annotation_colors, drop){
if(is.na(annotation_colors)[[1]][1]){
annotation_colors = list()
}
count = 0
for(i in 1:length(annotation)){
if(is.character(annotation[[i]]) | is.factor(annotation[[i]])){
if (is.factor(annotation[[i]]) & !drop){
count = count + length(levels(annotation[[i]]))
}else{
count = count + length(unique(annotation[[i]]))
}
}
}
factor_colors = dscale(factor(1:count), hue_pal(l = 75))
set.seed(3453)
cont_counter = 2
for(i in 1:length(annotation)){
if(!(names(annotation)[i] %in% names(annotation_colors))){
if(is.character(annotation[[i]]) | is.factor(annotation[[i]])){
n = length(unique(annotation[[i]]))
if (is.factor(annotation[[i]]) & !drop){
n = length(levels(annotation[[i]]))
}
ind = sample(1:length(factor_colors), n)
annotation_colors[[names(annotation)[i]]] = factor_colors[ind]
l = levels(as.factor(annotation[[i]]))
l = l[l %in% unique(annotation[[i]])]
if (is.factor(annotation[[i]]) & !drop){
l = levels(annotation[[i]])
}
names(annotation_colors[[names(annotation)[i]]]) = l
factor_colors = factor_colors[-ind]
}else{
annotation_colors[[names(annotation)[i]]] = brewer_pal("seq", cont_counter)(5)[1:4]
cont_counter = cont_counter + 1
}
}
}
return(annotation_colors)
}
kmeans_pheatmap = function(mat, k = min(nrow(mat), 150), sd_limit = NA, ...){
# Filter data
if(!is.na(sd_limit)){
s = apply(mat, 1, sd)
mat = mat[s > sd_limit, ]
}
# Cluster data
set.seed(1245678)
km = kmeans(mat, k, iter.max = 100)
mat2 = km$centers
# Compose rownames
t = table(km$cluster)
rownames(mat2) = sprintf("cl%s_size_%d", names(t), t)
# Draw heatmap
pheatmap2(mat2, ...)
}
find_gaps = function(tree, cutree_n){
v = cutree(tree, cutree_n)[tree$order]
gaps = which((v[-1] - v[-length(v)]) != 0)
}
#' A function to draw clustered heatmaps.
#'
#' A function to draw clustered heatmaps where one has better control over some graphical
#' parameters such as cell size, etc.
#'
#' The function also allows to aggregate the rows using kmeans clustering. This is
#' advisable if number of rows is so big that R cannot handle their hierarchical
#' clustering anymore, roughly more than 1000. Instead of showing all the rows
#' separately one can cluster the rows in advance and show only the cluster centers.
#' The number of clusters can be tuned with parameter kmeans_k.
#'
#' @param mat numeric matrix of the values to be plotted.
#' @param color vector of colors used in heatmap.
#' @param kmeans_k the number of kmeans clusters to make, if we want to agggregate the
#' rows before drawing heatmap. If NA then the rows are not aggregated.
#' @param breaks a sequence of numbers that covers the range of values in mat and is one
#' element longer than color vector. Used for mapping values to colors. Useful, if needed
#' to map certain values to certain colors, to certain values. If value is NA then the
#' breaks are calculated automatically.
#' @param border_color color of cell borders on heatmap, use NA if no border should be
#' drawn.
#' @param cellwidth individual cell width in points. If left as NA, then the values
#' depend on the size of plotting window.
#' @param cellheight individual cell height in points. If left as NA,
#' then the values depend on the size of plotting window.
#' @param scale character indicating if the values should be centered and scaled in
#' either the row direction or the column direction, or none. Corresponding values are
#' \code{"row"}, \code{"column"} and \code{"none"}
#' @param cluster_rows boolean values determining if rows should be clustered,
#' @param cluster_cols boolean values determining if columns should be clustered.
#' @param clustering_distance_rows distance measure used in clustering rows. Possible
#' values are \code{"correlation"} for Pearson correlation and all the distances
#' supported by \code{\link{dist}}, such as \code{"euclidean"}, etc. If the value is none
#' of the above it is assumed that a distance matrix is provided.
#' @param clustering_distance_cols distance measure used in clustering columns. Possible
#' values the same as for clustering_distance_rows.
#' @param clustering_method clustering method used. Accepts the same values as
#' \code{\link{hclust}}.
#' @param cutree_rows number of clusters the rows are divided into, based on the
#' hierarchical clustering (using cutree), if rows are not clustered, the
#' argument is ignored
#' @param cutree_cols similar to \code{cutree_rows}, but for columns
#' @param treeheight_row the height of a tree for rows, if these are clustered.
#' Default value 50 points.
#' @param treeheight_col the height of a tree for columns, if these are clustered.
#' Default value 50 points.
#' @param legend logical to determine if legend should be drawn or not.
#' @param legend_breaks vector of breakpoints for the legend.
#' @param legend_labels vector of labels for the \code{legend_breaks}.
#' @param annotation_row data frame that specifies the annotations shown on left
#' side of the heatmap. Each row defines the features for a specific row. The
#' rows in the data and in the annotation are matched using corresponding row
#' names. Note that color schemes takes into account if variable is continuous
#' or discrete.
#' @param annotation_col similar to annotation_row, but for columns.
#' @param annotation deprecated parameter that currently sets the annotation_col if it is missing
#' @param annotation_colors list for specifying annotation_row and
#' annotation_col track colors manually. It is possible to define the colors
#' for only some of the features. Check examples for details.
#' @param annotation_legend boolean value showing if the legend for annotation
#' tracks should be drawn.
#' @param drop_levels logical to determine if unused levels are also shown in
#' the legend
#' @param show_rownames boolean specifying if row names are shown.
#' @param show_colnames boolean specifying if column names are shown.
#' @param main the title of the plot
#' @param fontsize base fontsize for the plot
#' @param fontsize_row fontsize for rownames (Default: fontsize)
#' @param fontsize_col fontsize for colnames (Default: fontsize)
#' @param display_numbers logical determining if the numeric values are also printed to
#' the cells. If this is a matrix (with same dimensions as original matrix), the contents
#' of the matrix are shown instead of original values.
#' @param number_format format strings (C printf style) of the numbers shown in cells.
#' For example "\code{\%.2f}" shows 2 decimal places and "\code{\%.1e}" shows exponential
#' notation (see more in \code{\link{sprintf}}).
#' @param number_color color of the text
#' @param fontsize_number fontsize of the numbers displayed in cells
#' @param gaps_row vector of row indices that show where to put gaps into
#' heatmap. Used only if the rows are not clustered. See \code{cutree_row}
#' to see how to introduce gaps to clustered rows.
#' @param gaps_col similar to gaps_row, but for columns.
#' @param labels_row custom labels for rows that are used instead of rownames.
#' @param labels_col similar to labels_row, but for columns.
#' @param filename file path where to save the picture. Filetype is decided by
#' the extension in the path. Currently following formats are supported: png, pdf, tiff,
#' bmp, jpeg. Even if the plot does not fit into the plotting window, the file size is
#' calculated so that the plot would fit there, unless specified otherwise.
#' @param width manual option for determining the output file width in inches.
#' @param height manual option for determining the output file height in inches.
#' @param silent do not draw the plot (useful when using the gtable output)
#' @param \dots graphical parameters for the text used in plot. Parameters passed to
#' \code{\link{grid.text}}, see \code{\link{gpar}}.
#'
#' @return
#' Invisibly a list of components
#' \itemize{
#' \item \code{tree_row} the clustering of rows as \code{\link{hclust}} object
#' \item \code{tree_col} the clustering of columns as \code{\link{hclust}} object
#' \item \code{kmeans} the kmeans clustering of rows if parameter \code{kmeans_k} was
#' specified
#' }
#'
#' @author Raivo Kolde <rkolde@@gmail.com>
#' @examples
#' # Create test matrix
#' test = matrix(rnorm(200), 20, 10)
#' test[1:10, seq(1, 10, 2)] = test[1:10, seq(1, 10, 2)] + 3
#' test[11:20, seq(2, 10, 2)] = test[11:20, seq(2, 10, 2)] + 2
#' test[15:20, seq(2, 10, 2)] = test[15:20, seq(2, 10, 2)] + 4
#' colnames(test) = paste("Test", 1:10, sep = "")
#' rownames(test) = paste("Gene", 1:20, sep = "")
#'
#' # Draw heatmaps
#' pheatmap2(test)
#' pheatmap2(test, kmeans_k = 2)
#' pheatmap2(test, scale = "row", clustering_distance_rows = "correlation")
#' pheatmap2(test, color = colorRampPalette(c("navy", "white", "firebrick3"))(50))
#' pheatmap2(test, cluster_row = FALSE)
#' pheatmap2(test, legend = FALSE)
#'
#' # Show text within cells
#' pheatmap2(test, display_numbers = TRUE)
#' pheatmap2(test, display_numbers = TRUE, number_format = "\%.1e")
#' pheatmap2(test, display_numbers = matrix(ifelse(test > 5, "*", ""), nrow(test)))
#' pheatmap2(test, cluster_row = FALSE, legend_breaks = -1:4, legend_labels = c("0",
#' "1e-4", "1e-3", "1e-2", "1e-1", "1"))
#'
#' # Fix cell sizes and save to file with correct size
#' pheatmap2(test, cellwidth = 15, cellheight = 12, main = "Example heatmap")
#' pheatmap2(test, cellwidth = 15, cellheight = 12, fontsize = 8, filename = "test.pdf")
#'
#' # Generate annotations for rows and columns
#' annotation_col = data.frame(
#' CellType = factor(rep(c("CT1", "CT2"), 5)),
#' Time = 1:5
#' )
#' rownames(annotation_col) = paste("Test", 1:10, sep = "")
#'
#' annotation_row = data.frame(
#' GeneClass = factor(rep(c("Path1", "Path2", "Path3"), c(10, 4, 6)))
#' )
#' rownames(annotation_row) = paste("Gene", 1:20, sep = "")
#'
#' # Display row and color annotations
#' pheatmap2(test, annotation_col = annotation_col)
#' pheatmap2(test, annotation_col = annotation_col, annotation_legend = FALSE)
#' pheatmap2(test, annotation_col = annotation_col, annotation_row = annotation_row)
#'
#'
#' # Specify colors
#' ann_colors = list(
#' Time = c("white", "firebrick"),
#' CellType = c(CT1 = "#1B9E77", CT2 = "#D95F02"),
#' GeneClass = c(Path1 = "#7570B3", Path2 = "#E7298A", Path3 = "#66A61E")
#' )
#'
#' pheatmap2(test, annotation_col = annotation_col, annotation_colors = ann_colors, main = "Title")
#' pheatmap2(test, annotation_col = annotation_col, annotation_row = annotation_row,
#' annotation_colors = ann_colors)
#' pheatmap2(test, annotation_col = annotation_col, annotation_colors = ann_colors[2])
#'
#' # Gaps in heatmaps
#' pheatmap2(test, annotation_col = annotation_col, cluster_rows = FALSE, gaps_row = c(10, 14))
#' pheatmap2(test, annotation_col = annotation_col, cluster_rows = FALSE, gaps_row = c(10, 14),
#' cutree_col = 2)
#'
#' # Show custom strings as row/col names
#' labels_row = c("", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
#' "", "", "Il10", "Il15", "Il1b")
#'
#' pheatmap2(test, annotation_col = annotation_col, labels_row = labels_row)
#'
#' # Specifying clustering from distance matrix
#' drows = dist(test, method = "minkowski")
#' dcols = dist(t(test), method = "minkowski")
#' pheatmap2(test, clustering_distance_rows = drows, clustering_distance_cols = dcols)
#'
#' @export
pheatmap2 = function(mat,color = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(100), kmeans_k = NA, breaks = NA, border_color = "grey60", cellwidth = NA, cellheight = NA, scale = "none", cluster_rows = TRUE, cluster_cols = TRUE, clustering_distance_rows = "euclidean", clustering_distance_cols = "euclidean", clustering_method = "complete", cutree_rows = NA, cutree_cols = NA, treeheight_row = ifelse(cluster_rows, 50, 0), treeheight_col = ifelse(cluster_cols, 50, 0), legend = TRUE, legend_breaks = NA, legend_labels = NA, annotation_row = NA, annotation_col = NA, annotation = NA, annotation_colors = NA, annotation_legend =FALSE, drop_levels = TRUE, show_rownames = T, show_colnames = T, main = NA, fontsize = 10, fontsize_row = fontsize, fontsize_col = fontsize, display_numbers = F, number_format = "%.2f", number_color = "grey30", fontsize_number = 0.8 * fontsize, gaps_row = NULL, gaps_col = NULL, labels_row = NULL, labels_col = NULL, filename = NA, width = NA, height = NA, silent = FALSE, ...){
# Set labels: default the row/column labels to the matrix dimnames unless
# the caller supplied custom labels_row / labels_col.
if(is.null(labels_row)){
labels_row = rownames(mat)
}
if(is.null(labels_col)){
labels_col = colnames(mat)
}
# Preprocess matrix: coerce to matrix and, if requested, center/scale by
# row or column (scale_mat is a package-internal helper). After scaling,
# breaks are regenerated centered on zero so the palette midpoint is 0.
mat = as.matrix(mat)
if(scale != "none"){
mat = scale_mat(mat, scale)
if(is.na(breaks)){
breaks = generate_breaks(mat, length(color), center = T)
}
}
# Kmeans: optionally aggregate rows into kmeans_k cluster centers so very
# tall matrices stay drawable; row labels then report cluster id and size.
if(!is.na(kmeans_k)){
# Cluster data
km = kmeans(mat, kmeans_k, iter.max = 100)
mat = km$centers
# Compose rownames
t = table(km$cluster)
labels_row = sprintf("Cluster: %s Size: %d", names(t), t)
}else{
km = NA
}
# Format numbers to be displayed in cells. fmat holds the per-cell strings;
# fmat_draw records whether they should actually be drawn.
if(is.matrix(display_numbers) | is.data.frame(display_numbers)){
# Caller supplied the cell texts directly; dimensions must match mat.
if(nrow(display_numbers) != nrow(mat) | ncol(display_numbers) != ncol(mat)){
stop("If display_numbers provided as matrix, its dimensions have to match with mat")
}
display_numbers = as.matrix(display_numbers)
fmat = matrix(as.character(display_numbers), nrow = nrow(display_numbers), ncol = ncol(display_numbers))
fmat_draw = TRUE
}else{
if(display_numbers){
# Format the matrix values themselves using number_format (sprintf style).
fmat = matrix(sprintf(number_format, mat), nrow = nrow(mat), ncol = ncol(mat))
fmat_draw = TRUE
}else{
fmat = matrix(NA, nrow = nrow(mat), ncol = ncol(mat))
fmat_draw = FALSE
}
}
# Do clustering. Rows/columns of mat, fmat and the labels are all reordered
# to dendrogram order; cutree_* optionally introduces gaps between clusters.
if(cluster_rows){
tree_row = cluster_mat(mat, distance = clustering_distance_rows, method = clustering_method)
mat = mat[tree_row$order, , drop = FALSE]
fmat = fmat[tree_row$order, , drop = FALSE]
labels_row = labels_row[tree_row$order]
if(!is.na(cutree_rows)){
gaps_row = find_gaps(tree_row, cutree_rows)
}else{
gaps_row = NULL
}
}else{
# No row clustering: no dendrogram and no space reserved for it.
tree_row = NA
treeheight_row = 0
}
if(cluster_cols){
tree_col = cluster_mat(t(mat), distance = clustering_distance_cols, method = clustering_method)
mat = mat[, tree_col$order, drop = FALSE]
fmat = fmat[, tree_col$order, drop = FALSE]
labels_col = labels_col[tree_col$order]
if(!is.na(cutree_cols)){
gaps_col = find_gaps(tree_col, cutree_cols)
} else{
gaps_col = NULL
}
}else{
tree_col = NA
treeheight_col = 0
}
# Carry the draw flag along with the formatted-cell matrix.
attr(fmat, "draw") = fmat_draw
# Colors and scales: validate custom legend breaks/labels, generate default
# breaks if none were supplied, then build the legend tick positions.
if(!is.na(legend_breaks[1]) & !is.na(legend_labels[1])){
if(length(legend_breaks) != length(legend_labels)){
stop("Lengths of legend_breaks and legend_labels must be the same")
}
}
if(is.na(breaks[1])){
breaks = generate_breaks(as.vector(mat), length(color))
}
if (legend & is.na(legend_breaks[1])) {
# Automatic legend: pretty tick positions over the break range, with the
# tick values doubling as their labels.
legend = grid.pretty(range(as.vector(breaks)))
names(legend) = legend
}else if(legend & !is.na(legend_breaks[1])){
# Custom legend: keep only breaks (and their labels) inside the data range.
legend = legend_breaks[legend_breaks >= min(breaks) & legend_breaks <= max(breaks)]
if(!is.na(legend_labels[1])){
legend_labels = legend_labels[legend_breaks >= min(breaks) & legend_breaks <= max(breaks)]
names(legend) = legend_labels
}else{
names(legend) = legend
}
}else {
legend = NA
}
# Map the numeric matrix to colour strings (package-internal helper).
mat = scale_colours(mat, col = color, breaks = breaks)
# Preparing annotations: the deprecated `annotation` argument fills in
# annotation_col when the latter is missing.
if(is.na(annotation_col[[1]][1]) & !is.na(annotation[[1]][1])){
annotation_col = annotation
}
# Select only the ones present in the matrix (match by row/column names,
# in the current, possibly reordered, order of mat).
if(!is.na(annotation_col[[1]][1])){
annotation_col = annotation_col[colnames(mat), , drop = F]
}
if(!is.na(annotation_row[[1]][1])){
annotation_row = annotation_row[rownames(mat), , drop = F]
}
# Combine row+column annotation tracks and drop the missing (NA) ones, then
# resolve colours for whatever tracks remain.
annotation = c(annotation_row, annotation_col)
annotation = annotation[unlist(lapply(annotation, function(x) !is.na(x[1])))]
if(length(annotation) != 0){
annotation_colors = generate_annotation_colours(annotation, annotation_colors, drop = drop_levels)
} else{
annotation_colors = NA
}
# Suppressing labels is implemented by simply passing NULL label vectors.
if(!show_rownames){
labels_row = NULL
}
if(!show_colnames){
labels_col = NULL
}
# Draw heatmap: heatmap_motor assembles the gtable and, when filename is
# given, also writes it to disk.
gt = heatmap_motor(mat, border_color = border_color, cellwidth = cellwidth, cellheight = cellheight, treeheight_col = treeheight_col, treeheight_row = treeheight_row, tree_col = tree_col, tree_row = tree_row, filename = filename, width = width, height = height, breaks = breaks, color = color, legend = legend, annotation_row = annotation_row, annotation_col = annotation_col, annotation_colors = annotation_colors, annotation_legend = annotation_legend, main = main, fontsize = fontsize, fontsize_row = fontsize_row, fontsize_col = fontsize_col, fmat = fmat, fontsize_number = fontsize_number, number_color = number_color, gaps_row = gaps_row, gaps_col = gaps_col, labels_row = labels_row, labels_col = labels_col)
# Draw on the current device only when not writing to a file and not silenced.
if(is.na(filename) & !silent){
grid.newpage()
grid.draw(gt)
}
# Return the clusterings (and kmeans result / gtable) invisibly.
invisible(list(tree_row = tree_row, tree_col = tree_col, kmeans = km, gtable = gt))
}
# load library
library('getopt');
# Example options kept for interactive debugging (intentionally commented out).
#opt<-data.frame(infile="E:/R_workplace/20150626heatmap/T1_T2_vs_T3_T4.DEG.final.cluster",groupfile="E:/R_workplace/20150626heatmap/groupfile.heatmap")
# Location of shared R helper sources (referenced by a commented-out source() below).
.sourcePath<-"/share/nas1/tengh/research/Rsource/"
#-----------------------------------------------------------------
# getting parameters
#-----------------------------------------------------------------
#get options, using the spec as defined by the enclosed list.
#we read the options from the default: commandArgs(TRUE).
# getopt spec matrix, one row per option with 4 columns:
#   long name, short flag, argument flag (0 = no argument, 1 = required,
#   2 = optional), and the R data type of the value.
spec = matrix(c(
'help' , 'h', 0, "logical",
'infile' , 'i', 1, "character",
'groupfile' , 'G', 2, "character",
'outfile','o',2,"character",
'cell.width' , 'w', 2, "double",
'cell.height','e',2,"double",
'title','t',2,"character",
'width','W',2,"integer",
'height','H',2,"integer",
'size','s',2,"double",
'rowname','R',2,"logical",
'colname','C',2,"logical",
'color','c',2,"character" ,
'zero','z',2,"double",
'log','l',2,"character",
'scale','S',2,"character"
), byrow=TRUE, ncol=4);
# Parse the command line (commandArgs(TRUE)) against the spec.
opt = getopt(spec);
# Collinearity analysis between genetic map and genome (translated comment; appears unrelated to this heatmap script)
# define usage function
# Print the getopt-generated usage line plus a hand-written help text, then
# quit with a non-zero exit status. Called when --help is given or when the
# required --infile argument is missing.
#
# @param spec the getopt spec matrix (used to auto-generate the short usage).
print_usage <- function(spec=NULL){
cat(getopt(spec, usage=TRUE));
cat("Usage example: \n")
cat("
Usage example:
1) Rscript heatmap.R --infile in.heatmap --outfile heatmap --color BrBG
2) Rscript heatmap.R --infile in.heatmap --outfile heatmap --groupfile group.heatmap --title heatmap --size 10 --rowname F
3) Rscript heatmap.R --infile in.heatmap --outfile heatmap --title heatmap --size 10 --cell.width 7 --cell.height 7
Options:
--help -h NULL get this help
--infile -i character the tab delimited input file saving numeric matrix of the values to be plotted.[forced]
--outfile -o character file path where to save the picture. Filetype is decided by the extension in the path. [optional,heatmap in current working directory]
--groupfile -G character the tab delimited input file saving data frame that specifies the annotations shown on top side of the heatmap [optional, default:NA]
--cell.width -w double individual cell width in points[optional, default: 7]
--cell.height -e double individual cell height in points[optional, default: 7]
--size -s double base fontsize for the plot[optional, default: 10]
--width -W double manual option for determining the output file width in pixel.[optional, default: NA]
--height -H double manual option for determining the output file height in pixel.[optional, default:NA]
--title -t character a title for the plot[optional, default: ]
--rowname -R logical boolean specifying if row names are shown.[optional, default: TRUE]
--colname -C logical boolean specifying if column names are shown.[optional, default: TRUE]
--color -c character choose the colour set(redgreen BrBG PiYG PRGn PuOr RdBu RdGy RdYlBu RdYlGn Spectral)or set colour splited by , .[optional, default: BrBG]
--zero -z double value added to all matrix entries before the log transform, to avoid log(0).[optional, default:1]
--log -l character a logarithmic log scale is in use.[optional, default:log2]
--scale -S character character indicating if the values should be centered and scaled in either the row direction or the column direction, or none.[optional, default:none]
\n")
q(status=1);
}
#if(file.exists(paste(.sourcePath,"heatmap/pheatmap2.r",sep="")))source(paste(.sourcePath,"heatmap/pheatmap2.r",sep=""))else stop(paste(.sourcePath,"heatmap/pheatmap2.r does not exist!",sep=""))
# if help was asked for print a friendly message
# and exit with a non-zero error code
if (!is.null(opt$help)) { print_usage(spec) }
# check non-null args
# --infile is required; also normalise Windows backslashes to forward slashes.
if ( is.null(opt$infile) ) { print_usage(spec) }else {opt$infile<-gsub("\\\\",replacement = "/",x = opt$infile)}
#set some reasonable defaults for the options that are needed,
#but were not specified.
if ( is.null(opt$groupfile) ) { opt$groupfile=NA }else {opt$groupfile<-gsub("\\\\",replacement = "/",x = opt$groupfile)}
if( is.null(opt$outfile))opt$outfile="heatmap"
if(is.null(opt$title))opt$title=""
# Output width/height are given in pixels on the command line and converted
# by /500 (presumably ~500 px per inch for the output devices -- TODO confirm).
if(is.null(opt$width)){
opt$width=NA
}else if(!(is.numeric(opt$width)&&opt$width>0)){
stop("Parameter Error:outfile width must be positive integer")
}else{
opt$width=opt$width/500
}
if(is.null(opt$height)){
opt$height=NA
}else if(!(is.numeric(opt$height)&&opt$height>0)){
stop("Parameter Error:outfile height must be positive integer")
}else{
opt$height=opt$height/500
}
# Default cell size is 7pt, but only when the overall output size was not
# fixed explicitly; otherwise cells are left to scale with the device.
if(is.null(opt$cell.width)){
opt$cell.width=ifelse(is.na(opt$width),7,NA)
}else if(!(is.numeric(opt$cell.width)&&opt$cell.width>0)){
stop("Parameter Error:cell width must be positive integer")
}
if(is.null(opt$cell.height)){
opt$cell.height=ifelse(is.na(opt$height),7,NA )
}else if(!(is.numeric(opt$cell.height)&&opt$cell.height>0)){
stop("Parameter Error:cell height must be positive integer")
}
if(is.null(opt$rowname))opt$rowname=T
if(is.null(opt$colname))opt$colname=T
#if(is.null(opt$color))opt$color="RdYlGn"
if(is.null(opt$color))opt$color="#263C8B,#4E74A6,#BDBF78,#BFA524"
if(is.null(opt$zero))opt$zero=1
if(is.null(opt$size))opt$size=10
if(is.null(opt$log))opt$log="log2"
if(is.null(opt$scale))opt$scale="none"
##import data
rawdat<-read.table(opt$infile,head=T,sep="\t",comment.char = "",check.names =F)
message(nrow(rawdat))
# Keep at most the first 30 rows so the heatmap stays readable.
# NOTE(review): the else-if branch below is a no-op (rawdat <- rawdat), and
# nrow == 30 matches neither branch -- harmless either way, since only the
# > 30 case actually changes anything.
if(nrow(rawdat)>30){
#rawdat<-read.table(as.vector(opt$infile),header=T,sep="\t",comment.char = "")
rawdat <- rawdat[1:30,]
}else if(nrow(rawdat) >0 && nrow(rawdat) < 30){
rawdat <- rawdat
}
# First column holds the row identifiers; use it as rownames, then drop it.
rownames(rawdat)<-as.matrix(rawdat)[,1]
#rownames(rawdat)
#rawdat=as.matrix(rawdat[,grepl("[0-9]+$",colnames(rawdat))])
#rawdat<-as.matrix(rawdat[,2:(ncol(rawdat)-3)])
rawdat<-as.matrix(rawdat[,-1])
# Additive offset (--zero, default 1) applied before the log transform below
# so zero counts do not produce -Inf.
rawdat<-rawdat+opt$zero
# Apply the requested logarithmic transform to the (already offset) matrix.
# The NA check must come first: comparing NA with == yields NA, and the
# original order would have crashed `if (NA)` before ever reaching the
# is.na() branch.
if(is.na(opt$log)){
# NA means "no transform"; keep the offset values as-is.
rawdat<-rawdat
}else if(opt$log=="log2"){
rawdat<-log2(rawdat)
}else if(opt$log=="log10"){
rawdat<-log10(rawdat)
}else{
stop("Parameter error: a logarithmic scale parameter log can only be NA log10 or log2!")
}
# Build the column annotation (sample -> group) and its colour map, then
# resolve the heatmap colour palette from --color.
if(is.na(opt$groupfile)){
anColor = NA
colGroup =NA
heat.dat=rawdat
}else{
groupdat<-read.table(as.vector(opt$groupfile),header=F,sep="\t",comment.char = "")
# Named vector: names are sample ids (column 1), values are group labels (column 2).
group<-as.vector(groupdat[,2])
names(group)<-as.vector(groupdat[,1])
# Every sample named in the group file must exist as a column of the matrix.
if(sum(!is.element(names(group),colnames(rawdat)))>0){
stop(paste(c("the following samples in group file not exist:",setdiff(names(group),colnames(rawdat)),"please check your groupfile!"),sep="\n"))
}
# Matrix columns absent from the group file are dropped below; warn about them.
# (Bug fix: the original listed setdiff(names(group), colnames(rawdat)), which
# is always empty once the stop() check above has passed; the intended set is
# the reverse difference.)
if(sum(!is.element(colnames(rawdat),names(group)))>0){
warning(paste(c("the following samples in infile will not be plotted:",setdiff(colnames(rawdat),names(group))),sep="\n"))
}
# Multi-class samples: add a classification (annotation) bar to the heatmap.
heat.dat<-rawdat[,names(group)]
colGroup<-data.frame(Group=group)
colGroup$Group= factor(colGroup$Group, levels = c(unique(group), "other"))
row.names(colGroup)<-names(group)# map each sample to its group colour class
gColor<-c( "#7FC97F","#BEAED4","#FDC086","#FFFF99","#386CB0","#F0027F","#BF5B17","#666666", "#B3E2CD","#FDCDAC","#CBD5E8","#F4CAE4","#E6F5C9","#FFF2AE","#F1E2CC","#CCCCCC")
gColor=gColor[1:length(unique(group))]
names(gColor)<-unique(group)
anColor<-list(Group=gColor)
}
# Palette resolution: an RColorBrewer palette name, the gplots red/green
# scheme, or a comma-separated list of colours to interpolate between.
if(length(opt$color)==1&&is.element(opt$color,c("BrBG","PiYG","PRGn","PuOr","RdBu","RdGy","RdYlBu","RdYlGn","Spectral"))){
require(RColorBrewer)
hmColors=colorRampPalette(rev(brewer.pal(n = 7, name = opt$color)))(100)
}else if(length(opt$color)==1&&(opt$color=="redgreen")){
library(gplots)
message(paste("color=",opt$color,sep=""))
hmColors=redgreen(255)
}else{
hmColors<-strsplit(opt$color,split = ",")[[1]]
hmColors=colorRampPalette(hmColors)(256)
}
# Hierarchical clustering of the rows; computed once and reused below
# (the original recomputed hclust(dist(heat.dat)) a second time for the
# dendrogram dump -- redundant, since hclust is deterministic).
hl<-hclust(dist(heat.dat))
# Dump the dendrogram structure to a text file alongside the plots.
capture.output(str(as.dendrogram(hl)),file =paste(c(opt$outfile,".txt"),collapse =""))
#message(c("width",opt$width,"height",opt$height))
# Render the heatmap twice: once as PNG, once as PDF.
pheatmap2(filename =paste(c(opt$outfile,".png"),collapse =""),width = opt$width,height = opt$height,mat=heat.dat,cellwidth=opt$cell.width,color = hmColors,cellheight=opt$cell.height,main=opt$title,cluster_rows=T,cluster_cols=T,annotation_col = colGroup,annotation = colGroup,annotation_colors = anColor,fontsize=opt$size,col=hmColors,show_rownames=opt$rowname,show_colnames=opt$colname,fontsize_col=ifelse(is.na(opt$cell.width),opt$size,min(opt$size,opt$cell.width)),fontsize_row=ifelse(is.na(opt$cell.height),opt$size,min(opt$size,opt$cell.height)),scale=opt$scale)
dev.off()
pheatmap2(filename =paste(c(opt$outfile,".pdf"),collapse =""),width = opt$width,height = opt$height,mat=heat.dat,cellwidth=opt$cell.width,color = hmColors,cellheight=opt$cell.height,main=opt$title,cluster_rows=T,cluster_cols=T,annotation_col = colGroup,annotation = colGroup,annotation_colors = anColor,fontsize=opt$size,col=hmColors,show_rownames=opt$rowname,show_colnames=opt$colname,fontsize_col=ifelse(is.na(opt$cell.width),opt$size,min(opt$size,opt$cell.width)),fontsize_row=ifelse(is.na(opt$cell.height),opt$size,min(opt$size,opt$cell.height)),scale=opt$scale)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/about_you.R
\name{get_secret_conversations}
\alias{get_secret_conversations}
\title{Get any secret conversations}
\usage{
get_secret_conversations(folder = "data")
}
\arguments{
\item{folder}{the name of the data folder (in the project root directory)}
}
\value{
vector of logicals indicating whether the user has sent or received any secret conversations
}
\description{
Get any secret conversations
}
| /man/get_secret_conversations.Rd | no_license | chrisbrownlie/myFacebook | R | false | true | 481 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/about_you.R
\name{get_secret_conversations}
\alias{get_secret_conversations}
\title{Get any secret conversations}
\usage{
get_secret_conversations(folder = "data")
}
\arguments{
\item{folder}{the name of the data folder (in the project root directory)}
}
\value{
vector of logicals indicating whether the user has sent or received any secret conversations
}
\description{
Get any secret conversations
}
|
# Clear the workspace
# NOTE(review): rm(list = ls()) wipes the caller's global environment;
# discouraged in scripts that might be sourced interactively.
rm(list = ls())
graphics.off()
# Get the chr lengths
# Run `samtools idxstats` on the BAM file and capture its stdout: one line
# per reference sequence with tab-separated fields (see colnames below).
bai_file.v = system(command = "samtools idxstats /data/illumina_pipeline/aligned_experiments/DM242/dm242.bam",
intern = T
)
# Convert to a list
bai_file.l = strsplit(bai_file.v, split = "\t")
# Convert to a dataframe
bai_file.df = as.data.frame(matrix(unlist(bai_file.l), ncol = 4, byrow = T))
colnames(bai_file.df) = c("chr", "length", "read_num", "unaligned")
# Convert the length into a numeric
bai_file.df$length = as.numeric(bai_file.df$length)
# Get the chromosome length
# Keeps only the first 16 references; assumes idxstats lists the 16 nuclear
# chromosomes first -- TODO confirm against the BAM header order.
chr_length.v = bai_file.df$length[1:16]
# Get the percentage of each chr length
chr_percent.v = chr_length.v / sum(chr_length.v)
# Select the total number of positions for the signal distribution
total_position = 2000
# Create the output file
output_file = paste("/data/data2/jab112/2014_mnase_manuscript/datasets/",
"genome_background_feature_file_2000_sites.csv", sep = ""
)
# Get the number of positions sampled for each chromosome
# (proportional allocation by chromosome length)
chr_samp.v = round(total_position * chr_percent.v)
# Rounding can make the per-chromosome counts sum to slightly more or less
# than total_position; correct one randomly chosen chromosome by the excess.
if(sum(chr_samp.v) != total_position){
# Get the diff
# NOTE(review): local `diff` shadows base::diff within this block.
diff = sum(chr_samp.v) - total_position
# Randomly modify the count from one chromosome
chr_change = sample(1:16, 1)
chr_samp.v[chr_change] = chr_samp.v[chr_change] - diff
}
# Make the storage dataframe
# (one row per sampled site; pos filled in per chromosome below)
feature.df = data.frame(name = paste("sample_genome_", 1:total_position, sep = ""),
chr = rep(1:16, chr_samp.v),
pos = 0,
strand = "+"
)
# Iterate through each chr
for(i in 1:16){
cat("Sampling chr ", i, "...\r", sep = "")
# Get indices based on the total_position and chr_percent.v
# Sample positions without replacement, at least 500 bp from either
# chromosome end; sorted so positions increase within each chromosome.
sample_pos.v = sample(500:(chr_length.v[i] - 500), chr_samp.v[i], replace = FALSE)
# Update the position
feature.df[which(feature.df$chr == i),"pos"] = sort(sample_pos.v)
}
cat("\n\tComplete!\n")
# Write the output table
# NOTE(review): no set.seed() anywhere, so the sampled positions differ
# between runs.
write.table(feature.df, file = output_file, sep = ",", col.names = T, row.names = F, quote = F)
| /scripts/r_scripts/create_genomic_background_feature_file.R | no_license | jbelsky/2015_genes_and_dev_belsky | R | false | false | 1,970 | r | # Clear the workspace
rm(list = ls())
graphics.off()
# Get the chr lengths
bai_file.v = system(command = "samtools idxstats /data/illumina_pipeline/aligned_experiments/DM242/dm242.bam",
intern = T
)
# Convert to a list
bai_file.l = strsplit(bai_file.v, split = "\t")
# Convert to a dataframe
bai_file.df = as.data.frame(matrix(unlist(bai_file.l), ncol = 4, byrow = T))
colnames(bai_file.df) = c("chr", "length", "read_num", "unaligned")
# Convert the length into a numeric
bai_file.df$length = as.numeric(bai_file.df$length)
# Get the chromosome length
chr_length.v = bai_file.df$length[1:16]
# Get the percentage of each chr length
chr_percent.v = chr_length.v / sum(chr_length.v)
# Select the total number of positions for the signal distribution
total_position = 2000
# Create the output file
output_file = paste("/data/data2/jab112/2014_mnase_manuscript/datasets/",
"genome_background_feature_file_2000_sites.csv", sep = ""
)
# Get the number of positions sampled for each chromosome
chr_samp.v = round(total_position * chr_percent.v)
if(sum(chr_samp.v) != total_position){
# Get the diff
diff = sum(chr_samp.v) - total_position
# Randomly modify the count from one chromosome
chr_change = sample(1:16, 1)
chr_samp.v[chr_change] = chr_samp.v[chr_change] - diff
}
# Make the storage dataframe
feature.df = data.frame(name = paste("sample_genome_", 1:total_position, sep = ""),
chr = rep(1:16, chr_samp.v),
pos = 0,
strand = "+"
)
# Iterate through each chr
for(i in 1:16){
cat("Sampling chr ", i, "...\r", sep = "")
# Get indices based on the total_position and chr_percent.v
sample_pos.v = sample(500:(chr_length.v[i] - 500), chr_samp.v[i], replace = FALSE)
# Update the position
feature.df[which(feature.df$chr == i),"pos"] = sort(sample_pos.v)
}
cat("\n\tComplete!\n")
# Write the output table
write.table(feature.df, file = output_file, sep = ",", col.names = T, row.names = F, quote = F)
|
#' Stratified random sample of daphnia counts.
#'
#' These data are from a stratified random sample from three layers of a lake: epilimnion, thermocline, and hypolimnion. The volumes of these layers are approximately 100kL, 200kL, and 400kL respectively, so that the sampling fractions are 1/7, 2/7, and 4/7, respectively. The sampling units are one liter containers of water, and the target variable is daphnia per liter.
#'
#' @format A data frame with 45 observations and two variables:
#' \describe{
#' \item{layer:}{layer from which the water sample was taken}
#' \item{count:}{number of daphnia in the liter of water}
#' }
#'
#' @source Barrett, J. P. & Nutt, M. E. (1979). \emph{Survey sampling in the environmental sciences: A computer approach}. Wentworth, NH: COMPress, Inc.
#'
#' Gregoire, T. G. & Valentine, H. T. (2007). \emph{Sampling strategies for natural resources and the environment}. Boca Raton, FL: Chapman & Hall/CRC.
"daphniastrat"
| /R/daphniastrat.R | no_license | trobinj/trtools | R | false | false | 961 | r | #' Stratified random sample of daphnia counts.
#'
#' These data are from a stratified random sample from three layers of a lake: epilimnion, thermocline, and hypolimnion. The volumes of these layers are approximately 100kL, 200kL, and 400kL respectively, so that the sampling fractions are 1/7, 2/7, and 4/7, respectively. The sampling units are one liter containers of water, and the target variable is daphnia per liter.
#'
#' @format A data frame with 45 observations and two variables:
#' \describe{
#' \item{layer:}{layer from which the water sample was taken}
#' \item{count:}{number of daphnia in the liter of water}
#' }
#'
#' @source Barrett, J. P. & Nutt, M. E. (1979). \emph{Survey sampling in the environmental sciences: A computer approach}. Wentworth, NH: COMPress, Inc.
#'
#' Gregoire, T. G. & Valentine, H. T. (2007). \emph{Sampling strategies for natural resources and the environment}. Boca Raton, FL: Chapman & Hall/CRC.
"daphniastrat"
|
\name{sim_data}
\docType{data}
\alias{sim_data}
\title{A simulation dataset of orthologous genes between the different species.}
\description{
This data set gives 4149 orthologous genes which include read counts and
genes length between the two different species.
}
\usage{sim_data}
\format{A data.frame containing 4149 orthologous genes.}
\source{
Zhou Y, Zhu JD, Tong TJ, Wang JH, Lin BQ, Zhang J(2018, pending publication).
A Novel Normalization Method and Differential Expression Analysis of RNA-seq
Data between Different Species.
}
\keyword{datasets}
| /man/sim_data.Rd | no_license | FocusPaka/SCBN | R | false | false | 583 | rd | \name{sim_data}
\docType{data}
\alias{sim_data}
\title{A simulation dataset of orthologous genes between the different species.}
\description{
This data set gives 4149 orthologous genes which include read counts and
genes length between the two different species.
}
\usage{sim_data}
\format{A data.frame containing 4149 orthologous genes.}
\source{
Zhou Y, Zhu JD, Tong TJ, Wang JH, Lin BQ, Zhang J(2018, pending publication).
A Novel Normalization Method and Differential Expression Analysis of RNA-seq
Data between Different Species.
}
\keyword{datasets}
|
# Tests for rlist's filtering helpers: list.is() (logical predicate per
# element) and list.filter() (subset by predicate).
context("filter")
test_that("list.is", {
# list.is() evaluates the expression inside each element's environment and
# returns a named logical vector, equivalent to the explicit lapply below.
x <- list(p1 = list(type = "A", score = list(c1 = 10, c2 = 8)), p2 = list(type = "B",
score = list(c1 = 9, c2 = 9)), p3 = list(type = "B", score = list(c1 = 9,
c2 = 7)))
expect_identical(list.is(x, type == "B"), unlist(lapply(x, function(item) item$type ==
"B")))
l1 <- list(a = list(x = 1, y = 2), b = list(x = 2, y = 3))
# The predicate references `i` from the enclosing function, exercising
# dynamic scoping of the condition environment.
expect_identical(lapply(2:4, function(i) list.is(l1, sum(unlist(.)) <= i)),
list(c(a = FALSE, b = FALSE), c(a = TRUE, b = FALSE), c(a = TRUE, b = FALSE)))
})
test_that("list.filter", {
# simple list: keep elements whose `type` field equals "B"
x <- list(p1 = list(type = "A", score = list(c1 = 10, c2 = 8)), p2 = list(type = "B",
score = list(c1 = 9, c2 = 9)), p3 = list(type = "B", score = list(c1 = 9,
c2 = 7)))
expect_identical(list.filter(x, type == "B"), x[c(2, 3)])
# list of vectors: `.` refers to the element itself
x <- list(a = c(x = 1, y = 2), b = c(x = 3, y = 4))
expect_identical(list.filter(x, sum(.) >= 4), x["b"])
# list of lists
l1 <- list(a = list(x = 1, y = 2), b = list(x = 2, y = 3))
expect_identical(list.filter(l1, sum(unlist(.)) <= 4), l1["a"])
# test dynamic scoping
# (smoke test only: result is intentionally not asserted)
lapply(2:4, function(i) list.filter(l1, sum(unlist(.)) <= i))
})
| /tests/testthat/test-filter.R | permissive | renkun-ken/rlist | R | false | false | 1,207 | r | context("filter")
test_that("list.is", {
x <- list(p1 = list(type = "A", score = list(c1 = 10, c2 = 8)), p2 = list(type = "B",
score = list(c1 = 9, c2 = 9)), p3 = list(type = "B", score = list(c1 = 9,
c2 = 7)))
expect_identical(list.is(x, type == "B"), unlist(lapply(x, function(item) item$type ==
"B")))
l1 <- list(a = list(x = 1, y = 2), b = list(x = 2, y = 3))
expect_identical(lapply(2:4, function(i) list.is(l1, sum(unlist(.)) <= i)),
list(c(a = FALSE, b = FALSE), c(a = TRUE, b = FALSE), c(a = TRUE, b = FALSE)))
})
test_that("list.filter", {
# simple list
x <- list(p1 = list(type = "A", score = list(c1 = 10, c2 = 8)), p2 = list(type = "B",
score = list(c1 = 9, c2 = 9)), p3 = list(type = "B", score = list(c1 = 9,
c2 = 7)))
expect_identical(list.filter(x, type == "B"), x[c(2, 3)])
# list of vectors
x <- list(a = c(x = 1, y = 2), b = c(x = 3, y = 4))
expect_identical(list.filter(x, sum(.) >= 4), x["b"])
# list of lists
l1 <- list(a = list(x = 1, y = 2), b = list(x = 2, y = 3))
expect_identical(list.filter(l1, sum(unlist(.)) <= 4), l1["a"])
# test dynamic scoping
lapply(2:4, function(i) list.filter(l1, sum(unlist(.)) <= i))
})
|
#' Life-table Shape Measures
#'
#' Get life table shape measures from a pace-shape object.
#'
#' @param pash A pace-shape object.
#' @param type Which shape measure should be returned (default \code{"all"})?
#' @param harmonized Should the harmonized version of the shape measures be
#'  returned (default \code{TRUE})?
#'
#' @details
#' If \code{harmonized == TRUE}, then all shape measures are re-scaled so that
#' (1) they are positive for monotonically increasing forces of mortality over
#' age (2), they are negative for monotonically decreasing forces
#' of mortality over age, (3) they are 0 for constant
#' forces of mortality over age, (4) they have a maximum value
#' of 1. See Wrycza et al. (2015) for details.
#'
#' If \code{harmonized == FALSE} the shape measures have their conventional
#' scaling.
#'
#' @return
#' The following shape measures are returned:
#' \describe{
#'   \item{\code{"entropy"}}{Life table entropy}
#'   \item{\code{"gini"}}{Life table Gini coefficient}
#'   \item{\code{"cv"}}{Life table coefficient of variation.}
#'   \item{\code{"mr"}}{Mortality Ratio - Wrycza et al. (2015)}
#'   \item{\code{"ler"}}{Life Expectancy Ratio - Wrycza et al. (2015)}
#'   \item{\code{"acfm"}}{Average of Change in Force of Mortality
#'    with respect to lx - Wrycza et al. (2015)}
#'   \item{\code{"psmad"}}{Probability to Survive up to the Mean Age at Death
#'    - Wrycza et al. (2015)}
#'   \item{\code{"all"}}{All of the above measures.}
#' }
#'
#' @source Wrycza, Tomasz F., Trifon I. Missov, and Annette Baudisch. 2015.
#' "Quantifying the Shape of Aging." PLOS ONE 10 (3): 1-18. doi:10.1371/journal.pone.0119163.
#'
#' @examples
#' pash = Inputlx(x = prestons_lx$x, lx = prestons_lx$lx)
#' GetShape(pash)
#'
#' @export
GetShape <- function(pash, type = "all", harmonized = TRUE) {
  TestClass(pash)
  # Evaluate inside the life-table component of the pace-shape object so the
  # column names (x, nx, nax, ndx, lx, nmx, ex) resolve directly.
  with(pash[["lt"]],
       {
         shapes = c(entropy = LifetableEntropy(nax, nx, ndx, ex, harmonized),
                    gini = LifetableGini(x, nax, ndx, ex, harmonized),
                    cv = LifetableCV(x, ndx, nax, ex, harmonized),
                    mr = MortalityRatio(x, nx, nmx, ex, harmonized),
                    ler = LER(x, nx, ex, harmonized),
                    acfm = ACFM(nmx, ndx, ex, harmonized),
                    psmad = PSMAD(x, nx, lx, ex, harmonized))
         # "all" returns the full named vector, otherwise subset by name
         if (identical(type, "all")) { S = shapes } else { S = shapes[type] }
         return(S)
       })
}
# Life-table entropy ------------------------------------------------------
#' Average Life-Expectancy in Age x
#'
#' Interpolates between the remaining life expectancy of the current and the
#' next age class, weighted by the within-interval survival fraction nax/nx.
#'
#' @source Vaupel et al. (2016)
#' @keywords internal
EDaggerx <- function(nax, nx, ex) {
  frac <- nax / nx
  ex_next <- c(ex[-1L], 0)
  edx <- frac * ex_next + (1 - frac) * ex
  # the open-ended last interval keeps its own life expectancy
  edx[length(edx)] <- ex[length(ex)]
  edx
}
#' Total Life Years Lost due to Death
#'
#' Death-count-weighted sum of the age-specific losses from \code{EDaggerx}.
#'
#' @keywords internal
EDagger <- function(nax, nx, ndx, ex) {
  sum(ndx * EDaggerx(nax, nx, ex))
}
#' Life Table Entropy
#'
#' Life years lost due to death relative to life expectancy at birth.
#' The harmonized variant is 1 - H.
#'
#' @keywords internal
LifetableEntropy <- function(nax, nx, ndx, ex, harmonized) {
  H <- EDagger(nax, nx, ndx, ex) / ex[1L]
  if (isTRUE(harmonized)) 1 - H else H
}
# Life-table Gini coefficient ---------------------------------------------
#' Life Table Gini-Coefficient
#'
#' Discrete formulation of the Gini coefficient: half the death-weighted
#' mean absolute difference between ages at death, relative to e0.
#'
#' @source Schoeley (2017)
#' @keywords internal
LifetableGini <- function (x, nax, ndx, ex, harmonized) {
  age_at_death <- x + nax
  # pairwise absolute age differences weighted by the joint death distribution
  weight <- outer(ndx, ndx)
  age_diff <- abs(outer(age_at_death, age_at_death, `-`))
  G <- sum(weight * age_diff) / (2 * ex[1L])
  if (isTRUE(harmonized)) 1 - 2 * G else G
}
# Life-table coefficient of variation -------------------------------------
#' Life Table Variance
#'
#' Discrete variance of the life-table distribution of ages at death
#' around life expectancy at birth.
#'
#' @source Schoeley (2017)
#' @keywords internal
LifetableVar <- function(x, ndx, nax, ex) {
  deviation <- x + nax - ex[1L]
  sum(ndx * deviation^2)
}
#' Life Table Coefficient of Variation
#'
#' Standard deviation of age at death scaled by life expectancy at birth.
#'
#' @keywords internal
LifetableCV <- function(x, ndx, nax, ex, harmonized) {
  CV <- sqrt(LifetableVar(x, ndx, nax, ex)) / ex[1L]
  if (isTRUE(harmonized)) 1 - CV else CV
}
# ACFM --------------------------------------------------------------------
#' Average of Change in Force of Mortality with respect to lx
#'
#' Death-weighted average increase of the hazard over its initial level,
#' scaled by life expectancy at birth.
#'
#' @source Wrycza et al. (2015)
#' @keywords internal
ACFM <- function(nmx, ndx, ex, harmonized){
  D <- ex[1L] * sum((nmx - nmx[1L]) * ndx)
  if (isTRUE(harmonized)) 1 - exp(-D) else D
}
# Mortality ratio ---------------------------------------------------------
#' Mortality Ratio
#'
#' Initial mortality rate relative to the (linearly interpolated) mortality
#' rate at the age equal to life expectancy at birth.
#'
#' @importFrom stats approx
#' @keywords internal
MortalityRatio <- function(x, nx, nmx, ex, harmonized){
  # hazard at age e0, linearly interpolated on the age grid
  hazard_at_e0 <- approx(x = x, y = nmx, xout = ex[1L])[["y"]]
  MR <- nmx[1L] / hazard_at_e0
  if (isTRUE(harmonized)) 1 - MR else MR
}
# PSMAD -------------------------------------------------------------------
#' Probability to Survive up to the Mean Age at Death
#'
#' Survivorship interpolated at age e0; the harmonized variant is
#' 1 + log(l(e0)).
#'
#' @importFrom stats approx
#' @keywords internal
PSMAD <- function(x, nx, lx, ex, harmonized){
  surv_at_e0 <- approx(x = x, y = lx, xout = ex[1L])[["y"]]
  if (isTRUE(harmonized)) 1 + log(surv_at_e0) else surv_at_e0
}
# LER ---------------------------------------------------------------------
#' Life Expectancy Ratio
#'
#' Remaining life expectancy at age e0 relative to life expectancy at birth.
#'
#' @importFrom stats approx
#' @keywords internal
LER <- function(x, nx, ex, harmonized){
  e_at_e0 <- approx(x = x, y = ex, xout = ex[1L])[["y"]]
  ler <- e_at_e0 / ex[1L]
  if (isTRUE(harmonized)) 1 - ler else ler
}
| /R/shape_measures.R | no_license | jschoeley/pash | R | false | false | 5,777 | r | #' Life-table Shape Measures
#'
#' Get life table shape measures from a pace-shape object.
#'
#' @param pash A pace-shape object.
#' @param type Which shape measure should be returned (default \code{"all"})?
#' @param harmonized Should the harmonized version of the shape measures be
#'  returned (default \code{TRUE})?
#'
#' @details
#' If \code{harmonized == TRUE}, then all shape measures are re-scaled so that
#' (1) they are positive for monotonically increasing forces of mortality over
#' age (2), they are negative for monotonically decreasing forces
#' of mortality over age, (3) they are 0 for constant
#' forces of mortality over age, (4) they have a maximum value
#' of 1. See Wrycza et al. (2015) for details.
#'
#' If \code{harmonized == FALSE} the shape measures have their conventional
#' scaling.
#'
#' @return
#' The following shape measures are returned:
#' \describe{
#'   \item{\code{"entropy"}}{Life table entropy}
#'   \item{\code{"gini"}}{Life table Gini coefficient}
#'   \item{\code{"cv"}}{Life table coefficient of variation.}
#'   \item{\code{"mr"}}{Mortality Ratio - Wrycza et al. (2015)}
#'   \item{\code{"ler"}}{Life Expectancy Ratio - Wrycza et al. (2015)}
#'   \item{\code{"acfm"}}{Average of Change in Force of Mortality
#'    with respect to lx - Wrycza et al. (2015)}
#'   \item{\code{"psmad"}}{Probability to Survive up to the Mean Age at Death
#'    - Wrycza et al. (2015)}
#'   \item{\code{"all"}}{All of the above measures.}
#' }
#'
#' @source Wrycza, Tomasz F., Trifon I. Missov, and Annette Baudisch. 2015.
#' "Quantifying the Shape of Aging." PLOS ONE 10 (3): 1-18. doi:10.1371/journal.pone.0119163.
#'
#' @examples
#' pash = Inputlx(x = prestons_lx$x, lx = prestons_lx$lx)
#' GetShape(pash)
#'
#' @export
GetShape <- function(pash, type = "all", harmonized = TRUE) {
  TestClass(pash)
  # Evaluate inside the life-table component of the pace-shape object so the
  # column names (x, nx, nax, ndx, lx, nmx, ex) resolve directly.
  with(pash[["lt"]],
       {
         shapes = c(entropy = LifetableEntropy(nax, nx, ndx, ex, harmonized),
                    gini = LifetableGini(x, nax, ndx, ex, harmonized),
                    cv = LifetableCV(x, ndx, nax, ex, harmonized),
                    mr = MortalityRatio(x, nx, nmx, ex, harmonized),
                    ler = LER(x, nx, ex, harmonized),
                    acfm = ACFM(nmx, ndx, ex, harmonized),
                    psmad = PSMAD(x, nx, lx, ex, harmonized))
         # "all" returns the full named vector, otherwise subset by name
         if (identical(type, "all")) { S = shapes } else { S = shapes[type] }
         return(S)
       })
}
# Life-table entropy ------------------------------------------------------
#' Average Life-Expectancy in Age x
#'
#' Interpolates between the remaining life expectancy of the current and the
#' next age class, weighted by the within-interval survival fraction nax/nx.
#'
#' @source Vaupel et al. (2016)
#' @keywords internal
EDaggerx <- function(nax, nx, ex) {
  frac <- nax / nx
  ex_next <- c(ex[-1L], 0)
  edx <- frac * ex_next + (1 - frac) * ex
  # the open-ended last interval keeps its own life expectancy
  edx[length(edx)] <- ex[length(ex)]
  edx
}
#' Total Life Years Lost due to Death
#'
#' Death-count-weighted sum of the age-specific losses from \code{EDaggerx}.
#'
#' @keywords internal
EDagger <- function(nax, nx, ndx, ex) {
  sum(ndx * EDaggerx(nax, nx, ex))
}
#' Life Table Entropy
#'
#' Life years lost due to death relative to life expectancy at birth.
#' The harmonized variant is 1 - H.
#'
#' @keywords internal
LifetableEntropy <- function(nax, nx, ndx, ex, harmonized) {
  H <- EDagger(nax, nx, ndx, ex) / ex[1L]
  if (isTRUE(harmonized)) 1 - H else H
}
# Life-table Gini coefficient ---------------------------------------------
#' Life Table Gini-Coefficient
#'
#' Discrete formulation of the Gini coefficient: half the death-weighted
#' mean absolute difference between ages at death, relative to e0.
#'
#' @source Schoeley (2017)
#' @keywords internal
LifetableGini <- function (x, nax, ndx, ex, harmonized) {
  age_at_death <- x + nax
  # pairwise absolute age differences weighted by the joint death distribution
  weight <- outer(ndx, ndx)
  age_diff <- abs(outer(age_at_death, age_at_death, `-`))
  G <- sum(weight * age_diff) / (2 * ex[1L])
  if (isTRUE(harmonized)) 1 - 2 * G else G
}
# Life-table coefficient of variation -------------------------------------
#' Life Table Variance
#'
#' Discrete variance of the life-table distribution of ages at death
#' around life expectancy at birth.
#'
#' @source Schoeley (2017)
#' @keywords internal
LifetableVar <- function(x, ndx, nax, ex) {
  deviation <- x + nax - ex[1L]
  sum(ndx * deviation^2)
}
#' Life Table Coefficient of Variation
#'
#' Standard deviation of age at death scaled by life expectancy at birth.
#'
#' @keywords internal
LifetableCV <- function(x, ndx, nax, ex, harmonized) {
  CV <- sqrt(LifetableVar(x, ndx, nax, ex)) / ex[1L]
  if (isTRUE(harmonized)) 1 - CV else CV
}
# ACFM --------------------------------------------------------------------
#' Average of Change in Force of Mortality with respect to lx
#'
#' Death-weighted average increase of the hazard over its initial level,
#' scaled by life expectancy at birth.
#'
#' @source Wrycza et al. (2015)
#' @keywords internal
ACFM <- function(nmx, ndx, ex, harmonized){
  D <- ex[1L] * sum((nmx - nmx[1L]) * ndx)
  if (isTRUE(harmonized)) 1 - exp(-D) else D
}
# Mortality ratio ---------------------------------------------------------
#' Mortality Ratio
#'
#' Initial mortality rate relative to the (linearly interpolated) mortality
#' rate at the age equal to life expectancy at birth.
#'
#' @importFrom stats approx
#' @keywords internal
MortalityRatio <- function(x, nx, nmx, ex, harmonized){
  # hazard at age e0, linearly interpolated on the age grid
  hazard_at_e0 <- approx(x = x, y = nmx, xout = ex[1L])[["y"]]
  MR <- nmx[1L] / hazard_at_e0
  if (isTRUE(harmonized)) 1 - MR else MR
}
# PSMAD -------------------------------------------------------------------
#' Probability to Survive up to the Mean Age at Death
#'
#' Survivorship interpolated at age e0; the harmonized variant is
#' 1 + log(l(e0)).
#'
#' @importFrom stats approx
#' @keywords internal
PSMAD <- function(x, nx, lx, ex, harmonized){
  surv_at_e0 <- approx(x = x, y = lx, xout = ex[1L])[["y"]]
  if (isTRUE(harmonized)) 1 + log(surv_at_e0) else surv_at_e0
}
# LER ---------------------------------------------------------------------
#' Life Expectancy Ratio
#'
#' Remaining life expectancy at age e0 relative to life expectancy at birth.
#'
#' @importFrom stats approx
#' @keywords internal
LER <- function(x, nx, ex, harmonized){
  e_at_e0 <- approx(x = x, y = ex, xout = ex[1L])[["y"]]
  ler <- e_at_e0 / ex[1L]
  if (isTRUE(harmonized)) 1 - ler else ler
}
|
# Extracting Information From Objects Using Names()
# original source: http://rforpublichealth.blogspot.com/2013/03/extracting-information-from-objects.html
# Build a small simulated data set. Note: the vector names below become the
# column names of the data frame, so they are kept as-is.
ID <- 1:10
Age <- c(26, 65, 15, 7, 88, 43, 28, 66, 45, 12)
Sex <- c(1, 0, 1, 1, 0, 1, 1, 1, 0, 1)
Weight <- c(132, 122, 184, 145, 118, NA, 128, 154, 166, 164)
Height <- c(60, 63, 57, 59, 64, NA, 67, 65, NA, 60)
Married <- c(0, 0, 0, 0, 0, 0, 1, 1, 0, 1)
mydata <- data.frame(ID, Age, Sex, Weight, Height, Married)
# names() lists everything stored under an object; for a data frame these
# are the column headers.
names(mydata)
# names() can also be assigned to: relabel column 4 as Weight_lbs.
names(mydata)[4] <- "Weight_lbs"
# Fit a linear regression of weight on height and age.
reg.object <- lm(Weight_lbs ~ Height + Age, data = mydata)
# A fitted model is a list; names() reveals its components.
names(reg.object)
# Extract one component with $, e.g. the residuals ...
reg.object$residuals
# ... and plot their distribution.
hist(reg.object$residuals, main="Distribution of Residuals", xlab="Residuals")
| /names.r | no_license | anhnguyendepocen/r-code-examples | R | false | false | 1,094 | r | # Extracting Information From Objects Using Names()
# original source: http://rforpublichealth.blogspot.com/2013/03/extracting-information-from-objects.html
# Build a small simulated data set. Note: the vector names below become the
# column names of the data frame, so they are kept as-is.
ID <- 1:10
Age <- c(26, 65, 15, 7, 88, 43, 28, 66, 45, 12)
Sex <- c(1, 0, 1, 1, 0, 1, 1, 1, 0, 1)
Weight <- c(132, 122, 184, 145, 118, NA, 128, 154, 166, 164)
Height <- c(60, 63, 57, 59, 64, NA, 67, 65, NA, 60)
Married <- c(0, 0, 0, 0, 0, 0, 1, 1, 0, 1)
mydata <- data.frame(ID, Age, Sex, Weight, Height, Married)
# names() lists everything stored under an object; for a data frame these
# are the column headers.
names(mydata)
# names() can also be assigned to: relabel column 4 as Weight_lbs.
names(mydata)[4] <- "Weight_lbs"
# Fit a linear regression of weight on height and age.
reg.object <- lm(Weight_lbs ~ Height + Age, data = mydata)
# A fitted model is a list; names() reveals its components.
names(reg.object)
# Extract one component with $, e.g. the residuals ...
reg.object$residuals
# ... and plot their distribution.
hist(reg.object$residuals, main="Distribution of Residuals", xlab="Residuals")
|
#' Create a related item
#'
#' @export
#'
#' @param id (character) id of package that the related item should be added to.
#' This should be an alphanumeric string. Required.
#' @param title (character) Title of the related item. Required.
#' @param type (character) The type of the related item. One of API, application,
#' idea, news article, paper, post or visualization. Required.
#' @param description (character) description (optional). Optional
#' @param related_id (character) An id to assign to the related item. If blank, an
#' ID will be assigned for you. Optional
#' @param related_url (character) A url to associate with the related item. Optional
#' @param image_url (character) A url of an associated image. Optional
#' @template args
#' @template key
#'
#' @examples \dontrun{
#' # Setup
#' ckanr_setup(url = "https://demo.ckan.org/", key = getOption("ckan_demo_key"))
#'
#' # create a package
#' (res <- package_create("hello-mars"))
#'
#' # create a related item
#' related_create(res, title = "asdfdaf", type = "idea")
#'
#' # pipe operations together
#' package_create("foobbbbbarrrr") %>%
#'    related_create(title = "my resource",
#'                   type = "visualization")
#' }
related_create <- function(id, title, type, description = NULL,
  related_id = NULL, related_url = NULL, image_url = NULL,
  key = get_default_key(), url = get_default_url(), as = 'list', ...) {

  # resolve the package reference (id string or ckan_package) against `url`
  id <- as.ckan_package(id, url = url)
  # assemble the request payload; cc() presumably drops NULL entries so
  # omitted optional arguments do not appear in the request -- TODO confirm
  body <- cc(list(dataset_id = id$id, title = title,
                  type = type, url = related_url,
                  description = description, id = related_id,
                  image_url = image_url))
  # POST the JSON-encoded payload to the CKAN related_create endpoint
  res <- ckan_POST(url, 'related_create',
                   body = tojun(body, TRUE), key = key,
                   encode = "json", ctj(), ...)
  # return the response as raw JSON, a classed list, or a table
  switch(as, json = res, list = as_ck(jsl(res), "ckan_related"), table = jsd(res))
}
| /R/related_create.R | permissive | HawaDawa/ckanr | R | false | false | 1,875 | r | #' Create a related item
#'
#' @export
#'
#' @param id (character) id of package that the related item should be added to.
#' This should be an alphanumeric string. Required.
#' @param title (character) Title of the related item. Required.
#' @param type (character) The type of the related item. One of API, application,
#' idea, news article, paper, post or visualization. Required.
#' @param description (character) description (optional). Optional
#' @param related_id (character) An id to assign to the related item. If blank, an
#' ID will be assigned for you. Optional
#' @param related_url (character) A url to associate with the related item. Optional
#' @param image_url (character) A url of an associated image. Optional
#' @template args
#' @template key
#'
#' @examples \dontrun{
#' # Setup
#' ckanr_setup(url = "https://demo.ckan.org/", key = getOption("ckan_demo_key"))
#'
#' # create a package
#' (res <- package_create("hello-mars"))
#'
#' # create a related item
#' related_create(res, title = "asdfdaf", type = "idea")
#'
#' # pipe operations together
#' package_create("foobbbbbarrrr") %>%
#'    related_create(title = "my resource",
#'                   type = "visualization")
#' }
related_create <- function(id, title, type, description = NULL,
  related_id = NULL, related_url = NULL, image_url = NULL,
  key = get_default_key(), url = get_default_url(), as = 'list', ...) {

  # resolve the package reference (id string or ckan_package) against `url`
  id <- as.ckan_package(id, url = url)
  # assemble the request payload; cc() presumably drops NULL entries so
  # omitted optional arguments do not appear in the request -- TODO confirm
  body <- cc(list(dataset_id = id$id, title = title,
                  type = type, url = related_url,
                  description = description, id = related_id,
                  image_url = image_url))
  # POST the JSON-encoded payload to the CKAN related_create endpoint
  res <- ckan_POST(url, 'related_create',
                   body = tojun(body, TRUE), key = key,
                   encode = "json", ctj(), ...)
  # return the response as raw JSON, a classed list, or a table
  switch(as, json = res, list = as_ck(jsl(res), "ckan_related"), table = jsd(res))
}
|
\name{CPP}
\alias{CPP}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Curve Pre-Processor
}
\description{
The function \code{\link[chipPCR]{CPP}} encompasses a set of functions to
pre-process an amplification curve. The pre-processing includes options to
normalize curve data, to remove background, to remove outliers
in the background range and to test if an amplification is significant.
}
\usage{
CPP(x, y, trans = TRUE, bg.outliers = FALSE, median = FALSE, minmax = FALSE,
qnL = 0.1, amptest = FALSE, manual = FALSE, nl = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
is a vector containing the time or cycle values.
}
\item{y}{
is a vector containing the fluorescence values.
}
\item{trans}{
defines if the slope of the background range in a curve should be
corrected by a linear regression.
}
\item{bg.outliers}{
is a logical argument indicating whether to remove outliers in the background range.
}
\item{median}{
If set to TRUE, median is used instead of mean in outlier replacement.
The mean is used by default.
}
\item{minmax}{
is a logical argument to use a quantile normalization.
}
\item{qnL}{
is the quantile to be used for the quantile normalization.
}
\item{amptest}{
is a logical operator which is used to set a test for a positive
amplification.
}
\item{manual}{
is used to test for a fixed threshold value of the background.
}
\item{nl}{
is a value used as fixed threshold value for the background.
}
}
\details{
The function \code{\link[chipPCR]{CPP}} uses the function
\code{\link[chipPCR]{bg.max}} to estimate automatically the start of the
amplification process. In the background range there is often noise which
makes it harder to determine a meaningful background value. Therefore
\code{\link[chipPCR]{CPP}} can optionally remove outliers by finding the
value with largest difference from the mean as provided by the
\code{\link[outliers]{rm.outlier}} function. The function also tries to
prevent calculations on non-amplified signals. The parameter \code{qnL} is a
user defined quantile which is used for the quantile normalization. A quantile
normalization herein refers to an approach which is less prone to outliers
than a normalization based on the minimum and the maximum of an amplification
curve. The slope of the background range is often unequal to zero. By setting
the parameter \code{trans} it is possible to apply a simple correction of the
slope. Thereby either a robust linear regression (computing MM-type
regression estimators) or a standard linear regression model is used. Care is needed
when using \code{trans} with time series (see \code{\link[stats]{lm}} for
details).
}
\author{
Stefan Roediger, Michal Burdukiewicz
}
\examples{
# Function to pre-process an amplification curve.
# Take a subset of the C17 data frame.
data(C17)
par(mfrow = c(2,1))
plot(NA, NA, xlab = "Time [sec]", ylab = "refMFI",
main = "HDA Raw Data",
xlim = c(0, 2500), ylim = c(0,1.1), pch = 20)
for (i in 3:5) {
lines(C17[1:50, 1], C17[1:50, i], col = i - 2,
type = "b", pch = 20)
}
legend(50, 0.5, c("55 deg Celsius", "60 deg Celsius", "65 deg Celsius"),
col = c(1,2,3), pch = rep(20,3))
# Use CPP to preprocess the data by removing the missing value and
# normalization of the data
plot(NA, NA, xlab = "Time [sec]", ylab = "refMFI",
main = "Curve Pre-Processor Applied to HDA Data",
xlim = c(0, 2500), ylim = c(0,1.1), pch = 20)
for (i in 3:5) {
y.cpp <- CPP(C17[2:50, 1], C17[2:50, i], minmax = TRUE,
bg.outliers = TRUE)$y.norm
lines(C17[2:50, 1], y.cpp, col = i - 2,
type = "b", pch = 20)
}
legend(50, 1, c("55 deg Celsius", "60 deg Celsius", "65 deg Celsius"),
col = c(1,2,3), pch = rep(20,3))
par(mfrow = c(1,1))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ background }
\keyword{ noise }
\keyword{ outlier }
\keyword{ normalize }
\keyword{ amplification } | /man/CPP.Rd | no_license | devSJR/chipPCR | R | false | false | 4,123 | rd | \name{CPP}
\alias{CPP}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Curve Pre-Processor
}
\description{
The function \code{\link[chipPCR]{CPP}} encompasses a set of functions to
pre-process an amplification curve. The pre-processing includes options to
normalize curve data, to remove background, to remove outliers
in the background range and to test if an amplification is significant.
}
\usage{
CPP(x, y, trans = TRUE, bg.outliers = FALSE, median = FALSE, minmax = FALSE,
qnL = 0.1, amptest = FALSE, manual = FALSE, nl = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
is a vector containing the time or cycle values.
}
\item{y}{
is a vector containing the fluorescence values.
}
\item{trans}{
defines if the slope of the background range in a curve should be
corrected by a linear regression.
}
\item{bg.outliers}{
is a logical argument indicating whether to remove outliers in the background range.
}
\item{median}{
If set to TRUE, median is used instead of mean in outlier replacement.
The mean is used by default.
}
\item{minmax}{
is a logical argument to use a quantile normalization.
}
\item{qnL}{
is the quantile to be used for the quantile normalization.
}
\item{amptest}{
is a logical operator which is used to set a test for a positive
amplification.
}
\item{manual}{
is used to test for a fixed threshold value of the background.
}
\item{nl}{
is a value used as fixed threshold value for the background.
}
}
\details{
The function \code{\link[chipPCR]{CPP}} uses the function
\code{\link[chipPCR]{bg.max}} to estimate automatically the start of the
amplification process. In the background range there is often noise which
makes it harder to determine a meaningful background value. Therefore
\code{\link[chipPCR]{CPP}} can optionally remove outliers by finding the
value with largest difference from the mean as provided by the
\code{\link[outliers]{rm.outlier}} function. The function also tries to
prevent calculations on non-amplified signals. The parameter \code{qnL} is a
user defined quantile which is used for the quantile normalization. A quantile
normalization herein refers to an approach which is less prone to outliers
than a normalization based on the minimum and the maximum of an amplification
curve. The slope of the background range is often unequal to zero. By setting
the parameter \code{trans} it is possible to apply a simple correction of the
slope. Thereby either a robust linear regression (computing MM-type
regression estimators) or a standard linear regression model is used. Care is needed
when using \code{trans} with time series (see \code{\link[stats]{lm}} for
details).
}
\author{
Stefan Roediger, Michal Burdukiewicz
}
\examples{
# Function to pre-process an amplification curve.
# Take a subset of the C17 data frame.
data(C17)
par(mfrow = c(2,1))
plot(NA, NA, xlab = "Time [sec]", ylab = "refMFI",
main = "HDA Raw Data",
xlim = c(0, 2500), ylim = c(0,1.1), pch = 20)
for (i in 3:5) {
lines(C17[1:50, 1], C17[1:50, i], col = i - 2,
type = "b", pch = 20)
}
legend(50, 0.5, c("55 deg Celsius", "60 deg Celsius", "65 deg Celsius"),
col = c(1,2,3), pch = rep(20,3))
# Use CPP to preprocess the data by removing the missing value and
# normalization of the data
plot(NA, NA, xlab = "Time [sec]", ylab = "refMFI",
main = "Curve Pre-Processor Applied to HDA Data",
xlim = c(0, 2500), ylim = c(0,1.1), pch = 20)
for (i in 3:5) {
y.cpp <- CPP(C17[2:50, 1], C17[2:50, i], minmax = TRUE,
bg.outliers = TRUE)$y.norm
lines(C17[2:50, 1], y.cpp, col = i - 2,
type = "b", pch = 20)
}
legend(50, 1, c("55 deg Celsius", "60 deg Celsius", "65 deg Celsius"),
col = c(1,2,3), pch = rep(20,3))
par(mfrow = c(1,1))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ background }
\keyword{ noise }
\keyword{ outlier }
\keyword{ normalize }
\keyword{ amplification } |
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680227344e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615765567-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680227344e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
### This script applies PCA to the US corpus readability measures.
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are fragile in shared
# scripts -- prefer a fresh session and project-relative paths.
rm(list=ls())
setwd('C:/Users/SF515-51T/Desktop/CAPS')
library(factoextra)
library(ggplot2)
library(dplyr)
# plyr is attached after dplyr, so plyr masks several dplyr verbs; count()
# below is therefore called with an explicit dplyr:: prefix.
library(plyr)
# Read in data
corpus <- read.csv('benchmark_readability.csv')
dim(corpus)
# Plot # of observations by year/decade
summary(corpus$word_count)
corp.yr <- corpus %>% dplyr::count(year)
# round_any() (plyr) with f = floor bins each year down to its decade
corpus$decade <- round_any(corpus$year,10, f = floor)
corp.dec <- corpus %>% dplyr::count(decade)
plot(corp.yr$year, corp.yr$n)
plot(corp.dec$decade, corp.dec$n)
ggplot(corp.yr, aes(x=year, y=n)) + geom_point() + ylab('Number of documents') +
  xlab('') + ggtitle('Number of Documents by Year: US Corpus')
ggsave('docs_year_us_corpus.png')
ggplot(corp.dec, aes(x=decade, y=n)) + geom_point() + ylab('Number of documents') +
  xlab('') + ggtitle('Number of Documents by Decade: US Corpus')
ggsave('docs_dec_us_corpus.png')
# Omit na values, subset readability metrics (columns 1:18 assumed to hold
# the readability measures -- TODO confirm against the CSV header)
colnames(corpus)
read.metrics <- na.omit(corpus[,1:18])
read.metrics <- read.metrics[,which(colnames(read.metrics) != 'file_id')]
colnames(read.metrics)
# Deal w/ infinite values: drop any row containing an infinite metric
read.metrics <- read.metrics[rowSums(is.infinite(as.matrix(read.metrics))) == 0,]
# Reverse scale Flesh and Flesh-Kincaid scores (so that large values indicate higher readability)
# new values = maximum value + minimum value - old values
# hist(read.metrics$flesch_R)
# read.metrics$flesch_R <- max(read.metrics$flesch_R) + min(read.metrics$flesch_R) - read.metrics$flesch_R
# hist(read.metrics$flesch_R)
# Remove Coleman-Liau Short (quanteda does not calculate this correctly; it's the same as Coleman-Liau Grade)
read.metrics <- subset(read.metrics, select = -c(Coleman_Liau_Short_R))
dim(read.metrics)
colnames(read.metrics)
# Calculate singular value decomposition (PCA on standardized metrics)
read.pca <- prcomp(read.metrics, scale = TRUE)
# Visualize eigenvalues (scree plot)
fviz_eig(read.pca, ncp = 5, main = 'Scree Plot: US Corpus Benchmark')
ggsave('scree_plot_benchmark_measures.png')
# Graph biplot correlation of variables
fviz_pca_var(read.pca,
             col.var = "contrib", # Color by contributions to the PC
             gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
             repel = TRUE # Avoid text overlapping
)
ggsave('var_biplot_us_benchmark_measures.png')
# Remove scientific notation
options(scipen=999)
### Access PCA results
# Eigenvalues
eig.val <- get_eigenvalue(read.pca)
eig.val # First dimension explains 86.5% of variance
# Results for Variables
res.var <- get_pca_var(read.pca)
#res.var$coord # Coordinates
#res.var$contrib # Contributions to the PCs
#res.var$cos2 # Quality of representation
# Results for individuals
res.ind <- get_pca_ind(read.pca)
#res.ind$coord # Coordinates
#res.ind$contrib # Contributions to the PCs
#res.ind$cos2 # Quality of representation
# Pull first prin. component
coord <- as.data.frame(res.ind$coord[,1])
colnames(coord)[1] <- 'dim1'
summary(coord$dim1) # median = -0.1951; mean = 0
sd(coord$dim1) # 3.721
hist(coord$dim1)#, xlim = c(-20,15))
hist(coord$dim1, xlim = c(-20, 20))
#coord <- as.data.frame(coord[which(coord$dim1 > -100),])
#colnames(coord)[1] <- 'dim1'
#summary(coord$dim1)
ggplot(coord, aes(x=dim1)) + geom_histogram(color="darkblue", fill="lightblue") +
geom_vline(data=coord, aes(xintercept=median(dim1), color="red"),
linetype="dashed") + xlim(-18, 18) + xlab('Dim. 1') + ylab('Count') + theme(legend.position='none')
#ggsave('hist_1st_dim.png')
gc()
# Add first dim. values as variable
# NOTE(review): coord has one row per row of read.metrics, which dropped NA
# and infinite rows from corpus above; if any rows were dropped,
# nrow(coord) < nrow(corpus) and this assignment errors (or silently
# recycles). Confirm the row counts match, or join on row identifiers.
corpus$first.dim <- coord$dim1
# top.example <- all.courts[max(all.courts$first.dim),]
# low.example <- all.courts[min(all.courts$first.dim),]
# set.seed(24519)
# low.example <- low.example[sample(nrow(low.example),1),]
# low.example$word_count
#
# ex <- mean(all.courts$first.dim) - sd(coord$dim1)
# low.example <- all.courts[which(all.courts$first.dim < ex),]
# low.example <- low.example[order(low.example$first.dim, decreasing = T),]
# low.example <- low.example[which(low.example$word_count > 500),]
# low.example <- low.example[7,]
# low.example$cite
#
# ex2 <- mean(all.courts$first.dim) + sd(coord$dim1)
# top.example <- all.courts[which(all.courts$first.dim > ex2),]
# top.example <- top.example[order(top.example$first.dim, decreasing = T),]
# top.example <- top.example[which(top.example$word_count > 500),]
# top.example <- top.example[4,]
# top.example$cite
# Group by year/decade median
year.1d <- aggregate(corpus[,c('first.dim')],
list(corpus$year), median)
colnames(year.1d)[1] <- 'year'
dec.1d <- aggregate(corpus[,c('first.dim')],
list(corpus$decade), median)
colnames(dec.1d)[1] <- 'decade'
# Plot first dimension measure by year and state
#load('firstdim.RData')
# NOTE(review): `year_state1d` and `all.courts` are never created in this
# script (they belong to the state-court analysis), so the two plot() calls
# below fail with "object not found". Commented out; re-enable after loading
# those objects (e.g. via the load() above). The original inline comments
# also flagged that plotting aggregates of PCA scores is questionable.
#plot(year_state1d$year, year_state1d$x)
#plot(all.courts$year, all.courts$x)
# Median first-dimension score per year (year.1d computed above)
ggplot(data=year.1d, aes(x=year,y=x)) + geom_point()
# summarySE: summarize one numeric column of `data` within groups.
# Returns a data frame with, per group: N (count), <measurevar> (mean),
# sd, se (standard error of the mean), and ci (half-width of the t-based
# confidence interval at level conf.interval).
#   data          data frame to summarize
#   measurevar    name (string) of the column to summarize
#   groupvars     character vector of grouping column names
#   na.rm         drop NAs before counting/averaging?
#   conf.interval confidence level for the ci column (default 0.95)
#   .drop         passed through to plyr::ddply
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  library(plyr)
  # New version of length which can handle NA's: if na.rm==T, don't count them
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else       length(x)
  }
  # This does the summary. For each group's data frame, return a vector with
  # N, mean, and sd
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N    = length2(xx[[col]], na.rm=na.rm),
                     mean = mean   (xx[[col]], na.rm=na.rm),
                     sd   = sd     (xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )
  # Rename the "mean" column to the measured variable's name
  # (plyr::rename takes c(old = new))
  datac <- rename(datac, c("mean" = measurevar))
  datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean
  # Confidence interval multiplier for standard error
  # Calculate t-statistic for confidence interval:
  # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
tgc <- summarySE(corpus,
measurevar="first.dim" , groupvars=c("year"))
# Standard error of the mean
ggplot(tgc, aes(x=year, y=first.dim)) +
geom_errorbar(aes(ymin=first.dim-se, ymax=first.dim+se), width=.1) +
#geom_line() +
geom_point() + xlab('Year') + ylab('Readability') +
theme_bw() + ggtitle('PCA Readability Scores by Year: US Benchmark Corpus')
ggsave('year_benchmark_first_dim.png')
#ggplot(data=all.courts[which(all.courts$state == 'Massachusetts'),], aes(x=year,y=first.dim)) + geom_point()
# Decade
tgc <- summarySE(corpus,
measurevar="first.dim" , groupvars=c("decade"))
# Standard error of the mean
ggplot(tgc, aes(x=decade, y=first.dim)) +
geom_errorbar(aes(ymin=first.dim-se, ymax=first.dim+se), width=.1) +
#geom_line() +
geom_point() + xlab('Decade') + ylab('Readability') +
theme_bw() + ggtitle('PCA Readability Scores by Decade: US Benchmark Corpus')
ggsave('dec_benchmark_first_dim.png')
#ggplot(data=all.courts[which(all.courts$state == 'Massachusetts'),], aes(x=year,y=first.dim)) + geom_point()
save(year.1d, file = 'byu_read.RData') | /src/benchmark_pca.R | no_license | stevenjmorgan/CAPS | R | false | false | 7,352 | r | ### This script applies PCA to the US corpus readability measures.
rm(list=ls())
setwd('C:/Users/SF515-51T/Desktop/CAPS')
library(factoextra)
library(ggplot2)
library(dplyr)
library(plyr)
# Read in data
corpus <- read.csv('benchmark_readability.csv')
dim(corpus)
# Plot # of observations by year/decade
summary(corpus$word_count)
corp.yr <- corpus %>% dplyr::count(year)
corpus$decade <- round_any(corpus$year,10, f = floor)
corp.dec <- corpus %>% dplyr::count(decade)
plot(corp.yr$year, corp.yr$n)
plot(corp.dec$decade, corp.dec$n)
ggplot(corp.yr, aes(x=year, y=n)) + geom_point() + ylab('Number of documents') +
xlab('') + ggtitle('Number of Documents by Year: US Corpus')
ggsave('docs_year_us_corpus.png')
ggplot(corp.dec, aes(x=decade, y=n)) + geom_point() + ylab('Number of documents') +
xlab('') + ggtitle('Number of Documents by Decade: US Corpus')
ggsave('docs_dec_us_corpus.png')
# Omit na values, subset readability metrics
colnames(corpus)
read.metrics <- na.omit(corpus[,1:18])
read.metrics <- read.metrics[,which(colnames(read.metrics) != 'file_id')]
colnames(read.metrics)
# Deal w/ infinite values
read.metrics <- read.metrics[rowSums(is.infinite(as.matrix(read.metrics))) == 0,]
# Reverse scale Flesh and Flesh-Kincaid scores (so that large values indicate higher readability)
# new values = maximum value + minimum value - old values
# hist(read.metrics$flesch_R)
# read.metrics$flesch_R <- max(read.metrics$flesch_R) + min(read.metrics$flesch_R) - read.metrics$flesch_R
# hist(read.metrics$flesch_R)
# Remove Coleman-Liau Short (quanteda does not calculate this correctly; it's the same as Coleman-Liau Grade)
read.metrics <- subset(read.metrics, select = -c(Coleman_Liau_Short_R))
dim(read.metrics)
colnames(read.metrics)
# Calculate singular value decomposition
read.pca <- prcomp(read.metrics, scale = TRUE)
# Visualize eigenvalues (scree plot)
fviz_eig(read.pca, ncp = 5, main = 'Scree Plot: US Corpus Benchmark')
ggsave('scree_plot_benchmark_measures.png')
# Graph biplot correlation of variables
fviz_pca_var(read.pca,
col.var = "contrib", # Color by contributions to the PC
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
repel = TRUE # Avoid text overlapping
)
ggsave('var_biplot_us_benchmark_measures.png')
# Remove scientific notation
options(scipen=999)
### Access PCA results
# Eigenvalues
eig.val <- get_eigenvalue(read.pca)
eig.val # First dimension explains 86.5% of variance
# Results for Variables
res.var <- get_pca_var(read.pca)
#res.var$coord # Coordinates
#res.var$contrib # Contributions to the PCs
#res.var$cos2 # Quality of representation
# Results for individuals
res.ind <- get_pca_ind(read.pca)
#res.ind$coord # Coordinates
#res.ind$contrib # Contributions to the PCs
#res.ind$cos2 # Quality of representation
# Pull first prin. component
coord <- as.data.frame(res.ind$coord[,1])
colnames(coord)[1] <- 'dim1'
summary(coord$dim1) # median = -0.1951; mean = 0
sd(coord$dim1) # 3.721
hist(coord$dim1)#, xlim = c(-20,15))
hist(coord$dim1, xlim = c(-20, 20))
#coord <- as.data.frame(coord[which(coord$dim1 > -100),])
#colnames(coord)[1] <- 'dim1'
#summary(coord$dim1)
ggplot(coord, aes(x=dim1)) + geom_histogram(color="darkblue", fill="lightblue") +
geom_vline(data=coord, aes(xintercept=median(dim1), color="red"),
linetype="dashed") + xlim(-18, 18) + xlab('Dim. 1') + ylab('Count') + theme(legend.position='none')
#ggsave('hist_1st_dim.png')
gc()
# Add first dim. values as variable
# NOTE(review): coord has one row per row of read.metrics, which dropped NA
# and infinite rows from corpus above; if any rows were dropped,
# nrow(coord) < nrow(corpus) and this assignment errors (or silently
# recycles). Confirm the row counts match, or join on row identifiers.
corpus$first.dim <- coord$dim1
# top.example <- all.courts[max(all.courts$first.dim),]
# low.example <- all.courts[min(all.courts$first.dim),]
# set.seed(24519)
# low.example <- low.example[sample(nrow(low.example),1),]
# low.example$word_count
#
# ex <- mean(all.courts$first.dim) - sd(coord$dim1)
# low.example <- all.courts[which(all.courts$first.dim < ex),]
# low.example <- low.example[order(low.example$first.dim, decreasing = T),]
# low.example <- low.example[which(low.example$word_count > 500),]
# low.example <- low.example[7,]
# low.example$cite
#
# ex2 <- mean(all.courts$first.dim) + sd(coord$dim1)
# top.example <- all.courts[which(all.courts$first.dim > ex2),]
# top.example <- top.example[order(top.example$first.dim, decreasing = T),]
# top.example <- top.example[which(top.example$word_count > 500),]
# top.example <- top.example[4,]
# top.example$cite
# Group by year/decade median
year.1d <- aggregate(corpus[,c('first.dim')],
list(corpus$year), median)
colnames(year.1d)[1] <- 'year'
dec.1d <- aggregate(corpus[,c('first.dim')],
list(corpus$decade), median)
colnames(dec.1d)[1] <- 'decade'
# Plot first dimension measure by year and state
#load('firstdim.RData')
# NOTE(review): `year_state1d` and `all.courts` are never created in this
# script (they belong to the state-court analysis), so the two plot() calls
# below fail with "object not found". Commented out; re-enable after loading
# those objects (e.g. via the load() above). The original inline comments
# also flagged that plotting aggregates of PCA scores is questionable.
#plot(year_state1d$year, year_state1d$x)
#plot(all.courts$year, all.courts$x)
# Median first-dimension score per year (year.1d computed above)
ggplot(data=year.1d, aes(x=year,y=x)) + geom_point()
# summarySE: summarize one numeric column of `data` within groups.
# Returns a data frame with, per group: N (count), <measurevar> (mean),
# sd, se (standard error of the mean), and ci (half-width of the t-based
# confidence interval at level conf.interval).
#   data          data frame to summarize
#   measurevar    name (string) of the column to summarize
#   groupvars     character vector of grouping column names
#   na.rm         drop NAs before counting/averaging?
#   conf.interval confidence level for the ci column (default 0.95)
#   .drop         passed through to plyr::ddply
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  library(plyr)
  # New version of length which can handle NA's: if na.rm==T, don't count them
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else       length(x)
  }
  # This does the summary. For each group's data frame, return a vector with
  # N, mean, and sd
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N    = length2(xx[[col]], na.rm=na.rm),
                     mean = mean   (xx[[col]], na.rm=na.rm),
                     sd   = sd     (xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )
  # Rename the "mean" column to the measured variable's name
  # (plyr::rename takes c(old = new))
  datac <- rename(datac, c("mean" = measurevar))
  datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean
  # Confidence interval multiplier for standard error
  # Calculate t-statistic for confidence interval:
  # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
tgc <- summarySE(corpus,
measurevar="first.dim" , groupvars=c("year"))
# Standard error of the mean
ggplot(tgc, aes(x=year, y=first.dim)) +
geom_errorbar(aes(ymin=first.dim-se, ymax=first.dim+se), width=.1) +
#geom_line() +
geom_point() + xlab('Year') + ylab('Readability') +
theme_bw() + ggtitle('PCA Readability Scores by Year: US Benchmark Corpus')
ggsave('year_benchmark_first_dim.png')
#ggplot(data=all.courts[which(all.courts$state == 'Massachusetts'),], aes(x=year,y=first.dim)) + geom_point()
# Decade
tgc <- summarySE(corpus,
measurevar="first.dim" , groupvars=c("decade"))
# Standard error of the mean
ggplot(tgc, aes(x=decade, y=first.dim)) +
geom_errorbar(aes(ymin=first.dim-se, ymax=first.dim+se), width=.1) +
#geom_line() +
geom_point() + xlab('Decade') + ylab('Readability') +
theme_bw() + ggtitle('PCA Readability Scores by Decade: US Benchmark Corpus')
ggsave('dec_benchmark_first_dim.png')
#ggplot(data=all.courts[which(all.courts$state == 'Massachusetts'),], aes(x=year,y=first.dim)) + geom_point()
save(year.1d, file = 'byu_read.RData') |
# Fit a 10-fold cross-validated elastic net on the liver AvgRank training
# set and append the fitted glmnet path to a log file.
library(glmnet)
# Column 1 is the response; predictors start at column 4
mydata = read.table("./TrainingSet/AvgRank/liver.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the CV fold assignment is reproducible
set.seed(123)
# NOTE(review): alpha=0.65 here but the output file is named *_070 --
# confirm which elastic-net mixing parameter this run should record.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.65,family="gaussian",standardize=FALSE)
sink('./Model/EN/AvgRank/liver/liver_070.txt',append=TRUE)
print(glm$glmnet.fit)
sink()  # restore normal console output
| /Model/EN/AvgRank/liver/liver_070.R | no_license | leon1003/QSMART | R | false | false | 350 | r | library(glmnet)
# Column 1 is the response; predictors start at column 4
mydata = read.table("./TrainingSet/AvgRank/liver.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the CV fold assignment is reproducible
set.seed(123)
# NOTE(review): alpha=0.65 here but the output file is named *_070 --
# confirm which elastic-net mixing parameter this run should record.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.65,family="gaussian",standardize=FALSE)
# Append the fitted glmnet path to the log file
sink('./Model/EN/AvgRank/liver/liver_070.txt',append=TRUE)
print(glm$glmnet.fit)
sink()  # restore normal console output
|
#' Get the PageRank values for all nodes
#'
#' Get the PageRank values for all nodes in the graph.
#' @inheritParams render_graph
#' @param directed if \code{TRUE} (the default) then directed paths will be
#' considered for directed graphs. This is ignored for undirected graphs.
#' @param damping the damping factor. The default value is set to \code{0.85}.
#' @return a data frame with PageRank values for each of the nodes.
#' @examples
#' # Create a random graph using the
#' # `add_gnm_graph()` function
#' graph <-
#' create_graph() %>%
#' add_gnm_graph(
#' n = 10,
#' m = 15,
#' set_seed = 23)
#'
#' # Get the PageRank scores
#' # for all nodes in the graph
#' graph %>%
#' get_pagerank()
#'
#' # Colorize nodes according to their
#' # PageRank scores
#' graph <-
#' graph %>%
#' join_node_attrs(
#' df = get_pagerank(graph = .)) %>%
#' colorize_node_attrs(
#' node_attr_from = pagerank,
#' node_attr_to = fillcolor,
#' palette = "RdYlGn")
#' @importFrom igraph page_rank
#' @export
get_pagerank <- function(graph,
                         directed = TRUE,
                         damping = 0.85) {

  # Name of the calling function, used when reporting errors
  fcn_name <- get_calling_fcn()

  # Refuse to operate on an invalid graph object
  if (!graph_object_valid(graph)) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }

  # Compute PageRank on the igraph representation of the graph;
  # page_rank() returns a named vector of per-node scores
  pr <- igraph::page_rank(
    graph = to_igraph(graph),
    directed = directed,
    damping = damping)$vector

  # Assemble the result table: one row per node id, scores rounded
  # to four decimal places
  data.frame(
    id = as.integer(names(pr)),
    pagerank = round(pr, 4),
    stringsAsFactors = FALSE)
}
| /R/get_pagerank.R | permissive | lionel-/DiagrammeR | R | false | false | 1,857 | r | #' Get the PageRank values for all nodes
#'
#' Get the PageRank values for all nodes in the graph.
#' @inheritParams render_graph
#' @param directed if \code{TRUE} (the default) then directed paths will be
#' considered for directed graphs. This is ignored for undirected graphs.
#' @param damping the damping factor. The default value is set to \code{0.85}.
#' @return a data frame with PageRank values for each of the nodes.
#' @examples
#' # Create a random graph using the
#' # `add_gnm_graph()` function
#' graph <-
#' create_graph() %>%
#' add_gnm_graph(
#' n = 10,
#' m = 15,
#' set_seed = 23)
#'
#' # Get the PageRank scores
#' # for all nodes in the graph
#' graph %>%
#' get_pagerank()
#'
#' # Colorize nodes according to their
#' # PageRank scores
#' graph <-
#' graph %>%
#' join_node_attrs(
#' df = get_pagerank(graph = .)) %>%
#' colorize_node_attrs(
#' node_attr_from = pagerank,
#' node_attr_to = fillcolor,
#' palette = "RdYlGn")
#' @importFrom igraph page_rank
#' @export
get_pagerank <- function(graph,
                         directed = TRUE,
                         damping = 0.85) {
  # Get the name of the calling function (for error reporting)
  fcn_name <- get_calling_fcn()
  # Validation: refuse to operate on an invalid graph object
  if (graph_object_valid(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }
  # Convert the graph to an igraph object
  ig_graph <- to_igraph(graph)
  # Get the PageRank values for each of the
  # graph's nodes; page_rank() returns a named vector of scores
  pagerank_values <-
    igraph::page_rank(
      graph = ig_graph,
      directed = directed,
      damping = damping)$vector
  # Create df with the PageRank values: node ids (coerced from the
  # vector's names) and scores rounded to four decimal places
  data.frame(
    id = pagerank_values %>%
      names() %>%
      as.integer(),
    pagerank = pagerank_values %>% round(4),
    stringsAsFactors = FALSE)
}
|
# SQLite database connection example.
# Fixes: setwd("D:\") was a syntax error in R ("\"" escapes the closing
# quote, leaving the string unterminated) -- use forward slashes (or
# "D:\\"). dbDiscoonect() was a typo for dbDisconnect().
setwd("D:/")
library("RSQLite")
# Open (or create) the database file in the working directory
db = dbConnect(SQLite(), dbname = "ruia.db")
dbListTables(db)            # it will show all tables
dbListFields(db, "summer")  # it will show all fields of summer table
summer_data = dbGetQuery(db, "select * from summer")
winter_data = dbGetQuery(db, "select * from winter")
# NOTE(review): attach() puts columns on the search path; same-named
# columns in both tables will mask each other. Prefer summer_data$col.
attach(summer_data)
attach(winter_data)
dbDisconnect(db)  # it will disconnect the database
# Fixes: setwd("D:\") was a syntax error in R ("\"" escapes the closing
# quote, leaving the string unterminated) -- use forward slashes (or
# "D:\\"). dbDiscoonect() was a typo for dbDisconnect().
setwd("D:/")
library("RSQLite")
# Open (or create) the database file in the working directory
db = dbConnect(SQLite(), dbname = "ruia.db")
dbListTables(db)            # it will show all tables
dbListFields(db, "summer")  # it will show all fields of summer table
summer_data = dbGetQuery(db, "select * from summer")
winter_data = dbGetQuery(db, "select * from winter")
# NOTE(review): attach() puts columns on the search path; same-named
# columns in both tables will mask each other. Prefer summer_data$col.
attach(summer_data)
attach(winter_data)
dbDisconnect(db)  # it will disconnect the database
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uri_functions.R
\name{.options}
\alias{.options}
\alias{carbon-options}
\title{concatenate the carbon options to a string}
\usage{
.options(self, private, code)
}
\arguments{
\item{self}{carbon self object}
\item{private}{carbon private object}
\item{code}{character, script to embed into the uri}
}
\value{
character, the carbon options concatenated into a single carbon.js-valid string
}
\description{
combine all the carbon options into a carbon.js valid string
}
\seealso{
\link[=carbon]{carbon}
}
| /man/options.Rd | permissive | yonicd/carbonate | R | false | true | 522 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uri_functions.R
\name{.options}
\alias{.options}
\alias{carbon-options}
\title{concatenate the carbon options to a string}
\usage{
.options(self, private, code)
}
\arguments{
\item{self}{carbon self object}
\item{private}{carbon private object}
\item{code}{character, script to embed into the uri}
}
\value{
character, the carbon options concatenated into a single carbon.js-valid string
}
\description{
combine all the carbon options into a carbon.js valid string
}
\seealso{
\link[=carbon]{carbon}
}
|
context("model_matrix_responseless")

test_that("testing that we can build a responseless model matrix", {
  # Make random data for the test
  alpha = 1; beta = 0.2;
  w0 = 0.2; w1 = 0.1; w2 = 0.5;
  d1 = data.frame(x=rnorm(5), z = rnorm(5));
  d1$y = rnorm(5, w0-w1*d1$x+w2*d1$z, 1/beta)
  m1 = y ~ x + z
  # Build a responseless model matrix
  responseless1 = model_matrix_responseless(m1, d1)
  # Test cases
  # Passing NULL data must raise an error
  expect_error(model_matrix_responseless(m1, NULL))
  # The output must be a matrix. Since R 4.0 class(<matrix>) returns
  # c("matrix", "array"), so `class(x) == "matrix"` yields a length-2
  # logical and breaks expect_true(); use inherits() instead.
  expect_true(inherits(responseless1, "matrix"))
})
test_that("testing that we can build a responseless model matrix", {
  # Make random data for the test
  alpha = 1; beta = 0.2;
  w0 = 0.2; w1 = 0.1; w2 = 0.5;
  d1 = data.frame(x=rnorm(5), z = rnorm(5));
  d1$y = rnorm(5, w0-w1*d1$x+w2*d1$z, 1/beta)
  m1 = y ~ x + z
  # Build a responseless model matrix
  responseless1 = model_matrix_responseless(m1, d1)
  # Test cases
  # Passing NULL data must raise an error
  expect_error(model_matrix_responseless(m1, NULL))
  # The output must be a matrix. Since R 4.0 class(<matrix>) returns
  # c("matrix", "array"), so `class(x) == "matrix"` yields a length-2
  # logical and breaks expect_true(); use inherits() instead.
  expect_true(inherits(responseless1, "matrix"))
})
reals_formula<-formula(~AppliedAmount+AppliedAmountToIncome+DebtToIncome+FreeCash+LiabilitiesToIncome+NewLoanMonthlyPayment+NewPaymentToIncome+SumOfBankCredits+SumOfOtherCredits-1)
ints_formula<-update.formula(reals_formula, . ~ Age+LoanDuration+nr_of_dependants+CountOfBankCredits+CountOfPaydayLoans+CountOfOtherCredits+NoOfPreviousApplications+NoOfPreviousLoans-1 )
base_formula<-formula(~VerificationType+Gender+UseOfLoan+LoanDuration+education_id+
employment_status_id+Employment_Duration_Current_Employer+work_experience_10+occupation_area+
marital_status_id+nr_of_dependants_1+home_ownership_type_id+
CountOfBankCredits+CountOfOtherCredits-1)
base_formula_no_credits<-formula(~VerificationType+Gender+UseOfLoan+LoanDuration+education_id+
employment_status_id+Employment_Duration_Current_Employer+work_experience_10+occupation_area+
marital_status_id+nr_of_dependants_1+home_ownership_type_id -1)
equi_formula<- formula(~
equi_age_fac +
equi_marital_fac +
equi_education_fac +
equi_employment_status_fac +
equi_employment_length_fac +
equi_net_income_fac +
equi_principal_duration_fac +
equi_loan_purpose_fac -1)
bondora_formula<- formula(defaulted_before_6m~Rating_V0+Rating_V1-1 )
verification_formula<-formula(defaulted_before_6m~VerificationType -1)
models<-c( equi=equi_formula, bond=bondora_formula, veri=verification_formula)
# determine where to label selection flags (& whether to include all other filters) ..
# better to label everything if write to main df (or likely to have old data in other rows via bugs)
# select data
# filter out loans that have been issued and NOT been extended
# loan_ (singular) is for boolean vector, loans_ is dataframe
loan_issued<-!is.na(loandata$LoanDate)
# NA if loan not issued, otherwise could have been cancelled, defaulted or still live
loandata$surv_time<-pmin(interval(loandata$LoanDate,loandata$ReportAsOfEOD)/edays(1),
interval(loandata$LoanDate,loandata$ContractEndDate)/edays(1),
loandata$DefaultedOnDay,na.rm=TRUE)
loans_issued<-loandata[loan_issued, ]
loandata$loan_unchanged<-loan_issued &
(loandata$CurrentLoanHasBeenExtended==0) &
(loandata$MaturityDate_Last==loandata$MaturityDate_Original)
loandata$loan_cancelled<-loan_issued &
!is.na(loandata$ContractEndDate) &
(loandata$ContractEndDate==loandata$FirstPaymentDate)
#loan_verified<-loan_issued & loandata$VerificationType=='Income and expenses verified'
loan_elapsed_6m<-( loan_issued & (interval(loandata$LoanDate,loandata$ReportAsOfEOD)/edays(1)>180))
loan_elapsed_6m_mod<-( !is.na(loandata$FirstPaymentDate) &
(interval(loandata$FirstPaymentDate,loandata$ReportAsOfEOD)/edays(1)>150))
loandata$defaulted_before_6m<-!is.na(loandata$DefaultedOnDay) &
loandata$DefaultedOnDay<=180
loandata$defaulted_before_6m_or_restructured<-loandata$defaulted_before_6m |
!loandata$loan_unchanged
loandata$defaulted_before_6m_mod<-!is.na(loandata$Default_StartDate) &
(interval(loandata$FirstPaymentDate,loandata$Default_StartDate)/edays(1)<=150)
# Each selection pairs a row filter with the name of its target column.
# NOTE(review): `loan_unchanged` only exists as the column
# loandata$loan_unchanged (assigned above), not as a free-standing vector,
# so the original bare references here raised
# "object 'loan_unchanged' not found".
loan_selections <- list(
  elapsed_6m = list(select = loan_elapsed_6m, target = "defaulted_before_6m"),
  elapsed_6m_unchanged = list(select = loan_elapsed_6m & loandata$loan_unchanged,
                              target = "defaulted_before_6m"),
  elapsed_6m_restructured = list(select = loan_elapsed_6m,
                                 target = "defaulted_before_6m_or_restructured"),
  elapsed_6m_mod = list(select = loan_elapsed_6m_mod,
                        target = "defaulted_before_6m_mod"),
  elapsed_6m_mod_unchanged = list(select = loan_elapsed_6m_mod & loandata$loan_unchanged,
                                  target = "defaulted_before_6m_mod")
)
z<-data.frame()
j<-0
for (data in loan_selections){
j<-j+1
loan_selected<-data$select
target_variable<-data$target
loans_selected<-loandata[loan_selected,]
loans_selected_dt<-data.table(loans_selected)
#x1<-model.matrix(AD~(NewPaymentToIncome+LiabilitiesToIncome)*(VerificationType + Gender+ UseOfLoan+education_id+marital_status_id+employment_status_id+Employment_Duration_Current_Employer+occupation_area+home_ownership_type_id)-1,data=loandata[selected_loans,])
#y<-loandata[selected_loans,'AD']==1
set.seed(1234)
nfolds<-10
cross_val<-sample(nfolds,nrow(loans_selected),replace=TRUE)
#z<-cv_test(loans_selected, model_formula,target_variable, cross_val)
z1<-lapply(models,function(x) cv_test(loans_selected, x,target_variable, cross_val))
z2<-do.call(rbind,z1)
z2$data=j
z<-rbind(z,z2)
}
z3<-ddply(z,~model+data,summarise,
logloss_tr_mean=mean(ll_train),
logloss_tr_se=sd(ll_train)/sqrt(length(ll_train)),
logloss_te_mean=mean(ll_test),
logloss_te_se=sd(ll_test)/sqrt(length(ll_test)),
gini_tr_mean=mean(gini_train),
gini_tr_se=sd(gini_train)/sqrt(length(gini_train)),
gini_te_mean=mean(gini_test),
gini_te_se=sd(gini_test)/sqrt(length(gini_test)),
N=length(gini_test))
surv<-Surv(loans_issued$surv_time[loans_issued$surv_time>0 & !is.na(loans_issued$employment_status_id)],
event=loans_issued$AD[loans_issued$surv_time>0 & !is.na(loans_issued$employment_status_id)])
z<-survfit(surv)
plot(z)
x<-model.matrix(base_formula,data=loans_issued[loans_issued$surv_time>0,])
cv.fit<-cv.glmnet(x,surv,family="cox")
plot(cv.fit)
coef(cv.fit,'lambda.min')
co<-coef(cv.fit,'lambda.min')
ind<-which(co!=0)
cos<-data.frame(row.names=rownames(co)[ind],value=co[ind])
qplot(x=rownames(cos),y=value,xlab="coeff",data=cos)+coord_flip()
#tr_te=rbinom(sum(loan_selected),1,1-test_frac)
#loans_selected$train_test<-tr_te
plot(cv.fit)
ggplot(as.data.frame(predict_tr),aes(x=`1`))+geom_bar()
#extract coefs
coef(cv.fit,s='lambda.1se')
predict_tr<-predict(cv.fit,x1,type='response',s='lambda.min')
loans_dt_reals_all[loans_dt_reals_all$real %in% rownames(coef(cv.fit,s='lambda.1se')),]
#library(grid)
#library(gridExtra)
# bootstrapping
glm_boot<-glm_boot_gen('binomial', 'auc', 'lambda.1se', 'response')
z<-boot(data,glm_boot,10,stype='f')
a<-z$t
b1<-apply(a,2,sd)
b<-colMeans(a)
c<-data.frame(m=b,s=b1/sqrt(10))
d<-c[order(c$m),]
ggplot(d,aes(x=1:647,y=m))+geom_point()+geom_errorbar(aes(ymax=m+s,ymin=m-s))
loans_attribute$score_bd=logit(-0.547780803 +
-0.099912729 +
-0.002036076*loans_attribute$duration_months+
0.118092729*(loans_attribute$user_income_employment_length_years<1))
| /Projects/Bondora/src/bondora_logistic.R | no_license | seanv507/lendico | R | false | false | 6,764 | r | reals_formula<-formula(~AppliedAmount+AppliedAmountToIncome+DebtToIncome+FreeCash+LiabilitiesToIncome+NewLoanMonthlyPayment+NewPaymentToIncome+SumOfBankCredits+SumOfOtherCredits-1)
ints_formula<-update.formula(reals_formula, . ~ Age+LoanDuration+nr_of_dependants+CountOfBankCredits+CountOfPaydayLoans+CountOfOtherCredits+NoOfPreviousApplications+NoOfPreviousLoans-1 )
base_formula<-formula(~VerificationType+Gender+UseOfLoan+LoanDuration+education_id+
employment_status_id+Employment_Duration_Current_Employer+work_experience_10+occupation_area+
marital_status_id+nr_of_dependants_1+home_ownership_type_id+
CountOfBankCredits+CountOfOtherCredits-1)
base_formula_no_credits<-formula(~VerificationType+Gender+UseOfLoan+LoanDuration+education_id+
employment_status_id+Employment_Duration_Current_Employer+work_experience_10+occupation_area+
marital_status_id+nr_of_dependants_1+home_ownership_type_id -1)
equi_formula<- formula(~
equi_age_fac +
equi_marital_fac +
equi_education_fac +
equi_employment_status_fac +
equi_employment_length_fac +
equi_net_income_fac +
equi_principal_duration_fac +
equi_loan_purpose_fac -1)
bondora_formula<- formula(defaulted_before_6m~Rating_V0+Rating_V1-1 )
verification_formula<-formula(defaulted_before_6m~VerificationType -1)
models<-c( equi=equi_formula, bond=bondora_formula, veri=verification_formula)
# determine where to label selection flags (& whether to include all other filters) ..
# better to label everything if write to main df (or likely to have old data in other rows via bugs)
# select data
# filter out loans that have been issued and NOT been extended
# loan_ (singular) is for boolean vector, loans_ is dataframe
loan_issued<-!is.na(loandata$LoanDate)
# NA if loan not issued, otherwise could have been cancelled, defaulted or still live
loandata$surv_time<-pmin(interval(loandata$LoanDate,loandata$ReportAsOfEOD)/edays(1),
interval(loandata$LoanDate,loandata$ContractEndDate)/edays(1),
loandata$DefaultedOnDay,na.rm=TRUE)
loans_issued<-loandata[loan_issued, ]
loandata$loan_unchanged<-loan_issued &
(loandata$CurrentLoanHasBeenExtended==0) &
(loandata$MaturityDate_Last==loandata$MaturityDate_Original)
loandata$loan_cancelled<-loan_issued &
!is.na(loandata$ContractEndDate) &
(loandata$ContractEndDate==loandata$FirstPaymentDate)
#loan_verified<-loan_issued & loandata$VerificationType=='Income and expenses verified'
loan_elapsed_6m<-( loan_issued & (interval(loandata$LoanDate,loandata$ReportAsOfEOD)/edays(1)>180))
loan_elapsed_6m_mod<-( !is.na(loandata$FirstPaymentDate) &
(interval(loandata$FirstPaymentDate,loandata$ReportAsOfEOD)/edays(1)>150))
loandata$defaulted_before_6m<-!is.na(loandata$DefaultedOnDay) &
loandata$DefaultedOnDay<=180
loandata$defaulted_before_6m_or_restructured<-loandata$defaulted_before_6m |
!loandata$loan_unchanged
loandata$defaulted_before_6m_mod<-!is.na(loandata$Default_StartDate) &
(interval(loandata$FirstPaymentDate,loandata$Default_StartDate)/edays(1)<=150)
# Each selection pairs a row filter with the name of its target column.
# NOTE(review): `loan_unchanged` only exists as the column
# loandata$loan_unchanged (assigned above), not as a free-standing vector,
# so the original bare references here raised
# "object 'loan_unchanged' not found".
loan_selections <- list(
  elapsed_6m = list(select = loan_elapsed_6m, target = "defaulted_before_6m"),
  elapsed_6m_unchanged = list(select = loan_elapsed_6m & loandata$loan_unchanged,
                              target = "defaulted_before_6m"),
  elapsed_6m_restructured = list(select = loan_elapsed_6m,
                                 target = "defaulted_before_6m_or_restructured"),
  elapsed_6m_mod = list(select = loan_elapsed_6m_mod,
                        target = "defaulted_before_6m_mod"),
  elapsed_6m_mod_unchanged = list(select = loan_elapsed_6m_mod & loandata$loan_unchanged,
                                  target = "defaulted_before_6m_mod")
)
z<-data.frame()
j<-0
for (data in loan_selections){
j<-j+1
loan_selected<-data$select
target_variable<-data$target
loans_selected<-loandata[loan_selected,]
loans_selected_dt<-data.table(loans_selected)
#x1<-model.matrix(AD~(NewPaymentToIncome+LiabilitiesToIncome)*(VerificationType + Gender+ UseOfLoan+education_id+marital_status_id+employment_status_id+Employment_Duration_Current_Employer+occupation_area+home_ownership_type_id)-1,data=loandata[selected_loans,])
#y<-loandata[selected_loans,'AD']==1
set.seed(1234)
nfolds<-10
cross_val<-sample(nfolds,nrow(loans_selected),replace=TRUE)
#z<-cv_test(loans_selected, model_formula,target_variable, cross_val)
z1<-lapply(models,function(x) cv_test(loans_selected, x,target_variable, cross_val))
z2<-do.call(rbind,z1)
z2$data=j
z<-rbind(z,z2)
}
# Summarise cross-validation results per model/dataset: mean and standard
# error of log-loss and Gini on both the training and the test folds.
z3<-ddply(z,~model+data,summarise,
logloss_tr_mean=mean(ll_train),
logloss_tr_se=sd(ll_train)/sqrt(length(ll_train)),
logloss_te_mean=mean(ll_test),
logloss_te_se=sd(ll_test)/sqrt(length(ll_test)),
gini_tr_mean=mean(gini_train),
gini_tr_se=sd(gini_train)/sqrt(length(gini_train)),
gini_te_mean=mean(gini_test),
gini_te_se=sd(gini_test)/sqrt(length(gini_test)),
N=length(gini_test))
# Survival analysis of loan default (AD) vs. time on book.
# NOTE(review): the Surv object filters on surv_time>0 AND non-missing
# employment_status_id, but the model.matrix below filters on surv_time>0
# only -- if employment_status_id contains NAs the two objects have
# different row counts and cv.glmnet() will fail or misalign; verify.
surv<-Surv(loans_issued$surv_time[loans_issued$surv_time>0 & !is.na(loans_issued$employment_status_id)],
event=loans_issued$AD[loans_issued$surv_time>0 & !is.na(loans_issued$employment_status_id)])
z<-survfit(surv)   # Kaplan-Meier fit, no covariates
plot(z)
# Regularised Cox regression on the base formula's design matrix.
x<-model.matrix(base_formula,data=loans_issued[loans_issued$surv_time>0,])
cv.fit<-cv.glmnet(x,surv,family="cox")
plot(cv.fit)
coef(cv.fit,'lambda.min')
# Plot the non-zero coefficients at the CV-optimal penalty.
co<-coef(cv.fit,'lambda.min')
ind<-which(co!=0)
cos<-data.frame(row.names=rownames(co)[ind],value=co[ind])
qplot(x=rownames(cos),y=value,xlab="coeff",data=cos)+coord_flip()
#tr_te=rbinom(sum(loan_selected),1,1-test_frac)
#loans_selected$train_test<-tr_te
plot(cv.fit)
# NOTE(review): predict_tr is used here but only assigned three lines below;
# run top-to-bottom this line fails -- presumably interactive leftovers,
# reorder before scripting.
ggplot(as.data.frame(predict_tr),aes(x=`1`))+geom_bar()
#extract coefs
coef(cv.fit,s='lambda.1se')
predict_tr<-predict(cv.fit,x1,type='response',s='lambda.min')
loans_dt_reals_all[loans_dt_reals_all$real %in% rownames(coef(cv.fit,s='lambda.1se')),]
#library(grid)
#library(gridExtra)
# bootstrapping
# Bootstrap the glmnet coefficients (10 resamples) and plot mean +/- SE.
glm_boot<-glm_boot_gen('binomial', 'auc', 'lambda.1se', 'response')
z<-boot(data,glm_boot,10,stype='f')
a<-z$t                      # bootstrap replicates, one row per resample
b1<-apply(a,2,sd)           # per-coefficient standard deviation
b<-colMeans(a)              # per-coefficient mean
c<-data.frame(m=b,s=b1/sqrt(10))   # NOTE(review): masks base::c from here on
d<-c[order(c$m),]
# NOTE(review): x=1:647 hard-codes the coefficient count; seq_len(nrow(d))
# would survive a change in the design matrix.
ggplot(d,aes(x=1:647,y=m))+geom_point()+geom_errorbar(aes(ymax=m+s,ymin=m-s))
# Hand-coded scorecard: logistic score built from fitted coefficients.
# NOTE(review): magic numbers -- presumably copied from a fitted model;
# confirm they match the current fit before use.
loans_attribute$score_bd=logit(-0.547780803 +
-0.099912729 +
-0.002036076*loans_attribute$duration_months+
0.118092729*(loans_attribute$user_income_employment_length_years<1))
|
DDPCA_nonconvex <-
function(Sigma,K,max_iter_nonconvex = 15,SDD_approx = TRUE, max_iter_SDD = 20,eps = NA){
  # Non-convex alternating scheme decomposing Sigma ~ L + A, where L is the
  # rank-K spectral approximation of the current residual and A is
  # (approximately) diagonally dominant.
  #
  # Args:
  #   Sigma: symmetric matrix to decompose.
  #   K: target rank of the low-rank component L.
  #   max_iter_nonconvex: number of alternating iterations.
  #   SDD_approx: if TRUE use the cheap diagonally-dominant projection
  #     ProjDD; otherwise use the iterative SDD projection ProjSDD.
  #   max_iter_SDD, eps: forwarded to ProjSDD when SDD_approx is FALSE.
  #
  # Returns: list(L = low-rank part, A = symmetric (S)DD part).
  S = Sigma
  for (i in 1:max_iter_nonconvex){
    # BUG FIX: the original called eigs(Sigma, K) here, so `S` -- updated at
    # the end of every pass -- was never read, and iterations 2..max were
    # no-ops. Decomposing the current residual S makes this an actual
    # alternating scheme.
    eig_object = eigs(S,K)
    if (K>1){
      D = diag(eig_object$values)
    } else {
      D = eig_object$values
    }
    V = eig_object$vectors
    L = V%*%D%*%t(V)
    if (SDD_approx) {
      # Cheap projection; symmetrise afterwards since ProjDD need not
      # return a symmetric matrix.
      A = ProjDD(Sigma - L)
      A_sym = (A + t(A))/2
    } else {
      A_sym = ProjSDD(Sigma - L, max_iter_SDD,eps)
    }
    S = Sigma - A_sym
  }
  return(list(L=L,A=A_sym))
}
| /R/DDPCA_nonconvex.R | no_license | cran/ddpca | R | false | false | 532 | r | DDPCA_nonconvex <-
function(Sigma,K,max_iter_nonconvex = 15,SDD_approx = TRUE, max_iter_SDD = 20,eps = NA){
S = Sigma
for (i in 1:max_iter_nonconvex){
eig_object = eigs(Sigma,K)
if (K>1){
D = diag(eig_object$values)
} else {
D = eig_object$values
}
V = eig_object$vectors
L = V%*%D%*%t(V)
if (SDD_approx) {
A = ProjDD(Sigma - L)
A_sym = (A + t(A))/2
} else {
A_sym = ProjSDD(Sigma - L, max_iter_SDD,eps)
}
S = Sigma - A_sym
}
return(list(L=L,A=A_sym))
}
|
#' Filled Density Plot
#'
#' Create a simple and beautiful distribution plot of a single column.
#'
#' @param data the dataset (data frame or list) in which the variable to
#'   plot is stored
#' @param x a column index (integer) or a column name (string) identifying
#'   the numeric variable whose distribution should be visualised
#' @param col A string, indicating the fill color of the plot. Default is
#'   'orange'.
#' @return The \code{\link[stats]{density}} estimate, invisibly; the
#'   function is called mainly for its side effect of drawing the plot.
#' @export
plot_density <- function(data, x, col = "orange") {
  values <- data[[x]]
  # Fail early with a clear message instead of an obscure error from density().
  if (!is.numeric(values)) {
    stop("Column '", x, "' must be numeric to estimate a density.", call. = FALSE)
  }
  # Use the name directly when given; colnames(data)[x] would be NA for a string.
  label <- if (is.character(x)) x else colnames(data)[x]
  d <- density(values, na.rm = TRUE)   # kernel density estimate (NAs dropped)
  plot(d, main = label)                # outline + title from the column name
  polygon(d, col = col, border = "black")  # fill the curve
  invisible(d)
}
| /R/plot_distribution.R | no_license | DavideCannata/cutR | R | false | false | 622 | r | #' Filled Density Plot
#'
#' Create a simple and beautiful distribution plot
#' @param data the dataset in which is stored the variable to plot
#' @param x an integer, indicating the index of the column you want to visualise the distribution
#' @param col A string, indicating the color of the plot. Default is 'orange'.
#' @return A plot with a density function
#' @export
plot_density <- function(data, x, col = "orange") {
d <- density(data[[x]], na.rm = TRUE) ##calculate density
plot(d, main = colnames(data)[x]) ##plot density
polygon(d, col = col, border = "black") ##describe shapes and colours
}
|
# Allochthony model for labeled lakes: Paul Lake 2001
# Stephen R. Carpenter, 2015-09-19
rm(list = ls())   # NOTE(review): wipes the whole workspace; acceptable in a
                  # stand-alone batch script, never in code that is source()d
graphics.off()    # close any plotting devices left over from earlier runs
library(numDeriv) # provides jacobian(), used in the stability analysis below
# Functions for phytoplankton ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Integrated irradiance effect for phytoplankton growth
# Depth-integrated photosynthesis-light response summed over the supplied
# depth grid (Riemann sum with step dz). Reads the globals eps0, epsDOC,
# DOC, epsP, I0, k_sat, k_inh and Fmax defined in the main program.
GAMMA.fun <- function(z, Pbar, dz) {
  # Total light extinction: pure water + DOC + phytoplankton self-shading
  extinction <- eps0 + epsDOC * DOC + epsP * Pbar
  # Irradiance at each depth, then the saturating/photoinhibited response
  irradiance <- I0 * exp(-z * extinction)
  sum(dz * (1 / Fmax) * (1 - exp(-k_sat * irradiance)) * exp(-k_inh * irradiance))
}
# Phytoplankton instantaneous growth rate (losses not included)
# Light-limited growth scaled by a saturating P-load term Load/(2 + Load).
# rP is a global scaling constant (re-fitted to observed NPP below).
# NOTE: the DOC argument is not used here -- GAMMA.fun reads the global DOC.
Grow.Phyto <- function(P0, DOC, Load, Zvec, dz) {
  light.term <- GAMMA.fun(Zvec, P0, dz)
  load.term <- Load / (2 + Load)
  rP * light.term * P0 * load.term
}
# End of phytoplankton functions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Main Program &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# NOTE(review): assignments below mix `<-` and `=`; harmless but worth
# standardising on `<-`. Likewise `quote=F` would be clearer as `quote=FALSE`.
# Set up for phytoplankton calculations
I0 <- 600 # Surface irradiance, microEinsteins m-1 s-1; 600 is typical daily median
# P-I curve parameters, median of NTL P-I curves
k_sat <- 0.0194 # per microEinsteins m-1 s-1 (Follows 0.012)
k_inh <- 0.00065 # per microEinsteins m-1 s-1 (Follows mean 0.004, range 0.001-0.007)
# Derived parameter from Follows et al.
Fmax <- ((k_sat + k_inh)/k_sat)*exp(-(k_inh/k_sat)*log(k_inh/(k_sat+k_inh)))
# Light extinction parameters, Carpenter et al. L&O 1998
eps0 <- 0.0213 # Light extinction by pure water
epsDOC = 0.0514 # DOC light extinction coef
epsP <- 0.0177 # Phytoplankton, per m2 (mg phosphorus)-1
rP = 1 # Phytoplankton production per unit P load
# Data for individual whole-lake experiment ++++++++++++++++++++++++++++++++++++++++++++
# Paul Lake 2001
ZT = 3.5
DOC = 304*12/1000 # umol/L * 12ug/umol * 10^-3 mg/ug
POC = 35.5*12/1000 # umol/L * 12ug/umol * 10^-3 mg/ug
Chl = 4.21
Load = 0.3 # mg P m-2 d-1
ZB = 1.05*0.5 # Estimate converted to g C m-2 from Dry Mass
Phi.POC = 0.38 # POC allochthony
TPOC = Phi.POC*POC
Phi.Z = 0.36 # Zoop allochthony # Chaob 0.36; Zoop 0.24
GPP = 43.4 # GPP in mmol O2 m-2 d-1
# End experiment-specific data ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# C:Chl ratio based on autochthonous POC and Chl
APOC = (1-Phi.POC)*POC*1000 # convert mg to ug
CChl = APOC/Chl # mass:mass
# Areal Phyto Biomass as C
AC.AR = Chl*ZT*CChl/1000 # Algal g C m-2 using C:Chl ratio
# Algae
I.AC = GPP*12*0.7/1000 # NPP as g C m-2 d-1
Mtot.AC = I.AC/AC.AR # total mortality of algae due to all losses
s.A = 0.3/ZT # Phytoplankton sinking (velocity / thermocline depth)
QAZ = (Mtot.AC - s.A)*AC.AR # grazing flux as g C m-2
print('Results for Paul 2001',quote=F)
print('Phytoplankton',quote=F)
print(c('Phyto C to Chl ratios direct method = ',CChl),quote=F)
print(c('Phyto biomass g C m-2',AC.AR),quote=F)
print(c('NPP g C m-2 d-1',I.AC),quote=F)
print(c('Phyto total mort = ',Mtot.AC),quote=F)
print(c('Sinking component of total Mort',s.A),quote=F)
print(c('Flux to zooplankton ',QAZ),quote=F)
# Scale the Follows et al. production function to observed NPP
# Depth sequence for integration of GPP
# (dz and Zvec defined here are also reused by the equilibrium functions below)
nZ <- 20 # Steps for vertical integration
dz <- ZT/nZ
Zvec <- seq(0,ZT,by=dz)
# Compute production for reference conditions
NPP_ref = Grow.Phyto(Chl,DOC,Load,Zvec,dz)
print('',quote=F)
print('Rescaling Follows et al. production function to observed NPP',quote=F)
print('Reference Conditions', quote=F)
print(c('NPP = ',NPP_ref),quote=F)
print('Chl, DOC, Zthermo, Load, rP',quote=F)
print(c(Chl,DOC,ZT,Load,rP),quote=F)
# Rescale rP so that NPP is observed value at reference conditions
# (Grow.Phyto reads rP from the global environment, so reassigning it here
# rescales all subsequent production calls.)
rP = I.AC/NPP_ref
NPP_ref = Grow.Phyto(Chl,DOC,Load,Zvec,dz)
print(c('Reference Conditions with rP rescaled to ',rP), quote=F)
print(c('NPP = ',NPP_ref),quote=F)
print('Chl, DOC, ZT, Load, rP',quote=F)
print(c(Chl,DOC,ZT,Load,rP),quote=F)
# Compute attack rate
# See Chow-Fraser+Sprules_FT_fit.R
# Inverts the Holling type II disc equation QAZ = a*A*Z/(1 + a*h*A) for a.
handle = 0.005 # handling time in days*animals/algae consumed from Chow-Fraser & Sprules
attack = QAZ/(AC.AR*ZB - handle*AC.AR*QAZ)
print('',quote=F)
print('Grazing',quote=F)
print(c('Attack rate = ',attack),quote=F)
print(c('Handling Time = ',handle),quote=F)
# Compute steady-state algae detritus
pA = 0.3 # egestion coefficient BACK TO D for zoops feeding on algae
pD = 0.3
s.D = 0.5/ZT # Sedimentation loss coefficient = sinking rate/ZT (Reynolds 1984)
# The detritus steady state solves a quadratic in D; polyroot() takes
# coefficients in ascending degree: Dcoef[1] + Dcoef[2]*D + Dcoef[3]*D^2 = 0,
# and the largest real part is taken as the positive root.
Dcoef = c(0,0,0) # vector to hold polynomial coefficients for detritus polynomial
Dcoef[1] = pA*QAZ
Dcoef[2] = pA*QAZ*attack*handle - s.D - (1-pD)*attack*ZB
Dcoef[3] = -1*s.D*attack*handle
Droots = polyroot(Dcoef)
Dstar = max(Re(Droots))
# Flux of algae detritus to zooplankton
QDZ = attack*Dstar*ZB/(1 + attack*handle*Dstar)
print('Detrital algae info',quote=F)
print(c('Detrital algae steady state g C m-2 ',Dstar),quote=F)
print(c('Detrital algae flux to zoopl g C m-2 d-1',QDZ),quote=F)
# Compute TPOC input rate
pT = 0.5 # egestion coefficient for TPOC back to TPOC
TPOCAR = TPOC*ZT # areal TPOC g/m2
QTZ = attack*TPOCAR*ZB/(1 + handle*attack*TPOCAR) # Flux from TPOC to Zoopl
s.T = 0.1/ZT # Sedimentation loss coefficient = sinking rate/ZT (Reynolds 1984)
I.T = s.T*TPOCAR + (1-pT)*QTZ
print('Phyto and TPOC fluxes to Zoopl',quote=F)
print(c(QAZ,QTZ))
print('TPOC fluxes',quote=F)
print(c('TPOC biomass g C m-2',TPOCAR),quote=F)
print(c('TPOC sedimentation loss coefficient = ',s.T),quote=F)
print(c('TPOC input rate g m-2 d-1 = ',I.T),quote=F)
# Compute growth efficiencies on algae and TPOC for zooplankton
# gTZ solves Phi.Z = gTZ*QTZ / (gTZ*QTZ + gAZ*QAZ + gDZ*QDZ), i.e. it is the
# TPOC growth efficiency that reproduces the observed zooplankton allochthony.
gAZ = 0.25 # assumed growth efficiency from algae to zoop
gDZ = 0.05 # assumed growth efficiency from algal detritus to zoop
gTZ = Phi.Z*(gAZ*QAZ + gDZ*QDZ)/( (1-Phi.Z)*QTZ )
print('Zooplankton',quote=F)
print(c('Efficiencies gAZ, gTZ ',gAZ,gTZ),quote=F)
# Compute Zoop mortality
mZTOT = gAZ*QAZ + gDZ*QDZ + gTZ*QTZ
mZ = mZTOT/ZB
print(c('Total Zoop Mort flux = ',mZTOT),quote=F)
print(c('Zoop Mort Coef = ',mZ),quote=F)
print(c('Zoop biomass, g m-2',ZB),quote=F)
# Parameters for zooplanktivory
# cF inverts QZF = cF*ZB^2/(hF^2 + ZB^2) at the observed biomass.
mZnp = 0.04 # non-predatory mortality coefficient of zooplankton
QZF = (mZ-mZnp)*ZB # planktivory flux of zooplankton
hF = 1.4*ZT # Based on estimate in the Regime Shift book
cF = QZF*(hF^2 + ZB^2)/(ZB^2) # maximum planktivory rate
# Parameters for zooplankton refuging
D.Z = 0 # Diffusion rate between refuge and foraging arena
Zref = ZB # Zoop biomass in refuge
print('Zooplankton parameters',quote=F)
print('Planktivory flux, hF, cF',quote=F)
print(c(QZF,hF,cF))
# Analysis of Equilibria $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Function for deviation of A, T and Z from equilibrium
# Returns the squared distance of the state's rates of change from zero;
# minimised with optim() below to locate steady states. The state is passed
# on the log scale so the optimiser cannot propose negative biomasses.
# Reads the model parameters from the global environment.
dATZdt.eq = function(lY0) {
  # unpack state variables (all as g c m-2)
  Y0 = exp(lY0)
  A0 = Y0[1]   # phytoplankton
  T0 = Y0[2]   # terrestrial POC
  Z0 = Y0[3]   # zooplankton
  D0 = Y0[4]   # algal detritus
  # Light effects
  vChl = A0*(1/CChl)*1000*(1/ZT) # convert g C/m2 to mg Chl/m3
  NPP = Grow.Phyto(vChl,DOC,Load,Zvec,dz) # NPP
  Anet = NPP - s.A*A0 # Net after sinking
  # Consumption fluxes (Holling type II).
  # BUG FIX: the original used (1 - attack*handle*X) denominators, which
  # contradicts the disc equation used everywhere else in this script (the
  # QDZ/QTZ calculations and the derivation of `attack` all use
  # 1 + attack*handle*X) and can make the denominator zero or negative.
  Q.AZ = attack*A0*Z0/(1 + attack*handle*A0)
  Q.TZ = attack*T0*Z0/(1 + attack*handle*T0)
  Q.DZ = attack*D0*Z0/(1 + attack*handle*D0)
  Q.ZF = (cF*Z0^2)/(hF^2 + Z0^2)
  # Dynamics
  dAdt = Anet - Q.AZ
  dTdt = I.T - s.T*T0 -(1-pT)*Q.TZ
  # BUG FIX: the detritus route used gAZ; the rest of the script (zoop
  # mortality mZTOT and the allochthony calculations) uses gDZ for detritus.
  dZdt = gAZ*Q.AZ + gTZ*Q.TZ + gDZ*Q.DZ - mZnp*Z0 - Q.ZF + D.Z*(Zref-Z0)
  dDdt = pA*Q.AZ - s.D*D0 - (1-pD)*Q.DZ
  rates = c(dAdt,dTdt,dZdt,dDdt)
  SSE = sum(rates*rates) # sum of squared distance from equilibrium
  return(SSE)
}
# Function for Jacobian of A, T and Z
# Same dynamics as dATZdt.eq, but (a) the state is passed untransformed and
# (b) the raw rate vector is returned, so numDeriv::jacobian() can
# differentiate it for the stability analysis below.
dATZdt.jac = function(Y0) {
  # unpack state variables (all as g c m-2)
  A0 = Y0[1]   # phytoplankton
  T0 = Y0[2]   # terrestrial POC
  Z0 = Y0[3]   # zooplankton
  D0 = Y0[4]   # algal detritus
  # Light effects
  vChl = A0*(1/CChl)*1000*(1/ZT) # convert g C/m2 to mg Chl/m3
  NPP = Grow.Phyto(vChl,DOC,Load,Zvec,dz) # NPP
  Anet = NPP - s.A*A0 # Net after sinking
  # Consumption fluxes (Holling type II).
  # BUG FIX: the original used (1 - attack*handle*X); the disc equation used
  # everywhere else in this script (QDZ, QTZ, and the derivation of
  # `attack`) has (1 + attack*handle*X).
  Q.AZ = attack*A0*Z0/(1 + attack*handle*A0)
  Q.TZ = attack*T0*Z0/(1 + attack*handle*T0)
  Q.DZ = attack*D0*Z0/(1 + attack*handle*D0)
  Q.ZF = (cF*Z0^2)/(hF^2 + Z0^2)
  # Dynamics
  dAdt = Anet - Q.AZ
  dTdt = I.T - s.T*T0 -(1-pT)*Q.TZ
  # BUG FIX: detritus route now uses gDZ (as in the mortality/allochthony
  # calculations), not gAZ.
  dZdt = gAZ*Q.AZ + gTZ*Q.TZ + gDZ*Q.DZ - mZnp*Z0 - Q.ZF + D.Z*(Zref-Z0)
  dDdt = pA*Q.AZ - s.D*D0 - (1-pD)*Q.DZ
  # (The dead SSE computation from the original was removed; only the rate
  # vector is used by the caller.)
  c(dAdt,dTdt,dZdt,dDdt)
}
# Load regressions to predict ZT from DOC, Chl and P Load
# Best-fitting model predicts ZT from DOC and Chl (ZT_DOC.Chl)
# However prediction from DOC alone is almost as good (ZT_DOC)
# Save line:
# save(ZTvec,DOCvec,ChlVvec,Pvec,ZT_DOC.Chl,ZT_DOC.Load,ZT_DOC,
# file='ZTmodels.Rdata')
load(file='ZTmodels.Rdata')
ZTb = ZT_DOC$coefficients # intercept and slope for ZT ~ DOC model
# NOTE(review): ZTb is not used anywhere below in this script -- either dead
# code or a leftover from a ZT-update step that was dropped; confirm.
# Set up driver gradient ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Scaler = seq(0.9,1.6,length.out=12)
NG = length(Scaler) # number of gradient steps
LPgrad = rep(0,NG) # Vector to hold scaled driver
LPbase = Load # Save the nominal value
Loadvec = LPbase*Scaler
# Vectors to hold results (pre-allocated, one slot per gradient step)
Avec = rep(0,NG)
Tvec = rep(0,NG)
ZBvec = rep(0,NG)
Dvec = rep(0,NG)
Allovec = rep(0,NG)
Lamvec = rep(0,NG)
Zprod = rep(0,NG)
NPPvec = rep(0,NG)
for(iG in 1:NG) { # Start gradient over parameter value
# Modify the load parameter
# NOTE(review): this overwrites the global Load read by Grow.Phyto and the
# equilibrium functions -- intended here, but a side effect to be aware of.
LPgrad[iG] = Scaler[iG]*LPbase
Load = Scaler[iG]*LPbase # Alter the P load
# Find equilibria for Experimental conditions
Y0 = c(AC.AR,TPOCAR,ZB,Dstar) # guesses
lY0 = log(Y0)
ATZeq = optim(lY0,dATZdt.eq,method='Nelder-Mead')
parest = exp(ATZeq$par)
Avec[iG] = parest[1]
Tvec[iG] = parest[2]
ZBvec[iG] = parest[3]
Dvec[iG] = parest[4]
# Check stability
JAC = jacobian(dATZdt.jac,parest)
JAC.lamda = eigen(JAC,only.values=T)
Lmods = Mod(JAC.lamda$values)
iLmax = which.max(Lmods) # which eigenvalue has maximum modulus?
Lamvec[iG] = JAC.lamda$values[iLmax] # Save the eigenvalue with max modulus
# Compute allochthony for estimates
# (note the 1 + attack*handle*X disc-equation denominators used here)
gQAZ = gAZ*attack*Avec[iG]*ZBvec[iG]/(1 + attack*handle*Avec[iG])
gQTZ = gTZ*attack*Tvec[iG]*ZBvec[iG]/(1 + attack*handle*Tvec[iG])
gQDZ = gDZ*attack*Dvec[iG]*ZBvec[iG]/(1 + attack*handle*Dvec[iG])
Allovec[iG] = gQTZ/(gQTZ + gQAZ + gQDZ)
# Zooplankton secondary production
Zprod[iG] = gQTZ + gQAZ + gQDZ - mZnp*ZBvec[iG]
# Compute GPP & NPP
vChl = Avec[iG]*(1/CChl)*1000*(1/ZT) # convert g C/m2 to mg Chl/m3
GPPtemp = Grow.Phyto(vChl,DOC,Load,Zvec,dz)
NPPvec[iG] = GPPtemp - s.A*Avec[iG]
}
# Plots
windows() # NOTE(review): windows() is MS-Windows only; dev.new() is portable
par(mfrow=c(2,2),cex.axis=1.2,cex.lab=1.2,mar=c(5, 4.2, 4, 2) + 0.1)
plot(Loadvec,Avec,type='l',lwd=2,col='forestgreen',
xlab = 'P Load, mg/(m2 d)', ylab = 'Phytos')
plot(Loadvec,Tvec,type='l',lwd=2,col='darkred',
xlab = 'P Load, mg/(m2 d)', ylab = 'TPOC')
plot(Loadvec,ZBvec,type='l',lwd=2,col='blue',
xlab = 'P Load, mg/(m2 d)', ylab = 'Zoopl')
plot(Loadvec,Allovec,type='l',lwd=2,col='sienna',
xlab = 'P Load, mg/(m2 d)', ylab = 'Allochthony')
# Signed modulus of the dominant eigenvalue: negative => locally stable.
Lsign = sign(Re(Lamvec))
Lamda = Lsign*Mod(Lamvec)
Lsym = rep(19,NG) # symbol for real vs complex
# NOTE(review): the rep(19,...) above is immediately overwritten by the
# ifelse() two lines below; the first assignment is dead code.
imLam = Im(Lamvec)
Lsym = ifelse(imLam == 0,19,21)
windows()
par(mfrow=c(1,1),cex.axis=1.5,cex.lab=1.5,mar=c(5, 4.2, 4, 2) + 0.1)
plot(Loadvec,Lamda,type='p',pch=Lsym,col='red',cex=1.5,
xlab='P Load, mg/(m2 d)',ylab='Max Eigenvalue',
main='Solid -> real, Open -> complex')
| /ProgramExample_PaulLake2001+Pgrad_2015-09-19.R | no_license | SRCarpen/ATZ_Cascade | R | false | false | 11,285 | r | # Allochthony model for labeled lakes: Paul Lake 2001
# Stephen R. Carpenter, 2015-09-19
rm(list = ls())
graphics.off()
library(numDeriv)
# Functions for phytoplankton ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Integrated irradiance effect for phytoplankton growth
GAMMA.fun <- function(z,Pbar,dz) {
eps.total = eps0 + epsDOC*DOC + epsP*Pbar
Iz <- I0*exp(-z*eps.total)
rate.at.z <- dz*(1/Fmax)*(1 - exp(-k_sat*Iz))*exp(-k_inh*Iz)
GAMMA = sum(rate.at.z)
return(GAMMA)
}
# Phytoplankton instantaneous growth rate (losses not included)
Grow.Phyto = function(P0,DOC,Load,Zvec,dz) {
Igamma = GAMMA.fun(Zvec,P0,dz)
Prate = rP*Igamma*P0*Load/(2 + Load)
return(Prate)
}
# End of phytoplankton functions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Main Program &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# Set up for phytoplankton calculations
I0 <- 600 # Surface irradiance, microEinsteins m-1 s-1; 600 is typical daily median
# P-I curve parameters, median of NTL P-I curves
k_sat <- 0.0194 # per microEinsteins m-1 s-1 (Follows 0.012)
k_inh <- 0.00065 # per microEinsteins m-1 s-1 (Follows mean 0.004, range 0.001-0.007)
# Derived parameter from Follows et al.
Fmax <- ((k_sat + k_inh)/k_sat)*exp(-(k_inh/k_sat)*log(k_inh/(k_sat+k_inh)))
# Light extinction parameters, Carpenter et al. L&O 1998
eps0 <- 0.0213 # Light extinction by pure water
epsDOC = 0.0514 # DOC light extinction coef
epsP <- 0.0177 # Phytoplankton, per m2 (mg phosphorus)-1
rP = 1 # Phytoplankton production per unit P load
# Data for individual whole-lake experiment ++++++++++++++++++++++++++++++++++++++++++++
# Paul Lake 2001
ZT = 3.5
DOC = 304*12/1000 # umol/L * 12ug/umol * 10^-3 mg/ug
POC = 35.5*12/1000 # umol/L * 12ug/umol * 10^-3 mg/ug
Chl = 4.21
Load = 0.3 # mg P m-2 d-1
ZB = 1.05*0.5 # Estimate converted to g C m-2 from Dry Mass
Phi.POC = 0.38 # POC allochthony
TPOC = Phi.POC*POC
Phi.Z = 0.36 # Zoop allochthony # Chaob 0.36; Zoop 0.24
GPP = 43.4 # GPP in mmol O2 m-2 d-1
# End experiment-specific data ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# C:Chl ratio based on autochthonous POC and Chl
APOC = (1-Phi.POC)*POC*1000 # convert mg to ug
CChl = APOC/Chl # mass:mass
# Areal Phyto Biomass as C
AC.AR = Chl*ZT*CChl/1000 # Algal g C m-2 using C:Chl ratio
# Algae
I.AC = GPP*12*0.7/1000 # NPP as g C m-2 d-1
Mtot.AC = I.AC/AC.AR # total mortality of algae due to all losses
s.A = 0.3/ZT # Phytoplankton sinking (velocity / thermocline depth)
QAZ = (Mtot.AC - s.A)*AC.AR # grazing flux as g C m-2
print('Results for Paul 2001',quote=F)
print('Phytoplankton',quote=F)
print(c('Phyto C to Chl ratios direct method = ',CChl),quote=F)
print(c('Phyto biomass g C m-2',AC.AR),quote=F)
print(c('NPP g C m-2 d-1',I.AC),quote=F)
print(c('Phyto total mort = ',Mtot.AC),quote=F)
print(c('Sinking component of total Mort',s.A),quote=F)
print(c('Flux to zooplankton ',QAZ),quote=F)
# Scale the Follows et al. production function to observed NPP
# Depth sequence for integration of GPP
nZ <- 20 # Steps for vertical integration
dz <- ZT/nZ
Zvec <- seq(0,ZT,by=dz)
# Compute production for reference conditions
NPP_ref = Grow.Phyto(Chl,DOC,Load,Zvec,dz)
print('',quote=F)
print('Rescaling Follows et al. production function to observed NPP',quote=F)
print('Reference Conditions', quote=F)
print(c('NPP = ',NPP_ref),quote=F)
print('Chl, DOC, Zthermo, Load, rP',quote=F)
print(c(Chl,DOC,ZT,Load,rP),quote=F)
# Rescale rP so that NPP is observed value at reference conditions
rP = I.AC/NPP_ref
NPP_ref = Grow.Phyto(Chl,DOC,Load,Zvec,dz)
print(c('Reference Conditions with rP rescaled to ',rP), quote=F)
print(c('NPP = ',NPP_ref),quote=F)
print('Chl, DOC, ZT, Load, rP',quote=F)
print(c(Chl,DOC,ZT,Load,rP),quote=F)
# Compute attack rate
# See Chow-Fraser+Sprules_FT_fit.R
handle = 0.005 # handling time in days*animals/algae consumed from Chow-Fraser & Sprules
attack = QAZ/(AC.AR*ZB - handle*AC.AR*QAZ)
print('',quote=F)
print('Grazing',quote=F)
print(c('Attack rate = ',attack),quote=F)
print(c('Handling Time = ',handle),quote=F)
# Compute steady-state algae detritus
pA = 0.3 # egestion coefficient BACK TO D for zoops feeding on algae
pD = 0.3
s.D = 0.5/ZT # Sedimentation loss coefficient = sinking rate/ZT (Reynolds 1984)
Dcoef = c(0,0,0) # vector to hold polynomial coefficients for detritus polynomial
Dcoef[1] = pA*QAZ
Dcoef[2] = pA*QAZ*attack*handle - s.D - (1-pD)*attack*ZB
Dcoef[3] = -1*s.D*attack*handle
Droots = polyroot(Dcoef)
Dstar = max(Re(Droots))
# Flux of algae detritus to zooplankton
QDZ = attack*Dstar*ZB/(1 + attack*handle*Dstar)
print('Detrital algae info',quote=F)
print(c('Detrital algae steady state g C m-2 ',Dstar),quote=F)
print(c('Detrital algae flux to zoopl g C m-2 d-1',QDZ),quote=F)
# Compute TPOC input rate
pT = 0.5 # egestion coefficient for TPOC back to TPOC
TPOCAR = TPOC*ZT # areal TPOC g/m2
QTZ = attack*TPOCAR*ZB/(1 + handle*attack*TPOCAR) # Flux from TPOC to Zoopl
s.T = 0.1/ZT # Sedimentation loss coefficient = sinking rate/ZT (Reynolds 1984)
I.T = s.T*TPOCAR + (1-pT)*QTZ
print('Phyto and TPOC fluxes to Zoopl',quote=F)
print(c(QAZ,QTZ))
print('TPOC fluxes',quote=F)
print(c('TPOC biomass g C m-2',TPOCAR),quote=F)
print(c('TPOC sedimentation loss coefficient = ',s.T),quote=F)
print(c('TPOC input rate g m-2 d-1 = ',I.T),quote=F)
# Compute growth efficiencies on algae and TPOC for zooplankton
gAZ = 0.25 # assumed growth efficiency from algae to zoop
gDZ = 0.05 # assumed growth efficiency from algal detritus to zoop
gTZ = Phi.Z*(gAZ*QAZ + gDZ*QDZ)/( (1-Phi.Z)*QTZ )
print('Zooplankton',quote=F)
print(c('Efficiencies gAZ, gTZ ',gAZ,gTZ),quote=F)
# Compute Zoop mortality
mZTOT = gAZ*QAZ + gDZ*QDZ + gTZ*QTZ
mZ = mZTOT/ZB
print(c('Total Zoop Mort flux = ',mZTOT),quote=F)
print(c('Zoop Mort Coef = ',mZ),quote=F)
print(c('Zoop biomass, g m-2',ZB),quote=F)
# Parameters for zooplanktivory
mZnp = 0.04 # non-predatory mortality coefficient of zooplankton
QZF = (mZ-mZnp)*ZB # planktivory flux of zooplankton
hF = 1.4*ZT # Based on estimate in the Regime Shift book
cF = QZF*(hF^2 + ZB^2)/(ZB^2) # maximum planktivory rate
# Parameters for zooplankton refuging
D.Z = 0 # Diffusion rate between refuge and foraging arena
Zref = ZB # Zoop biomass in refuge
print('Zooplankton parameters',quote=F)
print('Planktivory flux, hF, cF',quote=F)
print(c(QZF,hF,cF))
# Analysis of Equilibria $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Function for deviation of A, T and Z from equilibrium
dATZdt.eq = function(lY0) {
# unpack state variables (all as g c m-2)
Y0 = exp(lY0)
A0 = Y0[1]
T0 = Y0[2]
Z0 = Y0[3]
D0 = Y0[4]
# Light effects
vChl = A0*(1/CChl)*1000*(1/ZT) # convert g C/m2 to mg Chl/m3
NPP = Grow.Phyto(vChl,DOC,Load,Zvec,dz) # NPP
Anet = NPP - s.A*A0 # Net after sinking
# Consumption fluxes
Q.AZ = attack*A0*Z0/(1 - attack*handle*A0)
Q.TZ = attack*T0*Z0/(1 - attack*handle*T0)
Q.DZ = attack*D0*Z0/(1 - attack*handle*D0)
Q.ZF = (cF*Z0^2)/(hF^2 + Z0^2)
# Dynamics
dAdt = Anet - Q.AZ
dTdt = I.T - s.T*T0 -(1-pT)*Q.TZ
dZdt = gAZ*Q.AZ + gTZ*Q.TZ + gAZ*Q.DZ - mZnp*Z0 - Q.ZF + D.Z*(Zref-Z0)
dDdt = pA*Q.AZ - s.D*D0 - (1-pD)*Q.DZ
rates = c(dAdt,dTdt,dZdt,dDdt)
SSE = sum(rates*rates) # sum of squared distance from equilibrium
return(SSE) # return either rates or SSE
}
# Function for Jacobian of A, T and Z
dATZdt.jac = function(Y0) {
# unpack state variables (all as g c m-2)
#Y0 = exp(lY0) # no need for transform
A0 = Y0[1]
T0 = Y0[2]
Z0 = Y0[3]
D0 = Y0[4]
# Light effects
vChl = A0*(1/CChl)*1000*(1/ZT) # convert g C/m2 to mg Chl/m3
NPP = Grow.Phyto(vChl,DOC,Load,Zvec,dz) # NPP
Anet = NPP - s.A*A0 # Net after sinking
# Consumption fluxes
Q.AZ = attack*A0*Z0/(1 - attack*handle*A0)
Q.TZ = attack*T0*Z0/(1 - attack*handle*T0)
Q.DZ = attack*D0*Z0/(1 - attack*handle*D0)
Q.ZF = (cF*Z0^2)/(hF^2 + Z0^2)
# Dynamics
dAdt = Anet - Q.AZ
dTdt = I.T - s.T*T0 -(1-pT)*Q.TZ
dZdt = gAZ*Q.AZ + gTZ*Q.TZ + gAZ*Q.DZ - mZnp*Z0 - Q.ZF + D.Z*(Zref-Z0)
dDdt = pA*Q.AZ - s.D*D0 - (1-pD)*Q.DZ
rates = c(dAdt,dTdt,dZdt,dDdt)
SSE = sum(rates*rates) # sum of squared distance from equilibrium
return(rates) # return either rates or SSE
}
# Load regressions to predict ZT from DOC, Chl and P Load
# Best-fitting model predicts ZT from DOC and Chl (ZT_DOC.Chl)
# However prediction from DOC alone is almost as good (ZT_DOC)
# Save line:
# save(ZTvec,DOCvec,ChlVvec,Pvec,ZT_DOC.Chl,ZT_DOC.Load,ZT_DOC,
# file='ZTmodels.Rdata')
load(file='ZTmodels.Rdata')
ZTb = ZT_DOC$coefficients # intercept and slope for ZT ~ DOC model
# Set up driver gradient ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Scaler = seq(0.9,1.6,length.out=12)
NG = length(Scaler) # number of gradient steps)
LPgrad = rep(0,NG) # Vector to hold scaled driver
LPbase = Load # Save the nominal value
Loadvec = LPbase*Scaler
# Vectors to hold results
Avec = rep(0,NG)
Tvec = rep(0,NG)
ZBvec = rep(0,NG)
Dvec = rep(0,NG)
Allovec = rep(0,NG)
Lamvec = rep(0,NG)
Zprod = rep(0,NG)
NPPvec = rep(0,NG)
for(iG in 1:NG) { # Start gradient over parameter value
# Modify the load parameter
LPgrad[iG] = Scaler[iG]*LPbase
Load = Scaler[iG]*LPbase # Alter the P load
# Find equilibria for Experimental conditions
Y0 = c(AC.AR,TPOCAR,ZB,Dstar) # guesses
lY0 = log(Y0)
ATZeq = optim(lY0,dATZdt.eq,method='Nelder-Mead')
parest = exp(ATZeq$par)
Avec[iG] = parest[1]
Tvec[iG] = parest[2]
ZBvec[iG] = parest[3]
Dvec[iG] = parest[4]
# Check stability
JAC = jacobian(dATZdt.jac,parest)
JAC.lamda = eigen(JAC,only.values=T)
Lmods = Mod(JAC.lamda$values)
iLmax = which.max(Lmods) # which eigenvalue has maximum modulus?
Lamvec[iG] = JAC.lamda$values[iLmax] # Save the eigenvalue with max modulus
# Compute allochthony for estimates
gQAZ = gAZ*attack*Avec[iG]*ZBvec[iG]/(1 + attack*handle*Avec[iG])
gQTZ = gTZ*attack*Tvec[iG]*ZBvec[iG]/(1 + attack*handle*Tvec[iG])
gQDZ = gDZ*attack*Dvec[iG]*ZBvec[iG]/(1 + attack*handle*Dvec[iG])
Allovec[iG] = gQTZ/(gQTZ + gQAZ + gQDZ)
# Zooplankton secondary production
Zprod[iG] = gQTZ + gQAZ + gQDZ - mZnp*ZBvec[iG]
# Compute GPP & NPP
vChl = Avec[iG]*(1/CChl)*1000*(1/ZT) # convert g C/m2 to mg Chl/m3
GPPtemp = Grow.Phyto(vChl,DOC,Load,Zvec,dz)
NPPvec[iG] = GPPtemp - s.A*Avec[iG]
}
# Plots
windows()
par(mfrow=c(2,2),cex.axis=1.2,cex.lab=1.2,mar=c(5, 4.2, 4, 2) + 0.1)
plot(Loadvec,Avec,type='l',lwd=2,col='forestgreen',
xlab = 'P Load, mg/(m2 d)', ylab = 'Phytos')
plot(Loadvec,Tvec,type='l',lwd=2,col='darkred',
xlab = 'P Load, mg/(m2 d)', ylab = 'TPOC')
plot(Loadvec,ZBvec,type='l',lwd=2,col='blue',
xlab = 'P Load, mg/(m2 d)', ylab = 'Zoopl')
plot(Loadvec,Allovec,type='l',lwd=2,col='sienna',
xlab = 'P Load, mg/(m2 d)', ylab = 'Allochthony')
Lsign = sign(Re(Lamvec))
Lamda = Lsign*Mod(Lamvec)
Lsym = rep(19,NG) # symbol for real vs complex
imLam = Im(Lamvec)
Lsym = ifelse(imLam == 0,19,21)
windows()
par(mfrow=c(1,1),cex.axis=1.5,cex.lab=1.5,mar=c(5, 4.2, 4, 2) + 0.1)
plot(Loadvec,Lamda,type='p',pch=Lsym,col='red',cex=1.5,
xlab='P Load, mg/(m2 d)',ylab='Max Eigenvalue',
main='Solid -> real, Open -> complex')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_create_sample_findings}
\alias{guardduty_create_sample_findings}
\title{Generates example findings of types specified by the list of finding
types}
\usage{
guardduty_create_sample_findings(DetectorId, FindingTypes)
}
\arguments{
\item{DetectorId}{[required] The ID of the detector to create sample findings for.}
\item{FindingTypes}{Types of sample findings that you want to generate.}
}
\description{
Generates example findings of types specified by the list of finding
types. If 'NULL' is specified for findingTypes, the API generates
example findings of all supported finding types.
}
\section{Request syntax}{
\preformatted{svc$create_sample_findings(
DetectorId = "string",
FindingTypes = list(
"string"
)
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/guardduty_create_sample_findings.Rd | permissive | peoplecure/paws | R | false | true | 865 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_create_sample_findings}
\alias{guardduty_create_sample_findings}
\title{Generates example findings of types specified by the list of finding
types}
\usage{
guardduty_create_sample_findings(DetectorId, FindingTypes)
}
\arguments{
\item{DetectorId}{[required] The ID of the detector to create sample findings for.}
\item{FindingTypes}{Types of sample findings that you want to generate.}
}
\description{
Generates example findings of types specified by the list of finding
types. If 'NULL' is specified for findingTypes, the API generates
example findings of all supported finding types.
}
\section{Request syntax}{
\preformatted{svc$create_sample_findings(
DetectorId = "string",
FindingTypes = list(
"string"
)
)
}
}
\keyword{internal}
|
#' @title Cross validation, n-fold for generalized boosted regression modeling (gbm)
#'
#' @description This function is a cross validation function for generalized
#' boosted regression modeling.
#'
#' @param trainx a dataframe or matrix contains columns of predictive variables.
#' @param trainy a vector of response, must have length equal to the number of
#' rows in trainx.
#' @param var.monotone an optional vector, the same length as the number of
#' predictors, indicating which variables have a monotone increasing (+1),
#' decreasing (-1), or arbitrary (0) relationship with the outcome. By default,
#' a vector of 0 is used.
#' @param family either a character string specifying the name of the distribution to
#' use or a list with a component name specifying the distribution and any
#' additional parameters needed. See gbm for details. By default, "gaussian" is
#' used.
#' @param n.trees the total number of trees to fit. This is equivalent to the
#' number of iterations and the number of basis functions in the additive
#' expansion. By default, 3000 is used.
#' @param learning.rate a shrinkage parameter applied to each tree in the
#' expansion. Also known as step-size reduction.
#' @param interaction.depth the maximum depth of variable interactions.
#' 1 implies an additive model, 2 implies a model with up to 2-way
#' interactions, etc. By default, 2 is used.
#' @param bag.fraction the fraction of the training set observations randomly
#' selected to propose the next tree in the expansion. By default, 0.5 is used.
#' @param train.fraction The first train.fraction * nrows(data) observations
#' are used to fit the gbm and the remainder are used for computing
#' out-of-sample estimates of the loss function.
#' @param n.minobsinnode minimum number of observations in the trees terminal
#' nodes. Note that this is the actual number of observations not the total
#' weight. By default, 10 is used.
#' @param cv.fold integer; number of folds in the cross-validation. it is also
#' the number of cross-validation folds to perform within gbm. if > 1,
#' then apply n-fold cross validation; the default is 10, i.e., 10-fold cross
#' validation that is recommended.
#' @param weights an optional vector of weights to be used in the fitting
#' process. Must be positive but do not need to be normalized.
#' If keep.data = FALSE in the initial call to gbm then it is the user's
#' responsibility to resupply the weights to gbm.more. By default, a vector of
#' 1 is used.
#' @param keep.data a logical variable indicating whether to keep the data and
#' an index of the data stored with the object. Keeping the data and index
#' makes subsequent calls to gbm.more faster at the cost of storing an extra
#' copy of the dataset. By default, 'FALSE' is used.
#' @param verbose If TRUE, gbm will print out progress and performance
#' indicators. By default, 'TRUE' is used.
#' @param n.cores The number of CPU cores to use. See gbm for details. By
#' default, 6 is used.
#' @param predacc can be either "VEcv" for vecv or "ALL" for all measures
#' in function pred.acc.
#' @param ... other arguments passed on to gbm.
#'
#' @return A list with the following components:
#' for numerical data: me, rme, mae, rmae, mse, rmse, rrmse, vecv and e1; or vecv
#' for categorical data: correct classification rate (ccr.cv) and kappa (kappa.cv)
#'
#' @note This function is largely based on rf.cv (see Li et al. 2013),
#' rfcv in randomForest and gbm.
#'
#' @references Li, J., J. Siwabessy, M. Tran, Z. Huang, and A. Heap. 2013.
#' Predicting Seabed Hardness Using Random Forest in R. Pages 299-329 in Y.
#' Zhao and Y. Cen, editors. Data Mining Applications with R. Elsevier.
#'
#' Li, J. 2013. Predicting the spatial distribution of seabed gravel content
#' using random forest, spatial interpolation methods and their hybrid methods.
#' Pages 394-400 The International Congress on Modelling and Simulation
#' (MODSIM) 2013, Adelaide.
#'
#' Liaw, A. and M. Wiener (2002). Classification and Regression by
#' randomForest. R News 2(3), 18-22.
#'
#' Greg Ridgeway with contributions from others (2015). gbm: Generalized
#' Boosted Regression Models. R package version 2.1.1.
#' https://CRAN.R-project.org/package=gbm
#'
#' @author Jin Li
#' @examples
#' \dontrun{
#' data(sponge)
#'
#' gbmcv1 <- gbmcv(sponge[, -c(3)], sponge[, 3], cv.fold = 10,
#' family = "poisson", n.cores=2, predacc = "ALL")
#' gbmcv1
#'
#' n <- 20 # number of iterations, 60 to 100 is recommended.
#' VEcv <- NULL
#' for (i in 1:n) {
#' gbmcv1 <- gbmcv(sponge[, -c(3)], sponge[, 3], cv.fold = 10,
#' family = "poisson", n.cores=2, predacc = "VEcv")
#' VEcv [i] <- gbmcv1
#' }
#' plot(VEcv ~ c(1:n), xlab = "Iteration for gbm", ylab = "VEcv (%)")
#' points(cumsum(VEcv) / c(1:n) ~ c(1:n), col = 2)
#' abline(h = mean(VEcv), col = 'blue', lwd = 2)
#' }
#'
#' @export
gbmcv <- function (trainx, trainy, var.monotone = rep(0, ncol(trainx)),
  family = "gaussian",
  n.trees = 3000, # default number of boosting iterations (trees)
  learning.rate = 0.001,
  interaction.depth = 2,
  bag.fraction = 0.5,
  train.fraction = 1.0,
  n.minobsinnode = 10,
  cv.fold = 10, # because of the stratified resampling used below, leave-one-out cv is not supported
  weights = rep(1, nrow(trainx)), # by default all observations weigh equally
  keep.data = FALSE,
  verbose = TRUE,
  n.cores = 6,
  predacc = "VEcv", ...) {
  # Only numeric responses are supported; reject factors up front.
  if (is.factor(trainy)) {
    stop ("This function is not for categorical response variable")
  }
  n <- nrow(trainx)
  # Stratify the response so each fold gets a similar distribution of trainy:
  # responses with few distinct values are used as-is, otherwise they are
  # binned by quintile before fold assignment.
  if (dim(table(trainy)) <= 4) {
    f <- trainy
  } else {
    f <- cut(trainy, c(-Inf, stats::quantile(trainy, 1:4/5), Inf))
  }
  nlvl <- table(f)
  idx <- numeric(n)
  for (i in seq_along(nlvl)) {
    idx[which(f == levels(f)[i])] <- sample(rep(1:cv.fold, length = nlvl[i]))
  }
  # Cross validation: fit on all folds but i, predict the held-out fold i.
  cv.pred <- numeric(n)  # preallocated; every observation is predicted exactly once
  for (i in seq_len(cv.fold)) {
    # NOTE(review): `...` is documented as passed on to gbm but is not
    # forwarded here; confirm intent before forwarding (named clashes would error).
    all.gbm1 <- gbm::gbm(trainy[idx != i] ~ ., data = trainx[idx != i, , drop = FALSE],
                         var.monotone = var.monotone,
                         distribution = as.character(family),
                         n.trees = n.trees,
                         shrinkage = learning.rate,
                         interaction.depth = interaction.depth,
                         bag.fraction = bag.fraction,
                         train.fraction = train.fraction,
                         n.minobsinnode = n.minobsinnode,
                         weights = weights[idx != i],
                         cv.folds = cv.fold,
                         keep.data = keep.data,
                         verbose = verbose,
                         n.cores = n.cores)
    # gbm predictions at the internally cross-validated optimal number of trees
    data.pred <- trainx[idx == i, , drop = FALSE]
    best.iter <- gbm::gbm.perf(all.gbm1, method = "cv")
    if (verbose) print(best.iter)  # report only when verbose output is requested
    cv.pred[idx == i] <- gbm::predict.gbm(all.gbm1, data.pred,
                                          n.trees = best.iter, type = "response")
  }
  # Predictive accuracy assessment
  if (predacc == "VEcv") {
    predictive.accuracy <- vecv(trainy, cv.pred)
  } else if (predacc == "ALL") {
    predictive.accuracy <- pred.acc(trainy, cv.pred)
  } else {
    stop ("This measure is not supported in this version!")
  }
  predictive.accuracy
}
| /R/gbmcv.R | no_license | cran/spm | R | false | false | 7,048 | r | #' @title Cross validation, n-fold for generalized boosted regression modeling (gbm)
#'
#' @description This function is a cross validation function for generalized
#' boosted regression modeling.
#'
#' @param trainx a dataframe or matrix contains columns of predictive variables.
#' @param trainy a vector of response, must have length equal to the number of
#' rows in trainx.
#' @param var.monotone an optional vector, the same length as the number of
#' predictors, indicating which variables have a monotone increasing (+1),
#' decreasing (-1), or arbitrary (0) relationship with the outcome. By default,
#' a vector of 0 is used.
#' @param family either a character string specifying the name of the distribution to
#' use or a list with a component name specifying the distribution and any
#' additional parameters needed. See gbm for details. By default, "gaussian" is
#' used.
#' @param n.trees the total number of trees to fit. This is equivalent to the
#' number of iterations and the number of basis functions in the additive
#' expansion. By default, 3000 is used.
#' @param learning.rate a shrinkage parameter applied to each tree in the
#' expansion. Also known as step-size reduction.
#' @param interaction.depth the maximum depth of variable interactions.
#' 1 implies an additive model, 2 implies a model with up to 2-way
#' interactions, etc. By default, 2 is used.
#' @param bag.fraction the fraction of the training set observations randomly
#' selected to propose the next tree in the expansion. By default, 0.5 is used.
#' @param train.fraction The first train.fraction * nrows(data) observations
#' are used to fit the gbm and the remainder are used for computing
#' out-of-sample estimates of the loss function.
#' @param n.minobsinnode minimum number of observations in the trees terminal
#' nodes. Note that this is the actual number of observations not the total
#' weight. By default, 10 is used.
#' @param cv.fold integer; number of folds in the cross-validation. it is also
#' the number of cross-validation folds to perform within gbm. if > 1,
#' then apply n-fold cross validation; the default is 10, i.e., 10-fold cross
#' validation that is recommended.
#' @param weights an optional vector of weights to be used in the fitting
#' process. Must be positive but do not need to be normalized.
#' If keep.data = FALSE in the initial call to gbm then it is the user's
#' responsibility to resupply the weights to gbm.more. By default, a vector of
#' 1 is used.
#' @param keep.data a logical variable indicating whether to keep the data and
#' an index of the data stored with the object. Keeping the data and index
#' makes subsequent calls to gbm.more faster at the cost of storing an extra
#' copy of the dataset. By default, 'FALSE' is used.
#' @param verbose If TRUE, gbm will print out progress and performance
#' indicators. By default, 'TRUE' is used.
#' @param n.cores The number of CPU cores to use. See gbm for details. By
#' default, 6 is used.
#' @param predacc can be either "VEcv" for vecv or "ALL" for all measures
#' in function pred.acc.
#' @param ... other arguments passed on to gbm.
#'
#' @return A list with the following components:
#' for numerical data: me, rme, mae, rmae, mse, rmse, rrmse, vecv and e1; or vecv
#' for categorical data: correct classification rate (ccr.cv) and kappa (kappa.cv)
#'
#' @note This function is largely based on rf.cv (see Li et al. 2013),
#' rfcv in randomForest and gbm.
#'
#' @references Li, J., J. Siwabessy, M. Tran, Z. Huang, and A. Heap. 2013.
#' Predicting Seabed Hardness Using Random Forest in R. Pages 299-329 in Y.
#' Zhao and Y. Cen, editors. Data Mining Applications with R. Elsevier.
#'
#' Li, J. 2013. Predicting the spatial distribution of seabed gravel content
#' using random forest, spatial interpolation methods and their hybrid methods.
#' Pages 394-400 The International Congress on Modelling and Simulation
#' (MODSIM) 2013, Adelaide.
#'
#' Liaw, A. and M. Wiener (2002). Classification and Regression by
#' randomForest. R News 2(3), 18-22.
#'
#' Greg Ridgeway with contributions from others (2015). gbm: Generalized
#' Boosted Regression Models. R package version 2.1.1.
#' https://CRAN.R-project.org/package=gbm
#'
#' @author Jin Li
#' @examples
#' \dontrun{
#' data(sponge)
#'
#' gbmcv1 <- gbmcv(sponge[, -c(3)], sponge[, 3], cv.fold = 10,
#' family = "poisson", n.cores=2, predacc = "ALL")
#' gbmcv1
#'
#' n <- 20 # number of iterations, 60 to 100 is recommended.
#' VEcv <- NULL
#' for (i in 1:n) {
#' gbmcv1 <- gbmcv(sponge[, -c(3)], sponge[, 3], cv.fold = 10,
#' family = "poisson", n.cores=2, predacc = "VEcv")
#' VEcv [i] <- gbmcv1
#' }
#' plot(VEcv ~ c(1:n), xlab = "Iteration for gbm", ylab = "VEcv (%)")
#' points(cumsum(VEcv) / c(1:n) ~ c(1:n), col = 2)
#' abline(h = mean(VEcv), col = 'blue', lwd = 2)
#' }
#'
#' @export
gbmcv <- function (trainx, trainy, var.monotone = rep(0, ncol(trainx)),
  family = "gaussian",
  n.trees = 3000, # default number of trees
  learning.rate = 0.001,
  interaction.depth = 2,
  bag.fraction = 0.5,
  train.fraction = 1.0,
  n.minobsinnode = 10,
  cv.fold = 10, # leave-one-out cv is not possible with the resampling scheme below
  weights = rep(1, nrow(trainx)), # equal weights by default
  keep.data = FALSE,
  verbose = TRUE,
  n.cores = 6,
  predacc = "VEcv", ...) {
  # n-fold cross validation of a gbm model; returns the requested
  # predictive-accuracy measure(s) computed from the out-of-fold predictions.
  # Categorical responses are not supported.
  if (is.factor(trainy)) {
    stop ("This function is not for categorical response variable")
  }
  n <- nrow(trainx)
  # Stratification variable: the raw response when it has few distinct
  # values, otherwise the response binned into quintiles.
  if (dim(table(trainy)) <= 4) {
    strat <- trainy
  } else {
    strat <- cut(trainy, c(-Inf, stats::quantile(trainy, 1:4/5), Inf))
  }
  # Randomly assign a fold number within each stratum.
  strata_sizes <- table(strat)
  fold_id <- numeric(n)
  for (lvl in seq_along(strata_sizes)) {
    members <- which(strat == levels(strat)[lvl])
    fold_id[members] <- sample(rep(1:cv.fold, length = strata_sizes[lvl]))
  }
  # Fit on all folds except k, then predict the held-out fold k.
  oob_pred <- NULL
  for (k in 1:cv.fold) {
    fitted_gbm <- gbm::gbm(trainy[fold_id != k] ~ .,
                           data = trainx[fold_id != k, , drop = FALSE],
                           var.monotone = var.monotone,
                           distribution = as.character(family),
                           n.trees = n.trees,
                           shrinkage = learning.rate,
                           interaction.depth = interaction.depth,
                           bag.fraction = bag.fraction,
                           train.fraction = train.fraction,
                           n.minobsinnode = n.minobsinnode,
                           weights = weights[fold_id != k],
                           cv.folds = cv.fold,
                           keep.data = keep.data,
                           verbose = verbose,
                           n.cores = n.cores)
    # Predict the held-out fold at the cross-validated optimal tree count.
    holdout <- trainx[fold_id == k, , drop = FALSE]
    best.iter <- gbm::gbm.perf(fitted_gbm, method = "cv")
    print(best.iter)
    oob_pred[fold_id == k] <- gbm::predict.gbm(fitted_gbm, holdout,
                                               n.trees = best.iter,
                                               type = "response")
  }
  # Predictive accuracy assessment of the out-of-fold predictions.
  switch(predacc,
         VEcv = vecv(trainy, oob_pred),
         ALL  = pred.acc(trainy, oob_pred),
         stop ("This measure is not supported in this version!"))
}
|
library('zoo')
is.normal <- function(dataset) {
  # Test whether `dataset` is consistent with a Normal distribution.
  #
  # Draws a histogram of the data, then runs a simulated-p-value chi-squared
  # goodness-of-fit test comparing the observed bin counts against the bin
  # probabilities implied by a Normal distribution fitted with the sample
  # mean and standard deviation.
  #
  # Args:
  #   dataset: a numeric (double or integer) vector.
  # Returns:
  #   A list with `p.value` (the simulated chi-squared p-value) and
  #   `is.significant` (a plain-English interpretation at alpha = 0.1).
  #
  # inherits() handles objects with more than one class safely; the original
  # class(x) != "..." comparison errors in R >= 4.3 when class() returns a
  # vector (e.g. for matrices).
  if (!inherits(dataset, c("numeric", "integer"))) {
    stop("Class of dataset must be numeric or integer")
  }
  # Plot histogram of dataset (also provides the bin breaks and counts)
  p1 <- hist(dataset, right = FALSE, ann = FALSE, density = 20)
  title(main = paste("Histogram of", deparse(substitute(dataset))),
        ylab = "Frequency", xlab = "x")
  # Cumulative Normal probability at each histogram break
  breaks_norm <- pnorm(p1$breaks, mean = mean(dataset), sd = sd(dataset))
  # Per-bin Normal probabilities; base diff() replaces the equivalent
  # zoo::rollapply(x, 2, function(v) v[2] - v[1])
  null.probs_norm <- diff(breaks_norm)
  # Chi-squared goodness-of-fit test of observed counts vs Normal bin
  # probabilities, with a simulated p-value
  norm_test <- chisq.test(p1$counts, p = null.probs_norm, rescale.p = TRUE,
                          simulate.p.value = TRUE, B = 9999)
  pvalue <- norm_test$p.value
  # Build the output list and interpret significance with alpha = 0.1
  output <- list(p.value = pvalue, is.significant = character())
  if (pvalue > .1) {
    output$is.significant <- "Data may be significantly consistent with a Normal Distribution"
  } else {
    output$is.significant <- "Data is NOT significantly consistent with a Normal Distribution"
  }
  output
}
# Example usage; is.normal() requires a numeric vector. The original bare
# call is.normal() errored at run time because `dataset` has no default value.
# is.normal(rnorm(100))

# Collect the normality-test p-value for every column of `testdataKF`
# (assumed to be a data frame of numeric columns, defined elsewhere -- TODO
# confirm). Column 1 holds the variable name, column 2 the p-value. Note that
# assigning character names into the numeric matrix coerces the whole matrix
# to character, so p-values are stored as strings.
pvalues <- matrix(0, ncol(testdataKF), 2)
pvalues[, 1] <- colnames(testdataKF)
for (i in seq_len(ncol(testdataKF))) {
  pvalues[i, 2] <- is.normal(testdataKF[, i])$p.value
}
| /is.normal.R | no_license | kwachs/data-visualization-package | R | false | false | 1,293 | r | library('zoo')
is.normal <- function(dataset) {
  # Chi-squared goodness-of-fit check of `dataset` against a Normal
  # distribution fitted with the sample mean/sd; also draws a histogram.
  # Returns a list with the simulated p-value and a textual significance
  # verdict at alpha = 0.1.
  cls <- class(dataset)
  if ((cls != "numeric") && (cls != "integer")) {
    stop("Class of dataset must be numeric or integer")
  }
  # Histogram supplies both the observed bin counts and the break points
  histo <- hist(dataset, right = FALSE, ann = FALSE, density = 20)
  title(main = paste("Histogram of", deparse(substitute(dataset))),
        ylab = "Frequency", xlab = "x")
  # Normal CDF at each break, then per-bin probabilities via successive
  # differences (equivalent to the original rolling two-point difference)
  cum_probs <- pnorm(histo$breaks, mean = mean(dataset), sd = sd(dataset))
  bin_probs <- diff(cum_probs)
  # Simulated-p-value chi-squared test of observed counts vs Normal bins
  gof <- chisq.test(histo$counts, p = bin_probs, rescale.p = TRUE,
                    simulate.p.value = TRUE, B = 9999)
  p_val <- gof$p.value
  # Interpret significance with alpha = 0.1
  verdict <- if (p_val > .1) {
    "Data may be significantly consistent with a Normal Distribution"
  } else {
    "Data is NOT significantly consistent with a Normal Distribution"
  }
  return(list(p.value = p_val, is.significant = verdict))
}
# NOTE(review): is.normal() is called here without the required `dataset`
# argument, so this line errors at run time; it looks like a leftover from
# interactive testing -- confirm and remove it or supply a vector.
is.normal()
# Collect the normality-test p-value for every column of `testdataKF`
# (defined elsewhere; presumably a data frame of numeric columns -- verify).
# Column 1: variable name; column 2: p-value. Assigning character names into
# the numeric matrix coerces the whole matrix to character, so p-values are
# stored as strings.
pvalues=matrix(0,ncol(testdataKF),2)
pvalues[,1]=colnames(testdataKF)
for(i in 1:ncol(testdataKF)){
  pvalues[i,2]=is.normal(testdataKF[,i])$p.value
}
|
\docType{package}
\name{dxR-package}
\alias{dxR}
\alias{dxR-package}
\title{DNAnexus R Client Library}
\description{
dxR is an R extension containing API wrapper functions
for interacting with the new DNAnexus platform.
}
\details{
\tabular{ll}{ Package: \tab dxR\cr Type: \tab Package\cr
Version: \tab 0.185.0\cr License: \tab Apache License (==
2.0)\cr }
}
\author{
Katherine Lai
Maintainer: Katherine Lai <klai@dnanexus.com>
}
| /src/R/dxR/man/dxR-package.Rd | permissive | storozhilov/dx-toolkit | R | false | false | 446 | rd | \docType{package}
\name{dxR-package}
\alias{dxR}
\alias{dxR-package}
\title{DNAnexus R Client Library}
\description{
dxR is an R extension containing API wrapper functions
for interacting with the new DNAnexus platform.
}
\details{
\tabular{ll}{ Package: \tab dxR\cr Type: \tab Package\cr
Version: \tab 0.185.0\cr License: \tab Apache License (==
2.0)\cr }
}
\author{
Katherine Lai
Maintainer: Katherine Lai <klai@dnanexus.com>
}
|
###############################################################################
## Script: run_analysis.R
## This script downloads data files used to train and test human activity
## recognition using smartphones. It converts the data into a tidy dataset,
## extracts the mean and standard deviation variables, and writes it to a file
## in the working directory called "tidy_dataset.txt".
## It also creates a new tidy dataset, saved as "averages_dataset.txt", with
## the averages of each variable for each activity and each subject.
library(plyr)     # Needed for ldply; loaded before dplyr so dplyr verbs win
library(dplyr)    # Needed for filter, mutate, rename, as_tibble below
library(reshape2) # Needed for melt and dcast
###############################################################################
#### Step 0. Download and unzip the data files
file_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
file_name <- "UCI HAR Dataset.zip"
data_dir <- "UCI HAR Dataset"
# Download and unzip only when not already present, so re-runs are cheap.
if (!file.exists(file_name)) {
  message("Downloading data file...")
  download.file(file_url, destfile = file_name, method = "curl")
}
if (!dir.exists(data_dir)) {
  message("Unzipping data file...")
  unzip(file_name)
}
###############################################################################
#### Step 1. Merge the training and the test sets to create one data set.
## Each of the two sets is spread across multiple files (subject, X, and y).
## After reading in all the files, the data is split by source file to extract
## the relevant columns, and the rows are then combined. The dataset is then
## melted to convert from a wide to a tall format.
## Read all the data files
message("Reading observation files from test and train directories...")
paths <- dir(file.path(data_dir), pattern = "^(subject|X|y)_(train|test)\\.txt$",
             full.names = TRUE, recursive = TRUE)
names(paths) <- basename(paths)
data <- ldply(paths, read.table, header = FALSE, stringsAsFactors = FALSE)
## Reshape and tidy data
message("Reshaping data...")
## Split the rows based on source file (.id column added by ldply)
split_data <- split(data, data$.id)
## Extract the column values for each set and then bind all the rows together.
## Construct the dataset starting with the fixed variable columns and create a
## new column to indicate the set (Test/Train).
df <- as_tibble(rbind(cbind(subject_id = split_data$subject_test.txt$V1,
                            set = "Test",
                            activity_id = split_data$y_test.txt$V1,
                            split_data$X_test.txt[, 2:ncol(split_data$X_test.txt)]),
                      cbind(subject_id = split_data$subject_train.txt$V1,
                            set = "Train",
                            activity_id = split_data$y_train.txt$V1,
                            split_data$X_train.txt[, 2:ncol(split_data$X_train.txt)])))
## Column headers are variables, instead of variable names.
# Melt the dataset (change from wide to long format)
molten_df <- melt(df, id.vars = c("subject_id", "set", "activity_id"))
###############################################################################
#### Step 2. Extract only the measurements on the mean and standard deviation
#### for each measurement.
## Variable names are obtained from the features.txt file.
## Only variables that include "mean()" or "std()" in their names are extracted.
## Read features recorded in columns V1-V561
message("Reading features file...")
features <- read.table(file.path(data_dir, "features.txt"), header = FALSE,
                       col.names = c("feature_id", "feature_label"),
                       stringsAsFactors = FALSE)
# Determine which features are based on the mean or standard deviation of a
# measurement by searching for labels that contain either "mean()" or "std()"
fns <- features[grep("(mean|std)\\(", features$feature_label), ]
# Extract measurements associated with mean/std functions
message("Extracting measurements on the mean and standard deviation...")
molten_df <- filter(molten_df, sub("V", "", variable) %in% fns$feature_id)
#### Step 3: Use descriptive activity names to name the activities in the data set
# Read activity labels from file and replace corresponding activity id values.
# stringsAsFactors = FALSE keeps labels as character, consistent with the
# other read.table calls above.
message("Reading activity file...")
activities <- read.table(file.path(data_dir, "activity_labels.txt"),
                         col.names = c("activity_id", "activity_label"),
                         stringsAsFactors = FALSE)
molten_df <- molten_df %>%
  mutate(activity_id = activities$activity_label[activity_id]) %>%
  rename("activity" = "activity_id")
###############################################################################
#### Step 4. Appropriately label the data set with descriptive variable names
## Look up names in the features table; remove parentheses and replace hyphens
## with underscores to comply with the naming convention.
tidy_df <- mutate(molten_df,
                  variable = gsub("-", "_",        # Replace hyphens with underscores
                             gsub("\\(\\)", "",    # Remove parentheses
                                  features$feature_label[ # Look up variable name
                                    as.integer(sub("V", "", variable))])))
#### Write tidy dataset to "tidy_dataset.txt"
message("Writing tidy dataset to \"tidy_dataset.txt\" in the working directory")
write.table(tidy_df, "tidy_dataset.txt", row.names = FALSE)
###############################################################################
#### Step 5. Create a second, independent tidy data set with the average of
#### each variable for each activity and each subject.
## Cast the tidy dataset using fixed variables subject_id and activity,
## averaging each measurement (stored as variable/value pairs in the dataset)
## for each subject and each activity.
averages_df <- dcast(tidy_df, subject_id + activity ~ variable, fun.aggregate = mean)
#### Write averages dataset to "averages_dataset.txt"
message("Writing averages dataset to \"averages_dataset.txt\" in the working directory")
write.table(averages_df, "averages_dataset.txt", row.names = FALSE)
message("Use the following code to read the resulting datasets:")
message("tds <- read.table(\"tidy_dataset.txt\", header = TRUE, stringsAsFactors = FALSE)")
message("avg_ds <- read.table(\"averages_dataset.txt\", header = TRUE, stringsAsFactors = FALSE)")
| /run_analysis.R | no_license | serendipicat/GCDataCourseProject | R | false | false | 6,329 | r | ###############################################################################
## Script: run_analysis.R
## This script downloads data files used to train and test human activity recognition
## using smartphones. It converts the data into a tidy dataset, extracts the
## mean and standard deviation variables, and writes it to a file in the working
## directory called "tidy_dataset.txt".
## It also creates a new tidy dataset, saved as "averages_dataset.txt", with the
## averages of each variable for each activity and each subject.
library(plyr) # Needed for call to ldply
library(dplyr) # Needed for filter, mutate, rename and as_tibble below
library(reshape2) # Needed for melt and dcast
###############################################################################
#### Step 0. Download and unzip the data files
## Download and unzip only when not already present, so re-runs are cheap.
file_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
file_name <-"UCI HAR Dataset.zip"
data_dir <- "UCI HAR Dataset"
if (!file.exists(file_name)) {
  message("Downloading data file...")
  download.file(file_url, destfile = file_name, method = "curl")
}
if (!dir.exists(data_dir)) {
  message("Unzipping data file...")
  unzip(file_name)
}
###############################################################################
#### Step 1. Merge the training and the test sets to create one data set.
## Each of the two sets is spread across multiple files (subject, X, and y).
## After reading in all the files, the data is split by source file, to extract
## the relevant columns, and the rows are then combined. The dataset is then melted
## to convert from a wide to a tall format.
## Read all the data files
message("Reading observation files from test and train directories...")
paths = dir(file.path(data_dir), pattern = "^(subject|X|y)_(train|test)\\.txt$",
            full.names = TRUE, recursive = TRUE)
names(paths) <- basename(paths)
data <- ldply(paths, read.table, header = FALSE, stringsAsFactors = FALSE)
## Reshape and tidy data
message("Reshaping data...")
## Split the rows based on source file (.id column added by ldply)
split_data <- split(data, data$.id)
## Extract the column values for each set and then bind all the rows together.
## Construct the dataset starting with the fixed variable columns and
## create a new column to indicate the set (Test/Train).
df = as_tibble(rbind(cbind(subject_id = split_data$subject_test.txt$V1,
                           set = "Test",
                           activity_id = split_data$y_test.txt$V1,
                           split_data$X_test.txt[,2:ncol(split_data$X_test.txt)]),
                     cbind(subject_id = split_data$subject_train.txt$V1,
                           set = "Train",
                           activity_id = split_data$y_train.txt$V1,
                           split_data$X_train.txt[,2:ncol(split_data$X_train.txt)])))
## Column headers are variables, instead of variable names.
# Melt the dataset (change from wide to long format)
molten_df <- melt(df, id.vars = c("subject_id", "set", "activity_id"))
###############################################################################
#### Step 2. Extract only the measurements on the mean and standard deviation
#### for each measurement.
## Variable names are obtained from the features.txt file.
## Only variables that include "mean()" or "std()" in their names are extracted.
## Read features recorded in columns V1-V561
message("Reading features file...")
features <- read.table(file.path(data_dir, "features.txt"), header = FALSE,
                       col.names = c("feature_id", "feature_label"), stringsAsFactors = FALSE)
# Determine which features are based on the mean or standard deviation of a measurement
# by searching for labels that contain either "mean()" or "std()"
fns <- features[grep("(mean|std)\\(", features$feature_label),]
# Extract measurements associated with mean/std functions
message("Extracting measurements on the mean and standard deviation...")
molten_df <- filter(molten_df, sub("V", "", variable) %in% fns$feature_id)
#### Step 3: Use descriptive activity names to name the activities in the data set
# Read activity labels from file and replace corresponding activity id values
# NOTE(review): unlike the other read.table calls, this one does not set
# stringsAsFactors = FALSE, so labels are factors in R < 4.0 -- confirm intent.
message("Reading activity file...")
activities <- read.table(file.path(data_dir, "activity_labels.txt"),
                         col.names = c("activity_id", "activity_label"))
molten_df <- molten_df %>%
  mutate(activity_id = activities$activity_label[activity_id]) %>%
  rename("activity" = "activity_id")
###############################################################################
#### Step 4. Appropriately label the data set with descriptive variable names
## Read names from features file; remove parentheses and replace hyphens with
## underscores to comply with the naming convention
tidy_df <- mutate(molten_df,
                  variable = gsub("-", "_", # Replace hyphens with underscores
                  gsub("\\(\\)","", # Remove parentheses
                  features$feature_label[ # Look up variable name
                  as.integer(sub("V", "", variable))])))
#### Write tidy dataset to "tidy_dataset.txt"
message("Writing tidy dataset to \"tidy_dataset.txt\" in the working directory")
write.table(tidy_df, "tidy_dataset.txt", row.names = FALSE)
###############################################################################
#### Step 5. Create a second, independent tidy data set with the average of each
#### variable for each activity and each subject.
## Cast the tidy dataset using fixed variables subject_id and activity,
## and calculating the averages of each measurement (stored as variable/value pairs in
## the dataset) for each subject and each activity.
averages_df <- dcast(tidy_df, subject_id + activity ~ variable, fun.aggregate = mean)
#### Write averages dataset to "averages_dataset.txt"
message("Writing averages dataset to \"averages_dataset.txt\" in the working directory")
write.table(averages_df, "averages_dataset.txt", row.names = FALSE)
message("Use the following code to read the resulting datasets:")
message("tds <- read.table(\"tidy_dataset.txt\", header = TRUE, stringsAsFactors = FALSE)")
message("avg_ds <- read.table(\"averages_dataset.txt\", header = TRUE, stringsAsFactors = FALSE)")
# NOTE(review): these two rm() calls look like alternatives from interactive
# use -- the first clears everything except previously scraped results in
# `nycm`, while the second wipes the whole workspace (making the first
# redundant if both run). rm(list = ls()) in a script is discouraged; run
# only the line you need.
rm(list = setdiff(ls(), "nycm"))
rm(list = ls())
gc()  # release memory after clearing the workspace
#############
##| Setup |##
#############
# Load Selenium #
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast for these hard dependencies.
require("RSelenium")
require("data.table")
# Start driver #
# Launches a local Selenium server driving Chrome; chromever must match the
# locally installed Chrome/chromedriver version (86.x here -- update as needed).
Dr <- rsDriver(port = 4344L,
               browser = c("chrome"),
               chromever = "86.0.4240.22")
# Start remote driver #
nycm.RemDr <- Dr$client
# Create empty data frame #
# One row per runner: identity, placement, finish time, and per-mile split
# times (splits kept as character strings, e.g. "1:23:45").
nycm <- data.frame(
  bib = integer(), name = character(), city = character(), country = character(), age = integer(), sex = character(), finish = character(), place = integer(), totRun = integer(),
  place.sex = integer(), place.age = integer(), av.pace = character(), mile3.time = character(), mile4.time = character(), mile5.time = character(), mile6.time = character(),
  mile7.time = character(), mile8.time = character(), mile9.time = character(), mile10.time = character(), mile11.time = character(), mile12.time = character(),
  mile13.time = character(), half.time = character(), mile14.time = character(), mile15.time = character(), mile16.time = character(), mile17.time = character(),
  mile18.time = character(), mile19.time = character(), mile20.time = character(), mile21.time = character(), mile22.time = character(), mile23.time = character(),
  mile24.time = character(), mile25.time = character(), mile26.time = character()
)
# OR #
# Read previous results file #
# Resume a partially completed scrape from a previously saved CSV instead of
# starting with the empty frame above; choose ONE of the two initializations.
nycm <- fread("scraperResults-71430.csv",
              header = TRUE,
              sep = ",",
              data.table = FALSE,
              stringsAsFactors = FALSE)
###############
##| Scraper |##
###############
for(i in 1:74000) {
# Navigation #
site <- paste0("https://results.nyrr.org/event/M2019/result/",i)
nycm.RemDr$navigate(site)
Sys.sleep(2) # For page load
# Test for results home redirect, if not scrape data #
if (unlist(nycm.RemDr$getCurrentUrl()) != "https://results.nyrr.org/home") {
# Age, sex, and bib number #
errorCatch <- try({
suppressMessages({
ageSexBibElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[1]/div[3]/div/div[1]")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
age <- NA
sex <- NA
bib <- i
} else {
age <- as.integer(gsub(".*?([0-9]+).*", "\\1", strsplit(as.character(ageSexBibElm$getElementText()), " ")[[1]][2]))
sex <- substr(strsplit(as.character(ageSexBibElm$getElementText()), " ")[[1]][2], 1, 1)
bib <- as.integer(strsplit(as.character(ageSexBibElm$getElementText()), " ")[[1]][8])
rm(ageSexBibElm)
}
rm(errorCatch)
# Name #
errorCatch <- try({
suppressMessages({
nameElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[1]/div[1]/div/div/div[1]")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
name <- NA
} else {
name <- as.character(nameElm$getElementText())
rm(nameElm)
}
rm(errorCatch)
# City and country #
errorCatch <- try({
suppressMessages({
cityCountryElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[1]/div[1]/div/div/div[2]")
})
},
silent = TRUE)
if ("try-error" %in% class(errorCatch)){
city <- NA
country <- NA
} else {
city <- strsplit(as.character(cityCountryElm$getElementText()), " \\| ")[[1]][1]
country <- strsplit(as.character(cityCountryElm$getElementText()), " \\| ")[[1]][2]
rm(cityCountryElm)
}
rm(errorCatch)
# Overall place #
errorCatch <- try({
suppressMessages({
placeElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[2]/div[3]/span[1]")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
place <- NA
} else {
place <- as.integer(gsub(",", "", as.character(placeElm$getElementText())))
rm(placeElm)
}
rm(errorCatch)
# Total runners (for participant type designation) #
errorCatch <- try({
suppressMessages({
totRunElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div[1]/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[2]/div[3]/span[2]")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
totRun <- NA
} else {
totRun <- as.integer(gsub(".*?([0-9]+).*", "\\1", gsub(",", "", as.character(totRunElm$getElementText()))))
rm(totRunElm)
}
rm(errorCatch)
# Sex place #
errorCatch <- try({
suppressMessages({
place.sexElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[3]/div[1]/span[1]")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
place.sex <- NA
} else {
place.sex <- as.integer(gsub(",", "", as.character(place.sexElm$getElementText())))
rm(place.sexElm)
}
rm(errorCatch)
# Age place #
errorCatch <- try({
suppressMessages({
place.ageElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[3]/div[2]/span[1]")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
place.age <- NA
} else {
place.age <- as.integer(gsub(",", "", as.character(place.ageElm$getElementText())))
rm(place.ageElm)
}
rm(errorCatch)
# Finish time #
errorCatch <- try({
suppressMessages({
finishElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[2]/div[1]/span")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
finish <- NA
} else {
finish <- as.character(finishElm$getElementText())
rm(finishElm)
}
rm(errorCatch)
# Average pace #
errorCatch <- try({
suppressMessages({
av.paceElm <- nycm.RemDr$findElement(using = "xpath", value ="/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[2]/div[2]/span")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
av.pace <- NA
} else {
av.pace <- as.character(av.paceElm$getElementText())
rm(av.paceElm)
}
rm(errorCatch)
# Mile 3 #
errorCatch <- try({
suppressMessages({
mile3.timeElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[6]/div[3]/div[1]/div[1]/div[2]/span")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
mile3.time <- NA
} else {
mile3.time <- as.character(mile3.timeElm$getElementText())
rm(mile3.timeElm)
}
rm(errorCatch)
# Mile 4 #
errorCatch <- try({
suppressMessages({
mile4.timeElm <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[6]/div[3]/div[1]/div[3]/div[2]/span")
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
mile4.time <- NA
} else {
mile4.time <- as.character(mile4.timeElm$getElementText())
rm(mile4.timeElm)
}
rm(errorCatch)
# Splits: miles 5-26 plus the half-marathon ----
# Every split on the results page follows the identical pattern: try to find a
# span by XPath and store its text, or NA when the element is absent (runners
# can miss timing mats).  Instead of repeating that try/else block 23 times,
# loop over a name -> XPath table and create the same mileN.time / half.time
# variables that the "Combine and add to results set" step below expects.
#
# scrape_split(): returns the element text at `xp`, or NA_character_ when the
# XPath lookup fails.  Uses the remote driver `nycm.RemDr` from the setup.
scrape_split <- function(xp) {
  elm <- try(suppressMessages(
    nycm.RemDr$findElement(using = "xpath", value = xp)
  ), silent = TRUE)
  if (inherits(elm, "try-error")) {
    NA_character_
  } else {
    as.character(elm$getElementText())
  }
}
# The splits sit in two panels; only the div index differs per split.
# (Indexes 2/6/10/14 are skipped on the page, hence the irregular sequences.)
panel1 <- "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[6]/div[3]/div[1]/div[%d]/div[2]/span"
panel2 <- "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[6]/div[3]/div[2]/div[%d]/div[2]/span"
split_xpaths <- c(
  setNames(sprintf(panel1, c(4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 17)),
           c(paste0("mile", 5:13, ".time"), "half.time", "mile14.time")),
  setNames(sprintf(panel2, c(1, 3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16)),
           paste0("mile", 15:26, ".time"))
)
# Assign mile5.time ... mile26.time (and half.time) in the current frame so the
# cbind() in the combine step keeps working unchanged.
for (split_var in names(split_xpaths)) {
  assign(split_var, scrape_split(split_xpaths[[split_var]]))
}
# Combine and add to results set #
# cbind() on scalars yields a 1-row character matrix whose column names are the
# variable names, so every column of `temp` (and hence of `nycm`) is character.
temp <- as.data.frame(
cbind(
bib, name, city, country, age, sex, finish, place, totRun, place.sex, place.age, av.pace, mile3.time, mile4.time, mile5.time, mile6.time, mile7.time, mile8.time,
mile9.time, mile10.time, mile11.time, mile12.time, mile13.time, half.time, mile14.time, mile15.time, mile16.time, mile17.time, mile18.time, mile19.time, mile20.time,
mile21.time, mile22.time, mile23.time, mile24.time, mile25.time, mile26.time
)
)
# NOTE(review): growing nycm with rbind() inside the loop is O(n^2) overall,
# but it keeps all partial results in memory if the scrape is interrupted.
nycm <- rbind(nycm, temp)
rm(temp)
}
}
# Write to file #
# Dump the accumulated results; na = "" writes missing splits as empty cells.
write.csv(nycm,
"scraperResults.csv",
quote = TRUE,
row.names = FALSE,
na = "")
# Open the results in the data viewer (interactive/RStudio use only).
View(nycm)
#:::::::::::::::::::::::::::::::::::::::$
# Scratch area: an empty copy of the try/else scraping template used above,
# kept for pasting when a new field is added.  It still executes (harmlessly)
# because only comments remain inside the braces.
errorCatch <- try({
suppressMessages({
# element assignment
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
# < - NA
} else {
# assignments
# rm(Elm)
}
rm(errorCatch)
# Alternative XPaths observed on some runners' pages (kept as reference):
#For some runners it's this xpath ...
#name <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div[1]/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[1]/div[1]/div/div/div[1]")"
#For some runners it's this xpath ...
#city <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div[1]/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[1]/div[1]/div/div/div[2]")
#For some runners it's this xpath ...
#finish <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div[1]/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[2]/div[1]/span")
| /nyrrScraper2.R | no_license | ibeforev/NyrrMarathonScraper | R | false | false | 20,308 | r | rm(list = setdiff(ls(), "nycm"))
rm(list = ls())
gc()
#############
##| Setup |##
#############
# Load packages ----
# library() (not require()) so a missing package fails loudly here rather than
# with a confusing "could not find function" error later.
library(RSelenium)
library(data.table)
# Start driver and grab the remote client used by the scraping loop ----
Dr <- rsDriver(port = 4344L,
browser = c("chrome"),
chromever = "86.0.4240.22")
nycm.RemDr <- Dr$client
# Results accumulator ----
# Resume from a previous partial scrape when the CSV is present; otherwise
# start from an empty, correctly-typed data frame.  (Previously both
# initialisations ran unconditionally, so fread() always clobbered the empty
# frame and errored when the file was absent.)
prev_results <- "scraperResults-71430.csv"
if (file.exists(prev_results)) {
nycm <- fread(prev_results,
header = TRUE,
sep = ",",
data.table = FALSE,
stringsAsFactors = FALSE)
} else {
nycm <- data.frame(
bib = integer(), name = character(), city = character(), country = character(), age = integer(), sex = character(), finish = character(), place = integer(), totRun = integer(),
place.sex = integer(), place.age = integer(), av.pace = character(), mile3.time = character(), mile4.time = character(), mile5.time = character(), mile6.time = character(),
mile7.time = character(), mile8.time = character(), mile9.time = character(), mile10.time = character(), mile11.time = character(), mile12.time = character(),
mile13.time = character(), half.time = character(), mile14.time = character(), mile15.time = character(), mile16.time = character(), mile17.time = character(),
mile18.time = character(), mile19.time = character(), mile20.time = character(), mile21.time = character(), mile22.time = character(), mile23.time = character(),
mile24.time = character(), mile25.time = character(), mile26.time = character()
)
}
###############
##| Scraper |##
###############
for (i in seq_len(74000)) {
# Navigate to the runner's result page; unknown ids redirect to /home.
nycm.RemDr$navigate(paste0("https://results.nyrr.org/event/M2019/result/", i))
Sys.sleep(2) # allow the page to load
if (unlist(nycm.RemDr$getCurrentUrl()) == "https://results.nyrr.org/home") next
# Helper: text of the element at `xp`, or NA_character_ when the element is
# missing (several fields and most splits are absent for some runners).
scrape_text <- function(xp) {
elm <- try(suppressMessages(
nycm.RemDr$findElement(using = "xpath", value = xp)
), silent = TRUE)
if (inherits(elm, "try-error")) NA_character_ else as.character(elm$getElementText())
}
# All fields live under this container.  `root1` (explicit /div[1] at body
# level) is the variant the page uses for the total-runners span.
root <- "/html/body/div/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]"
root1 <- "/html/body/div[1]/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]"
# Demographics string, word 2 = "<AGE><SEX>" and word 8 = bib number.
age_sex_bib <- scrape_text(paste0(root, "/div[1]/div[3]/div/div[1]"))
if (is.na(age_sex_bib)) {
age <- NA
sex <- NA
bib <- i # fall back to the page id when the bib cannot be read
} else {
parts <- strsplit(age_sex_bib, " ")[[1]]
age <- as.integer(gsub(".*?([0-9]+).*", "\\1", parts[2]))
sex <- substr(parts[2], 1, 1)
bib <- as.integer(parts[8])
}
name <- scrape_text(paste0(root, "/div[1]/div[1]/div/div/div[1]"))
# "City | Country"; NA propagates through strsplit() when the field is absent.
city_country <- scrape_text(paste0(root, "/div[1]/div[1]/div/div/div[2]"))
city <- strsplit(city_country, " \\| ")[[1]][1]
country <- strsplit(city_country, " \\| ")[[1]][2]
# Placings arrive with thousands separators; gsub()/as.integer() pass NA through.
place <- as.integer(gsub(",", "", scrape_text(paste0(root, "/div[2]/div[3]/span[1]"))))
totRun <- as.integer(gsub(".*?([0-9]+).*", "\\1",
gsub(",", "", scrape_text(paste0(root1, "/div[2]/div[3]/span[2]")))))
place.sex <- as.integer(gsub(",", "", scrape_text(paste0(root, "/div[3]/div[1]/span[1]"))))
place.age <- as.integer(gsub(",", "", scrape_text(paste0(root, "/div[3]/div[2]/span[1]"))))
finish <- scrape_text(paste0(root, "/div[2]/div[1]/span"))
av.pace <- scrape_text(paste0(root, "/div[2]/div[2]/span"))
# Splits (miles 3-26 plus the half): two panels, one div index per split.
# Indexes 2/6/10/14 are skipped on the page, hence the irregular sequences.
panel1 <- paste0(root, "/div[6]/div[3]/div[1]/div[%d]/div[2]/span")
panel2 <- paste0(root, "/div[6]/div[3]/div[2]/div[%d]/div[2]/span")
split_xpaths <- c(
setNames(sprintf(panel1, c(1, 3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 17)),
c(paste0("mile", 3:13, ".time"), "half.time", "mile14.time")),
setNames(sprintf(panel2, c(1, 3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16)),
paste0("mile", 15:26, ".time"))
)
splits <- vapply(split_xpaths, scrape_text, character(1))
# Combine and append.  cbind() keeps the historical behaviour: one
# all-character row whose column names match the accumulated `nycm` frame
# (scalars are named after their variables, t(splits) carries the split names).
temp <- as.data.frame(cbind(bib, name, city, country, age, sex, finish, place,
totRun, place.sex, place.age, av.pace, t(splits)))
# rbind() per iteration keeps partial results available if the scrape dies.
nycm <- rbind(nycm, temp)
}
# Write to file #
# Dump the accumulated results; na = "" writes missing splits as empty cells.
write.csv(nycm,
"scraperResults.csv",
quote = TRUE,
row.names = FALSE,
na = "")
# Open the results in the data viewer (interactive/RStudio use only).
View(nycm)
#:::::::::::::::::::::::::::::::::::::::$
# Scratch area: an empty copy of the try/else scraping template used above,
# kept for pasting when a new field is added.  It still executes (harmlessly)
# because only comments remain inside the braces.
errorCatch <- try({
suppressMessages({
# element assignment
})
}, silent = TRUE)
if ("try-error" %in% class(errorCatch)){
# < - NA
} else {
# assignments
# rm(Elm)
}
rm(errorCatch)
# Alternative XPaths observed on some runners' pages (kept as reference):
#For some runners it's this xpath ...
#name <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div[1]/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[1]/div[1]/div/div/div[1]")"
#For some runners it's this xpath ...
#city <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div[1]/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[1]/div[1]/div/div/div[2]")
#For some runners it's this xpath ...
#finish <- nycm.RemDr$findElement(using = "xpath", value = "/html/body/div[1]/div[2]/div/main/div/div[2]/div[2]/div/div/div[3]/div[1]/div[2]/div[1]/span")
|
library(git2r)
### Name: branch_delete
### Title: Delete a branch
### Aliases: branch_delete
### ** Examples
## Not run:
##D ## Initialize a temporary repository
##D path <- tempfile(pattern="git2r-")
##D dir.create(path)
##D repo <- init(path)
##D
##D ## Create a user and commit a file
##D config(repo, user.name="Alice", user.email="alice@example.org")
##D writeLines("Hello world!", file.path(path, "example.txt"))
##D add(repo, "example.txt")
##D commit_1 <- commit(repo, "First commit message")
##D
##D ## Create a 'dev' branch
##D dev <- branch_create(commit_1, name = "dev")
##D branches(repo)
##D
##D ## Delete 'dev' branch
##D branch_delete(dev)
##D branches(repo)
## End(Not run)
| /data/genthat_extracted_code/git2r/examples/branch_delete.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 702 | r | library(git2r)
### Name: branch_delete
### Title: Delete a branch
### Aliases: branch_delete
### ** Examples
## Not run:
##D ## Initialize a temporary repository
##D path <- tempfile(pattern="git2r-")
##D dir.create(path)
##D repo <- init(path)
##D
##D ## Create a user and commit a file
##D config(repo, user.name="Alice", user.email="alice@example.org")
##D writeLines("Hello world!", file.path(path, "example.txt"))
##D add(repo, "example.txt")
##D commit_1 <- commit(repo, "First commit message")
##D
##D ## Create a 'dev' branch
##D dev <- branch_create(commit_1, name = "dev")
##D branches(repo)
##D
##D ## Delete 'dev' branch
##D branch_delete(dev)
##D branches(repo)
## End(Not run)
|
context("stability")
# We check that the results from stability using prof = FALSE and prof = TRUE
# are identical.
# Tolerance for comparing the two sets of estimates.
my_tol <- 1e-5
n <- 500
seed <- 29082017
# Seed the RNG before simulating: `seed` was previously assigned but never
# used, so rgp() drew a different sample on every test run.
set.seed(seed)
test_data <- revdbayes::rgp(n)
u_vec <- quantile(test_data, probs = seq(0.05, 0.95, by = 0.1))
res1 <- stability(data = test_data, u_vec = u_vec)
res2 <- stability(data = test_data, u_vec = u_vec, prof = TRUE)
test_that("MLEs equal regardless of prof", {
  testthat::expect_equal(res1$ests, res2$ests, tolerance = my_tol)
})
| /tests/testthat/test-stability.R | no_license | cran/threshr | R | false | false | 571 | r | context("stability")
# We check that the results from stability using prof = FALSE and prof = TRUE
# are identical.
# Set a tolerance for the comparison of the simulated values
my_tol <- 1e-5
n <- 500
seed <- 29082017
test_data <- revdbayes::rgp(n)
u_vec <- quantile(test_data, probs = seq(0.05, 0.95, by = 0.1))
res1 <- stability(data = test_data, u_vec = u_vec)
res2 <- stability(data = test_data, u_vec = u_vec, prof = TRUE)
test_that("MLEs equal regardless of prof", {
testthat::expect_equal(res1$ests, res2$ests, tolerance = my_tol)
})
|
# Correlated features - only works for numeric, can discard 3, not useful
corMatr <- cor(training[,num_predictors])
highlyCorrelated <- findCorrelation(corMatr, cutoff=.75)
# Boruta feature selection
boruta.train <- Boruta(SalePrice~., data = training, doTrace = 2, )
b <- boruta.train$finalDecision
confirmed <- names(b[b == 'Confirmed'])
tentative <- names(b[b == 'Tentative'])
#Filter only confirmed features
trainB1 <- training[,c(confirmed, tentative, 'SalePrice')]
testB1 <- testing[,c(confirmed, tentative, 'SalePrice')]
submiB1 <- orig_submi[,c(confirmed, tentative)]
train_all <- orig_train[,c(confirmed, tentative, 'SalePrice')]
| /feature_selection.R | no_license | dmitrytoda/houseprice | R | false | false | 643 | r | # Correlated features - only works for numeric, can discard 3, not useful
corMatr <- cor(training[,num_predictors])
highlyCorrelated <- findCorrelation(corMatr, cutoff=.75)
# Boruta feature selection
boruta.train <- Boruta(SalePrice~., data = training, doTrace = 2, )
b <- boruta.train$finalDecision
confirmed <- names(b[b == 'Confirmed'])
tentative <- names(b[b == 'Tentative'])
#Filter only confirmed features
trainB1 <- training[,c(confirmed, tentative, 'SalePrice')]
testB1 <- testing[,c(confirmed, tentative, 'SalePrice')]
submiB1 <- orig_submi[,c(confirmed, tentative)]
train_all <- orig_train[,c(confirmed, tentative, 'SalePrice')]
|
# load the package
library(MicroPlate)
library(testthat)
library(plyr)
#
# Test MicroPlate.R
#
#
#
test_that("MicroPlate.R_$_tests",{
  # Path to the Novostar kinetic test fixture (relative to tests/testthat).
  # file <- paste0(getwd(), "/tests/testdata/parsers/novostar.xls/KineticData.xls")
  file <- paste0(getwd(), "/../testdata/parsers/novostar.xls/KineticData.xls")
  testData <- novostar.xls(file)
  ### $ / $<- round-trips at each of the three data levels ###
  # Plate level: a single scalar value
  testData$test <- 1234567 # write
  expect_equal(testData$test, 1234567) # read
  #TODO ADD OVERWRITE TEST...
  testData$test <- NULL # remove
  suppressWarnings(expect_true(is.null(testData$test))) # reading a removed column warns
  # Well level: one value per well (96)
  testData$testw <- 1:96 # write
  expect_true(all(testData$testw == 1:96)) # read
  testData$testw <- 20 # overwrite every well with the same value
  expect_true(all(testData$testw == 20)) # read
  testData$testw <- NULL # remove
  suppressWarnings(expect_true(is.null(testData$testw))) # reading a removed column warns
  # Measurement level: one value per measurement (24000)
  testData$testm <- 1:24000 # write
  expect_true(all(testData$testm == 1:24000)) # read
  testData$testm <- 20 # overwrite all measurements with the same value
  expect_true(all(testData$testm == 20)) # read
  testData$testm <- NULL # remove
  suppressWarnings(expect_true(is.null(testData$testm))) # reading a removed column warns
})
test_that("MicroPlate.R_basic_tests",{
  # Path to the Novostar kinetic test fixture (relative to tests/testthat).
  file <- paste0(getwd(), "/../testdata/parsers/novostar.xls/KineticData.xls")
  testData <- novostar.xls(file)
  ### colnames
  expect_true(any(colnames(testData) == "value")) # the measurement column exists
  # expect_error((colnames(testData)="cookies")) # only 1 element while data has 8 columns # NOTE: currently it just returns an error in any case
  # expect_warning((colnames(testData)=c(1,"cookies",3,4,5,6,7,8)))
  # expect_true(any(colnames(testData)=="cookies")) # test if the colname was changed
  # expect_equal(testData$cookies,1234567) # test if the data also changed...
  ### dim
  #TODO level support?
  expect_true(all(dim(testData) == c(24000, 8)))
  ### instance tests
  # Plain assignment aliases the same underlying data: a change made through
  # one name must be visible through the other.
  testData2 <- testData
  testData$cookies <- 123 # write at plate level via the original name
  expect_equal(testData2$cookies, 123) # ...and read it back via the alias
  # Independently constructed instances must not influence each other.
  testData3 <- novostar.xls(file)
  testData3$cookies <- 1234
  expect_false(testData2$cookies == 1234)
  ### copy
  # copy() must produce a detached copy that no longer shares state.
  testData4 <- copy(testData)
  testData4$cookies <- 123456
  expect_false(testData$cookies == 123456)
})
# Exercises `[` and `[<-` on a MicroPlate object: selection by level, single
# and multiple columns, row indices, boolean masks, and mixed level/column
# combinations, including the out-of-range and wrong-level error paths.
test_that("MicroPlate.R_[]_tests",{
# test both [] and []<-
#
#
# file=paste(getwd(),"/tests/testdata/parsers/novostar.xls/KineticData.xls",sep="")
file=paste(getwd(),"/../testdata/parsers/novostar.xls/KineticData.xls",sep="")
testData=novostar.xls(file)
### just level
# numeric and named level selectors must be interchangeable
expect_equal(testData[level=1],testData[level="measurement"])
expect_equal(testData[level=2], testData[level="well"])
expect_equal(testData[level=3], testData[level="plate"])
expect_error(testData[level=4])
expect_error(testData[level="COOKIESSS!!!"]) # todo: add cookie level
### single column
# plate
testData["newColumn"]=1
expect_equal(testData["newColumn"],1)
testData["newColumn"]=2 # overwrite
expect_equal(testData["newColumn"],2)
expect_error((testData["newColumn"]=1:24000))# try to add data at wrong level
testData["newColumn"]=NULL # remove
suppressWarnings(expect_true(is.null(testData$newColumn))) # does give a warning
# well
testData["newColumn"]=1:96 # reuse column name at different level
expect_true(all(testData["newColumn"]==1:96))
testData["newColumn"]=500 # single value overwrite
expect_true(all(testData["newColumn"]==rep(500,96)))
expect_error((testData["newColumn"]=1:24000))# try to add data at wrong level
testData["newColumn"]=NULL
suppressWarnings(expect_true(is.null(testData$newColumn))) # does give a warning
# measurement
testData["newColumn"]=1:24000 # GOES WITH BLAZING SPEED!
expect_true(all(testData["newColumn"]==1:24000))
testData["newColumn"]=500 # single value overwrite -- yup the new underlying structure makes this much faster!
expect_true(all(testData["newColumn"]==rep(500,24000)))
expect_error((testData["newColumn"]=1:96))# try to add data at wrong level
testData["newColumn"]=NULL
suppressWarnings(expect_true(is.null(testData$newColumn))) # does give a warning
### row
# plate
testData=novostar.xls(file)
testData[1,"newColumn",level="plate"]=5
expect_equal(testData[1,"newColumn"],5)
expect_error((testData[1,"newColumn"]=NULL)) # you are not allowed to delete individual values
testData[1,"newColumn",level=3]=50
expect_error(testData[10,"newColumn"]) # out of range
expect_error((testData[2,"newColumn"]=5)) # out of range assign
# well
expect_error((testData[5,"newColumn",level="well"]=5))
testData=novostar.xls(file)
testData[5,"newColumn",level="well"]=5
expect_equal(testData[5,"newColumn"],5)
testData[6,"newColumn",level=2]=50
expect_error(testData[97,"newColumn"]) # out of range
expect_error((testData[97,"newColumn"]=5)) # out of range assign
# measurement
expect_error((testData[5,"newColumn",level="measurement"]=5))
testData=novostar.xls(file)
testData[15,"newColumn",level="measurement"]=5
testData[18,"newColumn",level=1]=55
expect_equal(testData[18,"newColumn"],55)
expect_error(testData[24010,"newColumn"]) # out of range
expect_error((testData[24001,"newColumn"]=5)) # out of range assign
### just row
# TODO increase this section!!!
# boolean select
testData=novostar.xls(file)
expect_true(all(testData[1,]==c(0.2663,0,600,1,1,"Sample X1",1,"KineticData.xls"))) # first row
expect_true(all(dim(testData[testData$row==1,])==c(12,5))) # boolean selection
### multiple column
# plate
testData=novostar.xls(file)
testData$newColumn=1
testData[8:9]=matrix(1,1,2) # change
expect_true(all(testData[8:9]==c(1,1)))
testData[8:9]=1:2 # change
expect_true(all(testData[8:9]==1:2))
expect_error((testData[8:9]=1)) # you cant overwrite a block of data...
testData[c("plateName","evenNewerColumn")]=10:11 # 50% new!
expect_true(all(testData[c("plateName","evenNewerColumn")]==10:11))
# testData[c("lalala","lalalala")]=10:11# 100% new! # TODO MAKE THIS WORK!!
# expect_true(all(testData[c("lalala","lalalala")]==10:11))
testData[c("lalala","lalalala"),level=3]=10:11# 100% new
expect_true(all(testData[c("lalala","lalalala")]==10:11))
testData[c("lalala","lalalala")]=NULL # multi column delete
expect_error(testData[c("lalala","lalalala")]) # error cause rows are deleted
# well
testData=novostar.xls(file)
testData[5:6]=matrix(1,96,2)
expect_true(all(testData[5:6]==1))
expect_error((testData[5:6]=1:192)) # 2D selection requires 2D data! i will not shape the data for you! that is crazy!
testData[c("content","evenNewerColumn")]= matrix(1,96,2) # 50% new!
expect_true(all(testData[c("content","evenNewerColumn")]==1))
testData[c("lalala","lalalala"),level="well"]=matrix(2,96,2) # 100% new
expect_true(all(testData[c("lalala","lalalala")]==2))
testData[c("lalala","lalalala")]=NULL # multi column delete
expect_error(testData[c("lalala","lalalala")]) # error cause rows are deleted
# measurement
testData=novostar.xls(file)
testData[1:2]=matrix(1,24000,2)
expect_true(all(testData[1:2]==1))
testData[c("temp","evenNewerColumn")]= matrix(2,24000,2) # 50% new!
expect_true(all(testData[c("temp","evenNewerColumn")]==2))
testData[c("lalala","lalalala"),level="measurement"]=matrix("cookies!",24000,2) # 100% new
expect_true(all(testData[c("lalala","lalalala")]=="cookies!"))
testData[c("lalala","lalalala")]=NULL # multi column delete
expect_error(testData[c("lalala","lalalala")]) # error cause rows are deleted
### multiple column+row
# general
testData=novostar.xls(file)
expect_equal(class(testData[1:7,1:7]),"data.frame")
expect_error((testData[1:7,1:7]=matrix(1,7,7))) # you cant change data at multiple levels in 1 go
expect_true(all(dim(testData[1:7,1:7])==c(7,7))) # read columns different levels
expect_error((testData[1:7,1:7]=1)) #assign wrong format
expect_error((testData[1:7,1:7]=1:7))
# plate
testData=novostar.xls(file)
testData=merge(testData,testData,removeOther = F)
testData["newPlateData"]=1:2
expect_true(all(testData[1:2,8:9][,2]==1:2))
testData[1:2,8:9]=matrix(5,2,2)
expect_true(all(testData[1:2,8:9]==5))
# well
testData=novostar.xls(file)
expect_true(all(dim(testData[12:44,4:6])==c(33,3)))
testData[12:44,4:6]=matrix(123,33,3)
expect_true(all(testData[12:44,4:6]==123))
# measurement
expect_error((testData[1:7,1:2]=1)) #assign wrong format
expect_error((testData[1:7,1:2]=1:7))
testData[1:7,1:3]=matrix("cookies",7,3)
expect_true(all(testData[1:7,1:3]=="cookies")) # my favorite kinda test
### boolean selection
# plate
testData=novostar.xls(file)
expect_equal(testData[testData$plateName=="KineticData.xls","plateName"],"KineticData.xls")
expect_error((testData[testData$plateName=="KineticData.xls"]="plateOfDoom"))# should give error as i do not specify what column
testData[testData$plateName=="KineticData.xls","plateName"]="plateOfDoom"
expect_equal(testData["plateName"],"plateOfDoom")
# well
expect_error((testData[testData$row>10,"plateName"]=="KineticData.xls")) # wrong level!... and nothing selected...
expect_error((testData[testData$row>2,"plateName"]=="KineticData.xls")) # wrong level!...
testData[testData$row>2,"content"]="NEW CONTENT!"
expect_true(sum(testData$content=="NEW CONTENT!")==72)
# measurement
# expect_true(sum(testData[testData$value>0.5,"value"])==7498.442) # fails: console rounding hid the last digit; the actual sum is 7498.4418
expect_true(sum(testData[testData$value>0.5,"value"])==7498.4418)
expect_true(max(testData[testData$value>0.5,"value"])==0.8514)
testData[testData$value>0.4,"value"]=100
expect_true(all(testData[testData$value>0.4,"value"]==100))
### different level than col selection
# plate
testData=novostar.xls(file)
expect_true(length(testData["plateName",level="well"])==96)
expect_error((testData["plateName",level="well"]=1:96))
expect_error((testData[1:96,"plateName",level="well"]=1:96)) # say i want well and give well level data, but its a plate level column
expect_true(length(testData["plateName",level=1])==24000)
expect_error((testData["plateName",level="measurement"]=1:96))
# well
expect_error(testData["row",level=3])
expect_true(length(testData["row",level=1])==24000)
expect_error((testData["row",level="measurement"]=1:96)) # say i want well but give measurement level
expect_error((testData["row",level="measurement"]=1:24000))
# measurement
expect_error(testData["value",level=3]) # data level lower then requested level
expect_error(testData["value",level=2])
# restricted column names.. plate measurement etc...
# plate
expect_error((testData["plate"]=1))
expect_error((testData["measurement"]=1))
# expect_error((testData["well"]=1)) # might need to change this
})
# Exercises the second `[` calling convention, where rows are selected by a
# named dimension value (e.g. well=10 or well="A10") instead of an index.
test_that("MicroPlate.R_[]_tests_2nd_mode",{
###################
# 2nd mode test #
###################
# mp[colNamesYouWant, colname=content]
# mp[,well=96]
# mp[,well=4:12]
# file=paste(getwd(),"/tests/testdata/parsers/novostar.xls/KineticData.xls",sep="")
file=paste(getwd(),"/../testdata/parsers/novostar.xls/KineticData.xls",sep="")
testData=novostar.xls(file)
### well=
# numeric index and plate-coordinate name must select the same well
expect_true(all(dim(testData[well=10])==c(250,8)))
expect_true(all(testData[well=10]==testData[well="A10"]))
expect_true(all(dim(testData[well=12:80])==c(17250 ,8)))
testData["content",well=10]="COOKIES!!!"
expect_equal(testData["content",well=10],"COOKIES!!!")
expect_error((testData["content",well=10]=1:250))
expect_error((testData["content",well=10,level="measurement"]=1:250))
expect_true(all(dim(testData[well=10,level=2])==c(1,5)))
# testData[well=10,level=2]=c(1,10,"lalalala") # does not work... should it?
# expect_error(testData[well=100])#out of range ... dunno if i should throw an error or return nothing...
# testData[well=10]
# testData[well=10,level=1] # works
# testData[well=10,level=2] # works
# testData[well=10,level=2]=c(1,10,"lalalala") # does not work... should it?
# testData[,well=10,level=2]# works
# testData["content",well=10,level=2] # works
# testData[well=10]
# testData[well=10]=1 # TODO: needs better error
### all kind of combinations of column names, well= and level=
expect_equal(testData["row",well=4,level=2],1)
expect_equal(length(testData["content",well=10:23,level=2]),14)
expect_true(all(testData[c("row","column","content"),well=4,level=2] == c(1,4,"Sample X4")))
#
# testData[well=4,level=2]
# testData[well=8]
# testData[,well=8]
# testData[,,well=8]
#
#
# testData["value",well=8]
#
# testData["content",well="B6",level=1]#should give error!
# testData["content",well="B6",level=2]
# testData["content",well="B6",level=3]#should crash
# testData["content",row=2,column=6,level=2]
# testData["content",column=2,level=2]
})
# Placeholder for benchmark/comparison runs (MicroPlate vs data.frame).
# The body is entirely commented out; the timings below were recorded
# manually and kept for reference.  The test currently asserts nothing.
test_that("MicroPlate.R_ stress/compare tests",{
# its probably a bad idea to keep this in the stress test
# ... stress unit test sounds like a silly idea in general..
# # file=paste(getwd(),"/tests/testdata/parsers/novostar.xls/KineticData.xls",sep="")
# file=paste(getwd(),"/../testdata/parsers/novostar.xls/KineticData.xls",sep="")
# testData=novostar.xls(file)
#
### OLD
# file="../testdata/"
# workspace = getwd()
# testdir=file.path(workspace, "tests/testdata/enzymeAssays")
# file=file.path(testdir, "GJS_layout3263.tab")
# layoutData=readLayoutFile(file=file)
# file2=file.path(testdir, "3263.dbf")
# newData=novostar.dbf(path=file2)
# testData=new("MicroPlate")
#
# system.time(replicate(50, addPlate(testData,newData=newData,layoutData=layoutData)))
# tdf=testData[] # 2MB ish
#
# system.time(replicate(1000,testData$value)) # 3 sec
# system.time(replicate(1000,tdf$value)) # .4 sec
# # about Data = 10x slower then data.frame
#
# system.time(replicate(1000,testData["value"])) # 24 sec
# system.time(replicate(1000,tdf["value"])) # .3 sec
# # many many times slower
#
# system.time(replicate(1000,testData["content"])) # .5 sec
# system.time(replicate(1000,tdf["content"])) # 1.2 sec #... that is ... weird...
# # that is ... weird... is this factor vs string?
# # oooh crap... this is 600 vs 30000 rows....
# system.time(replicate(1000,testData["content",level="measurement"])) # 55 sec
# # that is ... many many times slower
#
# system.time(replicate(1000,testData["row",level="measurement"])) # 27 sec
# system.time(replicate(1000,tdf["row"])) # .4 sec
# # eeeugh....
#
#
# testData=new("Data")
# system.time(replicate(100, addPlate(testData,newData=newData,layoutData=layoutData))) # 5sec
# testData
#
#
# tdf=testData[] # 2MB ish
# system.time(replicate(10,testData$value))
# system.time(replicate(10,tdf$value))
#
# system.time(replicate(10000,testData["value"]))
# system.time(replicate(10000,tdf["value"]))
#
#
# testData[]
# testData
})
| /tests/testthat/test_MicroPlate.R | no_license | phonixor/MicroPlate | R | false | false | 15,161 | r | # load the package
library(MicroPlate)
library(testthat)
library(plyr)
#
# Test MicroPlate.R
#
#
#
test_that("MicroPlate.R_$_tests",{
# file=paste(getwd(),"/tests/testdata/parsers/novostar.xls/KineticData.xls",sep="")
file=paste(getwd(),"/../testdata/parsers/novostar.xls/KineticData.xls",sep="")
testData=novostar.xls(file)
### $ tests
# Plate
testData$test=1234567 # write
expect_equal(testData$test,1234567) # read
#TODO ADD OVERWRITE TEST...
testData$test=NULL # test remove
suppressWarnings(expect_true(is.null(testData$test))) # does give a warning
# well
testData$testw=1:96 # write
expect_true(all(testData$testw==1:96)) # read
testData$testw=20 # overwrite all same value
expect_true(all(testData$testw==20)) # read
testData$testw=NULL # test remove
suppressWarnings(expect_true(is.null(testData$testw))) # does give a warning
# measurement
testData$testm=1:24000 # write
expect_true(all(testData$testm==1:24000)) # read
testData$testm=20 # overwrite all same value
expect_true(all(testData$testm==20)) # read
testData$testm=NULL # test remove
suppressWarnings(expect_true(is.null(testData$testm))) # does give a warning
})
test_that("MicroPlate.R_basic_tests",{
file=paste(getwd(),"/../testdata/parsers/novostar.xls/KineticData.xls",sep="")
testData=novostar.xls(file)
### colnames
expect_true(any(colnames(testData)=="value")) # test the colname
# expect_error((colnames(testData)="cookies")) # only 1 element while data has 8 columns # NOTE: currently it just returns an error in any case
# expect_warning((colnames(testData)=c(1,"cookies",3,4,5,6,7,8)))
# expect_true(any(colnames(testData)=="cookies")) # test if the colname was changed
# expect_equal(testData$cookies,1234567) # test if the data also changed...
### dim
#TODO level support?
expect_true(all(dim(testData)==c(24000,8)))
### instance tests
# change in one instance effectes the other
testData2=testData
testData$cookies=123 # once again at plate level
expect_equal(testData2$cookies,123) # note that we changed test and check test2
# test that you can have multiple instances that dont influence eachother
testData3=novostar.xls(file)
testData3$cookies=1234
expect_false(testData2$cookies==1234)
### copy
testData4=copy(testData)
testData4$cookies=123456
expect_false(testData$cookies==123456) # same kinda instance test
})
test_that("MicroPlate.R_[]_tests",{
# test both [] and []<-
#
#
# file=paste(getwd(),"/tests/testdata/parsers/novostar.xls/KineticData.xls",sep="")
file=paste(getwd(),"/../testdata/parsers/novostar.xls/KineticData.xls",sep="")
testData=novostar.xls(file)
### just level
expect_equal(testData[level=1],testData[level="measurement"])
expect_equal(testData[level=2], testData[level="well"])
expect_equal(testData[level=3], testData[level="plate"])
expect_error(testData[level=4])
expect_error(testData[level="COOKIESSS!!!"]) # todo: add cookie level
### singel column
# plate
testData["newColumn"]=1
expect_equal(testData["newColumn"],1)
testData["newColumn"]=2 # overwrite
expect_equal(testData["newColumn"],2)
expect_error((testData["newColumn"]=1:24000))# try to add data at wrong level
testData["newColumn"]=NULL # remove
suppressWarnings(expect_true(is.null(testData$newColumn))) # does give a warning
# well
testData["newColumn"]=1:96 # reuse column name at different level
expect_true(all(testData["newColumn"]==1:96))
testData["newColumn"]=500 # single value overwrite
expect_true(all(testData["newColumn"]==rep(500,96)))
expect_error((testData["newColumn"]=1:24000))# try to add data at wrong level
testData["newColumn"]=NULL
suppressWarnings(expect_true(is.null(testData$newColumn))) # does give a warning
# measurement
testData["newColumn"]=1:24000 # GOES WITH BLAZING SPEED!
expect_true(all(testData["newColumn"]==1:24000))
testData["newColumn"]=500 # single value overwrite -- yup the new underlying structure makes this much faster!
expect_true(all(testData["newColumn"]==rep(500,24000)))
expect_error((testData["newColumn"]=1:96))# try to add data at wrong level
testData["newColumn"]=NULL
suppressWarnings(expect_true(is.null(testData$newColumn))) # does give a warning
### row
# plate
testData=novostar.xls(file)
testData[1,"newColumn",level="plate"]=5
expect_equal(testData[1,"newColumn"],5)
expect_error((testData[1,"newColumn"]=NULL)) # you are not allowed to delete individual values
testData[1,"newColumn",level=3]=50
expect_error(testData[10,"newColumn"]) # out of range
expect_error((testData[2,"newColumn"]=5)) # out of range assign
# well
expect_error((testData[5,"newColumn",level="well"]=5))
testData=novostar.xls(file)
testData[5,"newColumn",level="well"]=5
expect_equal(testData[5,"newColumn"],5)
testData[6,"newColumn",level=2]=50
expect_error(testData[97,"newColumn"]) # out of range
expect_error((testData[97,"newColumn"]=5)) # out of range assign
# measurement
expect_error((testData[5,"newColumn",level="measurement"]=5))
testData=novostar.xls(file)
testData[15,"newColumn",level="measurement"]=5
testData[18,"newColumn",level=1]=55
expect_equal(testData[18,"newColumn"],55)
expect_error(testData[24010,"newColumn"]) # out of range
expect_error((testData[24001,"newColumn"]=5)) # out of range assign
### just row
# TODO increase this section!!!
# boolean select
testData=novostar.xls(file)
expect_true(all(testData[1,]==c(0.2663,0,600,1,1,"Sample X1",1,"KineticData.xls"))) # first row
expect_true(all(dim(testData[testData$row==1,])==c(12,5))) # boolean selection
### multiple column
# plate
testData=novostar.xls(file)
testData$newColumn=1
testData[8:9]=matrix(1,1,2) # change
expect_true(all(testData[8:9]==c(1,1)))
testData[8:9]=1:2 # change
expect_true(all(testData[8:9]==1:2))
expect_error((testData[8:9]=1)) # you cant overwrite a block of data...
testData[c("plateName","evenNewerColumn")]=10:11 # 50% new!
expect_true(all(testData[c("plateName","evenNewerColumn")]==10:11))
# testData[c("lalala","lalalala")]=10:11# 100% new! # TODO MAKE THIS WORK!!
# expect_true(all(testData[c("lalala","lalalala")]==10:11))
testData[c("lalala","lalalala"),level=3]=10:11# 100% new
expect_true(all(testData[c("lalala","lalalala")]==10:11))
testData[c("lalala","lalalala")]=NULL # multi column delete
expect_error(testData[c("lalala","lalalala")]) # error cause rows are deleted
# well
testData=novostar.xls(file)
testData[5:6]=matrix(1,96,2)
expect_true(all(testData[5:6]==1))
expect_error((testData[5:6]=1:192)) # 2D selection requires 2D data! i will not shape the data for you! that is crazy!
testData[c("content","evenNewerColumn")]= matrix(1,96,2) # 50% new!
expect_true(all(testData[c("content","evenNewerColumn")]==1))
testData[c("lalala","lalalala"),level="well"]=matrix(2,96,2) # 100% new
expect_true(all(testData[c("lalala","lalalala")]==2))
testData[c("lalala","lalalala")]=NULL # multi column delete
expect_error(testData[c("lalala","lalalala")]) # error cause rows are deleted
# measurement
testData=novostar.xls(file)
testData[1:2]=matrix(1,24000,2)
expect_true(all(testData[1:2]==1))
testData[c("temp","evenNewerColumn")]= matrix(2,24000,2) # 50% new!
expect_true(all(testData[c("temp","evenNewerColumn")]==2))
testData[c("lalala","lalalala"),level="measurement"]=matrix("cookies!",24000,2) # 100% new
expect_true(all(testData[c("lalala","lalalala")]=="cookies!"))
testData[c("lalala","lalalala")]=NULL # multi column delete
expect_error(testData[c("lalala","lalalala")]) # error cause rows are deleted
### multiple column+row
# general
testData=novostar.xls(file)
expect_equal(class(testData[1:7,1:7]),"data.frame")
expect_error((testData[1:7,1:7]=matrix(1,7,7))) # you cant change data at multiple levels in 1 go
expect_true(all(dim(testData[1:7,1:7])==c(7,7))) # read columns different levels
expect_error((testData[1:7,1:7]=1)) #assign wrong format
expect_error((testData[1:7,1:7]=1:7))
# plate
testData=novostar.xls(file)
testData=merge(testData,testData,removeOther = F)
testData["newPlateData"]=1:2
expect_true(all(testData[1:2,8:9][,2]==1:2))
testData[1:2,8:9]=matrix(5,2,2)
expect_true(all(testData[1:2,8:9]==5))
# well
testData=novostar.xls(file)
expect_true(all(dim(testData[12:44,4:6])==c(33,3)))
testData[12:44,4:6]=matrix(123,33,3)
expect_true(all(testData[12:44,4:6]==123))
# measurement
expect_error((testData[1:7,1:2]=1)) #assign wrong format
expect_error((testData[1:7,1:2]=1:7))
testData[1:7,1:3]=matrix("cookies",7,3)
expect_true(all(testData[1:7,1:3]=="cookies")) # my favorite kinda test
### boolean selection
# plate
testData=novostar.xls(file)
expect_equal(testData[testData$plateName=="KineticData.xls","plateName"],"KineticData.xls")
expect_error((testData[testData$plateName=="KineticData.xls"]="plateOfDoom"))# should give error as i do not specify what column
testData[testData$plateName=="KineticData.xls","plateName"]="plateOfDoom"
expect_equal(testData["plateName"],"plateOfDoom")
# well
expect_error((testData[testData$row>10,"plateName"]=="KineticData.xls")) # wrong level!... and nothing selected...
expect_error((testData[testData$row>2,"plateName"]=="KineticData.xls")) # wrong level!...
testData[testData$row>2,"content"]="NEW CONTENT!"
expect_true(sum(testData$content=="NEW CONTENT!")==72)
# measurement
# expect_true(sum(testData[testData$value>0.5,"value"])==7498.442)# FUCK YOU R!!! DONT HIDE STUFF FROM ME!
expect_true(sum(testData[testData$value>0.5,"value"])==7498.4418)
expect_true(max(testData[testData$value>0.5,"value"])==0.8514)
testData[testData$value>0.4,"value"]=100
expect_true(all(testData[testData$value>0.4,"value"]==100))
### diffrent level then col selection
# plate
testData=novostar.xls(file)
expect_true(length(testData["plateName",level="well"])==96)
expect_error((testData["plateName",level="well"]=1:96))
expect_error((testData[1:96,"plateName",level="well"]=1:96)) # say i want well and give well level data, but its a plate level column
expect_true(length(testData["plateName",level=1])==24000)
expect_error((testData["plateName",level="measurement"]=1:96))
# well
expect_error(testData["row",level=3])
expect_true(length(testData["row",level=1])==24000)
expect_error((testData["row",level="measurement"]=1:96)) # say i want well but give measurement level
expect_error((testData["row",level="measurement"]=1:24000))
# measurement
expect_error(testData["value",level=3]) # data level lower then requested level
expect_error(testData["value",level=2])
# restricted column names.. plate measurement etc...
# plate
expect_error((testData["plate"]=1))
expect_error((testData["measurement"]=1))
# expect_error((testData["well"]=1)) # might need to change this
})
test_that("MicroPlate.R_[]_tests_2nd_mode",{
###################
# 2nd mode test #
###################
# mp[colNamesYouWant, colname=content]
# mp[,well=96]
# mp[,well=4:12]
# file=paste(getwd(),"/tests/testdata/parsers/novostar.xls/KineticData.xls",sep="")
file=paste(getwd(),"/../testdata/parsers/novostar.xls/KineticData.xls",sep="")
testData=novostar.xls(file)
### well=
expect_true(all(dim(testData[well=10])==c(250,8)))
expect_true(all(testData[well=10]==testData[well="A10"]))
expect_true(all(dim(testData[well=12:80])==c(17250 ,8)))
testData["content",well=10]="COOKIES!!!"
expect_equal(testData["content",well=10],"COOKIES!!!")
expect_error((testData["content",well=10]=1:250))
expect_error((testData["content",well=10,level="measurement"]=1:250))
expect_true(all(dim(testData[well=10,level=2])==c(1,5)))
# testData[well=10,level=2]=c(1,10,"lalalala") # does not work... should it?
# expect_error(testData[well=100])#out of range ... dunno if i should throw an error or return nothing...
# testData[well=10]
# testData[well=10,level=1] # works
# testData[well=10,level=2] # works
# testData[well=10,level=2]=c(1,10,"lalalala") # does not work... should it?
# testData[,well=10,level=2]# works
# testData["content",well=10,level=2] # works
# testData[well=10]
# testData[well=10]=1 # TODO: needs better error
### all kind of sexy combinations...
expect_equal(testData["row",well=4,level=2],1)
expect_equal(length(testData["content",well=10:23,level=2]),14)
expect_true(all(testData[c("row","column","content"),well=4,level=2] == c(1,4,"Sample X4")))
#
# testData[well=4,level=2]
# testData[well=8]
# testData[,well=8]
# testData[,,well=8]
#
#
# testData["value",well=8]
#
# testData["content",well="B6",level=1]#should give error!
# testData["content",well="B6",level=2]
# testData["content",well="B6",level=3]#should crash
# testData["content",row=2,column=6,level=2]
# testData["content",column=2,level=2]
})
test_that("MicroPlate.R_ stress/compare tests",{
# its probably a bad idea to keep this in the stress test
# ... stress unit test sounds like a silly idea in general..
# # file=paste(getwd(),"/tests/testdata/parsers/novostar.xls/KineticData.xls",sep="")
# file=paste(getwd(),"/../testdata/parsers/novostar.xls/KineticData.xls",sep="")
# testData=novostar.xls(file)
#
### OLD
# file="../testdata/"
# workspace = getwd()
# testdir=file.path(workspace, "tests/testdata/enzymeAssays")
# file=file.path(testdir, "GJS_layout3263.tab")
# layoutData=readLayoutFile(file=file)
# file2=file.path(testdir, "3263.dbf")
# newData=novostar.dbf(path=file2)
# testData=new("MicroPlate")
#
# system.time(replicate(50, addPlate(testData,newData=newData,layoutData=layoutData)))
# tdf=testData[] # 2MB ish
#
# system.time(replicate(1000,testData$value)) # 3 sec
# system.time(replicate(1000,tdf$value)) # .4 sec
# # about Data = 10x slower then data.frame
#
# system.time(replicate(1000,testData["value"])) # 24 sec
# system.time(replicate(1000,tdf["value"])) # .3 sec
# # many many times slower
#
# system.time(replicate(1000,testData["content"])) # .5 sec
# system.time(replicate(1000,tdf["content"])) # 1.2 sec #... that is ... weird...
# # that is ... weird... is this factor vs string?
# # oooh crap... this is 600 vs 30000 rows....
# system.time(replicate(1000,testData["content",level="measurement"])) # 55 sec
# # that is ... many many times slower
#
# system.time(replicate(1000,testData["row",level="measurement"])) # 27 sec
# system.time(replicate(1000,tdf["row"])) # .4 sec
# # eeeugh....
#
#
# testData=new("Data")
# system.time(replicate(100, addPlate(testData,newData=newData,layoutData=layoutData))) # 5sec
# testData
#
#
# tdf=testData[] # 2MB ish
# system.time(replicate(10,testData$value))
# system.time(replicate(10,tdf$value))
#
# system.time(replicate(10000,testData["value"]))
# system.time(replicate(10000,tdf["value"]))
#
#
# testData[]
# testData
})
|
# Cluster analysis of the iris data with four method families:
# k-means, hierarchical, model-based (mclust) and density-based (dbscan),
# all run on the standardised numeric measurements.

# LOAD DATA ----
data(iris)
plot(iris)

# SCALE DATA ----
# Drop the Species factor (column 5) and standardise the four measurements;
# irisScaled is a 150 x 4 numeric matrix.
irisScaled <- scale(iris[, -5])

# K-MEANS CLUSTERING ----
## CLUSTERING
# BUG FIX: the original called kmeans(irisScaled[, -5], 3); irisScaled has
# only 4 columns, so the out-of-range negative column subscript errors.
# The matrix is already Species-free, so cluster it directly.
fitK <- kmeans(irisScaled, 3)
fitK
str(fitK)
fitK$cluster
plot(iris, col = fitK$cluster)

## CHOOSING K
# Fit k-means for k = 1..10 (preallocate the list instead of growing it).
k <- vector("list", 10)
for (i in seq_len(10)) {
  k[[i]] <- kmeans(irisScaled[, 1:4], i)
}
k

# Elbow plot: between-cluster / total sum of squares for each k.
betweenss_totss <- vapply(k, function(fit) fit$betweenss / fit$totss, numeric(1))
plot(1:10, betweenss_totss, type = "b",
     ylab = "Between SS / Total SS", xlab = "Clusters (k)")

for (i in seq_len(4)) {
  plot(iris, col = k[[i]]$cluster)
}

# HIERARCHICAL CLUSTERING ----
d <- dist(irisScaled[, 1:4])
fitH <- hclust(d, "ward.D2")
plot(fitH)
rect.hclust(fitH, k = 3, border = "red")
clusters <- cutree(fitH, k = 3)
plot(iris, col = clusters)

# MODEL-BASED CLUSTERING ----
library(mclust)
fitM <- Mclust(irisScaled)
plot(fitM)

# DENSITY-BASED CLUSTERING ----
# Install dbscan only when missing: an unconditional install.packages()
# needs network access and re-installs on every run.
if (!requireNamespace("dbscan", quietly = TRUE)) {
  install.packages("dbscan")
}
library(dbscan)
# The knee of the kNN-distance curve motivates eps = 0.7 below.
kNNdistplot(irisScaled, k = 3)
abline(h = 0.7, col = "red", lty = 2)
fitD <- dbscan(irisScaled, eps = 0.7, minPts = 5)
fitD
plot(iris, col = fitD$cluster) | /04 Cluster analysis.R | no_license | muhsinilHaq/Cluster-Analisys | R | false | false | 1,143 | r | # LOAD DATA ----
# Cluster analysis of the iris data (k-means, hierarchical, model-based,
# density-based), run on the standardised numeric measurements.
data(iris)
plot(iris)

# SCALE DATA ----
# Drop the Species factor (column 5) and standardise the four measurements;
# irisScaled is a 150 x 4 numeric matrix.
irisScaled <- scale(iris[, -5])

# K-MEANS CLUSTERING ----
## CLUSTERING
# BUG FIX: the original called kmeans(irisScaled[, -5], 3); irisScaled has
# only 4 columns, so the out-of-range negative column subscript errors.
# The matrix is already Species-free, so cluster it directly.
fitK <- kmeans(irisScaled, 3)
fitK
str(fitK)
fitK$cluster
plot(iris, col = fitK$cluster)

## CHOOSING K
# Fit k-means for k = 1..10 (preallocate the list instead of growing it).
k <- vector("list", 10)
for (i in seq_len(10)) {
  k[[i]] <- kmeans(irisScaled[, 1:4], i)
}
k

# Elbow plot: between-cluster / total sum of squares for each k.
betweenss_totss <- vapply(k, function(fit) fit$betweenss / fit$totss, numeric(1))
plot(1:10, betweenss_totss, type = "b",
     ylab = "Between SS / Total SS", xlab = "Clusters (k)")

for (i in seq_len(4)) {
  plot(iris, col = k[[i]]$cluster)
}

# HIERARCHICAL CLUSTERING ----
d <- dist(irisScaled[, 1:4])
fitH <- hclust(d, "ward.D2")
plot(fitH)
rect.hclust(fitH, k = 3, border = "red")
clusters <- cutree(fitH, k = 3)
plot(iris, col = clusters)

# MODEL-BASED CLUSTERING ----
library(mclust)
fitM <- Mclust(irisScaled)
plot(fitM)

# DENSITY-BASED CLUSTERING ----
# Install dbscan only when missing: an unconditional install.packages()
# needs network access and re-installs on every run.
if (!requireNamespace("dbscan", quietly = TRUE)) {
  install.packages("dbscan")
}
library(dbscan)
# The knee of the kNN-distance curve motivates eps = 0.7 below.
kNNdistplot(irisScaled, k = 3)
abline(h = 0.7, col = "red", lty = 2)
fitD <- dbscan(irisScaled, eps = 0.7, minPts = 5)
fitD
plot(iris, col = fitD$cluster)
/TTR_Interpolation/1_ParticalSwarm_NerualNetwork_origin.R | no_license | onthejeep/TTR_Interpolation | R | false | false | 15,239 | r | ||
#######################################################################################
# Analysis 1 for Manuscript: A 2x2 factorial randomized controlled trial of rhetorical training and s...
#######################################################################################
# Setting environment -----------------------------------------------------------------
# NOTE(review): the original script opened with rm(list = ls()) and a bare
# detach().  Both were removed: rm(list = ls()) silently wipes the user's
# workspace as a side effect, and detach() without arguments removes whatever
# sits at position 2 of the search path (typically a base package), breaking
# the session.
#
# data1 = first database (work professor x student)
# Header for data1 ####################################
# GRO = Group, ENC = Encounter with mentor and researcher
# 1=Control, 2=Template, 3=Swarm, 4=Template+Swarm
# EXP = Explanation from mentor to researcher, QOW = Quality of writing
# SOR = Satisfaction of student, CWR = Communication from mentor with student

# Install packages only when missing; an unconditional install.packages()
# requires network access and re-downloads on every run.
if (!requireNamespace("nortest", quietly = TRUE)) install.packages("nortest") # Anderson-Darling test
if (!requireNamespace("RCurl", quietly = TRUE)) install.packages("RCurl") # read remote spreadsheet in GDocs

# Load packages
library(nortest)
library(RCurl)

# Reading remote data in GDocs ---------------------------------------------------------
options(RCurlOptions = list(capath = system.file("CurlSSL", "cacert.pem", package = "RCurl"), ssl.verifypeer = FALSE))
uem.data <- getURL("https://docs.google.com/spreadsheet/pub?key=0ArSWDBjbC6hHdDM5eGFubjJtbGV3Ukd0cEpaMDRHcFE&single=true&gid=0&output=csv")
data1 <- read.csv(textConnection(uem.data), header = TRUE)
# Columns are referenced explicitly below instead of via attach(), so the
# search path is never modified.

# Verify normality (Anderson-Darling test)
ad.test(data1$enc)
ad.test(data1$exp)
ad.test(data1$qow)
ad.test(data1$sor)
ad.test(data1$cwr)

# Data not normal -> use the rank-based Kruskal-Wallis test per outcome
kruskal.test(enc ~ gro, data = data1)
kruskal.test(exp ~ gro, data = data1)
kruskal.test(qow ~ gro, data = data1)
kruskal.test(sor ~ gro, data = data1)
kruskal.test(cwr ~ gro, data = data1)

# One-way ANOVA for comparison with the non-parametric tests
# NOTE(review): gro is coded 1-4; aov() fits a numeric predictor as a slope,
# not as four groups -- factor(gro) is probably intended.  Confirm.
aov(enc ~ gro, data = data1)
aov(exp ~ gro, data = data1)
aov(qow ~ gro, data = data1)
aov(sor ~ gro, data = data1)
aov(cwr ~ gro, data = data1)

# Boxplots to inspect each outcome by group
boxplot(enc ~ gro, data = data1)
boxplot(exp ~ gro, data = data1)
boxplot(qow ~ gro, data = data1)
boxplot(sor ~ gro, data = data1)
boxplot(cwr ~ gro, data = data1)
| /analysis1.R | no_license | ecacarva/UEM_2x2 | R | false | false | 2,071 | r | #######################################################################################
# Analysis 1 for Manuscript: A 2x2 factorial randomized controlled trial of rhetorical training and s...
#######################################################################################
#setting environment -------------------------------------------------------------------
#remove all objects and then check
#remove all objects and then check
rm(list = ls())
ls()
# Adelia
# Adelia
# Adelia
#dettach all packages
detach()
# data1=reading the first database (work professor x student)
# Header for data1 ####################################
# GRO = Group, ENC=Encounter with mentor and researcher
# 1=Control, 2=Template, 3=Swarm, 4=Template+Swarm
# EXP=Explanation from mentor to researcher, QOW=Quality of writing
# SOR=satisfaction of Student, CWR=Communication from mentor with Student
# reading the second database (introdcution corrections)
# Install packages
install.packages("nortest") # to use Anderson-Darling test
install.packages("RCurl") # to read remote spreadsheet in GDocs
# Load packages
library(nortest)
library(RCurl)
# Reading remoda data in GDocs ------------------------------------------------------------------------
options(RCurlOptions = list(capath = system.file("CurlSSL", "cacert.pem", package = "RCurl"), ssl.verifypeer = FALSE))
uem.data <- getURL("https://docs.google.com/spreadsheet/pub?key=0ArSWDBjbC6hHdDM5eGFubjJtbGV3Ukd0cEpaMDRHcFE&single=true&gid=0&output=csv")
data1<-read.csv(textConnection(uem.data), header=T)
attach(data1)
# Verify normality
ad.test(enc)
ad.test(exp)
ad.test(qow)
ad.test(sor)
ad.test(cwr)
# Data nor normal - verify variance - use Kruskal Wallis Test
kruskal.test(enc ~ gro)
kruskal.test(exp ~ gro)
kruskal.test(qow ~ gro)
kruskal.test(sor ~ gro)
kruskal.test(cwr ~ gro)
# One-Way Anova
aov(enc~gro)
aov(exp~gro)
aov(qow~gro)
aov(sor~gro)
aov(cwr~gro)
#building a boxplot to analyse the groups and actions
boxplot(enc~gro)
boxplot(exp~gro)
boxplot(qow~gro)
boxplot(sor~gro)
boxplot(cwr~gro)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.rekognition_operations.R
\name{start_stream_processor}
\alias{start_stream_processor}
\title{Starts processing a stream processor}
\usage{
start_stream_processor(Name)
}
\arguments{
\item{Name}{[required] The name of the stream processor to start processing.}
}
\description{
Starts processing a stream processor. You create a stream processor by calling CreateStreamProcessor. To tell \code{StartStreamProcessor} which stream processor to start, use the value of the \code{Name} field specified in the call to \code{CreateStreamProcessor}.
}
\section{Accepted Parameters}{
\preformatted{start_stream_processor(
Name = "string"
)
}
}
| /service/paws.rekognition/man/start_stream_processor.Rd | permissive | CR-Mercado/paws | R | false | true | 720 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.rekognition_operations.R
\name{start_stream_processor}
\alias{start_stream_processor}
\title{Starts processing a stream processor}
\usage{
start_stream_processor(Name)
}
\arguments{
\item{Name}{[required] The name of the stream processor to start processing.}
}
\description{
Starts processing a stream processor. You create a stream processor by calling CreateStreamProcessor. To tell \code{StartStreamProcessor} which stream processor to start, use the value of the \code{Name} field specified in the call to \code{CreateStreamProcessor}.
}
\section{Accepted Parameters}{
\preformatted{start_stream_processor(
Name = "string"
)
}
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 45472
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 45472
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/tipfixpoint/nusmv.tcas^3.B-f3.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 16179
c no.of clauses 45472
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 45472
c
c QBFLIB/Biere/tipfixpoint/nusmv.tcas^3.B-f3.qdimacs 16179 45472 E1 [] 0 440 15739 45472 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Biere/tipfixpoint/nusmv.tcas^3.B-f3/nusmv.tcas^3.B-f3.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 641 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 45472
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 45472
c
c Input Parameter (command line, file):
c input filename QBFLIB/Biere/tipfixpoint/nusmv.tcas^3.B-f3.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 16179
c no.of clauses 45472
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 45472
c
c QBFLIB/Biere/tipfixpoint/nusmv.tcas^3.B-f3.qdimacs 16179 45472 E1 [] 0 440 15739 45472 NONE
|
#!/usr/bin/env Rscript
# Extract unique exon coordinates (chr, start, end, strand, gene_name) from a
# gzipped GTF file and write them to a gzipped tab-separated table.
#
# Usage: ./gtf_to_exons.R input.gtf.gz output.txt.gz
#
# library() errors immediately when a dependency is missing, unlike require(),
# which only returns FALSE and lets the script fail later with a cryptic error.
library(dplyr)
library(data.table)
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 2) {
  stop("Usage is: ./gtf_to_exons.R input.gtf.gz output.txt.gz")
}
cat("Reading in ", args[1], "\n")
# "zcat <" (rather than plain "zcat file") also works on macOS, where zcat
# otherwise insists on a .Z suffix.
gtf <- fread(cmd = paste("zcat <", args[1]), data.table = FALSE,
             col.names = c("chr", "source", "feature", "start", "end", "a", "strand", "b", "dat"))
cat("Processing...\n")
gtf <- gtf %>% filter(feature == "exon")
# Locate the gene_name "..." entries inside the attribute column (dat)
gn_where <- regexpr("gene_name \"[^ ]+\"", gtf$dat)
gn_where <- gn_where + 11  # skip over the 'gene_name "' label (11 characters)
attr(gn_where, "match.length") <- attr(gn_where, "match.length") - 11 - 1  # cut off trailing quote mark
gtf$gene_name <- regmatches(gtf$dat, gn_where)
if (any(gtf$gene_name == "")) {
  cat("Warning: there are empty 'gene_name' attributes, using 'gene_id' for them\n")
  gi_where <- regexpr("gene_id \"[^ ]+\"", gtf$dat)  # find gene_ids in dat
  gi_where <- gi_where + 9  # skip over the 'gene_id "' label (9 characters)
  attr(gi_where, "match.length") <- attr(gi_where, "match.length") - 9 - 1  # cut off trailing quote mark
  gtf$gene_id <- regmatches(gtf$dat, gi_where)
  gtf$gene_name[gtf$gene_name == ""] <- gtf$gene_id[gtf$gene_name == ""]
  gtf <- select(gtf, -gene_id)
}
# Keep only the coordinate columns and drop duplicate exon records
gtf <- gtf %>% select(chr, start, end, strand, gene_name) %>% distinct()
cat("Saving exons to ", args[2], "\n")
gz <- gzfile(args[2], "w")
write.table(gtf, gz, row.names = FALSE, quote = FALSE, sep = "\t")
close(gz)
| /scripts/gtf_to_exons.R | permissive | davidaknowles/leafcutter | R | false | false | 1,519 | r | #!/usr/bin/env Rscript
# Extract unique exon coordinates (chr, start, end, strand, gene_name) from a
# gzipped GTF file and write them to a gzipped tab-separated table.
#
# Usage: ./gtf_to_exons.R input.gtf.gz output.txt.gz
#
# library() errors immediately when a dependency is missing, unlike require(),
# which only returns FALSE and lets the script fail later with a cryptic error.
library(dplyr)
library(data.table)
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 2) {
  stop("Usage is: ./gtf_to_exons.R input.gtf.gz output.txt.gz")
}
cat("Reading in ", args[1], "\n")
# "zcat <" (rather than plain "zcat file") also works on macOS, where zcat
# otherwise insists on a .Z suffix.
gtf <- fread(cmd = paste("zcat <", args[1]), data.table = FALSE,
             col.names = c("chr", "source", "feature", "start", "end", "a", "strand", "b", "dat"))
cat("Processing...\n")
gtf <- gtf %>% filter(feature == "exon")
# Locate the gene_name "..." entries inside the attribute column (dat)
gn_where <- regexpr("gene_name \"[^ ]+\"", gtf$dat)
gn_where <- gn_where + 11  # skip over the 'gene_name "' label (11 characters)
attr(gn_where, "match.length") <- attr(gn_where, "match.length") - 11 - 1  # cut off trailing quote mark
gtf$gene_name <- regmatches(gtf$dat, gn_where)
if (any(gtf$gene_name == "")) {
  cat("Warning: there are empty 'gene_name' attributes, using 'gene_id' for them\n")
  gi_where <- regexpr("gene_id \"[^ ]+\"", gtf$dat)  # find gene_ids in dat
  gi_where <- gi_where + 9  # skip over the 'gene_id "' label (9 characters)
  attr(gi_where, "match.length") <- attr(gi_where, "match.length") - 9 - 1  # cut off trailing quote mark
  gtf$gene_id <- regmatches(gtf$dat, gi_where)
  gtf$gene_name[gtf$gene_name == ""] <- gtf$gene_id[gtf$gene_name == ""]
  gtf <- select(gtf, -gene_id)
}
# Keep only the coordinate columns and drop duplicate exon records
gtf <- gtf %>% select(chr, start, end, strand, gene_name) %>% distinct()
cat("Saving exons to ", args[2], "\n")
gz <- gzfile(args[2], "w")
write.table(gtf, gz, row.names = FALSE, quote = FALSE, sep = "\t")
close(gz)
|
# Model daily "Aid Response" 911 call counts in Seattle: fit a log-linear
# trend with month and weekday seasonality on the earlier days and compare
# predictions against held-out later days.
df <- read.csv("~/Downloads/Seattle_Real_Time_Fire_911_Calls.csv")
library(plyr)
df$Type <- as.character(df$Type)
df$Datetime <- as.character(df$Datetime)
df <- df[2:nrow(df), ]  # drop the first record
# Strip the " +0000" UTC-offset suffix so as.POSIXct can parse the timestamp
df$Datetime <- gsub("[ ][+]0{4}", "", df$Datetime)
df$Datetime <- as.POSIXct(df$Datetime, format = "%m/%d/%Y %I:%M:%S %p")
df$Date <- as.Date(format(df$Datetime, "%m/%d/%Y"), "%m/%d/%Y")
df <- df[df$Type == "Aid Response", ]
events <- count(df, c('Type'))
# Daily incident counts, ordered chronologically, with seasonality covariates
df_count <- count(df, c('Date'))
df_count <- df_count[order(df_count$Date), ]
df_count$month <- format(df_count$Date, "%m")
df_count$weekday <- weekdays(df_count$Date)
df_count$id <- seq_len(nrow(df_count))
# Train on the first ~2000 days; freq < 500 filters outlier days
train <- df_count[df_count$id < 2000 & df_count$freq < 500, ]
test <- df_count[df_count$freq < 500 & df_count$freq > 0 & df_count$id > 2000, ]
# BUG FIX(review): the original formula was missing the "+" between
# factor(month) and factor(weekday), which is a syntax error in R.
fit <- lm(log(freq) ~ id + factor(month) + factor(weekday), data = train)
summary(fit)
y_hat <- predict(fit, test)
# In-sample fit (log scale) with out-of-sample predictions overlaid
plot(train$id, log(train$freq), col = "grey")
lines(train$id, log(train$freq), col = "black")
lines(test$id, y_hat, col = "red")
# Out-of-sample comparison
plot(test$id, log(test$freq), col = "grey")
lines(test$id, test$freq, col = "black")
lines(test$id, y_hat, col = "red")
test$smoothed <- NA  # placeholder column; smoothing was never implemented
# (an empty, malformed smoothing loop "for(k in 1:2:nrow(df_count)){}" was removed)
library(ggplot2)
ggplot(df_count, aes(x = Date, y = freq)) +
  geom_line(alpha = 0.6, colour = "navy", size = 0.2) +
  xlab("Time") + ylab("incidents") + geom_smooth() +
  coord_cartesian(ylim = c(0, 500))
| /lecture-05/Untitled.R | no_license | RaiyanK/data-science | R | false | false | 1,381 | r | df<- read.csv("~/Downloads/Seattle_Real_Time_Fire_911_Calls.csv")
library(plyr)
# Model daily "Aid Response" 911 call counts in Seattle (df was read from the
# Seattle 911 CSV just above): fit a log-linear trend with month and weekday
# seasonality on the earlier days and compare against held-out later days.
df$Type <- as.character(df$Type)
df$Datetime <- as.character(df$Datetime)
df <- df[2:nrow(df), ]  # drop the first record
# Strip the " +0000" UTC-offset suffix so as.POSIXct can parse the timestamp
df$Datetime <- gsub("[ ][+]0{4}", "", df$Datetime)
df$Datetime <- as.POSIXct(df$Datetime, format = "%m/%d/%Y %I:%M:%S %p")
df$Date <- as.Date(format(df$Datetime, "%m/%d/%Y"), "%m/%d/%Y")
df <- df[df$Type == "Aid Response", ]
events <- count(df, c('Type'))
# Daily incident counts, ordered chronologically, with seasonality covariates
df_count <- count(df, c('Date'))
df_count <- df_count[order(df_count$Date), ]
df_count$month <- format(df_count$Date, "%m")
df_count$weekday <- weekdays(df_count$Date)
df_count$id <- seq_len(nrow(df_count))
# Train on the first ~2000 days; freq < 500 filters outlier days
train <- df_count[df_count$id < 2000 & df_count$freq < 500, ]
test <- df_count[df_count$freq < 500 & df_count$freq > 0 & df_count$id > 2000, ]
# BUG FIX(review): the original formula was missing the "+" between
# factor(month) and factor(weekday), which is a syntax error in R.
fit <- lm(log(freq) ~ id + factor(month) + factor(weekday), data = train)
summary(fit)
y_hat <- predict(fit, test)
# In-sample fit (log scale) with out-of-sample predictions overlaid
plot(train$id, log(train$freq), col = "grey")
lines(train$id, log(train$freq), col = "black")
lines(test$id, y_hat, col = "red")
# Out-of-sample comparison
plot(test$id, log(test$freq), col = "grey")
lines(test$id, test$freq, col = "black")
lines(test$id, y_hat, col = "red")
test$smoothed <- NA  # placeholder column; smoothing was never implemented
# (an empty, malformed smoothing loop "for(k in 1:2:nrow(df_count)){}" was removed)
library(ggplot2)
ggplot(df_count, aes(x = Date, y = freq)) +
  geom_line(alpha = 0.6, colour = "navy", size = 0.2) +
  xlab("Time") + ylab("incidents") + geom_smooth() +
  coord_cartesian(ylim = c(0, 500))
|
# v1.5.1
# This program creates an MDS model of the given data and plots it and
# goodness-of-fit statistics in several forms. The data should be given as a
# CSV file, with the first column containing the names of what each row is, and
# with the first row containing column headers.
#
# Several files will be generated:
#  - plot.png        A wordcloud plot of the MDS model
#  - plain_plot.png  An unlabeled plot of the MDS model
#  - gof_dim.png     A plot of MDS goodness-of-fit vs. the MDS dimension
#  - gof_exp.png     A plot of MDS goodness-of-fit vs. the Minkowski exponent
#  - points.csv      A comma separated file of the xy-coordinates of the model
#                    along with their corresponding names.
#
# All plots and data files will be saved in the current working directory.
#
# This program should be run as:
#   $ Rscript MDS.R [colors]
# [colors] is an optional .csv file that specifies the r,g,b,a [0,1] values
# that each corresponding point should be colored with.
#
# Notes on installing the necessary packages:
#   install.packages("wordcloud", repos="http://cran.us.r-project.org")
#   install.packages("RColorBrewer", repos="http://cran.us.r-project.org")
#   install.packages("scatterplot3d", repos="http://cran.us.r-project.org")
#
# Changelog v1.5.1
#  - Added 3D color plotting
#  - 1D plot now plots with the y-axis uniformly distributed
# Changelog v1.5
#  - Added 1D plotting
#  - Added 3D plotting
# Changelog v1.4
#  - Added option to specify colors for each points for the unlabeled plot
# Changelog v1.3
#  - Added unlabeled plot generation
#  - Changed dimensions of word cloud plot to be more square
#  - MDS points now saved to text file
# Changelog v1.2
#  - Added plotting of goodness of fit vs. the MDS dimension
#  - Added plotting of goodness of fit vs. the Minkowski exponent
# Changelog v1.1
#  - Added ability to apply function to distances

# Check if a file for point colors was given on the command line
args <- commandArgs(trailingOnly = TRUE)
colors_file <- ""
if (length(args) != 0) {
  colors_file <- args[1]
}

# Import data (and ignore the first column, since it just has names)
raw_data <- read.csv(file = "filtered.csv", header = TRUE, sep = ",")
data <- raw_data[-1]
names <- t(raw_data[1])

# Function to apply to all values before computing distances.
# Swap in the commented expression for a signed square-root transform.
f <- function(x) {
  # return(abs(x)^0.5 * sign(x))
  return(x)
}

# Compute Minkowski (p = 2, i.e. Euclidean) distances and a 2-D MDS embedding
distances <- dist((apply(data, MARGIN = c(1, 2), f)), method = "minkowski", p = 2)
mds <- cmdscale(distances, k = 2)

# For the 1D plot: jitter the y-axis uniformly so points do not overlap
#uniform <- as.vector(rep(0, length(mds[,1])))
uniform <- runif(length(mds[, 1]), -0.5, 0.5)

# For the 3D plot
#library("scatterplot3d")
#png(filename="3d_plain_plot.png")
#scatterplot3d(mds[,1], mds[,2], mds[,3], main="3D Plot")
#scatterplot3d(mds[,1], mds[,2], mds[,3], main="3D Plot", color=colors, pch=symbols)

# Create the unlabeled plot. Color the points if a color file was given.
png(filename = "plain_plot.png")
if (colors_file != "") {
  # Read one r,g,b,a row per point and preallocate the per-point color and
  # symbol vectors (the original allocated a list here and then immediately
  # discarded it, growing plain vectors inside the loop instead).
  raw_colors <- read.csv(file = colors_file, header = FALSE, sep = ",")
  colors <- character(nrow(raw_colors))
  symbols <- integer(nrow(raw_colors))
  for (i in seq_len(nrow(raw_colors))) {
    r <- raw_colors[i, 1]
    g <- raw_colors[i, 2]
    b <- raw_colors[i, 3]
    a <- raw_colors[i, 4]
    colors[i] <- rgb(r, g, b, a)
    # Opaque black points are drawn as open circles, all others as filled dots
    if (colors[i] == "#000000FF") {
      symbols[i] <- 1
    } else {
      symbols[i] <- 16
    }
  }
  plot(mds[, 1], mds[, 2], xlab = "x", ylab = "y", col = colors, pch = symbols)
  #plot(mds[,1], uniform, xlab="x", ylab="y", col=colors, pch=symbols, ylim=c(-3, 3))
} else {
  plot(mds[, 1], mds[, 2], xlab = "x", ylab = "y")
  #plot(mds[,1], uniform, xlab="x", ylab="y", ylim=c(-3, 3))
}
dev.off()  # flush plain_plot.png (previously left open until script exit)

# Create the labeled wordcloud plot
library("wordcloud")
png(filename = "plot.png", width = 4000, height = 4000, units = "px")
textplot(mds[, 1], mds[, 2], names, cex = 0.8)
#textplot(mds[,1], uniform, names, cex=0.8)
dev.off()  # flush plot.png

# Save the MDS point coordinates to file
points <- paste(names, mds[, 1], mds[, 2], sep = ",")
write(points, "points.csv")

# Plot goodness of fit vs. dimension
#DIM_MAX <- 10
#gofs_dim <- vector("list", DIM_MAX)
#for(i in 1:DIM_MAX) {
#  gofs_dim[[i]] <- cmdscale(distances, k=i, eig=TRUE)$GOF[1]
#}
#png(filename="gof_dim.png")
#plot(1:DIM_MAX, gofs_dim, xlab="MDS Dimension", ylab="MDS Goodness of Fit")

# Plot goodness of fit vs. Minkowski exponent
#EXP_MAX <- 10
#gofs_exp <- vector("list", EXP_MAX)
#for(i in 1:EXP_MAX) {
#  distances = dist((apply(data, MARGIN=c(1,2), f)), method="minkowski", p=i)
#  gofs_exp[[i]] <- cmdscale(distances, k=2, eig=TRUE)$GOF[1]
#}
#png(filename="gof_exp.png")
#plot(1:EXP_MAX, gofs_exp, xlab="Minkowski Exponent", ylab="MDS Goodness of Fit")
| /MDS.R | no_license | sparemind/MDS-Modeling-Tools | R | false | false | 4,632 | r | # v1.5.1
# This program creates an MDS model of the given data and plots it and
# goodness-of-fit statistics in several forms. The data should be given as a
# CSV file, with the first column containing the names of what each row is, and
# with the first row containing column headers.
#
# Several files will be generated:
#  - plot.png        A wordcloud plot of the MDS model
#  - plain_plot.png  An unlabeled plot of the MDS model
#  - gof_dim.png     A plot of MDS goodness-of-fit vs. the MDS dimension
#  - gof_exp.png     A plot of MDS goodness-of-fit vs. the Minkowski exponent
#  - points.csv      A comma separated file of the xy-coordinates of the model
#                    along with their corresponding names.
#
# All plots and data files will be saved in the current working directory.
#
# This program should be run as:
#   $ Rscript MDS.R [colors]
# [colors] is an optional .csv file that specifies the r,g,b,a [0,1] values
# that each corresponding point should be colored with.
#
# Notes on installing the necessary packages:
#   install.packages("wordcloud", repos="http://cran.us.r-project.org")
#   install.packages("RColorBrewer", repos="http://cran.us.r-project.org")
#   install.packages("scatterplot3d", repos="http://cran.us.r-project.org")
#
# Changelog v1.5.1
#  - Added 3D color plotting
#  - 1D plot now plots with the y-axis uniformly distributed
# Changelog v1.5
#  - Added 1D plotting
#  - Added 3D plotting
# Changelog v1.4
#  - Added option to specify colors for each points for the unlabeled plot
# Changelog v1.3
#  - Added unlabeled plot generation
#  - Changed dimensions of word cloud plot to be more square
#  - MDS points now saved to text file
# Changelog v1.2
#  - Added plotting of goodness of fit vs. the MDS dimension
#  - Added plotting of goodness of fit vs. the Minkowski exponent
# Changelog v1.1
#  - Added ability to apply function to distances

# Check if a file for point colors was given on the command line
args <- commandArgs(trailingOnly = TRUE)
colors_file <- ""
if (length(args) != 0) {
  colors_file <- args[1]
}

# Import data (and ignore the first column, since it just has names)
raw_data <- read.csv(file = "filtered.csv", header = TRUE, sep = ",")
data <- raw_data[-1]
names <- t(raw_data[1])

# Function to apply to all values before computing distances.
# Swap in the commented expression for a signed square-root transform.
f <- function(x) {
  # return(abs(x)^0.5 * sign(x))
  return(x)
}

# Compute Minkowski (p = 2, i.e. Euclidean) distances and a 2-D MDS embedding
distances <- dist((apply(data, MARGIN = c(1, 2), f)), method = "minkowski", p = 2)
mds <- cmdscale(distances, k = 2)

# For the 1D plot: jitter the y-axis uniformly so points do not overlap
#uniform <- as.vector(rep(0, length(mds[,1])))
uniform <- runif(length(mds[, 1]), -0.5, 0.5)

# For the 3D plot
#library("scatterplot3d")
#png(filename="3d_plain_plot.png")
#scatterplot3d(mds[,1], mds[,2], mds[,3], main="3D Plot")
#scatterplot3d(mds[,1], mds[,2], mds[,3], main="3D Plot", color=colors, pch=symbols)

# Create the unlabeled plot. Color the points if a color file was given.
png(filename = "plain_plot.png")
if (colors_file != "") {
  # Read one r,g,b,a row per point and preallocate the per-point color and
  # symbol vectors (the original allocated a list here and then immediately
  # discarded it, growing plain vectors inside the loop instead).
  raw_colors <- read.csv(file = colors_file, header = FALSE, sep = ",")
  colors <- character(nrow(raw_colors))
  symbols <- integer(nrow(raw_colors))
  for (i in seq_len(nrow(raw_colors))) {
    r <- raw_colors[i, 1]
    g <- raw_colors[i, 2]
    b <- raw_colors[i, 3]
    a <- raw_colors[i, 4]
    colors[i] <- rgb(r, g, b, a)
    # Opaque black points are drawn as open circles, all others as filled dots
    if (colors[i] == "#000000FF") {
      symbols[i] <- 1
    } else {
      symbols[i] <- 16
    }
  }
  plot(mds[, 1], mds[, 2], xlab = "x", ylab = "y", col = colors, pch = symbols)
  #plot(mds[,1], uniform, xlab="x", ylab="y", col=colors, pch=symbols, ylim=c(-3, 3))
} else {
  plot(mds[, 1], mds[, 2], xlab = "x", ylab = "y")
  #plot(mds[,1], uniform, xlab="x", ylab="y", ylim=c(-3, 3))
}
dev.off()  # flush plain_plot.png (previously left open until script exit)

# Create the labeled wordcloud plot
library("wordcloud")
png(filename = "plot.png", width = 4000, height = 4000, units = "px")
textplot(mds[, 1], mds[, 2], names, cex = 0.8)
#textplot(mds[,1], uniform, names, cex=0.8)
dev.off()  # flush plot.png

# Save the MDS point coordinates to file
points <- paste(names, mds[, 1], mds[, 2], sep = ",")
write(points, "points.csv")

# Plot goodness of fit vs. dimension
#DIM_MAX <- 10
#gofs_dim <- vector("list", DIM_MAX)
#for(i in 1:DIM_MAX) {
#  gofs_dim[[i]] <- cmdscale(distances, k=i, eig=TRUE)$GOF[1]
#}
#png(filename="gof_dim.png")
#plot(1:DIM_MAX, gofs_dim, xlab="MDS Dimension", ylab="MDS Goodness of Fit")

# Plot goodness of fit vs. Minkowski exponent
#EXP_MAX <- 10
#gofs_exp <- vector("list", EXP_MAX)
#for(i in 1:EXP_MAX) {
#  distances = dist((apply(data, MARGIN=c(1,2), f)), method="minkowski", p=i)
#  gofs_exp[[i]] <- cmdscale(distances, k=2, eig=TRUE)$GOF[1]
#}
#png(filename="gof_exp.png")
#plot(1:EXP_MAX, gofs_exp, xlab="Minkowski Exponent", ylab="MDS Goodness of Fit")
|
#plots sample comparisons of MSG based retrieval and GPM.
#A color composite is printed as well and must currently manually be inserted into
#the overall plot
#
# Inputs (all under mainpath): world-border shapefile, rain-station shapefile,
# IMERG rainfall GeoTIFFs, MSG-based rate/area predictions, MSG channel
# stacks, and the saved IMERG comparison table. Outputs: two PNG figures and
# a CSV of summary statistics written to figurepath.
library(rgdal)
library(raster)
library(viridis)
library(Rsenal)
library(latticeExtra)
# Scene timestamp (YYYYMMDDHH) and the rain-rate value at which the plot
# color scale saturates
dates <- c("2014042410")
saturationpoint <- 10
mainpath <- "/media/memory01/data/IDESSA/"
#mainpath <- "/media/hanna/data/CopyFrom181/"
auxdatpath <- paste0(mainpath,"auxiliarydata/")
stationpath <- paste0(mainpath,"statdat/")
IMERGpath <- paste0(mainpath,"Results/IMERG/")
evaluationpath <- paste0(mainpath,"Results/Evaluation/")
MSGpredpath <- paste0(mainpath,"Results/Predictions/2014/")
figurepath <- paste0(mainpath,"Results/Figures/sampleimages2/")
dir.create(figurepath)
# Country borders and rain-station locations
base <- readOGR(paste0(auxdatpath,"TM_WORLD_BORDERS-0.3.shp"),
                "TM_WORLD_BORDERS-0.3")
stations <- readOGR(paste0(stationpath,"allStations.shp"),
                    "allStations")
date <- dates
# Load the rasters for this date: IMERG rainfall, predicted rain rate,
# predicted rain area, and the MSG channel stack
IMERG <- raster(list.files(IMERGpath,pattern=paste0(date,".tif$"),full.names = TRUE))
rate <- raster(list.files(paste0(MSGpredpath,"/Rate"),pattern=paste0(date,".tif$"),full.names = TRUE))
area <- raster(list.files(paste0(MSGpredpath,"/Area"),pattern=paste0(date,".tif$"),full.names = TRUE))
MSG <- stack(list.files(paste0(MSGpredpath,"/MSG/"),pattern=paste0(date,".tif$"),full.names = TRUE))
# Zero the predicted rate where the area product has class 2
# (NOTE(review): presumably "no rain" -- confirm against the area encoding),
# and restrict IMERG to the predicted-area footprint
rate[area==2] <- 0
IMERG <- mask(IMERG,area)
stck <- stack(rate,IMERG)
# Reproject to geographic coordinates, clip to the country borders and to a
# southern-Africa bounding box (lon 11.4..36.2, lat -35.4..-17)
stck <- projectRaster(stck, crs="+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
MSG <- projectRaster(MSG, crs="+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
stck <- mask(stck,base)
base <- crop(base,c(11.4,36.2,-35.4,-17))
MSG <- crop(MSG, c(11.4,36.2,-35.4,-17))
stck <- crop(stck, c(11.4,36.2,-35.4,-17))
# Prepend an all-NA dummy layer as a placeholder panel for the RGB composite
# (which is produced separately and inserted manually; see header comment).
# The layer labelled "MSG" is the MSG-based rate prediction.
stck <- stack(stck[[1]],stck)
values(stck[[1]]) <- NA
names(stck)<- c("RGB","MSG","IMERG")
# Clamp IMERG values to the color-scale saturation point
stck$IMERG[stck$IMERG>saturationpoint] <- saturationpoint
#########################################
#observed rainfall rasterize
# Attach station-observed rainfall for this timestamp and rasterize it onto
# the prediction grid, aggregated (factor 18, max) so stations stay visible
comp <- get(load(paste0(evaluationpath,"IMERGComparison.RData")))
comp <- comp[comp$Date.x=="201404241000",]
station_all <- stations
stations$Obs <- merge(stations,comp,by.x="Name",by.y="Station")$RR_obs
stations <- stations[!is.na(stations$Obs),]
statrstr <- rasterize(stations,stck[[1]],field="Obs")
statrstragg <- aggregate(statrstr,18,fun=max)
statrstragg <- resample(statrstragg,stck[[1]])
stck$Observed <- statrstragg
#########################################
#plot
########################################
# Multi-panel rain plot; grey encodes zero, the viridis ramp the rain rates
spp <- spplot(stck,col.regions = c("grey",rev(viridis(100))),
              scales=list(draw=FALSE,x=list(rot=90)),
              at=seq(0.0,saturationpoint,by=0.2),
              ncol=2,nrow=2,
              maxpixels=ncell(stck)*0.6,
              par.settings = list(strip.background=list(col="lightgrey")),
              sp.layout=list("sp.polygons", base, col = "black", first = FALSE))
# False-color MSG composite (channels 2/4/9) with country borders overlaid
png(paste0(figurepath,"rgb_",date,".png"),
    width=8,height=8,units="cm",res = 600,type="cairo")
plotRGB(MSG,r=2,g=4,b=9,stretch="lin")
plot(base,add=T,lwd=1.4)
dev.off()
# Rain panels with all station locations overlaid as black crosses
png(paste0(figurepath,"spp_",date,".png"),
    width=17,height=16,units="cm",res = 600,type="cairo")
spp +as.layer(spplot(station_all,zcol="type",col.regions=c("black"),
                     pch=3,cex=0.5
))
dev.off()
###summary statistics
# Classification skill of the rain-area products and (Spearman) regression
# skill of the rain-rate products, each versus the station observations
results_area <- rbind(classificationStats(comp$RA_pred,comp$RA_obs),
                      classificationStats(comp$RA_IMERG,comp$RA_obs))
results_rate <- rbind(regressionStats(comp$RR_pred,comp$RR_obs,adj.rsq = FALSE,method="spearman"),
                      regressionStats(comp$IMERG,comp$RR_obs,adj.rsq = FALSE,method="spearman"))
stats <- cbind(results_area,results_rate)
write.csv(stats,paste0(figurepath,"/stats_",date,".csv"))
| /IDESSA/develop_SA_retrieval/Review/sampleimage_review.R | no_license | environmentalinformatics-marburg/magic | R | false | false | 3,863 | r | #plots sample comparisons of MSG based retrieval and GPM.
#A color composite is printed as well and must currently manually be inserted into
#the overall plot
#
# Plots sample comparisons of the MSG-based rainfall retrieval and GPM/IMERG.
# Inputs (all under mainpath): world-border shapefile, rain-station shapefile,
# IMERG rainfall GeoTIFFs, MSG-based rate/area predictions, MSG channel
# stacks, and the saved IMERG comparison table. Outputs: two PNG figures and
# a CSV of summary statistics written to figurepath.
library(rgdal)
library(raster)
library(viridis)
library(Rsenal)
library(latticeExtra)
# Scene timestamp (YYYYMMDDHH) and the rain-rate value at which the plot
# color scale saturates
dates <- c("2014042410")
saturationpoint <- 10
mainpath <- "/media/memory01/data/IDESSA/"
#mainpath <- "/media/hanna/data/CopyFrom181/"
auxdatpath <- paste0(mainpath,"auxiliarydata/")
stationpath <- paste0(mainpath,"statdat/")
IMERGpath <- paste0(mainpath,"Results/IMERG/")
evaluationpath <- paste0(mainpath,"Results/Evaluation/")
MSGpredpath <- paste0(mainpath,"Results/Predictions/2014/")
figurepath <- paste0(mainpath,"Results/Figures/sampleimages2/")
dir.create(figurepath)
# Country borders and rain-station locations
base <- readOGR(paste0(auxdatpath,"TM_WORLD_BORDERS-0.3.shp"),
                "TM_WORLD_BORDERS-0.3")
stations <- readOGR(paste0(stationpath,"allStations.shp"),
                    "allStations")
date <- dates
# Load the rasters for this date: IMERG rainfall, predicted rain rate,
# predicted rain area, and the MSG channel stack
IMERG <- raster(list.files(IMERGpath,pattern=paste0(date,".tif$"),full.names = TRUE))
rate <- raster(list.files(paste0(MSGpredpath,"/Rate"),pattern=paste0(date,".tif$"),full.names = TRUE))
area <- raster(list.files(paste0(MSGpredpath,"/Area"),pattern=paste0(date,".tif$"),full.names = TRUE))
MSG <- stack(list.files(paste0(MSGpredpath,"/MSG/"),pattern=paste0(date,".tif$"),full.names = TRUE))
# Zero the predicted rate where the area product has class 2
# (NOTE(review): presumably "no rain" -- confirm against the area encoding),
# and restrict IMERG to the predicted-area footprint
rate[area==2] <- 0
IMERG <- mask(IMERG,area)
stck <- stack(rate,IMERG)
# Reproject to geographic coordinates, clip to the country borders and to a
# southern-Africa bounding box (lon 11.4..36.2, lat -35.4..-17)
stck <- projectRaster(stck, crs="+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
MSG <- projectRaster(MSG, crs="+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
stck <- mask(stck,base)
base <- crop(base,c(11.4,36.2,-35.4,-17))
MSG <- crop(MSG, c(11.4,36.2,-35.4,-17))
stck <- crop(stck, c(11.4,36.2,-35.4,-17))
# Prepend an all-NA dummy layer as a placeholder panel for the RGB composite
# (which is produced separately and inserted manually; see header comment).
# The layer labelled "MSG" is the MSG-based rate prediction.
stck <- stack(stck[[1]],stck)
values(stck[[1]]) <- NA
names(stck)<- c("RGB","MSG","IMERG")
# Clamp IMERG values to the color-scale saturation point
stck$IMERG[stck$IMERG>saturationpoint] <- saturationpoint
#########################################
#observed rainfall rasterize
# Attach station-observed rainfall for this timestamp and rasterize it onto
# the prediction grid, aggregated (factor 18, max) so stations stay visible
comp <- get(load(paste0(evaluationpath,"IMERGComparison.RData")))
comp <- comp[comp$Date.x=="201404241000",]
station_all <- stations
stations$Obs <- merge(stations,comp,by.x="Name",by.y="Station")$RR_obs
stations <- stations[!is.na(stations$Obs),]
statrstr <- rasterize(stations,stck[[1]],field="Obs")
statrstragg <- aggregate(statrstr,18,fun=max)
statrstragg <- resample(statrstragg,stck[[1]])
stck$Observed <- statrstragg
#########################################
#plot
########################################
# Multi-panel rain plot; grey encodes zero, the viridis ramp the rain rates
spp <- spplot(stck,col.regions = c("grey",rev(viridis(100))),
              scales=list(draw=FALSE,x=list(rot=90)),
              at=seq(0.0,saturationpoint,by=0.2),
              ncol=2,nrow=2,
              maxpixels=ncell(stck)*0.6,
              par.settings = list(strip.background=list(col="lightgrey")),
              sp.layout=list("sp.polygons", base, col = "black", first = FALSE))
# False-color MSG composite (channels 2/4/9) with country borders overlaid
png(paste0(figurepath,"rgb_",date,".png"),
    width=8,height=8,units="cm",res = 600,type="cairo")
plotRGB(MSG,r=2,g=4,b=9,stretch="lin")
plot(base,add=T,lwd=1.4)
dev.off()
# Rain panels with all station locations overlaid as black crosses
png(paste0(figurepath,"spp_",date,".png"),
    width=17,height=16,units="cm",res = 600,type="cairo")
spp +as.layer(spplot(station_all,zcol="type",col.regions=c("black"),
                     pch=3,cex=0.5
))
dev.off()
###summary statistics
# Classification skill of the rain-area products and (Spearman) regression
# skill of the rain-rate products, each versus the station observations
results_area <- rbind(classificationStats(comp$RA_pred,comp$RA_obs),
                      classificationStats(comp$RA_IMERG,comp$RA_obs))
results_rate <- rbind(regressionStats(comp$RR_pred,comp$RR_obs,adj.rsq = FALSE,method="spearman"),
                      regressionStats(comp$IMERG,comp$RR_obs,adj.rsq = FALSE,method="spearman"))
stats <- cbind(results_area,results_rate)
write.csv(stats,paste0(figurepath,"/stats_",date,".csv"))
|
# Plot 2: line chart of Global Active Power over 2007-02-01..2007-02-02,
# saved as plot2.png (480x480).
library(readr)
library(dplyr)

# Source file is semicolon-separated; trim whitespace, keep quotes literal.
house_power <- read_delim(
  "/Users/whoyos21/Downloads/Course_4_Exploratory_Data_Analysis_with_R/Week_1/Project_1/household_power_consumption.txt",
  ";", escape_double = FALSE, trim_ws = TRUE
)

# Dates arrive as day/month/year strings.
house_power$Date <- as.Date(house_power$Date, "%d/%m/%Y")

# Keep only the first two days of February 2007.
house_power_feb <- filter(house_power,
                          Date >= as.Date("2007-2-1"),
                          Date <= as.Date("2007-2-2"))

# Combine the separate date and time columns into one POSIXct timestamp.
date_and_time <- paste(house_power_feb$Date, house_power_feb$Time)
house_power_feb <- cbind(house_power_feb, date_and_time)
house_power_feb$date_and_time <- as.POSIXct(date_and_time)

# Draw on the screen device, then copy it to the PNG file.
plot(house_power_feb$Global_active_power ~ house_power_feb$date_and_time,
     type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.copy(png, "plot2.png", width = 480, height = 480)
dev.off()
| /plot2.R | no_license | whoyos21/ExData_Plotting1 | R | false | false | 815 | r | library(readr)
library(dplyr)
# Read the semicolon-separated power-consumption file.
house_power <- read_delim("/Users/whoyos21/Downloads/Course_4_Exploratory_Data_Analysis_with_R/Week_1/Project_1/household_power_consumption.txt",
";", escape_double = FALSE, trim_ws = TRUE)
# Dates are day/month/year strings.
house_power$Date <- as.Date(house_power$Date, "%d/%m/%Y")
# Keep only 2007-02-01 and 2007-02-02.
house_power_feb <- house_power %>%
filter(Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
# Build one POSIXct timestamp column from the separate Date and Time columns
# (first bound as character, then converted in place).
date_and_time <- paste(house_power_feb$Date, house_power_feb$Time)
house_power_feb <- cbind(house_power_feb, date_and_time)
house_power_feb$date_and_time <- as.POSIXct(date_and_time)
# Line plot on screen, then copy the screen device to plot2.png.
plot(house_power_feb$Global_active_power ~ house_power_feb$date_and_time, type = "l",
ylab = "Global Active Power (kilowatts)", xlab = "")
dev.copy(png, "plot2.png", width = 480, height = 480)
dev.off()
|
# Exercise 1: working with data frames (review)

# Install devtools package: allows installations from GitHub
install.packages("devtools")
# Install "fueleconomy" dataset from GitHub
devtools::install_github("hadley/fueleconomy")

# Use the `library()` function to load the "fueleconomy" package
library(fueleconomy)

# You should now have access to the `vehicles` data frame
# You can use `View()` to inspect it
View(vehicles)

# Select the different manufacturers (makes) of the cars in this data set.
# Save this vector in a variable
makes <- vehicles$make

# Use the `unique()` function to determine how many different car manufacturers
# are represented by the data set
length(unique(makes))

# Filter the data set for vehicles manufactured in 1997.
# `year` is numeric, so compare against the number 1997 rather than the
# string "1997" (the string comparison only worked through coercion).
cars_1997 <- vehicles[vehicles$year == 1997, ]

# Arrange the 1997 cars by highway (`hwy`) gas milage
# Hint: use the `order()` function to get a vector of indices in order by value
# See also:
# https://www.r-bloggers.com/r-sorting-a-data-frame-by-the-contents-of-a-column/
hwy_gas_milage <- cars_1997[order(cars_1997$hwy), ]

# Mutate the 1997 cars data frame to add a column `average` that has the average
# gas milage (between city and highway mpg) for each car
cars_1997$average <- (cars_1997$hwy + cars_1997$cty) / 2

# Filter the whole vehicles data set for 2-Wheel Drive vehicles that get more
# than 20 miles/gallon in the city.
# Save this new data frame in a variable.
more_than_20 <- vehicles[vehicles$drive == "2-Wheel Drive" & vehicles$cty > 20, ]

# Of the above vehicles, what is the vehicle ID of the vehicle with the worst
# hwy mpg?
# Hint: filter for the worst vehicle, then select its ID.
worst_hwy_mpg <- more_than_20[more_than_20$hwy == min(more_than_20$hwy), ]
worst_id <- worst_hwy_mpg$id

# Write a function that takes a `year_choice` and a `make_choice` as parameters,
# and returns the vehicle model that gets the most hwy miles/gallon of vehicles
# of that make in that year.
# You'll need to filter more (and do some selecting)!
# Return the model(s) with the best highway mpg for a given make and year.
#
# Args:
#   year_choice: model year to filter on.
#   make_choice: manufacturer name to filter on.
#   data: data frame with columns make, year, hwy and model; defaults to
#         the fueleconomy `vehicles` data (lazy default, so callers may
#         inject their own data, e.g. for testing).
#
# Returns: the `model` value(s) of the best-hwy row(s); empty when no
#          vehicle matches the make/year combination.
most_miles <- function(year_choice, make_choice, data = vehicles) {
  subset_rows <- data[data$make == make_choice & data$year == year_choice, ]
  if (nrow(subset_rows) == 0) {
    # No matching vehicles: return an empty result instead of letting
    # max() warn about an empty set.
    return(subset_rows$model)
  }
  # which() drops NA comparisons, so rows with missing hwy are ignored.
  best <- which(subset_rows$hwy == max(subset_rows$hwy, na.rm = TRUE))
  subset_rows[best, "model"]
}
# What was the most efficient Honda model of 1995?
# (the model name with the highest highway mpg among 1995 Hondas)
Honda <- most_miles(1995, "Honda")
| /chapter-11-exercises/exercise-1/exercise.R | permissive | njkpark/book-exercises | R | false | false | 2,373 | r | # Exercise 1: working with data frames (review)
# Install devtools package: allows installations from GitHub
install.packages("devtools")
# Install "fueleconomy" dataset from GitHub
devtools::install_github("hadley/fueleconomy")
# Use the `library()` function to load the "fueleconomy" package
library(fueleconomy)
# You should now have access to the `vehicles` data frame
# You can use `View()` to inspect it
View(vehicles)
# Select the different manufacturers (makes) of the cars in this data set.
# Save this vector in a variable
makes <- vehicles$make
# Use the `unique()` function to determine how many different car manufacturers
# are represented by the data set
length(unique(makes))
# Filter the data set for vehicles manufactured in 1997
# (note: `year` is numeric; the string "1997" matches via coercion)
cars_1997 <- vehicles[vehicles$year == "1997", ]
# Arrange the 1997 cars by highway (`hwy`) gas milage
# Hint: use the `order()` function to get a vector of indices in order by value
# See also:
# https://www.r-bloggers.com/r-sorting-a-data-frame-by-the-contents-of-a-column/
hwy_gas_milage <- cars_1997[order(cars_1997$hwy),]
# Mutate the 1997 cars data frame to add a column `average` that has the average
# gas milage (between city and highway mpg) for each car
cars_1997$average <- (cars_1997$hwy + cars_1997$cty) / 2
# Filter the whole vehicles data set for 2-Wheel Drive vehicles that get more
# than 20 miles/gallon in the city.
# Save this new data frame in a variable.
more_than_20 <- vehicles[vehicles$drive == "2-Wheel Drive" & vehicles$cty > 20, ]
# Of the above vehicles, what is the vehicle ID of the vehicle with the worst
# hwy mpg?
# Hint: filter for the worst vehicle, then select its ID.
worst_hwy_mpg <- more_than_20[more_than_20$hwy == min(more_than_20$hwy), ]
worst_id <- worst_hwy_mpg$id
# Write a function that takes a `year_choice` and a `make_choice` as parameters,
# and returns the vehicle model that gets the most hwy miles/gallon of vehicles
# of that make in that year.
# You'll need to filter more (and do some selecting)!
most_miles <- function(year_choice, make_choice) {
select_year_make <- vehicles[vehicles$make == make_choice & vehicles$year == year_choice, ]
vehicle_model <- select_year_make[select_year_make$hwy == max(select_year_make$hwy), "model"]
return(vehicle_model)
}
# What was the most efficient Honda model of 1995?
Honda <- most_miles(1995, "Honda")
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' AWS Transfer Family
#'
#' @description
#' AWS Transfer Family is a fully managed service that enables the transfer
#' of files over the the File Transfer Protocol (FTP), File Transfer
#' Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol
#' (SFTP) directly into and out of Amazon Simple Storage Service (Amazon
#' S3). AWS helps you seamlessly migrate your file transfer workflows to
#' AWS Transfer Family by integrating with existing authentication systems,
#' and providing DNS routing with Amazon Route 53 so nothing changes for
#' your customers and partners, or their applications. With your data in
#' Amazon S3, you can use it with AWS services for processing, analytics,
#' machine learning, and archiving. Getting started with AWS Transfer
#' Family is easy since there is no infrastructure to buy and set up.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#'
#' @section Service syntax:
#' ```
#' svc <- transfer(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- transfer()
#' svc$create_server(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=transfer_create_server]{create_server} \tab Instantiates an autoscaling virtual server based on the selected file transfer protocol in AWS \cr
#' \link[=transfer_create_user]{create_user} \tab Creates a user and associates them with an existing file transfer protocol-enabled server \cr
#' \link[=transfer_delete_server]{delete_server} \tab Deletes the file transfer protocol-enabled server that you specify \cr
#' \link[=transfer_delete_ssh_public_key]{delete_ssh_public_key} \tab Deletes a user's Secure Shell (SSH) public key \cr
#' \link[=transfer_delete_user]{delete_user} \tab Deletes the user belonging to a file transfer protocol-enabled server you specify \cr
#' \link[=transfer_describe_server]{describe_server} \tab Describes a file transfer protocol-enabled server that you specify by passing the ServerId parameter \cr
#' \link[=transfer_describe_user]{describe_user} \tab Describes the user assigned to the specific file transfer protocol-enabled server, as identified by its ServerId property \cr
#' \link[=transfer_import_ssh_public_key]{import_ssh_public_key} \tab Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to the specific file transfer protocol-enabled server, identified by ServerId\cr
#' \link[=transfer_list_servers]{list_servers} \tab Lists the file transfer protocol-enabled servers that are associated with your AWS account \cr
#' \link[=transfer_list_tags_for_resource]{list_tags_for_resource} \tab Lists all of the tags associated with the Amazon Resource Number (ARN) you specify \cr
#' \link[=transfer_list_users]{list_users} \tab Lists the users for a file transfer protocol-enabled server that you specify by passing the ServerId parameter \cr
#' \link[=transfer_start_server]{start_server} \tab Changes the state of a file transfer protocol-enabled server from OFFLINE to ONLINE \cr
#' \link[=transfer_stop_server]{stop_server} \tab Changes the state of a file transfer protocol-enabled server from ONLINE to OFFLINE \cr
#' \link[=transfer_tag_resource]{tag_resource} \tab Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN) \cr
#' \link[=transfer_test_identity_provider]{test_identity_provider} \tab If the IdentityProviderType of a file transfer protocol-enabled server is API_Gateway, tests whether your API Gateway is set up successfully \cr
#' \link[=transfer_untag_resource]{untag_resource} \tab Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN) \cr
#' \link[=transfer_update_server]{update_server} \tab Updates the file transfer protocol-enabled server's properties after that server has been created \cr
#' \link[=transfer_update_user]{update_user} \tab Assigns new properties to a user
#' }
#'
#' @rdname transfer
#' @export
# Public constructor: returns the list of Transfer-service operation
# functions with the supplied credentials/endpoint/region configuration
# applied via paws.common::set_config() (imported at the top of this file).
transfer <- function(config = list()) {
svc <- .transfer$operations
svc <- set_config(svc, config)
return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
# (This file is generated by make.paws; comments here will be lost on
# regeneration -- prefer changing the generator.)
.transfer <- list()
.transfer$operations <- list()
# Static service metadata: per-partition endpoint templates, API version
# and the JSON-RPC target prefix used when routing requests.
.transfer$metadata <- list(
service_name = "transfer",
endpoints = list("*" = list(endpoint = "transfer.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "transfer.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "transfer.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "transfer.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "Transfer",
api_version = "2018-11-05",
signing_name = "transfer",
json_version = "1.1",
target_prefix = "TransferService"
)
# Builds a service object wiring handlers for the "jsonrpc" protocol and
# signature version "v4" to the metadata above.
.transfer$service <- function(config = list()) {
handlers <- new_handlers("jsonrpc", "v4")
new_service(.transfer$metadata, handlers, config)
}
| /paws/R/transfer_service.R | permissive | jcheng5/paws | R | false | false | 5,316 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config
NULL
#' AWS Transfer Family
#'
#' @description
#' AWS Transfer Family is a fully managed service that enables the transfer
#' of files over the the File Transfer Protocol (FTP), File Transfer
#' Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol
#' (SFTP) directly into and out of Amazon Simple Storage Service (Amazon
#' S3). AWS helps you seamlessly migrate your file transfer workflows to
#' AWS Transfer Family by integrating with existing authentication systems,
#' and providing DNS routing with Amazon Route 53 so nothing changes for
#' your customers and partners, or their applications. With your data in
#' Amazon S3, you can use it with AWS services for processing, analytics,
#' machine learning, and archiving. Getting started with AWS Transfer
#' Family is easy since there is no infrastructure to buy and set up.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#'
#' @section Service syntax:
#' ```
#' svc <- transfer(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- transfer()
#' svc$create_server(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=transfer_create_server]{create_server} \tab Instantiates an autoscaling virtual server based on the selected file transfer protocol in AWS \cr
#' \link[=transfer_create_user]{create_user} \tab Creates a user and associates them with an existing file transfer protocol-enabled server \cr
#' \link[=transfer_delete_server]{delete_server} \tab Deletes the file transfer protocol-enabled server that you specify \cr
#' \link[=transfer_delete_ssh_public_key]{delete_ssh_public_key} \tab Deletes a user's Secure Shell (SSH) public key \cr
#' \link[=transfer_delete_user]{delete_user} \tab Deletes the user belonging to a file transfer protocol-enabled server you specify \cr
#' \link[=transfer_describe_server]{describe_server} \tab Describes a file transfer protocol-enabled server that you specify by passing the ServerId parameter \cr
#' \link[=transfer_describe_user]{describe_user} \tab Describes the user assigned to the specific file transfer protocol-enabled server, as identified by its ServerId property \cr
#' \link[=transfer_import_ssh_public_key]{import_ssh_public_key} \tab Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to the specific file transfer protocol-enabled server, identified by ServerId\cr
#' \link[=transfer_list_servers]{list_servers} \tab Lists the file transfer protocol-enabled servers that are associated with your AWS account \cr
#' \link[=transfer_list_tags_for_resource]{list_tags_for_resource} \tab Lists all of the tags associated with the Amazon Resource Number (ARN) you specify \cr
#' \link[=transfer_list_users]{list_users} \tab Lists the users for a file transfer protocol-enabled server that you specify by passing the ServerId parameter \cr
#' \link[=transfer_start_server]{start_server} \tab Changes the state of a file transfer protocol-enabled server from OFFLINE to ONLINE \cr
#' \link[=transfer_stop_server]{stop_server} \tab Changes the state of a file transfer protocol-enabled server from ONLINE to OFFLINE \cr
#' \link[=transfer_tag_resource]{tag_resource} \tab Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN) \cr
#' \link[=transfer_test_identity_provider]{test_identity_provider} \tab If the IdentityProviderType of a file transfer protocol-enabled server is API_Gateway, tests whether your API Gateway is set up successfully \cr
#' \link[=transfer_untag_resource]{untag_resource} \tab Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN) \cr
#' \link[=transfer_update_server]{update_server} \tab Updates the file transfer protocol-enabled server's properties after that server has been created \cr
#' \link[=transfer_update_user]{update_user} \tab Assigns new properties to a user
#' }
#'
#' @rdname transfer
#' @export
# Public constructor: returns the list of Transfer-service operation
# functions with the supplied credentials/endpoint/region configuration
# applied via paws.common::set_config().
transfer <- function(config = list()) {
svc <- .transfer$operations
svc <- set_config(svc, config)
return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
.transfer <- list()
.transfer$operations <- list()
# Static service metadata: per-partition endpoint templates, API version
# and the JSON-RPC target prefix used when routing requests.
.transfer$metadata <- list(
service_name = "transfer",
endpoints = list("*" = list(endpoint = "transfer.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "transfer.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "transfer.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "transfer.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "Transfer",
api_version = "2018-11-05",
signing_name = "transfer",
json_version = "1.1",
target_prefix = "TransferService"
)
# Builds a service object wiring handlers for the "jsonrpc" protocol and
# signature version "v4" to the metadata above.
.transfer$service <- function(config = list()) {
handlers <- new_handlers("jsonrpc", "v4")
new_service(.transfer$metadata, handlers, config)
}
|
################# Calculations #################
2 + 3*5^2
2^3^5
# ^ is right-associative, so this is 2^(3^5)
3*exp(1)^5 + 2
1.4e-2
# scientific notation: 0.014
log2(33554432)
sin(pi/2)
################# Objects and Assignment ###############
vec <- c("a", "b")
vec <- c(3, 7, 11, 15)
# reassignment replaces the character vector with a numeric one
u <- 1:10
x <- sample(1:99, 10)
# 10 random values drawn without replacement (result varies per run)
################## Vectors #######################
x[1:3]
sort(x)
order(x)
# order() returns indices, not values
x[order(x)]
# equivalent to sort(x)
x[x%%2==0]
# logical subsetting: keep even values only
v <- x^2
u <- u + 1
v/x
# elementwise division
sqrt(v)
x + 0:2
# recycling: lengths 10 and 3 do not divide evenly, so R warns
################## Matrices #######################
x <- sample(1:99, 10)
#pause("21. ")
matrix(0,nrow=2,ncol=2)
#pause("22. ")
matrix(x,nrow=2)
# filled column by column (2 x 5)
#pause("23. ")
A <- matrix(x,nrow=2)
#pause("24. ")
A[1,]
# first row as a vector
#pause("25. ")
sum(A[,-2])
# sum of all entries except the second column
# Load and inspect the Shower data ----
Shower <- read.csv2("~/Downloads/Shower_data.csv")
?read.csv #help pages for format options of the data file
head(Shower)
tail(Shower)
str(Shower)
summary(Shower)
nrow(Shower)
ncol(Shower)
# the group is not meaningful as numeric! Convert it to factor
Shower$group <- as.factor(Shower$group)
summary(Shower)
levels(Shower$group)
#level names of factors can be changed - mind the order of the elements!
levels(Shower$group) <- c("First group", "Second group", "Fourth group",
"Third group", "Fifth group", "Sixth group")
summary(Shower$group)
# Basic statistics for the Shower data ----
mean(Shower$Showertime) #mean
var(Shower$Showertime) #variance (implements sample variance)
median(Shower$Showertime)
sd(Shower$Volume) #standard deviation (implements sample formula)
max(Shower$Showertime)
min(Shower$Showertime)
quantile(Shower$Showertime)
#####
library(dplyr)

# Exercise 1: basic dplyr verbs on the shower data.
data <- read.csv2("~/Downloads/Shower_data.csv")
a <- filter(data, Hh_ID == 6395)            # showers of one household
b <- arrange(data, Volume)                  # sorted by water volume
# NOTE(review): `c` shadows base::c() from here on; consider another name.
c <- filter(data, !Hh_ID %in% c("6395", "5307"))

# Exercise 2: summaries and unit conversion.
library(weathermetrics)
# Bug fix: both summary columns were named `minShowerDuration`, so the
# max value did not get its intended name (the corrected names are used
# in the second copy of this exercise further down this file).
d <- dplyr::summarise(data, minShowerDuration = min(Showertime),
                      maxShowerDuration = max(Showertime))
e <- dplyr::mutate(data, avgtemperaturefahrenheit = weathermetrics::celsius.to.fahrenheit(Avgtemperature))
# Per-household means of duration, temperature and volume.
grouped_showers <- group_by(data, Hh_ID)
measures <- summarise(grouped_showers, meanDuration = mean(Showertime),
                      meanTemperature = mean(Avgtemperature),
                      meanVolume = mean(Volume))
#####
library(dplyr)
#Exercise 1
data <- read.csv2("~/Downloads/Shower_data.csv")
a <- filter(data, Hh_ID == 6395)
b <- arrange(data, Volume)
c <- filter(data, !Hh_ID %in% c("6395", "5307"))
#Exercise 2
# min and max shower duration over the whole data set
# (weathermetrics is used fully qualified below, so no library() call here)
d <- dplyr::summarise(data, minShowerDuration = min(Showertime),
maxShowerDuration = max(Showertime))
e <- dplyr::mutate(data, avgtemperaturefahrenheit = weathermetrics::celsius.to.fahrenheit(Avgtemperature))
# per-household means of duration, temperature and volume
grouped_showers <- group_by(data, Hh_ID)
measures <- summarise(grouped_showers, meanDuration = mean(Showertime),
meanTemperature = mean(Avgtemperature),
meanVolume = mean(Volume))
#Exercise 3
# same per-household summary, written as a pipe
measures <- data %>% dplyr::group_by(Hh_ID) %>%
dplyr::summarise(meanDuration = mean(Showertime),
meanTemperature = mean(Avgtemperature),
meanVolume = mean(Volume))
# households with more than 50 recorded showers
moreThan50 <- data %>% dplyr::group_by(Hh_ID) %>%
dplyr::summarise(n = n()) %>%
dplyr::filter(n > 50)
# average number of showers per household, first averaged within each
# experiment group, then averaged across groups
avgNumberOfShowers <- data %>% dplyr::group_by(Hh_ID, group) %>%
dplyr::summarise(n = n()) %>%
dplyr::group_by(group) %>%
dplyr::summarise(grpmean = mean(n)) %>%
dplyr::ungroup() %>%
dplyr::summarise(mean = mean(grpmean))
## Join
# attach the survey answers to each shower record (join keys are the
# shared column names, as reported by inner_join)
survey <- read.csv2("~/Downloads/Shower_survey_data.csv")
combined_dataset <- dplyr::inner_join(data, survey)
result <- combined_dataset %>% dplyr::group_by(X03d_longhair, group) %>%
dplyr::summarise(avgVolume = mean(Volume),
avgDuration = mean(Showertime))
##ggplot2
library(ggplot2)
#Exemplary plots
# scatter plot of temperature vs. volume, built up layer by layer
g <- ggplot(data, aes(x=Avgtemperature, y=Volume))
g <- g + geom_point()
g
g <- g + ggtitle("Distribution of average temparature and volume")
g <- g + xlab("Temperature")
g <- g + ylab("Volume in liters")
# horizontal reference line at the overall mean volume
g <- g + geom_hline(yintercept = mean(data$Volume), color="red")
g
# same scatter, coloured by experiment group
g <- ggplot(data, aes(x=Avgtemperature, y=Volume, color=factor(group)))
g <- g + geom_point()
g
# one panel per group instead of colours
g <- ggplot(data, aes(x=Avgtemperature, y=Volume))
g <- g + geom_point()
g <- g + facet_wrap(~group, nrow = 1)
g
#Exercise ggplot2
# duration vs. volume, raw and on log-log scale
g <- ggplot(data, aes(x=Showertime, y=Volume))
g <- g + geom_point()
g
g2 <- ggplot(data, aes(x=log(Showertime), y=log(Volume)))
g2 <- g2 + geom_point()
g2
# boxplot of shower duration
g3 <- ggplot(data, aes(x="",y=Showertime))
g3 <- g3 + geom_boxplot()
g3
# bar chart of income categories from the survey
g4 <- ggplot(survey, aes(x=einkommen))
g4 <- g4 + geom_bar()
g4
# volume density, one panel per group
g5 <- ggplot(data, aes(x=Volume ))
g5 <- g5 + geom_density()
g5 <- g5 + facet_wrap(~group)
g5
| /R_Introductions.R | no_license | FSwoboda93/Business_Intelligence_and_Analytics | R | false | false | 5,119 | r | ################# Calculations #################
2 + 3*5^2
2^3^5
# ^ is right-associative: 2^(3^5)
3*exp(1)^5 + 2
1.4e-2
log2(33554432)
sin(pi/2)
################# Objects and Assignment ###############
vec <- c("a", "b")
vec <- c(3, 7, 11, 15)
u <- 1:10
x <- sample(1:99, 10)
# 10 random values without replacement (varies per run)
################## Vectors #######################
x[1:3]
sort(x)
order(x)
x[order(x)]
# equivalent to sort(x)
x[x%%2==0]
# keep even values only
v <- x^2
u <- u + 1
v/x
sqrt(v)
x + 0:2
# recycling with a warning (10 is not a multiple of 3)
################## Matrices #######################
x <- sample(1:99, 10)
#pause("21. ")
matrix(0,nrow=2,ncol=2)
#pause("22. ")
matrix(x,nrow=2)
#pause("23. ")
A <- matrix(x,nrow=2)
#pause("24. ")
A[1,]
#pause("25. ")
sum(A[,-2])
# sum of all entries except the second column
# Load and inspect the Shower data ----
Shower <- read.csv2("~/Downloads/Shower_data.csv")
?read.csv #help pages for format options of the data file
head(Shower)
tail(Shower)
str(Shower)
summary(Shower)
nrow(Shower)
ncol(Shower)
# the group is not meaningful as numeric! Convert it to factor
Shower$group <- as.factor(Shower$group)
summary(Shower)
levels(Shower$group)
#level names of factors can be changed - mind the order of the elements!
levels(Shower$group) <- c("First group", "Second group", "Fourth group",
"Third group", "Fifth group", "Sixth group")
summary(Shower$group)
# Basic statistics for the Shower data ----
mean(Shower$Showertime) #mean
var(Shower$Showertime) #variance (implements sample variance)
median(Shower$Showertime)
sd(Shower$Volume) #standard deviation (implements sample formula)
max(Shower$Showertime)
min(Shower$Showertime)
quantile(Shower$Showertime)
#####
library(dplyr)

# Exercise 1: basic dplyr verbs on the shower data.
data <- read.csv2("~/Downloads/Shower_data.csv")
a <- filter(data, Hh_ID == 6395)            # showers of one household
b <- arrange(data, Volume)                  # sorted by water volume
# NOTE(review): `c` shadows base::c() from here on; consider another name.
c <- filter(data, !Hh_ID %in% c("6395", "5307"))

# Exercise 2: summaries and unit conversion.
library(weathermetrics)
# Bug fix: both summary columns were named `minShowerDuration`, so the
# max value did not get its intended name (the corrected names are used
# in the other copy of this exercise in this file).
d <- dplyr::summarise(data, minShowerDuration = min(Showertime),
                      maxShowerDuration = max(Showertime))
e <- dplyr::mutate(data, avgtemperaturefahrenheit = weathermetrics::celsius.to.fahrenheit(Avgtemperature))
# Per-household means of duration, temperature and volume.
grouped_showers <- group_by(data, Hh_ID)
measures <- summarise(grouped_showers, meanDuration = mean(Showertime),
                      meanTemperature = mean(Avgtemperature),
                      meanVolume = mean(Volume))
#####
library(dplyr)
#Exercise 1
data <- read.csv2("~/Downloads/Shower_data.csv")
a <- filter(data, Hh_ID == 6395)
b <- arrange(data, Volume)
c <- filter(data, !Hh_ID %in% c("6395", "5307"))
#Exercise 2
# min and max shower duration over the whole data set
d <- dplyr::summarise(data, minShowerDuration = min(Showertime),
maxShowerDuration = max(Showertime))
e <- dplyr::mutate(data, avgtemperaturefahrenheit = weathermetrics::celsius.to.fahrenheit(Avgtemperature))
# per-household means of duration, temperature and volume
grouped_showers <- group_by(data, Hh_ID)
measures <- summarise(grouped_showers, meanDuration = mean(Showertime),
meanTemperature = mean(Avgtemperature),
meanVolume = mean(Volume))
#Exercise 3
# same per-household summary, written as a pipe
measures <- data %>% dplyr::group_by(Hh_ID) %>%
dplyr::summarise(meanDuration = mean(Showertime),
meanTemperature = mean(Avgtemperature),
meanVolume = mean(Volume))
# households with more than 50 recorded showers
moreThan50 <- data %>% dplyr::group_by(Hh_ID) %>%
dplyr::summarise(n = n()) %>%
dplyr::filter(n > 50)
# average number of showers per household: averaged within each group,
# then averaged across groups
avgNumberOfShowers <- data %>% dplyr::group_by(Hh_ID, group) %>%
dplyr::summarise(n = n()) %>%
dplyr::group_by(group) %>%
dplyr::summarise(grpmean = mean(n)) %>%
dplyr::ungroup() %>%
dplyr::summarise(mean = mean(grpmean))
## Join
# attach survey answers to each shower record via the shared columns
survey <- read.csv2("~/Downloads/Shower_survey_data.csv")
combined_dataset <- dplyr::inner_join(data, survey)
result <- combined_dataset %>% dplyr::group_by(X03d_longhair, group) %>%
dplyr::summarise(avgVolume = mean(Volume),
avgDuration = mean(Showertime))
##ggplot2
library(ggplot2)
#Exemplary plots
# scatter of temperature vs. volume, built layer by layer
g <- ggplot(data, aes(x=Avgtemperature, y=Volume))
g <- g + geom_point()
g
g <- g + ggtitle("Distribution of average temparature and volume")
g <- g + xlab("Temperature")
g <- g + ylab("Volume in liters")
g <- g + geom_hline(yintercept = mean(data$Volume), color="red")
g
# coloured by experiment group
g <- ggplot(data, aes(x=Avgtemperature, y=Volume, color=factor(group)))
g <- g + geom_point()
g
# one facet per group
g <- ggplot(data, aes(x=Avgtemperature, y=Volume))
g <- g + geom_point()
g <- g + facet_wrap(~group, nrow = 1)
g
#Exercise ggplot2
# duration vs. volume, raw and log-log
g <- ggplot(data, aes(x=Showertime, y=Volume))
g <- g + geom_point()
g
g2 <- ggplot(data, aes(x=log(Showertime), y=log(Volume)))
g2 <- g2 + geom_point()
g2
# boxplot of shower duration
g3 <- ggplot(data, aes(x="",y=Showertime))
g3 <- g3 + geom_boxplot()
g3
# bar chart of income categories from the survey
g4 <- ggplot(survey, aes(x=einkommen))
g4 <- g4 + geom_bar()
g4
# volume density per group
g5 <- ggplot(data, aes(x=Volume ))
g5 <- g5 + geom_density()
g5 <- g5 + facet_wrap(~group)
g5
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/radial_phylo.R
\name{add_phylo_outer_rings}
\alias{add_phylo_outer_rings}
\title{Add rings to a PhyloXML}
\usage{
add_phylo_outer_rings(xml_file, seqs, d_clean, seqs_col, rings, condensed)
}
\arguments{
\item{xml_file}{A path to a PhyloXML file.}
\item{seqs}{A character vector containing sequences, preferably cleaned by
\code{\link{clean_data}}.}
\item{d_clean}{The cleaned data frame from which the \code{seqs} were
extracted, both preferably cleaned by \code{\link{clean_data}}.}
\item{seqs_col}{Either an integer corresponding to a column index or a string
corresponding to a column name in d that contains the sequences.}
\item{rings}{A named character vector that can be used to create outer-ring
annotations on the radial phylogram. The names of the vector must correspond
to column names in the data.frame \code{d}, and the values should
correspond to a desired value in each column which should be annotated
on the ring. For example, \code{c(FOXP3=1, species="human")} will create
two outer rings, the first of which will be colored whenever the column
"FOXP3" is 1 and the second of which will be colored whenever the column
"species" is "human". Annotations occur on a per-sequence basis when the
PhyloXML represents a non-condensed phylogram. If the PhyloXML represents
a condensed phylogram, annotations occur using individual sequence
populations: if 50\% or more of the cells with a given sequence meet the
current criteria given by \code{rings} then that sequence's ring on the
radial phylogram will be annotated.}
\item{condensed}{\code{TRUE} or \code{FALSE}, depending on whether or not the
PhyloXML represents a condensed phylogram (i.e. a phylogram with only
unique sequences and bars representing clonal expansion).}
}
\value{
A path to the PhyloXML file annotated with rings data.
}
\description{
An internal function that adds a given number of rings to a
condensed or not-condensed radial phylogram. Using the sequences in the
PhyloXML, it examines the provided data \code{d_clean} and adds the XML data
in order to create rings on a radial phylogram whenever the criteria provided
by the argument \code{rings} is found to be true in the data. The rings will
be colored using a preset color scheme.
}
\seealso{
\code{\link{radial_phylo}}
}
\keyword{internal}
| /man/add_phylo_outer_rings.Rd | permissive | mma5usf/receptormarker | R | false | false | 2,376 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/radial_phylo.R
\name{add_phylo_outer_rings}
\alias{add_phylo_outer_rings}
\title{Add rings to a PhyloXML}
\usage{
add_phylo_outer_rings(xml_file, seqs, d_clean, seqs_col, rings, condensed)
}
\arguments{
\item{xml_file}{A path to a PhyloXML file.}
\item{seqs}{A character vector containing sequences, preferably cleaned by
\code{\link{clean_data}}.}
\item{d_clean}{The cleaned data frame from which the \code{seqs} were
extracted, both preferably cleaned by \code{\link{clean_data}}.}
\item{seqs_col}{Either an integer corresponding to a column index or a string
corresponding to a column name in d that contains the sequences.}
\item{rings}{A named character vector that can be used to create outer-ring
annotations on the radial phylogram. The names of the vector must correspond
to column names in the data.frame \code{d}, and the values should
correspond to a desired value in each column which should be annotated
on the ring. For example, \code{c(FOXP3=1, species="human")} will create
two outer rings, the first of which will be colored whenever the column
"FOXP3" is 1 and the second of which will be colored whenever the column
"species" is "human". Annotations occur on a per-sequence basis when the
PhyloXML represents a non-condensed phylogram. If the PhyloXML represents
a condensed phylogram, annotations occur using individual sequence
populations: if 50\% or more of the cells with a given sequence meet the
current criteria given by \code{rings} then that sequence's ring on the
radial phylogram will be annotated.}
\item{condensed}{\code{TRUE} or \code{FALSE}, depending on whether or not the
PhyloXML represents a condensed phylogram (i.e. a phylogram with only
unique sequences and bars representing clonal expansion).}
}
\value{
A path to the PhyloXML file annotated with rings data.
}
\description{
An internal function that adds a given number of rings to a
condensed or not-condensed radial phylogram. Using the sequences in the
PhyloXML, it examines the provided data \code{d_clean} and adds the XML data
in order to create rings on a radial phylogram whenever the criteria provided
by the argument \code{rings} is found to be true in the data. The rings will
be colored using a preset color scheme.
}
\seealso{
\code{\link{radial_phylo}}
}
\keyword{internal}
|
# install.packages('xlsx') # CD: for my benefit
# install.packages('dplyr')
library(XLConnect)
library(dplyr)
library(xlsx)
# NOTE(review): XLConnect and xlsx export overlapping workbook functions;
# xlsx is loaded last so unqualified calls below resolve to xlsx.

# Workbook that will receive one sheet per item.
results <- xlsx::createWorkbook()

# Read the yearly sheets: 1952-1964 are sheets 1-13 of the source workbook.
year_no <- seq(1952, 1964, 1)
years <- lapply(seq_along(year_no),
                function(sheet) read.xlsx("British_Empire.xlsx", sheet))

# Each year has a different number of entries, so take the union of all
# colonies appearing in any year (left fold, same result as the original
# two-step union over the 1950s and then the 1960s sheets).
colonies <- Reduce(union, lapply(years, function(year) year$colony))
items <- unique(years[[1]]$X_j)

# For every item, build one wide table: a row per colony, a column per year,
# NA where a colony has no entry for that item/year.
for (item in items) {
  restructed_data <- data.frame(Colonies = colonies, item = item)
  for (k in seq_along(years)) {
    data_line <- dplyr::filter(years[[k]], X_j == item)
    item_data <- data.frame(data_line$colony, data_line$entry)
    colnames(item_data) <- c("Colonies", year_no[k])
    # Left join keeps every colony even when this year has no entry.
    restructed_data <- merge(restructed_data, item_data,
                             by = "Colonies", all.x = TRUE)
  }
  sheet <- createSheet(wb = results, sheetName = item)
  addDataFrame(x = restructed_data, sheet = sheet)
}
saveWorkbook(results, "Results.xlsx")
| /british_empire.R | no_license | rvpasari/British-Empire-Data- | R | false | false | 1,934 | r | # install.packages('xlsx') # CD: for my benefit
# install.packages('dplyr')
library(XLConnect)
library(dplyr)
library(xlsx)
#let's create the worksheet to which we will write our data
results <- xlsx::createWorkbook()
# we create files for the individuals sheets
y1952 <- read.xlsx("British_Empire.xlsx",1)
y1953 <- read.xlsx("British_Empire.xlsx",2)
y1954 <- read.xlsx("British_Empire.xlsx",3)
y1955 <- read.xlsx("British_Empire.xlsx",4)
y1956 <- read.xlsx("British_Empire.xlsx",5)
y1957 <- read.xlsx("British_Empire.xlsx",6)
y1958 <- read.xlsx("British_Empire.xlsx",7)
y1959 <- read.xlsx("British_Empire.xlsx",8)
y1960 <- read.xlsx("British_Empire.xlsx",9)
y1961 <- read.xlsx("British_Empire.xlsx",10)
y1962 <- read.xlsx("British_Empire.xlsx",11)
y1963 <- read.xlsx("British_Empire.xlsx",12)
y1964 <- read.xlsx("British_Empire.xlsx",13)
# we note that each year has different no of entries
colonies_50s <- Reduce(union, list(y1952$colony,y1953$colony,y1954$colony,y1955$colony,y1956$colony,y1957$colony,y1958$colony,y1959$colony))
colonies <- Reduce(union, list(colonies_50s,y1960$colony,y1961$colony,y1962$colony,y1963$colony,y1964$colony))
items <- unique(y1952$X_j)
years <- list(y1952,y1953,y1954,y1955,y1956,y1957,y1958,y1959,y1960,y1961,y1962,y1963,y1964)
solution <- list()
year_no <- seq(1952,1964,1)
for(item in items){
k <- 1
restructed_data = data.frame(Colonies = colonies, item = item)
for(year in years){
data_line <- dplyr::filter(year, X_j == item)
item_data <- data.frame(data_line$colony, data_line$entry)
colnames(item_data) <- c("Colonies",year_no[k])
restructed_data <- merge(restructed_data,item_data,by = "Colonies", all.x = TRUE)
k <- k + 1
}
sheet <- createSheet(wb = results, sheetName = item)
addDataFrame(x = restructed_data,sheet = sheet)
}
saveWorkbook(results,"Results.xlsx")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.