content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# plot3.R - third plot of the Exploratory Data Analysis course project:
# the three energy sub-metering series for 2007-02-01 and 2007-02-02.
library(reshape2)
# Open the PNG device first so everything below draws straight into the file.
png(filename="plot3.png",height=480,width=480)
# read.csv2 uses ";" as the field separator; "?" marks missing readings.
hpc <- read.csv2("household_power_consumption.txt",header=TRUE,stringsAsFactors=FALSE,na.strings="?")
# Parse the Date column (dd/mm/yyyy) into Date objects.
hpc$DateFormat <- as.Date(strptime(hpc$Date,"%d/%m/%Y"))
# Logical mask selecting the two project days.
projdates <- hpc$DateFormat %in% c(as.Date("2007/02/01",format="%Y/%m/%d"),as.Date("2007/02/02",format="%Y/%m/%d"))
hpc2007020102 <- hpc[projdates,]
# Explicit numeric coercion of the three sub-metering channels
# (presumably they arrive as character because read.csv2 assumes "," decimals
# while this file uses "." -- TODO confirm against the raw data).
hpc2007020102$SubMeetering1 <- as.numeric(hpc2007020102$Sub_metering_1)
hpc2007020102$SubMeetering2 <- as.numeric(hpc2007020102$Sub_metering_2)
hpc2007020102$SubMeetering3 <- as.numeric(hpc2007020102$Sub_metering_3)
# Reshape to long format with reshape2::melt: one row per (channel, observation).
SubMeetering <- cbind(hpc2007020102$SubMeetering1,hpc2007020102$SubMeetering2,hpc2007020102$SubMeetering3)
SubMeetering <- data.frame(SubMeetering)
colnames(SubMeetering) <- c("Sub_meetering_1","Sub_meetering_2","Sub_meetering_3")
SubMeet <- melt(SubMeetering,measure=c("Sub_meetering_1","Sub_meetering_2","Sub_meetering_3"))
# Draw an empty frame first (type="n"), then overlay one line per channel.
with(SubMeet,plot(ts(data=value),ylab="Energy sub meetering",type="n"))
with(subset(SubMeet,variable=="Sub_meetering_1"),lines(value,col="black"))
with(subset(SubMeet,variable=="Sub_meetering_2"),lines(value,col="red"))
with(subset(SubMeet,variable=="Sub_meetering_3"),lines(value,col="blue"))
legend("topright",lty=c(1,1,1),pch=c(NA,NA,NA),col=c("black","red","blue"),legend=c("Sub_meetering_1","Sub_meetering_2","Sub_meetering_3"))
dev.off() | /plot3.R | no_license | smmibrahim/ExData_Plotting1 | R | false | false | 1,358 | r | library(reshape2)
# Plot 3: energy sub-metering for 2007-02-01 and 2007-02-02, written to PNG.
png(filename = "plot3.png", height = 480, width = 480)

# Full data set; fields are ";"-separated and "?" marks missing readings.
power <- read.csv2("household_power_consumption.txt", header = TRUE,
                   stringsAsFactors = FALSE, na.strings = "?")
power$DateFormat <- as.Date(strptime(power$Date, "%d/%m/%Y"))

# Keep only the two project days.
target_days <- as.Date(c("2007/02/01", "2007/02/02"), format = "%Y/%m/%d")
two_days <- power[power$DateFormat %in% target_days, ]

# Force the three sub-metering channels to numeric.
two_days$SubMeetering1 <- as.numeric(two_days$Sub_metering_1)
two_days$SubMeetering2 <- as.numeric(two_days$Sub_metering_2)
two_days$SubMeetering3 <- as.numeric(two_days$Sub_metering_3)

# Long format: one row per (channel, observation).
SubMeetering <- data.frame(cbind(two_days$SubMeetering1,
                                 two_days$SubMeetering2,
                                 two_days$SubMeetering3))
colnames(SubMeetering) <- c("Sub_meetering_1", "Sub_meetering_2", "Sub_meetering_3")
SubMeet <- melt(SubMeetering,
                measure = c("Sub_meetering_1", "Sub_meetering_2", "Sub_meetering_3"))

# Empty frame first, then one line per channel, then the legend.
with(SubMeet, plot(ts(data = value), ylab = "Energy sub meetering", type = "n"))
with(subset(SubMeet, variable == "Sub_meetering_1"), lines(value, col = "black"))
with(subset(SubMeet, variable == "Sub_meetering_2"), lines(value, col = "red"))
with(subset(SubMeet, variable == "Sub_meetering_3"), lines(value, col = "blue"))
legend("topright", lty = c(1, 1, 1), pch = c(NA, NA, NA),
       col = c("black", "red", "blue"),
       legend = c("Sub_meetering_1", "Sub_meetering_2", "Sub_meetering_3"))
dev.off() |
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Mean of one pollutant ("sulfate" or "nitrate") across the selected
  # monitor files, ignoring missing values.
  #
  # directory: folder containing the per-monitor CSV files (the sorted file
  #            names are assumed to line up with monitor ids, as before).
  # pollutant: name of the column to average.
  # id:        integer vector of monitor ids (indexes into the sorted list).
  #
  # Returns a single numeric value (NaN when no non-NA readings exist).
  # Anchor the pattern so only files *ending* in ".csv" are picked up
  # (the old unanchored ".csv" was a regex matching anywhere in the name).
  file_list <- list.files(directory, pattern = "\\.csv$", full.names = TRUE)
  # Read each requested file once and collect its pollutant column;
  # unlist(lapply(...)) avoids the O(n^2) c() growth of the original loop.
  values <- unlist(lapply(file_list[id], function(f) read.csv(f)[[pollutant]]))
  mean(values, na.rm = TRUE)
}
complete <- function(directory, id = 1:332) {
  # Number of complete (no-NA) rows in each requested monitor file.
  #
  # directory: folder with the per-monitor CSV files.
  # id:        monitor ids; each indexes into the alphabetically sorted list.
  #
  # Returns data.frame(id, nobs), one row per id, in the order given by `id`.
  fileList <- list.files(directory, pattern = "\\.csv$", full.names = TRUE)
  # complete.cases() flags rows with no missing value in any column; summing
  # the logical vector counts them. vapply replaces the growing-c() loop.
  nobs <- vapply(fileList[id],
                 function(f) sum(complete.cases(read.csv(f))),
                 numeric(1), USE.NAMES = FALSE)
  data.frame(id, nobs)
}
# threshold defaults to 0
corr <- function(directory, threshold = 0) {
  # Pearson correlation between sulfate and nitrate for every monitor file
  # whose complete-case count strictly exceeds `threshold`.
  # Returns a (possibly empty) numeric vector, one entry per qualifying file.
  filesList <- list.files(path = directory, pattern = "\\.csv$", full.names = TRUE)
  cors <- numeric(0)
  # Iterate over the files actually present; the original hard-coded 1:332
  # and crashed (NA file path) on directories with a different file count.
  for (f in filesList) {
    data <- read.csv(f)
    if (sum(complete.cases(data)) > threshold) {
      # use = "complete.obs" drops observations where either value is NA.
      cors <- c(cors, cor(data[["sulfate"]], data[["nitrate"]], use = "complete.obs"))
    }
  }
  cors
}
seedata <- function(directory, threshold = 0) {
  # Debug helper: reads every CSV in `directory` in turn and returns the
  # contents of the LAST one (earlier files are read and discarded, mirroring
  # the original behaviour).
  # NOTE(review): `threshold` is accepted only for interface compatibility;
  # it was unused in the original as well.
  filesList <- list.files(path = directory, pattern = "\\.csv$", full.names = TRUE)
  data <- NULL
  # Loop over the files that exist instead of the hard-coded 1:332, which
  # failed on directories without exactly 332 files.
  for (f in filesList) {
    data <- read.csv(f)
  }
  data
}
#pollutantmean("C:/Users/jonaathan/Desktop/R/specdata", "sulfate")
#complete("C:/Users/jonaathan/Desktop/R/specdata", c(1, 5)) | /functions for Statistics.R | no_license | jonathanramirezislas/airpollution | R | false | false | 1,746 | r | pollutantmean <- function(directory, pollutant, id=1:332){
# Aux variables
# NOTE(review): pattern ".csv" is an unanchored regex, so any file whose name
# merely contains "csv" would match; the sorted file list is assumed to line
# up with monitor ids 1..332.
file_list <- list.files(directory, pattern = ".csv", full.names=TRUE)
values <- numeric()
#For each id passed as parameter:
# Collect the pollutant column of every requested monitor file.
# (Growing `values` with c() is O(n^2); acceptable for 332 small files.)
for (i in id) {
data <- read.csv(file_list[i])
values <- c(values, data[[pollutant]])
}
# Calculate the mean and return it
# NA readings are dropped; the result is NaN when nothing remains.
mean(values, na.rm = TRUE)
}
complete <- function(directory, id = 1:332) {
  # Counts the fully observed rows (no NA in any column) of each monitor file.
  #
  # directory: folder holding the per-monitor CSV files.
  # id:        monitor ids; each is an index into the sorted file listing.
  #
  # Returns a data.frame with columns `id` and `nobs`, in the order of `id`.
  fileList <- list.files(directory, pattern = "\\.csv$", full.names = TRUE)
  # complete.cases() yields one logical per row (TRUE = row has no NA);
  # summing it gives the count. vapply replaces the original c()-growth loop.
  nobs <- vapply(fileList[id],
                 function(f) sum(complete.cases(read.csv(f))),
                 numeric(1), USE.NAMES = FALSE)
  data.frame(id, nobs)
}
# threshold defaults to 0
corr <- function(directory, threshold = 0) {
  # Sulfate/nitrate Pearson correlation for every monitor whose number of
  # complete cases is greater than `threshold`.
  # Returns one correlation per qualifying file (possibly an empty vector).
  filesList <- list.files(path = directory, pattern = "\\.csv$", full.names = TRUE)
  cors <- numeric(0)
  # Loop over the files that exist rather than assuming exactly 332 of them
  # (the hard-coded 1:332 produced an NA index and an error otherwise).
  for (f in filesList) {
    data <- read.csv(f)
    if (sum(complete.cases(data)) > threshold) {
      # cor() relates the two series; "complete.obs" ignores NA pairs.
      cors <- c(cors, cor(data[["sulfate"]], data[["nitrate"]], use = "complete.obs"))
    }
  }
  cors
}
seedata <- function(directory, threshold = 0) {
  # Inspection helper: reads each CSV in turn and returns the last one read.
  # `threshold` is kept only so existing callers keep working; it is unused
  # (as it was in the original).
  filesList <- list.files(path = directory, pattern = "\\.csv$", full.names = TRUE)
  data <- NULL
  for (f in filesList) {  # was 1:332, which broke for other directory sizes
    data <- read.csv(f)
  }
  data
}
#pollutantmean("C:/Users/jonaathan/Desktop/R/specdata", "sulfate")
#complete("C:/Users/jonaathan/Desktop/R/specdata", c(1, 5)) |
#beta
# Shiny UI for the "Tarifa Inteligente de Taxi" (TIS) dashboard for Pachuca.
# Builds a shinydashboard page with four tabs: Home, About, Coverage, Fare.
library(dplyr)
library(shinydashboard)
library(shiny)
library(leaflet)
library(sf)
library(cartography)
# Pre-computed data shipped with the app: catalogue of places and the routes.
lugares <- readRDS(file = "lugares.rds")
x <- readRDS(file = "rutas.rds")
dashboardPage(title="TIS | Pachuca", skin = "purple",
# Header: app logo plus home/Facebook/Twitter link icons.
dashboardHeader(
title = tags$img(src="logof.png", height='50', width='140'),
tags$li(class = "dropdown",
tags$a(
#href = "",
target = "_blank",
tags$img(height = "14px",
icon("home")))),
tags$li(class = "dropdown",
tags$a(
href = "https://www.facebook.com/tarifataxi",
target = "_blank",
tags$img(height = "14px",
icon("facebook")))),
tags$li(class = "dropdown",
tags$a(
href = "https://twitter.com/tarifa_taxi",
target = "_blank",
tags$img(height = "14px",
icon("twitter"))))
),
# Sidebar: one menu item per tab (tabName links to the tabItems below).
dashboardSidebar(
sidebarMenu(menuItem("Home", tabName = "inicio", icon = icon("dashboard")),
menuItem("About the Project", icon = icon("th"), tabName = "acerca"),
menuItem("Our Coverage", tabName = "cobertura", icon = icon("map")),
menuItem("Official Fare - SEMOT", tabName = "tarifa", icon = icon("money"))
)
),
dashboardBody(tags$head(tags$link(rel = "shortcut icon", href = "lg.png")),
tabItems(
# "inicio": origin/destination pickers, fare/distance value boxes and map.
tabItem(tabName = "inicio",
fluidRow(tags$style(".nav-tabs {
background-color: #FFF;
}
.nav-tabs-custom .nav-tabs li.active:hover a, .nav-tabs-custom .nav-tabs li.active a {
background-color: transparent;
border-color: transparent;
}
.nav-tabs-custom .nav-tabs li.active {
border-top-color: #605ca8;
}"),
# NOTE(review): a nested tabItem() inside fluidRow() is unusual; it may
# have been meant to be tabPanel()/box(). Confirm before restructuring.
tabItem("Route",
box(
selectInput("orig", "Origin",
choices = unique(c("Select your origin",
as.character(lugares$lugaro[2:length(lugares$lugaro)]))),
width = "100%"),
selectInput("dest", "Destination",
choices = unique(c("Select your destination",
as.character(lugares$lugaro[2:length(lugares$lugaro)]))),
width = "100%"), width = 12),
width = 12),
valueBoxOutput("distancia"),
valueBoxOutput("cobro"),
valueBoxOutput("cobron"),
leafletOutput("lf", width = "100%"))),
# "acerca": static text describing the project.
tabItem(tabName = "acerca",
box(
span("History", style = "color:purple"),
p("The app is developed with the intention of eliminating a common problem by using the taxi service in the Metropolitan Area of Pachuca City (Hidalgo State, Mexico). This problem (among many others) is the unjustified high price for the service, although we know that there is a fee established by the Ministry of Mobility and Transportation of Hidalgo, the operators of the units rarely respect it, which generally increases the cost of the transfer or, in some cases, users don't want to pay the right cost for the journey that they have made."),
span("A posible solution", style = "color:purple"),
p("This project use R packages like Shiny, OSRM, Leaflet and others, to get the approximate cost of a trip according to the starting point and destination point based on the rate approved by the Conventional Transportation System of Hidalgo with the intention of avoiding any problematic situation between the user and the service provider.",
style="text-align:justify"),
span("In the future", style = "color:purple"),
p("The objectives that it seeks to fulfill, either directly or indirectly, are issues such as the fair cost for the taxi service, improving the service by avoiding any type of confrontation due to disagreements with the payment, encouraging greater use of taxis since it generally decreases the cost in short journeys also, which is projected in a more advanced phase of the application, to be able to share through the interface the real-time location, information about the driver and personalized request of the service that minimizes the losses for the operator. ALL OF THESE USING R!.", style="text-align:justify"),
h4("¡Thanks!", align = "center"),
a("To check the original project, click on this link", href = "https://tarifainteligentedetaxi.shinyapps.io/beta/")
, width = 12)
),
# "cobertura": coverage map of the catalogued destinations.
tabItem(tabName = "cobertura",
box(p("This map represents the effort of our team to define a catalog of strategic destinations for use in the application. We recognize, we need to expand the catalog of locations, which is why we work every day to have more destinations, so we value your collaboration by suggesting more locations that we will gladly add and get down to work!",
style="text-align:justify"), width = 12),
leafletOutput("lf2", width = "100%")),
# "tarifa": official SEMOT fare poster plus the complaint channels.
tabItem(tabName = "tarifa",
box(HTML('<img src="semot.JPG" width="180">'),
p(span("#NoTeDejesEngañar", style = "color:blue"), ", if someone wants to charge you more than the rate authorized by", span("#SEMOT", style = "color:blue"), "raise your complaint in the following ways: attention line 018005032002, social networks and email quejas_transporte@hidalgo.gob.mx", span("#SemotContigo", style = "color:blue")),
a("Ir al tweet", href = "https://twitter.com/MovilidadHgo/status/1149430789211213825"),
width = 12),
HTML('<center><img src="SEMOT tarifa.jpg" width="97%"></center>')
)
),
# Page footer with the author credit.
tags$footer("© Carlos Arturo Castro del Ángel,
2020.",
align = "center")
)
) | /ui.R | no_license | ccastro92/shinycontest | R | false | false | 6,279 | r | #beta
# UI definition for the TIS Pachuca taxi-fare estimator (shinydashboard).
library(dplyr)
library(shinydashboard)
library(shiny)
library(leaflet)
library(sf)
library(cartography)
# Bundled data: place catalogue and pre-computed routes for the app.
lugares <- readRDS(file = "lugares.rds")
x <- readRDS(file = "rutas.rds")
dashboardPage(title="TIS | Pachuca", skin = "purple",
# Top bar: logo image and external social-media links.
dashboardHeader(
title = tags$img(src="logof.png", height='50', width='140'),
tags$li(class = "dropdown",
tags$a(
#href = "",
target = "_blank",
tags$img(height = "14px",
icon("home")))),
tags$li(class = "dropdown",
tags$a(
href = "https://www.facebook.com/tarifataxi",
target = "_blank",
tags$img(height = "14px",
icon("facebook")))),
tags$li(class = "dropdown",
tags$a(
href = "https://twitter.com/tarifa_taxi",
target = "_blank",
tags$img(height = "14px",
icon("twitter"))))
),
# Navigation: four menu entries matched by tabName to the tabItems below.
dashboardSidebar(
sidebarMenu(menuItem("Home", tabName = "inicio", icon = icon("dashboard")),
menuItem("About the Project", icon = icon("th"), tabName = "acerca"),
menuItem("Our Coverage", tabName = "cobertura", icon = icon("map")),
menuItem("Official Fare - SEMOT", tabName = "tarifa", icon = icon("money"))
)
),
dashboardBody(tags$head(tags$link(rel = "shortcut icon", href = "lg.png")),
tabItems(
# Home tab: trip selectors, computed fare/distance boxes, route map.
tabItem(tabName = "inicio",
fluidRow(tags$style(".nav-tabs {
background-color: #FFF;
}
.nav-tabs-custom .nav-tabs li.active:hover a, .nav-tabs-custom .nav-tabs li.active a {
background-color: transparent;
border-color: transparent;
}
.nav-tabs-custom .nav-tabs li.active {
border-top-color: #605ca8;
}"),
# NOTE(review): tabItem("Route", ...) nested here treats "Route" as a
# tabName; this looks like it was intended to be a tabPanel -- verify.
tabItem("Route",
box(
selectInput("orig", "Origin",
choices = unique(c("Select your origin",
as.character(lugares$lugaro[2:length(lugares$lugaro)]))),
width = "100%"),
selectInput("dest", "Destination",
choices = unique(c("Select your destination",
as.character(lugares$lugaro[2:length(lugares$lugaro)]))),
width = "100%"), width = 12),
width = 12),
valueBoxOutput("distancia"),
valueBoxOutput("cobro"),
valueBoxOutput("cobron"),
leafletOutput("lf", width = "100%"))),
# About tab: project background and goals (static content).
tabItem(tabName = "acerca",
box(
span("History", style = "color:purple"),
p("The app is developed with the intention of eliminating a common problem by using the taxi service in the Metropolitan Area of Pachuca City (Hidalgo State, Mexico). This problem (among many others) is the unjustified high price for the service, although we know that there is a fee established by the Ministry of Mobility and Transportation of Hidalgo, the operators of the units rarely respect it, which generally increases the cost of the transfer or, in some cases, users don't want to pay the right cost for the journey that they have made."),
span("A posible solution", style = "color:purple"),
p("This project use R packages like Shiny, OSRM, Leaflet and others, to get the approximate cost of a trip according to the starting point and destination point based on the rate approved by the Conventional Transportation System of Hidalgo with the intention of avoiding any problematic situation between the user and the service provider.",
style="text-align:justify"),
span("In the future", style = "color:purple"),
p("The objectives that it seeks to fulfill, either directly or indirectly, are issues such as the fair cost for the taxi service, improving the service by avoiding any type of confrontation due to disagreements with the payment, encouraging greater use of taxis since it generally decreases the cost in short journeys also, which is projected in a more advanced phase of the application, to be able to share through the interface the real-time location, information about the driver and personalized request of the service that minimizes the losses for the operator. ALL OF THESE USING R!.", style="text-align:justify"),
h4("¡Thanks!", align = "center"),
a("To check the original project, click on this link", href = "https://tarifainteligentedetaxi.shinyapps.io/beta/")
, width = 12)
),
# Coverage tab: map of supported destinations.
tabItem(tabName = "cobertura",
box(p("This map represents the effort of our team to define a catalog of strategic destinations for use in the application. We recognize, we need to expand the catalog of locations, which is why we work every day to have more destinations, so we value your collaboration by suggesting more locations that we will gladly add and get down to work!",
style="text-align:justify"), width = 12),
leafletOutput("lf2", width = "100%")),
# Fare tab: the SEMOT official fare image and how to file complaints.
tabItem(tabName = "tarifa",
box(HTML('<img src="semot.JPG" width="180">'),
p(span("#NoTeDejesEngañar", style = "color:blue"), ", if someone wants to charge you more than the rate authorized by", span("#SEMOT", style = "color:blue"), "raise your complaint in the following ways: attention line 018005032002, social networks and email quejas_transporte@hidalgo.gob.mx", span("#SemotContigo", style = "color:blue")),
a("Ir al tweet", href = "https://twitter.com/MovilidadHgo/status/1149430789211213825"),
width = 12),
HTML('<center><img src="SEMOT tarifa.jpg" width="97%"></center>')
)
),
# Footer: author credit.
tags$footer("© Carlos Arturo Castro del Ángel,
2020.",
align = "center")
)
) |
###############################################################################
#Aux_ Functions
#This R file only makes sense in the context of NetworkCompare_R
###############################################################################
###############################################################################
#Cosine Difference
###############################################################################
# Despite the name, this returns the cosine SIMILARITY of the two vectors
# (1 = same direction, 0 = orthogonal, -1 = opposite), not a distance.
cosineDif <- function(V1, # first vector
                      V2  # second vector, same length as V1
                      ) {
  # dot(V1, V2) / (|V1| * |V2|)
  sum(V1 * V2) / (sqrt(sum(V1 * V1)) * sqrt(sum(V2 * V2)))
}
###############################################################################
#Get the path of a result file, takes into account the batch_name
###############################################################################
getResFile <- function (in_file_name, in_file_extension, in_file_folder){
  # Builds "<folder>/<name><batch_name>.<extension>".
  # NOTE(review): `batch_name` is a free variable resolved from the enclosing
  # (global) environment -- confirm it is defined before this is called.
  result_name <- paste0(in_file_name, batch_name, ".", in_file_extension)
  file.path(in_file_folder, result_name)
}
###############################################################################
#Store the cluster information in a csv file
###############################################################################
storeClusters <- function (in_file_name, in_cluster_list, in_levels = 2) {
# Appends the cluster membership tables to a CSV report file.
#
# in_file_name:    path of the report; content is APPENDED, so the caller is
#                  responsible for starting from a fresh/empty file.
# in_cluster_list: with in_levels == 2, a list of lists of member tables;
#                  otherwise a flat list of member tables.
# in_levels:       nesting depth of in_cluster_list (2 = two-level clustering).
#
# NOTE(review): 1:length(...) misbehaves on an empty list; callers are
# assumed to pass at least one cluster.
if(in_levels == 2){
for(i in 1:length(in_cluster_list)){
# Header line for the level-1 cluster ...
cat(paste("Cluster ", i), "\n", file=in_file_name, append=TRUE)
for(j in 1:length(in_cluster_list[[i]])){
# ... followed by a header plus a table per level-2 sub-cluster.
cat(paste("Cluster ", i, "-", j), "\n", file=in_file_name, append=TRUE)
write.table(in_cluster_list[[i]][[j]], file=in_file_name, sep=",", append=TRUE, col.names=FALSE, na="")
}
}
}else{
# Single-level layout: one header and one table per cluster.
for(i in 1:length(in_cluster_list)){
cat(paste("Cluster ", i), "\n", file=in_file_name, append=TRUE)
write.table(in_cluster_list[[i]], file=in_file_name, sep=",", append=TRUE, col.names=FALSE, na="")
}
}
}
storeClustersMatrix <- function (in_file_name, in_cluster_list, in_data) {
# Appends the matrix reordered by cluster membership to a CSV file, with an
# NA spacer row and column between consecutive clusters; returns the matrix.
#
# in_file_name:    CSV file the matrix is appended to.
# in_cluster_list: list of clusters, each a vector of row/column indices
#                  (or names) into in_data.
# in_data:         square matrix to reorder.
#Get the split sections
# NOTE(review): rle(unlist(...))$values only collapses *consecutive*
# duplicate indices; presumably the cluster lists never repeat an index
# back-to-back -- confirm with the producer of in_cluster_list.
final_matrix <- in_data[rle(unlist(in_cluster_list))$values, rle(unlist(in_cluster_list))$values]
sections <- list()
# `sections` is never used afterwards (kept verbatim to preserve behaviour).
br_point <- 0
# Insert one NA column and one NA row after each cluster boundary;
# br_point tracks the boundary position including spacers added so far.
for(i in 1:(length(in_cluster_list)-1)){
br_point <- br_point + length(in_cluster_list[[i]])
final_matrix <- cbind(final_matrix[,1:br_point],c(rep.int(NA,nrow(final_matrix))),final_matrix[,(br_point+1):ncol(final_matrix)])
final_matrix <- rbind(final_matrix[1:br_point,],c(rep.int(NA,ncol(final_matrix))),final_matrix[(br_point+1):nrow(final_matrix),])
br_point <- br_point + 1
}
write.table(final_matrix, file=in_file_name, sep=",", append=TRUE, col.names=NA, na="")
return(final_matrix)
}
###############################################################################
# extrapolate
# Changes the values of the matrix extrapolating
# based on the values of E.
# E contains the extrapolation values (from, to) in pairs
###############################################################################
extrapolate <- function(M, E) {
  # Piecewise-linear remapping of the values in M.
  #
  # M: numeric vector or matrix of values to transform.
  # E: matrix of breakpoints, one (from, to) pair per row; consecutive rows
  #    define the segments and are assumed sorted by the "from" column.
  #
  # Values of M in [E[i-1, 1], E[i, 1]] are mapped linearly onto
  # [E[i-1, 2], E[i, 2]]; values outside every segment are left unchanged.
  # A value on a shared boundary is (as in the original) mapped by the LAST
  # segment containing it, because each segment recomputes its mask on the
  # original M and overwrites M2.
  M2 <- M
  first_idx <- E[1, ]
  for (i in seq_len(nrow(E))[-1]) {   # safe no-op when nrow(E) < 2
    second_idx <- E[i, ]
    from_lo <- first_idx[1]
    to_lo   <- first_idx[2]
    from_hi <- second_idx[1]
    to_hi   <- second_idx[2]
    idx <- which(M >= from_lo & M <= from_hi, arr.ind = TRUE)
    M2[idx] <- (M[idx] - from_lo) * (to_hi - to_lo) / (from_hi - from_lo) + to_lo
    first_idx <- second_idx
  }
  M2
}
###############################################################################
#MCL clustering for the differences (two level clustering)
###############################################################################
clusterMatrix <- function (in_matrix){
# Two-level MCL clustering of a (signed) difference/similarity matrix.
# Level 1 runs MCL on the |extrapolated| matrix; every level-1 cluster that
# is large enough is re-extracted from the ORIGINAL matrix, rescaled with a
# second extrapolation table and clustered again. Returns a list (one entry
# per retained level-1 cluster) of lists of level-2 clusters.
#
# Relies on globals from the config file (extrap_values_1, extrap_values_2,
# mclc_*) and on mcl()/collect.mcl.* being defined elsewhere in the project.
#Map (values in the config file)
in_matrix_abs <- abs(extrapolate(in_matrix, extrap_values_1))
mcl_clusters <- mcl(in_matrix_abs,mclc_inf_level1,2000, verbose = F, heatmaps=F);
mcl_list <- collect.mcl.clusters2(mcl_clusters,mclc_minClusSize_level1);
mcl_listFull <- list();
for(i in 1:length(mcl_list)){
# Keep only level-1 clusters of at least the configured minimum size.
if(length(mcl_list[[i]]) >= mclc_minClusSize_level1 && length(mcl_list[[i]]) > 1){
new_data <- in_matrix[mcl_list[[i]], mcl_list[[i]]];
new_data <- extrapolate(new_data, extrap_values_2);
mcl_clusters2 <- mcl(new_data,mclc_inf_level2,2000, verbose = F);
mcl_leaders2 <- collect.mcl.leaders(mcl_clusters2);
# NOTE(review): mcl_leaders2 is computed but never used.
mcl_list2 <- collect.mcl.clusters2(mcl_clusters2, mclc_minClusSize_level2);
#mcl_listFull <- c(mcl_listFull, mcl_list2);
mcl_listFull[[length(mcl_listFull)+1]] <- mcl_list2
}
}
return (mcl_listFull)
}
###############################################################################
#MCL clustering for the differences (single level clustering)
###############################################################################
clusterMatrixSingleLevel <- function (in_matrix){
# Single-level counterpart of clusterMatrix(): one MCL pass on the
# |extrapolated| matrix, returning the clusters above the minimum size.
# Uses the same config-file globals (extrap_values_1, mclc_*).
#Map (values in the config file)
in_matrix_abs <- abs(extrapolate(in_matrix, extrap_values_1))
mcl_clusters <- mcl(in_matrix_abs,mclc_inf_level1,2000, verbose = F, heatmaps=F);
mcl_list <- collect.mcl.clusters2(mcl_clusters,mclc_minClusSize_level1);
return (mcl_list)
} | /auxFunctions.R | no_license | jrive258/BactNetwAnalysis | R | false | false | 4,910 | r | ###############################################################################
#Aux_ Functions
#This R file only makes sense in the context of NetworkCompare_R
###############################################################################
###############################################################################
#Cosine Difference
###############################################################################
cosineDif <- function (V1, #First Vector
                       V2 #Second Vector
                       ) {
  # Cosine of the angle between V1 and V2: the dot product divided by the
  # product of the Euclidean norms.
  dot_product <- sum(V1 * V2)
  norm_product <- sqrt(sum(V1^2)) * sqrt(sum(V2^2))
  return(dot_product / norm_product)
}
###############################################################################
#Get the path of a result file, takes into account the batch_name
###############################################################################
getResFile <- function (in_file_name, in_file_extension, in_file_folder){
# Returns "<in_file_folder>/<in_file_name><batch_name>.<in_file_extension>".
# NOTE(review): depends on a global `batch_name` being set elsewhere.
return (file.path(in_file_folder, paste(in_file_name, batch_name, ".", in_file_extension, sep="")))
}
###############################################################################
#Store the cluster information in a csv file
###############################################################################
storeClusters <- function (in_file_name, in_cluster_list, in_levels = 2) {
# Writes (always in append mode) the cluster membership report to a CSV.
# in_levels == 2 expects a list of lists of tables (two clustering levels);
# any other value expects a flat list of tables.
# Callers must truncate/remove in_file_name beforehand if a clean report
# is wanted, since every write here uses append=TRUE.
if(in_levels == 2){
for(i in 1:length(in_cluster_list)){
cat(paste("Cluster ", i), "\n", file=in_file_name, append=TRUE)
# Emit each sub-cluster under its own "Cluster i - j" heading.
for(j in 1:length(in_cluster_list[[i]])){
cat(paste("Cluster ", i, "-", j), "\n", file=in_file_name, append=TRUE)
write.table(in_cluster_list[[i]][[j]], file=in_file_name, sep=",", append=TRUE, col.names=FALSE, na="")
}
}
}else{
# Flat layout: a heading followed by the member table for each cluster.
for(i in 1:length(in_cluster_list)){
cat(paste("Cluster ", i), "\n", file=in_file_name, append=TRUE)
write.table(in_cluster_list[[i]], file=in_file_name, sep=",", append=TRUE, col.names=FALSE, na="")
}
}
}
storeClustersMatrix <- function (in_file_name, in_cluster_list, in_data) {
# Reorders the square matrix in_data so members of the same cluster are
# adjacent, separates consecutive clusters with an all-NA row and column,
# appends the result to in_file_name and returns it.
#Get the split sections
# NOTE(review): rle(...)$values drops only *consecutive* repeats of an
# index -- presumably indices never repeat back-to-back here; verify.
final_matrix <- in_data[rle(unlist(in_cluster_list))$values, rle(unlist(in_cluster_list))$values]
sections <- list()
# `sections` is dead code in the original; left untouched on purpose.
br_point <- 0
# After each cluster (except the last) splice an NA column, then an NA row,
# at position br_point; br_point also counts the spacers already inserted.
for(i in 1:(length(in_cluster_list)-1)){
br_point <- br_point + length(in_cluster_list[[i]])
final_matrix <- cbind(final_matrix[,1:br_point],c(rep.int(NA,nrow(final_matrix))),final_matrix[,(br_point+1):ncol(final_matrix)])
final_matrix <- rbind(final_matrix[1:br_point,],c(rep.int(NA,ncol(final_matrix))),final_matrix[(br_point+1):nrow(final_matrix),])
br_point <- br_point + 1
}
write.table(final_matrix, file=in_file_name, sep=",", append=TRUE, col.names=NA, na="")
return(final_matrix)
}
###############################################################################
# extrapolate
# Changes the values of the matrix extrapolating
# based on the values of E.
# E contains the extrapolation values (from, to) in pairs
###############################################################################
extrapolate <- function(M, E) {
  # Maps every value of M through the piecewise-linear function whose knots
  # are the rows of E (column 1 = input breakpoint, column 2 = output value).
  #
  # Values outside [E[1, 1], E[nrow(E), 1]] are left untouched. A point on a
  # segment boundary takes the value of the LATER segment: every segment
  # builds its mask from the original M, so later segments overwrite earlier
  # ones (this preserves the original's exact semantics).
  out <- M
  for (i in seq_len(nrow(E))[-1]) {         # empty loop when E has < 2 rows
    lo <- E[i - 1, ]                        # (from, to) of the lower knot
    hi <- E[i, ]                            # (from, to) of the upper knot
    sel <- which(M >= lo[1] & M <= hi[1], arr.ind = TRUE)
    out[sel] <- (M[sel] - lo[1]) * (hi[2] - lo[2]) / (hi[1] - lo[1]) + lo[2]
  }
  out
}
###############################################################################
#MCL clustering for the differences (two level clustering)
###############################################################################
clusterMatrix <- function (in_matrix){
# Hierarchical (two-pass) MCL clustering.
# Pass 1: cluster the absolute, extrapolated matrix. Pass 2: for each
# sufficiently large pass-1 cluster, cut its sub-matrix out of the original
# input, rescale it with the second extrapolation table and cluster again.
# Result: list of lists -- one inner list of level-2 clusters per retained
# level-1 cluster.
# Depends on project globals (extrap_values_1/2, mclc_* settings) and on
# mcl(), collect.mcl.clusters2(), collect.mcl.leaders() defined elsewhere.
#Map (values in the config file)
in_matrix_abs <- abs(extrapolate(in_matrix, extrap_values_1))
mcl_clusters <- mcl(in_matrix_abs,mclc_inf_level1,2000, verbose = F, heatmaps=F);
mcl_list <- collect.mcl.clusters2(mcl_clusters,mclc_minClusSize_level1);
mcl_listFull <- list();
for(i in 1:length(mcl_list)){
# Skip singleton/undersized level-1 clusters.
if(length(mcl_list[[i]]) >= mclc_minClusSize_level1 && length(mcl_list[[i]]) > 1){
new_data <- in_matrix[mcl_list[[i]], mcl_list[[i]]];
new_data <- extrapolate(new_data, extrap_values_2);
mcl_clusters2 <- mcl(new_data,mclc_inf_level2,2000, verbose = F);
mcl_leaders2 <- collect.mcl.leaders(mcl_clusters2);
# NOTE(review): mcl_leaders2 is never consumed.
mcl_list2 <- collect.mcl.clusters2(mcl_clusters2, mclc_minClusSize_level2);
#mcl_listFull <- c(mcl_listFull, mcl_list2);
mcl_listFull[[length(mcl_listFull)+1]] <- mcl_list2
}
}
return (mcl_listFull)
}
###############################################################################
#MCL clustering for the differences (single level clustering)
###############################################################################
clusterMatrixSingleLevel <- function (in_matrix){
  # One-level MCL clustering: rescale through the configured extrapolation
  # table, take absolute values, run MCL once and keep the clusters that
  # reach the configured minimum size.
  scaled <- abs(extrapolate(in_matrix, extrap_values_1))
  clustering <- mcl(scaled, mclc_inf_level1, 2000, verbose = F, heatmaps = F)
  return(collect.mcl.clusters2(clustering, mclc_minClusSize_level1))
} |
library(ape)
# Read the gene tree for this codeml run and remove its root;
# the unrooted tree is written out by the write.tree() call that follows.
testtree <- read.tree("12340_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="12340_0_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/12340_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# Load the Newick tree and produce an unrooted copy (ape::unroot)
# for the downstream write.tree() step.
testtree <- read.tree("12340_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="12340_0_unrooted.txt") |
# Loads best() from the course assignment; performTests() below smoke-tests it.
source("best.R")
# Runs numbered smoke tests against best(state, outcome).
# testNumbers: integer vector of tests to run; when empty (the default) all
# regular tests (1-4) run. Tests 5 and 6 exercise error paths and therefore
# only run when explicitly requested.
performTests <- function(testNumbers = vector()) {
allTests <- length(testNumbers) ==0
if (allTests || 1 %in% testNumbers)
{
print("Test 1")
print(best("TX", "heart attack"))
writeLines("")
}
if (allTests || 2 %in% testNumbers)
{
print("Test 2")
print(best("TX", "heart failure"))
writeLines("")
}
if (allTests || 3 %in% testNumbers)
{
print("Test 3")
print(best("MD", "heart attack"))
writeLines("")
}
if (allTests || 4 %in% testNumbers)
{
print("Test 4")
print(best("MD", "pneumonia"))
writeLines("")
}
## errors
# The two cases below are expected to make best() stop with an error:
# "BB" is not a valid state and "hert attack" is a deliberately misspelled
# outcome. They are never part of the default (run-all) pass.
if ( 5 %in% testNumbers)
{
print("Test 5")
best("BB", "heart attack")
}
if ( 6 %in% testNumbers)
{
print("Test 6")
best("NY", "hert attack")
}
} | /Unit Tests.R | no_license | jschaal/RAssignment3 | R | false | false | 923 | r | source("best.R")
performTests <- function(testNumbers = vector()) {
  # Smoke tests for best() (defined in best.R). With no argument every
  # regular test runs; otherwise only the numbered tests run. Tests 5 and 6
  # feed best() invalid input ("BB" is not a state; "hert attack" is a
  # deliberately misspelled outcome) and are only executed when explicitly
  # requested, since they stop with an error.
  runAll <- length(testNumbers) == 0
  regular <- list(list(1L, "TX", "heart attack"),
                  list(2L, "TX", "heart failure"),
                  list(3L, "MD", "heart attack"),
                  list(4L, "MD", "pneumonia"))
  for (tc in regular) {
    if (runAll || tc[[1]] %in% testNumbers) {
      print(paste("Test", tc[[1]]))
      print(best(tc[[2]], tc[[3]]))
      writeLines("")
    }
  }
  # Error-path tests: run only on explicit request.
  if (5 %in% testNumbers) {
    print("Test 5")
    best("BB", "heart attack")
  }
  if (6 %in% testNumbers) {
    print("Test 6")
    best("NY", "hert attack")
  }
} |
# 12/13: collapse the time series into single records of 30 items
# (original note, translated from Japanese).
setwd("C:\\Users\\sisco\\Documents\\GitHub\\letsgowaikiki\\data") # change the working directory
getwd() # confirm the current working directory; the data files are read from here
dt_all <- data.frame()
# merge the CSV data that gets read below
dt_ashi15 <- read.table("GBPJPY15.csv", header=F, sep=",")
nrow_ashi15 <- nrow(dt_ashi15)-11 #stop 11 rows before the end (each record needs an 11-bar window)
i15 <- 0
# # empty-row table (original comment)
# NOTE(review): `i15` is incremented INSIDE the loop, so each pass works on
# rows (k+1)..(k+11); the for() statement resets i15 on the next iteration.
for(i15 in 1:nrow_ashi15){
i15 <- i15 + 1
i15e <- i15+10
str_date <- as.character(dt_ashi15[i15,1]) # date
str_time <- as.character(dt_ashi15[i15,2]) # time
dt_endprice <- t(dt_ashi15[c(i15:i15e),6]) # closing prices of the 11-bar window
dt_record <- data.frame(cbind(str_date,str_time,dt_endprice))
dt_all <- rbind(dt_all,dt_record)
}
# write.dt(dt_all, "output.txt", quote=F,col.names=F, append=F,row.names=F)
write.csv(dt_all, "GBPJPYALL.csv", quote=F,row.names=F)
| /R/GBPJPY_MARGE_V4.R | no_license | MIJS-FX/letsgowaikiki | R | false | false | 1,035 | r | # 12/13 時系列を1つにし、30項目とする
setwd("C:\\Users\\sisco\\Documents\\GitHub\\letsgowaikiki\\data") # switch to the data directory
getwd() # sanity-check the working directory before reading the files
dt_all <- data.frame()
# merge the CSV files read below into one table
dt_ashi15 <- read.table("GBPJPY15.csv", header=F, sep=",")
nrow_ashi15 <- nrow(dt_ashi15)-11 #leave out the last 11 rows (window size)
i15 <- 0
# # blank-row table (original comment)
# NOTE(review): the in-loop `i15 <- i15 + 1` shifts each window to rows
# (k+1)..(k+11); for() resets i15 at the top of every iteration.
# Growing dt_all with rbind() in a loop is O(n^2) but kept for fidelity.
for(i15 in 1:nrow_ashi15){
i15 <- i15 + 1
i15e <- i15+10
str_date <- as.character(dt_ashi15[i15,1]) # date column
str_time <- as.character(dt_ashi15[i15,2]) # time column
dt_endprice <- t(dt_ashi15[c(i15:i15e),6]) # window of closing prices
dt_record <- data.frame(cbind(str_date,str_time,dt_endprice))
dt_all <- rbind(dt_all,dt_record)
}
# write.dt(dt_all, "output.txt", quote=F,col.names=F, append=F,row.names=F)
write.csv(dt_all, "GBPJPYALL.csv", quote=F,row.names=F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ComparingImpMethods.R
\name{ReorganizeAndMICE}
\alias{ReorganizeAndMICE}
\title{Preps data and runs Mice}
\usage{
ReorganizeAndMICE(
corr_array_obs,
factor_mat,
num_of_metabos = 20,
num_of_predictors = 0
)
}
\arguments{
\item{corr_array_obs}{Observed correlation array. Output from CreateCorrelation()}
\item{factor_mat}{Matrix where 3 columns are three factors and rows are values of each factor}
\item{num_of_metabos}{Number of metabolites}
\item{num_of_predictors}{Number of other metabolites for MICE to use, set to 0 to use quickpred()}
}
\value{
un-Fisher-transformed 3d array corr_array_mice and Fisher-transformed 2d matrix reorganized_corr_imp_med
}
\description{
From CreateCorrelation() correlation matrices are in 3d arrays with dimensions num_of_metabo x num_of_metabo x total number of studies
First we change the 3d array into a 2d matrix with dimensions (num_of_metabos x (num_of_metabos-1)/2 by total number of studies
Each column corresponds to an individual study and each row to a pairwise correlation of metabolites. Then we use the Fisher
transform and run MICE on the transpose of the prepped 2d matrix. 5 MICE imputations are done and then the median is taken for each pairwise value.
corr_array_mice is a 3d array with dimensions num_of_metabo x num_of_metabo x total number of studies that has been un-Fisher transformed.
reorganized_corr_imp_med is a 2d matrix with dimensions num_of_metabos x (num_of_metabos-1)/2 by total number of studies that is still Fisher transformed.
reorganized_corr_imp_med will be the input for the next step of the imputation pipeline.
}
| /man/ReorganizeAndMICE.Rd | no_license | jordanaron22/ImputingMetabolites | R | false | true | 1,687 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ComparingImpMethods.R
\name{ReorganizeAndMICE}
\alias{ReorganizeAndMICE}
\title{Preps data and runs Mice}
\usage{
ReorganizeAndMICE(
corr_array_obs,
factor_mat,
num_of_metabos = 20,
num_of_predictors = 0
)
}
\arguments{
\item{corr_array_obs}{Observed correlation array. Output from CreateCorrelation()}
\item{factor_mat}{Matrix where 3 columns are three factors and rows are values of each factor}
\item{num_of_metabos}{Number of metabolites}
\item{num_of_predictors}{Number of other metabolites for MICE to use, set to 0 to use quickpred()}
}
\value{
un-Fisher-transformed 3d array corr_array_mice and Fisher-transformed 2d matrix reorganized_corr_imp_med
}
\description{
From CreateCorrelation() correlation matrices are in 3d arrays with dimensions num_of_metabo x num_of_metabo x total number of studies
First we change the 3d array into a 2d matrix with dimensions (num_of_metabos x (num_of_metabos-1)/2 by total number of studies
Each column corresponds to an individual study and each row to a pairwise correlation of metabolites. Then we use the fischer
transform and run MICE on the transpose of the prepped 2d matrix. 5 MICE iputations are done and then the median is taken for each pairwise value
corr_array_mice is a 3d array with dimensions num_of_metabo x num_of_metabo x total number of studies that has been un-fischer transformed.
reorganized_corr_imp_med is a 2d matrix with dimensions (num_of_metabos x (num_of_metabos-1)/2 by total number of studies that is still fischer transformed.
reorganized_corr_imp_med will be the input for the next step of the imputation pipeline.
}
|
library(TreeSearch)
### Name: Tree2Splits
### Title: Tree2Splits
### Aliases: Tree2Splits Tree2Bipartitions
### Keywords: internal
### ** Examples
# Example: compute the bipartition splits of a random 6-tip tree built with
# ape::rtree (integer tip labels, branch lengths omitted).
Tree2Splits(ape::rtree(6, tip.label=1:6, br=NULL))
| /data/genthat_extracted_code/TreeSearch/examples/Tree2Splits.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 206 | r | library(TreeSearch)
### Name: Tree2Splits
### Title: Tree2Splits
### Aliases: Tree2Splits Tree2Bipartitions
### Keywords: internal
### ** Examples
# Example: compute the bipartition splits of a random 6-tip tree built with
# ape::rtree (integer tip labels, branch lengths omitted).
Tree2Splits(ape::rtree(6, tip.label=1:6, br=NULL))
|
#' Twist cube
#'
#' Twist the cube by given string of moves and number of times.
#' @param cube - cube object
#' @param moves - string parameter
#' Syntax: The main QTM clockwise movements are the same as in the Singmasters notation: "U", "D", "F", "B", "R", "L". However moves from HTM such as U2 is not move of upper layer by 180 degrees (it will be explained further).
#' Counter clockwise moves are denoted by lowercase letters: "u", "d", "f", "b", "r", "l".
#' Rotations of the cube are denoted by "O" (rotate cube horizontally, "o" means rotation horizontally in different direction); and "P" (rotate cube vertically, "p" means rotation vertically in different direction).
#' Repetitions of the moves: there are several ways to repeat given sequence of moves. The simplest way is to copy commands. The most effective way to do this is using parameter times. However, in some cases it is useful to repeat only parts of sequence of moves - then we could use bracketing terms and operator times "x".
#' @param times - integer (default is 1). Number of repetitions of moves.
#' @return cube - cube object
#'
#' @examples
#' # Create classic Rubik's cube:
#' c <- createCube()
#' # Check moves LL FF RR BB
#' c <- twistCube(c,"LLFFRRBB")
#' # Check if LFRB repeated 316 times is cycle:
#' c <- twistCube(c,"(LFRB)x316")
#' is.solved(c)
#' # TRUE
#' # Twisted chicken feet pattern:
#' c <- createCube()
#' c <- twistCube(c,positions[21,"moves"])
#' plot3dCube(c)
#' # The same pattern using pipe %>% from magrittr package
#' require(magrittr)
#' createCube() %>% twistCube(positions[21,"moves"]) %>% plot3dCube()
#' # Rubik's Revenge
#' createCube(N = 4) %>% plot3dCube()
#' # Creating Professor's Cube
#' createCube(N = 5) %>% plot3dCube()
#' # Rotating and moving edges:
#' createCube(N = 5) %>% twistCube("(u3RUrFrfRU3)x12") %>% plot3dCube()
#' # Moving and rotating edges part 2:
#' createCube(5) %>% twistCube("((R1:2)x2 BBUU (L1:2)x2 UU rr2
#' UU RR2 UUFF RR2 FF ll2 BB (R1:2)x2 )x2 dd") %>% plot3dCube()
#' # Hearts pattern on a cube sized 13x13x13:
#' createCube(13) %>% twistCube("OP U2
#' l4:5 R4:5 u2 L4:5 r4:5 U3
#' l3:6 R3:6 u3 L3:6 r3:6 U4
#' l2:4 R2:4 l6:8 u4 L2:4 r2:4 L6:8 U5
#' l2:3 R2:3 l7 u5 L2:3 r2:3 L7 U6
#' l2:3 R2:3 u6 L2:3 r2:3 U7
#' l2:4 R2:4 u7 L2:4 r2:4 U8
#' l3:5 R3:5 u8 L3:5 r3:5 U9
#' l4:6 R4:6 u9 L4:6 r4:6 d4 l5:9 D4
#' L5:9 d3 l6:8 D3
#' L6:8 d2 l7 D2 L7") %>% plot3dCube()
#' # Creating octa cube
#' createCube(N = 4, mode = "octa") %>% plot3dCube()
#' # Rotating centers which is not visible on a classic cube (URL algorithm):
#' createCube(N = 4, mode = "octa") %>% twistCube("(URLuurl)x2") %>% plot3dCube()
#' # Creating void cube 8x8x8
#' createCube(N = 8,mode = "void") %>% plot3dCube()
#' @import magrittr
#' @export
twistCube <- function(cube, moves = "", times = 1) {
  # Delegate the sticker permutation to the internal rotation engine,
  # repeating the move string `times` times.
  rotated <- kostka.obrot(cube$cube, moves, times)
  cube$cube <- rotated
  # Record the applied moves in the cube's move history.
  cube$moves <- paste(cube$moves, moves)
  cube
}
| /R/twistCube.R | no_license | cran/rcube | R | false | false | 2,919 | r | #' Twist cube
#'
#' Twist the cube by given string of moves and number of times.
#' @param cube - cube object
#' @param moves - string parameter
#' Syntax: The main QTM clockwise movements are the same as in the Singmasters notation: "U", "D", "F", "B", "R", "L". However moves from HTM such as U2 is not move of upper layer by 180 degrees (it will be explained further).
#' Counter clockwise moves are denoted by lowercase letters: "u", "d", "f", "b", "r", "l".
#' Rotations of the cube are denoted by "O" (rotate cube horizontally, "o" means rotation horizontally in different direction); and "P" (rotate cube vertically, "p" means rotation vertically in different direction).
#' Repetitions of the moves: there are several ways to repeat given sequence of moves. The simplest way is to copy commands. The most effective way to do this is using parameter times. However, in some cases it is useful to repeat only parts of sequence of moves - then we could use bracketing terms and operator times "x".
#' @param times - integer (default is 1). Number of repetitions of moves.
#' @return cube - cube object
#'
#' @examples
#' # Create classic Rubik's cube:
#' c <- createCube()
#' # Check moves LL FF RR BB
#' c <- twistCube(c,"LLFFRRBB")
#' # Check if LFRB repeated 316 times is cycle:
#' c <- twistCube(c,"(LFRB)x316")
#' is.solved(c)
#' # TRUE
#' # Twisted chicken feet pattern:
#' c <- createCube()
#' c <- twistCube(c,positions[21,"moves"])
#' plot3dCube(c)
#' # The same pattern using pipe %>% from magrittr package
#' require(magrittr)
#' createCube() %>% twistCube(positions[21,"moves"]) %>% plot3dCube()
#' # Rubik's Revenge
#' createCube(N = 4) %>% plot3dCube()
#' # Creating Professor's Cube
#' createCube(N = 5) %>% plot3dCube()
#' # Rotating and moving edges:
#' createCube(N = 5) %>% twistCube("(u3RUrFrfRU3)x12") %>% plot3dCube()
#' # Moving and rotating edges part 2:
#' createCube(5) %>% twistCube("((R1:2)x2 BBUU (L1:2)x2 UU rr2
#' UU RR2 UUFF RR2 FF ll2 BB (R1:2)x2 )x2 dd") %>% plot3dCube()
#' # Hearts pattern on a cube sized 13x13x13:
#' createCube(13) %>% twistCube("OP U2
#' l4:5 R4:5 u2 L4:5 r4:5 U3
#' l3:6 R3:6 u3 L3:6 r3:6 U4
#' l2:4 R2:4 l6:8 u4 L2:4 r2:4 L6:8 U5
#' l2:3 R2:3 l7 u5 L2:3 r2:3 L7 U6
#' l2:3 R2:3 u6 L2:3 r2:3 U7
#' l2:4 R2:4 u7 L2:4 r2:4 U8
#' l3:5 R3:5 u8 L3:5 r3:5 U9
#' l4:6 R4:6 u9 L4:6 r4:6 d4 l5:9 D4
#' L5:9 d3 l6:8 D3
#' L6:8 d2 l7 D2 L7") %>% plot3dCube()
#' # Creating octa cube
#' createCube(N = 4, mode = "octa") %>% plot3dCube()
#' # Rotating centers which is not visible on a classic cube (URL algorithm):
#' createCube(N = 4, mode = "octa") %>% twistCube("(URLuurl)x2") %>% plot3dCube()
#' # Creating void cube 8x8x8
#' createCube(N = 8,mode = "void") %>% plot3dCube()
#' @import magrittr
#' @export
twistCube <- function(cube,moves = "", times = 1){
  # Apply the move string to the sticker state via the internal rotation
  # engine, repeating the sequence `times` times.
  cube$cube <- kostka.obrot(cube$cube,moves,times)
  # Append the applied moves to the cube's move history string.
  cube$moves <- paste(cube$moves,moves)
  return(cube)
}
|
library(beautier)
### Name: ccp_tree_prior_to_xml_prior_distr
### Title: Creates the tree prior section in the prior section of the prior
### section of the distribution section of a BEAST2 XML parameter file
### for a Coalescent Constant Population tree prior
### Aliases: ccp_tree_prior_to_xml_prior_distr
### ** Examples
# <distribution id="posterior" spec="util.CompoundDistribution">
# <distribution id="prior" spec="util.CompoundDistribution">
# HERE, where the ID of the distribution is 'prior'
# </distribution>
# <distribution id="likelihood" ...>
# </distribution>
# </distribution>
| /data/genthat_extracted_code/beautier/examples/ccp_tree_prior_to_xml_prior_distr.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 633 | r | library(beautier)
### Name: ccp_tree_prior_to_xml_prior_distr
### Title: Creates the tree prior section in the prior section of the prior
### section of the distribution section of a BEAST2 XML parameter file
### for a Coalescent Constant Population tree prior
### Aliases: ccp_tree_prior_to_xml_prior_distr
### ** Examples
# <distribution id="posterior" spec="util.CompoundDistribution">
# <distribution id="prior" spec="util.CompoundDistribution">
# HERE, where the ID of the distribution is 'prior'
# </distribution>
# <distribution id="likelihood" ...>
# </distribution>
# </distribution>
|
## cachematrix contanins functions to cache a matix and its inverse
## Create a data structure to hold a matrix and its inverse
## Build a special "matrix" object: a list of closures that share the stored
## matrix `x` and a cache slot for its inverse.
makeCacheMatrix <- function(x = numeric()) {
  # Cached inverse; NULL signals "not computed yet".
  cached <- NULL

  # Replace the stored matrix and invalidate the cache.
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a freshly computed inverse.
  setsolve <- function(myinv) cached <<- myinv
  # Return the cached inverse (or NULL when none has been stored).
  getsolve <- function() cached

  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## Recover the cached inverse, calculate and save inverse if necessary.
## This will return a matrix that is the inverse of 'x'.
## If the inverse has already been calculated then the cached inverse will be used.
## Return the inverse of the special "matrix" `x` (as built by
## makeCacheMatrix), computing it at most once. Extra arguments are
## forwarded to solve(); repeat calls reuse the cached inverse.
cacheSolve <- function(x, ...) {
  inv <- x$getsolve()
  if (is.null(inv)) {
    # Cache miss: compute the inverse and remember it for next time.
    inv <- solve(x$get(), ...)
    x$setsolve(inv)
  } else {
    message("getting cached data")
  }
  inv
}
| /cachematrix.R | no_license | jg-davies/ProgrammingAssignment2 | R | false | false | 957 | r | ## cachematrix contanins functions to cache a matix and its inverse
## Create a data structure to hold a matrix and its inverse
## Build a special "matrix" object: a list of closures sharing the stored
## matrix `x` and `savedinv`, the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = numeric()) {
  savedinv <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    savedinv <<- NULL
  }
  get <- function() x
  # Store / retrieve the cached inverse.
  setsolve <- function(myinv) savedinv <<- myinv
  getsolve <- function() savedinv
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## Recover the cached inverse, calculate and save inverse if necessary.
## This will return a matrix that is the inverse of 'x'.
## If the inverse has already been calculated then the cached inverse will be used.
cacheSolve <- function(x, ...) {
  # Use the cached inverse when one has already been computed.
  savedinv <- x$getsolve()
  if(!is.null(savedinv)) {
    message("getting cached data")
    return(savedinv)
  }
  # Cache miss: compute the inverse (extra args forwarded to solve),
  # store it in the cache, and return it.
  data <- x$get()
  savedinv <- solve(data, ...)
  x$setsolve(savedinv)
  savedinv
  ## Return a matrix that is the inverse of 'x'
}
|
library(car)
rm(list=ls())
data <- read.csv("C:/ISEN 613/Midterm-I/Training.csv",header = T,sep = ",")
attach(data)
### preliminary check ###
dim(data)
str(data)
summary(data)
pairs(data)
par(mfrow=c(2,3))
plot(casual,type="l")
plot(temp,type="l")
plot(atemp,type ="l")
plot(hum,type="l")
plot(windspeed,type="l")
par(mfrow=c(2,4))
boxplot(casual~season,xlab="Season",main="Casual ~ season")
boxplot(casual~weekday,xlab="weekday",main="Casual ~ weekday")
boxplot(casual~year,xlab="year",main="casual~ year")
boxplot(casual~holiday,xlab="holiday",main="casual ~ holiday")
boxplot(casual~weathersit,xlab="weathersit",main="casual ~ weathersit")
plot(temp,casual,col="blue",xlab="temp",main="casual vs temp")
abline(lm(casual ~ temp))
plot(windspeed,casual,col="red",xlab="windspeed",main="casual vs windspeed")
abline(lm(casual ~ windspeed))
plot(hum,casual,col="green",xlab="humidity",main="casual vs hum")
abline(lm(casual ~ hum))
rho <- data.frame(cor(data))
### regression using all the variables ###
lm <- lm(casual ~ .,data = data)
summary(lm)
vif(lm)
par(mfrow=c(2,2))
plot(lm)
## new regression ##
lm1 <- lm(log(casual) ~ year+weekday+I(temp^.33)+weathersit+sqrt(windspeed)+weekday:temp,data=data)
summary(lm1)
vif(lm1)
par(mfrow=c(2,2))
plot(lm1)
lm2 <- lm(log(casual) ~ year+season+I(temp^.33)+weathersit+sqrt(windspeed),data=data)
summary(lm2)
vif(lm2)
par(mfrow=c(2,2))
plot(lm2)
lm3 <- lm(log(casual) ~ year+month+scale(temp,center = T,scale = T)+sqrt(windspeed)+weathersit+month:scale(temp,center = T,scale = T),data=data)
summary(lm3)
plot(lm3)
vif(lm3)
## predict for test data ###
data.test <- read.csv("C:/ISEN 613/Midterm-I/Test.csv",header = T,sep = ",")
lm1.predict <- predict(lm1,newdata = data.test,type = "response")
casual.pred1 <- exp(lm1.predict)
lm2.predict <- predict(lm2,newdata = data.test,type = "response")
casual.pred2 <- exp(lm2.predict)
lm3.predict <- predict(lm3,newdata = data.test,type = "response")
casual.pred4 <- exp(lm3.predict)
## root mean square error ##
rmse1 <- sqrt(sum((data.test$casual - casual.pred1)^2)/length(data.test$casual))
par(mfrow=c(1,1))
plot(data.test$casual,type="l",col="red")
lines(casual.pred,type="l",col="blue")
rmse2 <- sqrt(sum((data.test$casual- casual.pred2)^2)/length(data.test$casual))
par(mfrow=c(1,1))
plot(data.test$casual,type="l",col="red")
lines(casual.pred2,type="l",col="blue")
rmse4 <- sqrt(sum((data.test$casual- casual.pred4)^2)/length(data.test$casual))
##logarithimic error ##
lme1 <- sqrt(sum((log(data.test$casual+1)-log(casual.pred+1))^2)/20)
lme2 <- sqrt(sum((log(data.test$casual+1)-log(casual.pred2+1))^2)/20)
### models ####
lm2 <- lm(log(casual) ~ year+holiday+weathersit+windspeed+temp,data=data)
lm1 <- lm(log(casual) ~ year+season+temp+weathersit+windspeed+windspeed:season,data=data)
lm1 <- lm(log(casual) ~ year+season+temp+weathersit+I(windspeed^2),data=data)
lm1 <- lm(log(casual) ~ year+season+I(temp^2)+weathersit+I(windspeed^2),data=data)
lm2 <- lm(log(casual) ~ year+month+weathersit+log(temp)+log(windspeed)+year:month,data=data)
par(mfrow=c(1,1))
plot(casual[season==1],type = 'l')
boxplot(casual~month)
boxplot(weekday~holiday)
par(mfrow=c(1,1))
plot(temp,casual)
plot(windspeed,casual)
plot(hum,casual)
## predict with first model ##
predict.lm <- predict(lm,newdata = data.test,type="response")
casual.pred3 <- predict.lm
rmse <- sqrt(sum((data.test$casual - casual.pred3)^2)/length(data.test$casual))
summary(powerTransform(cbind(data$temp,data$windspeed)~1,))
par(mfrow=c(1,1))
plot(log(scale(casual,center = T,scale=T)),type="l")
| /midterm.R | no_license | bharathallu/Engineering-Data-Analysis | R | false | false | 3,680 | r | library(car)
rm(list=ls())
data <- read.csv("C:/ISEN 613/Midterm-I/Training.csv",header = T,sep = ",")
attach(data)
### preliminary check ###
dim(data)
str(data)
summary(data)
pairs(data)
par(mfrow=c(2,3))
plot(casual,type="l")
plot(temp,type="l")
plot(atemp,type ="l")
plot(hum,type="l")
plot(windspeed,type="l")
par(mfrow=c(2,4))
boxplot(casual~season,xlab="Season",main="Casual ~ season")
boxplot(casual~weekday,xlab="weekday",main="Casual ~ weekday")
boxplot(casual~year,xlab="year",main="casual~ year")
boxplot(casual~holiday,xlab="holiday",main="casual ~ holiday")
boxplot(casual~weathersit,xlab="weathersit",main="casual ~ weathersit")
plot(temp,casual,col="blue",xlab="temp",main="casual vs temp")
abline(lm(casual ~ temp))
plot(windspeed,casual,col="red",xlab="windspeed",main="casual vs windspeed")
abline(lm(casual ~ windspeed))
plot(hum,casual,col="green",xlab="humidity",main="casual vs hum")
abline(lm(casual ~ hum))
rho <- data.frame(cor(data))
### regression using all the variables ###
lm <- lm(casual ~ .,data = data)
summary(lm)
vif(lm)
par(mfrow=c(2,2))
plot(lm)
## new regression ##
lm1 <- lm(log(casual) ~ year+weekday+I(temp^.33)+weathersit+sqrt(windspeed)+weekday:temp,data=data)
summary(lm1)
vif(lm1)
par(mfrow=c(2,2))
plot(lm1)
lm2 <- lm(log(casual) ~ year+season+I(temp^.33)+weathersit+sqrt(windspeed),data=data)
summary(lm2)
vif(lm2)
par(mfrow=c(2,2))
plot(lm2)
lm3 <- lm(log(casual) ~ year+month+scale(temp,center = T,scale = T)+sqrt(windspeed)+weathersit+month:scale(temp,center = T,scale = T),data=data)
summary(lm3)
plot(lm3)
vif(lm3)
## predict for test data ###
data.test <- read.csv("C:/ISEN 613/Midterm-I/Test.csv",header = T,sep = ",")
lm1.predict <- predict(lm1,newdata = data.test,type = "response")
casual.pred1 <- exp(lm1.predict)
lm2.predict <- predict(lm2,newdata = data.test,type = "response")
casual.pred2 <- exp(lm2.predict)
lm3.predict <- predict(lm3,newdata = data.test,type = "response")
casual.pred4 <- exp(lm3.predict)
## root mean square error ##
rmse1 <- sqrt(sum((data.test$casual - casual.pred1)^2)/length(data.test$casual))
par(mfrow=c(1,1))
plot(data.test$casual,type="l",col="red")
lines(casual.pred,type="l",col="blue")
rmse2 <- sqrt(sum((data.test$casual- casual.pred2)^2)/length(data.test$casual))
par(mfrow=c(1,1))
plot(data.test$casual,type="l",col="red")
lines(casual.pred2,type="l",col="blue")
rmse4 <- sqrt(sum((data.test$casual- casual.pred4)^2)/length(data.test$casual))
##logarithimic error ##
lme1 <- sqrt(sum((log(data.test$casual+1)-log(casual.pred+1))^2)/20)
lme2 <- sqrt(sum((log(data.test$casual+1)-log(casual.pred2+1))^2)/20)
### models ####
lm2 <- lm(log(casual) ~ year+holiday+weathersit+windspeed+temp,data=data)
lm1 <- lm(log(casual) ~ year+season+temp+weathersit+windspeed+windspeed:season,data=data)
lm1 <- lm(log(casual) ~ year+season+temp+weathersit+I(windspeed^2),data=data)
lm1 <- lm(log(casual) ~ year+season+I(temp^2)+weathersit+I(windspeed^2),data=data)
lm2 <- lm(log(casual) ~ year+month+weathersit+log(temp)+log(windspeed)+year:month,data=data)
par(mfrow=c(1,1))
plot(casual[season==1],type = 'l')
boxplot(casual~month)
boxplot(weekday~holiday)
par(mfrow=c(1,1))
plot(temp,casual)
plot(windspeed,casual)
plot(hum,casual)
## predict with first model ##
predict.lm <- predict(lm,newdata = data.test,type="response")
casual.pred3 <- predict.lm
rmse <- sqrt(sum((data.test$casual - casual.pred3)^2)/length(data.test$casual))
summary(powerTransform(cbind(data$temp,data$windspeed)~1,))
par(mfrow=c(1,1))
plot(log(scale(casual,center = T,scale=T)),type="l")
|
#' Make a sorted frequency table for a factor
#'
#' @param x factor
#'
#' @return A tibble
#' @export
#'
#' @examples
#' fcount(iris$Species)
fcount <- function(x) {
  # forcats::fct_count tallies each factor level; sort = TRUE puts the most
  # frequent level first.
  forcats::fct_count(x, sort = TRUE)
}
| /R/fcount.R | permissive | blagburn/foofactors | R | false | false | 207 | r | #' Make a sorted frequency table for a factor
#'
#' @param x factor
#'
#' @return A tibble
#' @export
#'
#' @examples
#' fcount(iris$Species)
fcount <- function(x) {
  # Delegate to forcats: count occurrences per level, most frequent first.
  forcats::fct_count(x, sort = TRUE)
}
|
# ---------------------------------------------------------------------------- #
# KPCA Alternative Specification: 6 string kernels
# observations
# Christopher Gandrud
# MIT License
# ---------------------------------------------------------------------------- #
# Set working directory. Change as needed.
# Candidate repository locations; set_valid_wd() uses the first valid one.
possible_dir <- c('/git_repositories/EIUCrisesMeasure/',
                  '~/git_repositories/EIUCrisesMeasure/')
simpleSetup::set_valid_wd(possible_dir)
# Run set up script
# NOTE(review): setup.R is assumed to define kpca_eiu(), eiu_list and
# country_date used below -- confirm against setup.R.
source('source/pca_kpca/setup/setup.R')
# Run KPCA
# Kernel PCA with string kernels of length 6; system.time() prints run time.
system.time(
  kpca_eiu(eiu_list, country_date, length_spec = 6,
           out_dir = 'source/pca_kpca/raw_data_output/non_5_strings/')
)
| /source/pca_kpca/kpca_6.R | permissive | shirzartenwer/EIUCrisesMeasure | R | false | false | 679 | r | # ---------------------------------------------------------------------------- #
# KPCA Alternative Specification: 6 string kernels
# observations
# Christopher Gandrud
# MIT License
# ---------------------------------------------------------------------------- #
# Set working directory. Change as needed.
# Candidate repository locations; set_valid_wd() uses the first valid one.
possible_dir <- c('/git_repositories/EIUCrisesMeasure/',
                  '~/git_repositories/EIUCrisesMeasure/')
simpleSetup::set_valid_wd(possible_dir)
# Run set up script
# NOTE(review): setup.R is assumed to define kpca_eiu(), eiu_list and
# country_date used below -- confirm against setup.R.
source('source/pca_kpca/setup/setup.R')
# Run KPCA
# Kernel PCA with string kernels of length 6; system.time() prints run time.
system.time(
  kpca_eiu(eiu_list, country_date, length_spec = 6,
           out_dir = 'source/pca_kpca/raw_data_output/non_5_strings/')
)
|
library(knotR)
filename <- "pretzel_p3_p5_p7_m3_m5_90deg_crossing.svg"
a <- reader(filename)
Mver <-
matrix(c(
58,28,
24,54,
56,26,
23,53,
57,27,
25,55,
59,29,
60,30,
22,52,
50,20,
21,51,
49,19,
48,18,
12,42,
46,16,
14,44,
47,17,
13,43,
45,15,
11,41,
10,40,
36,06,
08,38,
09,39,
37,07,
35,05,
34,04,
02,32,
01,31,
33,03
),ncol=2,byrow=TRUE)
p35735 <- symmetry_object(a, Mver=Mver, xver=NULL,mcdonalds=T)
a <- symmetrize(a,p35735)
knotplot2(a,circ=F,lwd=1,text=T,rainbow=T,seg=T)
jj <- matrix(c( # NB nonalternating!
58,24, 1,
25,57, 1,
56,26, 1,
27,55, 1,
54,28, 1,
22,50, 2,
51,21, 2,
20,52, 2,
48,12, 3,
13,47, 3,
46,14, 3,
15,45, 3,
44,16, 3,
17,43, 3,
42,18, 3,
10,36, 4,
37,09, 4,
08,38, 4,
39,07, 4,
06,40, 4,
34,02, 5,
03,33, 5,
32,04, 5
),byrow=TRUE,ncol=3)
# twists number 4 and 5 are negative:
swap <- jj[,3] %in% c(4,5)
jj[swap,1:2] <- jj[swap,2:1]
ou35735 <- jj[,1:2]
w <- c(pot = 1,
ang = 100,
ben = 1,
len = 1,
mid = 1,
clo = 1,
swi = 1,
con = 1,
ncn = 1
)
jj <- knotoptim(filename, weights=w,
symobj = p35735,
ou = ou35735,
prob = 0,
iterlim=1000, print.level=2
# control=list(trace=100,maxit=100000), useNLM=FALSE
)
write_svg(jj,filename,safe=FALSE)
dput(jj,file=sub('.svg','.S',filename))
| /inst/pretzel_35735_worker_90deg_crossing_worker.R | no_license | RobinHankin/knotR | R | false | false | 1,723 | r | library(knotR)
filename <- "pretzel_p3_p5_p7_m3_m5_90deg_crossing.svg"
a <- reader(filename)
Mver <-
matrix(c(
58,28,
24,54,
56,26,
23,53,
57,27,
25,55,
59,29,
60,30,
22,52,
50,20,
21,51,
49,19,
48,18,
12,42,
46,16,
14,44,
47,17,
13,43,
45,15,
11,41,
10,40,
36,06,
08,38,
09,39,
37,07,
35,05,
34,04,
02,32,
01,31,
33,03
),ncol=2,byrow=TRUE)
p35735 <- symmetry_object(a, Mver=Mver, xver=NULL,mcdonalds=T)
a <- symmetrize(a,p35735)
knotplot2(a,circ=F,lwd=1,text=T,rainbow=T,seg=T)
jj <- matrix(c( # NB nonalternating!
58,24, 1,
25,57, 1,
56,26, 1,
27,55, 1,
54,28, 1,
22,50, 2,
51,21, 2,
20,52, 2,
48,12, 3,
13,47, 3,
46,14, 3,
15,45, 3,
44,16, 3,
17,43, 3,
42,18, 3,
10,36, 4,
37,09, 4,
08,38, 4,
39,07, 4,
06,40, 4,
34,02, 5,
03,33, 5,
32,04, 5
),byrow=TRUE,ncol=3)
# twists number 4 and 5 are negative:
swap <- jj[,3] %in% c(4,5)
jj[swap,1:2] <- jj[swap,2:1]
ou35735 <- jj[,1:2]
w <- c(pot = 1,
ang = 100,
ben = 1,
len = 1,
mid = 1,
clo = 1,
swi = 1,
con = 1,
ncn = 1
)
jj <- knotoptim(filename, weights=w,
symobj = p35735,
ou = ou35735,
prob = 0,
iterlim=1000, print.level=2
# control=list(trace=100,maxit=100000), useNLM=FALSE
)
write_svg(jj,filename,safe=FALSE)
dput(jj,file=sub('.svg','.S',filename))
|
/aula_vetores.R | no_license | Marcelo-Honorio/minicurso_microdados | R | false | false | 1,685 | r | ||
library(ggplot2)
library(dplyr)
library(purrr)
library(tidyr)
library(magrittr)
library(broom)
library(stringr) # regex pattern matching
library(rlang) # rlang::duplicate()
library(cowplot)
library(rje) # powerSet()
library(meta)
library(metafor)
# Keep only the columns of `meta_df` whose fit succeeded.
#
# Each column is expected to hold a one-element list containing a fitted
# object; columns whose object's primary class is not 'metagen' (e.g. a
# try-error from a failed meta-analysis) are dropped.
#
# Fixes vs. the original: `for (x in 1:length(meta_df))` errored on empty
# input (1:0 yields c(1, 0)), and indices were accumulated by growing a
# list; a vectorised logical mask handles both cleanly.
#
# meta_df: list-like collection (tibble or plain list) of one-element lists.
# Returns: `meta_df` restricted to the successfully fitted columns.
get_good_metadfs <- function(meta_df) {
  keep <- vapply(
    seq_along(meta_df),
    function(i) identical(class(meta_df[[i]][[1]])[[1]], "metagen"),
    logical(1)
  )
  meta_df[keep]
}
# Build a tidy summary table (one row per feature) from a collection of fits.
#
# input_meta_df: tibble whose columns each hold a one-element list containing
#   a fitted object ('metagen' meta-analyses for "multi"; single-row model
#   summaries for "single").
# cohort_type: "multi" (random-effects meta-analysis across cohorts) or
#   "single" (one cohort; estimates come straight from each model summary).
# mapping: lookup table; row i, column 1 holds the readable name for feature
#   index i (feature column names look like "<prefix>_<index>").
# Returns a tibble with the estimate, raw and adjusted p-values (Bonferroni,
# FDR, Benjamini-Yekutieli) and a 95% CI per feature.
# NOTE(review): any other cohort_type falls through and returns NULL
# invisibly -- confirm callers only ever pass "multi"/"single".
get_summary_stats <- function(input_meta_df, cohort_type, mapping) {
  if (cohort_type == "multi") {
    meta_df <- get_good_metadfs(input_meta_df) # for meta-analyzed outputs, filter for good outputs first
    return(
      tibble(
        feature = colnames(meta_df),
        # feature is "<prefix>_<index>"; look the index up in `mapping`
        feature_name = map_chr(feature, ~mapping[[as.integer(str_split(., "_")[[1]][[2]]), 1]]),
        # random-effects estimate and p-value from each metagen fit
        estimate = map_dbl(meta_df, ~.[[1]]$TE.random),
        p.val = map_dbl(meta_df, ~.[[1]]$pval.random),
        # multiple-testing corrections across all features
        bonferroni.p.val = p.adjust(p.val, method = "bonferroni"),
        fdr.p.val = p.adjust(p.val, method = "fdr"),
        by.p.val = p.adjust(p.val, method = "BY"),
        CI_95_lower = map_dbl(meta_df, ~.[[1]]$lower.random),
        CI_95_upper = map_dbl(meta_df, ~.[[1]]$upper.random)
      )
    )
  } else if (cohort_type == "single") {
    meta_df <- input_meta_df # skip get_good_metadfs step
    # Keep only columns whose stored fit has exactly one row.
    rows=list()
    count=0
    for(x in meta_df){
      count=count+1
      rows[[count]]=nrow(x[[1]])
    }
    rows=unname(unlist(rows))
    clean_meta_df=meta_df[rows==1]
    return(
      tibble(
        feature = colnames(clean_meta_df),
        feature_name = map_chr(feature, ~mapping[[as.integer(str_split(., "_")[[1]][[2]]), 1]]),
        estimate = map_dbl(clean_meta_df, ~.[[1]]$estimate),
        p.val = map_dbl(clean_meta_df, ~.[[1]]$p.value),
        bonferroni.p.val = p.adjust(p.val, method = "bonferroni"),
        fdr.p.val = p.adjust(p.val, method = "fdr"),
        by.p.val = p.adjust(p.val, method = "BY"),
        # Normal-approximation CI: estimate +/- 2 * standard error.
        CI_95_lower = map_dbl(clean_meta_df, ~(.[[1]]$estimate - 2 * .[[1]]$std.error)),
        CI_95_upper = map_dbl(clean_meta_df, ~(.[[1]]$estimate + 2 * .[[1]]$std.error))
      )
    )
  }
}
# Script entry point. Usage:
#   Rscript clean_meta_analysis_output.R <meta_outputs.rds> <mapping.rds> \
#       <cohort_type> <output.rds>
main <- function() {
  argv <- commandArgs(trailingOnly = TRUE)
  meta_outputs <- as_tibble(readRDS(argv[[1]]))
  mapping <- as_tibble(readRDS(argv[[2]]))
  # argv[[3]] is the cohort type ("multi" or "single"), argv[[4]] the output path.
  summary_tbl <- get_summary_stats(meta_outputs, argv[[3]], mapping)
  saveRDS(summary_tbl, argv[[4]])
}
main()
| /pipeline_scripts/clean_meta_analysis_output.R | permissive | jtnedoctor/microbiome_voe | R | false | false | 2,609 | r | library(ggplot2)
library(dplyr)
library(purrr)
library(tidyr)
library(magrittr)
library(broom)
library(stringr) # regex pattern matching
library(rlang) # rlang::duplicate()
library(cowplot)
library(rje) # powerSet()
library(meta)
library(metafor)
# Drop columns whose stored fit is not a 'metagen' object (e.g. failed fits).
# NOTE(review): `1:length(meta_df)` misbehaves on empty input (1:0 yields
# c(1, 0)); seq_along() would be safer -- confirm empty input never occurs.
get_good_metadfs <- function(meta_df) {
  # Indices of columns to remove, keyed by a running counter.
  toremove=list()
  count=0
  for(x in 1:length(meta_df)){
    # Keep only columns whose first element's primary class is 'metagen'.
    if(class(meta_df[[x]][[1]])[[1]]!='metagen'){
      toremove[as.character(count)]=x
      count=count+1
    }
  }
  if(length(toremove)>0){
    meta_df=meta_df[-unname(unlist(toremove))]
  }
  return(meta_df)
}
get_summary_stats <- function(input_meta_df, cohort_type, mapping) {
if (cohort_type == "multi") {
meta_df <- get_good_metadfs(input_meta_df) # for meta-analyzed outputs, filter for good outputs first
return(
tibble(
feature = colnames(meta_df),
feature_name = map_chr(feature, ~mapping[[as.integer(str_split(., "_")[[1]][[2]]), 1]]),
estimate = map_dbl(meta_df, ~.[[1]]$TE.random),
p.val = map_dbl(meta_df, ~.[[1]]$pval.random),
bonferroni.p.val = p.adjust(p.val, method = "bonferroni"),
fdr.p.val = p.adjust(p.val, method = "fdr"),
by.p.val = p.adjust(p.val, method = "BY"),
CI_95_lower = map_dbl(meta_df, ~.[[1]]$lower.random),
CI_95_upper = map_dbl(meta_df, ~.[[1]]$upper.random)
)
)
} else if (cohort_type == "single") {
meta_df <- input_meta_df # skip get_good_metadfs step
rows=list()
count=0
for(x in meta_df){
count=count+1
rows[[count]]=nrow(x[[1]])
}
rows=unname(unlist(rows))
clean_meta_df=meta_df[rows==1]
return(
tibble(
feature = colnames(clean_meta_df),
feature_name = map_chr(feature, ~mapping[[as.integer(str_split(., "_")[[1]][[2]]), 1]]),
estimate = map_dbl(clean_meta_df, ~.[[1]]$estimate),
p.val = map_dbl(clean_meta_df, ~.[[1]]$p.value),
bonferroni.p.val = p.adjust(p.val, method = "bonferroni"),
fdr.p.val = p.adjust(p.val, method = "fdr"),
by.p.val = p.adjust(p.val, method = "BY"),
CI_95_lower = map_dbl(clean_meta_df, ~(.[[1]]$estimate - 2 * .[[1]]$std.error)),
CI_95_upper = map_dbl(clean_meta_df, ~(.[[1]]$estimate + 2 * .[[1]]$std.error))
)
)
}
}
# Script entry point. Command-line args (in order): RDS of model/meta-analysis
# outputs, RDS of the feature-name mapping, cohort type ("multi" or "single"),
# and the output RDS path.
main <- function() {
  args <- commandArgs(trailingOnly = TRUE)
  meta_outputs <- as_tibble(readRDS(args[[1]]))
  mapping <- as_tibble(readRDS(args[[2]]))
  cohort_type <- args[[3]]
  outputname <- args[[4]]
  output <- get_summary_stats(meta_outputs, cohort_type, mapping)
  saveRDS(output,
          outputname)
}
main()
|
def main(rule_args, callback, rei):
    # Fetch the current system time as a human-readable string.
    human_time = callback.msiGetSystemTime('dummy_str', 'human')['arguments'][0]
    # Convert the human-readable timestamp back to unix seconds.
    unix_time = callback.msiHumanToSystemTime(human_time, 'dummy_str')['arguments'][1]
    callback.writeLine('stdout', 'Input date is ' + human_time)
    callback.writeLine('stdout', 'Time in unix seconds is ' + unix_time)
INPUT null
OUTPUT ruleExecOut
| /python_rules/rulemsiHumanToSystemTime.r | no_license | irods/irods_rule_engine_plugin_python | R | false | false | 390 | r | def main(rule_args, callback, rei):
ret_val = callback.msiGetSystemTime('dummy_str', 'human')
date = ret_val['arguments'][0]
ret_val = callback.msiHumanToSystemTime(date, 'dummy_str')
time = ret_val['arguments'][1]
callback.writeLine('stdout', 'Input date is ' + date)
callback.writeLine('stdout', 'Time in unix seconds is ' + time)
INPUT null
OUTPUT ruleExecOut
|
## Functions that can calculate and cache the inverse of a matrix.
## Function to cache the inverse of a matrix
## Create a special "matrix": a list of accessors around a matrix `x` and a
## cached copy of its inverse.
##
## Bug fixes vs. the original:
##  * the cache is initialised as `inverseMatrix` (the name the accessors
##    actually use) instead of the unused `InverseMatrix`, so getInverse()
##    no longer fails (and setInverse() no longer leaks into the global
##    environment) before the first assignment;
##  * set() resets `inverseMatrix` rather than an undefined `m`, so
##    replacing the matrix correctly invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverseMatrix <- NULL
  set <- function(y){
    x <<- y
    inverseMatrix <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) inverseMatrix <<- inverse
  getInverse <- function() inverseMatrix
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Function to calculate the inverse matrix, if already cached then just get it from cache
## Return the inverse of the special "matrix" `x` (built by makeCacheMatrix),
## computing it only once and caching the result.
##
## Fixes vs. the original: `...` is now forwarded to solve() (it was accepted
## but silently dropped), and the local variable no longer shadows
## base::matrix.
cacheSolve <- function(x, ...) {
  inverseMatrix <- x$getInverse()
  if(!is.null(inverseMatrix)){
    message("Getting cached inverse matrix")
    return(inverseMatrix)
  }
  data <- x$get()
  inverseMatrix <- solve(data, ...)
  x$setInverse(inverseMatrix)
  inverseMatrix
}
| /cachematrix.R | no_license | szkwh/R-Programming-Assignment2 | R | false | false | 831 | r | ## Functions that can calculate and cache the inverse of a matrix.
## Function to cache the inverse of a matrix
makeCacheMatrix <- function(x = matrix()) {
InverseMatrix <- NULL
set <- function(y){
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(inverse) inverseMatrix <<- inverse
getInverse <- function() inverseMatrix
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Function to calculate the inverse matrix, if already cached then just get it from cache
cacheSolve <- function(x, ...) {
inverseMatrix <- x$getInverse()
if(!is.null(inverseMatrix)){
message("Getting cached inverse matrix")
return(inverseMatrix)
}
matrix <- x$get()
inverseMatrix <- solve(matrix)
x$setInverse(inverseMatrix)
inverseMatrix
}
|
# Rolling-origin (expanding-window) forecast evaluation.
#
# For each origin i = 1..n, forecasts with `forecastfunction` trained on the
# first i observations of `y`, and records h-step-ahead point forecasts,
# prediction intervals, realised values and forecast errors.
#
# Args:
#   y                time series (coerced with as.ts)
#   forecastfunction function(y, h, level) returning an object with $mean,
#                    $lower, $upper (forecast-package style -- assumed from
#                    the indexing below; confirm)
#   h                forecast horizon (steps ahead per origin)
#   level            prediction-interval levels (percent)
#   doPar            TRUE selects the (currently broken) parallel path
#
# Returns: n x h x (2*length(level) + 3) array with named third-margin
# slices "mean", "lower_<level>", "upper_<level>", "actual", "error".
rollingForecasts_exp = function (y, forecastfunction, h = 12, level = c(80, 95), doPar=T)
{
  forecastfunction = match.fun(forecastfunction)
  y <- as.ts(y)
  n <- length(y)
  # Result array, pre-filled with NA.
  fcasts = array(NA, dim = c(n, h, 2*length(level) + 3),
                 dimnames = list(forecast_date = 1:n,
                                 steps_ahead = 1:h,
                                 forecast = c("mean", paste0("lower_", level), paste0("upper_", level), "actual", "error")
                 )
  )
  result = vector(mode = "list", length = n)
  if(doPar==T) {
    # NOTE(review): this branch is effectively dead. The cluster is created,
    # but every statement that would populate `result` is commented out
    # below, so each result[[i]] stays NULL; the fill loop then assigns
    # zero-length values into the array and errors. Also `stopCluster` is
    # called unqualified even though the package is only referenced via
    # parallel:: -- confirm intent before using doPar = TRUE.
    #print(y)
    nCore = parallel::detectCores()
    cl = parallel::makeCluster(nCore)
    parEnv = new.env()
    assign("y", y, parEnv)
    assign("n", n, parEnv)
    # doParallel::registerDoParallel(cl, cores = nCore)
    # result = foreach::foreach(i = 1:n, .packages = c("forecast")) %dopar%
    # {
    # attempt = try(forecastfunction(subset(y,end = i),
    # h = h,
    # level = level),
    # silent = TRUE)
    # attempt
    # }
    # parallel::clusterEvalQ(cl, expr = { library(forecast) })
    # parallel::clusterExport(cl, varlist = c("y", "n", "h", "level", "forecastfunction"))
    # fcasts = array(NA, dim = c(n, h, 2*length(level) + 3),
    # dimnames = list(forecast_date = 1:n,
    # steps_ahead = 1:h,
    # forecast = c("mean", paste0("lower_", level), paste0("upper_", level), "actual", "error")
    # )
    # )
    # result = parallel::parLapply(cl,
    # X = seq_len(n),
    # fun = function(i) try(forecastfunction(subset(y,end = i),
    # h = h,
    # level = level),
    # silent = TRUE))
    stopCluster(cl)
    for (i in seq_len(n)) {
      # Failed fits (try-errors) are skipped, leaving their cells NA.
      if (!is.element("try-error", class(result[[i]]))) {
        fcasts[i, 1:h, 1] <- result[[i]]$mean[1:h]
        for(j in 1:length(level)) {
          fcasts[i, 1:h, 1 + j] <- result[[i]]$lower[1:h, j]
          fcasts[i, 1:h, 1 + length(level) + j] <- result[[i]]$upper[1:h, j]
        }
        # Realised future values; indices past length(y) yield NA.
        fcasts[i, 1:h, 1 + 2*length(level) + 1] <- y[(i+1):(i+h)]
        fcasts[i, 1:h, 1 + 2*length(level) + 2] <- fcasts[i, 1:h, 1] - y[(i+1):(i+h)]
      }
    }
    #return(result)
  } else if(doPar==F){
    # Sequential path: refit at each origin with an expanding training window.
    # NOTE(review): subset(y, end = i) on a ts relies on forecast's
    # subset.ts method being available -- confirm the 'forecast' package
    # is attached by callers.
    for (i in seq_len(n)) {
      result <- try(forecastfunction(subset(y,end = i), h = h, level = level), silent = TRUE)
      if (!is.element("try-error", class(result))) {
        fcasts[i, 1:h, 1] <- result$mean[1:h]
        for(j in 1:length(level)) {
          fcasts[i, 1:h, 1 + j] <- result$lower[1:h, j]
          fcasts[i, 1:h, 1 + length(level) + j] <- result$upper[1:h, j]
        }
        fcasts[i, 1:h, 1 + 2*length(level) + 1] <- y[(i+1):(i+h)]
        fcasts[i, 1:h, 1 + 2*length(level) + 2] <- fcasts[i, 1:h, 1] - y[(i+1):(i+h)]
      }
    }
  }
  return(fcasts)
}
| /R/rollingForecasts_exp.R | no_license | bplloyd/Core | R | false | false | 3,351 | r | rollingForecasts_exp = function (y, forecastfunction, h = 12, level = c(80, 95), doPar=T)
{
  # Expanding-window ("rolling origin") forecast evaluation.  For every
  # origin i in 1..n the model is fit on y[1..i] and asked for h-step-ahead
  # forecasts; point forecasts, prediction intervals, realised values and
  # forecast errors are stored in a 3-d array.
  forecastfunction <- match.fun(forecastfunction)
  y <- as.ts(y)
  n <- length(y)
  # Result cube: [origin, horizon, (mean, lower_*, upper_*, actual, error)].
  fcasts <- array(NA, dim = c(n, h, 2 * length(level) + 3),
                  dimnames = list(forecast_date = 1:n,
                                  steps_ahead = 1:h,
                                  forecast = c("mean",
                                               paste0("lower_", level),
                                               paste0("upper_", level),
                                               "actual", "error")))
  # Copy one successful forecast object into row i of fcasts.  Forecasts
  # made at origin i are compared with y[(i+1):(i+h)]; indices past the end
  # of y yield NA, so the last h origins are only partially filled (same as
  # the original behaviour).
  fill_row <- function(fc, i) {
    if (!is.element("try-error", class(fc))) {
      fcasts[i, 1:h, 1] <<- fc$mean[1:h]
      for (j in seq_along(level)) {
        fcasts[i, 1:h, 1 + j] <<- fc$lower[1:h, j]
        fcasts[i, 1:h, 1 + length(level) + j] <<- fc$upper[1:h, j]
      }
      fcasts[i, 1:h, 2 * length(level) + 2] <<- y[(i + 1):(i + h)]
      fcasts[i, 1:h, 2 * length(level) + 3] <<- fcasts[i, 1:h, 1] - y[(i + 1):(i + h)]
    }
  }
  if (isTRUE(doPar)) {
    # Fit the per-origin models on a local cluster.
    cl <- parallel::makeCluster(parallel::detectCores())
    # The original called a bare stopCluster(cl), which errors unless the
    # parallel package is attached; namespace it and guarantee teardown
    # even if a worker call fails.
    on.exit(parallel::stopCluster(cl), add = TRUE)
    # forecast supplies subset.ts(), used in the worker; assumed installed
    # on the workers -- TODO confirm.
    parallel::clusterEvalQ(cl, library(forecast))
    # NOTE(review): previously the parLapply call was commented out, so
    # `result` stayed a list of NULLs and the fill loop below errored.
    # The worker closure carries y, h, level and forecastfunction with it,
    # so no explicit clusterExport() is required.
    result <- parallel::parLapply(cl, seq_len(n), function(i) {
      try(forecastfunction(subset(y, end = i), h = h, level = level),
          silent = TRUE)
    })
    for (i in seq_len(n)) fill_row(result[[i]], i)
  } else {
    # Serial fallback (doPar = FALSE).
    for (i in seq_len(n)) {
      fit <- try(forecastfunction(subset(y, end = i), h = h, level = level),
                 silent = TRUE)
      fill_row(fit, i)
    }
  }
  return(fcasts)
}
|
#
# Weiner process
#
# https://en.wikipedia.org/wiki/Wiener_process
#
# Simulate one path of a standard Wiener (Brownian motion) process on
# [0, T], sampled at n equally spaced points with W[1] = 0.
#
# Increments over a time step delta are i.i.d. N(0, delta), so the path is
# the cumulative sum of normal draws.  With a fixed RNG seed this consumes
# the same deviates, in the same order, as the original one-draw-per-
# iteration loop, so results are bit-identical.
weiner_process <- function(T, n){
  delta = T/n
  W <- array(0, n)
  if (n > 1) {
    # The original `for(t in c(2:n))` became c(2, 1) when n == 1 and
    # silently grew W to length 2; guard the degenerate case instead.
    W[2:n] <- cumsum(rnorm(n - 1, 0, sqrt(delta)))
  }
  return(W)
}
# Simulate `times` Wiener paths on [0, T] with n sample points each and
# overlay them on a single plot, each path in a random colour.
draw_weiner <- function(T, n, times){
  # One row per simulated path.
  data_ <- matrix(ncol = n, nrow = times)
  for(i in c(1:times)){
    data_[i,] <- weiner_process(T,n)
  }
  # First path sets up the axes; ylim spans all simulated paths so none is
  # clipped.
  plot(data_[1,], col = rgb(runif(1,0,1), runif(1,0,1), runif(1,0,1)), type='l', ylim = c(min(data_), max(data_)))
  # NOTE(review): for times == 1 this iterates over c(2, 1) and indexes a
  # non-existent row 2; guard with `if (times > 1)` or seq_len(times)[-1].
  for(i in c(2:times)){
    lines(data_[i,], col = rgb(runif(1,0,1), runif(1,0,1), runif(1,0,1)));
  }
} | /weiner_process.R | no_license | gurudk/fts3 | R | false | false | 593 | r | #
# Weiner process
#
# https://en.wikipedia.org/wiki/Wiener_process
#
weiner_process <-function(T, n){
delta = T/n
W <- array(0,n)
for(t in c(2:n)){
W[t] <- W[t-1] + rnorm(1, 0, sqrt(delta))
}
return(W)
}
draw_weiner <- function(T, n, times){
data_ <- matrix(ncol = n, nrow = times)
for(i in c(1:times)){
data_[i,] <- weiner_process(T,n)
}
plot(data_[1,], col = rgb(runif(1,0,1), runif(1,0,1), runif(1,0,1)), type='l', ylim = c(min(data_), max(data_)))
for(i in c(2:times)){
lines(data_[i,], col = rgb(runif(1,0,1), runif(1,0,1), runif(1,0,1)));
}
} |
\name{mstep}
\alias{mstep}
\title{M-step for parameterized Gaussian mixture models}
\description{
Maximization step in the EM algorithm for parameterized Gaussian
mixture models.
}
\usage{
mstep(modelName, data, z, prior = NULL, warn = NULL, \dots)
}
\arguments{
\item{modelName}{
A character string indicating the model. The help file for
\code{\link{mclustModelNames}} describes the available models.
}
\item{data}{
A numeric vector, matrix, or data frame of observations.
Categorical variables are not allowed.
If a matrix or data frame, rows correspond to observations and
columns correspond to variables.
}
\item{z}{
A matrix whose \code{[i,k]}th entry is the
    conditional probability of the \emph{i}th observation belonging to
the \emph{k}th component of the mixture.
In analyses involving noise, this should not include the
conditional probabilities for the noise component.
}
\item{prior}{
Specification of a conjugate prior on the means and variances.
The default assumes no prior.
}
\item{warn}{
A logical value indicating whether or not certain warnings
(usually related to singularity) should be issued when the
estimation fails. The default is given by \code{mclust.options("warn")}.
}
\item{\dots}{
Catches unused arguments in indirect or list calls via \code{do.call}.
}
}
\value{
A list including the following components:
\item{modelName}{
A character string identifying the model (same as the input argument).
}
\item{parameters}{
\describe{
\item{\code{pro}}{
A vector whose \emph{k}th component is the mixing proportion for
the \emph{k}th component of the mixture model.
If the model includes a Poisson term for noise, there
should be one more mixing proportion than the number
of Gaussian components.
}
\item{\code{mean}}{
The mean for each component. If there is more than one component,
this is a matrix whose kth column is the mean of the \emph{k}th
component of the mixture model.
}
\item{\code{variance}}{
A list of variance parameters for the model.
The components of this list depend on the model
specification. See the help file for \code{\link{mclustVariance}}
for details.
}
}
}
\item{Attributes:}{
\code{"info"} For those models with iterative M-steps
(\code{"VEI"} and \code{"VEV"}), information on the iteration.\cr
\code{"WARNING"} An appropriate warning if problems are
encountered in the computations.
}
}
\note{
This function computes the M-step only for MVN mixtures, so in
analyses involving noise, the conditional probabilities input should
exclude those for the noise component. \cr
In contrast to \code{me} for the EM algorithm, computations in \code{mstep}
are carried out unless failure due to overflow would occur. To impose
stricter tolerances on a single \code{mstep}, use \code{me} with the
\emph{itmax} component of the \code{control} argument set to 1.
}
\seealso{
\code{\link{mstepE}}, \dots,
\code{\link{mstepVVV}},
\code{\link{emControl}},
\code{\link{me}},
\code{\link{estep}},
\code{\link{mclust.options}}.
}
\examples{
\dontrun{
mstep(modelName = "VII", data = iris[,-5], z = unmap(iris[,5]))}
}
\keyword{cluster}
| /man/mstep.Rd | no_license | Japrin/mclust | R | false | false | 3,530 | rd | \name{mstep}
\alias{mstep}
\title{M-step for parameterized Gaussian mixture models}
\description{
Maximization step in the EM algorithm for parameterized Gaussian
mixture models.
}
\usage{
mstep(modelName, data, z, prior = NULL, warn = NULL, \dots)
}
\arguments{
\item{modelName}{
A character string indicating the model. The help file for
\code{\link{mclustModelNames}} describes the available models.
}
\item{data}{
A numeric vector, matrix, or data frame of observations.
Categorical variables are not allowed.
If a matrix or data frame, rows correspond to observations and
columns correspond to variables.
}
\item{z}{
A matrix whose \code{[i,k]}th entry is the
    conditional probability of the \emph{i}th observation belonging to
the \emph{k}th component of the mixture.
In analyses involving noise, this should not include the
conditional probabilities for the noise component.
}
\item{prior}{
Specification of a conjugate prior on the means and variances.
The default assumes no prior.
}
\item{warn}{
A logical value indicating whether or not certain warnings
(usually related to singularity) should be issued when the
estimation fails. The default is given by \code{mclust.options("warn")}.
}
\item{\dots}{
Catches unused arguments in indirect or list calls via \code{do.call}.
}
}
\value{
A list including the following components:
\item{modelName}{
A character string identifying the model (same as the input argument).
}
\item{parameters}{
\describe{
\item{\code{pro}}{
A vector whose \emph{k}th component is the mixing proportion for
the \emph{k}th component of the mixture model.
If the model includes a Poisson term for noise, there
should be one more mixing proportion than the number
of Gaussian components.
}
\item{\code{mean}}{
The mean for each component. If there is more than one component,
this is a matrix whose kth column is the mean of the \emph{k}th
component of the mixture model.
}
\item{\code{variance}}{
A list of variance parameters for the model.
The components of this list depend on the model
specification. See the help file for \code{\link{mclustVariance}}
for details.
}
}
}
\item{Attributes:}{
\code{"info"} For those models with iterative M-steps
(\code{"VEI"} and \code{"VEV"}), information on the iteration.\cr
\code{"WARNING"} An appropriate warning if problems are
encountered in the computations.
}
}
\note{
This function computes the M-step only for MVN mixtures, so in
analyses involving noise, the conditional probabilities input should
exclude those for the noise component. \cr
In contrast to \code{me} for the EM algorithm, computations in \code{mstep}
are carried out unless failure due to overflow would occur. To impose
stricter tolerances on a single \code{mstep}, use \code{me} with the
\emph{itmax} component of the \code{control} argument set to 1.
}
\seealso{
\code{\link{mstepE}}, \dots,
\code{\link{mstepVVV}},
\code{\link{emControl}},
\code{\link{me}},
\code{\link{estep}},
\code{\link{mclust.options}}.
}
\examples{
\dontrun{
mstep(modelName = "VII", data = iris[,-5], z = unmap(iris[,5]))}
}
\keyword{cluster}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Evaluation.R
\name{evaluateCiCalibration}
\alias{evaluateCiCalibration}
\title{Evaluate confidence interval calibration}
\usage{
evaluateCiCalibration(
logRr,
seLogRr,
trueLogRr,
strata = as.factor(trueLogRr),
crossValidationGroup = 1:length(logRr),
legacy = FALSE
)
}
\arguments{
\item{logRr}{A numeric vector of effect estimates on the log scale.}
\item{seLogRr}{The standard error of the log of the effect estimates. Hint: often the
standard error = (log(<lower bound 95 percent confidence interval>) -
log(<effect estimate>))/qnorm(0.025).}
\item{trueLogRr}{The true log relative risk.}
\item{strata}{Variable used to stratify the plot. Set \code{strata = NULL} for no
stratification.}
\item{crossValidationGroup}{What should be the unit for the cross-validation? By default the unit
is a single control, but a different grouping can be provided, for
example linking a negative control to synthetic positive controls
derived from that negative control.}
\item{legacy}{If true, a legacy error model will be fitted, meaning standard
deviation is linear on the log scale. If false, standard deviation
is assumed to be simply linear.}
}
\value{
A data frame specifying the coverage per strata (usually true effect size) for a wide range of widths
of the confidence interval. The result also includes the fraction of estimates that was below and above
the confidence interval.
}
\description{
\code{evaluateCiCalibration} performs a leave-one-out cross-validation to evaluate the calibration
of confidence intervals.
}
\details{
The empirical calibration is performed using a leave-one-out design: The confidence interval of an
effect is computed by fitting a null using all other controls.
}
\examples{
\dontrun{
data <- simulateControls(n = 50 * 3, mean = 0.25, sd = 0.25, trueLogRr = log(c(1, 2, 4)))
eval <- evaluateCiCalibration(data$logRr, data$seLogRr, data$trueLogRr)
}
}
| /man/evaluateCiCalibration.Rd | permissive | fanbu1995/EmpiricalCalibration | R | false | true | 1,973 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Evaluation.R
\name{evaluateCiCalibration}
\alias{evaluateCiCalibration}
\title{Evaluate confidence interval calibration}
\usage{
evaluateCiCalibration(
logRr,
seLogRr,
trueLogRr,
strata = as.factor(trueLogRr),
crossValidationGroup = 1:length(logRr),
legacy = FALSE
)
}
\arguments{
\item{logRr}{A numeric vector of effect estimates on the log scale.}
\item{seLogRr}{The standard error of the log of the effect estimates. Hint: often the
standard error = (log(<lower bound 95 percent confidence interval>) -
log(<effect estimate>))/qnorm(0.025).}
\item{trueLogRr}{The true log relative risk.}
\item{strata}{Variable used to stratify the plot. Set \code{strata = NULL} for no
stratification.}
\item{crossValidationGroup}{What should be the unit for the cross-validation? By default the unit
is a single control, but a different grouping can be provided, for
example linking a negative control to synthetic positive controls
derived from that negative control.}
\item{legacy}{If true, a legacy error model will be fitted, meaning standard
deviation is linear on the log scale. If false, standard deviation
is assumed to be simply linear.}
}
\value{
A data frame specifying the coverage per strata (usually true effect size) for a wide range of widths
of the confidence interval. The result also includes the fraction of estimates that was below and above
the confidence interval.
}
\description{
\code{evaluateCiCalibration} performs a leave-one-out cross-validation to evaluate the calibration
of confidence intervals.
}
\details{
The empirical calibration is performed using a leave-one-out design: The confidence interval of an
effect is computed by fitting a null using all other controls.
}
\examples{
\dontrun{
data <- simulateControls(n = 50 * 3, mean = 0.25, sd = 0.25, trueLogRr = log(c(1, 2, 4)))
eval <- evaluateCiCalibration(data$logRr, data$seLogRr, data$trueLogRr)
}
}
|
# Monte Carlo price and vega of a European call under geometric Brownian
# motion, driven by a caller-supplied vector of standard-normal draws Z.
#
# Args:
#   Z     : vector of N(0, 1) draws, one per simulated path
#   S0    : spot price;  sigma : volatility;  r : risk-free rate
#   T     : time to maturity (years);  K : strike
# Returns: unnamed list(price, vega, SE) where vega and SE each hold two
#   entries: the likelihood-ratio estimator first, the pathwise estimator
#   second.
estvega <- function(Z, S0, sigma, r, T, K){
  # Risk-neutral terminal price S0*exp((r - sigma^2/2)T + sigma*sqrt(T)*Z).
  # The original drift had -5*sigma^2*T, a typo for -0.5; the pathwise
  # derivative below, ST*(sqrt(T)*Z - sigma*T), is exactly d(ST)/d(sigma)
  # under the 0.5 drift, confirming the intent.
  ST = S0*exp((r - 0.5*sigma^2)*T + sigma*sqrt(T)*Z)
  # Discounted payoff per path; pmax keeps it elementwise (max() collapsed
  # all paths to a single scalar).
  v = pmax(ST - K, 0)
  # Likelihood-ratio vega estimator.
  v1 = exp(-r*T)*(v*((Z^2-1)/sigma - sqrt(T)*Z))
  # Pathwise vega: the payoff derivative carries the in-the-money
  # indicator (restores the `*(ST>K)` factor that had been commented out).
  vprime = as.numeric(ST > K)
  v2 = exp(-r*T)*(vprime*ST*(sqrt(T)*Z - sigma*T))
  vega = c(mean(v1), mean(v2))
  # Standard error of a Monte Carlo mean is sd/sqrt(N), not sd/N.
  SE = c(sd(v1), sd(v2))/sqrt(length(Z))
  price = exp(-r*T)*mean(v)
  return(list(price, vega, SE))
}
#######################################################
# Example run: 10,000-path estimate for S0 = 10, sigma = 0.2, r = 5%,
# T = 0.25 years, strike K = 9.
#Z1=rnorm(n=10000)
estvega(rnorm(10000),10,0.2,0.05,0.25,9)
# ###########option price##########################################################################
# local({r <- getOption("repos")
# r["CRAN"] <- "http://cran.stat.sfu.ca/"
# options(repos=r)})
#
# # Set a vector of strings: package names to use (and install, if necessary)
# pkg_list = c('zoo', 'tseries', 'MASS','stats','car','moments','fGarch', 'readxl','Ecdat',
# 'evir','AER','sandwich','fracdiff','longmemo','faraway','xts','forecast','nor1mix','bootstrap','rugarch',
# 'quantmod', 'rugarch', 'nnfor', 'forecast', 'fOptions', 'ggplot2')
#
#
# # ensure existing required packages are up to date:
# update.packages(ask=FALSE, oldPkgs=pkg_list)
#
# # Install packages if needed
# for (pkg in pkg_list)
# {
# # Try loading the library.
# if ( ! library(pkg, logical.return=TRUE, character.only=TRUE) )
# {
# # If the library cannot be loaded, install it; then load.
# install.packages(pkg)
# library(pkg, character.only=TRUE)
# }
# }
#
# library(fOptions)
# library(tseries)
# library(ggplot2)
# library(xts)
######################Monte Carlo Simulation####################################
# ######################Monte Carlo Simulation####################################
# callMonte.function<-function(r, S0, sigma, t, K){
# N=252
# j=N*t
# s=sigma/sqrt(N)
# mn=r/N-s^2/2
# y<-rep(0, j+1)
# y[1]=S0
# y2=S0*exp(cumsum(rnorm(j, mean=mn, sd=s)))
# y[1:j+1]<-y2
# x=(0:j)/N
# # plot(x, y, type="l",main="simulated return from call option", xlab="time in years", ylab="value of stock")
# #abline(h=K, lty=2, lwd=1, col="red")
# z=exp(-r*t)*max(y[j+1]-K,0)
# return(z)
# }
# graphics.off()
# y1=as.xts(get.hist.quote(instrument = "aapl",start = "2014-12-09",end = "2018-11-28",quote="AdjClose"))
# y1<-y1$Adjusted
# ret=na.omit(diff(log(y1))) ##compute returns
# ########### hngarch###############################################################
# ## hngarchFit -
# # HN-GARCH log likelihood Parameter Estimation:
#
# model=list(lambda = -0.5, omega =var(ret) , alpha = 0.1*var(ret),
# beta =0.1, gamma = 0, rf = 0)
# ret<-as.ts(ret)
# mle = hngarchFit(model = model, x = ret, symmetric = TRUE)
# mle
# sigma1<-mean(sqrt(mle$h)) # sigma is the average of the volatility
# volatility<-sqrt(252)*sigma1
#
#
# call1<-rep(0, 10000)
# for(i in 1:10000){
# call1[i]<-callMonte.function(0.05, 255, volatility , 63/250, 260)
# }
# mean(call1)
#
# call.monte.hn<-mean(call1)
| /vega.R | no_license | manmohits/DDNA-Forecasting-Stock-market | R | false | false | 3,164 | r |
estvega<-function(Z, S0, sigma, r, T, K){
ST = S0*exp(r*T+sigma*sqrt(T)*Z-5*sigma^2*T)
v=max(0, ST-K)
v1=exp(-r*T)*(v*((Z^2-1)/sigma-sqrt(T)*Z))
#vprime=matrix(1,nrow=1, ncol=length(Z))*(ST>K)
vprime=matrix(1,nrow=1, ncol=length(Z))
v2= exp(-r*T)*(vprime*ST*(sqrt(T)*Z-sigma*T))
# vega<-rep(0, 2)
vega=c(mean(v1), mean(v2))
SE = c(sqrt(var(v1))/length(Z), sqrt(var(v2))/length(Z))
price=exp(-r*T)*mean(v)
return(list(price, vega, SE))
}
#######################################################
#Z1=rnorm(n=10000)
estvega(rnorm(10000),10,0.2,0.05,0.25,9)
# ###########option price##########################################################################
# local({r <- getOption("repos")
# r["CRAN"] <- "http://cran.stat.sfu.ca/"
# options(repos=r)})
#
# # Set a vector of strings: package names to use (and install, if necessary)
# pkg_list = c('zoo', 'tseries', 'MASS','stats','car','moments','fGarch', 'readxl','Ecdat',
# 'evir','AER','sandwich','fracdiff','longmemo','faraway','xts','forecast','nor1mix','bootstrap','rugarch',
# 'quantmod', 'rugarch', 'nnfor', 'forecast', 'fOptions', 'ggplot2')
#
#
# # ensure existing required packages are up to date:
# update.packages(ask=FALSE, oldPkgs=pkg_list)
#
# # Install packages if needed
# for (pkg in pkg_list)
# {
# # Try loading the library.
# if ( ! library(pkg, logical.return=TRUE, character.only=TRUE) )
# {
# # If the library cannot be loaded, install it; then load.
# install.packages(pkg)
# library(pkg, character.only=TRUE)
# }
# }
#
# library(fOptions)
# library(tseries)
# library(ggplot2)
# library(xts)
######################Monte Carlo Simulation####################################
# ######################Monte Carlo Simulation####################################
# callMonte.function<-function(r, S0, sigma, t, K){
# N=252
# j=N*t
# s=sigma/sqrt(N)
# mn=r/N-s^2/2
# y<-rep(0, j+1)
# y[1]=S0
# y2=S0*exp(cumsum(rnorm(j, mean=mn, sd=s)))
# y[1:j+1]<-y2
# x=(0:j)/N
# # plot(x, y, type="l",main="simulated return from call option", xlab="time in years", ylab="value of stock")
# #abline(h=K, lty=2, lwd=1, col="red")
# z=exp(-r*t)*max(y[j+1]-K,0)
# return(z)
# }
# graphics.off()
# y1=as.xts(get.hist.quote(instrument = "aapl",start = "2014-12-09",end = "2018-11-28",quote="AdjClose"))
# y1<-y1$Adjusted
# ret=na.omit(diff(log(y1))) ##compute returns
# ########### hngarch###############################################################
# ## hngarchFit -
# # HN-GARCH log likelihood Parameter Estimation:
#
# model=list(lambda = -0.5, omega =var(ret) , alpha = 0.1*var(ret),
# beta =0.1, gamma = 0, rf = 0)
# ret<-as.ts(ret)
# mle = hngarchFit(model = model, x = ret, symmetric = TRUE)
# mle
# sigma1<-mean(sqrt(mle$h)) # sigma is the average of the volatility
# volatility<-sqrt(252)*sigma1
#
#
# call1<-rep(0, 10000)
# for(i in 1:10000){
# call1[i]<-callMonte.function(0.05, 255, volatility , 63/250, 260)
# }
# mean(call1)
#
# call.monte.hn<-mean(call1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appOne.r
\name{appOne}
\alias{appOne}
\title{example Shiny app}
\usage{
appOne()
}
\description{
A simple demonstration application to test the elements of the library
}
| /GoViewer/man/appOne.Rd | no_license | aidanmacnamara/epiView | R | false | true | 248 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appOne.r
\name{appOne}
\alias{appOne}
\title{example Shiny app}
\usage{
appOne()
}
\description{
A simple demonstration application to test the elements of the library
}
|
library(ggplot2)
library(plyr)
rm(list=ls())
x <- read.csv("~/Dropbox/fun/baseball/numbers/combined_data.csv", stringsAsFactors=FALSE)
sum(duplicated(x))
x <- x[!duplicated(x), ]
x$number <- as.numeric(x$number)
x <- subset(x, number > 0 & batting_average > 0)
x$plate_appearances <- as.numeric(x$plate_appearances)
x$batting_average <- as.numeric(x$batting_average)
x$is_pitcher <- factor(x$is_pitcher)
x <- subset(x, plate_appearances >= 500)
x <- x[order(-x$batting_average), ]
ggplot(x, aes(x=factor(number), y=batting_average)) +
geom_point(alpha=.5, aes(color=is_pitcher)) +
scale_x_discrete("Jersey Number") +
scale_y_continuous("Batting Average") +
coord_flip()
numbers <- ddply(subset(x, is_pitcher==0), .(number),
summarize,
ba=mean(batting_average),
pa=mean(plate_appearances),
score=sum(batting_average * plate_appearances / sum(plate_appearances)),
players=length(batting_average))
numbers <- subset(numbers, players > 4)
numbers$num <- factor(numbers$number)
ggplot(numbers, aes(x=number, y=score)) +
geom_text(aes(label=number)) +
ggtitle('Weighted Batting Average by Player Number\nMinimum 500 at bats, 5 players per number') +
scale_x_continuous("Jersey Number") +
scale_y_continuous("Batting Average Score")
## weighted average batting average
sum(numbers$ba * numbers$pa / sum(numbers$pa))
qplot(plate_appearances, batting_average, data=x, color=is_pitcher) | /player_numbers.R | no_license | alaiacano/baseball-jersey-numbers | R | false | false | 1,485 | r | library(ggplot2)
library(plyr)
rm(list=ls())
x <- read.csv("~/Dropbox/fun/baseball/numbers/combined_data.csv", stringsAsFactors=FALSE)
sum(duplicated(x))
x <- x[!duplicated(x), ]
x$number <- as.numeric(x$number)
x <- subset(x, number > 0 & batting_average > 0)
x$plate_appearances <- as.numeric(x$plate_appearances)
x$batting_average <- as.numeric(x$batting_average)
x$is_pitcher <- factor(x$is_pitcher)
x <- subset(x, plate_appearances >= 500)
x <- x[order(-x$batting_average), ]
ggplot(x, aes(x=factor(number), y=batting_average)) +
geom_point(alpha=.5, aes(color=is_pitcher)) +
scale_x_discrete("Jersey Number") +
scale_y_continuous("Batting Average") +
coord_flip()
numbers <- ddply(subset(x, is_pitcher==0), .(number),
summarize,
ba=mean(batting_average),
pa=mean(plate_appearances),
score=sum(batting_average * plate_appearances / sum(plate_appearances)),
players=length(batting_average))
numbers <- subset(numbers, players > 4)
numbers$num <- factor(numbers$number)
ggplot(numbers, aes(x=number, y=score)) +
geom_text(aes(label=number)) +
ggtitle('Weighted Batting Average by Player Number\nMinimum 500 at bats, 5 players per number') +
scale_x_continuous("Jersey Number") +
scale_y_continuous("Batting Average Score")
## weighted average batting average
sum(numbers$ba * numbers$pa / sum(numbers$pa))
qplot(plate_appearances, batting_average, data=x, color=is_pitcher) |
# This script assumes that the libraries 'plyr' and 'dplyr' are installed and loaded.
require(plyr)
require(dplyr)
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation
features <- read.table("UCI_HAR_Dataset/features.txt", header = FALSE, col.names = c("id", "features"))
data_test <- read.table("UCI_HAR_Dataset/test/X_test.txt", header = FALSE, sep = "", dec = "." ,col.names = features$features, colClasses = "numeric")
data_test_mean_std <- select(data_test, grep("mean\\(\\)|std\\(\\)", features$features))
subject_test <- read.table("UCI_HAR_Dataset/test/subject_test.txt", header = FALSE, col.names = c("subject"))
activity_test <- read.table("UCI_HAR_Dataset/test/y_test.txt", header = FALSE, col.names = c("activity"))
# add activity to testData
data_test_mean_std <- cbind(activity = activity_test, data_test_mean_std)
# add subject to testData
data_test_mean_std <- cbind(subject = subject_test, data_test_mean_std)
# do the same for train
subject_train <- read.table("UCI_HAR_Dataset/train/subject_train.txt", header = FALSE, col.names = c("subject"))
activity_train <- read.table("UCI_HAR_Dataset/train/y_train.txt", header = FALSE, col.names = c("activity"))
data_train <- read.table("UCI_HAR_Dataset/train/X_train.txt", header = FALSE, sep = "", dec = "." ,col.names = features$features, colClasses = "numeric")
data_train_mean_std <- select(data_train, grep("mean\\(\\)|std\\(\\)", features$features))
data_train_mean_std <- cbind(activity = activity_train, data_train_mean_std)
data_train_mean_std <- cbind(subject = subject_train, data_train_mean_std)
# merge test and train data
data_all_mean_std <- rbind(data_train_mean_std, data_test_mean_std)
## 3. Uses descriptive activity names to name the activities in the data set
activity_names <- read.table("UCI_HAR_Dataset/activity_labels.txt", header = FALSE, col.names = c("activityNumber", "activityName"), colClasses = c("integer", "character"))
data_all_mean_std$activity <- mapvalues(data_all_mean_std$activity, from = activity_names$activityNumber, to = activity_names$activityName)
## 4. Appropriately labels the data set with descriptive variable names.
colnames(data_all_mean_std) <- sub("\\.std(\\.)*", "Std", colnames(data_all_mean_std))
colnames(data_all_mean_std) <- sub("\\.mean(\\.)*", "Mean", colnames(data_all_mean_std))
## 5. From the data set in step 4, creates a second, independent tidy
## data set with the average of each variable for each activity and
## each subject.
group_by_activity_subject <- group_by(data_all_mean_std, activity, subject, add = TRUE)
summarisedData <- summarise_each(group_by_activity_subject, funs(mean))
print(summarisedData)
| /run_analysis.R | no_license | codeplumber/datacleaningcoursera | R | false | false | 2,743 | r | # This script assumes that the libraries 'plyr' and 'dplyr' are installed and loaded.
require(plyr)
require(dplyr)
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation
features <- read.table("UCI_HAR_Dataset/features.txt", header = FALSE, col.names = c("id", "features"))
data_test <- read.table("UCI_HAR_Dataset/test/X_test.txt", header = FALSE, sep = "", dec = "." ,col.names = features$features, colClasses = "numeric")
data_test_mean_std <- select(data_test, grep("mean\\(\\)|std\\(\\)", features$features))
subject_test <- read.table("UCI_HAR_Dataset/test/subject_test.txt", header = FALSE, col.names = c("subject"))
activity_test <- read.table("UCI_HAR_Dataset/test/y_test.txt", header = FALSE, col.names = c("activity"))
# add activity to testData
data_test_mean_std <- cbind(activity = activity_test, data_test_mean_std)
# add subject to testData
data_test_mean_std <- cbind(subject = subject_test, data_test_mean_std)
# do the same for train
subject_train <- read.table("UCI_HAR_Dataset/train/subject_train.txt", header = FALSE, col.names = c("subject"))
activity_train <- read.table("UCI_HAR_Dataset/train/y_train.txt", header = FALSE, col.names = c("activity"))
data_train <- read.table("UCI_HAR_Dataset/train/X_train.txt", header = FALSE, sep = "", dec = "." ,col.names = features$features, colClasses = "numeric")
data_train_mean_std <- select(data_train, grep("mean\\(\\)|std\\(\\)", features$features))
data_train_mean_std <- cbind(activity = activity_train, data_train_mean_std)
data_train_mean_std <- cbind(subject = subject_train, data_train_mean_std)
# merge test and train data
data_all_mean_std <- rbind(data_train_mean_std, data_test_mean_std)
## 3. Uses descriptive activity names to name the activities in the data set
activity_names <- read.table("UCI_HAR_Dataset/activity_labels.txt", header = FALSE, col.names = c("activityNumber", "activityName"), colClasses = c("integer", "character"))
data_all_mean_std$activity <- mapvalues(data_all_mean_std$activity, from = activity_names$activityNumber, to = activity_names$activityName)
## 4. Appropriately labels the data set with descriptive variable names.
colnames(data_all_mean_std) <- sub("\\.std(\\.)*", "Std", colnames(data_all_mean_std))
colnames(data_all_mean_std) <- sub("\\.mean(\\.)*", "Mean", colnames(data_all_mean_std))
## 5. From the data set in step 4, creates a second, independent tidy
## data set with the average of each variable for each activity and
## each subject.
group_by_activity_subject <- group_by(data_all_mean_std, activity, subject, add = TRUE)
summarisedData <- summarise_each(group_by_activity_subject, funs(mean))
print(summarisedData)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nhanes_data.R
\docType{data}
\name{nhanes_2010}
\alias{nhanes_2010}
\title{NHANES 2009-2010}
\format{
A data frame with 1417 rows and 24 variables:
\describe{
\item{id}{individual ID}
\item{gen_health}{general health indicator with five levels}
\item{mod_active}{minutes of moderate activity}
\item{vig_active}{minutes of vigorous activity}
\item{home_meals}{number of home meals a week}
\item{gender}{gender of the individual (factor with "male" or "female")}
\item{age}{age of the individual in years}
\item{marijuana}{whether the individual has used marijuana}
\item{illicit}{whether the individual has used illicit drugs}
\item{rehab}{whether the individual has been to rehab for their drug usage}
\item{asthma}{whether the individual has asthma}
\item{overweight}{whether the individual is overweight}
\item{cancer}{whether the individual has cancer}
\item{low_int}{rating of whether the individual has low interest in things}
\item{down}{rating of whether the individual has felt down}
\item{sleeping}{rating of whether the individual has had trouble sleeping}
\item{low_energy}{rating of whether the individual has low energy}
\item{appetite}{rating of whether the individual has lost appetite}
\item{feel_bad}{rating of whether the individual has felt bad}
\item{no_con}{rating of whether the individual has felt no confidence}
\item{speak_move}{rating of whether the individual has trouble speaking/moving}
\item{dead}{rating of whether the individual has wished he/she was dead}
\item{difficulty}{rating of whether the individual has felt difficulty from the previous conditions}
\item{active}{minutes of vigorous or moderate activity}
}
}
\source{
\url{https://wwwn.cdc.gov/nchs/nhanes/continuousnhanes/default.aspx?BeginYear=2009}
}
\usage{
nhanes_2010
}
\description{
A dataset containing information on health, healthcare, and demographics of adolescents
aged 18 - 30 in the United States from 2009 to 2010. This is a cleaned dataset
which is only a subset of the 2009-2010 data release of
the National Health and Nutrition Examination Survey (NHANES).
}
\keyword{datasets}
| /man/nhanes_2010.Rd | no_license | TysonStanley/furniture | R | false | true | 2,215 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nhanes_data.R
\docType{data}
\name{nhanes_2010}
\alias{nhanes_2010}
\title{NHANES 2009-2010}
\format{
A data frame with 1417 rows and 24 variables:
\describe{
\item{id}{individual ID}
\item{gen_health}{general health indicator with five levels}
\item{mod_active}{minutes of moderate activity}
\item{vig_active}{minutes of vigorous activity}
\item{home_meals}{number of home meals a week}
\item{gender}{gender of the individual (factor with "male" or "female")}
\item{age}{age of the individual in years}
\item{marijuana}{whether the individual has used marijuana}
\item{illicit}{whether the individual has used illicit drugs}
\item{rehab}{whether the individual has been to rehab for their drug usage}
\item{asthma}{whether the individual has asthma}
\item{overweight}{whether the individual is overweight}
\item{cancer}{whether the individual has cancer}
\item{low_int}{rating of whether the individual has low interest in things}
\item{down}{rating of whether the individual has felt down}
\item{sleeping}{rating of whether the individual has had trouble sleeping}
\item{low_energy}{rating of whether the individual has low energy}
\item{appetite}{rating of whether the individual has lost appetite}
\item{feel_bad}{rating of whether the individual has felt bad}
\item{no_con}{rating of whether the individual has felt no confidence}
\item{speak_move}{rating of whether the individual has trouble speaking/moving}
\item{dead}{rating of whether the individual has wished he/she was dead}
\item{difficulty}{rating of whether the individual has felt difficulty from the previous conditions}
\item{active}{minutes of vigorous or moderate activity}
}
}
\source{
\url{https://wwwn.cdc.gov/nchs/nhanes/continuousnhanes/default.aspx?BeginYear=2009}
}
\usage{
nhanes_2010
}
\description{
A dataset containing information on health, healthcare, and demographics of adolescents
aged 18 - 30 in the United States from 2009 to 2010. This is a cleaned dataset
which is only a subset of the 2009-2010 data release of
the National Health and Nutrition Examination Survey (NHANES).
}
\keyword{datasets}
|
######script for using DESeq to detect DEGs.##################
library("DESeq2")
library("RColorBrewer")
library("gplots")
countsTable <- read.delim( "merged_table.txt", header=T, stringsAsFactors=TRUE )
rownames( countsTable ) <- countsTable$gene
countsTable <- round(countsTable[ , 2:5 ])
colData <- data.frame(condition=factor(c( "KO","KO","WT","WT")))
dds<-DESeqDataSetFromMatrix(countsTable,colData, formula(~condition))
dds <- estimateSizeFactors(dds)
dds <- DESeq(dds)
res <- results(dds)
resOrdered <- res[order(res$padj),]
pdf("RNAseqRai1_cortex_NM.pdf")
plotMA(dds, main="Rai1_KO RNA seq cortex", ylim=c(-2,2))
write.table(as.data.frame(resOrdered),"RaiKO_DESeq_refGene_NM.txt", row.name=T,sep="\t",quote=F)
rld <-rlogTransformation(dds)
vsd <- varianceStabilizingTransformation(dds)
rlogMat <- assay(rld)
vstMat <- assay(vsd)
hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
distsRL <- dist(t(assay(rld)))
mat <- as.matrix(distsRL)
samples<-c("Nestin-Cre-A","Nestin-Cre-B","WT-A","WT-B")
rownames(mat) <- colnames(mat) <- with(colData(dds), paste(condition, samples, sep=" : "))
heatmap.2(mat, trace="none", col = rev(hmcol), margin=c(13, 13))
print(plotPCA(rld, intgroup=c("condition")))
dev.off()
| /6_DESeq.R | no_license | jinxu9/RNAseq | R | false | false | 1,224 | r | ######script for using DESeq to detect DEGs.##################
library("DESeq2")
library("RColorBrewer")
library("gplots")
countsTable <- read.delim( "merged_table.txt", header=T, stringsAsFactors=TRUE )
rownames( countsTable ) <- countsTable$gene
countsTable <- round(countsTable[ , 2:5 ])
colData <- data.frame(condition=factor(c( "KO","KO","WT","WT")))
dds<-DESeqDataSetFromMatrix(countsTable,colData, formula(~condition))
dds <- estimateSizeFactors(dds)
dds <- DESeq(dds)
res <- results(dds)
resOrdered <- res[order(res$padj),]
pdf("RNAseqRai1_cortex_NM.pdf")
plotMA(dds, main="Rai1_KO RNA seq cortex", ylim=c(-2,2))
write.table(as.data.frame(resOrdered),"RaiKO_DESeq_refGene_NM.txt", row.name=T,sep="\t",quote=F)
rld <-rlogTransformation(dds)
vsd <- varianceStabilizingTransformation(dds)
rlogMat <- assay(rld)
vstMat <- assay(vsd)
hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
distsRL <- dist(t(assay(rld)))
mat <- as.matrix(distsRL)
samples<-c("Nestin-Cre-A","Nestin-Cre-B","WT-A","WT-B")
rownames(mat) <- colnames(mat) <- with(colData(dds), paste(condition, samples, sep=" : "))
heatmap.2(mat, trace="none", col = rev(hmcol), margin=c(13, 13))
print(plotPCA(rld, intgroup=c("condition")))
dev.off()
|
library(lme4)
library(Matrix)
library(MASS)
library(ggplot2)
coffebra = read.table("BRA18realx1.txt", header = T)
coffebra.cont <- coffebra
# new: nn & grpunt
nn=dim(coffebra.cont)[1]
# 65 observations every country
grpunt=65
# righe.cont <- 1:2
# f1 <- 2.5
# f2 <- 2
#
# coffebra.cont$logV[righe.cont]=f1*coffebra$logV[righe.cont]
# coffebra.cont$logQ[righe.cont]=f2*coffebra$logQ[righe.cont]
plot(coffebra.cont$logQ, coffebra.cont$logV, cex = 1.5, pch = 20, xlab = "W", ylab = "V")
abline(lm(logV ~ logQ, data=coffebra.cont))
fit.coffebra.cont = lmer(logV ~ logQ + t +m1 + m2+m3+m4+m5+ m6+m7+m8+m9+m10+m11+(1 | country), data = coffebra.cont, REML=F)
y = coffebra.cont$logV
ngroups = max(fit.coffebra.cont@Gp)
X = fit.coffebra.cont@pp$X
Z = t(fit.coffebra.cont@pp$Zt)
beta = fit.coffebra.cont@beta
ttest = beta/diag(summary(fit.coffebra.cont)$vcov)^0.5
sigma2U = summary(fit.coffebra.cont)$varcor[[1]][1]
sigma2eps = summary(fit.coffebra.cont)$sigma^2
loglikhat = summary(fit.coffebra.cont)$logLik[1]
Vhat = sigma2U * Z %*% t(Z) + sigma2eps * diag(length(y))
Ghat = sigma2U * diag(max(fit.coffebra.cont@Gp))
### as.matrix(Vhat) to het the matrix
i1 = t(X)%*% ginv(as.matrix(Vhat)) %*% X
i1inv = ginv(i1)
i2 = t(X)%*% ginv(as.matrix(Vhat)) %*% y
### The product i1inv %*% i2 is identical to beta
# ranef : stima dei random effects
### identical to ranef(ranef(fit.coffebra.cont)$pigname[[1]])
uhat = Ghat%*%t(Z)%*%ginv(as.matrix(Vhat)) %*% (y - X%*%beta)
yhat = X%*%beta + Z%*%uhat
res = y - yhat
### pointer = matrix(rep(1:7, ngroups), nrow = 7, ncol = ngroups)
### Generate a matrix of starting time-index
positions = matrix(1:length(y), nrow = max(coffebra.cont$t), ncol = ngroups)
#temp = matrix(NA, nrow = 10, ncol = length(y)/(ncol(X)+1))
temp = NULL
### for(l in 1:nrow(temp)) {
for(l in 1:10) {
subset=matrix(NA, nrow=ncol(X), ncol = ngroups)
set.seed(l)
sss = sample(1:(max(coffebra.cont$t) - ncol(X)+1), ngroups, replace = T)
for (ii in 1:ncol(positions)) {
subset[,ii] = positions[sss[ii]:(sss[ii]+ncol(X)-1), ii]
}
temp = rbind(as.vector(subset), temp)
}
resorder =matrix(NA, nrow = length(y), ncol = nrow(temp))
for(j in 1:nrow(temp)) {
cat(j, "\n")
mysub = temp[j, ]
fit.coffebra.cont.sub = lmer(logV ~ logQ + t +m1 + m2+m3+m4+m5+ m6+m7+m8+m9+m10+m11+(1 | country), data = coffebra.cont, REML=F, subset = mysub)
ngroups = max(fit.coffebra.cont.sub@Gp)
y.sub = fit.coffebra.cont.sub@resp$y
X.sub = fit.coffebra.cont.sub@pp$X
Z.sub = t(fit.coffebra.cont.sub@pp$Zt)
beta.sub = fit.coffebra.cont.sub@beta
sigma2U.sub = summary(fit.coffebra.cont.sub)$varcor[[1]][1]
sigma2eps.sub = summary(fit.coffebra.cont.sub)$sigma^2
loglikhat.sub = summary(fit.coffebra.cont.sub)$logLik[1]
Ghat.sub = sigma2U.sub * diag(ngroups)
### This is to check if beta is obtained
Vhat.sub = sigma2U.sub * Z.sub %*% t(Z.sub) + sigma2eps.sub * diag(length(y.sub))
### as.matrix(Vhat) to het the matrix
i1 = t(X.sub)%*% ginv(as.matrix(Vhat.sub)) %*% X.sub
i1inv = ginv(i1)
i2 = t(X.sub)%*% ginv(as.matrix(Vhat.sub)) %*% y.sub
### The product i1inv %*% i2 is identical to beta
### Ste equal to zero rows unused
not.mysub = setdiff(1:length(y), mysub)
Z.reduced = Z
Z.reduced[not.mysub, ] = 0
Vhat.sub1 = sigma2U.sub * Z.reduced %*% t(Z.reduced) + sigma2eps.sub * diag(length(y))
Ghat.sub1 = sigma2U.sub * diag(ngroups)
### identical to ranef(ranef(fit.coffebra.cont.sub)$pigname[[1]])
uhat.sub = Ghat.sub1%*%t(Z.reduced)%*%ginv(as.matrix(Vhat.sub1)) %*% (y - X%*%beta.sub)
### yhat.sub = X%*%beta.sub + Z.reduced%*%uhat.sub
yhat.sub = X%*%beta.sub + Z%*%uhat.sub
res.sub = y - yhat.sub
resorder[,j] = sort(res.sub^2)
}
bsb = temp[which.min(apply(resorder, 2, median)), ]
### Storage of quantities
res.fwd=matrix(NA, nrow = length(y), ncol = length(y)-length(bsb) + 1)
beta.fwd = matrix(NA, nrow = ncol(X), ncol = length(y)-length(bsb) + 1)
ttest.fwd = beta.fwd
sigma2U.fwd = matrix(NA, nrow = 1, ncol = length(y)-length(bsb)+ 1)
sigma2eps.fwd = matrix(NA, nrow = 1, ncol = length(y)-length(bsb)+ 1)
row.inside.fwd = matrix(NA, nrow = length(y),ncol = length(y)-length(bsb)+ 1)
uhat.fwd = matrix(NA, nrow=ngroups, ncol = length(y)-length(bsb)+ 1)
############################ START OF THE FS ##########################
# new: seq code and for code
fscycle=seq(length(bsb),nn,ngroups)
for(m in fscycle) {
cat("Subset size m of the FWD = ", m, "\n")
if (m == length(bsb)) { ### first step of the forward search
step = 1
ttt = bsb
init.fit = lmer(logV ~ logQ + t +m1 + m2+m3+m4+m5+ m6+m7+m8+m9+m10+m11+(1 | country), data = coffebra.cont, REML=F, subset = ttt)
### init.fit = lmer(w ~ t +(1 | pigname), data = pig, REML=F, subset = ttt)
ngroups = max(init.fit@Gp)
y.sub = init.fit@resp$y
X.sub = init.fit@pp$X
Z.sub = t(init.fit@pp$Zt)
beta.sub = init.fit@beta
ttest.sub = beta.sub/diag(summary(init.fit)$vcov)^0.5
sigma2U.sub = summary(init.fit)$varcor[[1]][1]
sigma2eps.sub = summary(init.fit)$sigma^2
loglikhat.sub = summary(init.fit)$logLik[1]
Ghat.sub = sigma2U.sub * diag(ngroups)
### Ste equal to zero rows unused
not.mysub = setdiff(1:length(y), ttt)
Z.reduced = Z
Z.reduced[not.mysub, ] = 0
Vhat.sub1 = sigma2U.sub * Z.reduced %*% t(Z.reduced) + sigma2eps.sub * diag(length(y))
Ghat.sub1 = sigma2U.sub * diag(ngroups)
### identical to ranef(ranef(fit.coffebra.cont.sub)$pigname[[1]])
uhat.sub = Ghat.sub1%*%t(Z.reduced)%*%ginv(as.matrix(Vhat.sub1)) %*% (y - X%*%beta.sub)
### yhat.sub = X%*%beta.sub + Z.reduced%*%uhat.sub
yhat.sub = X%*%beta.sub + Z%*%uhat.sub
res.sub = y - yhat.sub
posm = which(order(res.sub^2)<= m)
res.fwd[, step] = as.vector(res.sub)
beta.fwd[, step] = as.vector(beta.sub)
ttest.fwd[, step] = as.vector(ttest.sub)
sigma2U.fwd[1, step] = as.vector(sigma2U.sub)
sigma2eps.fwd[1, step] = as.vector(sigma2eps.sub)
row.inside.fwd[1:length(bsb), step] = bsb
uhat.fwd[, step] = as.vector(uhat.sub)
}
else{
step = step+1
# cat("step: ",step,"\n")
# new: ordering a data matrix with countries
tmpres=as.data.frame(cbind(1:nn,as.vector(coffebra.cont$country),as.vector(res.sub^2)))
names(tmpres)=c("unit","country","res2")
# new: ordering two levels
otmpres=tmpres[order(tmpres$country, tmpres$res2),]
# we take the first m/18 units for each country
ordbsb=as.vector(otmpres$unit)
ordind=c((1:(m/18))+0*grpunt, (1:(m/18))+1*grpunt, (1:(m/18))+2*grpunt, (1:(m/18))+3*grpunt, (1:(m/18))+4*grpunt, (1:(m/18))+5*grpunt, (1:(m/18))+6*grpunt, (1:(m/18))+7 *grpunt , (1:(m/18))+8*grpunt , (1:(m/18))+9*grpunt , (1:(m/18))+10*grpunt , (1:(m/18))+11*grpunt , (1:(m/18))+12*grpunt , (1:(m/18))+13*grpunt , (1:(m/18))+14*grpunt , (1:(m/18))+15*grpunt , (1:(m/18))+16*grpunt , (1:(m/18))+17*grpunt)
nbsb=ordbsb[ordind]
# original
# nbsb= which(order(res.sub^2)<= m+1)
ttt = nbsb
init.fit = lmer(logV ~ logQ + t +m1 + m2+m3+m4+m5+ m6+m7+m8+m9+m10+m11+(1 | country), data = coffebra.cont, REML=F, subset = ttt)
### init.fit = lmer(w ~ t +(1 | pigname), data = pig, REML=F, subset = ttt)
check.ngroups = max(init.fit@Gp)
cat("\n", "Progression = ", step, "\n")
y.sub = init.fit@resp$y
X.sub = init.fit@pp$X
Z.sub = t(init.fit@pp$Zt)
beta.sub = init.fit@beta
ttest.sub = beta.sub/diag(summary(init.fit)$vcov)^0.5
sigma2U.sub = summary(init.fit)$varcor[[1]][1]
sigma2eps.sub = summary(init.fit)$sigma^2
loglikhat.sub = summary(init.fit)$logLik[1]
Ghat.sub = sigma2U.sub * diag(ngroups)
### Ste equal to zero rows unused
not.mysub = setdiff(1:length(y), ttt)
Z.reduced = Z
if(length(not.mysub)>0)
Z.reduced[not.mysub, ] = 0
Vhat.sub1 = sigma2U.sub * Z.reduced %*% t(Z.reduced) + sigma2eps.sub * diag(length(y))
Ghat.sub1 = sigma2U.sub * diag(ngroups)
### identical to ranef(ranef(fit.coffebra.cont.sub)$pigname[[1]])
uhat.sub = Ghat.sub1%*%t(Z.reduced)%*%ginv(as.matrix(Vhat.sub1)) %*% (y - X%*%beta.sub)
### yhat.sub = X%*%beta.sub + Z.reduced%*%uhat.sub
yhat.sub = X%*%beta.sub + Z%*%uhat.sub
res.sub = y - yhat.sub
res.fwd[, step] = as.vector(res.sub)
beta.fwd[, step] = as.vector(beta.sub)
ttest.fwd[, step] = as.vector(ttest.sub)
sigma2U.fwd[1, step] = as.vector(sigma2U.sub)
sigma2eps.fwd[1, step] = as.vector(sigma2eps.sub)
row.inside.fwd[1:length(nbsb), step] = nbsb
uhat.fwd[, step] = as.vector(uhat.sub)
}
}
steps=1:step
plot(length(bsb):length(y), (res.fwd[1,])^2, type = "n", ylim = range(res.fwd^2, na.rm = T), xlab = "Subset size m", ylab = "Squared Residuals")
for (i in 1:nrow(res.fwd))
lines(fscycle, (res.fwd[i, steps])^2, lty = i, col = i)
dev.print(device = postscript, file = "C:/ASIM/multilevelFS/figures/squared.resid.bra.eps", height = 18, width = 30, onefile = T)
plot(length(bsb):length(y), beta.fwd[1,], type = "n", ylim = range(beta.fwd, na.rm = T), xlab = "Subset size m", ylab = "Estimates of fixed effects")
for (i in 1:nrow(beta.fwd))
lines(fscycle, beta.fwd[i, steps], lty = i, col = i)
dev.print(device = postscript, file = "C:/ASIM/multilevelFS/figures/bhat.bra.eps", height = 18, width = 30, onefile = T)
plot(length(bsb):length(y), ttest.fwd[1,], type = "n", ylim = range(ttest.fwd, na.rm = T), xlab = "Subset size m", ylab = "T Test")
for (i in 1:nrow(beta.fwd))
lines(fscycle, ttest.fwd[i, steps], lty = i, col = i)
abline(h = c(-2,2), lwd = 3, lty = 2, col = "blue")
dev.print(device = postscript, file = "C:/ASIM/multilevelFS/figures/ttest.bhat.bra.eps", height = 18, width = 30, onefile = T)
plot(length(bsb):length(y), uhat.fwd[1,], type = "n", ylim = range(uhat.fwd, na.rm = T), xlab = "Subset size m", ylab = "Predicted random effects")
for (i in 1:nrow(uhat.fwd))
lines(fscycle, uhat.fwd[i, steps], lty = i, col = i)
abline(h = 0, lwd = 5, lty = 2, col = "blue")
dev.print(device = postscript, file = "C:/ASIM/multilevelFS/figures/predicted.reff.bra.eps", height = 18, width = 30, onefile = T)
##########################
plot(length(bsb):length(y), sigma2eps.fwd[1,], type = "n", ylim = range(sigma2eps.fwd, na.rm = T), xlab = "Subset size m", ylab = "Estimated variances")
for (i in 1:nrow(sigma2eps.fwd)) lines(length(bsb):length(y), sigma2eps.fwd[i, ], lty = i, col = i)
plot(length(bsb):length(y), sigma2U.fwd[1,], type = "n", ylim = range(sigma2U.fwd, na.rm = T), xlab = "Subset size m", ylab = "Estimated variances")
for (i in 1:nrow(sigma2U.fwd)) lines(length(bsb):length(y), sigma2U.fwd[i, ], lty = i, col = i)
| /coffeebrareal.R | no_license | kvcorb/multilevelFS | R | false | false | 11,002 | r | library(lme4)
library(Matrix)
library(MASS)
library(ggplot2)
coffebra = read.table("BRA18realx1.txt", header = T)
coffebra.cont <- coffebra
# new: nn & grpunt
nn=dim(coffebra.cont)[1]
# 65 observations every country
grpunt=65
# righe.cont <- 1:2
# f1 <- 2.5
# f2 <- 2
#
# coffebra.cont$logV[righe.cont]=f1*coffebra$logV[righe.cont]
# coffebra.cont$logQ[righe.cont]=f2*coffebra$logQ[righe.cont]
plot(coffebra.cont$logQ, coffebra.cont$logV, cex = 1.5, pch = 20, xlab = "W", ylab = "V")
abline(lm(logV ~ logQ, data=coffebra.cont))
fit.coffebra.cont = lmer(logV ~ logQ + t +m1 + m2+m3+m4+m5+ m6+m7+m8+m9+m10+m11+(1 | country), data = coffebra.cont, REML=F)
y = coffebra.cont$logV
ngroups = max(fit.coffebra.cont@Gp)
X = fit.coffebra.cont@pp$X
Z = t(fit.coffebra.cont@pp$Zt)
beta = fit.coffebra.cont@beta
ttest = beta/diag(summary(fit.coffebra.cont)$vcov)^0.5
sigma2U = summary(fit.coffebra.cont)$varcor[[1]][1]
sigma2eps = summary(fit.coffebra.cont)$sigma^2
loglikhat = summary(fit.coffebra.cont)$logLik[1]
Vhat = sigma2U * Z %*% t(Z) + sigma2eps * diag(length(y))
Ghat = sigma2U * diag(max(fit.coffebra.cont@Gp))
### as.matrix(Vhat) to het the matrix
i1 = t(X)%*% ginv(as.matrix(Vhat)) %*% X
i1inv = ginv(i1)
i2 = t(X)%*% ginv(as.matrix(Vhat)) %*% y
### The product i1inv %*% i2 is identical to beta
# ranef : stima dei random effects
### identical to ranef(ranef(fit.coffebra.cont)$pigname[[1]])
uhat = Ghat%*%t(Z)%*%ginv(as.matrix(Vhat)) %*% (y - X%*%beta)
yhat = X%*%beta + Z%*%uhat
res = y - yhat
### pointer = matrix(rep(1:7, ngroups), nrow = 7, ncol = ngroups)
### Generate a matrix of starting time-index
positions = matrix(1:length(y), nrow = max(coffebra.cont$t), ncol = ngroups)
#temp = matrix(NA, nrow = 10, ncol = length(y)/(ncol(X)+1))
temp = NULL
### for(l in 1:nrow(temp)) {
for(l in 1:10) {
subset=matrix(NA, nrow=ncol(X), ncol = ngroups)
set.seed(l)
sss = sample(1:(max(coffebra.cont$t) - ncol(X)+1), ngroups, replace = T)
for (ii in 1:ncol(positions)) {
subset[,ii] = positions[sss[ii]:(sss[ii]+ncol(X)-1), ii]
}
temp = rbind(as.vector(subset), temp)
}
resorder =matrix(NA, nrow = length(y), ncol = nrow(temp))
for(j in 1:nrow(temp)) {
cat(j, "\n")
mysub = temp[j, ]
fit.coffebra.cont.sub = lmer(logV ~ logQ + t +m1 + m2+m3+m4+m5+ m6+m7+m8+m9+m10+m11+(1 | country), data = coffebra.cont, REML=F, subset = mysub)
ngroups = max(fit.coffebra.cont.sub@Gp)
y.sub = fit.coffebra.cont.sub@resp$y
X.sub = fit.coffebra.cont.sub@pp$X
Z.sub = t(fit.coffebra.cont.sub@pp$Zt)
beta.sub = fit.coffebra.cont.sub@beta
sigma2U.sub = summary(fit.coffebra.cont.sub)$varcor[[1]][1]
sigma2eps.sub = summary(fit.coffebra.cont.sub)$sigma^2
loglikhat.sub = summary(fit.coffebra.cont.sub)$logLik[1]
Ghat.sub = sigma2U.sub * diag(ngroups)
### This is to check if beta is obtained
Vhat.sub = sigma2U.sub * Z.sub %*% t(Z.sub) + sigma2eps.sub * diag(length(y.sub))
### as.matrix(Vhat) to het the matrix
i1 = t(X.sub)%*% ginv(as.matrix(Vhat.sub)) %*% X.sub
i1inv = ginv(i1)
i2 = t(X.sub)%*% ginv(as.matrix(Vhat.sub)) %*% y.sub
### The product i1inv %*% i2 is identical to beta
### Ste equal to zero rows unused
not.mysub = setdiff(1:length(y), mysub)
Z.reduced = Z
Z.reduced[not.mysub, ] = 0
Vhat.sub1 = sigma2U.sub * Z.reduced %*% t(Z.reduced) + sigma2eps.sub * diag(length(y))
Ghat.sub1 = sigma2U.sub * diag(ngroups)
### identical to ranef(ranef(fit.coffebra.cont.sub)$pigname[[1]])
uhat.sub = Ghat.sub1%*%t(Z.reduced)%*%ginv(as.matrix(Vhat.sub1)) %*% (y - X%*%beta.sub)
### yhat.sub = X%*%beta.sub + Z.reduced%*%uhat.sub
yhat.sub = X%*%beta.sub + Z%*%uhat.sub
res.sub = y - yhat.sub
resorder[,j] = sort(res.sub^2)
}
bsb = temp[which.min(apply(resorder, 2, median)), ]
### Storage of quantities
res.fwd=matrix(NA, nrow = length(y), ncol = length(y)-length(bsb) + 1)
beta.fwd = matrix(NA, nrow = ncol(X), ncol = length(y)-length(bsb) + 1)
ttest.fwd = beta.fwd
sigma2U.fwd = matrix(NA, nrow = 1, ncol = length(y)-length(bsb)+ 1)
sigma2eps.fwd = matrix(NA, nrow = 1, ncol = length(y)-length(bsb)+ 1)
row.inside.fwd = matrix(NA, nrow = length(y),ncol = length(y)-length(bsb)+ 1)
uhat.fwd = matrix(NA, nrow=ngroups, ncol = length(y)-length(bsb)+ 1)
############################ START OF THE FS ##########################
# new: seq code and for code
fscycle=seq(length(bsb),nn,ngroups)
for(m in fscycle) {
cat("Subset size m of the FWD = ", m, "\n")
if (m == length(bsb)) { ### first step of the forward search
step = 1
ttt = bsb
init.fit = lmer(logV ~ logQ + t +m1 + m2+m3+m4+m5+ m6+m7+m8+m9+m10+m11+(1 | country), data = coffebra.cont, REML=F, subset = ttt)
### init.fit = lmer(w ~ t +(1 | pigname), data = pig, REML=F, subset = ttt)
ngroups = max(init.fit@Gp)
y.sub = init.fit@resp$y
X.sub = init.fit@pp$X
Z.sub = t(init.fit@pp$Zt)
beta.sub = init.fit@beta
ttest.sub = beta.sub/diag(summary(init.fit)$vcov)^0.5
sigma2U.sub = summary(init.fit)$varcor[[1]][1]
sigma2eps.sub = summary(init.fit)$sigma^2
loglikhat.sub = summary(init.fit)$logLik[1]
Ghat.sub = sigma2U.sub * diag(ngroups)
### Ste equal to zero rows unused
not.mysub = setdiff(1:length(y), ttt)
Z.reduced = Z
Z.reduced[not.mysub, ] = 0
Vhat.sub1 = sigma2U.sub * Z.reduced %*% t(Z.reduced) + sigma2eps.sub * diag(length(y))
Ghat.sub1 = sigma2U.sub * diag(ngroups)
### identical to ranef(ranef(fit.coffebra.cont.sub)$pigname[[1]])
uhat.sub = Ghat.sub1%*%t(Z.reduced)%*%ginv(as.matrix(Vhat.sub1)) %*% (y - X%*%beta.sub)
### yhat.sub = X%*%beta.sub + Z.reduced%*%uhat.sub
yhat.sub = X%*%beta.sub + Z%*%uhat.sub
res.sub = y - yhat.sub
posm = which(order(res.sub^2)<= m)
res.fwd[, step] = as.vector(res.sub)
beta.fwd[, step] = as.vector(beta.sub)
ttest.fwd[, step] = as.vector(ttest.sub)
sigma2U.fwd[1, step] = as.vector(sigma2U.sub)
sigma2eps.fwd[1, step] = as.vector(sigma2eps.sub)
row.inside.fwd[1:length(bsb), step] = bsb
uhat.fwd[, step] = as.vector(uhat.sub)
}
else{
step = step+1
# cat("step: ",step,"\n")
# new: ordering a data matrix with countries
tmpres=as.data.frame(cbind(1:nn,as.vector(coffebra.cont$country),as.vector(res.sub^2)))
names(tmpres)=c("unit","country","res2")
# new: ordering two levels
otmpres=tmpres[order(tmpres$country, tmpres$res2),]
# we take the first m/18 units for each country
ordbsb=as.vector(otmpres$unit)
ordind=c((1:(m/18))+0*grpunt, (1:(m/18))+1*grpunt, (1:(m/18))+2*grpunt, (1:(m/18))+3*grpunt, (1:(m/18))+4*grpunt, (1:(m/18))+5*grpunt, (1:(m/18))+6*grpunt, (1:(m/18))+7 *grpunt , (1:(m/18))+8*grpunt , (1:(m/18))+9*grpunt , (1:(m/18))+10*grpunt , (1:(m/18))+11*grpunt , (1:(m/18))+12*grpunt , (1:(m/18))+13*grpunt , (1:(m/18))+14*grpunt , (1:(m/18))+15*grpunt , (1:(m/18))+16*grpunt , (1:(m/18))+17*grpunt)
nbsb=ordbsb[ordind]
# original
# nbsb= which(order(res.sub^2)<= m+1)
ttt = nbsb
init.fit = lmer(logV ~ logQ + t +m1 + m2+m3+m4+m5+ m6+m7+m8+m9+m10+m11+(1 | country), data = coffebra.cont, REML=F, subset = ttt)
### init.fit = lmer(w ~ t +(1 | pigname), data = pig, REML=F, subset = ttt)
check.ngroups = max(init.fit@Gp)
cat("\n", "Progression = ", step, "\n")
y.sub = init.fit@resp$y
X.sub = init.fit@pp$X
Z.sub = t(init.fit@pp$Zt)
beta.sub = init.fit@beta
ttest.sub = beta.sub/diag(summary(init.fit)$vcov)^0.5
sigma2U.sub = summary(init.fit)$varcor[[1]][1]
sigma2eps.sub = summary(init.fit)$sigma^2
loglikhat.sub = summary(init.fit)$logLik[1]
Ghat.sub = sigma2U.sub * diag(ngroups)
### Ste equal to zero rows unused
not.mysub = setdiff(1:length(y), ttt)
Z.reduced = Z
if(length(not.mysub)>0)
Z.reduced[not.mysub, ] = 0
Vhat.sub1 = sigma2U.sub * Z.reduced %*% t(Z.reduced) + sigma2eps.sub * diag(length(y))
Ghat.sub1 = sigma2U.sub * diag(ngroups)
### identical to ranef(ranef(fit.coffebra.cont.sub)$pigname[[1]])
uhat.sub = Ghat.sub1%*%t(Z.reduced)%*%ginv(as.matrix(Vhat.sub1)) %*% (y - X%*%beta.sub)
### yhat.sub = X%*%beta.sub + Z.reduced%*%uhat.sub
yhat.sub = X%*%beta.sub + Z%*%uhat.sub
res.sub = y - yhat.sub
res.fwd[, step] = as.vector(res.sub)
beta.fwd[, step] = as.vector(beta.sub)
ttest.fwd[, step] = as.vector(ttest.sub)
sigma2U.fwd[1, step] = as.vector(sigma2U.sub)
sigma2eps.fwd[1, step] = as.vector(sigma2eps.sub)
row.inside.fwd[1:length(nbsb), step] = nbsb
uhat.fwd[, step] = as.vector(uhat.sub)
}
}
steps=1:step
plot(length(bsb):length(y), (res.fwd[1,])^2, type = "n", ylim = range(res.fwd^2, na.rm = T), xlab = "Subset size m", ylab = "Squared Residuals")
for (i in 1:nrow(res.fwd))
lines(fscycle, (res.fwd[i, steps])^2, lty = i, col = i)
dev.print(device = postscript, file = "C:/ASIM/multilevelFS/figures/squared.resid.bra.eps", height = 18, width = 30, onefile = T)
plot(length(bsb):length(y), beta.fwd[1,], type = "n", ylim = range(beta.fwd, na.rm = T), xlab = "Subset size m", ylab = "Estimates of fixed effects")
for (i in 1:nrow(beta.fwd))
lines(fscycle, beta.fwd[i, steps], lty = i, col = i)
dev.print(device = postscript, file = "C:/ASIM/multilevelFS/figures/bhat.bra.eps", height = 18, width = 30, onefile = T)
plot(length(bsb):length(y), ttest.fwd[1,], type = "n", ylim = range(ttest.fwd, na.rm = T), xlab = "Subset size m", ylab = "T Test")
for (i in 1:nrow(beta.fwd))
lines(fscycle, ttest.fwd[i, steps], lty = i, col = i)
abline(h = c(-2,2), lwd = 3, lty = 2, col = "blue")
dev.print(device = postscript, file = "C:/ASIM/multilevelFS/figures/ttest.bhat.bra.eps", height = 18, width = 30, onefile = T)
plot(length(bsb):length(y), uhat.fwd[1,], type = "n", ylim = range(uhat.fwd, na.rm = T), xlab = "Subset size m", ylab = "Predicted random effects")
for (i in 1:nrow(uhat.fwd))
lines(fscycle, uhat.fwd[i, steps], lty = i, col = i)
abline(h = 0, lwd = 5, lty = 2, col = "blue")
dev.print(device = postscript, file = "C:/ASIM/multilevelFS/figures/predicted.reff.bra.eps", height = 18, width = 30, onefile = T)
##########################
plot(length(bsb):length(y), sigma2eps.fwd[1,], type = "n", ylim = range(sigma2eps.fwd, na.rm = T), xlab = "Subset size m", ylab = "Estimated variances")
for (i in 1:nrow(sigma2eps.fwd)) lines(length(bsb):length(y), sigma2eps.fwd[i, ], lty = i, col = i)
plot(length(bsb):length(y), sigma2U.fwd[1,], type = "n", ylim = range(sigma2U.fwd, na.rm = T), xlab = "Subset size m", ylab = "Estimated variances")
for (i in 1:nrow(sigma2U.fwd)) lines(length(bsb):length(y), sigma2U.fwd[i, ], lty = i, col = i)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{snoopingcvs}
\alias{snoopingcvs}
\title{Snooping-adjusted critical values}
\format{There are TODO rows and 9 columns:
\describe{
\item{kernel}{kernel function}
\item{order}{Order of local polynomial (0 for local constant)}
\item{boundary}{Boundary or interior regression?}
\item{t}{ratio of maximum to minimum bandwidth}
\item{level}{confidence level}
\item{onesided}{Critical value for one-sided CIs}
\item{twosided}{Critical value for two-sided CIs}
\item{ua.onesided}{Coverage of unadjusted one-sided CIs}
\item{ua.onesided}{Coverage of unadjusted two-sided CIs}
}}
\source{
Computed by running
\code{snoopingcvs <- DFSnoopingCV(S=60000, T=10000, 1000)}
}
\usage{
snoopingcvs
}
\description{
Data frame of precomputed snooping-adjusted critical values, using the
function \code{\link{DFSnoopingCV}}. The data frame is used by
\code{\link{SnoopingCV}} to look up the appropriate critical value
}
\keyword{datasets}
| /man/snoopingcvs.Rd | no_license | kolesarm/BWSnooping | R | false | true | 1,050 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{snoopingcvs}
\alias{snoopingcvs}
\title{Snooping-adjusted critical values}
\format{There are TODO rows and 9 columns:
\describe{
\item{kernel}{kernel function}
\item{order}{Order of local polynomial (0 for local constant)}
\item{boundary}{Boundary or interior regression?}
\item{t}{ratio of maximum to minimum bandwidth}
\item{level}{confidence level}
\item{onesided}{Critical value for one-sided CIs}
\item{twosided}{Critical value for two-sided CIs}
\item{ua.onesided}{Coverage of unadjusted one-sided CIs}
\item{ua.onesided}{Coverage of unadjusted two-sided CIs}
}}
\source{
Computed by running
\code{snoopingcvs <- DFSnoopingCV(S=60000, T=10000, 1000)}
}
\usage{
snoopingcvs
}
\description{
Data frame of precomputed snooping-adjusted critical values, using the
function \code{\link{DFSnoopingCV}}. The data frame is used by
\code{\link{SnoopingCV}} to look up the appropriate critical value
}
\keyword{datasets}
|
\name{plot.utility.endnode.intpol1d}
\alias{plot.utility.endnode.intpol1d}
\title{Plot Node Definition}
\description{
Plot node definition.
}
\usage{
\method{plot}{utility.endnode.intpol1d}(x,
par = NA,
col = utility.calc.colors(),
gridlines = c(0.2, 0.4, 0.6, 0.8),
main = "",
cex.main = 1,
xlim = numeric(0),
...)
}
\arguments{
\item{x}{
node to be plotted.
}
\item{par}{
(optional) labelled numeric parameter vector providing parameters to
modify the value or utility function before plotting the node.
}
\item{col}{
(optional) character vector of colors to be used to color the interval
between zero and unity in equidistant sections (use repetitions of the
same color if you want to have a non-equidistant color-coding).
This attribute is only used for value nodes.
}
\item{gridlines}{
(optional) numeric vector of levels at which gridlines are plotted
in the node definition.
}
\item{main}{
(optional) title of the plot.
}
\item{cex.main}{
(optional) scaling factor for title of the plot.
}
\item{xlim}{
(optional) limits for x-axis of the plot (default is range).
}
\item{\dots}{
additional arguments passed to the R plotting routine.
}
}
\note{
Note that the plotting routines for the other end nodes \cr
\code{\link{plot.utility.endnode.discrete}} \cr
\code{\link{plot.utility.endnode.parfun1d}} \cr
\code{\link{plot.utility.endnode.intpol2d}} \cr
\code{\link{plot.utility.endnode.cond}} \cr
\code{\link{plot.utility.endnode.firstavail}} \cr
are as far as possible the same so that all end nodes can be plotted with the same commands irrespective of the type of the end node.
}
\references{
Short description of the package: \cr\cr
Reichert, P., Schuwirth, N. and Langhans, S.,
Constructing, evaluating and visualizing value and utility functions for decision support, Environmental Modelling & Software 46, 283-291, 2013. \cr\cr
Textbooks on the use of utility and value functions in decision analysis: \cr\cr
Keeney, R. L. and Raiffa, H. Decisions with Multiple Objectives - Preferences and Value Tradeoffs. John Wiley & Sons, 1976. \cr\cr
Eisenfuehr, F., Weber, M. and Langer, T., Rational Decision Making, Springer, Berlin, 2010.
}
\author{
Peter Reichert <peter.reichert@eawag.ch>
}
\seealso{
See \code{\link{utility.endnode.intpol1d.create}} for how to construct such a node and \code{\link{evaluate.utility.endnode.intpol1d}} for how to evaluate the node. \cr\cr
See \code{\link{utility.calc.colors}} for an example of how to construct color schemes and \code{\link{utility.get.colors}} for how to get colors for specifed value levels.
}
\examples{
# see
help(utility)
# for examples.
}
| /man/plot.utility.endnode.intpol1d.Rd | no_license | cran/utility | R | false | false | 2,781 | rd | \name{plot.utility.endnode.intpol1d}
\alias{plot.utility.endnode.intpol1d}
\title{Plot Node Definition}
\description{
Plot node definition.
}
\usage{
\method{plot}{utility.endnode.intpol1d}(x,
par = NA,
col = utility.calc.colors(),
gridlines = c(0.2, 0.4, 0.6, 0.8),
main = "",
cex.main = 1,
xlim = numeric(0),
...)
}
\arguments{
\item{x}{
node to be plotted.
}
\item{par}{
(optional) labelled numeric parameter vector providing parameters to
modify the value or utility function before plotting the node.
}
\item{col}{
(optional) character vector of colors to be used to color the interval
between zero and unity in equidistant sections (use repetitions of the
same color if you want to have a non-equidistant color-coding).
This attribute is only used for value nodes.
}
\item{gridlines}{
(optional) numeric vector of levels at which gridlines are plotted
in the node definition.
}
\item{main}{
(optional) title of the plot.
}
\item{cex.main}{
(optional) scaling factor for title of the plot.
}
\item{xlim}{
(optional) limits for x-axis of the plot (default is range).
}
\item{\dots}{
additional arguments passed to the R plotting routine.
}
}
\note{
Note that the plotting routines for the other end nodes \cr
\code{\link{plot.utility.endnode.discrete}} \cr
\code{\link{plot.utility.endnode.parfun1d}} \cr
\code{\link{plot.utility.endnode.intpol2d}} \cr
\code{\link{plot.utility.endnode.cond}} \cr
\code{\link{plot.utility.endnode.firstavail}} \cr
are as far as possible the same so that all end nodes can be plotted with the same commands irrespective of the type of the end node.
}
\references{
Short description of the package: \cr\cr
Reichert, P., Schuwirth, N. and Langhans, S.,
Constructing, evaluating and visualizing value and utility functions for decision support, Environmental Modelling & Software 46, 283-291, 2013. \cr\cr
Textbooks on the use of utility and value functions in decision analysis: \cr\cr
Keeney, R. L. and Raiffa, H. Decisions with Multiple Objectives - Preferences and Value Tradeoffs. John Wiley & Sons, 1976. \cr\cr
Eisenfuehr, F., Weber, M. and Langer, T., Rational Decision Making, Springer, Berlin, 2010.
}
\author{
Peter Reichert <peter.reichert@eawag.ch>
}
\seealso{
See \code{\link{utility.endnode.intpol1d.create}} for how to construct such a node and \code{\link{evaluate.utility.endnode.intpol1d}} for how to evaluate the node. \cr\cr
See \code{\link{utility.calc.colors}} for an example of how to construct color schemes and \code{\link{utility.get.colors}} for how to get colors for specifed value levels.
}
\examples{
# see
help(utility)
# for examples.
}
|
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581804904e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615853317-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 659 | r | testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581804904e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
# Podstawowe komponenty funkcji
# kazda funkcja ma 3 komponenty
# cialo body()
# formals / formals()
# enclosing env / environment
# Przyklad
f <- function( x = "John"){
print(x)
}
body(f)
typeof(f)
typeof(body(f))
formals(f)
typeof(formals(f))
environment(f)
attributes(f)
# Przyklad wykorzystanie do wstecznej modyfikacji funkcji
f <- function(){}
typeof(f)
formals(f) <- alist(x="Mary")
body(f) <- quote( print(x))
body(f)
formals(f)
attributes(f)
f()
f("John")
c
formals(c)
body(c)
# przyklad: liczba wszystkich funkcji primitive
obs <- mget( ls("package:base"), inherit = TRUE)
typeof(obs)
obs
fs <- Filter( f = is.function, x =obs)
length(fs) # liczba wszystkich funkcji
fsPrimitive <- Filter( f= function(x) is.primitive(x) & is.function(x), x = obs)
length(fsPrimitive)
fsNotPrimitive <- Filter( f= function(x) !is.primitive(x) & is.function(x), x = obs)
length(fsNotPrimitive)
# liczb aargumentow ktore przyjmuja funkcje (... - jeden argument)
w <- unlist(lapply(X = fsNotPrimitive, FUN = function(x) length( formals( x))))
names(w) <- NULL
wCounts <- table(w)
x <- as.integer(names(wCounts))
plot(x, wCounts, type='h',lwd=10, col='magenta',
main = 'Number of arguments')
# Przyklad - ast / everything is a call, wszystko jest funkcja
# ast - abstract syntax t
require(pryr)
ast(2+2)
ast(mean( rnorm(100)))
ast((2+2))
# przedefiniowanie funkcji `(`
`(` <- function(x) x+1
(2+1)
rm(`(`)
(2+1)
sapply(1:10, `+`, 3)
# wywolanie funkcji na argumentach
# do.call - trzeba podac nazwe funkcji i liste argumentow.
# call - sluzy to tworzenia wywolan
do.call(mean, list(1:10, na.rm=T))
# lazy computataions - zostaje oblicone dopierojak zostaje wykorzystane
f1<- function(a, b){
print(a)
print(nchar(b))
}
f1("ala", print("John")) # john jest drukowany bo jest wywolywany w nchar(b)
f2<- function(a, b){
print(a)
}
f2("ala", print("John")) # john w ogole nie jest obliczany
# dziwne rzeczy
# argumenty definiowane przez inne argumenty
f1 <- function(a = 1, b = 2*a){
c(a, b)
}
f1(2)
f1(2,5)
# argumenty definiowane przez zmienne lokalne
f2 <- function( a=1, b=2*x){
x <- 2*a
c(a,x,b)
}
f2(3)
f2(3, 6)
# arugmenty wewnatrz funkcji sa wywolywane w srodowisku execution,nie w srodowisku calling
# wymuszanie obliczen - force
f <- function( x=stop("Evaluated")){
123
}
f() # argumenty nie zostal obliczeony
# wymuszanie obliczen
f <- function( x=stop("Evaluated")){
force(x)
123
}
# function factory
f <- function(x){
function(y) x + y
}
g2 <- f(2)
g4 <- f(4)
g2(10)
g4(10)
# stworzenie listy funkcji
q1 <- lapply(1:10, f)
q1[[10]](10)
q1[[1]](10)
for(k in 1:length(q1)) print( q1[[k]](10))
# gdzie sa obliczane argumenty
# argumetny domslne sa oblcizene wewnatrz srodowiska exceution dla funckcji
# dlatego jestli podawana jest wartosc explicite to moze ona by cinna niz domyslan
f <- function( x = ls()){
a <- 123
x
}
rm(x)
x
f() # wartosc domyslna jest wyliczana w execution environment
f( ls()) # wartosc podana jest wyliczana w calling environment
# to rozroznienie jest kluczowe w przypadku obliczen na jedyzku i przy przeczhwytywaniu calli (cel w tym zeby wiedziec gdzie
# obliczane sa formalsy, ktore tez zostana przechwycane)
ne <- new.env(parent = globalenv())
x <- 10
ne$x <- 1
f <- function(q){
print(q)
}
environment(f) <- ne # zmieniam enclosing na ne
f(x)
eval(quote(f(x)), envir = ne)
#binding - tam gdzie szukasz funkcji
#enclosing -tam gdzie funkcja szuka zmiennych
#calling - tam gdzie oblicznae sa jawnie podane arguemnty
# !argumenty podane jawnie sa obliczane w srodowisku calling (a nie enclosing)
# promises
# promise to jest obiekt typu language - jest to nie obliczone wyrazenie. kazda obietynica zawiera dwa elementy:
# - wyrazenie ktore bedzie obliczone jest dojdzie do olibczenia / dostaniesz sie przez funkcje substitute()
# - srodowisko,gdzie wyrazenie zostalo stworzone i gdzie powinno byc wyliczone jezeli do niego dojdzie
# info o obietnicach mozna uzyskac za pomoca pryr::promise_info()
# przyklady z ksiazki wickhama
x <- NULL
x > 0
if (!is.null(x) && x>0){
print("Works?")
}
# to wyrazenie nie zwraca bledu dlatego ze drugie wyraznie nie jest obliczone bo pierwsze zwraca FALSE
if (is.null(x) && x>0){
print("Works?")
}
# tu jest blad
# zatem wyraznie x >0 jest przykladem promise- nie jest obliczany dopoki nie jest potrzebny
f <- function(a,b){
c( missing(a),missing(b))
}
f( a= 1, b= 2)
f( a= 1)
f()
#to sie przydaje jezeli chcemy jako wartosc defaultowa wykorzystac cos co wymaga wielu linii kodu do wyliczenia
# wtedy chcemy miec pewnosc ze zostanie policzona tylko jesli arugment jest missing
#argument ...
# testowanie funkcji last
x <- 1:5
address(x) #hash gdzie to siedzi w pamieci
x[0] <- 100
address(x) # adres sie zmienil - tak narpawde stworzono kopie
# wartosci zwracane przez funkcje
# funkcja moze zwrocic jedynie jeden obiekt
# funkcja moze zwracac wartosci niewidoczenpoprzez wykorzystanie invisible(). to jest wygodne bo pozwala laczyc wywolania metod
# invisible(self), przykladem takiej funkcji jest <-. to pozwala na laczenie wywolan
a <-b <- c<- d<- 10
a
b
c
d
# on exit - wygodne sprzatanie
# mozna ustawic wyrazenie, ktore bedzie zawsze wykonywane tuz przed zakonczeniem dzialania funkcji
# ladne wykorzystanie to zmiana katalogu roboczego
doSth <- function(dir, code){
wd <- setwd(dir) # zwraca stary katalog roboczy oraz ustawia na nowy katalog roboczy
on.exit(setwd(wd))
force(code)
}
getwd()
doSth('/home/bpol0190/Documents', quote(print(1)))
getwd()
# obliczenia na jezyku
# wprowadzenie / przechwytywanie wyrazen
# podstawowa funkcja ktora pozwala na wykonywanie obliczen an ajezyku ro fnkcaj subtitute. zamiast zbierac wartosci
# argumentow, zbiera kod, ktory je oblicza
f <- function(x){
substitute( x)
}
f(1:10)
typeof(f(1:10)) # language, a wiec to samo co zwraca funkcja quote
x <- 10
f(x)
eval(f(x))
g <- function(x){
print( typeof( substitute( x)))
}
g(2) # double (wektor atomowy), substitute na stalych zwraca stale
g(x<-2) # language, substitue na wyrazeniu zwraca obiekt typu language
# druga przydatna funkcja to deparse() - bierze obiekt typu language i zamienia na string
q <- quote( x <- 1:5)
q
deparse(q)
f <- function(s) deparse( substitute( s))
f(x <- 1:10)
f( function(s) s^2)
# polaczenie quote i eval
# obie funkcje znamy, ale pamietac ze drugi argument eval to srodowisko ALBO lista czy ramka danych
# przyklad - proste zastosowanie quote i eval
# obliczenia na liscie
eval(quote(x), list(x = 100))
x
# obliczenia na ramce danych
eval(quote(x), data.frame(x=1:10))
# obliczenia na srodowisku
q <-new.env(parent = globalenv())
q$x<- rnorm(10)
eval(quote(mean(x)), q)
rm(q)
#implementacja subset
subset2 <- function(data, cond) {
  # Grab the condition as an unevaluated expression (non-standard evaluation).
  cond_expr <- substitute(cond)
  # Resolve the column names used in `cond_expr` against the data frame itself.
  rows <- eval(cond_expr, data)
  data[rows, ]
}
data <- data.frame(pos = 1:10, val = rnorm(10))
data
subset2(data, cond = val>0)
subset2(data, cond = val>0 & val<1)
# przyklad ze nalezy zawsze patrzec na to co i gdzie sie oblicza. typowy problem z przeciekaniem danych od gory
d <-data.frame(a= 1:5, b=5:1, c=c(5,3,1,4,1))
d
subset2 <- function(data, cond){
condCall <- substitute( cond)
print(condCall)
print( ls())
ind <- eval(condCall, data)
print(ind)
data[ind,]
}
y<- 4
data<- 4
cond <- 4
condCall <- 4
subset2( d, a==4)#ok
subset2( d, a==y)#ok, y zbierane ze sciezki
subset2(d, a==data)# fails, wyrazenie a== data jest obliczane w ramce danych gdzie nie ma zmiennej data
#Po przejsciu o krokw gore wchodzimy do srodowiska executin, gdzie jest taka zmienna i jest to ramka danych
#porownujemy wektor a do calej ramki danych data, dostajemy spaghetti
subset2(d, a==cond)
# zmienna cond nie istnieje w ramce danych, idziem w gore, w execution env jest zmienna cond
subset2(d, a==condCall)
# nie ma w ramce danych, jest w execution, ale jest to obiekt typu language wiec to nie moze sotac porownane
# aby tego uniknac nalezy ustawic ostatni argument eval() na calling env dla tej funkcji
subset3 <- function(data, cond){
condCall <- substitute( cond)
ind <- eval(condCall, data, parent.frame())
data[ind,]
}
subset3( d, a==4)
subset3( d, a==y)
subset3(d, a==data)
subset3(d, a==cond)
subset3(d, a==condCall)
# wolanie z innej funkcji
rm(list=ls())
d <-data.frame(a= 1:5, b=5:1, c=c(5,3,1,4,1))
scramble <- function(x)x[sample(nrow(x)), ]
scramble(d)
subscramble <- function(x, cond) {
scramble(subset3(x,cond))
}
subscramble(d, cond = a>3)
traceback() # wywraca sie na evalu
# w zmiennej condCall znajduje sie boiekt language
# zaweirajacy cond. w momencie, gdy chcemy olbiczyc evala na condCall
# otrzymujemy cond, ktorego nie ma w data, wtedy przechodzimy od
# callign env, ktorym jest execution funkcji wolajace
# tam stramy sie obliczcyc cond ale tam juz nie ma a i otrzymujemy blad
# jezeli dodamy a do globalenv() to technicznei wszystko sie policzy ale otrzymamy blad logiczny
# w subset jest quote na quote i daltego(?) sie wywala
# przyklad
x
q <- quote(quote(x <-2))
eval(q)
x #zdjelismy tylko jednego quote
eval( eval(q))
x # teraz dopiero dobralismy sie do x
# escape catch
# cos ponizej jest nie tak - sprawdzic w ksiazce wickhama
# "Escape hatch" pattern: subset3Q() takes an *already quoted* condition, so a
# caller that captures the expression itself can pass it straight through
# without a second substitute() mangling it.
subset3Q <- function(data, cond){
    # `cond` is expected to be a language object; evaluate it with the data
    # frame first on the lookup path and the caller's frame as fallback.
    ind <- eval(cond, data, parent.frame())
    data[ind,]
}
# Convenience wrapper: capture the condition, then delegate to subset3Q().
subset3 <- function(data, cond){
    subset3Q(data, substitute(cond))
}
# NOTE(review): as the comment above this block says, something here is off --
# condCall is captured but never used; the promise `cond` is passed instead of
# the quoted condCall, so the escape hatch is not actually exercised. It should
# presumably read subset3Q(x, condCall) -- confirm against Wickham's Advanced R.
subscramble <- function(x, cond) {
    condCall <- substitute( cond)
    scramble(subset3Q(x,cond))
}
subscramble(d, cond = a>3)
| /R/L7.R | no_license | ronich/objectR | R | false | false | 9,451 | r | # Podstawowe komponenty funkcji
# kazda funkcja ma 3 komponenty
# cialo body()
# formals / formals()
# enclosing env / environment
# Przyklad
f <- function( x = "John"){
print(x)
}
body(f)
typeof(f)
typeof(body(f))
formals(f)
typeof(formals(f))
environment(f)
attributes(f)
# Przyklad wykorzystanie do wstecznej modyfikacji funkcji
f <- function(){}
typeof(f)
formals(f) <- alist(x="Mary")
body(f) <- quote( print(x))
body(f)
formals(f)
attributes(f)
f()
f("John")
c
formals(c)
body(c)
# przyklad: liczba wszystkich funkcji primitive
obs <- mget( ls("package:base"), inherit = TRUE)
typeof(obs)
obs
fs <- Filter( f = is.function, x =obs)
length(fs) # liczba wszystkich funkcji
fsPrimitive <- Filter( f= function(x) is.primitive(x) & is.function(x), x = obs)
length(fsPrimitive)
fsNotPrimitive <- Filter( f= function(x) !is.primitive(x) & is.function(x), x = obs)
length(fsNotPrimitive)
# liczb aargumentow ktore przyjmuja funkcje (... - jeden argument)
w <- unlist(lapply(X = fsNotPrimitive, FUN = function(x) length( formals( x))))
names(w) <- NULL
wCounts <- table(w)
x <- as.integer(names(wCounts))
plot(x, wCounts, type='h',lwd=10, col='magenta',
main = 'Number of arguments')
# Przyklad - ast / everything is a call, wszystko jest funkcja
# ast - abstract syntax t
require(pryr)
ast(2+2)
ast(mean( rnorm(100)))
ast((2+2))
# przedefiniowanie funkcji `(`
`(` <- function(x) x+1
(2+1)
rm(`(`)
(2+1)
sapply(1:10, `+`, 3)
# wywolanie funkcji na argumentach
# do.call - trzeba podac nazwe funkcji i liste argumentow.
# call - sluzy to tworzenia wywolan
do.call(mean, list(1:10, na.rm=T))
# lazy computataions - zostaje oblicone dopierojak zostaje wykorzystane
f1<- function(a, b){
print(a)
print(nchar(b))
}
f1("ala", print("John")) # john jest drukowany bo jest wywolywany w nchar(b)
f2<- function(a, b){
print(a)
}
f2("ala", print("John")) # john w ogole nie jest obliczany
# dziwne rzeczy
# argumenty definiowane przez inne argumenty
f1 <- function(a = 1, b = 2*a){
c(a, b)
}
f1(2)
f1(2,5)
# argumenty definiowane przez zmienne lokalne
f2 <- function( a=1, b=2*x){
x <- 2*a
c(a,x,b)
}
f2(3)
f2(3, 6)
# arugmenty wewnatrz funkcji sa wywolywane w srodowisku execution,nie w srodowisku calling
# wymuszanie obliczen - force
f <- function( x=stop("Evaluated")){
123
}
f() # argumenty nie zostal obliczeony
# wymuszanie obliczen
f <- function( x=stop("Evaluated")){
force(x)
123
}
# function factory
f <- function(x){
function(y) x + y
}
g2 <- f(2)
g4 <- f(4)
g2(10)
g4(10)
# stworzenie listy funkcji
q1 <- lapply(1:10, f)
q1[[10]](10)
q1[[1]](10)
for(k in 1:length(q1)) print( q1[[k]](10))
# gdzie sa obliczane argumenty
# argumetny domslne sa oblcizene wewnatrz srodowiska exceution dla funckcji
# dlatego jestli podawana jest wartosc explicite to moze ona by cinna niz domyslan
f <- function( x = ls()){
a <- 123
x
}
rm(x)
x
f() # wartosc domyslna jest wyliczana w execution environment
f( ls()) # wartosc podana jest wyliczana w calling environment
# to rozroznienie jest kluczowe w przypadku obliczen na jedyzku i przy przeczhwytywaniu calli (cel w tym zeby wiedziec gdzie
# obliczane sa formalsy, ktore tez zostana przechwycane)
ne <- new.env(parent = globalenv())
x <- 10
ne$x <- 1
f <- function(q){
print(q)
}
environment(f) <- ne # zmieniam enclosing na ne
f(x)
eval(quote(f(x)), envir = ne)
#binding - tam gdzie szukasz funkcji
#enclosing -tam gdzie funkcja szuka zmiennych
#calling - tam gdzie oblicznae sa jawnie podane arguemnty
# !argumenty podane jawnie sa obliczane w srodowisku calling (a nie enclosing)
# promises
# promise to jest obiekt typu language - jest to nie obliczone wyrazenie. kazda obietynica zawiera dwa elementy:
# - wyrazenie ktore bedzie obliczone jest dojdzie do olibczenia / dostaniesz sie przez funkcje substitute()
# - srodowisko,gdzie wyrazenie zostalo stworzone i gdzie powinno byc wyliczone jezeli do niego dojdzie
# info o obietnicach mozna uzyskac za pomoca pryr::promise_info()
# przyklady z ksiazki wickhama
x <- NULL
x > 0
if (!is.null(x) && x>0){
print("Works?")
}
# to wyrazenie nie zwraca bledu dlatego ze drugie wyraznie nie jest obliczone bo pierwsze zwraca FALSE
if (is.null(x) && x>0){
print("Works?")
}
# tu jest blad
# zatem wyraznie x >0 jest przykladem promise- nie jest obliczany dopoki nie jest potrzebny
f <- function(a,b){
c( missing(a),missing(b))
}
f( a= 1, b= 2)
f( a= 1)
f()
#to sie przydaje jezeli chcemy jako wartosc defaultowa wykorzystac cos co wymaga wielu linii kodu do wyliczenia
# wtedy chcemy miec pewnosc ze zostanie policzona tylko jesli arugment jest missing
#argument ...
# testowanie funkcji last
x <- 1:5
address(x) #hash gdzie to siedzi w pamieci
x[0] <- 100
address(x) # adres sie zmienil - tak narpawde stworzono kopie
# wartosci zwracane przez funkcje
# funkcja moze zwrocic jedynie jeden obiekt
# funkcja moze zwracac wartosci niewidoczenpoprzez wykorzystanie invisible(). to jest wygodne bo pozwala laczyc wywolania metod
# invisible(self), przykladem takiej funkcji jest <-. to pozwala na laczenie wywolan
a <-b <- c<- d<- 10
a
b
c
d
# on exit - wygodne sprzatanie
# mozna ustawic wyrazenie, ktore bedzie zawsze wykonywane tuz przed zakonczeniem dzialania funkcji
# ladne wykorzystanie to zmiana katalogu roboczego
doSth <- function(dir, code) {
  # setwd() returns the *previous* working directory while switching to `dir`.
  old_wd <- setwd(dir)
  # Restore the original working directory however the function exits.
  on.exit(setwd(old_wd))
  # Force the lazily-evaluated `code` promise so it runs inside `dir`;
  # its value is the function's result.
  force(code)
}
getwd()
doSth('/home/bpol0190/Documents', quote(print(1)))
getwd()
# obliczenia na jezyku
# wprowadzenie / przechwytywanie wyrazen
# podstawowa funkcja ktora pozwala na wykonywanie obliczen an ajezyku ro fnkcaj subtitute. zamiast zbierac wartosci
# argumentow, zbiera kod, ktory je oblicza
f <- function(x){
substitute( x)
}
f(1:10)
typeof(f(1:10)) # language, a wiec to samo co zwraca funkcja quote
x <- 10
f(x)
eval(f(x))
g <- function(x){
print( typeof( substitute( x)))
}
g(2) # double (wektor atomowy), substitute na stalych zwraca stale
g(x<-2) # language, substitue na wyrazeniu zwraca obiekt typu language
# druga przydatna funkcja to deparse() - bierze obiekt typu language i zamienia na string
q <- quote( x <- 1:5)
q
deparse(q)
f <- function(s) deparse( substitute( s))
f(x <- 1:10)
f( function(s) s^2)
# polaczenie quote i eval
# obie funkcje znamy, ale pamietac ze drugi argument eval to srodowisko ALBO lista czy ramka danych
# przyklad - proste zastosowanie quote i eval
# obliczenia na liscie
eval(quote(x), list(x = 100))
x
# obliczenia na ramce danych
eval(quote(x), data.frame(x=1:10))
# obliczenia na srodowisku
q <-new.env(parent = globalenv())
q$x<- rnorm(10)
eval(quote(mean(x)), q)
rm(q)
#implementacja subset
subset2 <- function(data, cond) {
  # Capture the unevaluated condition instead of its value.
  expr <- substitute(cond)
  # Evaluate it using the data frame's columns as variables.
  keep <- eval(expr, data)
  data[keep, ]
}
data <- data.frame(pos = 1:10, val = rnorm(10))
data
subset2(data, cond = val>0)
subset2(data, cond = val>0 & val<1)
# przyklad ze nalezy zawsze patrzec na to co i gdzie sie oblicza. typowy problem z przeciekaniem danych od gory
d <-data.frame(a= 1:5, b=5:1, c=c(5,3,1,4,1))
d
# Debugging variant of subset2(): prints the captured call, the names visible
# in the execution environment, and the computed row index before subsetting.
# It exists to demonstrate scope leakage: eval() falls back from `data` to the
# function's execution environment, so names like `data`/`cond`/`condCall`
# resolve against the function's own locals.
subset2 <- function(data, cond){
    # Capture the unevaluated condition expression.
    condCall <- substitute( cond)
    print(condCall)
    print( ls())
    # Evaluated with `data` as the environment; unmatched names are looked up
    # in the enclosing execution frame -- the source of the leakage shown in
    # the demo calls below.
    ind <- eval(condCall, data)
    print(ind)
    data[ind,]
}
y<- 4
data<- 4
cond <- 4
condCall <- 4
subset2( d, a==4)#ok
subset2( d, a==y)#ok, y zbierane ze sciezki
subset2(d, a==data)# fails, wyrazenie a== data jest obliczane w ramce danych gdzie nie ma zmiennej data
#Po przejsciu o krokw gore wchodzimy do srodowiska executin, gdzie jest taka zmienna i jest to ramka danych
#porownujemy wektor a do calej ramki danych data, dostajemy spaghetti
subset2(d, a==cond)
# zmienna cond nie istnieje w ramce danych, idziem w gore, w execution env jest zmienna cond
subset2(d, a==condCall)
# nie ma w ramce danych, jest w execution, ale jest to obiekt typu language wiec to nie moze sotac porownane
# aby tego uniknac nalezy ustawic ostatni argument eval() na calling env dla tej funkcji
subset3 <- function(data, cond) {
  # Capture the condition unevaluated, then evaluate it with the data frame
  # first on the lookup path and the *caller's* frame as the fallback, so
  # free variables resolve where the user wrote the condition.
  quoted <- substitute(cond)
  keep <- eval(quoted, data, parent.frame())
  data[keep, ]
}
subset3( d, a==4)
subset3( d, a==y)
subset3(d, a==data)
subset3(d, a==cond)
subset3(d, a==condCall)
# wolanie z innej funkcji
rm(list=ls())
d <-data.frame(a= 1:5, b=5:1, c=c(5,3,1,4,1))
scramble <- function(x)x[sample(nrow(x)), ]
scramble(d)
# Demonstrates the downside of substitute()-based NSE when composing functions:
# `cond` here is a promise for the caller's expression; subset3() then runs
# substitute() again and captures the *symbol* `cond` rather than the original
# expression, so the eval() inside subset3() fails (as the traceback() call in
# the demo below shows). Broken by design for teaching purposes.
subscramble <- function(x, cond) {
    scramble(subset3(x,cond))
}
subscramble(d, cond = a>3)
traceback() # wywraca sie na evalu
# w zmiennej condCall znajduje sie boiekt language
# zaweirajacy cond. w momencie, gdy chcemy olbiczyc evala na condCall
# otrzymujemy cond, ktorego nie ma w data, wtedy przechodzimy od
# callign env, ktorym jest execution funkcji wolajace
# tam stramy sie obliczcyc cond ale tam juz nie ma a i otrzymujemy blad
# jezeli dodamy a do globalenv() to technicznei wszystko sie policzy ale otrzymamy blad logiczny
# w subset jest quote na quote i daltego(?) sie wywala
# przyklad
x
q <- quote(quote(x <-2))
eval(q)
x #zdjelismy tylko jednego quote
eval( eval(q))
x # teraz dopiero dobralismy sie do x
# escape catch
# cos ponizej jest nie tak - sprawdzic w ksiazce wickhama
# "Escape hatch" pattern: subset3Q() accepts an *already quoted* condition so
# that callers capturing the expression themselves can forward it untouched.
subset3Q <- function(data, cond){
    # `cond` is expected to be a language object; evaluate it with `data`
    # first on the lookup path and the caller's frame as the fallback.
    ind <- eval(cond, data, parent.frame())
    data[ind,]
}
# Convenience wrapper: capture the condition, then delegate to subset3Q().
subset3 <- function(data, cond){
    subset3Q(data, substitute(cond))
}
# NOTE(review): per the comment preceding this block, something here is wrong --
# condCall is captured but unused, and the promise `cond` is passed instead of
# the quoted condCall, so the escape hatch is never exercised. It should
# presumably be subset3Q(x, condCall); compare with Wickham's Advanced R.
subscramble <- function(x, cond) {
    condCall <- substitute( cond)
    scramble(subset3Q(x,cond))
}
subscramble(d, cond = a>3)
|
Renvs= new.env()
##' switchTo
##'
##' Switch to a different computing environment (set of installed R packages
##' and library location paths for new pkg installs)
##'
##' If switchr does not know about the specified computing environment, a new one
##' will be created via installCompEnv. This includes
##' creating a directory under the switchr base directory and installing
##' packages into it. See \code{installCompEnv} for more details.
##'
##' @param name The name associated (or to associate) with the computing
##' environment.
##' @param seed The seed, indicating packages to install into a newly created
##' package library
##' No effect if the library already exists
##' @param reverting Indicates whether we are reverting to the environment in
##' use before the current one. Typically not set directly by the user.
##' @param ignoreRVersion Should the R version in use be ignored when checking
##' for existing computing environments. This is experimental.
##' @param ... Passed directly to \code{installCompEnv} if an existing
##' computing environment is not found.
##' @details This function has the side effect of unloading all loaded
##' packages (other than base packages, GRAN or GRANBase, switchr itself, and
##' switchr's dependencies) and the associated DLLs. It also changes the library
##' location R will use to search for packages, e.g. when you call
##' \code{library}.
##'
##' This means you will have to reinstall packages after switching, which is
##' important and intended (e.g. when switching to using Bioc devel from Bioc
##' release).
##'
##' @note By default, this process involves a call to \code{flushSession} which will
##' attempt to unload all loaded packages. While some support of configuring
##' what is unloaded is provided via \code{switchrDontUnload}, it is recommended
##' that you turn this feature entirely off via \code{switchrNoUnload(TRUE)} when
##' using switchr within dynamic documents (.Rnw/.Rmd files, etc), particularly
##' when using the knitr package.
##' @return Invisibly returns the SwitchrCtx object representing the new
##' computing environment
##'
##' @examples
##' \dontrun{
##' switchTo("mynewlibrary")
##' switchBack()
##'
##' fdman = GithubManifest("gmbecker/fastdigest")
##' switchTo("fastdigestlib", seed = fdman)
##' }
##' @export
##' @docType methods
##' @rdname switchTo
setGeneric("switchTo", function(name, seed = NULL, reverting = FALSE,
                                ignoreRVersion = FALSE, ...)
    standardGeneric("switchTo"))
##' @rdname switchTo
##' @aliases switchTo,character,character
setMethod("switchTo", c(name = "character", seed = "character"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE, ...) {
    ## At this point seed is guaranteed to be a repo url (or something that can
    ## be progressively resolved into one below).
    if(ignoreRVersion)
        rvers = NULL
    else
        rvers = paste(R.version$major, R.version$minor, sep=".")
    cenv = findCompEnv(name = name, rvers = rvers)
    if(is.null(cenv)) {
        chtype = getStringType(seed)
        ## A plain file seed holds the real seed as its text contents.
        if(chtype == "file") {
            seed = readLines(seed)
            chtype = getStringType(seed)
        }
        if(chtype == "sessioninfo") {
            ## we have session info output
            ##XXX need to make sure double use of ... is safe!
            seed2 = makeSeedMan(parseSessionInfoString(seed))
            sr = lazyRepo(seed2, ...)
            seed = if(grepl("file://", sr)) sr else makeFileURL(sr)
            ## Strip the src/contrib suffix to keep just the repository root.
            seed = gsub("(/|\\\\)src(/|\\\\)contrib.*", "", seed)
            chtype = "repourl"
        } else if (chtype == "manifesturl") {
            ## Fetch the manifest text; RCurl handles https URLs on builds
            ## whose readLines() cannot.
            if(requireNamespace2("RCurl")) {
                seed = strsplit(RCurl::getURL(seed), "\n")[[1]]
            } else {
                seed2 = tryCatch(readLines(seed), error = function(e) e)
                if(is(seed2, "error"))
                    stop("Unable to access gist due to https URL. Please install RCurl or use an R version that has libcurl built in.")
                seed = seed2
            }
            chtype = "manifesttxt"
        }
        if (chtype == "manifesttxt") {
            ## Parse the manifest text via a connection, build a lazy repo from
            ## it, and treat that repository as the seed going forward.
            con = textConnection(seed)
            on.exit(close(con))
            seed2 = loadManifest(con)
            close(con)
            on.exit(NULL)
            sr = lazyRepo(seed2, ...)
            seed = if(grepl("file://", sr)) sr else makeFileURL(sr)
            seed = gsub("(/|\\\\)src(/|\\\\)contrib.*", "", seed)
            chtype = "repourl"
        }
        if(grepl("(repo|contrib)", chtype)) {
            seed = repoFromString(seed, chtype)
            chtype = "repourl"
        }
        if(chtype != "repourl") {
            stop("We should have a repository by this point. This shouldn't happen. Contact the maintainers")
        }
        cenv = makeLibraryCtx(name = name, seed = seed, ...)
    } else {
        ## BUGFIX: `name` was previously passed as a second argument to
        ## message() instead of to sprintf(), which made sprintf() error with
        ## "too few arguments" rather than emit the intended message.
        message(sprintf("Library %s already exists. Ignoring seed and switching to existing library", name))
    }
    if(!is.null(cenv))
        switchTo(name = cenv)
    else
        stop("unable to switch to computing environment")
})
## Normalize a repository string of the given `type` to a repository URL.
## Directory paths are wrapped as file:// URLs; contrib-level inputs are first
## stripped back to the repository root.
repoFromString = function(str, type) {
    contrib_pat <- "/(src|bin/windows|bin/macosx|bin/macos).*"
    root <- gsub(contrib_pat, "", str)
    switch(type,
           repodir    = makeFileURL(str),
           contribdir = makeFileURL(root),
           repourl    = str,
           contriburl = root)
}
##' @rdname switchTo
##' @aliases switchTo,character,SwitchrCtx
setMethod("switchTo", c(name = "character", seed= "SwitchrCtx"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE,...) {
    ## R version used to key existing environments (NULL = version-agnostic).
    if(ignoreRVersion)
        rvers = NULL
    else
        rvers = paste(R.version$major, R.version$minor, sep=".")
    ## If an environment with this name already exists the seed is ignored.
    exsting = findCompEnv(name = name, rvers = rvers)
    if(!is.null(exsting)) {
        message("Found existing switchr context. Ignoring seed value")
        return(switchTo(exsting))
    }
    ## Create a fresh library context mirroring the seed context's
    ## site-library setting.
    cenv = makeLibraryCtx(name = name, seed = NULL,
                          exclude.site = seed@exclude.site,
                          ...)
    ## copy existing library contents to the new one
    dirs = list.dirs(file.path(switchrBaseDir(), seed@name), recursive = FALSE)
    file.copy(dirs, library_paths(cenv)[1],
              recursive = TRUE, overwrite = FALSE)
    ## Refresh the context's installed-package table after the copy.
    cenv = update_pkgs_list(cenv)
    switchTo(cenv)
})
##' @rdname switchTo
##' @aliases switchTo,character,missing
setMethod("switchTo", c("character", "missing"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE,...) {
    ## R version used to key existing environments (NULL = version-agnostic).
    if(ignoreRVersion)
        rvers = NULL
    else
        rvers = paste(R.version$major, R.version$minor, sep=".")
    cenv = findCompEnv(name = name, rvers = rvers)
    ## No seed supplied: create an environment from defaults if none exists.
    if(is.null(cenv))
        cenv = makeLibraryCtx(name = name, ...)
    if(!is.null(cenv))
        ## switchTo(name = name, seed = cenv)
        switchTo(name = cenv)
    else
        stop("unable to switch to computing environment")
})
## Regex identifying raw GitHub gist URLs; these need special handling below.
gistregex = "gist\\.githubusercontent\\.com"

## Classify a seed string into one of: "sessioninfo", "manifesttxt", "file",
## "repodir"/"repourl", "contribdir"/"contriburl", "manifestdir", or
## "manifesturl". Multi-element inputs (after the two whole-vector text
## checks) are classified elementwise.
getStringType = function(str) {
    ## sessionInfo() output always contains a "Platform:" line.
    if(any(grepl("Platform:", str)))
        return("sessioninfo")
    if(grepl("^# R manifest", str[1]))
        return("manifesttxt")
    if(length(str) > 1)
        return(sapply(str, getStringType))
    ## file:// URLs are unwrapped to local paths; remember we saw a URL so the
    ## eventual "*dir" answer can be promoted to "*url".
    if(grepl("file://", str)) {
        isfilurl = TRUE
        str = fileFromFileURL(str)
    } else
        isfilurl = FALSE
    if(file.exists(str)) {
        ## file.path(str, ".") exists only when str is a directory.
        if( !file.exists(file.path(str, ".")))
            ret = "file"
        else { #if str points to a directory
            if(grepl("contrib/{0,1}$", str))
                ret = "contribdir"
            else if(file.exists(file.path(str,
                                          "src/contrib/PACKAGES")))
                ret = "repodir"
            else
                ret = "manifestdir"
        }
        ## NOTE(review): ret is always assigned above, so this NULL guard
        ## appears vestigial; kept as-is.
        if(!is.null(ret)) {
            if(ret != "file" && isfilurl)
                ret = gsub("dir$", "url", ret)
            return(ret)
        }
    } else if (isfilurl) { # file doesn't exist, but its a file url
        stop("file urls to non-existent files are not allowed as seeds/repos")
    }
    ## gist urls have a weird thing where if you put *any* valid url
    ## after a gist raw link that works you get the same contents
    ## rather than 404, so the check for PACKAGES.gz isn't safe
    ## until after we've ruled out a gist
    if(grepl(gistregex, str)){
        if(!grepl("/raw/", str))
            stop("When seeding with a manifest within a gist, use the URL to the raw file contents, not the overall gist URL.")
        return("manifesturl")
    }
    ## Remote probes: contrib-level URL, repo-root URL, or any reachable URL
    ## (assumed to be a manifest).
    if(url.exists(paste0(str, "/PACKAGES.gz")))
        return("contriburl")
    else if (url.exists(paste0(str, "/src/contrib/PACKAGES.gz")))
        return("repourl")
    else if (url.exists(str))
        return("manifesturl")
    stop("Unidentifiable string:", str)
}
##' @rdname switchTo
##' @aliases switchTo,SwitchrCtx,ANY
setMethod("switchTo", c(name = "SwitchrCtx", seed = "ANY"), function(name, seed, reverting=FALSE, ...) {
    ## First switch of the session: snapshot the current non-system library
    ## paths as the "original" context so switchBack() can restore them.
    if(is.null(Renvs$stack)) {
        paths = .libPaths()
        paths = paths[!paths %in% c(.Library.site, .Library)]
        Renvs$stack = list(original = SwitchrCtx("original", paths, exclude.site=FALSE, seed = NULL))
    }
    ## Unload currently loaded packages unless the user disabled flushing.
    if(!switchrNoUnload())
        flushSession()
    .libPaths2(library_paths(name), name@exclude.site)
    if(!reverting) {
       # attachedPkgs(Renvs$stack[[length(Renvs$stack)]]) = atched
        ## Push the new context on top of the stack.
        Renvs$stack = c(name, Renvs$stack)
    } else
        ## Reverting: pop the context we are leaving off the stack.
        Renvs$stack = Renvs$stack[-1]
    announce(name, reverted = reverting)
    invisible(name)
})
##' @rdname switchTo
##' @aliases switchTo,character,RepoSubset
setMethod("switchTo", c(name = "character", seed="RepoSubset"), function(name, seed = NULL,
                                                                         reverting = FALSE,
                                                                         ignoreRVersion = FALSE,
                                                                         ...) {
    ## The subset already fixes pkgs/repo_name; passing either again is an error.
    if(any(c("pkgs", "repo_name") %in% names(list(...))))
        stop("Cannot specify pkgs or repo_name when switching to a RepoSubset")
    ##seed is a RepoSubset object
    ## NOTE(review): with "character" in the dispatch signature for `name`,
    ## this missing() branch looks unreachable from normal dispatch -- confirm.
    if(missing(name)) {
        name = seed@default_name
    }
    ## Re-dispatch on the subset's repositories with its package list.
    switchTo(seed = seed@repos, name = name, pkgs = seed@pkgs, ...)
})
##' @rdname switchTo
##' @aliases switchTo,character,PkgManifest
setMethod("switchTo", c("character", seed = "PkgManifest"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE, ...) {
    ## R version used to key existing environments (NULL = version-agnostic).
    if(ignoreRVersion)
        rvers = NULL
    else
        rvers = paste(R.version$major, R.version$minor, sep=".")
    exsting = findCompEnv(name = name, rvers = rvers)
    if(!is.null(exsting)) {
        message("Found existing switchr context. Ignoring seed value")
        return(switchTo(exsting))
    }
    cenv = makeLibraryCtx(name = name, seed = NULL,
                          ...)
    ## Temporarily point the library search path at the new context so the
    ## manifest's packages install (and resolve dependencies) there; on.exit
    ## guards the restore against installation failures.
    oldlp = .libPaths()
    .libPaths2(library_paths(cenv), cenv@exclude.site)
    on.exit(.libPaths2(oldlp))
    install_packages(manifest_df(seed)$name, seed, lib = library_paths(cenv)[1])
    cenv = update_pkgs_list(cenv)
    ## Restore the caller's paths and cancel the safety handler before the
    ## real switch happens.
    .libPaths2(oldlp)
    on.exit(NULL)
    switchTo(cenv)
})
##' @rdname switchTo
##' @aliases switchTo,character,SessionManifest
setMethod("switchTo", c("character", seed = "SessionManifest"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE, ...) {
    ## R version used to key existing environments (NULL = version-agnostic).
    if(ignoreRVersion)
        rvers = NULL
    else
        rvers = paste(R.version$major, R.version$minor, sep=".")
    exsting = findCompEnv(name = name, rvers = rvers)
    if(!is.null(exsting)) {
        message("Found existing switchr context. Ignoring seed value")
        return(switchTo(exsting))
    }
    ## Build an empty context, install the session manifest's packages into
    ## its library, then switch to it.
    cenv = makeLibraryCtx(name = name, seed = NULL,
                          ...)
    install_packages(pkgs = seed, lib = library_paths(cenv)[1])
    cenv = update_pkgs_list(cenv)
    switchTo(cenv)
})
## Internal replacement generic: record the set of packages that were attached
## in a context (stored in the context's @attached slot).
setGeneric("attachedPkgs<-", function(seed, value) standardGeneric("attachedPkgs<-"))

setMethod("attachedPkgs<-", "SwitchrCtx", function(seed, value) {
    seed@attached = value
    seed
})
## Internal generic: report a successful switch (or revert) to the user.
setGeneric("announce", function(seed, reverted=FALSE) standardGeneric("announce"))

setMethod("announce", "SwitchrCtx", function(seed, reverted=FALSE) {
    ## Summarize the destination context and its available package count.
    message(sprintf("%s to the '%s' computing environment. \n%d packages are currently available.", ifelse(reverted, "Reverted", "Switched"),
                    seed@name, nrow(seed@packages)))
    if(seed@exclude.site)
        message("Packages installed in your site library ARE suppressed.")
    message("To switch back to your previous environment type switchBack()")
})
## Human-readable summary of a SwitchrCtx: its name, library locations,
## package counts, and whether the site library is combined in when active.
setMethod("show", "SwitchrCtx", function(object) {
    message(paste(sprintf("An SwitchrCtx object defining the '%s' computing environment", object@name),
                  "\n\n\t", sprintf("Primary library location(s): %s", paste(object@libpaths, collapse=";")),
                  "\n\t", sprintf("Packages: %d packages installed in %d directories (including R's base library)", nrow(object@packages), length(unique(object@packages$LibPath))),
                  "\n\t", paste("This environment DOES ", ifelse(object@exclude.site, "NOT ", ""), "combine with the current site library location when loaded.", sep=""),
                  "\n\n"))
})
##' switchBack
##'
##' A convenience function to switch back to the previously used computing
##' environment.
##' @export
switchBack = function() {
    ## Need both the active context and at least one older one on the stack.
    if (length(Renvs$stack) >= 2) {
        return(switchTo(Renvs$stack[[2]], reverting = TRUE))
    }
    warning("No previous computing environment to switch back to. Computing environment will remain unchanged")
    NULL
}
##' currentCompEnv
##'
##' Display the computing environment currently in use. If switchTo has not been
##' called, a new SwitchrCtx object describing the current environment is
##' created.
##' @export
currentCompEnv = function() {
    ## Lazily initialize the context stack with this session's current
    ## non-system library paths as the "original" environment.
    if(is.null(Renvs$stack)) {
        lp = .libPaths()
        lp = lp[!(lp %in% .Library | lp %in% .Library.site)]
        Renvs$stack = list(original = SwitchrCtx("original",
                                                 libpaths = lp , seed = NULL,
                                                 exclude.site=FALSE))
    }
    ## The top of the stack is always the active environment.
    Renvs$stack[[1]]
}
## Set the full library search path, optionally suppressing the site library.
## Trick: .libPaths() re-appends the .Library.site value found in its own
## enclosing environment, so we rebind a copy of the function to a new
## environment where .Library.site is empty when exclude.site = TRUE.
.libPaths2 = function(fulllp, exclude.site=TRUE) {
    fun = .libPaths
    lst = list()
    lst$.Library.site = if(exclude.site) character() else .Library.site
    ## New enclosure shadows .Library.site but, via `parent`, still sees
    ## everything else .libPaths normally would.
    environment(fun) = list2env(lst,
                                parent = environment(.libPaths))
    fun(fulllp)
}
| /switchr/R/methods.R | no_license | ingted/R-Examples | R | false | false | 15,616 | r | Renvs= new.env()
##' switchTo
##'
##' Switch to a different computing environment (set of installed R packages
##' and library location paths for new pkg installs)
##'
##' If switchr does not now about the specified computing environment, a new one
##' will be created via installCompEnv. This includes
##' creating a directory under the switchr base directory and installing
##' packages into it. See \code{installCompEnv} for more details.
##'
##' @param name The name associated (or to associate) with the computing
##' environment.
##' @param seed The seed, indicating packages to install into a newly created
##' package library
##' No effect if the library already exists
##' @param reverting Indicates whether we are reverting to the environment in
##' use before the current one. Typically not set directly by the user.
##' @param ignoreRVersion Should the R version in use be ignored when checking
##' for existing computing environmeSnts. This is experimental.
##' @param ... Passed directly to \code{installCompEnv} if an existing
##' computing environment is not found.
##' @details This function has the side effect of unloading all loaded
##' packages (other than base packages, GRAN or GRANBAse, switchr itself, and
##' switchr's dependencies) and the associated DLLs. It also changes the library
##' location R will use to search for packages, e.g. when you call
##' \code{library}.
##'
##' This means you will have to reinstall packages after switching, which is
##' important and intended (e.g. when switching to using Bioc devel from Bioc
##' release).
##'
##' @note By default, this process involves a call to \code{flushSession} which will
##' attempt to unload all loaded packages. While some support of configuring
##' what is unloaded is provided via \code{switchrDontUnload}, it is recommended
##' that you turn this feature entirely off via \code{switchrNoUnload(TRUE)} when
##' using switchr within dyanmic documents (.Rnw/.Rmd files, etc), particularly
##' when using the knitr package.
##' @return Invisibly returns the SwitchrCtx object representing the new
##' computing environment
##'
##' @examples
##' \dontrun{
##' switchTo("mynewlibrary")
##' switchBack()
##'
##' fdman = GithubManifest("gmbecker/fastdigest")
##' switchTo("fastdigestlib", seed = fdman)
##' }
##' @export
##' @docType methods
##' @rdname switchTo
setGeneric("switchTo", function(name, seed = NULL, reverting = FALSE,
ignoreRVersion = FALSE, ...)
standardGeneric("switchTo"))
##' @rdname switchTo
##' @aliases switchTo,character,character
setMethod("switchTo", c(name = "character", seed = "character"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE, ...) {
              ## `seed` arrives as a string that may be (in order of detection)
              ## a file path, sessionInfo() text, a manifest URL/text, or a
              ## repository URL/path. Each branch below normalises it one step
              ## closer to a repository URL, which makeLibraryCtx needs.
              if(ignoreRVersion)
                  rvers = NULL
              else
                  rvers = paste(R.version$major, R.version$minor, sep=".")
              cenv = findCompEnv(name = name, rvers = rvers)
              if(is.null(cenv)) {
                  chtype = getStringType(seed)
                  if(chtype == "file") {
                      ## a plain file: read its contents and re-classify them
                      seed = readLines(seed)
                      chtype = getStringType(seed)
                  }
                  if(chtype == "sessioninfo") {
                      ## we have session info output
                      ##XXX need to make sure double use of ... is safe!
                      seed2 = makeSeedMan(parseSessionInfoString(seed))
                      sr = lazyRepo(seed2, ...)
                      seed = if(grepl("file://", sr)) sr else makeFileURL(sr)
                      ## strip src/contrib (or OS bin path) to get the repo root
                      seed = gsub("(/|\\\\)src(/|\\\\)contrib.*", "", seed)
                      chtype = "repourl"
                  } else if (chtype == "manifesturl") {
                      if(requireNamespace2("RCurl")) {
                          seed = strsplit(RCurl::getURL(seed), "\n")[[1]]
                      } else {
                          ## no RCurl: readLines only works if this R build has
                          ## native https (libcurl) support
                          seed2 = tryCatch(readLines(seed), error = function(e) e)
                          if(is(seed2, "error"))
                              stop("Unable to access gist due to https URL. Please install RCurl or use an R version that has libcurl built in.")
                          seed = seed2
                      }
                      chtype = "manifesttxt"
                  }
                  if (chtype == "manifesttxt") {
                      con = textConnection(seed)
                      ## close the connection even if loadManifest throws
                      on.exit(close(con))
                      seed2 = loadManifest(con)
                      close(con)
                      on.exit(NULL)
                      sr = lazyRepo(seed2, ...)
                      seed = if(grepl("file://", sr)) sr else makeFileURL(sr)
                      seed = gsub("(/|\\\\)src(/|\\\\)contrib.*", "", seed)
                      chtype = "repourl"
                  }
                  if(grepl("(repo|contrib)", chtype)) {
                      seed = repoFromString(seed, chtype)
                      chtype = "repourl"
                  }
                  if(chtype != "repourl") {
                      stop("We should have a repository by this point. This shouldn't happen. Contact the maintainers")
                  }
                  cenv = makeLibraryCtx(name = name, seed = seed, ...)
              } else {
                  ## BUGFIX: `name` was previously passed as a second argument
                  ## to message() rather than to sprintf(), so the %s
                  ## placeholder was never substituted.
                  message(sprintf("Library %s already exists. Ignoring seed and switching to existing library", name))
              }
              if(!is.null(cenv))
                  switchTo(name = cenv)
              else
                  stop("unable to switch to computing environment")
          })
## Normalise a repository/contrib directory or URL string into a repository
## URL. Directory paths are converted to file:// URLs; contrib-level inputs
## are trimmed back to the repository root. Unknown types yield NULL.
repoFromString = function(str, type) {
    ## drop the src/ or platform-specific bin/ suffix to reach the repo root
    strip_contrib = function(path)
        gsub("/(src|bin/windows|bin/macosx|bin/macos).*", "", path)
    if (type == "repodir")
        makeFileURL(str)
    else if (type == "contribdir")
        makeFileURL(strip_contrib(str))
    else if (type == "repourl")
        str
    else if (type == "contriburl")
        strip_contrib(str)
}
##' @rdname switchTo
##' @aliases switchTo,character,SwitchrCtx
setMethod("switchTo", c(name = "character", seed= "SwitchrCtx"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE,...) {
              ## Create a new switchr library called `name`, seeded by copying
              ## the installed package directories of an existing SwitchrCtx.
              if(ignoreRVersion)
                  rvers = NULL
              else
                  rvers = paste(R.version$major, R.version$minor, sep=".")
              exsting = findCompEnv(name = name, rvers = rvers)
              if(!is.null(exsting)) {
                  message("Found existing switchr context. Ignoring seed value")
                  return(switchTo(exsting))
              }
              cenv = makeLibraryCtx(name = name, seed = NULL,
                                    exclude.site = seed@exclude.site,
                                    ...)
              ## copy existing library contents to the new one
              ## (overwrite = FALSE keeps anything already present in the target)
              dirs = list.dirs(file.path(switchrBaseDir(), seed@name), recursive = FALSE)
              file.copy(dirs, library_paths(cenv)[1],
                        recursive = TRUE, overwrite = FALSE)
              ## refresh the package inventory of the new context before switching
              cenv = update_pkgs_list(cenv)
              switchTo(cenv)
          })
##' @rdname switchTo
##' @aliases switchTo,character,missing
setMethod("switchTo", c("character", "missing"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE,...) {
              ## No seed supplied: reuse an existing environment with this
              ## name (and matching R version) or create a fresh, unseeded one.
              if(ignoreRVersion)
                  rvers = NULL
              else
                  rvers = paste(R.version$major, R.version$minor, sep=".")
              cenv = findCompEnv(name = name, rvers = rvers)
              if(is.null(cenv))
                  cenv = makeLibraryCtx(name = name, ...)
              if(!is.null(cenv))
                  ## switchTo(name = name, seed = cenv)
                  switchTo(name = cenv)
              else
                  stop("unable to switch to computing environment")
          })
## Regex identifying raw-content URLs hosted on GitHub gists.
gistregex = "gist\\.githubusercontent\\.com"
## Classify a seed string. Returns one of: "sessioninfo", "manifesttxt",
## "file", "repodir"/"repourl", "contribdir"/"contriburl", "manifestdir",
## "manifesturl" -- or a vector of types when `str` has multiple elements.
## Stops with an error when the string cannot be identified.
getStringType = function(str) {
    ## multi-line inputs: sessionInfo() output or manifest text
    if(any(grepl("Platform:", str)))
        return("sessioninfo")
    if(grepl("^# R manifest", str[1]))
        return("manifesttxt")
    ## otherwise classify each element individually
    if(length(str) > 1)
        return(sapply(str, getStringType))
    if(grepl("file://", str)) {
        isfilurl = TRUE
        str = fileFromFileURL(str)
    } else
        isfilurl = FALSE
    if(file.exists(str)) {
        ## file.path(str, ".") only exists when str is a directory
        if( !file.exists(file.path(str, ".")))
            ret = "file"
        else { #if str points to a directory
            if(grepl("contrib/{0,1}$", str))
                ret = "contribdir"
            else if(file.exists(file.path(str,
                                          "src/contrib/PACKAGES")))
                ret = "repodir"
            else
                ret = "manifestdir"
        }
        if(!is.null(ret)) {
            ## directories reached via a file:// URL are reported as URLs
            if(ret != "file" && isfilurl)
                ret = gsub("dir$", "url", ret)
            return(ret)
        }
    } else if (isfilurl) { # file doesn't exist, but its a file url
        stop("file urls to non-existent files are not allowed as seeds/repos")
    }
    ## gist urls have a weird thing where if you put *any* valid url
    ## after a gist raw link that works you get the same contents
    ## rather than 404, so the check for PACKAGES.gz isn't safe
    ## until after we've ruled out a gist
    if(grepl(gistregex, str)){
        if(!grepl("/raw/", str))
            stop("When seeding with a manifest within a gist, use the URL to the raw file contents, not the overall gist URL.")
        return("manifesturl")
    }
    ## remote classification: probe for repository index files
    if(url.exists(paste0(str, "/PACKAGES.gz")))
        return("contriburl")
    else if (url.exists(paste0(str, "/src/contrib/PACKAGES.gz")))
        return("repourl")
    else if (url.exists(str))
        return("manifesturl")
    stop("Unidentifiable string:", str)
}
##' @rdname switchTo
##' @aliases switchTo,SwitchrCtx,ANY
setMethod("switchTo", c(name = "SwitchrCtx", seed = "ANY"), function(name, seed, reverting=FALSE, ...) {
    ## The workhorse method: swaps .libPaths() to the target context and
    ## maintains the session-wide stack of environments in Renvs$stack.
    if(is.null(Renvs$stack)) {
        ## first switch this session: record the user's original (non-site,
        ## non-base) library paths so switchBack() can restore them later
        paths = .libPaths()
        paths = paths[!paths %in% c(.Library.site, .Library)]
        Renvs$stack = list(original = SwitchrCtx("original", paths, exclude.site=FALSE, seed = NULL))
    }
    ## unload non-base packages unless the user opted out
    if(!switchrNoUnload())
        flushSession()
    .libPaths2(library_paths(name), name@exclude.site)
    if(!reverting) {
        # attachedPkgs(Renvs$stack[[length(Renvs$stack)]]) = atched
        ## a normal switch pushes the new context onto the stack...
        Renvs$stack = c(name, Renvs$stack)
    } else
        ## ...while a revert (switchBack) pops the current head
        Renvs$stack = Renvs$stack[-1]
    announce(name, reverted = reverting)
    invisible(name)
})
##' @rdname switchTo
##' @aliases switchTo,character,RepoSubset
setMethod("switchTo", c(name = "character", seed="RepoSubset"), function(name, seed = NULL,
                                                                         reverting = FALSE,
                                                                         ignoreRVersion = FALSE,
                                                                         ...) {
    ## A RepoSubset bundles repos + a package subset + a default name;
    ## delegate to the character,character method with pkgs filled in.
    if(any(c("pkgs", "repo_name") %in% names(list(...))))
        stop("Cannot specify pkgs or repo_name when switching to a RepoSubset")
    ##seed is a RepoSubset object
    ## NOTE(review): with dispatch on name="character", `name` should always
    ## be supplied here, so this missing() branch looks unreachable -- confirm
    ## before removing.
    if(missing(name)) {
        name = seed@default_name
    }
    switchTo(seed = seed@repos, name = name, pkgs = seed@pkgs, ...)
})
##' @rdname switchTo
##' @aliases switchTo,character,PkgManifest
setMethod("switchTo", c("character", seed = "PkgManifest"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE, ...) {
              ## Seed a new library by installing every package listed in a
              ## PkgManifest into it, then switch to the result.
              if(ignoreRVersion)
                  rvers = NULL
              else
                  rvers = paste(R.version$major, R.version$minor, sep=".")
              exsting = findCompEnv(name = name, rvers = rvers)
              if(!is.null(exsting)) {
                  message("Found existing switchr context. Ignoring seed value")
                  return(switchTo(exsting))
              }
              cenv = makeLibraryCtx(name = name, seed = NULL,
                                    ...)
              ## temporarily point .libPaths at the new library for the
              ## installs; on.exit guards the restore if install_packages fails
              oldlp = .libPaths()
              .libPaths2(library_paths(cenv), cenv@exclude.site)
              on.exit(.libPaths2(oldlp))
              install_packages(manifest_df(seed)$name, seed, lib = library_paths(cenv)[1])
              cenv = update_pkgs_list(cenv)
              ## restore manually and cancel the handler on the success path
              .libPaths2(oldlp)
              on.exit(NULL)
              switchTo(cenv)
          })
##' @rdname switchTo
##' @aliases switchTo,character,SessionManifest
setMethod("switchTo", c("character", seed = "SessionManifest"),
          function(name, seed, reverting = FALSE, ignoreRVersion = FALSE, ...) {
              ## Seed a new library from a SessionManifest (a manifest plus
              ## pinned package versions); install_packages handles versions.
              if(ignoreRVersion)
                  rvers = NULL
              else
                  rvers = paste(R.version$major, R.version$minor, sep=".")
              exsting = findCompEnv(name = name, rvers = rvers)
              if(!is.null(exsting)) {
                  message("Found existing switchr context. Ignoring seed value")
                  return(switchTo(exsting))
              }
              cenv = makeLibraryCtx(name = name, seed = NULL,
                                    ...)
              ## NOTE(review): unlike the PkgManifest method, .libPaths is not
              ## temporarily switched around this install -- confirm
              ## install_packages does not depend on it.
              install_packages(pkgs = seed, lib = library_paths(cenv)[1])
              cenv = update_pkgs_list(cenv)
              switchTo(cenv)
          })
## Replacement generic/method for the attached-packages slot of a SwitchrCtx.
setGeneric("attachedPkgs<-", function(seed, value) standardGeneric("attachedPkgs<-"))
setMethod("attachedPkgs<-", "SwitchrCtx", function(seed, value) {
    ## replacement functions must return the (modified) object
    seed@attached = value
    seed
})
## Report a completed switch (or revert) to the user via message().
setGeneric("announce", function(seed, reverted=FALSE) standardGeneric("announce"))
setMethod("announce", "SwitchrCtx", function(seed, reverted=FALSE) {
    message(sprintf("%s to the '%s' computing environment. \n%d packages are currently available.", ifelse(reverted, "Reverted", "Switched"),
                    seed@name, nrow(seed@packages)))
    if(seed@exclude.site)
        message("Packages installed in your site library ARE suppressed.")
    message("To switch back to your previous environment type switchBack()")
})
## Human-readable summary of a SwitchrCtx: name, library location(s),
## package count, and whether the site library is combined when active.
setMethod("show", "SwitchrCtx", function(object) {
    message(paste(sprintf("An SwitchrCtx object defining the '%s' computing environment", object@name),
                  "\n\n\t", sprintf("Primary library location(s): %s", paste(object@libpaths, collapse=";")),
                  "\n\t", sprintf("Packages: %d packages installed in %d directories (including R's base library)", nrow(object@packages), length(unique(object@packages$LibPath))),
                  "\n\t", paste("This environment DOES ", ifelse(object@exclude.site, "NOT ", ""), "combine with the current site library location when loaded.", sep=""),
                  "\n\n"))
})
##' switchBack
##'
##' A convenience function to switch back to the previously used computing
##' environment.
##' @export
switchBack = function() {
    ## The stack holds the current environment at position 1; position 2 is
    ## the one we were in before. Without it there is nothing to revert to.
    if (length(Renvs$stack) >= 2) {
        switchTo(Renvs$stack[[2]], reverting = TRUE)
    } else {
        warning("No previous computing environment to switch back to. Computing environment will remain unchanged")
        NULL
    }
}
##' currentCompEnv
##'
##' Display the computing environment currently in use. If switchTo has not been
##' called, a new SwitchrCtx object describing the current environment is
##' created.
##' @export
currentCompEnv = function() {
    ## Lazily initialise the environment stack: the first entry captures the
    ## user's original library paths, with base and site libraries excluded.
    if(is.null(Renvs$stack)) {
        lp = .libPaths()
        lp = lp[!(lp %in% .Library | lp %in% .Library.site)]
        Renvs$stack = list(original = SwitchrCtx("original",
                                                 libpaths = lp , seed = NULL,
                                                 exclude.site=FALSE))
    }
    ## the head of the stack is always the environment currently in use
    Renvs$stack[[1]]
}
## Set the full library search path, optionally suppressing the site library.
## .libPaths() always re-appends .Library.site internally, so this runs a
## *copy* of .libPaths whose enclosing environment shadows .Library.site with
## an empty vector when exclude.site = TRUE. Fragile but deliberate.
.libPaths2 = function(fulllp, exclude.site=TRUE) {
    fun = .libPaths
    lst = list()
    lst$.Library.site = if(exclude.site) character() else .Library.site
    ## parent = environment(.libPaths) keeps every other binding resolvable
    environment(fun) = list2env(lst,
                                parent = environment(.libPaths))
    fun(fulllp)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/atlantisfmsy_ini.R
\name{atlantis_fdistri}
\alias{atlantis_fdistri}
\title{Atlantis F distribution.}
\usage{
atlantis_fdistri(
func_grp,
model_path,
harvest_filename,
fishing_para,
raw.distri = F
)
}
\arguments{
\item{func_grp}{The code of the Atlantis functional group for which Fmsy will
be estimated.}
\item{model_path}{The directory of the calibrated model (containing all the
parameter files and one batch file. Forcing files can be stored in a direct
parent directory of model_path). \strong{WARNING:} Only working if the forcing
folder is in the main model directory \code{model_path} or if it is in the
direct parent directory. If not please either modify this package or modify
the path structure of your Atlantis input forcing parameters file.}
\item{harvest_filename}{The name of the harvest parameters file with its
extension (ex: file.prm).}
\item{fishing_para}{A dataframe containing data from the fishing fleet csv
file plus two extra columns, one to indicate if the fleet is active (1) or
not (0) named \code{active_flt} and one named \code{effortmodel} containing
the effortmodel option used for each fleet (output from the function
\code{\link{atlantis_checkf}}).}
\item{raw.distri}{If TRUE return the raw value of fishing mortality (F) for
active fleets (non-active fleets are set to 0), else the distribution of F is
returned (vector of values between [0-1] and their sum is 1). \strong{Default:}
FALSE.}
}
\value{
\code{f_prop} A vector with the proportion of fishing pressure
applied per each fleet. The order of the fleets is the same as the one used
in Atlantis.
}
\description{
Extracts the fishing mortality distribution across fleets in
Atlantis. It opens the harvest parameters file, selects the \code{mFC_XXX}
vector for the considered functional group \code{func_grp} and returns the
proportion of the total F allocated to each fleet. If the functional group
\code{func_grp} is unfished in the calibrated model this function will
attribute fishing pressure evenly across active fleets.
}
\examples{
atlantis_fdistri("COD", "C:/Atlantis/AtlantisEEC/AtlantisEECF_v3",
"AEEC_harvest.prm", fishing_para)
atlantis_fdistri("COD", "/home/Atlantis/AtlantisEEC/AtlantisEECF_v3",
"AEEC_harvest.prm", fishing_para)
}
\seealso{
\code{\link{atlantis_openfile}} to open a parameters file and select
a parameter.
}
| /man/atlantis_fdistri.Rd | no_license | rgirardi/atlantisfmsy | R | false | true | 2,439 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/atlantisfmsy_ini.R
\name{atlantis_fdistri}
\alias{atlantis_fdistri}
\title{Atlantis F distribution.}
\usage{
atlantis_fdistri(
func_grp,
model_path,
harvest_filename,
fishing_para,
raw.distri = F
)
}
\arguments{
\item{func_grp}{The code of the Atlantis functional group for which Fmsy will
be estimated.}
\item{model_path}{The directory of the calibrated model (containing all the
parameter files and one batch file. Forcing files can be stored in a direct
parent directory of model_path). \strong{WARNING:} Only working if the forcing
folder is in the main model directory \code{model_path} or if it is in the
direct parent directory. If not please either modify this package or modify
the path structure of your Atlantis input forcing parameters file.}
\item{harvest_filename}{The name of the harvest parameters file with its
extension (ex: file.prm).}
\item{fishing_para}{A dataframe containing data from the fishing fleet csv
file plus two extra columns, one to indicate if the fleet is active (1) or
not (0) named \code{active_flt} and one named \code{effortmodel} containing
the effortmodel option used for each fleet (output from the function
\code{\link{atlantis_checkf}}).}
\item{raw.distri}{If TRUE return the raw value of fishing mortality (F) for
active fleets (non-active fleets are set to 0), else the distribution of F is
returned (vector of values between [0-1] and their sum is 1). \strong{Default:}
FALSE.}
}
\value{
\code{f_prop} A vector with the proportion of fishing pressure
applied per each fleet. The order of the fleets is the same as the one used
in Atlantis.
}
\description{
Extracts the fishing mortality distribution across fleets in
Atlantis. It opens the harvest parameters file, selects the \code{mFC_XXX}
vector for the considered functional group \code{func_grp} and returns the
proportion of the total F allocated to each fleet. If the functional group
\code{func_grp} is unfished in the calibrated model this function will
attribute fishing pressure evenly across active fleets.
}
\examples{
atlantis_fdistri("COD", "C:/Atlantis/AtlantisEEC/AtlantisEECF_v3",
"AEEC_harvest.prm", fishing_para)
atlantis_fdistri("COD", "/home/Atlantis/AtlantisEEC/AtlantisEECF_v3",
"AEEC_harvest.prm", fishing_para)
}
\seealso{
\code{\link{atlantis_openfile}} to open a parameters file and select
a parameter.
}
|
source('./MMS_module/Functions/Storage_MMS.R')
source('./MMS_module/Functions/Grazing_MMS.R')
source('./MMS_module/Functions/Housing_MMS.R')
source('./MMS_module/Functions/Spreading_MMS.R')
source('./MMS_module/Functions/Nex_computation.R')
aggregate_totals <- function(year, subfolder, subfolderX2, manure_type, animal_class_list) {
    ## aggregates all gaseous losses from manure management systems
    ## OR
    ## aggregates the available N in gross manure spreading (i.e., wo/ spreading Nh3 emissions)
    ## or
    ## aggregates N excreted onto pastures
    ## Unit: kg N yr-1
    ##
    ## Args:
    ##   year              inventory year used to locate the output files
    ##   subfolder         MMS output subfolder
    ##   subfolderX2       nested subfolder; also drives the file-name pattern
    ##   manure_type       manure type used in the file-name pattern
    ##   animal_class_list optional subset of animal classes (e.g. ruminants
    ##                     only); when missing, all classes are aggregated
    calc_df <- create_main_csv()
    ## this allows the specification into ruminants and non-ruminants to
    ## distribute the manure. FIX: the scalar condition previously used
    ## vectorised ifelse() with assignments as side effects; plain if/else is
    ## the correct construct, and `== TRUE` is redundant on a logical.
    if (missing(animal_class_list)) {
        animal_class <- get_animal_classes()
    } else {
        animal_class <- animal_class_list
    }
    for (i in animal_class) {
        select_file <- get_MMS_subfolder_output_file(subfolder = subfolder,
                                                     subfolderX2 = subfolderX2,
                                                     year = year,
                                                     file_pattern = file_pattern(subfolderX2, i, manure_type))
        select_file <- data_cleaning(select_file)
        # total N for this animal class: row sums over the data columns
        # (columns 1-3 are assumed to be identifiers -- TODO confirm)
        calc_df[, i] <- round(rowSums(select_file[, seq(4, ncol(select_file))]), 0)
        colnames(calc_df)[ncol(calc_df)] <- i
    }
    ## grand total across all aggregated animal classes
    calc_df$TOTAL <- round(rowSums(calc_df[, seq(4, ncol(calc_df))]), 0)
    return(calc_df)
}
| /MMS_module/Functions/Totals_aggregation_MMS.R | permissive | shekharsg/MITERRA-PORTUGAL | R | false | false | 1,508 | r | source('./MMS_module/Functions/Storage_MMS.R')
source('./MMS_module/Functions/Grazing_MMS.R')
source('./MMS_module/Functions/Housing_MMS.R')
source('./MMS_module/Functions/Spreading_MMS.R')
source('./MMS_module/Functions/Nex_computation.R')
aggregate_totals <- function(year, subfolder, subfolderX2, manure_type, animal_class_list) {
    ## aggregates all gaseous losses from manure management systems
    ## OR
    ## aggregates the available N in gross manure spreading (i.e., wo/ spreading Nh3 emissions)
    ## or
    ## aggregates N excreted onto pastures
    ## Unit: kg N yr-1
    ##
    ## animal_class_list is optional: when missing, all animal classes are used.
    calc_df <- create_main_csv()
    # this allows the specification into ruminants and non-ruminants to distribute the manure
    # NOTE(review): scalar condition -- plain if/else would be more idiomatic
    # than vectorised ifelse(); `== TRUE` is redundant on a logical.
    ifelse(missing(animal_class_list)==TRUE,
           animal_class <- get_animal_classes(),
           animal_class <- animal_class_list)
    for (i in animal_class) {
        select_file <- get_MMS_subfolder_output_file(subfolder = subfolder,
                                                     subfolderX2 = subfolderX2,
                                                     year = year,
                                                     file_pattern = file_pattern(subfolderX2, i, manure_type))
        select_file <- data_cleaning(select_file)
        # calculate total N excreted onto pastures from an animal class
        # (row sums over the data columns; cols 1-3 assumed identifiers -- TODO confirm)
        calc_df[, i] <- round(rowSums(select_file[, seq(4,ncol(select_file))]), 0)
        colnames(calc_df)[ncol(calc_df)] <- i
    }
    ## grand total across all aggregated animal classes
    calc_df$TOTAL <- round(rowSums(calc_df[, seq(4, ncol(calc_df))]), 0)
    return(calc_df)
}
|
# Sanity checks that the package datasets load with the expected dimensions.
test_that("data loads", {
  expect_equal(dim(coupleIQs), c(10, 3))
  expect_equal(dim(speed_voice), c(2, 3))
})
| /tests/testthat/test_data_load.R | no_license | TysonStanley/educ6600 | R | false | false | 129 | r |
# Sanity checks that the package datasets load with the expected dimensions.
test_that("data loads", {
# coupleIQs should be 10 rows x 3 columns
dim(coupleIQs) %>%
expect_equal(c(10, 3))
# speed_voice should be 2 rows x 3 columns
dim(speed_voice) %>%
expect_equal(c(2, 3))
})
|
## ----setup, echo=FALSE, warning = FALSE, message = FALSE-----------------
#NZJFMS manuscript
#15032017
#Anthony Davidson
#packages
# NOTE(review): tidyverse already attaches dplyr and ggplot2, so the two
# explicit calls below are redundant (harmless, left as-is).
library(boot)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(qpcR)
library(pwr)
library(ggthemes)
library(gridExtra)
#set working dir
#laptop
# github file location
# setwd("C:/Users/s435389/R_packages/Davidson_2017_SRWrepro/Data")
#setwd("C:/Users/s435389/Dropbox/dqNZJMFR Publication Calving interval/NZJMFR Calving Analysis/Final code for publication")
#desktop
#setwd("C:/Users/s435389/Dropbox (Population_Stats)/NZJMFR Publication Calving interval/NZJMFR Calving Analysis/Final code for publication")
###############################DATA####################################
# Raw southern right whale calving-interval records, one row per female.
Data<- read.csv("./R/Data/RawCI.csv", header=T, quote="\"")
#Structure of data set
#str(Data)
#year recorded
Year<-unique(Data$Calves.1)
#calving interval observed in 2010
# The *a vectors are hard-coded copies of the observed intervals; the
# filter()-based versions below derive the same values from Data.
year2010a<-c(3,3,2)
year2010 <- filter(Data,Calves.1 < 2011)
year2010 <- year2010$Interval.1[!is.na(year2010$Interval.1)]
#calving interval observed in 2011
year2011a<-c(3,3,2,3,3,3,3,3,3,3,3,3,3,3,2)
year2011 <- filter(Data,Calves.1 < 2012)
year2011 <- year2011$Interval.1[!is.na(year2011$Interval.1)]
#calving interval observed in 2012
year2012a<-c(3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,
             6,4,4,4,4,4,3,3,3,3)
year2012 <- filter(Data,Calves.1 < 2013)
year2012 <- year2012$Interval.1[!is.na(year2012$Interval.1)]
#calving interval observed in 2013
year2013a<-c(3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,
             6,4,4,4,4,4,3,3,3,3,
             6,5,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,2,2)
# 2013 combines both interval columns (first and second recorded intervals)
full <- c(Data$Interval.1,Data$Interval.2)
year2013<- full[!is.na(unlist(full))]
## ----basic, echo=FALSE, message=FALSE, warning=FALSE---------------------
#Confidence intervals
# For each accumulation year: mean, sd, standard error (SE), and a two-sided
# 95% t-based confidence interval for the mean calving interval.
# NOTE(review): the same five statistics are computed four times; a small
# helper function would remove the copy/paste (left unchanged here).
#2010
mean2010<-sum(year2010)/length(year2010)
s2010<-sd(year2010)
SE2010<-s2010/(sqrt(length(year2010)))
n2010<-(length(year2010))
low.qt2010 <- mean2010-(qt(0.975,length(year2010))*SE2010)
high.qt2010 <- mean2010+(qt(0.975,length(year2010))*SE2010)
#2011
mean2011<-sum(year2011)/length(year2011)
s2011<-sd(year2011)
SE2011<-s2011/(sqrt(length(year2011)))
n2011<-(length(year2011))
low.qt2011 <- mean2011-(qt(0.975,length(year2011))*SE2011)
high.qt2011 <- mean2011+(qt(0.975,length(year2011))*SE2011)
#2012
mean2012<-sum(year2012)/length(year2012)
s2012<-sd(year2012)
SE2012<-s2012/(sqrt(length(year2012)))
n2012<-(length(year2012))
low.qt2012 <- mean2012-(qt(0.975,length(year2012))*SE2012)
high.qt2012 <- mean2012+(qt(0.975,length(year2012))*SE2012)
#2013
mean2013<-sum(year2013)/length(year2013)
s2013<-sd(year2013)
SE2013<-s2013/(sqrt(length(year2013)))
n2013<-(length(year2013))
low.qt2013 <- mean2013-(qt(0.975,length(year2013))*SE2013)
high.qt2013 <- mean2013+(qt(0.975,length(year2013))*SE2013)
#Makes data frame to plot
# One row per year: sample size, mean, 95% CI bounds, and sd.
n <- c(length(year2010),length(year2011),length(year2012),length(year2013))
mY <- c(mean(year2010),mean(year2011),mean(year2012),mean(year2013))
year <- Year
low.qt <- c(low.qt2010,low.qt2011,low.qt2012,low.qt2013)
high.qt <- c(high.qt2010,high.qt2011,high.qt2012,high.qt2013)
sd <- c(s2010,s2011,s2012,s2013)
sum.dat <- cbind(year,n,mY,low.qt,high.qt,sd)
sum.dat <- as.data.frame(sum.dat)
## ----raw table, echo=FALSE-----------------------------------------------
# Render the per-year summary as a markdown table.
library(knitr)
kable(sum.dat, format = "markdown")
## ----raw graph, echo=FALSE, message=FALSE, warning=FALSE-----------------
#plot data
# Mean calving interval per year with 95% CI error bars.
ggplot(sum.dat, aes(y = mY, x = year)) +
  geom_point() +
  geom_line() +
  geom_errorbar(aes(ymin = low.qt, ymax = high.qt), width = 0.1) +
  theme_bw()
## ----raw graph 2, echo=FALSE, fig.height=6, fig.width=6, message=FALSE, warning=FALSE----
#PLOTS
# 2x2 grid of interval-frequency barplots, one panel per accumulation year.
par(mfrow=c(2,2))
plot(factor(year2010),xlim=c(0,6),ylim=c(0,40))
title(main="a)",sub="Sample size 3", ylab="Frequency",xlab="Calving interval",
      cex.main = 1.5, font.main= 4, col.main= "blue",
      cex.sub = 1, font.sub = 3, col.sub = "red")
box()
plot(factor(year2011),xlim=c(0,6),ylim=c(0,40))
title(main="b)",sub="Sample size 15", ylab="Frequency",xlab="Calving interval",col.main=4,cex.main = 1.5, font.main= 4, col.main= "blue",
      cex.sub = 1, font.sub = 3, col.sub = "red")
box()
plot(factor(year2012),xlim=c(0,6),ylim=c(0,40))
title(main="c)",sub="Sample size 25", ylab="Frequency",xlab="Calving interval",col.main=4,cex.main = 1.5, font.main= 4, col.main= "blue",
      cex.sub = 1, font.sub = 3, col.sub = "red")
box()
plot(factor(year2013),xlim=c(0,6),ylim=c(0,40))
title(main="d)",sub="Sample size 45", ylab="Frequency",xlab="Calving interval",col.main=4,cex.main = 1.5, font.main= 4, col.main= "blue",
      cex.sub = 1, font.sub = 3, col.sub = "red")
box()
## ----raw graph 3, echo=FALSE, fig.height=6, fig.width=6, message=TRUE, warning=TRUE----
library(qpcR)
#data in one way for plot
# cbind.na pads the unequal-length year vectors with NA so they fit one frame
rawdata <- qpcR:::cbind.na(year2010,year2011,year2012,year2013)
rawdata <- as.data.frame(rawdata)
#in correct format for ggplot2
# Reshape to long format: one row per observed interval, tagged with its year.
year2010 <- data.frame(year2010,year = c("2010"))
year2010 <- rename(year2010, interval = year2010, year = year )
year2011 <- data.frame(year2011,year = c("2011"))
year2011 <- rename(year2011, interval = year2011, year = year )
year2012 <- data.frame(year2012,year = c("2012"))
year2012 <- rename(year2012, interval = year2012, year = year )
year2013 <- data.frame(year2013,year = c("2013"))
year2013 <- rename(year2013, interval = year2013, year = year )
# NOTE(review): the year20xx vectors are overwritten with data frames here;
# later code must use year2013$interval rather than the bare vector.
ggplotraw <- rbind(year2010,year2011,year2012, year2013)
ggplotraw$interval <- as.numeric(as.character(ggplotraw$interval))
#sort(year2013$interval) - sort(sample.true)
# Publication-style bar chart of the full 2013 interval distribution.
ggplot(year2013,aes(x = interval)) +
  geom_bar(alpha = 1, width = 0.9,fill = "black") +
  xlab(expression("Calving"~"interval"~(italic("years")))) +
  ylab(expression("Total"~"number"~"of"~"observations"~(italic("n")))) +
  scale_y_continuous(breaks = c(0,5,10,15,20,25,30), limits = c(0,30)) +
  theme(axis.line = element_line(colour = 'black', size = 0.65),
        axis.ticks = element_line(colour = "black", size = 0.65),
        panel.border = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        legend.key = element_blank(),
        strip.background = element_rect(fill = "white", colour = "black", size = 1),
        panel.background = element_rect(fill = "white",
                                        colour = NA),
        axis.text = element_text(size = rel(0.8),
                                 colour = "black"))
#PLOTS
#code to store figure
# png("Figure_2_NZSRW_calving_interval_2017_highres.png", width = 12, height = 14.8, units = 'cm', res = 1200)
# dev.off()
## ----missing intervals, echo=FALSE, fig.height=10, message=FALSE, warning=FALSE----
#################################Missing calving intervals################
#Intervals modified by accounting for missed intervals
#Bradford et al. 2008
# Sensitivity scenarios: long observed intervals (>=4 yr) may hide missed
# calvings, so they are replaced with plausible shorter combinations.
#Raw Data
RealCI <- as.numeric(year2013$interval)
#Confidence interval
xlong <- RealCI
meanlong<-sum(xlong)/length(xlong)
slong<-sd(xlong)
SElong<-slong/(sqrt(length(xlong)))
nlong<-(length(xlong))
#Standard error and confidence intervals
#2 sided t value at the 95% level = 2.093
lowqtlong <- meanlong-(qt(0.975,nlong)*SElong)
highqtlong <- meanlong+(qt(0.975,nlong)*SElong)
####################MED CI########################################
# 2x 6's and 1x 5 replaced with 3threes
# NOTE(review): six values are appended below while the comment mentions
# three replacements -- confirm the intended substitution against the paper.
MedCI <- c(RealCI[RealCI < 5],3,3,3,3,2,3)
#sort(MedCI)
xmed<-MedCI
meanmed<-sum(xmed)/length(xmed)
smed<-sd(xmed)
SEmed<-smed/(sqrt(length(xmed)))
nmed<-(length(xmed))
#Standard error and confidence intervals
lowqtmed <- meanmed-(qt(0.975,length(xmed))*SEmed)
highqtmed <- meanmed+(qt(0.975,length(xmed))*SEmed)
############################SHORT CI##################################
#6,5 replaced with 2 year intervals
LowCI <- c(RealCI[RealCI < 4],3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2)
xshort<-LowCI
meanshort<-mean(xshort)
sshort<-sd(xshort)
SEshort<-sshort/(sqrt(length(xshort)))
#Standard error and confidence intervals
lowqtshort <- meanshort-(qt(0.975,length(xshort))*SEshort)
highqtshort <- meanshort+(qt(0.975,length(xshort))*SEshort)
# Side-by-side frame of the three scenarios (NA-padded to equal length)
bdata <-qpcR:::cbind.na(RealCI,MedCI,LowCI)
bdata <- as.data.frame(bdata)
#Structure of data set
#str(bdata)
## ----missing intervals plot, echo=FALSE, fig.height=3.5, fig.width=5.5, message=FALSE, warning=FALSE----
#Basic plots
# Frequency barplots for the three missed-interval scenarios.
par(mfrow=c(1,3))
plot(factor(bdata$LowCI),main="Lowest possible interval")
plot(factor(bdata$MedCI), main="Medium possible interval")
plot(factor(bdata$RealCI),main="Observed interval")
## ----missing intervals plot2, fig.height=5.5, fig.width=4.5, message=FALSE, warning=FALSE, include=FALSE----
#Density basic plots
par(mfrow=c(3,1))
plot(density(as.numeric(as.character(LowCI)),bw=.5), main="Lowest possible interval")
plot(density(as.numeric(as.character(MedCI)),bw= 0.5), main="Medium possible interval")
plot(density(as.numeric(as.character(RealCI)),bw = 0.5),main="Observed interval")
## ----missing intervals table, fig.height=8, message=FALSE, warning=FALSE, include=FALSE----
###################################SUMMARY############################
#Pull out important information
# NOTE(review): this first Sumtable is immediately overwritten by the cbind
# version built below; it never reaches the output.
Sumtable<-data.frame(variable = c("low.qt","mean","high.qt","sd", "SE"), short=c(lowqtshort,meanshort,highqtshort,sshort,SEshort),
                     medium=c(lowqtmed,meanmed,highqtmed,smed,SEmed),
                     real=c(lowqtlong,meanlong,highqtlong,slong,SElong))
#Make dataframe to plot
n <- c(length(LowCI),length(MedCI),length(year2013$interval))
mY <- c(mean(LowCI),mean(MedCI),mean(year2013$interval))
interval <-c("Low", "Medium","Observed")
low.qt <- c(lowqtshort,lowqtmed,low.qt2013)
high.qt <- c(highqtshort,highqtmed,high.qt2013)
sd <- c(sshort,smed,s2013)
Sumtable <- cbind(interval,n,mY,low.qt,high.qt,sd)
Sumtable <- as.data.frame(Sumtable)
# cbind() coerced everything to character, so restore numeric columns
Sumtable$n <- as.numeric(as.character(Sumtable$n))
Sumtable$mY <- as.numeric(as.character(Sumtable$mY))
Sumtable$low.qt <- as.numeric(as.character(Sumtable$low.qt))
Sumtable$high.qt <- as.numeric(as.character(Sumtable$high.qt))
Sumtable$sd <- as.numeric(as.character(Sumtable$sd))
Sumtable$interval <- as.character(Sumtable$interval)
## ----missing intervals plot3, echo=FALSE, fig.height=4, message=FALSE, warning=FALSE----
# Mean interval with 95% CI for each missed-interval scenario.
ggplot(Sumtable, aes(y = mY, x = interval)) +
  geom_point(size = 5) +
  geom_errorbar(aes(ymin = low.qt, ymax = high.qt), width = 0.05,size = 1, alpha = 0.5) +
  scale_y_continuous(breaks = round(seq(2.3, 3.6, by = 0.2),1)) +
  labs(y = "Mean calving interval",x = "Calving interval modification" ) +
  geom_point(size = 3) +
  theme_classic() +
  theme_hc() +
  theme(legend.position="none")
## ----missing_data_table, echo=FALSE--------------------------------------
library(knitr)
kable(Sumtable, format = "markdown",col.names = c("Interval","Sample size", "Mean", "Lower limit", "Higher limit", "SD"))
## ----srw_data_table, echo=FALSE------------------------------------------
library(knitr)
# setwd("C:/Users/s435389/R_packages/Davidson_2017_SRWrepro/Data")
# Published southern right whale calving-interval estimates for comparison.
srwdat <- read.csv(file = "./R/Data/srw_data.csv")
#str(srwdat)
kable(srwdat, format = "markdown",col.names = c("Sample size","Mean", "Lower limit", "Higher limit", "SE","Author", "Location"))
## ----bootstrap single, echo=FALSE, fig.height=5--------------------------
############################NZ Simple sample##############################
#WITH replacement
# to try and match number of intervals observed in other populations
# find references
# Resample sizes roughly matching published studies (SA, Argentina, Australia)
# plus smaller sizes for comparison.
SAreps <- 1500
ARreps <- 800
Aussiereps <- 2000
low <- 1000
verylow <- 100
lowest <- 10
#Very raw plots
# One single bootstrap resample per sample size, shown as frequency barplots.
# NOTE(review): no set.seed() anywhere, so these figures change on each run.
par(mfrow=c(2,3))
plot(factor(sample(year2013$interval,lowest,replace=T)),main = "3 intervals")
plot(factor(sample(year2013$interval,verylow,replace=T)),main = "10 intervals")
plot(factor(sample(year2013$interval,low,replace=T)),main = "30 intervals")
plot(factor(sample(year2013$interval,Aussiereps,replace=T)),main = "500 intervals")
plot(factor(sample(year2013$interval,ARreps,replace=T)),main = "800 intervals")
plot(factor(sample(year2013$interval,SAreps,replace=T)),main = "1500 intervals")
## ----bootstrap_multiple, echo=FALSE--------------------------------------
#do each one 1000 times
# For each sample size: draw `boots` bootstrap resamples (with replacement)
# of the 2013 intervals, one resample per row, and append the row means.
boots <- 1000
n <- c(1:1000)
###########################n10
var10 <- paste0("n_", 1:10)
sample10 <-matrix(data = NA, ncol = lowest, nrow = boots)
colnames(sample10) <- as.list(var10)
for (i in 1:boots) {
    sample10 [i, ] <- sample(year2013$interval,lowest,replace=T)
} #i
sample10 <- as.data.frame(sample10)
sample10 <- sample10 %>%
    mutate(mean10 = rowMeans(sample10))
# NOTE(review): sample10t is never used downstream
sample10t <- as.matrix(sample10)
sample10t <-t(sample10t)
#########################verylow sample size
#set up variable names
var100 <- paste0("n_", 1:100)
sample100 <-matrix(data = NA, ncol = verylow, nrow = boots)
colnames(sample100) <- as.list(var100)
for (i in 1:boots) {
    sample100 [i, ] <- sample(year2013$interval,verylow,replace=T)
} #i
sample100 <- as.data.frame(sample100)
sample100 <- sample100 %>%
    mutate(mean100 = rowMeans(sample100))
#########################middle one
#set up variable names
var500 <- paste0("n_", 1:500)
sample500 <-matrix(data = NA, ncol = 500, nrow = boots)
colnames(sample500) <- as.list(var500)
for (i in 1:boots) {
    sample500 [i, ] <- sample(year2013$interval,500,replace=T)
} #i
sample500 <- as.data.frame(sample500)
sample500 <- sample500 %>%
    mutate(mean500 = rowMeans(sample500))
#########################low sample size
#set up variable names
var1000 <- paste0("n_", 1:1000)
sample1000 <-matrix(data = NA, ncol = low, nrow = boots)
colnames(sample1000) <- as.list(var1000)
for (i in 1:boots) {
    sample1000 [i, ] <- sample(year2013$interval,low,replace=T)
} #i
sample1000 <- as.data.frame(sample1000)
sample1000 <- sample1000 %>%
    mutate(mean1000 = rowMeans(sample1000))
#########################AUS sample size
#set up variable names
varA <- paste0("n_", 1:2000)
sampleA <-matrix(data = NA, ncol = Aussiereps, nrow = boots)
colnames(sampleA) <- as.list(varA)
for (i in 1:boots) {
    sampleA [i, ] <- sample(year2013$interval,Aussiereps,replace=T)
} #i
sampleA <- as.data.frame(sampleA)
sampleA <- sampleA %>%
    mutate(meanA = rowMeans(sampleA))
# NOTE(review): sampleAt is never used downstream
sampleAt <- t(sampleA)
# force every column to numeric (guards against accidental factor/character)
for(i in c(1:ncol(sampleA))) {
    sampleA[,i] <- as.numeric(as.character(sampleA[,i]))
}
#Confidence intervals
# Empirical (percentile) 95% bootstrap CI for the mean of the n = 2000
# resamples: the 2.5th and 97.5th percentiles of the sorted bootstrap means.
# FIX: this six-line computation was previously duplicated verbatim; the
# second copy recomputed identical values and has been removed. The indices
# are now derived from nab instead of hard-coding 25/975 (identical when
# boots = 1000).
ab <- sort(sampleA$meanA)
nab <- length(ab)
#low bound: 2.5th percentile (index 25 when boots = 1000)
ab2.5 <- ab[ceiling(0.025 * nab)]
#high bound: 97.5th percentile (index 975 when boots = 1000)
ab0.97.5 <- ab[ceiling(0.975 * nab)]
## ----bootstrap plot2, fig.height=5, message=FALSE, warning=FALSE, include=FALSE----
#plot the data over each other to look at change in density
# Overlaid density curves of the bootstrap means: the sampling distribution
# tightens around the observed mean as the resample size grows.
par(mfrow=c(1,1))
#plot(density(sample3$mean3,bw = .15),lwd = 3,lyt = 5, main = "", xlab = "Calving interval", box = FALSE,axis = FALSE)
plot(density(sample10$mean10,bw = .05),col ="black", lty = 1, main = "", lwd = 5,ylim = c(0,8),xlim = c(2,4.5), axes=FALSE,xlab = "Calving interval")
lines(density(sample100$mean100,bw = .05),col ="black", lty = 2, lwd = 4)
lines(density(sample500$mean500,bw = .05),col ="black", lty = 3, lwd = 3)
lines(density(sample1000$mean1000,bw = .05),col ="black", lty = 4, lwd = 2)
lines(density(sampleA$meanA,bw = .05),col ="black", lty = 5, lwd = 1)
# NOTE(review): the cv values in this legend are hard-coded from a previous
# run; with no set.seed() they will not match a fresh execution exactly.
legend('topright',title = "Legend", c("n=10, cv=8.12 ", "n=100, cv=2.43", "n=500, c.v=1.15", "n=1000, cv=0.79", "n=2000, cv=0.56"),bty = "n",
       lty = c(1,2,3,4,5), lwd = c(5,4,3,2,1), cex=.75)
axis(1,lwd=2)
axis(2,lwd=2)
## ----final plot for publication1, echo=FALSE-----------------------------
#final [plot]
#size defined by NZJFMR
# 195 mm (h) ? 148 mm (w).
#ylab(expression("Total"~"number"~"of"~"observations"~(italic("n")))) +
# Publication version of the density overlay (different lty/lwd mapping).
plot(density(sample10$mean10,bw = .05),col ="black", lty = 3, main = "", lwd = 1,ylim = c(0,8),xlim = c(2.5,4.5), axes=FALSE, xlab = expression("Calving"~"interval"~(italic("years"))))
lines(density(sample100$mean100,bw = .05),col ="black", lty = 4, lwd = 1)
lines(density(sample500$mean500,bw = .05),col ="black", lty = 5, lwd = 1)
lines(density(sample1000$mean1000,bw = .05),col ="black", lty = 2, lwd = 1)
lines(density(sampleA$meanA,bw = .05),col ="black", lty = 1, lwd = 2)
legend(y = 8, x = 3.9,title = expression(bold("Sample size (n)")), c(expression(italic("n")~"="~"10"), expression(italic("n")~"="~"100"), expression(italic("n")~"="~"500"), expression(italic("n")~"="~"1000"), expression(italic("n")~"="~"2000")),bty = "n",
       lty = c(3,4,5,2,1), lwd = c(1,1,1,1,2), cex=1)
axis(1,lwd=2)
axis(2,lwd=2)
# PLOT CODE FOR PUBLICATION
# png("C:/Users/s435389/R_packages/Davidson_2017_SRWrepro/Figures/Figure_3_NZSRW_calving_interval_2017_lowres.png", width = 14.8, height = 14.8, units = 'cm', res = 400)
# dev.off()
#
#
# png("C:/Users/s435389/R_packages/Davidson_2017_SRWrepro/Figures/Figure_3_NZSRW_calving_interval_2017_highres.png", width = 14.8, height = 14.8, units = 'cm', res = 1200)
#
# plot(density(sample10$mean10,bw = .05),col ="black", lty = 3, main = "", lwd = 1,ylim = c(0,8),xlim = c(2.5,4.5), axes=FALSE,xlab = expression("Calving"~"interval"~(italic("years"))))
# lines(density(sample100$mean100,bw = .05),col ="black", lty = 4, lwd = 1)
# lines(density(sample500$mean500,bw = .05),col ="black", lty = 2, lwd = 1)
# lines(density(sample1000$mean1000,bw = .05),col ="black", lty = 5, lwd = 1)
# lines(density(sampleA$meanA,bw = .05),col ="black", lty = 1, lwd = 2)
# legend(y = 8, x = 3.9,title = expression(bold("Sample size (n)")), c(expression(italic("n")~"="~"10"), expression(italic("n")~"="~"100"), expression(italic("n")~"="~"500"), expression(italic("n")~"="~"1000"), expression(italic("n")~"="~"2000")),bty = "n",
# lty = c(3,4,2,5,1), lwd = c(1,1,1,1,2), cex=1)
# axis(1,lwd=2)
# axis(2,lwd=2)
#
# dev.off()
## ----referee_comment_1, echo=TRUE----------------------------------------
# Power analysis (referee request): with the observed n = 45 and SD, how
# large a shift in the mean could a one-sided t-test detect at alpha = 0.05?
#observed sample
rev.one <- bdata$RealCI[1:45]
#sample 45 times
sample.true <- year2013$interval
#power analysis
pwr.test.results <- power.t.test(n = 45,# sample size
delta = seq(0,0.99,0.001), #difference between means
sd = sd(sample.true), #observed variation
alternative = "one.sided", #observed test type
sig.level = 0.05) #significance level
#additional packages are available for more complex analysis
#but have not done this as don't think it is needed
## ----referee_comment_1_plot, echo=FALSE, message=FALSE, warning=FALSE----
#sort data into ggplot format
pwr.analysis <- as.data.frame(cbind(
pwr.test.results$power,
pwr.test.results$delta))
colnames(pwr.analysis) <- c("Power","Mean.difference")
#sort data into ggplot format
pwr.analysis.1 <- pwr.analysis %>%
mutate(Alpha = 1- Power,
Mean.estimate = 3.31 + Mean.difference)
# %>%
# select(Alpha,Mean.estimate)
#work out where the cut-off is
a <- filter(pwr.analysis.1, Alpha < 0.05)
a[1,]
#plot data
# NOTE(review): xintercept = 3.903 is hard-coded from a previous run of
# a[1,] above — confirm it still matches the current data.
ggplot(data = pwr.analysis.1, aes(x = Mean.estimate, y = Alpha)) +
geom_line(size = 1.5) +
geom_vline(xintercept = 3.903, col = "blue") +
geom_hline(yintercept = 0.05) +
theme(axis.line = element_line(colour = 'black', size = 0.65),
axis.ticks = element_line(colour = "black", size = 0.65),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.key = element_blank(),
strip.background = element_rect(fill = "white", colour = "black", size = 1),
panel.background = element_rect(fill = "white",
colour = NA),
axis.text = element_text(size = rel(0.8),
colour = "black")) +
ggtitle("Raw data result plot (n = 45)")
## ----referee_comment_2_plot, echo=FALSE, message=FALSE, warning=FALSE----
# Power analysis for detecting the observed NZ-vs-Australia difference in
# mean calving interval across a range of sample sizes (n = 1..200).
#observed sample
rev.one <- bdata$RealCI[1:45]
#sample 45 times
sample.true <- year2013$interval
# Observed difference between the Australian mean (3.63) and the NZ mean
# (3.31). Renamed from `diff` so it no longer masks base::diff().
mean.diff <- 3.63-3.31 #observed mean of australian population
#power analysis
pwr.test.results <- power.t.test(n = seq(1,200,1),# sample size
delta = mean.diff, #difference between means
sd = sd(sample.true), #observed variation
alternative = "one.sided", #observed test type
sig.level = 0.05) #significance level
#additional packages are available for more complex analysis
#but have not done this as don't think it is needed
#sort data into ggplot format
pwr.analysis <- as.data.frame(cbind(
pwr.test.results$power,
pwr.test.results$n))
colnames(pwr.analysis) <- c("Power","Sample.size")
#sort data into ggplot format
pwr.analysis.1 <- pwr.analysis %>%
mutate(Alpha = 1- Power)
# %>%
# select(Alpha,Mean.estimate)
#work out where the cut-off is
a <- filter(pwr.analysis.1, Alpha < 0.05)
a[1,]
#plot data
# NOTE(review): xintercept = 153 is hard-coded from a previous run of
# a[1,] above — confirm it still matches.
ggplot(data = pwr.analysis.1, aes(x = Sample.size, y = Alpha)) +
geom_line(size = 1.5) +
geom_vline(xintercept = 45, col = "red") +
geom_vline(xintercept = 153, col = "blue") +
geom_hline(yintercept = 0.05) +
scale_y_continuous(limits = c(0,1)) +
theme(axis.line = element_line(colour = 'black', size = 0.65),
axis.ticks = element_line(colour = "black", size = 0.65),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.key = element_blank(),
strip.background = element_rect(fill = "white", colour = "black", size = 1),
panel.background = element_rect(fill = "white",
colour = NA),
axis.text = element_text(size = rel(0.8),
colour = "black")) +
ggtitle("Observed difference between Australian and NZ mean")
## ----missed individuals 1, echo=FALSE------------------------------------
# Load the raw sighting histories (wide, one column per year) and the
# calving-interval dataset keyed by individual ID.
dat <- read.csv("./R/Data/raw_observations_2012.csv")
#data structure
glimpse(dat)
head(dat)
#And the second dataset
dat1<- read.csv("./R/Data/RawCI.csv", header=T, quote="\"")
#data structure
glimpse(dat1)
## ----missed individuals 2, echo=FALSE, message=FALSE, warning=FALSE------
##I can then modify this data to
#restructure dataset of capture to long dataset
dat3 <- dplyr::select(dat, ID, X2006:X2012)%>%
gather(year, count,X2006:X2012)
#add data on calves
dat4 <- full_join(dat3,dat1, by = "ID")
dat5 <- dplyr::select(dat4,ID,year,count,Yr.first.seen,Calves,Calves.1,Calves.2)
dat6 <- filter(dat5,count >0)
glimpse(dat6)
# Strip the "X" prefix that read.csv added to the year columns
# ("X2006" -> "2006"); replaces the original seven chained ifelse() calls
# with a single equivalent sub().
dat7 <- mutate(dat6, year = sub("X", "", year, fixed = TRUE))
# Known mothers (Yr.first.seen recorded) sighted in years other than their
# recorded calving years — sightings where a calf could have been missed.
a <- group_by(dat7, ID, Yr.first.seen) %>%
mutate(mother = ifelse(Yr.first.seen > 0, 1, 0)) %>%
filter(mother == 1) %>%
ungroup() %>%
dplyr::select(ID,year,Calves,Calves.1) %>%
filter(Calves.1<2013) %>%
filter(!year == Calves) %>%
filter(!year ==Calves.1)
a
## ----referee_comment3, echo=TRUE, message=FALSE, warning=FALSE-----------
# Sensitivity check: drop the (possibly mis-scored) 2-year intervals and
# recompute the mean calving interval with a 95% t-based CI.
greater.than.2 <- sample.true[sample.true>2]
#greater.than.2
mean.2<-sum(greater.than.2)/length(greater.than.2)
s.2<-sd(greater.than.2)
# BUG FIX: the original divided the *full-sample* SD (s2013) by the subset's
# sqrt(n); the standard error of the subset mean must use the subset SD
# (s.2, computed above and previously unused).
SE.2<-s.2/(sqrt(length(greater.than.2)))
n.2<-length(greater.than.2)
low.qt.2<- mean.2-(qt(0.975,length(greater.than.2))*SE.2)
high.qt.2 <- mean.2+(qt(0.975,length(greater.than.2))*SE.2)
#add it to the table from bradford data
Sumtable[4,] <- c("miss2year",n.2,mean.2,low.qt.2,
high.qt.2,sd(greater.than.2))
## ----different missing intervals 1, echo=TRUE----------------------------
########################### 2.2%
# Bootstrap the mean calving interval under reduced detection: drop roughly
# 2%, 5% and 10% of the 45 observed intervals (rounded up) and resample.
#parameters
boots <- 1000
n <- c(1:1000)
###round all percentages upwards
detect1 <- 44 # (45*1.02) - 45 = 0.9
detect2 <- 42 # (45*1.05) - 45 = 2.25
detect3 <- 40 # (45*1.10) - 45 = 4.5
# NOTE(review): `sample10` below overwrites the bootstrap data frame of the
# same name built earlier — fine in the current chunk order, but fragile.
sample2 <-rep(NA, 1000)
sample5 <-rep(NA, 1000)
sample10 <-rep(NA, 1000)
for (i in 1:boots) {
sample2[i]<-mean(sample(year2013$interval,detect1,replace=T))
sample5[i]<-mean(sample(year2013$interval,detect2,replace=T))
sample10[i]<-mean(sample(year2013$interval,detect3,replace=T))
} #i
######################estimates##############
# Empirical 2.5%, 50% and 97.5% percentiles (ranks 25, 500, 975 of 1000).
sample2 <- sort(sample2)
#low = 25/1000
sample2.2.5 <- sample2[25]
#median
sample2.50 <- sample2[500]
#high = 975/1000
sample2.975 <- sample2[975]
sample5 <- sort(sample5)
#low = 25/1000
sample5.2.5 <- sample5[25]
#median
sample5.50 <- sample5[500]
#high = 975/1000
sample5.975 <- sample5[975]
sample10 <- sort(sample10)
#low = 25/1000
sample10.2.5 <- sample10[25]
#median
sample10.50 <- sample10[500]
#high = 975/1000
sample10.975 <- sample10[975]
#add it to the table from bradford data
Sumtable[5,] <- c("detect1",detect1,sample2.50,sample2.2.5,sample2.975,NA)
Sumtable[6,] <- c("detect2",detect2,sample5.50,sample5.2.5,sample5.975,NA)
Sumtable[7,] <- c("detect5",detect3,sample10.50,sample10.2.5,sample10.975,NA)
## ----detection sim.2-----------------------------------------------------
# Rebuild per-individual calving histories as wide ID x year count tables:
# one block each for the first (Calves), second (Calves.1) and third
# (Calves.2) recorded calf, padded with zero columns so all three share the
# same y2006..y2013 layout before being stacked and summed.
#be very careful as Dat is just IDS and no id of females with calves
#BUT Data is identified females...
length(Data$ID)
length(dat$ID)
glimpse(Data)
dat.detect <- dplyr::select(Data,ID,Calves,Calves.1, Calves.2) %>%
mutate(Calves = factor(Calves),
Calves.1 = factor(Calves.1),
Calves.2 = factor(Calves.2))
a <- as.data.frame.matrix(table(Data$ID,Data$Calves))
head(a)
a[,7] <-row.names(a)
colnames(a)[1] <- "y2006"
colnames(a)[2] <- "y2007"
colnames(a)[3] <- "y2008"
colnames(a)[4] <- "y2009"
colnames(a)[5] <- "y2010"
colnames(a)[6] <- "y2011"
colnames(a)[7] <- "ID"
a[,8] <- 0
colnames(a)[8] <- "y2012"
a[,9] <- 0
colnames(a)[9] <- "y2013"
a <- dplyr::select(a,ID,y2006,y2007,y2008, y2009, y2010, y2011, y2012, y2013)
b <- as.data.frame.matrix(table(Data$ID,Data$Calves.1))
head(b)
b[,5] <-row.names(b)
colnames(b)[5] <- "ID"
b[,6] <- 0
colnames(b)[6] <- "y2006"
b[,7] <- 0
colnames(b)[7] <- "y2007"
b[,8] <- 0
colnames(b)[8] <- "y2008"
b[,9] <- 0
colnames(b)[9] <- "y2009"
colnames(b)[1] <- "y2010"
colnames(b)[2] <- "y2011"
colnames(b)[3] <- "y2012"
colnames(b)[4] <- "y2013"
b <- dplyr::select(b,ID,y2006,y2007,y2008, y2009, y2010, y2011, y2012, y2013)
# NOTE(review): `c` as a variable name shadows base::c as a value; calls to
# the *function* c(...) still resolve correctly, but renaming would be safer.
c <- as.data.frame.matrix(table(Data$ID,Data$Calves.2))
head(c)
colnames(c)[1] <- "y2013"
c[,2] <-row.names(c)
colnames(c)[2] <- "ID"
c[,3] <- 0
colnames(c)[3] <- "y2006"
c[,4] <- 0
colnames(c)[4] <- "y2007"
c[,5] <- 0
colnames(c)[5] <- "y2008"
c[,6] <- 0
colnames(c)[6] <- "y2009"
c[,7] <- 0
colnames(c)[7] <- "y2010"
c[,8] <- 0
colnames(c)[8] <- "y2011"
c[,9] <- 0
colnames(c)[9] <- "y2012"
c <- dplyr::select(c,ID,y2006,y2007,y2008, y2009, y2010, y2011, y2012,y2013)
countdat <- rbind(a,b,c)
glimpse(countdat)
# head(full.dat)
# Collapse the three stacked blocks to one calf-count row per individual.
full.dat <- group_by(countdat, ID) %>%
summarise(y2006 = sum(y2006),
y2007 = sum(y2007),
y2008 = sum(y2008),
y2009 = sum(y2009),
y2010 = sum(y2010),
y2011 = sum(y2011),
y2012 = sum(y2012),
y2013 = sum(y2013))
# Leftover scratch arithmetic from the original script (prints 6).
2012-2006
##checking....
sort(Data$ID)
filter(Data, ID == "AI06022")
filter(Data, ID == "AI08340")
filter(Data, ID == "AI08343")
head(Data)
# glimpse(c)
# Data$Calves.1,
# # Spread and gather are complements
# df <- data.frame(x = c("a", "b"), y = c(3, 4), z = c(5, 6))
# df %>% spread(x, y) %>% gather(x, y, a:b, na.rm = TRUE)
## ----different missing intervals 2---------------------------------------
# Sensitivity check in the other direction: append plausible *longer*
# intervals (one 5-year and two 6-year) to the observed sample.
longer5.6 <- c(sample.true,5,6,6)
#greater.than.2
mean.56<-sum(longer5.6)/length(longer5.6)
s.56<-sd(longer5.6)
SE.56<-s.56/(sqrt(length(longer5.6)))
n.56<-(length(longer5.6))
# 95% t-based CI (degrees of freedom kept as n, matching the rest of the
# script).
low.qt.56<- mean.56-(qt(0.975,length(longer5.6))*SE.56)
high.qt.56 <- mean.56+(qt(0.975,length(longer5.6))*SE.56)
#add it to the table from bradford data
Sumtable[8,] <- c("longer.56",n.56,mean.56,low.qt.56,high.qt.56,sd(longer5.6))
###sort out numbering in dataframe
# Row-assignment with mixed types coerced every column to character;
# restore the numeric columns.
Sumtable <- as.data.frame(Sumtable)
Sumtable$n <- as.numeric(as.character(Sumtable$n))
Sumtable$mY <- as.numeric(as.character(Sumtable$mY))
Sumtable$low.qt <- as.numeric(as.character(Sumtable$low.qt))
Sumtable$high.qt <- as.numeric(as.character(Sumtable$high.qt))
Sumtable$sd <- as.numeric(as.character(Sumtable$sd))
Sumtable$interval <- as.character(Sumtable$interval)
## ----missing_data_table 2, echo=FALSE------------------------------------
library(knitr)
kable(Sumtable, format = "markdown",col.names = c("Interval","Sample size", "Mean", "Lower limit", "Higher limit", "SD"))
## ----referee_comment3_plot, echo=FALSE-----------------------------------
# Mean calving interval with 95% CI for each sensitivity scenario collected
# in Sumtable.
ggplot(Sumtable, aes(y = mY, x = interval)) +
geom_point(size = 5) +
geom_errorbar(aes(ymin = low.qt, ymax = high.qt), width = 0.05,size = 1, alpha = 0.5) +
scale_y_continuous(breaks = round(seq(2.3, 5, by = 0.2),1)) +
labs(y = "Mean calving interval",x = "Calving interval modification" ) +
geom_point(size = 3) +
theme_classic() +
theme_hc() +
theme(legend.position="none")
| /R/Rcode/Final_report_Davidson2017.R | permissive | davan690/R-for-population-dynamics | R | false | false | 29,418 | r | ## ----setup, echo=FALSE, warning = FALSE, message = FALSE-----------------
#NZJFMS manuscript
#15032017
#Anthony Davidson
#packages
library(boot)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(qpcR)
library(pwr)
library(ggthemes)
library(gridExtra)
#set working dir
#laptop
# github file location
# setwd("C:/Users/s435389/R_packages/Davidson_2017_SRWrepro/Data")
#setwd("C:/Users/s435389/Dropbox/dqNZJMFR Publication Calving interval/NZJMFR Calving Analysis/Final code for publication")
#desktop
#setwd("C:/Users/s435389/Dropbox (Population_Stats)/NZJMFR Publication Calving interval/NZJMFR Calving Analysis/Final code for publication")
###############################DATA####################################
Data<- read.csv("./R/Data/RawCI.csv", header=T, quote="\"")
#Structure of data set
#str(Data)
#year recorded
Year<-unique(Data$Calves.1)
#calving interval observed in 2010
year2010a<-c(3,3,2)
year2010 <- filter(Data,Calves.1 < 2011)
year2010 <- year2010$Interval.1[!is.na(year2010$Interval.1)]
#calving interval observed in 2011
year2011a<-c(3,3,2,3,3,3,3,3,3,3,3,3,3,3,2)
year2011 <- filter(Data,Calves.1 < 2012)
year2011 <- year2011$Interval.1[!is.na(year2011$Interval.1)]
#calving interval observed in 2012
year2012a<-c(3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,
6,4,4,4,4,4,3,3,3,3)
year2012 <- filter(Data,Calves.1 < 2013)
year2012 <- year2012$Interval.1[!is.na(year2012$Interval.1)]
#calving interval observed in 2013
year2013a<-c(3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,
6,4,4,4,4,4,3,3,3,3,
6,5,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,2,2)
full <- c(Data$Interval.1,Data$Interval.2)
year2013<- full[!is.na(unlist(full))]
## ----basic, echo=FALSE, message=FALSE, warning=FALSE---------------------
#Confidence intervals
#2010
mean2010<-sum(year2010)/length(year2010)
s2010<-sd(year2010)
SE2010<-s2010/(sqrt(length(year2010)))
n2010<-(length(year2010))
low.qt2010 <- mean2010-(qt(0.975,length(year2010))*SE2010)
high.qt2010 <- mean2010+(qt(0.975,length(year2010))*SE2010)
#2011
mean2011<-sum(year2011)/length(year2011)
s2011<-sd(year2011)
SE2011<-s2011/(sqrt(length(year2011)))
n2011<-(length(year2011))
low.qt2011 <- mean2011-(qt(0.975,length(year2011))*SE2011)
high.qt2011 <- mean2011+(qt(0.975,length(year2011))*SE2011)
#2012
mean2012<-sum(year2012)/length(year2012)
s2012<-sd(year2012)
SE2012<-s2012/(sqrt(length(year2012)))
n2012<-(length(year2012))
low.qt2012 <- mean2012-(qt(0.975,length(year2012))*SE2012)
high.qt2012 <- mean2012+(qt(0.975,length(year2012))*SE2012)
#2013
mean2013<-sum(year2013)/length(year2013)
s2013<-sd(year2013)
SE2013<-s2013/(sqrt(length(year2013)))
n2013<-(length(year2013))
low.qt2013 <- mean2013-(qt(0.975,length(year2013))*SE2013)
high.qt2013 <- mean2013+(qt(0.975,length(year2013))*SE2013)
#Makes data frame to plot
n <- c(length(year2010),length(year2011),length(year2012),length(year2013))
mY <- c(mean(year2010),mean(year2011),mean(year2012),mean(year2013))
year <- Year
low.qt <- c(low.qt2010,low.qt2011,low.qt2012,low.qt2013)
high.qt <- c(high.qt2010,high.qt2011,high.qt2012,high.qt2013)
sd <- c(s2010,s2011,s2012,s2013)
sum.dat <- cbind(year,n,mY,low.qt,high.qt,sd)
sum.dat <- as.data.frame(sum.dat)
## ----raw table, echo=FALSE-----------------------------------------------
library(knitr)
kable(sum.dat, format = "markdown")
## ----raw graph, echo=FALSE, message=FALSE, warning=FALSE-----------------
#plot data
ggplot(sum.dat, aes(y = mY, x = year)) +
geom_point() +
geom_line() +
geom_errorbar(aes(ymin = low.qt, ymax = high.qt), width = 0.1) +
theme_bw()
## ----raw graph 2, echo=FALSE, fig.height=6, fig.width=6, message=FALSE, warning=FALSE----
#PLOTS
par(mfrow=c(2,2))
plot(factor(year2010),xlim=c(0,6),ylim=c(0,40))
title(main="a)",sub="Sample size 3", ylab="Frequency",xlab="Calving interval",
cex.main = 1.5, font.main= 4, col.main= "blue",
cex.sub = 1, font.sub = 3, col.sub = "red")
box()
plot(factor(year2011),xlim=c(0,6),ylim=c(0,40))
title(main="b)",sub="Sample size 15", ylab="Frequency",xlab="Calving interval",col.main=4,cex.main = 1.5, font.main= 4, col.main= "blue",
cex.sub = 1, font.sub = 3, col.sub = "red")
box()
plot(factor(year2012),xlim=c(0,6),ylim=c(0,40))
title(main="c)",sub="Sample size 25", ylab="Frequency",xlab="Calving interval",col.main=4,cex.main = 1.5, font.main= 4, col.main= "blue",
cex.sub = 1, font.sub = 3, col.sub = "red")
box()
plot(factor(year2013),xlim=c(0,6),ylim=c(0,40))
title(main="d)",sub="Sample size 45", ylab="Frequency",xlab="Calving interval",col.main=4,cex.main = 1.5, font.main= 4, col.main= "blue",
cex.sub = 1, font.sub = 3, col.sub = "red")
box()
## ----raw graph 3, echo=FALSE, fig.height=6, fig.width=6, message=TRUE, warning=TRUE----
library(qpcR)
#data in one way for plot
rawdata <- qpcR:::cbind.na(year2010,year2011,year2012,year2013)
rawdata <- as.data.frame(rawdata)
#in correct format for ggplot2
year2010 <- data.frame(year2010,year = c("2010"))
year2010 <- rename(year2010, interval = year2010, year = year )
year2011 <- data.frame(year2011,year = c("2011"))
year2011 <- rename(year2011, interval = year2011, year = year )
year2012 <- data.frame(year2012,year = c("2012"))
year2012 <- rename(year2012, interval = year2012, year = year )
year2013 <- data.frame(year2013,year = c("2013"))
year2013 <- rename(year2013, interval = year2013, year = year )
ggplotraw <- rbind(year2010,year2011,year2012, year2013)
ggplotraw$interval <- as.numeric(as.character(ggplotraw$interval))
#sort(year2013$interval) - sort(sample.true)
ggplot(year2013,aes(x = interval)) +
geom_bar(alpha = 1, width = 0.9,fill = "black") +
xlab(expression("Calving"~"interval"~(italic("years")))) +
ylab(expression("Total"~"number"~"of"~"observations"~(italic("n")))) +
scale_y_continuous(breaks = c(0,5,10,15,20,25,30), limits = c(0,30)) +
theme(axis.line = element_line(colour = 'black', size = 0.65),
axis.ticks = element_line(colour = "black", size = 0.65),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.key = element_blank(),
strip.background = element_rect(fill = "white", colour = "black", size = 1),
panel.background = element_rect(fill = "white",
colour = NA),
axis.text = element_text(size = rel(0.8),
colour = "black"))
#PLOTS
#code to store figure
# png("Figure_2_NZSRW_calving_interval_2017_highres.png", width = 12, height = 14.8, units = 'cm', res = 1200)
# dev.off()
## ----missing intervals, echo=FALSE, fig.height=10, message=FALSE, warning=FALSE----
#################################Missing calving intervals################
#Intervals modified by accounting for missed intervals
#Bradford et al. 2008
#Raw Data
RealCI <- as.numeric(year2013$interval)
#Confidence interval
xlong <- RealCI
meanlong<-sum(xlong)/length(xlong)
slong<-sd(xlong)
SElong<-slong/(sqrt(length(xlong)))
nlong<-(length(xlong))
#Standard error and confidence intervals
#2 sided t value at the 95% level = 2.093
lowqtlong <- meanlong-(qt(0.975,nlong)*SElong)
highqtlong <- meanlong+(qt(0.975,nlong)*SElong)
####################MED CI########################################
# 2x 6's and 1x 5 replaced with 3threes
MedCI <- c(RealCI[RealCI < 5],3,3,3,3,2,3)
#sort(MedCI)
xmed<-MedCI
meanmed<-sum(xmed)/length(xmed)
smed<-sd(xmed)
SEmed<-smed/(sqrt(length(xmed)))
nmed<-(length(xmed))
#Standard error and confidence intervals
lowqtmed <- meanmed-(qt(0.975,length(xmed))*SEmed)
highqtmed <- meanmed+(qt(0.975,length(xmed))*SEmed)
############################SHORT CI##################################
#6,5 replaced with 2 year intervals
LowCI <- c(RealCI[RealCI < 4],3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2)
xshort<-LowCI
meanshort<-mean(xshort)
sshort<-sd(xshort)
SEshort<-sshort/(sqrt(length(xshort)))
#Standard error and confidence intervals
lowqtshort <- meanshort-(qt(0.975,length(xshort))*SEshort)
highqtshort <- meanshort+(qt(0.975,length(xshort))*SEshort)
bdata <-qpcR:::cbind.na(RealCI,MedCI,LowCI)
bdata <- as.data.frame(bdata)
#Structure of data set
#str(bdata)
## ----missing intervals plot, echo=FALSE, fig.height=3.5, fig.width=5.5, message=FALSE, warning=FALSE----
#Basic plots
par(mfrow=c(1,3))
plot(factor(bdata$LowCI),main="Lowest possible interval")
plot(factor(bdata$MedCI), main="Medium possible interval")
plot(factor(bdata$RealCI),main="Observed interval")
## ----missing intervals plot2, fig.height=5.5, fig.width=4.5, message=FALSE, warning=FALSE, include=FALSE----
#Density basic plots
par(mfrow=c(3,1))
plot(density(as.numeric(as.character(LowCI)),bw=.5), main="Lowest possible interval")
plot(density(as.numeric(as.character(MedCI)),bw= 0.5), main="Medium possible interval")
plot(density(as.numeric(as.character(RealCI)),bw = 0.5),main="Observed interval")
## ----missing intervals table, fig.height=8, message=FALSE, warning=FALSE, include=FALSE----
###################################SUMMARY############################
#Pull out important information
Sumtable<-data.frame(variable = c("low.qt","mean","high.qt","sd", "SE"), short=c(lowqtshort,meanshort,highqtshort,sshort,SEshort),
medium=c(lowqtmed,meanmed,highqtmed,smed,SEmed),
real=c(lowqtlong,meanlong,highqtlong,slong,SElong))
#Make dataframe to plot
n <- c(length(LowCI),length(MedCI),length(year2013$interval))
mY <- c(mean(LowCI),mean(MedCI),mean(year2013$interval))
interval <-c("Low", "Medium","Observed")
low.qt <- c(lowqtshort,lowqtmed,low.qt2013)
high.qt <- c(highqtshort,highqtmed,high.qt2013)
sd <- c(sshort,smed,s2013)
Sumtable <- cbind(interval,n,mY,low.qt,high.qt,sd)
Sumtable <- as.data.frame(Sumtable)
Sumtable$n <- as.numeric(as.character(Sumtable$n))
Sumtable$mY <- as.numeric(as.character(Sumtable$mY))
Sumtable$low.qt <- as.numeric(as.character(Sumtable$low.qt))
Sumtable$high.qt <- as.numeric(as.character(Sumtable$high.qt))
Sumtable$sd <- as.numeric(as.character(Sumtable$sd))
Sumtable$interval <- as.character(Sumtable$interval)
## ----missing intervals plot3, echo=FALSE, fig.height=4, message=FALSE, warning=FALSE----
ggplot(Sumtable, aes(y = mY, x = interval)) +
geom_point(size = 5) +
geom_errorbar(aes(ymin = low.qt, ymax = high.qt), width = 0.05,size = 1, alpha = 0.5) +
scale_y_continuous(breaks = round(seq(2.3, 3.6, by = 0.2),1)) +
labs(y = "Mean calving interval",x = "Calving interval modification" ) +
geom_point(size = 3) +
theme_classic() +
theme_hc() +
theme(legend.position="none")
## ----missing_data_table, echo=FALSE--------------------------------------
library(knitr)
kable(Sumtable, format = "markdown",col.names = c("Interval","Sample size", "Mean", "Lower limit", "Higher limit", "SD"))
## ----srw_data_table, echo=FALSE------------------------------------------
library(knitr)
# setwd("C:/Users/s435389/R_packages/Davidson_2017_SRWrepro/Data")
srwdat <- read.csv(file = "./R/Data/srw_data.csv")
#str(srwdat)
kable(srwdat, format = "markdown",col.names = c("Sample size","Mean", "Lower limit", "Higher limit", "SE","Author", "Location"))
## ----bootstrap single, echo=FALSE, fig.height=5--------------------------
############################NZ Simple sample##############################
#WITH replacement
# to try and match number of intervals observed in other populations
# find references
SAreps <- 1500
ARreps <- 800
Aussiereps <- 2000
low <- 1000
verylow <- 100
lowest <- 10
#Very raw plots
par(mfrow=c(2,3))
plot(factor(sample(year2013$interval,lowest,replace=T)),main = "3 intervals")
plot(factor(sample(year2013$interval,verylow,replace=T)),main = "10 intervals")
plot(factor(sample(year2013$interval,low,replace=T)),main = "30 intervals")
plot(factor(sample(year2013$interval,Aussiereps,replace=T)),main = "500 intervals")
plot(factor(sample(year2013$interval,ARreps,replace=T)),main = "800 intervals")
plot(factor(sample(year2013$interval,SAreps,replace=T)),main = "1500 intervals")
## ----bootstrap_multiple, echo=FALSE--------------------------------------
#do each one 1000 times
boots <- 1000
n <- c(1:1000)
###########################n10
var10 <- paste0("n_", 1:10)
sample10 <-matrix(data = NA, ncol = lowest, nrow = boots)
colnames(sample10) <- as.list(var10)
for (i in 1:boots) {
sample10 [i, ] <- sample(year2013$interval,lowest,replace=T)
} #i
sample10 <- as.data.frame(sample10)
sample10 <- sample10 %>%
mutate(mean10 = rowMeans(sample10))
sample10t <- as.matrix(sample10)
sample10t <-t(sample10t)
#########################verylow sample size
#set up variable names
var100 <- paste0("n_", 1:100)
sample100 <-matrix(data = NA, ncol = verylow, nrow = boots)
colnames(sample100) <- as.list(var100)
for (i in 1:boots) {
sample100 [i, ] <- sample(year2013$interval,verylow,replace=T)
} #i
sample100 <- as.data.frame(sample100)
sample100 <- sample100 %>%
mutate(mean100 = rowMeans(sample100))
#########################middle one
#set up variable names
var500 <- paste0("n_", 1:500)
sample500 <-matrix(data = NA, ncol = 500, nrow = boots)
colnames(sample500) <- as.list(var500)
for (i in 1:boots) {
sample500 [i, ] <- sample(year2013$interval,500,replace=T)
} #i
sample500 <- as.data.frame(sample500)
sample500 <- sample500 %>%
mutate(mean500 = rowMeans(sample500))
#########################low sample size
#set up variable names
var1000 <- paste0("n_", 1:1000)
sample1000 <-matrix(data = NA, ncol = low, nrow = boots)
colnames(sample1000) <- as.list(var1000)
for (i in 1:boots) {
sample1000 [i, ] <- sample(year2013$interval,low,replace=T)
} #i
sample1000 <- as.data.frame(sample1000)
sample1000 <- sample1000 %>%
mutate(mean1000 = rowMeans(sample1000))
#########################AUS sample size
#set up variable names
varA <- paste0("n_", 1:2000)
sampleA <-matrix(data = NA, ncol = Aussiereps, nrow = boots)
colnames(sampleA) <- as.list(varA)
for (i in 1:boots) {
sampleA [i, ] <- sample(year2013$interval,Aussiereps,replace=T)
} #i
sampleA <- as.data.frame(sampleA)
sampleA <- sampleA %>%
mutate(meanA = rowMeans(sampleA))
sampleAt <- t(sampleA)
for(i in c(1:ncol(sampleA))) {
sampleA[,i] <- as.numeric(as.character(sampleA[,i]))
}
#COnfidence intervals
ab <- sort(sampleA$meanA)
nab <- length(ab)
#low = 25/1000
ab2.5 <- ab[25]
#high = 975/1000
ab0.97.5 <- ab[975]
ab <- sort(sampleA$meanA)
nab <- length(ab)
#low = 25/1000
ab2.5 <- ab[25]
#high = 975/1000
ab0.97.5 <- ab[975]
## ----bootstrap plot2, fig.height=5, message=FALSE, warning=FALSE, include=FALSE----
#plot the data over each other to look at change in density
par(mfrow=c(1,1))
#plot(density(sample3$mean3,bw = .15),lwd = 3,lyt = 5, main = "", xlab = "Calving interval", box = FALSE,axis = FALSE)
plot(density(sample10$mean10,bw = .05),col ="black", lty = 1, main = "", lwd = 5,ylim = c(0,8),xlim = c(2,4.5), axes=FALSE,xlab = "Calving interval")
lines(density(sample100$mean100,bw = .05),col ="black", lty = 2, lwd = 4)
lines(density(sample500$mean500,bw = .05),col ="black", lty = 3, lwd = 3)
lines(density(sample1000$mean1000,bw = .05),col ="black", lty = 4, lwd = 2)
lines(density(sampleA$meanA,bw = .05),col ="black", lty = 5, lwd = 1)
legend('topright',title = "Legend", c("n=10, cv=8.12 ", "n=100, cv=2.43", "n=500, c.v=1.15", "n=1000, cv=0.79", "n=2000, cv=0.56"),bty = "n",
lty = c(1,2,3,4,5), lwd = c(5,4,3,2,1), cex=.75)
axis(1,lwd=2)
axis(2,lwd=2)
## ----final plot for publication1, echo=FALSE-----------------------------
#final [plot]
#size defined by NZJFMR
# 195 mm (h) ? 148 mm (w).
#ylab(expression("Total"~"number"~"of"~"observations"~(italic("n")))) +
plot(density(sample10$mean10,bw = .05),col ="black", lty = 3, main = "", lwd = 1,ylim = c(0,8),xlim = c(2.5,4.5), axes=FALSE, xlab = expression("Calving"~"interval"~(italic("years"))))
lines(density(sample100$mean100,bw = .05),col ="black", lty = 4, lwd = 1)
lines(density(sample500$mean500,bw = .05),col ="black", lty = 5, lwd = 1)
lines(density(sample1000$mean1000,bw = .05),col ="black", lty = 2, lwd = 1)
lines(density(sampleA$meanA,bw = .05),col ="black", lty = 1, lwd = 2)
legend(y = 8, x = 3.9,title = expression(bold("Sample size (n)")), c(expression(italic("n")~"="~"10"), expression(italic("n")~"="~"100"), expression(italic("n")~"="~"500"), expression(italic("n")~"="~"1000"), expression(italic("n")~"="~"2000")),bty = "n",
lty = c(3,4,5,2,1), lwd = c(1,1,1,1,2), cex=1)
axis(1,lwd=2)
axis(2,lwd=2)
# PLOT CODE FOR PUBLICATION
# png("C:/Users/s435389/R_packages/Davidson_2017_SRWrepro/Figures/Figure_3_NZSRW_calving_interval_2017_lowres.png", width = 14.8, height = 14.8, units = 'cm', res = 400)
# dev.off()
#
#
# png("C:/Users/s435389/R_packages/Davidson_2017_SRWrepro/Figures/Figure_3_NZSRW_calving_interval_2017_highres.png", width = 14.8, height = 14.8, units = 'cm', res = 1200)
#
# plot(density(sample10$mean10,bw = .05),col ="black", lty = 3, main = "", lwd = 1,ylim = c(0,8),xlim = c(2.5,4.5), axes=FALSE,xlab = expression("Calving"~"interval"~(italic("years"))))
# lines(density(sample100$mean100,bw = .05),col ="black", lty = 4, lwd = 1)
# lines(density(sample500$mean500,bw = .05),col ="black", lty = 2, lwd = 1)
# lines(density(sample1000$mean1000,bw = .05),col ="black", lty = 5, lwd = 1)
# lines(density(sampleA$meanA,bw = .05),col ="black", lty = 1, lwd = 2)
# legend(y = 8, x = 3.9,title = expression(bold("Sample size (n)")), c(expression(italic("n")~"="~"10"), expression(italic("n")~"="~"100"), expression(italic("n")~"="~"500"), expression(italic("n")~"="~"1000"), expression(italic("n")~"="~"2000")),bty = "n",
# lty = c(3,4,2,5,1), lwd = c(1,1,1,1,2), cex=1)
# axis(1,lwd=2)
# axis(2,lwd=2)
#
# dev.off()
## ----referee_comment_1, echo=TRUE----------------------------------------
#observed sample
rev.one <- bdata$RealCI[1:45]
#sample 45 times
sample.true <- year2013$interval
#power analysis
pwr.test.results <- power.t.test(n = 45,# sample size
delta = seq(0,0.99,0.001), #difference between means
sd = sd(sample.true), #observed variation
alternative = "one.sided", #observed test type
sig.level = 0.05) #significance level
#additional packages are avaliable for more complex analysis
#but have not done this as don't think it is needed
## ----referee_comment_1_plot, echo=FALSE, message=FALSE, warning=FALSE----
#sort data into ggplot format
pwr.analysis <- as.data.frame(cbind(
pwr.test.results$power,
pwr.test.results$delta))
colnames(pwr.analysis) <- c("Power","Mean.difference")
#sort data into ggplot format
pwr.analysis.1 <- pwr.analysis %>%
mutate(Alpha = 1- Power,
Mean.estimate = 3.31 + Mean.difference)
# %>%
# select(Alpha,Mean.estimate)
#work out where the cut-off is
a <- filter(pwr.analysis.1, Alpha < 0.05)
a[1,]
#plot data
ggplot(data = pwr.analysis.1, aes(x = Mean.estimate, y = Alpha)) +
geom_line(size = 1.5) +
geom_vline(xintercept = 3.903, col = "blue") +
geom_hline(yintercept = 0.05) +
theme(axis.line = element_line(colour = 'black', size = 0.65),
axis.ticks = element_line(colour = "black", size = 0.65),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.key = element_blank(),
strip.background = element_rect(fill = "white", colour = "black", size = 1),
panel.background = element_rect(fill = "white",
colour = NA),
axis.text = element_text(size = rel(0.8),
colour = "black")) +
ggtitle("Raw data result plot (n = 45)")
## ----referee_comment_2_plot, echo=FALSE, message=FALSE, warning=FALSE----
#observed sample
rev.one <- bdata$RealCI[1:45]
#sample 45 times
sample.true <- year2013$interval
#difference
diff <- 3.63-3.31 #observed mean of australian population
#power analysis
pwr.test.results <- power.t.test(n = seq(1,200,1),# sample size
delta = diff, #difference between means
sd = sd(sample.true), #observed variation
alternative = "one.sided", #observed test type
sig.level = 0.05) #significance level
#additional packages are available for more complex analysis
#but have not done this as don't think it is needed
#sort data into ggplot format
pwr.analysis <- as.data.frame(cbind(
pwr.test.results$power,
pwr.test.results$n))
colnames(pwr.analysis) <- c("Power","Sample.size")
#sort data into ggplot format
pwr.analysis.1 <- pwr.analysis %>%
mutate(Alpha = 1- Power)
# %>%
# select(Alpha,Mean.estimate)
#work out where the cut-off is
a <- filter(pwr.analysis.1, Alpha < 0.05)
a[1,]
#plot data
ggplot(data = pwr.analysis.1, aes(x = Sample.size, y = Alpha)) +
geom_line(size = 1.5) +
geom_vline(xintercept = 45, col = "red") +
geom_vline(xintercept = 153, col = "blue") +
geom_hline(yintercept = 0.05) +
scale_y_continuous(limits = c(0,1)) +
theme(axis.line = element_line(colour = 'black', size = 0.65),
axis.ticks = element_line(colour = "black", size = 0.65),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.key = element_blank(),
strip.background = element_rect(fill = "white", colour = "black", size = 1),
panel.background = element_rect(fill = "white",
colour = NA),
axis.text = element_text(size = rel(0.8),
colour = "black")) +
ggtitle("Observed difference between Australian and NZ mean")
## ----missed individuals 1, echo=FALSE------------------------------------
dat <- read.csv("./R/Data/raw_observations_2012.csv")
#data structure
glimpse(dat)
head(dat)
#And the second dataset
dat1<- read.csv("./R/Data/RawCI.csv", header=T, quote="\"")
#data structure
glimpse(dat1)
## ----missed individuals 2, echo=FALSE, message=FALSE, warning=FALSE------
##I can then modify this data to
#restructure dataset of capture to long dataset
dat3 <- dplyr::select(dat, ID, X2006:X2012)%>%
gather(year, count,X2006:X2012)
#add data on calves
dat4 <- full_join(dat3,dat1, by = "ID")
dat5 <- dplyr::select(dat4,ID,year,count,Yr.first.seen,Calves,Calves.1,Calves.2)
dat6 <- filter(dat5,count >0)
glimpse(dat6)
dat7 <- mutate(dat6, year = ifelse(year == "X2006","2006", year),
year = ifelse(year == "X2007","2007", year),
year = ifelse(year == "X2008","2008", year),
year = ifelse(year == "X2009","2009", year),
year = ifelse(year == "X2010","2010", year),
year = ifelse(year == "X2011","2011", year),
year = ifelse(year == "X2012","2012", year))
a <- group_by(dat7, ID, Yr.first.seen) %>%
mutate(mother = ifelse(Yr.first.seen > 0, 1, 0)) %>%
filter(mother == 1) %>%
ungroup() %>%
dplyr::select(ID,year,Calves,Calves.1) %>%
filter(Calves.1<2013) %>%
filter(!year == Calves) %>%
filter(!year ==Calves.1)
a
## ----referee_comment3, echo=TRUE, message=FALSE, warning=FALSE-----------
# Subset to calving intervals strictly greater than 2 years.
greater.than.2 <- sample.true[sample.true>2]
#greater.than.2
mean.2<-sum(greater.than.2)/length(greater.than.2)
s.2<-sd(greater.than.2)
# BUG FIX: this previously read `SE.2 <- s2013/...`, using the SD of the full
# 2013 sample (presumably from the quickloaded .RData) instead of `s.2`, the
# SD of the subset computed on the line above, which was otherwise unused.
SE.2<-s.2/(sqrt(length(greater.than.2)))
n.2<-length(greater.than.2)
# 95% t-interval around the subset mean.
# NOTE(review): `qt()` is given df = n rather than the conventional n - 1;
# this matches the rest of the script (e.g. the `longer5.6` chunk), so it is
# kept for consistency -- confirm intent with the author.
low.qt.2<- mean.2-(qt(0.975,length(greater.than.2))*SE.2)
high.qt.2 <- mean.2+(qt(0.975,length(greater.than.2))*SE.2)
#add it to the table from bradford data
Sumtable[4,] <- c("miss2year",n.2,mean.2,low.qt.2,
                  high.qt.2,sd(greater.than.2))
## ----different missing intervals 1, echo=TRUE----------------------------
########################### 2.2%
#parameters
boots <- 1000
n <- c(1:1000)
###round all percentages upwards
detect1 <- 44 # (45*1.02) - 45 = 0.9
detect2 <- 42 # (45*1.05) - 45 = 2.25
detect3 <- 40 # (45*1.10) - 45 = 4.5
sample2 <-rep(NA, 1000)
sample5 <-rep(NA, 1000)
sample10 <-rep(NA, 1000)
for (i in 1:boots) {
sample2[i]<-mean(sample(year2013$interval,detect1,replace=T))
sample5[i]<-mean(sample(year2013$interval,detect2,replace=T))
sample10[i]<-mean(sample(year2013$interval,detect3,replace=T))
} #i
######################estimates##############
sample2 <- sort(sample2)
#low = 25/1000
sample2.2.5 <- sample2[25]
#median
sample2.50 <- sample2[500]
#high = 975/1000
sample2.975 <- sample2[975]
sample5 <- sort(sample5)
#low = 25/1000
sample5.2.5 <- sample5[25]
#median
sample5.50 <- sample5[500]
#high = 975/1000
sample5.975 <- sample5[975]
sample10 <- sort(sample10)
#low = 25/1000
sample10.2.5 <- sample10[25]
#median
sample10.50 <- sample10[500]
#high = 975/1000
sample10.975 <- sample10[975]
#add it to the table from bradford data
Sumtable[5,] <- c("detect1",detect1,sample2.50,sample2.2.5,sample2.975,NA)
Sumtable[6,] <- c("detect2",detect2,sample5.50,sample5.2.5,sample5.975,NA)
Sumtable[7,] <- c("detect5",detect3,sample10.50,sample10.2.5,sample10.975,NA)
## ----detection sim.2-----------------------------------------------------
#be very careful as Dat is just IDS and no id of females with calves
#BUT Data is identified females...
length(Data$ID)
length(dat$ID)
glimpse(Data)
dat.detect <- dplyr::select(Data,ID,Calves,Calves.1, Calves.2) %>%
mutate(Calves = factor(Calves),
Calves.1 = factor(Calves.1),
Calves.2 = factor(Calves.2))
a <- as.data.frame.matrix(table(Data$ID,Data$Calves))
head(a)
a[,7] <-row.names(a)
colnames(a)[1] <- "y2006"
colnames(a)[2] <- "y2007"
colnames(a)[3] <- "y2008"
colnames(a)[4] <- "y2009"
colnames(a)[5] <- "y2010"
colnames(a)[6] <- "y2011"
colnames(a)[7] <- "ID"
a[,8] <- 0
colnames(a)[8] <- "y2012"
a[,9] <- 0
colnames(a)[9] <- "y2013"
a <- dplyr::select(a,ID,y2006,y2007,y2008, y2009, y2010, y2011, y2012, y2013)
b <- as.data.frame.matrix(table(Data$ID,Data$Calves.1))
head(b)
b[,5] <-row.names(b)
colnames(b)[5] <- "ID"
b[,6] <- 0
colnames(b)[6] <- "y2006"
b[,7] <- 0
colnames(b)[7] <- "y2007"
b[,8] <- 0
colnames(b)[8] <- "y2008"
b[,9] <- 0
colnames(b)[9] <- "y2009"
colnames(b)[1] <- "y2010"
colnames(b)[2] <- "y2011"
colnames(b)[3] <- "y2012"
colnames(b)[4] <- "y2013"
b <- dplyr::select(b,ID,y2006,y2007,y2008, y2009, y2010, y2011, y2012, y2013)
c <- as.data.frame.matrix(table(Data$ID,Data$Calves.2))
head(c)
colnames(c)[1] <- "y2013"
c[,2] <-row.names(c)
colnames(c)[2] <- "ID"
c[,3] <- 0
colnames(c)[3] <- "y2006"
c[,4] <- 0
colnames(c)[4] <- "y2007"
c[,5] <- 0
colnames(c)[5] <- "y2008"
c[,6] <- 0
colnames(c)[6] <- "y2009"
c[,7] <- 0
colnames(c)[7] <- "y2010"
c[,8] <- 0
colnames(c)[8] <- "y2011"
c[,9] <- 0
colnames(c)[9] <- "y2012"
c <- dplyr::select(c,ID,y2006,y2007,y2008, y2009, y2010, y2011, y2012,y2013)
countdat <- rbind(a,b,c)
glimpse(countdat)
# head(full.dat)
full.dat <- group_by(countdat, ID) %>%
summarise(y2006 = sum(y2006),
y2007 = sum(y2007),
y2008 = sum(y2008),
y2009 = sum(y2009),
y2010 = sum(y2010),
y2011 = sum(y2011),
y2012 = sum(y2012),
y2013 = sum(y2013))
2012-2006
##checking....
sort(Data$ID)
filter(Data, ID == "AI06022")
filter(Data, ID == "AI08340")
filter(Data, ID == "AI08343")
head(Data)
# glimpse(c)
# Data$Calves.1,
# # Spread and gather are complements
# df <- data.frame(x = c("a", "b"), y = c(3, 4), z = c(5, 6))
# df %>% spread(x, y) %>% gather(x, y, a:b, na.rm = TRUE)
## ----different missing intervals 2---------------------------------------
longer5.6 <- c(sample.true,5,6,6)
#greater.than.2
mean.56<-sum(longer5.6)/length(longer5.6)
s.56<-sd(longer5.6)
SE.56<-s.56/(sqrt(length(longer5.6)))
n.56<-(length(longer5.6))
low.qt.56<- mean.56-(qt(0.975,length(longer5.6))*SE.56)
high.qt.56 <- mean.56+(qt(0.975,length(longer5.6))*SE.56)
#add it to the table from bradford data
Sumtable[8,] <- c("longer.56",n.56,mean.56,low.qt.56,high.qt.56,sd(longer5.6))
###sort out numbering in dataframe
Sumtable <- as.data.frame(Sumtable)
Sumtable$n <- as.numeric(as.character(Sumtable$n))
Sumtable$mY <- as.numeric(as.character(Sumtable$mY))
Sumtable$low.qt <- as.numeric(as.character(Sumtable$low.qt))
Sumtable$high.qt <- as.numeric(as.character(Sumtable$high.qt))
Sumtable$sd <- as.numeric(as.character(Sumtable$sd))
Sumtable$interval <- as.character(Sumtable$interval)
## ----missing_data_table 2, echo=FALSE------------------------------------
library(knitr)
kable(Sumtable, format = "markdown",col.names = c("Interval","Sample size", "Mean", "Lower limit", "Higher limit", "SD"))
## ----referee_comment3_plot, echo=FALSE-----------------------------------
ggplot(Sumtable, aes(y = mY, x = interval)) +
geom_point(size = 5) +
geom_errorbar(aes(ymin = low.qt, ymax = high.qt), width = 0.05,size = 1, alpha = 0.5) +
scale_y_continuous(breaks = round(seq(2.3, 5, by = 0.2),1)) +
labs(y = "Mean calving interval",x = "Calving interval modification" ) +
geom_point(size = 3) +
theme_classic() +
theme_hc() +
theme(legend.position="none")
|
#' Quantile regression
#'
#' This fits a quantile regression to the data and draws the fitted quantiles
#' with lines. This is as a continuous analogue to [geom_boxplot()].
#'
#' @eval rd_aesthetics("geom", "quantile")
#' @export
#' @inheritParams layer
#' @inheritParams geom_point
#' @inheritParams geom_path
#' @param method.args List of additional arguments passed on to the modelling
#' function defined by `method`.
#' @param geom,stat Use to override the default connection between
#' `geom_quantile()` and `stat_quantile()`.
#' @examples
#' m <-
#' ggplot(mpg, aes(displ, 1 / hwy)) +
#' geom_point()
#' m + geom_quantile()
#' m + geom_quantile(quantiles = 0.5)
#' q10 <- seq(0.05, 0.95, by = 0.05)
#' m + geom_quantile(quantiles = q10)
#'
#' # You can also use rqss to fit smooth quantiles
#' m + geom_quantile(method = "rqss")
#' # Note that rqss doesn't pick a smoothing constant automatically, so
#' # you'll need to tweak lambda yourself
#' m + geom_quantile(method = "rqss", lambda = 0.1)
#'
#' # Set aesthetics to fixed value
#' m + geom_quantile(colour = "red", linewidth = 2, alpha = 0.5)
geom_quantile <- function(mapping = NULL, data = NULL,
                          stat = "quantile", position = "identity",
                          ...,
                          lineend = "butt",
                          linejoin = "round",
                          linemitre = 10,
                          na.rm = FALSE,
                          show.legend = NA,
                          inherit.aes = TRUE) {
  # Construct the layer: the quantile regression fit itself is performed by
  # the "quantile" stat; this geom only draws the resulting fitted lines
  # (it renders via GeomQuantile, which inherits from GeomPath).
  layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomQuantile,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    # Line-rendering parameters, plus anything passed through `...`
    # (e.g. `method`, `quantiles`, `method.args`), are forwarded to the
    # layer's stat and geom.
    params = list2(
      lineend = lineend,
      linejoin = linejoin,
      linemitre = linemitre,
      na.rm = na.rm,
      ...
    )
  )
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
#' @include geom-path.R
GeomQuantile <- ggproto("GeomQuantile", GeomPath,
  # All drawing behaviour is inherited from GeomPath; only the default
  # aesthetics are overridden so quantile lines get a distinctive blue
  # colour and a `weight` aesthetic (used when fitting the quantiles).
  default_aes = defaults(
    aes(weight = 1, colour = "#3366FF", linewidth = 0.5),
    GeomPath$default_aes
  )
)
| /R/geom-quantile.R | no_license | cran/ggplot2 | R | false | false | 2,184 | r | #' Quantile regression
#'
#' This fits a quantile regression to the data and draws the fitted quantiles
#' with lines. This is as a continuous analogue to [geom_boxplot()].
#'
#' @eval rd_aesthetics("geom", "quantile")
#' @export
#' @inheritParams layer
#' @inheritParams geom_point
#' @inheritParams geom_path
#' @param method.args List of additional arguments passed on to the modelling
#' function defined by `method`.
#' @param geom,stat Use to override the default connection between
#' `geom_quantile()` and `stat_quantile()`.
#' @examples
#' m <-
#' ggplot(mpg, aes(displ, 1 / hwy)) +
#' geom_point()
#' m + geom_quantile()
#' m + geom_quantile(quantiles = 0.5)
#' q10 <- seq(0.05, 0.95, by = 0.05)
#' m + geom_quantile(quantiles = q10)
#'
#' # You can also use rqss to fit smooth quantiles
#' m + geom_quantile(method = "rqss")
#' # Note that rqss doesn't pick a smoothing constant automatically, so
#' # you'll need to tweak lambda yourself
#' m + geom_quantile(method = "rqss", lambda = 0.1)
#'
#' # Set aesthetics to fixed value
#' m + geom_quantile(colour = "red", linewidth = 2, alpha = 0.5)
geom_quantile <- function(mapping = NULL, data = NULL,
stat = "quantile", position = "identity",
...,
lineend = "butt",
linejoin = "round",
linemitre = 10,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE) {
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomQuantile,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list2(
lineend = lineend,
linejoin = linejoin,
linemitre = linemitre,
na.rm = na.rm,
...
)
)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
#' @include geom-path.R
GeomQuantile <- ggproto("GeomQuantile", GeomPath,
default_aes = defaults(
aes(weight = 1, colour = "#3366FF", linewidth = 0.5),
GeomPath$default_aes
)
)
|
require(tidyverse)
require(modules)
cfg <- modules::use("config.R")
fx_style <- readRDS(cfg$PATH_FX_STYLE_BY_YEAR)
tibble(style = c('dotted', 'allupper', 'upcamel', 'other', 'alllower', 'lowcamel', 'snake'),
long_name = c("dotted.func", "ALLUPPER", "UpperCamel", "other", "alllower", "lowerCamel", "lower_snake")) ->
naming_conv
fx_style %>% mutate(alllower = alllower / total,
allupper = allupper / total,
upcamel = upcamel / total,
lowcamel = lowcamel / total,
snake = snake / total,
dotted = dotted / total,
other = other / total) %>%
select(-total) %>%
gather(key = 'style', value = 'share', -pub_year) %>%
left_join(naming_conv, by = 'style') %>%
mutate(opacity = ifelse(style %in% c('dotted', 'snake', 'lowcamel', 'upcamel'), 0.8, 0.4)) %>%
mutate(long_name = fct_relevel(long_name,
"dotted.func", "ALLUPPER", "UpperCamel", "other", "alllower", "lowerCamel", "lower_snake")) %>%
mutate(percentage = share * 100) %>%
ggplot(aes(x = pub_year, y = percentage, col = long_name, alpha = opacity)) +
geom_line() + scale_color_manual(values = RColorBrewer::brewer.pal(7, 'Dark2')) +
xlab("Year") + ylab("Share of all functions (%)") +
theme(plot.title = element_text(size = 24, face = "bold"), plot.subtitle = element_text(size = 10), axis.text = element_text(size = 15), axis.title=element_text(size=14,face="bold")) +
theme(rect = element_rect(fill = "transparent")) +
theme(legend.position = "none") -> prob_plot
ggsave('visualization_fun/func_prob_plot.png', prob_plot, width = 6, height = 5, units = 'in', bg = "transparent")
fx_style %>% mutate(alllower = alllower / total,
allupper = allupper / total,
upcamel = upcamel / total,
lowcamel = lowcamel / total,
snake = snake / total,
dotted = dotted / total,
other = other / total) %>%
select(-total) %>% mutate(entropy = -
(alllower * log(alllower) +
allupper * log(allupper) +
upcamel * log(upcamel) +
lowcamel * log(lowcamel) +
ifelse(snake != 0, snake * log(snake), 0) +
dotted * log(dotted) +
other * log(other))) %>%
ggplot(aes(x = pub_year, y = entropy)) + geom_line() -> entropy_plot
ggsave('visualization_fun/func_entropy_plot.png', entropy_plot)
fx_style %>% mutate(alllower = alllower / total,
allupper = allupper / total,
upcamel = upcamel / total,
lowcamel = lowcamel / total,
snake = snake / total,
dotted = dotted / total,
other = other / total) %>%
select(-total) %>% mutate(entropy = -
(alllower * log(alllower) +
allupper * log(allupper) +
upcamel * log(upcamel) +
lowcamel * log(lowcamel) +
ifelse(snake != 0, snake * log(snake), 0) +
dotted * log(dotted) +
other * log(other))) %>% mutate(type = "function_name_style") %>% select(pub_year, entropy, type) %>% saveRDS('visualization_fun/entropy_fx_name.RDS')
| /1functionnames03_function_name_vis.R | no_license | chainsawriot/rstyle | R | false | false | 3,709 | r | require(tidyverse)
require(modules)
cfg <- modules::use("config.R")
fx_style <- readRDS(cfg$PATH_FX_STYLE_BY_YEAR)
tibble(style = c('dotted', 'allupper', 'upcamel', 'other', 'alllower', 'lowcamel', 'snake'),
long_name = c("dotted.func", "ALLUPPER", "UpperCamel", "other", "alllower", "lowerCamel", "lower_snake")) ->
naming_conv
fx_style %>% mutate(alllower = alllower / total,
allupper = allupper / total,
upcamel = upcamel / total,
lowcamel = lowcamel / total,
snake = snake / total,
dotted = dotted / total,
other = other / total) %>%
select(-total) %>%
gather(key = 'style', value = 'share', -pub_year) %>%
left_join(naming_conv, by = 'style') %>%
mutate(opacity = ifelse(style %in% c('dotted', 'snake', 'lowcamel', 'upcamel'), 0.8, 0.4)) %>%
mutate(long_name = fct_relevel(long_name,
"dotted.func", "ALLUPPER", "UpperCamel", "other", "alllower", "lowerCamel", "lower_snake")) %>%
mutate(percentage = share * 100) %>%
ggplot(aes(x = pub_year, y = percentage, col = long_name, alpha = opacity)) +
geom_line() + scale_color_manual(values = RColorBrewer::brewer.pal(7, 'Dark2')) +
xlab("Year") + ylab("Share of all functions (%)") +
theme(plot.title = element_text(size = 24, face = "bold"), plot.subtitle = element_text(size = 10), axis.text = element_text(size = 15), axis.title=element_text(size=14,face="bold")) +
theme(rect = element_rect(fill = "transparent")) +
theme(legend.position = "none") -> prob_plot
ggsave('visualization_fun/func_prob_plot.png', prob_plot, width = 6, height = 5, units = 'in', bg = "transparent")
fx_style %>% mutate(alllower = alllower / total,
allupper = allupper / total,
upcamel = upcamel / total,
lowcamel = lowcamel / total,
snake = snake / total,
dotted = dotted / total,
other = other / total) %>%
select(-total) %>% mutate(entropy = -
(alllower * log(alllower) +
allupper * log(allupper) +
upcamel * log(upcamel) +
lowcamel * log(lowcamel) +
ifelse(snake != 0, snake * log(snake), 0) +
dotted * log(dotted) +
other * log(other))) %>%
ggplot(aes(x = pub_year, y = entropy)) + geom_line() -> entropy_plot
ggsave('visualization_fun/func_entropy_plot.png', entropy_plot)
fx_style %>% mutate(alllower = alllower / total,
allupper = allupper / total,
upcamel = upcamel / total,
lowcamel = lowcamel / total,
snake = snake / total,
dotted = dotted / total,
other = other / total) %>%
select(-total) %>% mutate(entropy = -
(alllower * log(alllower) +
allupper * log(allupper) +
upcamel * log(upcamel) +
lowcamel * log(lowcamel) +
ifelse(snake != 0, snake * log(snake), 0) +
dotted * log(dotted) +
other * log(other))) %>% mutate(type = "function_name_style") %>% select(pub_year, entropy, type) %>% saveRDS('visualization_fun/entropy_fx_name.RDS')
|
\name{Convert R function to the Rfast's coresponding}
\alias{as.Rfast.function}
\title{
Convert an R function to the corresponding Rfast function
}
\description{
Convert an R function to the corresponding Rfast function.
}
\usage{
as.Rfast.function(Rfunction.name,margin=NULL)
}
\arguments{
\item{Rfunction.name}{
A character value with the name of the function.
}
\item{margin}{
A logical value indicating whether to return the column- or row-wise version of the function.
}
}
\details{
Given the name of an R function, it returns the corresponding function from Rfast.
}
\value{
The corresponding Rfast function.
}
\author{
Manos Papadakis and Michail Tsagris
R implementation and documentation: Manos Papadakis <papadakm95@gmail.com>
and Michail Tsagris <mtsagris@yahoo.gr>.
}
\seealso{
\code{ \link{colsums}, \link{colMedians}, \link{colVars}
}
}
\examples{
res<-as.Rfast.function("var")
}
| /fuzzedpackages/Rfast/man/as.Rfast.function.Rd | no_license | akhikolla/testpackages | R | false | false | 909 | rd | \name{Convert R function to the Rfast's coresponding}
\alias{as.Rfast.function}
\title{
Convert R function to the Rfast's coresponding
}
\description{
Convert R function to the Rfast's coresponding.
}
\usage{
as.Rfast.function(Rfunction.name,margin=NULL)
}
\arguments{
\item{Rfunction.name}{
An character value with the name of the function.
}
\item{margin}{
A logical function for return the column-row wise function.
}
}
\details{
Given the name of R function, it returns the coresponding function's name from Rfast.
}
\value{
The coresponding Rfast function.
}
\author{
Manos Papadakis and Michail Tsagris
R implementation and documentation: Manos Papadakis <papadakm95@gmail.com>
and Michail Tsagris <mtsagris@yahoo.gr>.
}
\seealso{
\code{ \link{colsums}, \link{colMedians}, \link{colVars}
}
}
\examples{
res<-as.Rfast.function("var")
}
|
#!/usr/bin/Rscript
# gen_gp_2_kerns.R Author "Nathan Wycoff <nathanbrwycoff@gmail.com>" Date 01.03.2018
## This file generates from a standard gaussian process
require(mds.methods)
source('../lib/some_gp_funcs.R')
#Pick a seed
seed <- 1234
#Quickload
load(paste('./data/onegpnd_', seed, '.RData', sep = ''))
######### Generate some data with specified weirdness
set.seed(seed)
n <- 50
p <- 5
X <- matrix(runif(n*p), ncol = p)
#Create the kernels and the response
kern <- kernel_factory(lengthscale=0.1)
nugget <- 0.01
y <- gen_gp(X, kern, nugget)
##For 1D only, plot the points as well as the normal GP fit.
# For 1-D inputs only: scatter the data, then overlay the GP posterior mean.
# (Dead code when p > 1, as in the current run where p == 5.)
if (p == 1) {
  quartz()
  cols <- c('red', 'blue')
  plot(X, y, lwd=0)
  # NOTE(review): `r` and `s` are not defined anywhere in this script --
  # presumably they come from the quickloaded .RData; confirm before relying
  # on this branch.
  text(X, y, 1:n, col = cols[(r > s) + 1])
  # Prediction grid over [0, 1].  (The original used
  # `as.matrix(seq(...), ncol = p)`, but as.matrix() silently ignores `ncol`;
  # matrix() makes the intent explicit and is identical for p == 1.)
  XX <- matrix(seq(0,1,length.out=200), ncol = p)
  # BUG FIX: was `gp_post_mean_factory(X, y, kernn, nugget)` -- `kernn` is
  # undefined; the kernel built earlier in this script is `kern`.
  mu <- gp_post_mean_factory(X, y, kern, nugget)
  yy <- sapply(seq_len(nrow(XX)), function(xx) mu(XX[xx,]))
  points(XX, yy, col = 'red', type = 'l')
}
##### Compare visualizations using arc length and euclidean distance
post_mean <- gp_post_mean_factory(X, y, kern, nugget)
gp.dist <- function(a, b) gp_arclen(post_mean, a, b)
euclidean.dist <- function(a, b) norm(b-a, '2')
gparc_mds <- smacof_forward_mds(high_d = X, weights = rep(1,p), dist.func = gp.dist,
n.inits = 1, std = F)
euc_mds <- smacof_forward_mds(high_d = X, weights = rep(1,p),
dist.func = euclidean.dist,
n.inits = 1)
##Plot the GParclength one in polar coords
cart2polar <- function(X) {
  # Convert 2-D Cartesian coordinates to polar coordinates.
  #
  # X: numeric matrix; column 1 is x, column 2 is y (one point per row).
  # Returns a matrix of the same dimensions with column 1 = radius and
  # column 2 = angle in (-pi, pi].
  Y <- matrix(NA_real_, ncol = ncol(X), nrow = nrow(X))
  Y[, 1] <- sqrt(X[, 1]^2 + X[, 2]^2)
  # BUG FIX: the original used atan(y / x), which collapses opposite
  # quadrants (atan(0/-1) == atan(0/1)) and divides by zero when x == 0.
  # atan2(y, x) recovers the full, quadrant-correct angle.
  Y[, 2] <- atan2(X[, 2], X[, 1])
  Y
}
# Compare the output
low_d1 <- gparc_mds$par
low_d2 <- euc_mds$par
cols <- c('red', 'blue')
par(mfrow=c(1,2))
plot(low_d1, lwd = 0, main = paste('GPArclength MDS', 'stress:',gparc_mds$value),
lty=0)
text(low_d1[,1], low_d1[,2], 1:n)
plot(low_d2, lwd = 0, main = paste('Euclidean MDS', 'stress:',euc_mds$value), lty=0)
text(low_d2[,1], low_d2[,2], 1:n)
#Get the two distance matrices and compare them
low_1 <- as.matrix(dist(low_d1))
low_2 <- as.matrix(dist(low_d2))
print(norm(low_1 - low_2))
## Save the output
save.image(paste('./data/onegpnd_', seed, '.RData', sep = ''))
| /R/good_examples/onegp_nd.R | no_license | NathanWycoff/GPArcLength | R | false | false | 2,275 | r | #!/usr/bin/Rscript
# gen_gp_2_kerns.R Author "Nathan Wycoff <nathanbrwycoff@gmail.com>" Date 01.03.2018
## This file generates from a standard gaussian process
require(mds.methods)
source('../lib/some_gp_funcs.R')
#Pick a seed
seed <- 1234
#Quickload
load(paste('./data/onegpnd_', seed, '.RData', sep = ''))
######### Generate some data with specified weirdness
set.seed(seed)
n <- 50
p <- 5
X <- matrix(runif(n*p), ncol = p)
#Create the kernels and the response
kern <- kernel_factory(lengthscale=0.1)
nugget <- 0.01
y <- gen_gp(X, kern, nugget)
##For 1D only, plot the points as well as the normal GP fit.
if (p == 1) {
quartz()
cols <- c('red', 'blue')
plot(X, y, lwd=0)
text(X, y, 1:n, col = cols[(r > s) + 1])
XX <- as.matrix(seq(0,1,length.out=200), ncol = p)
mu <- gp_post_mean_factory(X, y, kernn, nugget)
yy <- sapply(1:nrow(XX), function(xx) mu(XX[xx,]))
points(XX, yy, col = 'red', type = 'l')
}
##### Compare visualizations using arc length and euclidean distance
post_mean <- gp_post_mean_factory(X, y, kern, nugget)
gp.dist <- function(a, b) gp_arclen(post_mean, a, b)
euclidean.dist <- function(a, b) norm(b-a, '2')
gparc_mds <- smacof_forward_mds(high_d = X, weights = rep(1,p), dist.func = gp.dist,
n.inits = 1, std = F)
euc_mds <- smacof_forward_mds(high_d = X, weights = rep(1,p),
dist.func = euclidean.dist,
n.inits = 1)
##Plot the GParclength one in polar coords
cart2polar <- function(X) {
Y <- matrix(NA, ncol = ncol(X), nrow = nrow(X))
Y[,1] <- sqrt(X[,1]^2 + X[,2]^2)
Y[,2] <- atan(X[,2] / X[,1])
return(Y)
}
# Compare the output
low_d1 <- gparc_mds$par
low_d2 <- euc_mds$par
cols <- c('red', 'blue')
par(mfrow=c(1,2))
plot(low_d1, lwd = 0, main = paste('GPArclength MDS', 'stress:',gparc_mds$value),
lty=0)
text(low_d1[,1], low_d1[,2], 1:n)
plot(low_d2, lwd = 0, main = paste('Euclidean MDS', 'stress:',euc_mds$value), lty=0)
text(low_d2[,1], low_d2[,2], 1:n)
#Get the two distance matrices and compare them
low_1 <- as.matrix(dist(low_d1))
low_2 <- as.matrix(dist(low_d2))
print(norm(low_1 - low_2))
## Save the output
save.image(paste('./data/onegpnd_', seed, '.RData', sep = ''))
|
library(stringr)
library(ggplot2)
#1.Write a function read_post that reads
#a single Craigslist post from a text file.
#===========================================
#In each function to read title,text, price etc.
#I extract them by seeing which line they are located in
#since these information in all the post have the same location.
#function to read title
read_title <- function(post){
  # The first line of every Craigslist post file is its title.
  post[1]
}
#function to read text
read_text <- function(post){
  # Free-form body text runs from line 3 up to the 9th-from-last line; the
  # trailing 8 lines hold the structured fields (date, price, location,
  # rooms, sqft).
  body_lines <- post[3:(length(post)-8)]
  # Collapse to one string so the text fits in a single data-frame cell.
  paste(body_lines, collapse = "\n")
}
#function to read date
read_date <- function(post){
  # The date field is always the 7th line from the end of the post.
  raw_line <- post[length(post)-6]
  # Strip the "Date Posted" label and the ": " separator, leaving the value.
  gsub('Date Posted|\\: ', '', raw_line)
}
#function to read price
read_price <- function(post){
  # The price field is always the 6th line from the end of the post.
  raw_line <- post[length(post)-5]
  # Strip the "Price" label, the ": " separator and the "$" sign.
  gsub('Price|\\: |\\$', '', raw_line)
}
#function to read latitude
read_latitude <- function(post){
  # The latitude field is always the 5th line from the end of the post.
  raw_line <- post[length(post)-4]
  gsub('Latitude|\\: ', '', raw_line)
}
#function to read longitude
read_longitude <- function(post){
  # The longitude field is always the 4th line from the end of the post.
  raw_line <- post[length(post)-3]
  gsub('Longitude|\\: ', '', raw_line)
}
#function to read bedrooms
read_bedrooms <- function(post){
  # The bedrooms field is always the 3rd line from the end of the post.
  raw_line <- post[length(post)-2]
  gsub('Bedrooms|\\: ', '', raw_line)
}
#function to read bathrooms
read_bathrooms <- function(post){
  # The bathrooms field is always the 2nd line from the end of the post.
  raw_line <- post[length(post)-1]
  gsub('Bathrooms|\\: ', '', raw_line)
}
#function to read sqft
read_sqft <- function(post){
  # The square-footage field is always the last line of the post.
  raw_line <- post[length(post)]
  gsub('Sqft|\\: ', '', raw_line)
}
#function to read a single post
read_post <- function(file){
  # Parse one Craigslist post file into a character vector of its nine
  # fields, in the fixed order expected by read_all_posts():
  # title, text, date, price, latitude, longitude, bedrooms, bathrooms, sqft.
  lines <- readLines(file)
  c(
    read_title(lines),
    read_text(lines),
    read_date(lines),
    read_price(lines),
    read_latitude(lines),
    read_longitude(lines),
    read_bedrooms(lines),
    read_bathrooms(lines),
    read_sqft(lines)
  )
}
#2.Write a function read_all_posts that uses read_post (from Question 1) to read
#all information from all posts in a directory and return them in a single data frame.
#=====================================================
read_all_posts <- function(directory){
  # Read every post file under `directory` (searched recursively) and return
  # a data frame with one row per post and the nine fields produced by
  # read_post() as columns.
  files = list.files(directory,full.names = TRUE,recursive = TRUE)
  # sapply() over files yields one column per post, so transpose to get one
  # row per post.
  all_files = sapply(files,read_post)
  # FIX: request character columns explicitly.  Under R < 4.0 the default
  # stringsAsFactors = TRUE turned every column into a factor, which is why
  # downstream code had to do as.numeric(as.character(...)); that code still
  # works unchanged with plain character columns.
  post_df <- data.frame(t(all_files), stringsAsFactors = FALSE)
  names(post_df)[1:9]<-c('title','text','date','price','latitude','longitude','bedrooms','bathrooms','sqft')
  # Drop the file paths that sapply() left as row names.
  rownames(post_df)<-NULL
  return(post_df)
}
#read all the post
post = read_all_posts('messy')
#4.Extract the rental price from the title of each Craigslist post.
#==================================================
#extract price from the titles
title_price = str_extract(post$title,'\\$[0-9]+')
#remove the $ from the price
title_price = gsub('\\$','',title_price)
#put it into the data frame
post$title_price = title_price
#check the type of two variables
typeof(post$price)
typeof(post$title_price)
#change two variables into numerical.
post$price = as.character(post$price)
post$price = as.numeric(post$price)
post$title_price = as.numeric(post$title_price)
#How do these prices compare to the user-specified prices?
#check the difference between between them
difference = post$price - post$title_price
difference = difference[!is.na(difference)]
table(difference == 0)#all the price are the same.
#5.Extract the deposit amount from the text of each Craigslist post.
#The reason why I use str_extract here is that I find
#that the deposit appears before the pet deposit in most
#text. So we only need to extract the first deposit.
#Draw a sentence with Deposit or deposit.
#Since some text do not have period to split each sentence,
#I set a limit on that:
#in a long sentence without period,
#we only read 30 characters before deposit and 30 characters after deposit.
deposit_sentence = str_extract(post$text,'[^[.-]]{0,30}[Dd]eposits?[^.]{0,30}')
tail(deposit_sentence)
head(deposit_sentence)
#remove the , from deposit, extract$and number from that,
#and then remove the $
deposit = gsub('\\,','',deposit_sentence)
deposit = str_extract(deposit,'\\$[0-9]+')
deposit = gsub('\\$','',deposit)
deposit = as.numeric(deposit)
#set the deposit to data frame
post$deposit = deposit
#check how many deposits do we get.
table(!is.na(deposit))
#Adjust the price like report 3.
#sometimes the values are not acctualy that large.
#There was just an issue in parsing the data
#If we read the posting for the highest priced apartment
#Then we see the price range is $3408-3742
#the price in this data is 34083742
post$price[post$price>=30000000] <- 3408
post$price[post$price==9951095] <- 995
#treat prices of $100 or less as parsing errors and set them to NA
post$price[post$price <= 100] <- NA
ggplot(post,aes(x = price,y = deposit)) + geom_point()+
labs(title = 'Relation between deposit and price') +
geom_density2d()
#6. Extract a categorical feature from each Craigslist post
# (any part) that measures whether the apartment allows
#pets: cats, dogs, both, or none.
#=================================================
#Find Cats in text
#Since there are many words like cation in the
#text, when detect cat, we should aviod i behind the cat.
cats = str_detect(post$text,'[Cc]ats?[^i]')
#detect the situation that cats are not allowed.
no_cats1 = str_detect(post$text,'[Cc]ats?[^i][^.]{0,10}[Nn]ot[^.]{0,5}[Aa]llowed')
no_cats2 = str_detect(post$text,'[Nn]o[^.]{0,10}[Cc]ats?[^i]')
no_cats = no_cats1 | no_cats2
#The pattern for the cats
cats = cats&!no_cats
#Find Dogs in text, similar to cats
dogs = str_detect(post$text,'[Dd]ogs?')
no_dogs1 = str_detect(post$text,'[Dd]ogs? [^.]{0,10}[Nn]ot[^.]{0,5}[Aa]llowed')
no_dogs2 = str_detect(post$text,'[Nn]o[^.]{0,10} [Dd]ogs?')
no_dogs = no_dogs1|no_dogs2
dogs = dogs&!no_dogs
#both allow cats and dogs
both = cats&dogs
#do not allow cats or dogs
none = no_cats&no_dogs
#put pets situation into data frame
post$pets = 'unknown'
post$pets[cats] = 'cats'
post$pets[dogs] = 'dogs'
post$pets[both] = 'both'
post$pets[none] = 'none'
#Before finish the pets' policy problem, let's extract the
#post that mentioned pets from the original post. Since some
#changes will be made on post$pets if we continue on pets' policy
#To get pet deposit from the post, we can extract pet deposit
#from the text that mention pets
pets_post <- subset(post,pets != 'none' & pets != 'unknown')
#Now let's continue on
#whether the apartment allows pets: cats, dogs, both, or none.
#For this problem, we have many unknown situation.
#Usually, If a post does not mention that, we assume
#the apartment allow both cats and dogs
post$pets[post$pets == 'unknown'] ='both'
table(post$pets)
#As for other kinds of pets, we can consider bird, fish and rabbit
#since these kinds of pets are most common.
#The strategy here is similar with dogs and cats
other_pets = str_detect(post$text,"([Bb]ird|[Ff]ish|[Rr]abbit)")
no_other_pets1 = str_detect(post$text,'([Bb]ird|[Ff]ish|[Rr]abbit)[^.]*[Nn]ot[^.]*[Aa]llowed')
no_other_pets2 = str_detect(post$text,'[Nn]o[^.]*[Dd]ogs?')
no_other_pets = no_other_pets1|no_other_pets2
other_pets = other_pets&!no_other_pets
table(other_pets)#see how many apartments allow other pets
#Now, let's investigate in pets deposit, we should extract
#the pets deposit at first
#Situation1:$xxx pets xxx deposits
#Extract this pattern first.
pets_deposit_sentence = str_extract(pets_post$text,
'\\$[0-9]+[^.]*[Pp]ets?[^.]*[Dd]eposits?')
#extract the $xxx from the sentence and remove$
pet_deposit1 = gsub('\\,','',pets_deposit_sentence)
pet_deposit1 = str_extract(pet_deposit1,'\\$[0-9]+')
pet_deposit1 = gsub('\\$','',pet_deposit1)
pet_deposit1 = as.numeric(pet_deposit1)
pet_deposit1 = pet_deposit1[!is.na(pet_deposit1)]
#find which texts have these patterns and put deposit
#into the data frame
pets_deposit_pattern1 = str_detect(pets_post$text,
'\\$[0-9]+[^.]*[Pp]ets?[^.]*[Dd]eposits?')
pets_post$pets_deposit = 'none'
pets_post$pets_deposit[pets_deposit_pattern1] = pet_deposit1
#Situation2: pets xxx$xxx deposits
#The way I do this is similar to situation1
pets_deposit_sentence2 = str_extract(pets_post$text,
'[Dd]eposits?[^.]*\\$[0-9]+[^.]*[Pp]ets?')
pet_deposit2 = gsub('\\,','',pets_deposit_sentence2)
pet_deposit2 = str_extract(pet_deposit2,'\\$[0-9]+')
pet_deposit2 = gsub('\\$','',pet_deposit2)
pet_deposit2 = as.numeric(pet_deposit2)
pet_deposit2 = pet_deposit2[!is.na(pet_deposit2)]
pets_deposit_pattern2 = str_detect(pets_post$text,
'[Dd]eposits?[^.]*\\$[0-9]+[^.]*[Pp]ets?')
pets_post$pets_deposit[pets_deposit_pattern2] = pet_deposit2
#Situation3: pets xxx deposits xxx$xxx
#what I do here is similar to situation1
pets_deposit_sentence3 = str_extract(pets_post$text,
'[Pp]ets?[^.]*[Dd]eposits?[^.]*\\$[0-9]+')
pet_deposit3 = gsub('\\,','',pets_deposit_sentence3)
pet_deposit3 = str_extract(pet_deposit3,'\\$[0-9]+')
pet_deposit3 = gsub('\\$','',pet_deposit3)
pet_deposit3 = as.numeric(pet_deposit3)
pet_deposit3 = pet_deposit3[!is.na(pet_deposit3)]
pets_deposit_pattern3 = str_detect(pets_post$text,
'[Pp]ets?[^.]*[Dd]eposits?[^.]*\\$[0-9]+')
pets_post$pets_deposit[pets_deposit_pattern3] = pet_deposit3
#Make a graphic that shows how pet deposits are distributed
#set none as NA
pets_post$pets_deposit[pets_post$pets_deposit == 'none'] <- NA
#since the pet deposit usually will not
#be to high, for the pet deposit over $2500, maybe there are
#some problems with that when we extract that. So we set
#pets deposit over $2500 as NA.
pets_post$pets_deposit = as.numeric(pets_post$pets_deposit)
pets_post$pets_deposit[pets_post$pets_deposit > 2500] <- NA
ggplot(pets_post,aes(x = pets_deposit)) + geom_histogram() +
labs(title = 'Distribution of pets deposit',x = 'pets deposit')
#7.Extract a categorical feature from each Craigslist post that measures
#whether each apartment has some kind of heating: a heater, a fireplace
#(including wood-burning stoves), both, or neither of these.
#======================================================
#Detect heater from text
heater = str_detect(tolower(post$text),'heater')
#Detect fireplace from text
fireplace = str_detect(tolower(post$text),'fireplace')
wood_burning_stoves = str_detect(tolower(post$text),'wood[\\- ]?burning stove')
fireplace = fireplace | wood_burning_stoves
both = heater&fireplace
#put the heating into the data frame
post$heating = 'none'
post$heating[heater] = 'heater'
post$heating[fireplace] = 'fireplace'
post$heating[both] = 'both'
#for the post that does not mention any of these, we assume
#that this apartment has neither of these
post$heating[post$heating == 'none'] = 'neither'
#Detect air conditioning in the post
#it can be air conditioning or air-condition.
air_conditioning = str_detect(tolower(post$text),'air[\\- ]?condition')
#put air conditioning it the post
#Usually, if an apartment do not have air conditioner,
#they will not say so in their post, so we can initialize
#post$air_conditioner as none.
post$air_condition = 'none'
post$air_condition[air_conditioning] = 'TRUE'
#check the number of heating and air conditioning
table(post$heating != 'neither')
heater_amount = 10010
table(post$air_condition == 'TRUE')
air_condition_amount = 8514
#Do apartments with air conditioning typically have heating?
post_air_conditioning = subset(post,air_condition == 'TRUE')
post_air_conditioning$heating[post_air_conditioning$heating != 'neither'] <- 'TRUE'
post_air_conditioning$heating[post_air_conditioning$heating == 'neither'] <- 'FALSE'
#Draw a barplot to reflect amount
ggplot(post_air_conditioning,aes(heating)) + geom_bar() +
labs(title = 'Wether having heating')
table(post_air_conditioning$heating)
#Do apartments with heating typically have air conditioning?
post_heating = subset(post,post$heating != 'neither')
post_heating$air_condition[post_heating$air_condition == 'none'] <- 'FALSE'
#Draw a barplot to reflect amount
ggplot(post_heating,aes(air_condition)) + geom_bar() +
labs(title = 'Wether having air conditioning')
#8. Craigslist has an optional feature to hide email addresses
#and phone numbers from web scrapers like the one that scraped this data set.
#==========================================================
#phone has the form xxx-xxx-xxxx or (xxx) xxx-xxx etc.
phone_number = str_detect(post$text,'[(]?[0-9]{3}[)]?[- ]?[0-9]{3}[- ][0-9]{4}')
#email has the form xxx@xxxx.xxx.
email_addresses = str_detect(post$text,'[^ ]+\\@[^ ]*?\\.[A-z]{2,3}')
do_not_hide = phone_number | email_addresses
#Put the situation into the data frame
post$whether_hide = 'Hide'
post$whether_hide[do_not_hide] = 'Do not hide'
table(post$whether_hide)#only 25 do not hide
#draw a bar plot to reflect the amount
ggplot(post,aes(whether_hide)) + geom_bar() +
labs(title = 'Whether hide the contact information')
| /report5.R | no_license | Jiegu000/sharing1 | R | false | false | 13,303 | r | library(stringr)
library(ggplot2)
#1.Write a function read_post that reads
#a single Craigslist post from a text file.
#===========================================
#In each function to read title,text, price etc.
#I extract them by seeing which line they are located in
#since these information in all the post have the same location.
#function to read title
# Extract the post title: by the scraper's layout it is always the first line.
read_title <- function(post) {
  post[1]
}
#function to read text
# Extract the post body.  Lines 1-2 hold the title and a separator, and the
# last 8 lines are labelled metadata fields, so the body is everything in
# between, re-joined into one newline-separated string so that it fits in a
# single data-frame cell.
read_text <- function(post) {
  body <- post[3:(length(post) - 8)]
  paste(body, collapse = "\n")
}
#function to read date
# Extract the posting date (7th line from the end), stripping the
# "Date Posted: " label.
read_date <- function(post) {
  gsub('Date Posted|\\: ', '', post[length(post) - 6])
}
#function to read price
# Extract the listed price (6th line from the end), stripping the
# "Price: " label and the dollar sign.  Returned as character.
read_price <- function(post) {
  gsub('Price|\\: |\\$', '', post[length(post) - 5])
}
#function to read latitude
# Extract the latitude (5th line from the end), stripping its label.
read_latitude <- function(post) {
  gsub('Latitude|\\: ', '', post[length(post) - 4])
}
#function to read longitude
# Extract the longitude (4th line from the end), stripping its label.
read_longitude <- function(post) {
  gsub('Longitude|\\: ', '', post[length(post) - 3])
}
#function to read bedrooms
# Extract the bedroom count (3rd line from the end), stripping its label.
read_bedrooms <- function(post) {
  gsub('Bedrooms|\\: ', '', post[length(post) - 2])
}
#function to read bathrooms
# Extract the bathroom count (2nd line from the end), stripping its label.
read_bathrooms <- function(post) {
  gsub('Bathrooms|\\: ', '', post[length(post) - 1])
}
#function to read sqft
# Extract the square footage (last line), stripping its label.
read_sqft <- function(post) {
  gsub('Sqft|\\: ', '', post[length(post)])
}
#function to read a single post
# Read one Craigslist post from a text file and return a character vector of
# nine fields: title, body text, date, price, latitude, longitude, bedrooms,
# bathrooms and sqft.  Relies on the fixed layout the read_* helpers assume
# (title on line 1, labelled metadata lines at the bottom of the file).
read_post <- function(file){
# raw lines of the post file
post <- readLines(file)
title <- read_title(post)
text <- read_text(post)
date <- read_date(post)
price <- read_price(post)
latitude <- read_latitude(post)
longitude <- read_longitude(post)
bedrooms <- read_bedrooms(post)
bathrooms <- read_bathrooms(post)
sqft <- read_sqft(post)
# all fields are returned as character; callers convert types as needed
return(c(title,text,date,price,latitude,longitude,bedrooms,bathrooms,sqft))
}
#2.Write a function read_all_posts that uses read_post (from Question 1) to read
#all information from all posts in a directory and return them in a single data frame.
#=====================================================
# Read every post file found under `directory` (searched recursively) and
# combine them into one data frame with one row per post and nine named
# character columns.
read_all_posts <- function(directory) {
  post_files <- list.files(directory, full.names = TRUE, recursive = TRUE)
  parsed <- sapply(post_files, read_post)
  # sapply stacks the nine fields as rows, so transpose to one post per row
  posts <- data.frame(t(parsed))
  names(posts)[1:9] <- c('title', 'text', 'date', 'price', 'latitude',
                         'longitude', 'bedrooms', 'bathrooms', 'sqft')
  rownames(posts) <- NULL
  posts
}
#read all the post
post = read_all_posts('messy')
#4.Extract the rental price from the title of each Craigslist post.
#==================================================
#extract price from the titles
title_price = str_extract(post$title,'\\$[0-9]+')
#remove the $ from the price
title_price = gsub('\\$','',title_price)
#put it into the data frame
post$title_price = title_price
#check the type of two variables
typeof(post$price)
typeof(post$title_price)
#change two variables into numerical.
post$price = as.character(post$price)
post$price = as.numeric(post$price)
post$title_price = as.numeric(post$title_price)
#How do these prices compare to the user-specified prices?
#check the difference between between them
difference = post$price - post$title_price
difference = difference[!is.na(difference)]
table(difference == 0)#all the price are the same.
#5.Extract the deposit amount from the text of each Craigslist post.
#The reason why I use str_extract here is that I find
#that the deposit appears befor the pet deposit in most
#text. So we only need to extract the first deposit.
#Draw a sentence with Deposit or deposit.
#Since some text do not have period to split each sentence,
#I set a limit on that:
#in a long sentence without period,
#we only read 30 characters before deposit and 30 characters after deposit.
deposit_sentence = str_extract(post$text,'[^[.-]]{0,30}[Dd]eposits?[^.]{0,30}')
tail(deposit_sentence)
head(deposit_sentence)
#remove the , from deposit, extract$and number from that,
#and then remove the $
deposit = gsub('\\,','',deposit_sentence)
deposit = str_extract(deposit,'\\$[0-9]+')
deposit = gsub('\\$','',deposit)
deposit = as.numeric(deposit)
#set the deposit to data frame
post$deposit = deposit
#check how many deposits do we get.
table(!is.na(deposit))
#Adjust the price like report 3.
#sometimes the values are not acctualy that large.
#There was just an issue in parsing the data
#If we read the posting for the highest priced apartment
#Then we see the price range is $3408-3742
#the price in this data is 34083742
post$price[post$price>=30000000] <- 3408
post$price[post$price==9951095] <- 995
#smaller than 100 price delete
post$price[post$price <= 100] <- NA
ggplot(post,aes(x = price,y = deposit)) + geom_point()+
labs(title = 'Relation between deposit and price') +
geom_density2d()
#6. Extract a categorical feature from each Craigslist post
# (any part) that measures whether the apartment allows
#pets: cats, dogs, both, or none.
#=================================================
#Find Cats in text
#Since there are many words like cation in the
#text, when detect cat, we should aviod i behind the cat.
cats = str_detect(post$text,'[Cc]ats?[^i]')
#detect the situation that cats are not allowed.
no_cats1 = str_detect(post$text,'[Cc]ats?[^i][^.]{0,10}[Nn]ot[^.]{0,5}[Aa]llowed')
no_cats2 = str_detect(post$text,'[Nn]o[^.]{0,10}[Cc]ats?[^i]')
no_cats = no_cats1 | no_cats2
#The pattern for the cats
cats = cats&!no_cats
#Find Dogs in text, similar to cats
dogs = str_detect(post$text,'[Dd]ogs?')
no_dogs1 = str_detect(post$text,'[Dd]ogs? [^.]{0,10}[Nn]ot[^.]{0,5}[Aa]llowed')
no_dogs2 = str_detect(post$text,'[Nn]o[^.]{0,10} [Dd]ogs?')
no_dogs = no_dogs1|no_dogs2
dogs = dogs&!no_dogs
#both allow cats and dogs
both = cats&dogs
#do not allow cats or dogs
none = no_cats&no_dogs
#put pets situation into data frame
post$pets = 'unknown'
post$pets[cats] = 'cats'
post$pets[dogs] = 'dogs'
post$pets[both] = 'both'
post$pets[none] = 'none'
#Before finish the pets' policy problem, let's extract the
#post that mentioned pets from the original post. Since some
#changes will be made on post$pets if we continue on pets' policy
#To get pet deposit from the post, we can extract pet deposit
#from the text that mention pets
pets_post <- subset(post,pets != 'none' & pets != 'unknown')
#Now let's continue on
#whether the apartment allows pets: cats, dogs, both, or none.
#For this problem, we have many unknown situation.
#Usually, If a post does not mention that, we assume
#the apartment allow both cats and dogs
post$pets[post$pets == 'unknown'] ='both'
table(post$pets)
#As for other kinds of pets, we can consider bird, fish and rabbit
#since these kinds of pets are most common.
#The strategy here is similar with dogs and cats
other_pets = str_detect(post$text,"([Bb]ird|[Ff]ish|[Rr]abbit)")
no_other_pets1 = str_detect(post$text,'([Bb]ird|[Ff]ish|[Rr]abbit)[^.]*[Nn]ot[^.]*[Aa]llowed')
no_other_pets2 = str_detect(post$text,'[Nn]o[^.]*[Dd]ogs?')
no_other_pets = no_other_pets1|no_other_pets2
other_pets = other_pets&!no_other_pets
table(other_pets)#see how many apartments allow other pets
#Now, let's investigate in pets deposit, we should extract
#the pets deposit at first
#Situation1:$xxx pets xxx deposits
#Extract this pattern first.
pets_deposit_sentence = str_extract(pets_post$text,
'\\$[0-9]+[^.]*[Pp]ets?[^.]*[Dd]eposits?')
#extract the $xxx from the sentence and remove$
pet_deposit1 = gsub('\\,','',pets_deposit_sentence)
pet_deposit1 = str_extract(pet_deposit1,'\\$[0-9]+')
pet_deposit1 = gsub('\\$','',pet_deposit1)
pet_deposit1 = as.numeric(pet_deposit1)
pet_deposit1 = pet_deposit1[!is.na(pet_deposit1)]
#find which texts have these patterns and put deposit
#into the data frame
pets_deposit_pattern1 = str_detect(pets_post$text,
'\\$[0-9]+[^.]*[Pp]ets?[^.]*[Dd]eposits?')
pets_post$pets_deposit = 'none'
pets_post$pets_deposit[pets_deposit_pattern1] = pet_deposit1
#Situation2: pets xxx$xxx deposits
#The way I do this is similar to situation1
pets_deposit_sentence2 = str_extract(pets_post$text,
'[Dd]eposits?[^.]*\\$[0-9]+[^.]*[Pp]ets?')
pet_deposit2 = gsub('\\,','',pets_deposit_sentence2)
pet_deposit2 = str_extract(pet_deposit2,'\\$[0-9]+')
pet_deposit2 = gsub('\\$','',pet_deposit2)
pet_deposit2 = as.numeric(pet_deposit2)
pet_deposit2 = pet_deposit2[!is.na(pet_deposit2)]
pets_deposit_pattern2 = str_detect(pets_post$text,
'[Dd]eposits?[^.]*\\$[0-9]+[^.]*[Pp]ets?')
pets_post$pets_deposit[pets_deposit_pattern2] = pet_deposit2
#Situation3: pets xxx deposits xxx$xxx
#what I do here is similar to situation1
pets_deposit_sentence3 = str_extract(pets_post$text,
'[Pp]ets?[^.]*[Dd]eposits?[^.]*\\$[0-9]+')
pet_deposit3 = gsub('\\,','',pets_deposit_sentence3)
pet_deposit3 = str_extract(pet_deposit3,'\\$[0-9]+')
pet_deposit3 = gsub('\\$','',pet_deposit3)
pet_deposit3 = as.numeric(pet_deposit3)
pet_deposit3 = pet_deposit3[!is.na(pet_deposit3)]
pets_deposit_pattern3 = str_detect(pets_post$text,
'[Pp]ets?[^.]*[Dd]eposits?[^.]*\\$[0-9]+')
pets_post$pets_deposit[pets_deposit_pattern3] = pet_deposit3
#Make a graphic that shows how pet deposits are distributed
#set none as NA
pets_post$pets_deposit[pets_post$pets_deposit == 'none'] <- NA
#since the pet deposit usually will not
#be to high, for the pet deposit over $2500, maybe there are
#some problems with that when we extract that. So we set
#pets deposit over $2500 as NA.
pets_post$pets_deposit = as.numeric(pets_post$pets_deposit)
pets_post$pets_deposit[pets_post$pets_deposit > 2500] <- NA
ggplot(pets_post,aes(x = pets_deposit)) + geom_histogram() +
labs(title = 'Distribution of pets deposit',x = 'pets deposit')
#7.Extract a categorical feature from each Craigslist post that measures
#whether each apartment has some kind of heating: a heater, a fireplace
#(including wood-burning stoves), both, or neither of these.
#======================================================
#Detect heater from text
heater = str_detect(tolower(post$text),'heater')
#Detect fireplace from text
fireplace = str_detect(tolower(post$text),'fireplace')
wood_burning_stoves = str_detect(tolower(post$text),'wood[\\- ]?burning stove')
fireplace = fireplace | wood_burning_stoves
both = heater&fireplace
#put the heating into the data frame
post$heating = 'none'
post$heating[heater] = 'heater'
post$heating[fireplace] = 'fireplace'
post$heating[both] = 'both'
#for the post that does not mention any of these, we assume
#that this apartment has neither of these
post$heating[post$heating == 'none'] = 'neither'
#Detect air conditioning in the post
#it can be air conditioning or air-condition.
air_conditioning = str_detect(tolower(post$text),'air[\\- ]?condition')
#put air conditioning it the post
#Usually, if an apartment do not have air conditioner,
#they will not say so in their post, so we can initialize
#post$air_conditioner as none.
post$air_condition = 'none'
post$air_condition[air_conditioning] = 'TRUE'
#check the number of heating and air conditioning
table(post$heating != 'neither')
heater_amount = 10010
table(post$air_condition == 'TRUE')
air_condition_amount = 8514
#Do apartments with air conditioning typically have heating?
post_air_conditioning = subset(post,air_condition == 'TRUE')
post_air_conditioning$heating[post_air_conditioning$heating != 'neither'] <- 'TRUE'
post_air_conditioning$heating[post_air_conditioning$heating == 'neither'] <- 'FALSE'
#Draw a barplot to reflect amount
ggplot(post_air_conditioning,aes(heating)) + geom_bar() +
labs(title = 'Wether having heating')
table(post_air_conditioning$heating)
#Do apartments with heating typically have air conditioning?
post_heating = subset(post,post$heating != 'neither')
post_heating$air_condition[post_heating$air_condition == 'none'] <- 'FALSE'
#Draw a barplot to reflect amount
ggplot(post_heating,aes(air_condition)) + geom_bar() +
labs(title = 'Wether having air conditioning')
#8. Craigslist has an optional feature to hide email addresses
#and phone numbers from web scrapers like the one that scraped this data set.
#==========================================================
#phone has the form xxx-xxx-xxxx or (xxx) xxx-xxx etc.
phone_number = str_detect(post$text,'[(]?[0-9]{3}[)]?[- ]?[0-9]{3}[- ][0-9]{4}')
#email has the form xxx@xxxx.xxx.
email_addresses = str_detect(post$text,'[^ ]+\\@[^ ]*?\\.[A-z]{2,3}')
do_not_hide = phone_number | email_addresses
#Put the situation into the data frame
post$whether_hide = 'Hide'
post$whether_hide[do_not_hide] = 'Do not hide'
table(post$whether_hide)#only 25 do not hide
#draw a bar plot to reflect the amount
ggplot(post,aes(whether_hide)) + geom_bar() +
labs(title = 'Whether hide the contact information')
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Wrap a matrix together with a cache for its inverse.  The four returned
# accessor functions close over this call's environment, so the matrix and
# the cached inverse persist between calls.  Assigning a new matrix through
# setMatrix() clears any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cache <- NULL
  setMatrix <- function(newValue) {
    x <<- newValue
    # the old inverse no longer matches the new matrix
    cache <<- NULL
  }
  getMatrix <- function() x
  cacheInverse <- function(solve) cache <<- solve
  getInverse <- function() cache
  list(setMatrix = setMatrix,
       getMatrix = getMatrix,
       cacheInverse = cacheInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
# Return the inverse of the special "matrix" created by makeCacheMatrix.
# If the inverse was already computed it is returned from the cache;
# otherwise it is computed with solve(), stored back in the cache, and
# returned.  Extra arguments are forwarded to solve().
#
# Bug fix: the original body referenced a non-existent `y` instead of the
# parameter `x`, so the function failed unless a global `y` happened to exist.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # get the cached value
  inverse <- x$getInverse()
  # if a cached value exists return it
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  # otherwise get the matrix, calculate the inverse and store it in the cache
  data <- x$getMatrix()
  inverse <- solve(data, ...)
  x$cacheInverse(inverse)
  # return the inverse
  inverse
}
| /cachematrix.R | no_license | teekswati/ProgrammingAssignment2 | R | false | false | 1,654 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Hold a matrix plus a cached copy of its inverse.  Returns a list of four
# closures sharing this environment; storing a new matrix via setMatrix()
# invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cache <- NULL
  setMatrix <- function(newValue) {
    x <<- newValue
    cache <<- NULL  # cached inverse belongs to the previous matrix
  }
  getMatrix <- function() x
  cacheInverse <- function(solve) cache <<- solve
  getInverse <- function() cache
  list(setMatrix = setMatrix,
       getMatrix = getMatrix,
       cacheInverse = cacheInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
# Return the inverse of the cache-enabled "matrix" `x` (built with
# makeCacheMatrix), computing and caching it on first use.  Extra
# arguments are forwarded to solve().
#
# Bug fix: the original body used an undefined `y` where the parameter
# `x` was intended, making the function error in a clean session.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # get the cached value
  inverse <- x$getInverse()
  # if a cached value exists return it
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  # otherwise get the matrix, calculate the inverse and cache it
  data <- x$getMatrix()
  inverse <- solve(data, ...)
  x$cacheInverse(inverse)
  # return the inverse
  inverse
}
|
# Append `tmpMatrix` to the file `tmpFileName` as tab-separated, unquoted
# text.  blnRowNames / blnColNames control whether row and column names are
# written.  The connection is opened in append mode, so repeated calls
# accumulate output in the same file.
WriteMatrixToFile <- function(tmpMatrix, tmpFileName, blnRowNames,
                              blnColNames) {
  con <- file(tmpFileName, "at")
  write.table(tmpMatrix, con, sep = "\t", quote = FALSE,
              row.names = blnRowNames, col.names = blnColNames)
  close(con)
}
# RUnit test: WriteMatrixToFile must raise an error when the file name is
# numeric, since file() cannot open a numeric "description".
test_WriteMatrixToFile <- function() {
checkException(WriteMatrixToFile(tmpMatrix= "string",
tmpFileName= 1, blnRowNames=TRUE,
blnColNames=TRUE), msg="Unable to use a number for file name")
}
| /inst/unitTests/test_WriteMatrixToFile.R | no_license | nlawlor/multiClust | R | false | false | 482 | r | WriteMatrixToFile <- function(tmpMatrix, tmpFileName, blnRowNames,
blnColNames) {
output <- file(tmpFileName, "at")
write.table(tmpMatrix, output, sep="\t", quote=FALSE,
row.names=blnRowNames, col.names=blnColNames)
close(output)
}
test_WriteMatrixToFile <- function() {
checkException(WriteMatrixToFile(tmpMatrix= "string",
tmpFileName= 1, blnRowNames=TRUE,
blnColNames=TRUE), msg="Unable to use a number for file name")
}
|
library(kerasR)
### Name: Optimizers
### Title: Optimizers
### Aliases: Optimizers SGD RMSprop Adagrad Adadelta Adam Adamax Nadam
### ** Examples
if(keras_available()) {
X_train <- matrix(rnorm(100 * 10), nrow = 100)
Y_train <- to_categorical(matrix(sample(0:2, 100, TRUE), ncol = 1), 3)
mod <- Sequential()
mod$add(Dense(units = 50, input_shape = dim(X_train)[2]))
mod$add(Activation("relu"))
mod$add(Dense(units = 3))
mod$add(Activation("softmax"))
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = SGD())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = RMSprop())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Adagrad())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Adadelta())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Adam())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Adamax())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Nadam())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
}
| /data/genthat_extracted_code/kerasR/examples/Optimizers.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,841 | r | library(kerasR)
### Name: Optimizers
### Title: Optimizers
### Aliases: Optimizers SGD RMSprop Adagrad Adadelta Adam Adamax Nadam
### ** Examples
if(keras_available()) {
X_train <- matrix(rnorm(100 * 10), nrow = 100)
Y_train <- to_categorical(matrix(sample(0:2, 100, TRUE), ncol = 1), 3)
mod <- Sequential()
mod$add(Dense(units = 50, input_shape = dim(X_train)[2]))
mod$add(Activation("relu"))
mod$add(Dense(units = 3))
mod$add(Activation("softmax"))
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = SGD())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = RMSprop())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Adagrad())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Adadelta())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Adam())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Adamax())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
keras_compile(mod, loss = 'categorical_crossentropy', optimizer = Nadam())
keras_fit(mod, X_train, Y_train, batch_size = 32, epochs = 5,
verbose = 0, validation_split = 0.2)
}
|
## packages in R
?diamonds
library(ggplot2)
| /ch_2__vec_and_dfs/13__Packages.R | no_license | asamadgithub/R__complete | R | false | false | 47 | r | ## packages in R
?diamonds
library(ggplot2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query_gene.R
\name{wb_variants}
\alias{wb_variants}
\title{Wormbase gene variants}
\usage{
wb_variants(gene)
}
\arguments{
\item{gene}{Name of the C. elegans gene to search for.}
}
\value{
data frame of nine columns: Wormbase variant ID, allele name, isoform ID,
locations (exon, intron, etc), molecular change (deletion, substitution, etc),
gene effect (missense, nonsense, etc), amino acid change, amino acid position,
and associated strain. All empty fields contain NA as a character. If the gene
is not found in C. elegans, an empty data frame will be returned.
}
\description{
Using a given gene name, the Wormbase REST API is queried to create a data
frame containing information on the different variants of the gene, and the
associated strains.
}
| /man/wb_variants.Rd | permissive | AndersenLab/wormbase-api | R | false | true | 824 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query_gene.R
\name{wb_variants}
\alias{wb_variants}
\title{Wormbase gene variants}
\usage{
wb_variants(gene)
}
\arguments{
\item{gene}{Name of the C. elegans gene to search for.}
}
\value{
data frame of nine columns: Wormbase variant ID, allele name, isoform ID,
locations (exon, intron, etc), molecular change (deletion, subsitution, etc),
gene effect (missense, nonsense, etc), amino acid change, amino acid position,
and associated strain. All empty fields contain NA as a character. If the gene
is not found in C. elegans, an empty data frame will be returned.
}
\description{
Using a given gene name, the Wormbase REST API is queried to create a data
frame containing information on the different variants of the gene, and the
associated strains.
}
|
## In this assignment, we are creating a list that contains a matrix, either
## a matrix inverse or NULL, a function to get the matrix out, and a function
## that either gets the matrix inverse if it's already been solved for, and if
## it's not, invert the matrix, then store the matrix inverse so that you don't
## have to calculate it again.
## In other words, it's a really, really, really convoluted (dare I say dumb?)
## way of implementing an object.
## Furthermore, we are creating another function which extracts the inverse
## of the matrix if it's already been calculated, or calculates the matrix
## inverse and tells the quasi-object what the matrix inverse is if it hasn't
## already been calculated.
## This function constructs the list described above
## (which is masquerading as an object).
## Build a cache-enabled wrapper around matrix `x`.  The four returned
## functions share this call's environment, so the matrix and its lazily
## computed inverse persist between calls; storing a new matrix clears
## the cached inverse.
makeCacheMatrix <- function(x = matrix())
{
  inverse <- NULL
  set_matrix <- function(y) {
    x <<- y
    ## a new matrix invalidates any previously stored inverse
    inverse <<- NULL
  }
  get_matrix <- function() {
    x
  }
  set_inverse <- function(the_inverse) {
    inverse <<- the_inverse
  }
  get_inverse <- function() {
    inverse
  }
  list(set_inverse = set_inverse, get_inverse = get_inverse,
       set_matrix = set_matrix, get_matrix = get_matrix)
}
## This function returns the inverse stored in x if it has already been
## calculated. If not, it calculates the inverse, sets in inverse in x,
## then returns the inverse.
## Return the inverse of the matrix held in `x` (a makeCacheMatrix object).
## A previously computed inverse is reused; otherwise the inverse is
## computed with solve(), stored back into `x`, and returned.
cacheSolve <- function(x, ...)
{
  ## Return a matrix that is the inverse of 'x'
  cached <- x$get_inverse()
  if (!is.null(cached)) {
    message("Getting cached data")
    return(cached)
  }
  ## cache miss: compute, store, return
  fresh <- solve(x$get_matrix())
  x$set_inverse(fresh)
  fresh
}
| /cachematrix.R | no_license | MrMorden81/ProgrammingAssignment2 | R | false | false | 1,661 | r | ## In this assignment, we are creating a list that contains a matrix, either
## a matrix inverse or NULL, a function to get the matrix out, and a function
## that either gets the matrix inverse if it's already been solved for, and if
## it's not, invert the matrix, then store the matrix inverse so that you don't
## have to calculate it agian.
## In other words, it's a really, really, really convoluted (dare I say dumb?)
## way of implimenting an object.
## Furthermore, we are creating another function which extracts the inverse
## of the matrix if it's already been calculated, or calculates the matrix
## inverse and tells the quasi-object what the matrix inverse is if it hasn't
## already been calculated.
## This function constructs the list described above
## (which is masquerading as an object).
## Pair a matrix with a cache slot for its inverse.  The returned list of
## closures reads and writes this environment, so state survives between
## calls; set_matrix() resets the cache because the old inverse no longer
## applies.
makeCacheMatrix <- function(x = matrix())
{
  inverse <- NULL
  set_matrix <- function(y) {
    x <<- y
    inverse <<- NULL  # drop the stale inverse
  }
  get_matrix <- function() {
    x
  }
  set_inverse <- function(the_inverse) {
    inverse <<- the_inverse
  }
  get_inverse <- function() {
    inverse
  }
  list(set_inverse = set_inverse, get_inverse = get_inverse,
       set_matrix = set_matrix, get_matrix = get_matrix)
}
## This function returns the inverse stored in x if it has already been
## calculated. If not, it calculates the inverse, sets in inverse in x,
## then returns the inverse.
## Compute (or fetch from cache) the inverse of the matrix wrapped in `x`,
## a makeCacheMatrix object.  The freshly computed inverse is stored back
## in `x` so later calls are free.
cacheSolve <- function(x, ...)
{
  ## Return a matrix that is the inverse of 'x'
  cached <- x$get_inverse()
  if (!is.null(cached)) {
    message("Getting cached data")
    return(cached)
  }
  ## not cached yet: invert, remember, return
  fresh <- solve(x$get_matrix())
  x$set_inverse(fresh)
  fresh
}
|
context("cusum_alpha_sim")
test_that("Output of alpha simulation", {
expected_results <- 0.038
works <- round(
cusum_alpha_sim(
failure_probability = 0.05,
n_patients = 100,
odds_multiplier = 2,
n_simulation = 10000,
limit = 2.96,
seed = 2046
),
3
)
expect_equal(works, expected_results)
})
test_that("Output of alpha simulation improv", {
expected_results <- 0.026
works <- round(
cusum_alpha_sim(
failure_probability = 0.1,
n_patients = 100,
odds_multiplier = 0.5,
n_simulation = 10000,
limit = - 2.96,
seed = 2046
),
3
)
expect_equal(works, expected_results)
})
test_that("Warning if CL is not same direction as OM 1",
expect_that(cusum_alpha_sim(
failure_probability = 0.1,
n_patients = 100,
odds_multiplier = 0.5,
n_simulation = 1000,
limit = 2.96,
seed = 2046
),
gives_warning())
)
test_that("Warning if CL is not same direction as OM 2",
expect_that(cusum_alpha_sim(
failure_probability = 0.1,
n_patients = 100,
odds_multiplier = 2,
n_simulation = 1000,
limit = - 2.96,
seed = 2046
),
gives_warning())
)
test_that("Error if OM = 1",
expect_that(cusum_alpha_sim(
failure_probability = 0.1,
n_patients = 100,
odds_multiplier = 1,
n_simulation = 1000,
limit = 2,
seed = 2046),
throws_error())
)
test_that("Warning for recoding failure_prob",
expect_that(cusum_alpha_sim(
failure_probability = .95,
n_patients = 100,
odds_multiplier = 2,
n_simulation = 1000,
limit = 2.96,
seed = 2046
),
gives_warning()
)
)
| /fuzzedpackages/cusum/tests/testthat/test-cusum_alpha_sim.R | no_license | akhikolla/testpackages | R | false | false | 2,060 | r | context("cusum_alpha_sim")
test_that("Output of alpha simulation", {
  # Simulated false-alarm probability of a deterioration CUSUM
  # (odds multiplier > 1, positive limit); the fixed seed keeps the
  # Monte Carlo estimate reproducible.
  expected_results <- 0.038
  works <- round(
    cusum_alpha_sim(
      failure_probability = 0.05,
      n_patients = 100,
      odds_multiplier = 2,
      n_simulation = 10000,
      limit = 2.96,
      seed = 2046
    ),
    3
  )
  expect_equal(works, expected_results)
})
test_that("Output of alpha simulation improv", {
  # Improvement CUSUM (odds multiplier < 1) monitored against a negative limit.
  expected_results <- 0.026
  works <- round(
    cusum_alpha_sim(
      failure_probability = 0.1,
      n_patients = 100,
      odds_multiplier = 0.5,
      n_simulation = 10000,
      limit = - 2.96,
      seed = 2046
    ),
    3
  )
  expect_equal(works, expected_results)
})
# The remaining tests used the soft-deprecated testthat 1e idiom
# expect_that(expr, gives_warning()) / expect_that(expr, throws_error());
# expect_warning() / expect_error() are the supported equivalents.
test_that("Warning if CL is not same direction as OM 1", {
  expect_warning(cusum_alpha_sim(
    failure_probability = 0.1,
    n_patients = 100,
    odds_multiplier = 0.5,
    n_simulation = 1000,
    limit = 2.96,
    seed = 2046
  ))
})
test_that("Warning if CL is not same direction as OM 2", {
  expect_warning(cusum_alpha_sim(
    failure_probability = 0.1,
    n_patients = 100,
    odds_multiplier = 2,
    n_simulation = 1000,
    limit = - 2.96,
    seed = 2046
  ))
})
test_that("Error if OM = 1", {
  # odds_multiplier = 1 means "no change in odds" and cannot be monitored.
  expect_error(cusum_alpha_sim(
    failure_probability = 0.1,
    n_patients = 100,
    odds_multiplier = 1,
    n_simulation = 1000,
    limit = 2,
    seed = 2046))
})
test_that("Warning for recoding failure_prob", {
  # failure_probability above 0.5 triggers a recoding warning.
  # NOTE(review): recoding behaviour inferred from the test name — confirm
  # against the cusum_alpha_sim() documentation.
  expect_warning(cusum_alpha_sim(
    failure_probability = .95,
    n_patients = 100,
    odds_multiplier = 2,
    n_simulation = 1000,
    limit = 2.96,
    seed = 2046
  ))
})
#' Identify the biological entity (BE) targeted by probes and
#' construct the CQL sub-query to map probes to the BE
#'
#' Internal use
#'
#' @param platform the platform of the probes
#'
#' @return A character value corresponding to the sub-query.
#' The \code{attr(,"be")} corresponds to the BE targeted by the probes.
#'
#' @seealso \code{\link{genBePath}}, \code{\link{listPlatforms}}
#'
genProbePath <- function(platform){
  # Which biological entity do probes of this platform target?
  targetedBe <- getTargetedBe(platform)
  # Build the CQL path from a probe node, through its <BE>ID node and any
  # chain of replacement/association edges, to the identified entity.
  subQuery <- paste0(
    '-[:targets]->(:', targetedBe, 'ID',
    ')-[:is_replaced_by|is_associated_to*0..]->()-[:identifies]->'
  )
  # Expose the targeted BE alongside the query string.
  attr(subQuery, "be") <- targetedBe
  subQuery
}
| /R/genProbePath.R | no_license | sankleta/BED | R | false | false | 654 | r | #' Identify the biological entity (BE) targeted by probes and
#' construct the CQL sub-query to map probes to the BE
#'
#' Internal use
#'
#' @param platform the platform of the probes
#'
#' @return A character value corresponding to the sub-query.
#' The \code{attr(,"be")} correspond to the BE targeted by probes
#'
#' @seealso \code{\link{genBePath}}, \code{\link{listPlatforms}}
#'
# Build the CQL sub-query that links probes of `platform` to the biological
# entity (BE) they target; the targeted BE is attached as attr(, "be").
genProbePath <- function(platform){
  # Which biological entity do probes of this platform target?
  targetedBe <- getTargetedBe(platform)
  # Path: probe -> <BE>ID node -> (replacement/association chain) -> entity.
  subQuery <- paste0(
    '-[:targets]->(:', targetedBe, 'ID',
    ')-[:is_replaced_by|is_associated_to*0..]->()-[:identifies]->'
  )
  attr(subQuery, "be") <- targetedBe
  subQuery
}
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.03959114463618e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615828120-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 487 | r | testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.03959114463618e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
#' Run a dynamic simulation
#'
#' Computes the values of the state variables for a sequence of times.
#'
#' @param model Object of class \code{rodeo} representing the model.
#' @param vars Named vector of the state variables' initial values.
#' @param pars Named vector of parameters.
#' @param times Vector of times for which the states are computed.
#' @param dllfile Shared library file holding the compiled model.
#'
#' @return The object returned by \code{deSolve::ode}.
#'
#' @note An error is generated if the integration was not successful.
#'   The shared library is unloaded on exit even in that case.
#'
#' @author David Kneis \email{david.kneis@@tu-dresden.de}
#'
#' @export
simul <- function(model, vars, pars, times, dllfile) {
  # Assign data
  model$setVars(vars)
  model$setPars(pars)
  # Per-variable integration tolerances from the model's variables table
  rtol <- model$getVarsTable()$rtol
  atol <- model$getVarsTable()$atol
  # Derive the library name by stripping the platform's dynlib extension
  ext <- substr(.Platform$dynlib.ext, 2, nchar(.Platform$dynlib.ext))
  dllname <- sub(pattern=paste0("(.+)[.]",ext,"$"), replacement="\\1",
    x=basename(dllfile))
  # Load the compiled model and guarantee it is unloaded again, even when
  # ode() or the success check below raises an error (previously a failed
  # integration left the library loaded).
  dyn.load(dllfile)
  on.exit(dyn.unload(dllfile), add=TRUE)
  # Integrate
  out <- deSolve::ode(y=model$getVars(), times=times, func="derivs_wrapped",
    rtol=rtol, atol=atol, dllname=dllname,
    initfunc="initmod", nout=model$lenPros(), outnames=model$namesPros(),
    parms=model$getPars())
  # istate[1] == 2 indicates a successful integration (see deSolve docs)
  if (attr(out,which="istate",exact=TRUE)[1] != 2)
    stop(paste0("Integration failed.\n----- The initial values were:\n",
      paste(names(vars),vars,sep="=",collapse="\n"),"\n----- The parameters were:\n",
      paste(names(pars),pars,sep="=",collapse="\n")
    ))
  return(out)
}
#' Compute steady-state solution
#'
#' Estimates the values of the state variables for steady-state conditions.
#'
#' @param time Scalar numeric value. All external forcings used in the steady
#'   state computations are kept constant at their respective values for the
#'   given point in time. If no external forcings are present (autonomous
#'   models), the value is ignored.
#'
#' @inheritParams simul
#'
#' @return The object returned by \code{rootSolve::steady}. The \code{y}-
#'   component of this object has names based on the \code{ynames}
#'   attribute.
#'
#' @note An error is generated if steady-state estimation was not successful.
#'   The shared library is unloaded on exit even in that case.
#'
#' @author David Kneis \email{david.kneis@@tu-dresden.de}
#'
#' @export
stst <- function(model, vars, pars, time, dllfile) {
  # Assign data
  model$setVars(vars)
  model$setPars(pars)
  # Per-variable solver tolerances from the model's variables table
  rtol <- model$getVarsTable()$rtol
  atol <- model$getVarsTable()$atol
  # Derive the library name by stripping the platform's dynlib extension
  ext <- substr(.Platform$dynlib.ext, 2, nchar(.Platform$dynlib.ext))
  dllname <- sub(pattern=paste0("(.+)[.]",ext,"$"), replacement="\\1",
    x=basename(dllfile))
  # Load the compiled model and guarantee it is unloaded again, even when
  # steady() or the success check below raises an error (previously a failed
  # estimation left the library loaded).
  dyn.load(dllfile)
  on.exit(dyn.unload(dllfile), add=TRUE)
  # Compute steady state solution
  out <- rootSolve::steady(y=model$getVars(), time=time, func="derivs_wrapped",
    parms=model$getPars(), method="stode", rtol=rtol, atol=atol,
    dllname=dllname, initfunc="initmod",
    nout=model$lenPros(), outnames=model$namesPros())
  # The "steady" attribute flags whether a steady state was actually reached
  if (!attr(out, which="steady",exact=TRUE))
    stop(paste0("Steady-state estimation failed.\n----- The initial values were:\n",
      paste(names(vars),vars,sep="=",collapse="\n"),"\n----- The parameters were:\n",
      paste(names(pars),pars,sep="=",collapse="\n")
    ))
  # Restore state variable names on the solution vector
  names(out$y) <- attr(out, which="ynames",exact=TRUE)
  return(out)
}
| /R/simul.r | no_license | dkneis/rodeoApp | R | false | false | 3,393 | r | #' Run a dynamic simulation
#'
#' Computes the values of the state variables for a sequence of times.
#'
#' @param model Object of class \code{rodeo} representing the model.
#' @param vars Named vector of the state variables' initial values.
#' @param pars Named vector of parameters.
#' @param times Vector of times for which the states are computed.
#' @param dllfile Shared library file holding the compiled model.
#'
#' @return The object returned by \code{deSolve::ode}.
#'
#' @note An error is generated if the integration was not successful.
#'
#' @author David Kneis \email{david.kneis@@tu-dresden.de}
#'
#' @export
# Run a dynamic simulation: integrate the rodeo model's state variables over
# `times` using the compiled shared library in `dllfile`. Returns the object
# produced by deSolve::ode(); stops with a detailed message on failure.
simul <- function(model, vars, pars, times, dllfile) {
  # Assign data
  model$setVars(vars)
  model$setPars(pars)
  # Per-variable integration tolerances from the model's variables table
  rtol <- model$getVarsTable()$rtol
  atol <- model$getVarsTable()$atol
  # Derive the library name by stripping the platform's dynlib extension
  ext <- substr(.Platform$dynlib.ext, 2, nchar(.Platform$dynlib.ext))
  dllname <- sub(pattern=paste0("(.+)[.]",ext,"$"), replacement="\\1",
    x=basename(dllfile))
  # Load the compiled model; the unload is registered via on.exit() so the
  # library is released even when integration fails (previously the error
  # path returned without dyn.unload()).
  dyn.load(dllfile)
  on.exit(dyn.unload(dllfile), add=TRUE)
  # Integrate
  out <- deSolve::ode(y=model$getVars(), times=times, func="derivs_wrapped",
    rtol=rtol, atol=atol, dllname=dllname,
    initfunc="initmod", nout=model$lenPros(), outnames=model$namesPros(),
    parms=model$getPars())
  # istate[1] == 2 indicates a successful integration (see deSolve docs)
  if (attr(out,which="istate",exact=TRUE)[1] != 2)
    stop(paste0("Integration failed.\n----- The initial values were:\n",
      paste(names(vars),vars,sep="=",collapse="\n"),"\n----- The parameters were:\n",
      paste(names(pars),pars,sep="=",collapse="\n")
    ))
  return(out)
}
#' Compute steady-state solution
#'
#' Estimates the values of the state variables for steady-state conditions.
#'
#' @param time Scalar numeric value. All external forcings used in the steady
#' state computations are kept constant at their respective values for the
#' given point in time. If no external forcings are present (autonomous
#' models), the value is ignored.
#'
#' @inheritParams simul
#'
#' @return The object returned by \code{rootSolve::steady}. The \code{y}-
#' component of this object has names based on the \code{ynames}
#' attribute.
#'
#' @note An error is generated if steady-state estimation was not successful.
#'
#' @author David Kneis \email{david.kneis@@tu-dresden.de}
#'
#' @export
# Estimate the steady-state solution of the rodeo model using the compiled
# shared library in `dllfile`; forcings are held constant at `time`.
# Returns the object produced by rootSolve::steady() with named y-component;
# stops with a detailed message on failure.
stst <- function(model, vars, pars, time, dllfile) {
  # Assign data
  model$setVars(vars)
  model$setPars(pars)
  # Per-variable solver tolerances from the model's variables table
  rtol <- model$getVarsTable()$rtol
  atol <- model$getVarsTable()$atol
  # Derive the library name by stripping the platform's dynlib extension
  ext <- substr(.Platform$dynlib.ext, 2, nchar(.Platform$dynlib.ext))
  dllname <- sub(pattern=paste0("(.+)[.]",ext,"$"), replacement="\\1",
    x=basename(dllfile))
  # Load the compiled model; the unload is registered via on.exit() so the
  # library is released even when estimation fails (previously the error
  # path returned without dyn.unload()).
  dyn.load(dllfile)
  on.exit(dyn.unload(dllfile), add=TRUE)
  # Compute steady state solution
  out <- rootSolve::steady(y=model$getVars(), time=time, func="derivs_wrapped",
    parms=model$getPars(), method="stode", rtol=rtol, atol=atol,
    dllname=dllname, initfunc="initmod",
    nout=model$lenPros(), outnames=model$namesPros())
  # The "steady" attribute flags whether a steady state was actually reached
  if (!attr(out, which="steady",exact=TRUE))
    stop(paste0("Steady-state estimation failed.\n----- The initial values were:\n",
      paste(names(vars),vars,sep="=",collapse="\n"),"\n----- The parameters were:\n",
      paste(names(pars),pars,sep="=",collapse="\n")
    ))
  # Restore state variable names on the solution vector
  names(out$y) <- attr(out, which="ynames",exact=TRUE)
  return(out)
}
|
# Script to run simulation on cluster
# Builds the full factorial of species-pair simulations, prepares a shared
# initial population state, and submits one Slurm job array per batch of
# at most 10k array tasks.
library(tidyverse)
# Species ids are encoded in the parameter file names
# ("<anything>_<spId>.csv"): strip everything up to the last "_" and the
# ".csv" extension, then de-duplicate.
spIds <- unique(
  gsub(
    '.*_|.csv', '', dir(file.path('..', 'data', 'parameters'))
  )
)
# create and export csv sim parameters
# Every ordered species pair x 50 replications; the replication number is
# reused as the RNG seed so replicates are reproducible across pairs.
sim_pars <- expand_grid(
  sp1 = spIds,
  sp2 = spIds,
  n_time = 3000,
  deltaTime = 1,
  plotSize = 180,
  param_method = 'random',
  replication = 1:50
) |>
  mutate(
    seed = replication
  ) |>
  # remove duplicated species
  filter(sp1 != sp2) |>
  write_csv('simulation_pars.csv')
# generate init population distribution
# so each replication has the same initial state and competition values
# There is an issue that each replicate will have a different Lmax and therefore
# a different N vector. To work around this issue, I generate a init and comp
# vector with the lowest Lmax among spIds and iterations (422) and for each
# sim run I will complete the remaining N vector with zeros
source('R/kernel.R')
# Fake parameter list carrying only the lowest Lmax (422); init_pop() comes
# from R/kernel.R.
pars_fake <- list('growth' = c('Lmax' = 422))
N_init <- init_pop(
  pars_fake,
  L = 127,
  h = 1,
  N = 0.5
)
# Persist the shared initial state for the array jobs to read.
saveRDS(
  list(
    'N_init' = N_init
  ),
  'N_init_comp.RDS'
)
# Slurm returns an error for array above 10k
# loop over simulations so each batch is limited to 10k arrays
i_max <- floor(nrow(sim_pars)/10000)
batch_max <- rep(10000, i_max)
# Size of the final (partial) batch.
# NOTE(review): the division by 10 (rather than 10000) suggests each array
# task in ipm_i.R processes 10 parameter rows — confirm against ipm_i.R.
batch_max[i_max + 1] <- nrow(sim_pars)/10
# write slurm bash file
# One submission script per batch; the batch index is passed to R through
# the BATCH environment variable on the job's command line.
for(i in 0:i_max)
{
  bash_file <- paste0('#!/bin/bash
#SBATCH --account=def-dgravel
#SBATCH -t 1-10:00:00
#SBATCH --mem-per-cpu=2000M
#SBATCH --ntasks=1
#SBATCH --job-name=ipm', i, '
#SBATCH --mail-user=willian.vieira@usherbrooke.ca
#SBATCH --mail-type=ALL
#SBATCH --array=1-', batch_max[i + 1], '
module load StdEnv/2020 r/4.1.0
BATCH=', i, ' R -f ipm_i.R
')
  # save bash file
  writeLines(bash_file, paste0('sub_', i, '.sh'))
  # run slurm
  system(paste0('sbatch sub_', i, '.sh'))
}
| /simulations/coexistence/run_ipm.R | permissive | willvieira/forest-IPM | R | false | false | 1,845 | r | # Script to run simulation on cluster
library(tidyverse)
spIds <- unique(
gsub(
'.*_|.csv', '', dir(file.path('..', 'data', 'parameters'))
)
)
# create and export csv sim parameters
sim_pars <- expand_grid(
sp1 = spIds,
sp2 = spIds,
n_time = 3000,
deltaTime = 1,
plotSize = 180,
param_method = 'random',
replication = 1:50
) |>
mutate(
seed = replication
) |>
# remove duplicated species
filter(sp1 != sp2) |>
write_csv('simulation_pars.csv')
# generate init population distribution
# so each replication has the same initial state and competition values
# There is an issue that each replicate will have a different Lmax and therefore
# a different N vector. To work around this issue, I generate a init and comp
# vector with the lowest Lmax among spIds and iterations (422) and for each
# sim run I will complete the remaining N vector with zeros
source('R/kernel.R')
pars_fake <- list('growth' = c('Lmax' = 422))
N_init <- init_pop(
pars_fake,
L = 127,
h = 1,
N = 0.5
)
saveRDS(
list(
'N_init' = N_init
),
'N_init_comp.RDS'
)
# Slurm returns an error for array above 10k
# loop over simulations so each batch is limited to 10k arrays
i_max <- floor(nrow(sim_pars)/10000)
batch_max <- rep(10000, i_max)
batch_max[i_max + 1] <- nrow(sim_pars)/10
# write slurm bash file
for(i in 0:i_max)
{
bash_file <- paste0('#!/bin/bash
#SBATCH --account=def-dgravel
#SBATCH -t 1-10:00:00
#SBATCH --mem-per-cpu=2000M
#SBATCH --ntasks=1
#SBATCH --job-name=ipm', i, '
#SBATCH --mail-user=willian.vieira@usherbrooke.ca
#SBATCH --mail-type=ALL
#SBATCH --array=1-', batch_max[i + 1], '
module load StdEnv/2020 r/4.1.0
BATCH=', i, ' R -f ipm_i.R
')
# save bash file
writeLines(bash_file, paste0('sub_', i, '.sh'))
# run slurm
system(paste0('sbatch sub_', i, '.sh'))
}
|
# Getting and Cleaning Data Course Project
# Builds a tidy data set of the average of each mean()/std() measurement in
# the UCI HAR (Human Activity Recognition) data, per activity and subject.
library(dplyr)
# reading the train data (measurements X, activity ids Y, subject ids)
trainingData_X <- read.table("./CourseProject/UCI HAR Dataset/train/X_train.txt")
trainingData_Y <- read.table("./CourseProject/UCI HAR Dataset/train/Y_train.txt")
Sub_training <- read.table("./CourseProject/UCI HAR Dataset/train/subject_train.txt")
# reading test data
testData_X <- read.table("./CourseProject/UCI HAR Dataset/test/X_test.txt")
testData_Y <- read.table("./CourseProject/UCI HAR Dataset/test/Y_test.txt")
Sub_test <- read.table("./CourseProject/UCI HAR Dataset/test/subject_test.txt")
# reading data description (feature index/name pairs)
variable_names <- read.table("./CourseProject/UCI HAR Dataset/features.txt")
# reading activity labels (activity id -> descriptive label)
activity_labels <- read.table("./CourseProject/UCI HAR Dataset/activity_labels.txt")
# Step 1: Merges the training and the test sets to create one data set.
total_X <- rbind(trainingData_X, testData_X)
total_Y <- rbind(trainingData_Y, testData_Y)
Sub_total <- rbind(Sub_training, Sub_test)
# Step 2: Extracts only the measurements on the mean and standard deviation
# for each measurement (feature names containing "mean()" or "std()").
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)",variable_names[,2]),]
total_X <- total_X[,selected_var[,1]]
# Step 3: Uses descriptive activity names to name the activities in the data set
colnames(total_Y) <- "activity"
total_Y$activitylabel <- factor(total_Y$activity, labels = as.character(activity_labels[,2]))
activitylabel <- total_Y[,-1]
# Step 4: Appropriately labels the data set with descriptive variable names.
colnames(total_X) <- variable_names[selected_var[,1],2]
# Step 5: From the data set in step 4, creates a second, independent tidy data
# set with the average of each variable for each activity and each subject.
colnames(Sub_total) <- "subject"
total <- cbind(total_X, activitylabel, Sub_total)
# NOTE(review): summarize_each()/funs() are deprecated in current dplyr —
# consider summarise(across(everything(), mean)) when updating.
total_mean <- total %>% group_by(activitylabel, subject) %>% summarize_each(funs(mean))
write.table(total_mean, file = "tidydata.txt", row.names = FALSE, col.names = TRUE) | /Getting-and-Cleaning-Data-Week-4-AssignmentProject/run_analysis.R | no_license | SrinathKunka/datasciencecoursera | R | false | false | 2,012 | r | #Getting and Cleaning Data Course Project
# Getting and Cleaning Data Course Project
# Builds a tidy data set of the average of each mean()/std() measurement in
# the UCI HAR (Human Activity Recognition) data, per activity and subject.
library(dplyr)
# reading the train data (measurements X, activity ids Y, subject ids)
trainingData_X <- read.table("./CourseProject/UCI HAR Dataset/train/X_train.txt")
trainingData_Y <- read.table("./CourseProject/UCI HAR Dataset/train/Y_train.txt")
Sub_training <- read.table("./CourseProject/UCI HAR Dataset/train/subject_train.txt")
# reading test data
testData_X <- read.table("./CourseProject/UCI HAR Dataset/test/X_test.txt")
testData_Y <- read.table("./CourseProject/UCI HAR Dataset/test/Y_test.txt")
Sub_test <- read.table("./CourseProject/UCI HAR Dataset/test/subject_test.txt")
# reading data description (feature index/name pairs)
variable_names <- read.table("./CourseProject/UCI HAR Dataset/features.txt")
# reading activity labels (activity id -> descriptive label)
activity_labels <- read.table("./CourseProject/UCI HAR Dataset/activity_labels.txt")
# Step 1: Merges the training and the test sets to create one data set.
total_X <- rbind(trainingData_X, testData_X)
total_Y <- rbind(trainingData_Y, testData_Y)
Sub_total <- rbind(Sub_training, Sub_test)
# Step 2: Extracts only the measurements on the mean and standard deviation
# for each measurement (feature names containing "mean()" or "std()").
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)",variable_names[,2]),]
total_X <- total_X[,selected_var[,1]]
# Step 3: Uses descriptive activity names to name the activities in the data set
colnames(total_Y) <- "activity"
total_Y$activitylabel <- factor(total_Y$activity, labels = as.character(activity_labels[,2]))
activitylabel <- total_Y[,-1]
# Step 4: Appropriately labels the data set with descriptive variable names.
colnames(total_X) <- variable_names[selected_var[,1],2]
# Step 5: From the data set in step 4, creates a second, independent tidy data
# set with the average of each variable for each activity and each subject.
colnames(Sub_total) <- "subject"
total <- cbind(total_X, activitylabel, Sub_total)
# NOTE(review): summarize_each()/funs() are deprecated in current dplyr —
# consider summarise(across(everything(), mean)) when updating.
total_mean <- total %>% group_by(activitylabel, subject) %>% summarize_each(funs(mean))
write.table(total_mean, file = "tidydata.txt", row.names = FALSE, col.names = TRUE)
#!/usr/bin/env Rscript
# Assemble cortical surface measures (thickness, area, volume, mean
# curvature) from CIVET 2.1 outputs of two studies (CIHR and POND), apply
# QC-based filtering, build the model covariate table, and save all objects
# to an .rda for downstream longitudinal analysis.
suppressPackageStartupMessages({
  library(RMINC)
  library(dplyr)
  library(purrr)
  library(readxl)
  library(tidyr)
  library(ggplot2)
})
# Read latest summaries
civet_summaries <-
  read.csv("../data/combined_neuroanatomy20170929.csv"
    , stringsAsFactors = FALSE)
# CIHR scans: CIVET outputs live under margot_subjs2.1/, keyed by scan id.
civet_files_CIHR <-
  civet.getAllFilenames(
    filter(civet_summaries, src == "CIHR")
    , idvar = "scan", prefix = "d8_", basedir = "margot_subjs2.1/"
    , civetVersion = "2.1.0"
    , cnf = yaml::yaml.load_file("margot_subjs2.1/d8_0001_01/CBRAIN.params.yml")
  )
# POND outputs are split across two CIVET runs (original + later addition).
civet_paths <- c("../data/civetOutputs/pond_2-1_20170227"
  , "../data/civetOutputs/pond_addition_20170908")
# For each POND scan, normalize the scan id ("MR160-xx-yy" -> "xx_yy") and
# locate which of the two output directories actually contains it (NA if
# neither does).
civet_pathed <-
  filter(civet_summaries, src == "POND") %>%
  mutate(scan_np = gsub("-", "_", sub("MR160-", "", scan))
    , civet_path =
      sapply(scan_np, function(subj){
        paths <- file.path(civet_paths, subj)
        for(path in paths)
          if(file.exists(path)) return(dirname(path))
        return(NA)
      })
  )
# POND scans found in the original run directory.
civet_files_PND <-
  civet.getAllFilenames(
    filter(civet_pathed, civet_path == civet_paths[1])
    , idvar = "scan_np", prefix = "MR160_", basedir = "../data/civetOutputs/pond_2-1_20170227/"
    , civetVersion = "2.1.0"
    , cnf = yaml::yaml.load_file("../data/civetOutputs/pond_2-1_20170227/088_0002_01_002/CBRAIN_Colosse-384672-1.params.yml")
  )
# POND scans found in the later "addition" run directory.
civet_files_PND_update <-
  civet.getAllFilenames(
    filter(civet_pathed, civet_path == civet_paths[2])
    , idvar = "scan_np", prefix = "MR160_", basedir = "../data/civetOutputs/pond_addition_20170908/"
    , civetVersion = "2.1.0"
    , cnf = yaml::yaml.load_file("../data/civetOutputs/pond_addition_20170908/088_0405_01_002/CBRAIN_Mammouth-453451-1.params.yml")
  )
# Combine both studies; flag scans acquired after 2016-06-01 as coming from
# the new scanner, and drop rows whose curvature file is missing on disk.
civet_files <- bind_rows(civet_files_CIHR, civet_files_PND, civet_files_PND_update)
civet_files$new_scanner <- civet_files$scan_date > as.Date("2016-06-01")
civet_files <- filter(civet_files, sapply(RSL_mean_curvature_left, file.exists))
# Keep, per visit, the most recent non-missing IQ measurement.
iqs <-
  read.csv("../data/combined_iqs20170620.csv") %>%
  filter(!is.na(iq)) %>%
  group_by(visit) %>%
  arrange(desc(scan_date)) %>%
  slice(1) %>%
  ungroup
## Filter
# Threshold on CIVET surface/surface and intersection QC counts.
self_intersect_limit <- 150
# Restrict to ASD/CTRL scans that pass the surface-QC thresholds, have a
# known age below 50, attach IQ, and de-duplicate scans; curvature file
# paths are redirected to their absolute-value ("_abs") variants.
civet_deduped <-
  civet_files %>%
  filter(Dx %in% c("ASD", "CTRL")) %>%
  filter(LEFT_SURF_SURF < self_intersect_limit
    , RIGHT_SURF_SURF < self_intersect_limit
    , RIGHT_INTER < self_intersect_limit
    , LEFT_INTER < self_intersect_limit
    , !is.na(age_scan)
    , age_scan < 50) %>%
  # group_by(subject) %>% #remove this stanza to skip delongi
  # slice(`if`(any(best_of_subject), which(best_of_subject), 1)) %>%
  # ungroup %>%
  mutate(passed_old_qc = QC_PASS & best_of_subject) %>%
  left_join(iqs %>% select(visit, iq), by = "visit") %>%
  filter(!duplicated(scan)) %>% ## when merging CIVET dupes can happen
  mutate(RSL_mean_curvature_left = sub("left", "left_abs", RSL_mean_curvature_left)
    , RSL_mean_curvature_right = sub("right", "right_abs", RSL_mean_curvature_right))
# Covariate table for modelling: z-score age, brain volume and its cube
# root (crbv) and two-thirds power (ttrbv), and IQ; males and CTRL are the
# reference levels.
model_data <-
  civet_deduped %>%
  mutate(age_scan = as.numeric(scale(age_scan, center = TRUE))
    , bv = as.numeric(scale(BRAIN_VOL, center = TRUE))
    , crbv = as.numeric(scale(BRAIN_VOL^(1/3), center = TRUE))
    , ttrbv = as.numeric(scale(BRAIN_VOL^(2/3), center = TRUE))
    , sex = relevel(factor(sex), "M")
    , iq = as.numeric(scale(iq))
    , Dx = factor(Dx, c("CTRL","ASD")))
## Bring in surfaces
surface_left <- read_obj("../data/CIVET_2.0/CIVET_2.0_icbm_avg_mid_sym_mc_left.obj")
surface_right <- read_obj("../data/CIVET_2.0_icbm_avg_mid_sym_mc_right.obj")
# Vertex masks: keep the first 40962 AAL labels per hemisphere and retain
# vertices with a non-zero label.
aal_left_mask <-
  readLines("../data/CIVET_2.0/CIVET_2.0_AAL_left.txt") %>%
  as.numeric %>%
  .[1:40962] %>%
  `!=`(0)
aal_right_mask <-
  readLines("../data/CIVET_2.0/CIVET_2.0_AAL_right.txt") %>%
  as.numeric %>%
  .[1:40962] %>%
  `!=`(0)
## Setup vertex tables
# One vertex-by-scan matrix per measure and hemisphere
# (t = thickness, a = area, v = volume, mc = mean curvature).
lt <- vertexTable(civet_deduped$nativeRMS_RSLtlink_left)
rt <- vertexTable(civet_deduped$nativeRMS_RSLtlink_right)
la <- vertexTable(civet_deduped$midSurfaceleftNativeArea)
ra <- vertexTable(civet_deduped$midSurfacerightNativeArea)
lv <- vertexTable(civet_deduped$SurfaceleftNativeVolume)
rv <- vertexTable(civet_deduped$SurfacerightNativeVolume)
lmc <- vertexTable(civet_deduped$RSL_mean_curvature_left)
rmc <- vertexTable(civet_deduped$RSL_mean_curvature_right)
# Stack masked right-hemisphere vertices on top of left-hemisphere ones
# (note the right-then-left row order).
thickness <- rbind(rt[aal_right_mask,], lt[aal_left_mask,])
area <- rbind(ra[aal_right_mask,], la[aal_left_mask,])
volume <- rbind(rv[aal_right_mask,], lv[aal_left_mask,])
mean_curv <- rbind(rmc[aal_right_mask,], lmc[aal_left_mask,])
## Generate random shuffles so that each imputated set gets the same randomizations
# 5000 column permutations of the scan indices; no seed is set here, so
# reruns of this script produce different shuffles.
shuffles <- sapply(seq_len(5000), function(i) sample(seq_len(nrow(civet_deduped))))
#del for space
rm(lt, rt, la, ra, lv, rv, lmc, rmc)
## Write out objects
# Save the whole workspace, date-stamped, one directory up.
to_date <- format(Sys.Date(), "%Y%m%d")
save.image(
  file.path(".."
    , paste0("cortical_objects_longi"
      , to_date
      , ".rda")))
| /setup/setup_data_full.R | no_license | cfhammill/autism-sex-differences-code-appendix | R | false | false | 5,073 | r | #!/usr/bin/env Rscript
suppressPackageStartupMessages({
library(RMINC)
library(dplyr)
library(purrr)
library(readxl)
library(tidyr)
library(ggplot2)
})
# Read latest summaries
civet_summaries <-
read.csv("../data/combined_neuroanatomy20170929.csv"
, stringsAsFactors = FALSE)
civet_files_CIHR <-
civet.getAllFilenames(
filter(civet_summaries, src == "CIHR")
, idvar = "scan", prefix = "d8_", basedir = "margot_subjs2.1/"
, civetVersion = "2.1.0"
, cnf = yaml::yaml.load_file("margot_subjs2.1/d8_0001_01/CBRAIN.params.yml")
)
civet_paths <- c("../data/civetOutputs/pond_2-1_20170227"
, "../data/civetOutputs/pond_addition_20170908")
civet_pathed <-
filter(civet_summaries, src == "POND") %>%
mutate(scan_np = gsub("-", "_", sub("MR160-", "", scan))
, civet_path =
sapply(scan_np, function(subj){
paths <- file.path(civet_paths, subj)
for(path in paths)
if(file.exists(path)) return(dirname(path))
return(NA)
})
)
civet_files_PND <-
civet.getAllFilenames(
filter(civet_pathed, civet_path == civet_paths[1])
, idvar = "scan_np", prefix = "MR160_", basedir = "../data/civetOutputs/pond_2-1_20170227/"
, civetVersion = "2.1.0"
, cnf = yaml::yaml.load_file("../data/civetOutputs/pond_2-1_20170227/088_0002_01_002/CBRAIN_Colosse-384672-1.params.yml")
)
civet_files_PND_update <-
civet.getAllFilenames(
filter(civet_pathed, civet_path == civet_paths[2])
, idvar = "scan_np", prefix = "MR160_", basedir = "../data/civetOutputs/pond_addition_20170908/"
, civetVersion = "2.1.0"
, cnf = yaml::yaml.load_file("../data/civetOutputs/pond_addition_20170908/088_0405_01_002/CBRAIN_Mammouth-453451-1.params.yml")
)
civet_files <- bind_rows(civet_files_CIHR, civet_files_PND, civet_files_PND_update)
civet_files$new_scanner <- civet_files$scan_date > as.Date("2016-06-01")
civet_files <- filter(civet_files, sapply(RSL_mean_curvature_left, file.exists))
iqs <-
read.csv("../data/combined_iqs20170620.csv") %>%
filter(!is.na(iq)) %>%
group_by(visit) %>%
arrange(desc(scan_date)) %>%
slice(1) %>%
ungroup
## Filter
self_intersect_limit <- 150
civet_deduped <-
civet_files %>%
filter(Dx %in% c("ASD", "CTRL")) %>%
filter(LEFT_SURF_SURF < self_intersect_limit
, RIGHT_SURF_SURF < self_intersect_limit
, RIGHT_INTER < self_intersect_limit
, LEFT_INTER < self_intersect_limit
, !is.na(age_scan)
, age_scan < 50) %>%
# group_by(subject) %>% #remove this stanza to skip delongi
# slice(`if`(any(best_of_subject), which(best_of_subject), 1)) %>%
# ungroup %>%
mutate(passed_old_qc = QC_PASS & best_of_subject) %>%
left_join(iqs %>% select(visit, iq), by = "visit") %>%
filter(!duplicated(scan)) %>% ## when merging CIVET dupes can happen
mutate(RSL_mean_curvature_left = sub("left", "left_abs", RSL_mean_curvature_left)
, RSL_mean_curvature_right = sub("right", "right_abs", RSL_mean_curvature_right))
model_data <-
civet_deduped %>%
mutate(age_scan = as.numeric(scale(age_scan, center = TRUE))
, bv = as.numeric(scale(BRAIN_VOL, center = TRUE))
, crbv = as.numeric(scale(BRAIN_VOL^(1/3), center = TRUE))
, ttrbv = as.numeric(scale(BRAIN_VOL^(2/3), center = TRUE))
, sex = relevel(factor(sex), "M")
, iq = as.numeric(scale(iq))
, Dx = factor(Dx, c("CTRL","ASD")))
## Bring in surfaces
surface_left <- read_obj("../data/CIVET_2.0/CIVET_2.0_icbm_avg_mid_sym_mc_left.obj")
surface_right <- read_obj("../data/CIVET_2.0_icbm_avg_mid_sym_mc_right.obj")
aal_left_mask <-
readLines("../data/CIVET_2.0/CIVET_2.0_AAL_left.txt") %>%
as.numeric %>%
.[1:40962] %>%
`!=`(0)
aal_right_mask <-
readLines("../data/CIVET_2.0/CIVET_2.0_AAL_right.txt") %>%
as.numeric %>%
.[1:40962] %>%
`!=`(0)
## Setup vertex tables
## vertexTable() stacks the per-subject files listed in each column into a
## (vertex x subject) matrix -- one matrix per hemisphere and measure.
lt <- vertexTable(civet_deduped$nativeRMS_RSLtlink_left)    # thickness, left
rt <- vertexTable(civet_deduped$nativeRMS_RSLtlink_right)   # thickness, right
la <- vertexTable(civet_deduped$midSurfaceleftNativeArea)   # surface area, left
ra <- vertexTable(civet_deduped$midSurfacerightNativeArea)  # surface area, right
lv <- vertexTable(civet_deduped$SurfaceleftNativeVolume)    # cortical volume, left
rv <- vertexTable(civet_deduped$SurfacerightNativeVolume)   # cortical volume, right
lmc <- vertexTable(civet_deduped$RSL_mean_curvature_left)   # mean curvature, left
rmc <- vertexTable(civet_deduped$RSL_mean_curvature_right)  # mean curvature, right
## Keep only AAL-labelled vertices and stack right hemisphere first, then
## left; any downstream vertex-wise indexing must use this same row order.
thickness <- rbind(rt[aal_right_mask,], lt[aal_left_mask,])
area <- rbind(ra[aal_right_mask,], la[aal_left_mask,])
volume <- rbind(rv[aal_right_mask,], lv[aal_left_mask,])
mean_curv <- rbind(rmc[aal_right_mask,], lmc[aal_left_mask,])
## Generate random shuffles so that each imputed set gets the same randomizations
## (a n_subjects x 5000 matrix; each column is one permutation of row indices).
shuffles <- sapply(seq_len(5000), function(i) sample(seq_len(nrow(civet_deduped))))
# drop the per-hemisphere matrices to save space before save.image() below
rm(lt, rt, la, ra, lv, rv, lmc, rmc)
## Write out objects
# Date stamp (YYYYMMDD) embedded in the output filename.  Like every other
# object in the workspace, this variable is itself captured by save.image().
to_date <- format(Sys.Date(), "%Y%m%d")
# Snapshot the entire workspace to ../cortical_objects_longi<stamp>.rda so
# downstream analyses can reload everything built above.
save.image(file = file.path("..", paste0("cortical_objects_longi", to_date, ".rda")))
|
## Train and score a linear SVM (10-fold CV) on short-answer datasets
## 1..10, each at term-frequency thresholds 5, 10, 15, 20, 25.  For every
## (dataset, threshold) pair a metrics CSV and a predictions CSV are written.
library(tm)
library(caret)
library(RWeka)
# NOTE(review): hard-coded, machine-specific working directory; consider
# passing the data directory in as an argument instead of setwd().
setwd("C:/Users/schinnamgar/Desktop/Final research/Short answers/Data")
set.seed(123)  # reproducible CV fold assignment
for(iter in 1:10)
{
# term-frequency threshold sweep: 5, 10, 15, 20, 25
num=5
while(num<=25)
{
# Input/output file names for this (dataset, threshold) combination.
datasetname <- paste("dataset",iter,".csv",sep="")
dataset_raw_name <- paste("dataset",iter,"-","raw",".","csv",sep="")
dataset_tf_name <- paste("dataset",iter,"-","tf-",num,".","csv",sep="")
dataset_metrics_name <- paste("svm-classification","-","dataset",iter,"-","tf",num,"-","metrics",".","csv",sep="")
dataset_predictions_name <- paste("svm-classification","-","dataset",iter,"-","tf",num,"-","predictions",".","csv",sep="")
# Remove stale outputs from a previous run.  The original removed BOTH
# files whenever EITHER existed, which raised a warning from file.remove()
# for the one that was missing; delete each file only if it is present.
if (file.exists(dataset_metrics_name)) {
  file.remove(dataset_metrics_name)
}
if (file.exists(dataset_predictions_name)) {
  file.remove(dataset_predictions_name)
}
# Raw dataset: its 2nd column holds the class label used below.
orig_dataset <- read.csv(datasetname)
# Unfiltered document-term matrix, read only to report its dimensions.
orig_dtmdataset <- read.csv(dataset_raw_name)
# Append a section header, then the raw DTM dimensions (rows, terms;
# one column is subtracted from the term count for the label column).
temp <- cbind("Original Dimensions","")
write.table(temp,dataset_metrics_name,append=T,sep=",",eol = "\n",col.names=F,row.names=F)
dimensions <- cbind(dim(orig_dtmdataset)[1],(dim(orig_dtmdataset)[2]-1))
write.table(dimensions,dataset_metrics_name,append=T,sep=",",eol = "\n",col.names=F,row.names=F)
# Term-frequency-filtered feature matrix for the current threshold.
dataset <- read.csv(dataset_tf_name)
temp <- cbind("tf filtered Dimensions","")
write.table(temp,dataset_metrics_name,append=T,sep=",",eol = "\n",col.names=F,row.names=F)
dimensions <- cbind(dim(dataset)[1],dim(dataset)[2])
write.table(dimensions,dataset_metrics_name,append=T,sep=",",eol = "\n",col.names=F,row.names=F)
# Attach the class label (2nd column of the raw dataset) as a factor.
dataset$class <- as.factor(orig_dataset[,2])
# 10-fold cross-validated linear SVM; savePred keeps the held-out
# predictions so they can be written out and scored below.
# NOTE(review): prefer TRUE over T (T is reassignable).
ctrl <- trainControl(method = "cv", number = 10,savePred = T)
model <- train(class ~ ., data = dataset, method = "svmLinear", trControl = ctrl)
modelpred <- model$pred
write.csv(modelpred,dataset_predictions_name,na="0")
# NOTE(review): library() calls inside the loop are redundant after the
# first iteration; they belong at the top of the script.
library(mlearning)
library(Metrics)
results <- read.csv(dataset_predictions_name)
# Align predicted and observed labels on a common factor level set, then
# compute the mean absolute error on their integer level codes.
# (The original computed mae() once BEFORE factoring -- a dead result
# immediately overwritten -- and re-applied the identical factor() call to
# results$pred a second time; both redundancies are removed here.)
results$obs <- factor(results$obs)
results$pred <- factor(results$pred, levels = levels(results$obs))
mean_abs_err <- mae(as.numeric(results$pred), as.numeric(results$obs))
# Confusion-matrix metrics (caret): overall[1] is Accuracy and overall[2]
# is Kappa; both are appended to the metrics CSV.
confusiontable <- confusionMatrix(results$pred, results$obs)
write.table(confusiontable$overall[1], dataset_metrics_name, append = TRUE, sep = ",", eol = "\n", col.names = FALSE)
write.table(confusiontable$overall[2], dataset_metrics_name, append = TRUE, sep = ",", eol = "\n", col.names = FALSE)
# Mean F-score across classes via mlearning::confusion on the level codes.
calFscore <- confusion(as.factor(as.numeric(results$pred)), as.factor(as.numeric(results$obs)))
summary_fscore <- summary(calFscore, type = c("Fscore", "Recall", "Precision"))
temp <- cbind("FScore", mean(summary_fscore$Fscore))
# Fixed: the original call contained a stray empty argument (",,") before
# eol=, which only worked because the silently matched formal has a default.
write.table(temp, dataset_metrics_name, append = TRUE, sep = ",", row.names = FALSE, col.names = FALSE, eol = "\n")
temp <- cbind("Mean Absolute Error", mean_abs_err)
write.table(temp, dataset_metrics_name, append = TRUE, sep = ",", row.names = FALSE, col.names = FALSE, eol = "\n")
num=num+5
}
} | /classification-svm-tf.R | no_license | SUNILKUMARCHINNAMGARI/phdrcode | R | false | false | 2,882 | r | library(tm)
library(caret)
library(RWeka)
setwd("C:/Users/schinnamgar/Desktop/Final research/Short answers/Data")
set.seed(123)
for(iter in 1:10)
{
num=5
while(num<=25)
{
datasetname <- paste("dataset",iter,".csv",sep="")
dataset_raw_name <- paste("dataset",iter,"-","raw",".","csv",sep="")
dataset_tf_name <- paste("dataset",iter,"-","tf-",num,".","csv",sep="")
dataset_metrics_name <- paste("svm-classification","-","dataset",iter,"-","tf",num,"-","metrics",".","csv",sep="")
dataset_predictions_name <- paste("svm-classification","-","dataset",iter,"-","tf",num,"-","predictions",".","csv",sep="")
# Remove stale outputs from a previous run.  The original removed BOTH
# files whenever EITHER existed, which raised a warning from file.remove()
# for the one that was missing; delete each file only if it is present.
if (file.exists(dataset_metrics_name)) {
  file.remove(dataset_metrics_name)
}
if (file.exists(dataset_predictions_name)) {
  file.remove(dataset_predictions_name)
}
orig_dataset <- read.csv(datasetname)
orig_dtmdataset <- read.csv(dataset_raw_name)
temp <- cbind("Original Dimensions","")
write.table(temp,dataset_metrics_name,append=T,sep=",",eol = "\n",col.names=F,row.names=F)
dimensions <- cbind(dim(orig_dtmdataset)[1],(dim(orig_dtmdataset)[2]-1))
write.table(dimensions,dataset_metrics_name,append=T,sep=",",eol = "\n",col.names=F,row.names=F)
dataset <- read.csv(dataset_tf_name)
temp <- cbind("tf filtered Dimensions","")
write.table(temp,dataset_metrics_name,append=T,sep=",",eol = "\n",col.names=F,row.names=F)
dimensions <- cbind(dim(dataset)[1],dim(dataset)[2])
write.table(dimensions,dataset_metrics_name,append=T,sep=",",eol = "\n",col.names=F,row.names=F)
dataset$class <- as.factor(orig_dataset[,2])
ctrl <- trainControl(method = "cv", number = 10,savePred = T)
model <- train(class ~ ., data = dataset, method = "svmLinear", trControl = ctrl)
modelpred <- model$pred
write.csv(modelpred,dataset_predictions_name,na="0")
library(mlearning)
library(Metrics)
results <- read.csv(dataset_predictions_name)
# Align predicted and observed labels on a common factor level set, then
# compute the mean absolute error on their integer level codes.
# (The original computed mae() once BEFORE factoring -- a dead result
# immediately overwritten -- and re-applied the identical factor() call to
# results$pred a second time; both redundancies are removed here.)
results$obs <- factor(results$obs)
results$pred <- factor(results$pred, levels = levels(results$obs))
mean_abs_err <- mae(as.numeric(results$pred), as.numeric(results$obs))
# Confusion-matrix metrics (caret): overall[1] is Accuracy and overall[2]
# is Kappa; both are appended to the metrics CSV.
confusiontable <- confusionMatrix(results$pred, results$obs)
write.table(confusiontable$overall[1], dataset_metrics_name, append = TRUE, sep = ",", eol = "\n", col.names = FALSE)
write.table(confusiontable$overall[2], dataset_metrics_name, append = TRUE, sep = ",", eol = "\n", col.names = FALSE)
# Mean F-score across classes via mlearning::confusion on the level codes.
calFscore <- confusion(as.factor(as.numeric(results$pred)), as.factor(as.numeric(results$obs)))
summary_fscore <- summary(calFscore, type = c("Fscore", "Recall", "Precision"))
temp <- cbind("FScore", mean(summary_fscore$Fscore))
# Fixed: the original call contained a stray empty argument (",,") before
# eol=, which only worked because the silently matched formal has a default.
write.table(temp, dataset_metrics_name, append = TRUE, sep = ",", row.names = FALSE, col.names = FALSE, eol = "\n")
temp <- cbind("Mean Absolute Error", mean_abs_err)
write.table(temp, dataset_metrics_name, append = TRUE, sep = ",", row.names = FALSE, col.names = FALSE, eol = "\n")
num=num+5
}
} |
# Plot 3: overlayed time series of the three energy sub-metering channels.
# Read headers and just the required data from the file into the data frame
header <- read.table('household_power_consumption.txt', header = FALSE,
                     sep = ';', nrows = 1, colClasses = 'character')
# skip/nrows select exactly 2880 rows = 2 days of minute-level readings
# (presumably 2007-02-01 and 2007-02-02 -- TODO confirm against the file);
# '?' marks missing values in this dataset.
data <- read.table('household_power_consumption.txt', header = FALSE, sep = ';',
                   na.strings = c('?'), skip = 66637, nrows = 2880)
colnames(data) <- header[1, ]
# Combine Date and Time into a single date-time column
# (strptime returns POSIXlt, which plot() handles directly).
data$Datetime <- strptime(paste(data$Date, data$Time), '%d/%m/%Y %H:%M:%S')
# Plot 3
png('./plot3.png', width = 480, height = 480, bg = 'transparent')
# Sub_metering_1 as the base (black) line, then overlay 2 and 3 in colour.
plot(data$Datetime, data$Sub_metering_1, type = 'l', main = '', xlab = '',
     ylab = 'Energy sub metering')
lines(data$Datetime, data$Sub_metering_2, col = 'red')
lines(data$Datetime, data$Sub_metering_3, col = 'blue')
legend('topright', lty = c(1, 1, 1), col = c('black', 'red', 'blue'),
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
dev.off()
| /plot3.R | no_license | Peque/ExData_Plotting1 | R | false | false | 924 | r |
# Plot 3: overlayed time series of the three energy sub-metering channels.
# Read headers and just the required data from the file into the data frame
header <- read.table('household_power_consumption.txt', header = FALSE,
                     sep = ';', nrows = 1, colClasses = 'character')
# skip/nrows select exactly 2880 rows = 2 days of minute-level readings
# (presumably 2007-02-01 and 2007-02-02 -- TODO confirm against the file);
# '?' marks missing values in this dataset.
data <- read.table('household_power_consumption.txt', header = FALSE, sep = ';',
                   na.strings = c('?'), skip = 66637, nrows = 2880)
colnames(data) <- header[1, ]
# Combine Date and Time into a single date-time column
# (strptime returns POSIXlt, which plot() handles directly).
data$Datetime <- strptime(paste(data$Date, data$Time), '%d/%m/%Y %H:%M:%S')
# Plot 3
png('./plot3.png', width = 480, height = 480, bg = 'transparent')
# Sub_metering_1 as the base (black) line, then overlay 2 and 3 in colour.
plot(data$Datetime, data$Sub_metering_1, type = 'l', main = '', xlab = '',
     ylab = 'Energy sub metering')
lines(data$Datetime, data$Sub_metering_2, col = 'red')
lines(data$Datetime, data$Sub_metering_3, col = 'blue')
legend('topright', lty = c(1, 1, 1), col = c('black', 'red', 'blue'),
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
dev.off()
|
testlist <- list(data = structure(-5.48612930076962e+303, .Dim = c(1L, 1L )), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610555958-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 154 | r | testlist <- list(data = structure(-5.48612930076962e+303, .Dim = c(1L, 1L )), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grib_cube.R
\name{grib_cube}
\alias{grib_cube}
\title{Create 3D volume of a GRIB variable}
\usage{
grib_cube(gribObj, shortName, typeOfLevel, decreasing = FALSE)
}
\arguments{
\item{gribObj}{\code{GRIB} class object.}
\item{shortName}{The short name given in the GRIB file of the variable to
select.}
\item{typeOfLevel}{The vertical coordinate to use as given by the typeOfLevel
key in the GRIB file.}
\item{decreasing}{Parameter to tell the array's vertical coordinate to be
increasing or decreasing.}
}
\value{
Returns a three-dimensional array.
}
\description{
\code{grib_cube} creates a three-dimensional array from one variable along a
chosen vertical coordinate.
}
\details{
\code{grib_cube} is a wrapper function for \code{grib_select} to conveniently
create a three-dimensional cube. The user inputs a variable to search for and
the vertical coordinate to use when finding each level.
Because \code{grib_cube} uses \code{grib_select}, speed can become an issue.
This is meant as a convenience to "get the job done". If you want more speed,
it will always be better to know which message number you want, set up your
own loop, and use \code{grib_get_message} as that will avoid the overhead of
searching through the GRIB file.
}
\examples{
g <- grib_open(system.file("extdata", "lfpw.grib1", package = "gribr"))
cube <- grib_cube(g, 'u', 'isobaricInhPa', TRUE)
grib_close(g)
}
\seealso{
\code{\link{grib_get_message}} \code{\link{grib_list}}
\code{\link{grib_expand_grids}} \code{\link{grib_latlons}}
\code{\link{grib_select}}
}
| /man/grib_cube.Rd | permissive | nawendt/gribr | R | false | true | 1,621 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grib_cube.R
\name{grib_cube}
\alias{grib_cube}
\title{Create 3D volume of a GRIB variable}
\usage{
grib_cube(gribObj, shortName, typeOfLevel, decreasing = FALSE)
}
\arguments{
\item{gribObj}{\code{GRIB} class object.}
\item{shortName}{The short name given in the GRIB file of the variable to
select.}
\item{typeOfLevel}{The vertical coordinate to use as given by the typeOfLevel
key in the GRIB file.}
\item{decreasing}{Parameter to tell the array's vertical coordinate to be
increasing or decreasing.}
}
\value{
Returns a three-dimensional array.
}
\description{
\code{grib_cube} creates a three-dimensional array from one variable along a
chosen vertical coordinate.
}
\details{
\code{grib_cube} is a wrapper function for \code{grib_select} to conveniently
create a three-dimensional cube. The user inputs a variable to search for and
the vertical coordinate to use when finding each level.
Because \code{grib_cube} uses \code{grib_select}, speed can become an issue.
This is meant as a convenience to "get the job done". If you want more speed,
it will always be better to know which message number you want, set up your
own loop, and use \code{grib_get_message} as that will avoid the overhead of
searching through the GRIB file.
}
\examples{
g <- grib_open(system.file("extdata", "lfpw.grib1", package = "gribr"))
cube <- grib_cube(g, 'u', 'isobaricInhPa', TRUE)
grib_close(g)
}
\seealso{
\code{\link{grib_get_message}} \code{\link{grib_list}}
\code{\link{grib_expand_grids}} \code{\link{grib_latlons}}
\code{\link{grib_select}}
}
|
#-------------------Header------------------------------------------------
# Author: Daniel Fridljand
# Date: 05/02/2021
# Purpose: plot data
#
#***************************************************************************
#------------------SET-UP--------------------------------------------------
# clear memory
# NOTE(review): rm(list = ls()) only clears the calling session's globals;
# prefer running the script in a fresh R session instead.
rm(list = ls(all = TRUE))
# load packages, install if missing
packages <- c(
  "data.table", "magrittr", "shiny", "ggplot2", "ggpubr", "scales", "grid", "cowplot",
  "dplyr", "stringr", "tidyr",
  "gridExtra", "grid", "lattice"
)
for (p in packages) {
  if (p %in% rownames(installed.packages()) == FALSE) install.packages(p)
  suppressMessages(library(p, character.only = T, warn.conflicts = FALSE, quietly = TRUE))
}
options(dplyr.summarise.inform = FALSE)  # silence summarise() grouping messages
options(scipen = 10000)  # avoid scientific notation in axis labels
# Pass in arguments
# Positional command-line arguments; the specific indices (7, 8, 10, 11, 13)
# must match the pipeline driver's argument layout -- TODO confirm.
args <- commandArgs(trailingOnly = T)
summaryDir <- args[7]
figuresDir <- args[8]
scenarioI <- args[10]
methodI <- args[11]
min_ageI <- args[13]
# TODO delete
# Interactive fallback: when run with no arguments, use these defaults.
if (rlang::is_empty(args)) {
  summaryDir <- "data/17_summary"
  figuresDir <- "data/18_figures"
  min_ageI <- 25
  scenarioI <- "real"
  methodI <- "di_gee"
}
# Read the combined burden table and keep only the requested minimum age.
# (The original also built a vector of "attr_bur" file paths here that was
# never used before being rm()'d; that dead code is removed.)
all_burden <- fread(file.path(summaryDir, "all_burd.csv"))
all_burden <- all_burden %>% filter(min_age == min_ageI)
# Global plot defaults: classic theme; cairo bitmap device for PNG output.
theme_set(theme_classic(base_family = "Helvetica")); options(bitmapType ="cairo");
# dir.create(file.path(figuresDir, methodI), recursive = T, showWarnings = F)
### ----- read stuff----
# Restrict to the single national, both-sex, age-adjusted death-rate series.
all_burden <- all_burden %>%
  filter(Gender.Code == "All genders" & svi_bin == "All" & measure1 == "Deaths" & measure2 == "age-adjusted rate per 100,000" &
    source == "National Vital Statistics System" & Region == "United States")
## -- figure 3, attributable burden---
# TODO method di_gee/burnett
# Panel 1: by race-ethnicity.  Education == 666 appears to be the sentinel
# for "all education levels" -- TODO confirm against the upstream coding.
all_burden1 <- all_burden %>% filter(Education == 666 & Ethnicity != "All, All Origins" & rural_urban_class == "All")
g1 <- ggplot(all_burden1, aes(x = Year, y = value, color = Ethnicity))
# Panel 2: by education level (releveled low to high for the legend order).
all_burden2 <- all_burden %>%
  filter(Education != 666 & Ethnicity == "All, All Origins"
  & rural_urban_class == "All")
all_burden2$Education <- factor(all_burden2$Education, # Relevel group factor
  levels = c("High school graduate or lower",
  "Some college education but no 4-year college degree",
  "4-year college graduate or higher"))
g2 <- ggplot(all_burden2, aes(x = Year, y = value, color = Education))
# Panel 3: by rurality, restricted to years >= 2000.
all_burden3 <- all_burden %>%
  filter(Education == 666 & Ethnicity == "All, All Origins" &
    rural_urban_class != "All" & Year >= 2000)
all_burden3$rural_urban_class <- factor(all_burden3$rural_urban_class, # Relevel group factor
  levels = c("Large metro",
  "Small-medium metro",
  "Non metro"))
g3 <- ggplot(all_burden3, aes(x = Year, y = value, color = rural_urban_class))
## --set range---
# Shared y-axis limits across all three panels so the rates are comparable.
min1 <- min(c(all_burden1$value, all_burden2$value, all_burden3$value))
max1 <- max(c(all_burden1$value, all_burden2$value, all_burden3$value))
g1 <- g1 + ylim(min1, max1)
g2 <- g2 + ylim(min1, max1)
g3 <- g3 + ylim(min1, max1)
plots <- list(g1, g2, g3)
rm(min1, max1)
rm(
  all_burden1, all_burden2, all_burden3,
  g1, g2, g3
)
#----formatting------
# Fixed colour palette shared across panels; the names assigned below must
# match the factor levels used in the three data subsets above.
group.colors <- c(
  RColorBrewer::brewer.pal(n = 12, name = "Paired")[c(1:6, 8:10, 12)],
  RColorBrewer::brewer.pal(n = 6, name = "Spectral")[1:2]
)
# Swap palette entries 2 and 12 before the names are attached.
group.colors[c(12, 2)] <- group.colors[c(2, 12)]
names(group.colors) <- c(
  "NH White",
  "Hispanic or Latino White",
  "Black American",
  "White",
  "Asian or Pacific Islander",
  "American Indian or Alaska Native",
  "High school graduate or lower",
  "Some college education but no 4-year college degree",
  "4-year college graduate or higher",
  "Non metro",
  "Large metro",
  "Small-medium metro"
)
# Common styling for every panel: thick lines, manual colours (limits =
# force drops unused palette entries), and a single-column legend.
plots <- lapply(plots, function(g) {
  g +
    geom_line(size = 1.5) +
    xlab("Year") +
    scale_colour_manual(values = group.colors
                        , limits = force
    ) +
    theme(legend.title = element_blank(), legend.text=element_text(size=8)) +
    guides(color = guide_legend(ncol = 1, byrow = TRUE)) #3
})
#legend_plot <- as_ggplot(legend_plot)
# Extract each panel's legend as its own ggplot object so the legends can
# occupy a separate row of the final grid layout.
legend_plots <- lapply(plots, function(g){
  g %>%
    get_legend %>%
    as_ggplot
})
# Strip legends and y-axis titles from the panels themselves.
plots <- lapply(plots, function(g) {
  g + #theme(legend.position = "bottom",
    # axis.title.y = element_blank())
    theme(legend.position = "none", axis.title.y = element_blank())
})
## --- arrange plots----
# Layout matrix indexing into gs below: 1-3 = panels, 4-6 = title strips,
# 7-9 = legends; NA cells are blank spacer columns/rows.
lay <- rbind(
  c(4, NA, 5, NA, 6),
  c(NA, NA, NA, NA, NA),
  c(1, NA, 2, NA, 3),
  c(7, NA, 8, NA, 9)
)
# Grey title strips for the three columns.
t1 <- grobTree(
  rectGrob(gp = gpar(fill = "grey")),
  textGrob("Race-Ethnicity", gp = gpar(fontsize = 10, fontface = "bold"))
)
t2 <- grobTree(
  rectGrob(gp = gpar(fill = "grey")),
  textGrob("Education", gp = gpar(fontsize = 10, fontface = "bold"))
)
t3 <- grobTree(
  rectGrob(gp = gpar(fill = "grey")),
  textGrob("Rurality", gp = gpar(fontsize = 10, fontface = "bold"))
)
# Assemble grobs in the order the layout matrix expects.
gs <- append(plots, list(t1, t2, t3))
gs <- append(gs, legend_plots)
# gs <- lapply(1:9, function(ii) grobTree(rectGrob(gp = gpar(fill = ii, alpha = 0.5)), textGrob(ii)))
# Relative spacer/panel sizes.  NOTE(review): "figure_hight" is a typo for
# "figure_height" (kept as-is here since this is a documentation pass).
blank_space <- 0.05
figure_width <- 1.3
figure_hight <- 1
g_combined <- grid.arrange(
  grobs = gs,
  widths = c(figure_width, blank_space, figure_width, blank_space, figure_width),
  heights = c(0.1, blank_space, figure_hight, 1),
  layout_matrix = lay
)
# https://stackoverflow.com/questions/40265494/ggplot-grobs-align-with-tablegrob
ggsave(file.path(figuresDir, paste0(methodI, "-", scenarioI), "figureS4.png"), dpi = 300, g_combined, height = 4, width = 8)
| /pipeline/33_figureS4.R | permissive | FridljDa/pm25_inequality | R | false | false | 5,861 | r | #-------------------Header------------------------------------------------
# Author: Daniel Fridljand
# Date: 05/02/2021
# Purpose: plot data
#
#***************************************************************************
#------------------SET-UP--------------------------------------------------
# clear memory
rm(list = ls(all = TRUE))
# load packages, install if missing
packages <- c(
"data.table", "magrittr", "shiny", "ggplot2", "ggpubr", "scales", "grid", "cowplot",
"dplyr", "stringr", "tidyr",
"gridExtra", "grid", "lattice"
)
for (p in packages) {
if (p %in% rownames(installed.packages()) == FALSE) install.packages(p)
suppressMessages(library(p, character.only = T, warn.conflicts = FALSE, quietly = TRUE))
}
options(dplyr.summarise.inform = FALSE)
options(scipen = 10000)
# Pass in arguments
args <- commandArgs(trailingOnly = T)
summaryDir <- args[7]
figuresDir <- args[8]
scenarioI <- args[10]
methodI <- args[11]
min_ageI <- args[13]
# TODO delete
if (rlang::is_empty(args)) {
summaryDir <- "data/17_summary"
figuresDir <- "data/18_figures"
min_ageI <- 25
scenarioI <- "real"
methodI <- "di_gee"
}
# Read the combined burden table and keep only the requested minimum age.
# (The original also built a vector of "attr_bur" file paths here that was
# never used before being rm()'d; that dead code is removed.)
all_burden <- fread(file.path(summaryDir, "all_burd.csv"))
all_burden <- all_burden %>% filter(min_age == min_ageI)
theme_set(theme_classic(base_family = "Helvetica")); options(bitmapType ="cairo");
# dir.create(file.path(figuresDir, methodI), recursive = T, showWarnings = F)
### ----- read stuff----
all_burden <- all_burden %>%
filter(Gender.Code == "All genders" & svi_bin == "All" & measure1 == "Deaths" & measure2 == "age-adjusted rate per 100,000" &
source == "National Vital Statistics System" & Region == "United States")
## -- figure 3, attributable burden---
# TODO method di_gee/burnett
all_burden1 <- all_burden %>% filter(Education == 666 & Ethnicity != "All, All Origins" & rural_urban_class == "All")
g1 <- ggplot(all_burden1, aes(x = Year, y = value, color = Ethnicity))
all_burden2 <- all_burden %>%
filter(Education != 666 & Ethnicity == "All, All Origins"
& rural_urban_class == "All")
all_burden2$Education <- factor(all_burden2$Education, # Relevel group factor
levels = c("High school graduate or lower",
"Some college education but no 4-year college degree",
"4-year college graduate or higher"))
g2 <- ggplot(all_burden2, aes(x = Year, y = value, color = Education))
all_burden3 <- all_burden %>%
filter(Education == 666 & Ethnicity == "All, All Origins" &
rural_urban_class != "All" & Year >= 2000)
all_burden3$rural_urban_class <- factor(all_burden3$rural_urban_class, # Relevel group factor
levels = c("Large metro",
"Small-medium metro",
"Non metro"))
g3 <- ggplot(all_burden3, aes(x = Year, y = value, color = rural_urban_class))
## --set range---
min1 <- min(c(all_burden1$value, all_burden2$value, all_burden3$value))
max1 <- max(c(all_burden1$value, all_burden2$value, all_burden3$value))
g1 <- g1 + ylim(min1, max1)
g2 <- g2 + ylim(min1, max1)
g3 <- g3 + ylim(min1, max1)
plots <- list(g1, g2, g3)
rm(min1, max1)
rm(
all_burden1, all_burden2, all_burden3,
g1, g2, g3
)
#----formatting------
group.colors <- c(
RColorBrewer::brewer.pal(n = 12, name = "Paired")[c(1:6, 8:10, 12)],
RColorBrewer::brewer.pal(n = 6, name = "Spectral")[1:2]
)
group.colors[c(12, 2)] <- group.colors[c(2, 12)]
names(group.colors) <- c(
"NH White",
"Hispanic or Latino White",
"Black American",
"White",
"Asian or Pacific Islander",
"American Indian or Alaska Native",
"High school graduate or lower",
"Some college education but no 4-year college degree",
"4-year college graduate or higher",
"Non metro",
"Large metro",
"Small-medium metro"
)
plots <- lapply(plots, function(g) {
g +
geom_line(size = 1.5) +
xlab("Year") +
scale_colour_manual(values = group.colors
, limits = force
) +
theme(legend.title = element_blank(), legend.text=element_text(size=8)) +
guides(color = guide_legend(ncol = 1, byrow = TRUE)) #3
})
#legend_plot <- as_ggplot(legend_plot)
legend_plots <- lapply(plots, function(g){
g %>%
get_legend %>%
as_ggplot
})
plots <- lapply(plots, function(g) {
g + #theme(legend.position = "bottom",
# axis.title.y = element_blank())
theme(legend.position = "none", axis.title.y = element_blank())
})
## --- arrange plots----
lay <- rbind(
c(4, NA, 5, NA, 6),
c(NA, NA, NA, NA, NA),
c(1, NA, 2, NA, 3),
c(7, NA, 8, NA, 9)
)
t1 <- grobTree(
rectGrob(gp = gpar(fill = "grey")),
textGrob("Race-Ethnicity", gp = gpar(fontsize = 10, fontface = "bold"))
)
t2 <- grobTree(
rectGrob(gp = gpar(fill = "grey")),
textGrob("Education", gp = gpar(fontsize = 10, fontface = "bold"))
)
t3 <- grobTree(
rectGrob(gp = gpar(fill = "grey")),
textGrob("Rurality", gp = gpar(fontsize = 10, fontface = "bold"))
)
gs <- append(plots, list(t1, t2, t3))
gs <- append(gs, legend_plots)
# gs <- lapply(1:9, function(ii) grobTree(rectGrob(gp = gpar(fill = ii, alpha = 0.5)), textGrob(ii)))
blank_space <- 0.05
figure_width <- 1.3
figure_hight <- 1
g_combined <- grid.arrange(
grobs = gs,
widths = c(figure_width, blank_space, figure_width, blank_space, figure_width),
heights = c(0.1, blank_space, figure_hight, 1),
layout_matrix = lay
)
# https://stackoverflow.com/questions/40265494/ggplot-grobs-align-with-tablegrob
ggsave(file.path(figuresDir, paste0(methodI, "-", scenarioI), "figureS4.png"), dpi = 300, g_combined, height = 4, width = 8)
|
\name{ggNNC}
\alias{ggNNC}
\alias{erDataSeq}
\alias{convert.threshold.to.er}
\alias{convert.er.to.threshold}
\title{
Visualising Numbers Needed for Change
}
\description{
These functions can be used to visualise Numbers Needed for Change. \code{erDataSeq} is a helper function to generate an Event Rate Data Sequence, and it uses \code{convert.threshold.to.er} and \code{convert.er.to.threshold} to convert thresholds to event rates and vice versa.
}
\usage{
erDataSeq(er = NULL, threshold = NULL,
mean = NULL, sd = NULL,
eventIfHigher = TRUE,
pRange = c(1e-06, 0.99999),
xStep = 0.01)
ggNNC(cerDataSeq, d = NULL, eventDesirable = TRUE,
r = 1, xlab = "Continuous outcome",
plotTitle = c("Numbers Needed for Change = ", ""),
theme = theme_bw(), lineSize = 1,
cerColor = "#EBF2F8", eerColor = "#172F47",
cerLineColor = "#888888", eerLineColor = "#000000",
dArrowColor = "#000000", cerAlpha = 0.66,
eerAlpha = 0.66, xLim = NULL,
xLimAutoDensityTolerance = 0.001,
showLegend = TRUE, verticalLineColor = "#172F47",
desirableColor = "#00FF00", desirableAlpha = 0.2,
undesirableColor = "#FF0000", undesirableAlpha = 0.2,
desirableTextColor = "#009900",
undesirableTextColor = "#990000",
dArrowDistance = 0.04 * max(cerDataSeq$density),
dLabelDistance = 0.08 * max(cerDataSeq$density))
convert.threshold.to.er(threshold, mean, sd,
eventIfHigher = TRUE,
pdist = pnorm)
convert.er.to.threshold(er, mean, sd,
eventIfHigher = TRUE,
qdist = qnorm)
}
\arguments{
\item{er}{
Event rate to visualise (or convert).
}
\item{threshold}{
If the event rate is not available, a threshold value can be specified
instead, which is then used in conjunction with the mean
(\code{mean}) and standard deviation (\code{sd}) and assuming
a normal distribution to compute the event rate.
}
\item{mean}{
The mean of the control group distribution.
}
\item{sd}{
The standard deviation (of the control distribution, but assumed to
be the same for both distributions).
}
\item{eventIfHigher}{
Whether scores above or below the threshold are considered 'an event'.
}
\item{pRange}{
The range of probabilities for which to so the distribution.
}
\item{xStep}{
Precision of the drawn distribution; higher values mean lower
precision/granularity/resolution.
}
\item{cerDataSeq}{
The \code{cerDataSeq} object.
}
\item{d}{
The value of Cohen's \emph{d}.
}
\item{eventDesirable}{
Whether an event is desirable or undesirable.
}
\item{r}{
The correlation between the determinant and behavior (for mediated NNC's).
}
\item{xlab}{
The label to display for the X axis.
}
\item{plotTitle}{
The title of the plot; either one character value, this value if
used; if two, they are considered a prefix and suffix to be pre/appended
to the NNC value.
}
\item{theme}{
The theme to use for the plot.
}
\item{lineSize}{
The thickness of the lines in the plot.
}
\item{cerColor}{
The color to use for the event rate portion of the control
group distribution.
}
\item{eerColor}{
The color to use for the event rate portion of the experimental
group distribution.
}
\item{cerLineColor}{
The line color to use for the control group distribution.
}
\item{eerLineColor}{
The line color to use for the experimental group distribution.
}
\item{dArrowColor}{
The color of the arrow to show the effect size.
}
\item{cerAlpha}{
The alpha value (transparency) to use for the control group distribution.
}
\item{eerAlpha}{
The alpha value (transparency) to use for the control group distribution.
}
\item{xLim}{
This can be used to manually specify the limits for the X axis; if
\code{NULL}, sensible limits will be derived using \code{xLimAutoDensityTolerance}.
}
\item{xLimAutoDensityTolerance}{
If \code{xLim} is \code{NULL}, the limits will be set where the density
falls below this proportion of its maximum value.
}
\item{showLegend}{
Whether to show the legend (only if showing two distributions).
}
\item{verticalLineColor}{
The color of the vertical line used to indicate the threshold.
}
\item{desirableColor}{
The color for the desirable portion of the X axis.
}
\item{desirableAlpha}{
The alpha for the desirable portion of the X axis.
}
\item{undesirableColor}{
The color for the undesirable portion of the X axis.
}
\item{undesirableAlpha}{
  The alpha for the undesirable portion of the X axis.
}
\item{desirableTextColor}{
The color for the text to indicate the desirable portion of the X axis.
}
\item{undesirableTextColor}{
The color for the text to indicate the undesirable portion of the X axis.
}
\item{dArrowDistance}{
The distance of the effect size arrow from the top of the distributions.
}
\item{dLabelDistance}{
The distance of the effect size label from the top of the distributions.
}
\item{pdist, qdist}{
Distributions to use when converting thresholds to event rates and vice
versa; defaults to the normal distribution.
}
}
\details{
These functions are used by \code{\link{nnc}} to show the distributions,
and event rates. They probably won't be used much on their own.
}
\value{
\code{erDataSeq} returns a data sequence; \code{ggNNC} a
\code{\link{ggplot}}.
}
\references{
Gruijters, S. L. K., & Peters, G.-J. Y. (2017). Introducing the Numbers Needed for Change (NNC): A practical measure of effect size for intervention research.
}
\author{
Gjalt-Jorn Peters & Stefan Gruijters
Maintainer: Gjalt-Jorn Peters <gjalt-jorn@userfriendlyscience.com>
}
\seealso{
\code{\link{nnc}}
}
\examples{
### Show distribution for an event rate value of 125
ggNNC(erDataSeq(threshold=125, mean=90, sd=30));
### If the event occurs under the threshold instead of
### above it
ggNNC(erDataSeq(threshold=125, mean=90, sd=30,
eventIfHigher = FALSE));
### ... And for undesirable events (note how
### desirability is an argument for ggNNC, whereas
### whether an event occurs 'above' or 'below' the
### threshold is an argument for erDataSeq):
ggNNC(erDataSeq(threshold=125, mean=90, sd=30,
eventIfHigher = FALSE),
eventDesirable = FALSE);
### Show event rate for both experimental and
### control conditions, and show the numbers
### needed for change
ggNNC(erDataSeq(threshold=125, mean=90, sd=30), d=.5);
### Illustration of how even with very large effect
### sizes, if the control event rate is very high,
### you'll still need a high number of NNC
ggNNC(erDataSeq(er=.9), d=1);
}
\keyword{ utilities }
| /man/ggNNC.Rd | no_license | Matherion/userfriendlyscience | R | false | false | 6,697 | rd | \name{ggNNC}
\alias{ggNNC}
\alias{erDataSeq}
\alias{convert.threshold.to.er}
\alias{convert.er.to.threshold}
\title{
Visualising Numbers Needed for Change
}
\description{
These functions can be used to visualise Numbers Needed for Change. \code{erDataSeq} is a helper function to generate an Event Rate Data Sequence, and it uses \code{convert.threshold.to.er} and \code{convert.er.to.threshold} to convert thresholds to event rates and vice versa.
}
\usage{
erDataSeq(er = NULL, threshold = NULL,
mean = NULL, sd = NULL,
eventIfHigher = TRUE,
pRange = c(1e-06, 0.99999),
xStep = 0.01)
ggNNC(cerDataSeq, d = NULL, eventDesirable = TRUE,
r = 1, xlab = "Continuous outcome",
plotTitle = c("Numbers Needed for Change = ", ""),
theme = theme_bw(), lineSize = 1,
cerColor = "#EBF2F8", eerColor = "#172F47",
cerLineColor = "#888888", eerLineColor = "#000000",
dArrowColor = "#000000", cerAlpha = 0.66,
eerAlpha = 0.66, xLim = NULL,
xLimAutoDensityTolerance = 0.001,
showLegend = TRUE, verticalLineColor = "#172F47",
desirableColor = "#00FF00", desirableAlpha = 0.2,
undesirableColor = "#FF0000", undesirableAlpha = 0.2,
desirableTextColor = "#009900",
undesirableTextColor = "#990000",
dArrowDistance = 0.04 * max(cerDataSeq$density),
dLabelDistance = 0.08 * max(cerDataSeq$density))
convert.threshold.to.er(threshold, mean, sd,
eventIfHigher = TRUE,
pdist = pnorm)
convert.er.to.threshold(er, mean, sd,
eventIfHigher = TRUE,
qdist = qnorm)
}
\arguments{
\item{er}{
Event rate to visualise (or convert).
}
\item{threshold}{
If the event rate is not available, a threshold value can be specified
instead, which is then used in conjunction with the mean
(\code{mean}) and standard deviation (\code{sd}) and assuming
a normal distribution to compute the event rate.
}
\item{mean}{
The mean of the control group distribution.
}
\item{sd}{
The standard deviation (of the control distribution, but assumed to
be the same for both distributions).
}
\item{eventIfHigher}{
Whether scores above or below the threshold are considered 'an event'.
}
\item{pRange}{
The range of probabilities for which to so the distribution.
}
\item{xStep}{
Precision of the drawn distribution; higher values mean lower
precision/granularity/resolution.
}
\item{cerDataSeq}{
The \code{cerDataSeq} object.
}
\item{d}{
The value of Cohen's \emph{d}.
}
\item{eventDesirable}{
Whether an event is desirable or undesirable.
}
\item{r}{
The correlation between the determinant and behavior (for mediated NNC's).
}
\item{xlab}{
The label to display for the X axis.
}
\item{plotTitle}{
The title of the plot; either one character value, this value if
used; if two, they are considered a prefix and suffix to be pre/appended
to the NNC value.
}
\item{theme}{
The theme to use for the plot.
}
\item{lineSize}{
The thickness of the lines in the plot.
}
\item{cerColor}{
The color to use for the event rate portion of the control
group distribution.
}
\item{eerColor}{
The color to use for the event rate portion of the experimental
group distribution.
}
\item{cerLineColor}{
The line color to use for the control group distribution.
}
\item{eerLineColor}{
The line color to use for the experimental group distribution.
}
\item{dArrowColor}{
The color of the arrow to show the effect size.
}
\item{cerAlpha}{
The alpha value (transparency) to use for the control group distribution.
}
\item{eerAlpha}{
The alpha value (transparency) to use for the experimental group distribution.
}
\item{xLim}{
This can be used to manually specify the limits for the X axis; if
\code{NULL}, sensible limits will be derived using \code{xLimAutoDensityTolerance}.
}
\item{xLimAutoDensityTolerance}{
If \code{xLim} is \code{NULL}, the limits will be set where the density
falls below this proportion of its maximum value.
}
\item{showLegend}{
Whether to show the legend (only if showing two distributions).
}
\item{verticalLineColor}{
The color of the vertical line used to indicate the threshold.
}
\item{desirableColor}{
The color for the desirable portion of the X axis.
}
\item{desirableAlpha}{
The alpha for the desirable portion of the X axis.
}
\item{undesirableColor}{
The color for the undesirable portion of the X axis.
}
\item{undesirableAlpha}{
The alpha for the undesirable portion of the X axis.
}
\item{desirableTextColor}{
The color for the text to indicate the desirable portion of the X axis.
}
\item{undesirableTextColor}{
The color for the text to indicate the undesirable portion of the X axis.
}
\item{dArrowDistance}{
The distance of the effect size arrow from the top of the distributions.
}
\item{dLabelDistance}{
The distance of the effect size label from the top of the distributions.
}
\item{pdist, qdist}{
Distributions to use when converting thresholds to event rates and vice
versa; defaults to the normal distribution.
}
}
\details{
These functions are used by \code{\link{nnc}} to show the distributions,
and event rates. They probably won't be used much on their own.
}
\value{
\code{erDataSeq} returns a data sequence; \code{ggNNC} a
\code{\link{ggplot}}.
}
\references{
Gruijters, S. L. K., & Peters, G.-J. Y. (2017). Introducing the Numbers Needed for Change (NNC): A practical measure of effect size for intervention research.
}
\author{
Gjalt-Jorn Peters & Stefan Gruijters
Maintainer: Gjalt-Jorn Peters <gjalt-jorn@userfriendlyscience.com>
}
\seealso{
\code{\link{nnc}}
}
\examples{
### Show distribution for an event rate value of 125
ggNNC(erDataSeq(threshold=125, mean=90, sd=30));
### If the event occurs under the threshold instead of
### above it
ggNNC(erDataSeq(threshold=125, mean=90, sd=30,
eventIfHigher = FALSE));
### ... And for undesirable events (note how
### desirability is an argument for ggNNC, whereas
### whether an event occurs 'above' or 'below' the
### threshold is an argument for erDataSeq):
ggNNC(erDataSeq(threshold=125, mean=90, sd=30,
eventIfHigher = FALSE),
eventDesirable = FALSE);
### Show event rate for both experimental and
### control conditions, and show the numbers
### needed for change
ggNNC(erDataSeq(threshold=125, mean=90, sd=30), d=.5);
### Illustration of how even with very large effect
### sizes, if the control event rate is very high,
### you'll still need a high number of NNC
ggNNC(erDataSeq(er=.9), d=1);
}
\keyword{ utilities }
|
# The data directory contains 4 files:
# - data_description.txt
# - test.csv
# - train.csv
# - sample_submission.csv
##### Clean Up Workspace #####
# NOTE(review): clearing the global environment from inside a script is
# discouraged (it removes objects only, not loaded packages/options);
# prefer running the script in a fresh R session.
rm(list = ls()) # Remove Previous Workspace
gc(reset = TRUE) # Garbage Collection
########## Install and/or Load Packages ##########
# Load a package given by its bare (unquoted) name, installing it from
# CRAN first if it is not already available. Extra arguments in `...`
# are forwarded to install.packages().
packages <- function(x, repos = "http://cran.r-project.org", ...) {
  pkg <- deparse(substitute(x)) # capture the unquoted name as a string
  already_loaded <- require(pkg, character.only = TRUE)
  if (!already_loaded) {
    install.packages(pkgs = pkg, dependencies = TRUE, repos = repos, ...)
    library(pkg, character.only = TRUE)
  }
}
# Load libraries
# update.packages(repos = "http://cran.r-project.org") # Updates Packages (Do Periodically)
packages(data.table) # Data Frame Complement
packages(doParallel) # Parallel Computing
packages(foreach) # Parallel Computing
packages(jsonlite) # JSON Data
packages(reshape2) # Manipulate Datasets
packages(pdftools) # PDF to TXT Editor
packages(splitstackshape) # Stack and Reshape Datasets After Splitting Concatenated Values
packages(stringi) # Character/String Editor
packages(stringr) # Character/String Editor
packages(tm) # Text Mining
packages(dplyr) # Splitting, applying, and combining data
packages(boot) # Contains cv.glm
packages(leaps) # For regsubsets
packages(ggplot2)
packages(glmnet)
packages(forcats)
packages(caret)
##### Define functions #####
# Changes NAs to None for Categorical and 0 for Numeric
# Replace NA values throughout a data frame: factor columns get an
# explicit "None" level, all other columns have NA replaced with 0.
#
# @param df A data.frame (typically freshly read with read.csv).
# @return A data.frame of the same shape with NAs imputed as above.
munge <- function(df) {
  require(forcats)
  tdf <- df
  # Use is.factor() rather than comparing class(): class() can return a
  # vector of length > 1 (e.g. ordered factors), which breaks the old
  # `sapply(tdf, class) == "factor"` comparison.
  is_fac <- vapply(tdf, is.factor, logical(1))
  # CATEGORICAL: make NA an explicit "None" level
  tdf[is_fac] <- lapply(tdf[is_fac], function(x) fct_explicit_na(x, "None"))
  # NUMERIC (and everything else): turn NAs into 0
  tdf[!is_fac] <- lapply(tdf[!is_fac], function(x) { x[is.na(x)] <- 0; x })
  tdf
}
# Cost function: Root Mean Squared Logarithmic Error
# Root mean squared error between predictions and observations.
# NOTE: despite the name, no log is taken here -- this script transforms
# SalePrice with log(x + 1) up front, so plain RMSE on those values
# equals the RMSLE of the raw prices.
rmsle <- function(yhat, y) {
  sqrt(mean((yhat - y)^2)) # `^` is the documented power operator (`**` is not)
}
# Function to return variable names related to regsubsets output
# Return the elements of names(x) that are a prefix of at least one
# string in `end`. Used to map regsubsets() coefficient names (which
# have factor levels appended) back to original variable names.
# Returns NULL when nothing matches (same as the accumulating original).
var_select <- function (x, end) {
  candidates <- names(x)
  has_match <- vapply(
    candidates,
    function(prefix) any(startsWith(end, prefix)),
    logical(1)
  )
  matched <- unique(candidates[has_match])
  if (length(matched) > 0) matched else c()
}
# Sets ordinality in categorical variables (improves prediction accuracy)
# Convert the quality/condition-style columns to ordered factors (levels
# run worst-to-best) and collapse sparse or placeholder levels so the
# train/test model matrices line up.
#
# @param df A data.frame that has been through munge() (factor NAs are
#   the explicit level "None").
# @return The same data.frame with ordered factors and recoded levels.
fixOrdinals <- function(df) {
  qual5 <- c("Po", "Fa", "TA", "Gd", "Ex")
  fin7 <- c("None", "Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ")
  # Ordered level sets, worst first
  level.spec <- list(
    LotShape = c("IR3", "IR2", "IR1", "Reg"),
    Utilities = c("ELO", "NoSeWa", "NoSewr", "AllPub"),
    LandSlope = c("Sev", "Mod", "Gtl"),
    ExterQual = qual5,
    ExterCond = qual5,
    BsmtQual = qual5,
    BsmtCond = qual5,
    BsmtExposure = c("None", "No", "Mn", "Av", "Gd"),
    BsmtFinType1 = fin7,
    BsmtFinType2 = fin7,
    HeatingQC = qual5,
    Electrical = c("Mix", "FuseP", "FuseF", "FuseA", "SBrkr"),
    KitchenQual = qual5,
    Functional = c("Typ", "Min1", "Min2", "Mod", "Maj1", "Maj2", "Sev", "Sal"),
    FireplaceQu = qual5,
    GarageFinish = c("None", "Unf", "RFn", "Fin"),
    GarageQual = qual5,
    GarageCond = qual5,
    PavedDrive = c("N", "P", "Y"),
    PoolQC = c("None", "Fa", "TA", "Gd", "Ex")
  )
  for (col in names(level.spec)) {
    df[[col]] <- factor(df[[col]], level.spec[[col]], ordered = TRUE)
  }
  # Collapse rare/placeholder levels onto common ones: c(from, to)
  recode.spec <- list(
    Electrical = c("Mix", "FuseP"),
    Utilities = c("None", "AllPub"),
    BsmtQual = c("None", "Fa"),
    BsmtCond = c("None", "Fa"),
    KitchenQual = c("None", "Fa"),
    Functional = c("None", "Typ"),
    FireplaceQu = c("None", "Fa"),
    GarageQual = c("None", "Fa"),
    GarageCond = c("None", "Fa")
  )
  for (col in names(recode.spec)) {
    from <- recode.spec[[col]][1]
    to <- recode.spec[[col]][2]
    if (from == "None" && !("None" %in% levels(df[[col]]))) {
      # BUG FIX: the "None" placeholder produced by munge() is not a
      # level of the new factor, so factor() above turned those values
      # into NA and the old `df$col[df$col == "None"] <- to` lines
      # silently matched nothing (leaving NAs to leak into the model
      # matrix). Recode those NAs, which is what the comparison intended.
      idx <- is.na(df[[col]])
    } else {
      idx <- which(df[[col]] == from)
    }
    df[[col]][idx] <- to
  }
  df
}
##### Set up the data set #####
# Set working directory
# NOTE(review): absolute path only works on the author's machine; run the
# script from the project root (or use a relative path) for portability.
setwd("/Users/jholc89/Google Drive/R/house_prices/files")
# Read training and test set
train <- read.csv("train.csv")
test <- read.csv("test.csv")
##### Feature engineering #####
# Impute an explicit "None" level (factors) / 0 (numerics) for NAs
train <- munge(train)
test <- munge(test)
# Convert quality/condition columns to ordered factors
train <- fixOrdinals(train)
test <- fixOrdinals(test)
# Quadratic term for year built
train$YearBuilt2 <- train$YearBuilt^2
test$YearBuilt2 <- test$YearBuilt^2
# Houses without a garage carry GarageYrBlt = 0 after munge(); fall back
# to the house's construction year. BUG FIX: the right-hand side must be
# restricted to the same rows -- assigning the full-length YearBuilt
# column to a subset of rows fails (replacement length mismatch).
garage.idx <- which(train$GarageYrBlt == 0)
train[garage.idx, "GarageYrBlt"] <- train$YearBuilt[garage.idx]
# Impute specific values in the test set
test[which(test$MSSubClass == 150), "MSSubClass"] <- 160 # class 150 is unseen in training
garage.idx <- which(test$GarageYrBlt == 0)
test[garage.idx, "GarageYrBlt"] <- test$YearBuilt[garage.idx]
# Impute missing Electrical with the most common system
train[which(is.na(train$Electrical)), "Electrical"] <- "SBrkr"
# Reduce MasVnrType (fold rare BrkCmn into "None")
train$MasVnrType[train$MasVnrType == "BrkCmn"] <- "None"
test$MasVnrType[test$MasVnrType == "BrkCmn"] <- "None"
# Reduce Land Contour to two groups
train$LandContour[train$LandContour == "Bnk"] <- "Lvl"
train$LandContour[train$LandContour == "Low"] <- "HLS"
test$LandContour[test$LandContour == "Bnk"] <- "Lvl"
test$LandContour[test$LandContour == "Low"] <- "HLS"
# Reduce Lot Config (merge rare FR3 into FR2)
train$LotConfig[train$LotConfig == "FR3"] <- "FR2"
test$LotConfig[test$LotConfig == "FR3"] <- "FR2"
# Reduce LotShape to Reg or IR
train$LotShape <- as.factor(with(train, ifelse(LotShape == "Reg", "Reg", "IR")))
test$LotShape <- as.factor(with(test, ifelse(LotShape == "Reg", "Reg", "IR")))
# Reduce LandSlope to Gtl or NotGtl
train$LandSlope <- as.factor(with(train, ifelse(LandSlope == "Gtl", "Gtl", "NotGtl")))
test$LandSlope <- as.factor(with(test, ifelse(LandSlope == "Gtl", "Gtl", "NotGtl")))
# Reduce MiscFeature to Shed or No Shed
train$MiscFeature[train$MiscFeature != "None"] <- "Shed"
test$MiscFeature[test$MiscFeature != "None"] <- "Shed"
# Reduce Condition 1 (merge positive features; merge railroad categories)
train$Condition1[train$Condition1 == "PosA"] <- "PosN"
train$Condition1[train$Condition1 %in% c("RRAe", "RRAn", "RRNe", "RRNn")] <- "RRAn"
test$Condition1[test$Condition1 == "PosA"] <- "PosN"
test$Condition1[test$Condition1 %in% c("RRAe", "RRAn", "RRNe", "RRNn")] <- "RRAn"
# Reduce Foundation (merge the three rare types)
train$Foundation[train$Foundation %in% c("Slab", "Stone", "Wood")] <- "Slab"
test$Foundation[test$Foundation %in% c("Slab", "Stone", "Wood")] <- "Slab"
# Heating: gas vs everything else
# NOTE(review): this collapse is not applied to `test`; if test contains
# a heating type other than GasA this creates a level unseen in training.
train$Heating[train$Heating == "GasW"] <- "GasA"
train$Heating[train$Heating != "GasA"] <- "OthW"
# Change MSSubClass to categorical (its numeric codes are nominal)
train$MSSubClass <- as.factor(train$MSSubClass)
test$MSSubClass <- as.factor(test$MSSubClass)
# Remove observations with outliers
train <- train[which(train$GrLivArea < 4000), ] # Total square footage
# BUG FIX: logical subsetting, not -which(): if no lot exceeded 300 feet
# of frontage, -which(...) would be integer(0) and would drop EVERY row.
train <- train[!(train$LotFrontage > 300), ] # Lot frontage
train <- train[which(train$LotArea < 200000), ] # Lot area
# Log transform skewed variables (log(x + 1) so zeros stay finite)
train$SalePrice <- log(train$SalePrice + 1)
train$LotArea <- log(train$LotArea + 1)
train$GrLivArea <- log(train$GrLivArea + 1)
train$LotFrontage <- log(train$LotFrontage + 1)
train$TotalBsmtSF <- log(train$TotalBsmtSF + 1)
test$LotArea <- log(test$LotArea + 1)
test$GrLivArea <- log(test$GrLivArea + 1)
test$LotFrontage <- log(test$LotFrontage + 1)
test$TotalBsmtSF <- log(test$TotalBsmtSF + 1)
##### Matrices #####
# Separate feature matrix and response vector from training data
X <- select(train, -SalePrice, -Id)
X <- model.matrix(~ ., data = X)
# Refer to SalePrice by name: positional indexing (train[, 81]) silently
# breaks whenever columns are added or removed upstream.
Y <- train$SalePrice
# Matrix for test data
X.test <- select(test, -Id)
X.test <- model.matrix(~ ., data = X.test)
# Keep only the dummy columns present in BOTH matrices, in a single
# shared order. This avoids the -which() trap in the old code: when the
# two column sets already agreed, -which(integer(0)) selected zero
# columns instead of all of them.
common.cols <- intersect(colnames(X), colnames(X.test))
X <- X[, common.cols, drop = FALSE]
X.test <- X.test[, common.cols, drop = FALSE]
##### Models #####
# Lasso fit (alpha = 1); cv.glmnet chooses lambda by cross-validation
set.seed(123) # fixes the CV fold assignment for the next cv.glmnet call
lasso.fit <- cv.glmnet(x=X, y=Y, alpha=1)
plot(lasso.fit)
# Ridge regression fit (alpha = 0)
# Compute CV error for a grid of lambda solutions and plot
# NOTE(review): there is no set.seed() before the ridge fits, so their CV
# folds (and hence lambda.min) vary from run to run; re-seed for
# reproducibility.
ridge.fit <- cv.glmnet(x=X, y=Y, alpha=0)
lambda.hat <- ridge.fit$lambda.min
plot(x = log(ridge.fit$lambda), y = ridge.fit$cvm, xlab = 'log(lambda)', ylab = 'CV error',
main = 'CV Errors for Lambda using Ridge (Default Grid)', type = 'l')
abline(v = log(lambda.hat))
# Refine the lambda grid around the first-pass minimum and refit
lambda.new <- seq(lambda.hat, lambda.hat * .01, length = 100)
ridge.fit <- cv.glmnet(x=X, y=Y, alpha=0, lambda=lambda.new)
lambda.hat <- ridge.fit$lambda.min
plot(x = ridge.fit$lambda, y = ridge.fit$cvm, xlab = 'lambda', ylab = 'CV error',
main = 'CV Errors for Lambda using Ridge (Corrected Grid)', type = 'l')
abline(v = ridge.fit$lambda.min)
# Refitted lasso: ordinary least squares on the features the lasso kept
# ([-1] drops the intercept row of the coefficient matrix)
select.feats <- which(abs(coef(lasso.fit, s='lambda.1se'))[-1] > 1e-16)
refit.fit <- lm(Y ~ ., data = data.frame(X[, select.feats]))
###################################
##### Produce submission file #####
###################################
# Check training error (predictions and SalePrice are both on the
# log(x + 1) scale here, so rmsle() -- plain RMSE -- is the right metric)
yhat.train.ridge <- predict(ridge.fit, newx = X, s = 'lambda.min')
yhat.train.lasso <- predict(lasso.fit, newx = X, s = 'lambda.min')
yhat.train.refit <- predict(refit.fit, newdata = data.frame(X))
rmsle(yhat.train.ridge, train$SalePrice)
rmsle(yhat.train.lasso, train$SalePrice)
rmsle(yhat.train.refit, train$SalePrice)
# Predict house prices (yhat). BUG FIX: SalePrice was transformed with
# log(x + 1), so the correct inverse is expm1() -- exp() alone overstates
# every prediction by 1. The first three assignments are kept for their
# recorded Kaggle scores but are superseded by the ensemble below.
yhat <- expm1(predict(ridge.fit, newx = X.test, s = 'lambda.min')) # Kaggle score: .11771
yhat <- expm1(predict(lasso.fit, newx = X.test, s = 'lambda.min')) # Kaggle score: .12070
yhat <- expm1(predict(refit.fit, newdata = data.frame(X.test))) # Kaggle score: .12231
# Average ridge, lasso, and refitted-lasso predictions (simple ensemble)
yhat.ridge <- expm1(predict(ridge.fit, newx = X.test, s = 'lambda.min'))
yhat.lasso <- expm1(predict(lasso.fit, newx = X.test, s = 'lambda.min'))
yhat.refit <- expm1(predict(refit.fit, newdata = data.frame(X.test)))
yhat <- (yhat.ridge + yhat.lasso + yhat.refit) / 3
# Create submission.csv file
yhat # print predictions for a visual sanity check
any(is.na(yhat)) # sanity check before writing the file
prediction <- data.frame(test$Id, yhat)
colnames(prediction) <- c("Id", "SalePrice")
write.csv(prediction, "submission.csv", row.names = FALSE, quote = FALSE)
# Write design matrices to .csv for reuse elsewhere
# (TRUE/FALSE spelled out: T and F are ordinary variables and can be
# reassigned, unlike the reserved words)
write.csv(X, "train_mat.csv", row.names = FALSE, quote = FALSE)
write.csv(X.test, "test_mat.csv", row.names = FALSE, quote = FALSE)
##### Explore the training data set #####
# Missing values, duplicate data, etc.
# (Interactive sanity checks: results are printed, not stored.)
str(train) # Structure of the df: # of obs, # of variables, types of variables
any(is.na(train)) # TRUE if missing values exist, FALSE otherwise
colSums(sapply(train, is.na)) # Number of missing values per column
sum(is.na(train)) / (nrow(train) * ncol(train)) # Percentage of values that are missing
nrow(train) - nrow(unique(train)) # Number of duplicate rows
# Area graphs of numeric variables ***CONSIDER sqrt transform of sale price to correct heteroskedasticity***
# Binned area plots to eyeball each numeric variable's skew
ggplot(train, aes(SalePrice)) + geom_area(stat = "bin") # Sale price
ggplot(train, aes(GrLivArea)) + geom_area(stat = "bin") # Square footage
ggplot(train, aes(LotArea)) + geom_area(stat = "bin") # Lot area
ggplot(train, aes(TotalBsmtSF)) + geom_area(stat = "bin") # Basement square footage
ggplot(train, aes(LotFrontage)) + geom_area(stat = "bin") # Lot frontage
# Scatterplots with numerical variables against sale price
# (column meanings expand the names; see data_description.txt)
ggplot(train %>% filter(LotFrontage > 0), aes(x = LotFrontage, y = SalePrice)) + geom_point() + geom_smooth(method = lm) # Lot frontage
ggplot(train, aes(x = LotArea, y = SalePrice)) + geom_point() # Lot area
ggplot(train, aes(x = OverallQual, y = SalePrice)) + geom_point() # Overall quality
ggplot(train, aes(x = OverallCond, y = SalePrice)) + geom_point() # Overall condition
ggplot(train, aes(x = YearBuilt, y = SalePrice)) + geom_point() # Year built
ggplot(train, aes(x = YearRemodAdd, y = SalePrice)) + geom_point() # Year remodeled
ggplot(train, aes(x = MasVnrArea, y = SalePrice)) + geom_point() # Masonry veneer area
ggplot(train, aes(x = ExterQual, y = SalePrice)) + geom_point() # Exterior quality
ggplot(train, aes(x = ExterCond, y = SalePrice)) + geom_point() # Exterior condition
ggplot(train, aes(x = BsmtQual, y = SalePrice)) + geom_point() # Basement quality
ggplot(train, aes(x = BsmtCond, y = SalePrice)) + geom_point() # Basement condition
ggplot(train, aes(x = BsmtExposure, y = SalePrice)) + geom_point() # Basement exposure
ggplot(train, aes(x = BsmtFinType1, y = SalePrice)) + geom_point() # Basement finish type 1
ggplot(train, aes(x = BsmtFinSF1, y = SalePrice)) + geom_point() # Basement finished sq ft (type 1)
ggplot(train, aes(x = BsmtFinType2, y = SalePrice)) + geom_point() # Basement finish type 2
ggplot(train, aes(x = BsmtFinSF2, y = SalePrice)) + geom_point() # Basement finished sq ft (type 2)
ggplot(train, aes(x = BsmtUnfSF, y = SalePrice)) + geom_point() # Unfinished basement sq ft
ggplot(train, aes(x = TotalBsmtSF, y = SalePrice)) + geom_point() # Total basement sq ft
ggplot(train, aes(x = HeatingQC, y = SalePrice)) + geom_point() # Heating quality
ggplot(train, aes(x = X1stFlrSF, y = SalePrice)) + geom_point() # First floor sq ft
ggplot(train, aes(x = X2ndFlrSF, y = SalePrice)) + geom_point() # Second floor sq ft
ggplot(train, aes(x = LowQualFinSF, y = SalePrice)) + geom_point() # Low-quality finished sq ft
ggplot(train, aes(x = GrLivArea, y = SalePrice)) + geom_point() # Square footage
ggplot(train, aes(x = BsmtFullBath, y = SalePrice)) + geom_point() # Basement full baths
ggplot(train, aes(x = BsmtHalfBath, y = SalePrice)) + geom_point() # Basement half baths
ggplot(train, aes(x = FullBath, y = SalePrice)) + geom_point() # Number of full baths
ggplot(train, aes(x = HalfBath, y = SalePrice)) + geom_point() # Number of half baths
ggplot(train, aes(x = BedroomAbvGr, y = SalePrice)) + geom_point() # Bedrooms above grade
ggplot(train, aes(x = KitchenAbvGr, y = SalePrice)) + geom_point() # Kitchens above grade
ggplot(train, aes(x = KitchenQual, y = SalePrice)) + geom_point() # Kitchen quality
ggplot(train, aes(x = TotRmsAbvGrd, y = SalePrice)) + geom_point() # Total rooms above grade
ggplot(train, aes(x = Functional, y = SalePrice)) + geom_point() # Home functionality
ggplot(train, aes(x = Fireplaces, y = SalePrice)) + geom_point() # Number of fireplaces
ggplot(train, aes(x = FireplaceQu, y = SalePrice)) + geom_point() # Fireplace quality
ggplot(train, aes(x = GarageYrBlt, y = SalePrice)) + geom_point() # Year garage was built
ggplot(train, aes(x = GarageFinish, y = SalePrice)) + geom_point() # Garage interior finish
ggplot(train, aes(x = GarageCars, y = SalePrice)) + geom_point() # Car capacity of garage
ggplot(train, aes(x = GarageArea, y = SalePrice)) + geom_point() # Area of garage
ggplot(train, aes(x = GarageQual, y = SalePrice)) + geom_point() # Garage quality
ggplot(train, aes(x = GarageCond, y = SalePrice)) + geom_point() # Garage condition
ggplot(train, aes(x = WoodDeckSF, y = SalePrice)) + geom_point() # Wood deck sq ft
ggplot(train, aes(x = OpenPorchSF, y = SalePrice)) + geom_point() # Open porch sq ft
ggplot(train, aes(x = EnclosedPorch, y = SalePrice)) + geom_point() # Enclosed porch sq ft
ggplot(train, aes(x = X3SsnPorch, y = SalePrice)) + geom_point() # Three-season porch sq ft
ggplot(train, aes(x = ScreenPorch, y = SalePrice)) + geom_point() # Screen porch sq ft
ggplot(train, aes(x = PoolArea, y = SalePrice)) + geom_point() # Pool area
ggplot(train, aes(x = PoolQC, y = SalePrice)) + geom_point() # Pool quality
ggplot(train, aes(x = Fence, y = SalePrice)) + geom_point() # Fence quality
ggplot(train, aes(x = MiscVal, y = SalePrice)) + geom_point() # Value of misc feature
ggplot(train, aes(x = MoSold, y = SalePrice)) + geom_point() # Month sold
ggplot(train, aes(x = YrSold, y = SalePrice)) + geom_point() # Year sold
# Box plots with categorical variables against sale price
ggplot(train, aes(x = MSSubClass, y = SalePrice)) + geom_boxplot() # Type of dwelling
ggplot(train, aes(x = MSZoning, y = SalePrice)) + geom_boxplot() # Zoning classification
ggplot(train, aes(x = Street, y = SalePrice)) + geom_boxplot() # Type of road access
ggplot(train, aes(x = Alley, y = SalePrice)) + geom_boxplot() # Type of alley access
ggplot(train, aes(x = LotShape, y = SalePrice)) + geom_boxplot() # Shape of property
ggplot(train, aes(x = LandContour, y = SalePrice)) + geom_boxplot() # Flatness of property
ggplot(train, aes(x = Utilities, y = SalePrice)) + geom_boxplot() # Utilities available
ggplot(train, aes(x = LotConfig, y = SalePrice)) + geom_boxplot() # Lot configuration
ggplot(train, aes(x = LandSlope, y = SalePrice)) + geom_boxplot() # Slope of property
ggplot(train, aes(x = Neighborhood, y = SalePrice)) + geom_boxplot() # Neighborhood
ggplot(train, aes(x = Condition1, y = SalePrice)) + geom_boxplot() # Proximity to various conditions
ggplot(train, aes(x = Condition2, y = SalePrice)) + geom_boxplot() # Proximity to various conditions (2)
ggplot(train, aes(x = BldgType, y = SalePrice)) + geom_boxplot() # Type of dwelling
ggplot(train, aes(x = HouseStyle, y = SalePrice)) + geom_boxplot() # Style of dwelling
ggplot(train, aes(x = RoofStyle, y = SalePrice)) + geom_boxplot() # Type of roof
ggplot(train, aes(x = RoofMatl, y = SalePrice)) + geom_boxplot() # Roof material
ggplot(train, aes(x = Exterior1st, y = SalePrice)) + geom_boxplot() # Exterior covering
ggplot(train, aes(x = Exterior2nd, y = SalePrice)) + geom_boxplot() # Exterior covering (2)
ggplot(train, aes(x = MasVnrType, y = SalePrice)) + geom_boxplot() # Masonry veneer type
ggplot(train, aes(x = Foundation, y = SalePrice)) + geom_boxplot() # Type of foundation
ggplot(train, aes(x = Heating, y = SalePrice)) + geom_boxplot() # Type of heating
ggplot(train, aes(x = CentralAir, y = SalePrice)) + geom_boxplot() # Central air conditioning
ggplot(train, aes(x = Electrical, y = SalePrice)) + geom_boxplot() # Electrical system
ggplot(train, aes(x = GarageType, y = SalePrice)) + geom_boxplot() # Garage type
ggplot(train, aes(x = PavedDrive, y = SalePrice)) + geom_boxplot() # Paved driveway
ggplot(train, aes(x = MiscFeature, y = SalePrice)) + geom_boxplot() # Miscellaneous feature
ggplot(train, aes(x = SaleType, y = SalePrice)) + geom_boxplot() # Type of sale
ggplot(train, aes(x = SaleCondition, y = SalePrice)) + geom_boxplot() # Condition of sale
##### Extra stuff #####
# Bin the sale price into 50 equal-width groups. BUG FIX: Y is the
# response vector itself (train[, 81] drops to an atomic vector), so it
# is passed to cut() directly -- Y$SalePrice errors with
# "$ operator is invalid for atomic vectors".
price.bin <- cut(Y, breaks = 50)
# Split the training frame into categorical and numerical columns
train.cat <- select(train, which(sapply(train, is.factor)))
train.num <- select(train, which(sapply(train, is.numeric)))
| /house_prices.R | no_license | jeremyholcombe/house-prices-lasso-regression | R | false | false | 18,841 | r |
# The data directory contains 4 files:
# - data_description.txt
# - test.csv
# - train.csv
# - sample_submission.csv
##### Clean Up Workspace #####
# NOTE(review): clearing the global environment from inside a script is
# discouraged (it removes objects only, not loaded packages/options);
# prefer running the script in a fresh R session.
rm(list = ls()) # Remove Previous Workspace
gc(reset = TRUE) # Garbage Collection
########## Install and/or Load Packages ##########
# Load a package given by its bare (unquoted) name, installing it from
# CRAN first if it is not already available. Extra arguments in `...`
# are forwarded to install.packages().
packages <- function(x, repos = "http://cran.r-project.org", ...) {
  pkg <- deparse(substitute(x)) # capture the unquoted name as a string
  already_loaded <- require(pkg, character.only = TRUE)
  if (!already_loaded) {
    install.packages(pkgs = pkg, dependencies = TRUE, repos = repos, ...)
    library(pkg, character.only = TRUE)
  }
}
# Load libraries
# update.packages(repos = "http://cran.r-project.org") # Updates Packages (Do Periodically)
packages(data.table) # Data Frame Complement
packages(doParallel) # Parallel Computing
packages(foreach) # Parallel Computing
packages(jsonlite) # JSON Data
packages(reshape2) # Manipulate Datasets
packages(pdftools) # PDF to TXT Editor
packages(splitstackshape) # Stack and Reshape Datasets After Splitting Concatenated Values
packages(stringi) # Character/String Editor
packages(stringr) # Character/String Editor
packages(tm) # Text Mining
packages(dplyr) # Splitting, applying, and combining data
packages(boot) # Contains cv.glm
packages(leaps) # For regsubsets
packages(ggplot2)
packages(glmnet)
packages(forcats)
packages(caret)
##### Define functions #####
# Changes NAs to None for Categorical and 0 for Numeric
# Replace NA values throughout a data frame: factor columns get an
# explicit "None" level, all other columns have NA replaced with 0.
#
# @param df A data.frame (typically freshly read with read.csv).
# @return A data.frame of the same shape with NAs imputed as above.
munge <- function(df) {
  require(forcats)
  tdf <- df
  # Use is.factor() rather than comparing class(): class() can return a
  # vector of length > 1 (e.g. ordered factors), which breaks the old
  # `sapply(tdf, class) == "factor"` comparison.
  is_fac <- vapply(tdf, is.factor, logical(1))
  # CATEGORICAL: make NA an explicit "None" level
  tdf[is_fac] <- lapply(tdf[is_fac], function(x) fct_explicit_na(x, "None"))
  # NUMERIC (and everything else): turn NAs into 0
  tdf[!is_fac] <- lapply(tdf[!is_fac], function(x) { x[is.na(x)] <- 0; x })
  tdf
}
# Cost function: Root Mean Squared Logarithmic Error
# Root mean squared error between predictions and observations.
# NOTE: despite the name, no log is taken here -- this script transforms
# SalePrice with log(x + 1) up front, so plain RMSE on those values
# equals the RMSLE of the raw prices.
rmsle <- function(yhat, y) {
  sqrt(mean((yhat - y)^2)) # `^` is the documented power operator (`**` is not)
}
# Function to return variable names related to regsubsets output
# Return the elements of names(x) that are a prefix of at least one
# string in `end`. Used to map regsubsets() coefficient names (which
# have factor levels appended) back to original variable names.
# Returns NULL when nothing matches (same as the accumulating original).
var_select <- function (x, end) {
  candidates <- names(x)
  has_match <- vapply(
    candidates,
    function(prefix) any(startsWith(end, prefix)),
    logical(1)
  )
  matched <- unique(candidates[has_match])
  if (length(matched) > 0) matched else c()
}
# Sets ordinality in categorical variables (improves prediction accuracy)
# Convert the quality/condition-style columns to ordered factors (levels
# run worst-to-best) and collapse sparse or placeholder levels so the
# train/test model matrices line up.
#
# @param df A data.frame that has been through munge() (factor NAs are
#   the explicit level "None").
# @return The same data.frame with ordered factors and recoded levels.
fixOrdinals <- function(df) {
  qual5 <- c("Po", "Fa", "TA", "Gd", "Ex")
  fin7 <- c("None", "Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ")
  # Ordered level sets, worst first
  level.spec <- list(
    LotShape = c("IR3", "IR2", "IR1", "Reg"),
    Utilities = c("ELO", "NoSeWa", "NoSewr", "AllPub"),
    LandSlope = c("Sev", "Mod", "Gtl"),
    ExterQual = qual5,
    ExterCond = qual5,
    BsmtQual = qual5,
    BsmtCond = qual5,
    BsmtExposure = c("None", "No", "Mn", "Av", "Gd"),
    BsmtFinType1 = fin7,
    BsmtFinType2 = fin7,
    HeatingQC = qual5,
    Electrical = c("Mix", "FuseP", "FuseF", "FuseA", "SBrkr"),
    KitchenQual = qual5,
    Functional = c("Typ", "Min1", "Min2", "Mod", "Maj1", "Maj2", "Sev", "Sal"),
    FireplaceQu = qual5,
    GarageFinish = c("None", "Unf", "RFn", "Fin"),
    GarageQual = qual5,
    GarageCond = qual5,
    PavedDrive = c("N", "P", "Y"),
    PoolQC = c("None", "Fa", "TA", "Gd", "Ex")
  )
  for (col in names(level.spec)) {
    df[[col]] <- factor(df[[col]], level.spec[[col]], ordered = TRUE)
  }
  # Collapse rare/placeholder levels onto common ones: c(from, to)
  recode.spec <- list(
    Electrical = c("Mix", "FuseP"),
    Utilities = c("None", "AllPub"),
    BsmtQual = c("None", "Fa"),
    BsmtCond = c("None", "Fa"),
    KitchenQual = c("None", "Fa"),
    Functional = c("None", "Typ"),
    FireplaceQu = c("None", "Fa"),
    GarageQual = c("None", "Fa"),
    GarageCond = c("None", "Fa")
  )
  for (col in names(recode.spec)) {
    from <- recode.spec[[col]][1]
    to <- recode.spec[[col]][2]
    if (from == "None" && !("None" %in% levels(df[[col]]))) {
      # BUG FIX: the "None" placeholder produced by munge() is not a
      # level of the new factor, so factor() above turned those values
      # into NA and the old `df$col[df$col == "None"] <- to` lines
      # silently matched nothing (leaving NAs to leak into the model
      # matrix). Recode those NAs, which is what the comparison intended.
      idx <- is.na(df[[col]])
    } else {
      idx <- which(df[[col]] == from)
    }
    df[[col]][idx] <- to
  }
  df
}
##### Set up the data set #####
# Set working directory
# NOTE(review): absolute path only works on the author's machine; run the
# script from the project root (or use a relative path) for portability.
setwd("/Users/jholc89/Google Drive/R/house_prices/files")
# Read training and test set
train <- read.csv("train.csv")
test <- read.csv("test.csv")
##### Feature engineering #####
# Impute an explicit "None" level (factors) / 0 (numerics) for NAs
train <- munge(train)
test <- munge(test)
# Convert quality/condition columns to ordered factors
train <- fixOrdinals(train)
test <- fixOrdinals(test)
# Quadratic term for year built
train$YearBuilt2 <- train$YearBuilt^2
test$YearBuilt2 <- test$YearBuilt^2
# Houses without a garage carry GarageYrBlt = 0 after munge(); fall back
# to the house's construction year. BUG FIX: the right-hand side must be
# restricted to the same rows -- assigning the full-length YearBuilt
# column to a subset of rows fails (replacement length mismatch).
garage.idx <- which(train$GarageYrBlt == 0)
train[garage.idx, "GarageYrBlt"] <- train$YearBuilt[garage.idx]
# Impute specific values in the test set
test[which(test$MSSubClass == 150), "MSSubClass"] <- 160 # class 150 is unseen in training
garage.idx <- which(test$GarageYrBlt == 0)
test[garage.idx, "GarageYrBlt"] <- test$YearBuilt[garage.idx]
# Impute missing Electrical with the most common system
train[which(is.na(train$Electrical)), "Electrical"] <- "SBrkr"
# Reduce MasVnrType (fold rare BrkCmn into "None")
train$MasVnrType[train$MasVnrType == "BrkCmn"] <- "None"
test$MasVnrType[test$MasVnrType == "BrkCmn"] <- "None"
# Reduce Land Contour to two groups
train$LandContour[train$LandContour == "Bnk"] <- "Lvl"
train$LandContour[train$LandContour == "Low"] <- "HLS"
test$LandContour[test$LandContour == "Bnk"] <- "Lvl"
test$LandContour[test$LandContour == "Low"] <- "HLS"
# Reduce Lot Config (merge rare FR3 into FR2)
train$LotConfig[train$LotConfig == "FR3"] <- "FR2"
test$LotConfig[test$LotConfig == "FR3"] <- "FR2"
# Reduce LotShape to Reg or IR
train$LotShape <- as.factor(with(train, ifelse(LotShape == "Reg", "Reg", "IR")))
test$LotShape <- as.factor(with(test, ifelse(LotShape == "Reg", "Reg", "IR")))
# Reduce LandSlope to Gtl or NotGtl
train$LandSlope <- as.factor(with(train, ifelse(LandSlope == "Gtl", "Gtl", "NotGtl")))
test$LandSlope <- as.factor(with(test, ifelse(LandSlope == "Gtl", "Gtl", "NotGtl")))
# Reduce MiscFeature to Shed or No Shed
train$MiscFeature[train$MiscFeature != "None"] <- "Shed"
test$MiscFeature[test$MiscFeature != "None"] <- "Shed"
# Reduce Condition 1 (merge positive features; merge railroad categories)
train$Condition1[train$Condition1 == "PosA"] <- "PosN"
train$Condition1[train$Condition1 %in% c("RRAe", "RRAn", "RRNe", "RRNn")] <- "RRAn"
test$Condition1[test$Condition1 == "PosA"] <- "PosN"
test$Condition1[test$Condition1 %in% c("RRAe", "RRAn", "RRNe", "RRNn")] <- "RRAn"
# Reduce Foundation (merge the three rare types)
train$Foundation[train$Foundation %in% c("Slab", "Stone", "Wood")] <- "Slab"
test$Foundation[test$Foundation %in% c("Slab", "Stone", "Wood")] <- "Slab"
# Heating: gas vs everything else
# NOTE(review): this collapse is not applied to `test`; if test contains
# a heating type other than GasA this creates a level unseen in training.
train$Heating[train$Heating == "GasW"] <- "GasA"
train$Heating[train$Heating != "GasA"] <- "OthW"
# Change MSSubClass to categorical (its numeric codes are nominal)
train$MSSubClass <- as.factor(train$MSSubClass)
test$MSSubClass <- as.factor(test$MSSubClass)
# Remove observations with outliers
train <- train[which(train$GrLivArea < 4000), ] # Total square footage
# BUG FIX: logical subsetting, not -which(): if no lot exceeded 300 feet
# of frontage, -which(...) would be integer(0) and would drop EVERY row.
train <- train[!(train$LotFrontage > 300), ] # Lot frontage
train <- train[which(train$LotArea < 200000), ] # Lot area
# Log transform skewed variables (log(x + 1) so zeros stay finite)
train$SalePrice <- log(train$SalePrice + 1)
train$LotArea <- log(train$LotArea + 1)
train$GrLivArea <- log(train$GrLivArea + 1)
train$LotFrontage <- log(train$LotFrontage + 1)
train$TotalBsmtSF <- log(train$TotalBsmtSF + 1)
test$LotArea <- log(test$LotArea + 1)
test$GrLivArea <- log(test$GrLivArea + 1)
test$LotFrontage <- log(test$LotFrontage + 1)
test$TotalBsmtSF <- log(test$TotalBsmtSF + 1)
##### Matrices #####
# Separate feature matrix and response vector from training data
X <- select(train, -SalePrice, -Id)
X <- model.matrix(~ ., data = X)
# Refer to SalePrice by name: positional indexing (train[, 81]) silently
# breaks whenever columns are added or removed upstream.
Y <- train$SalePrice
# Matrix for test data
X.test <- select(test, -Id)
X.test <- model.matrix(~ ., data = X.test)
# Keep only the dummy columns present in BOTH matrices, in a single
# shared order. This avoids the -which() trap in the old code: when the
# two column sets already agreed, -which(integer(0)) selected zero
# columns instead of all of them.
common.cols <- intersect(colnames(X), colnames(X.test))
X <- X[, common.cols, drop = FALSE]
X.test <- X.test[, common.cols, drop = FALSE]
##### Models #####
# Lasso fit (alpha = 1); cv.glmnet chooses lambda by cross-validation
set.seed(123) # fixes the CV fold assignment for the next cv.glmnet call
lasso.fit <- cv.glmnet(x=X, y=Y, alpha=1)
plot(lasso.fit)
# Ridge regression fit (alpha = 0)
# Compute CV error for a grid of lambda solutions and plot
# NOTE(review): there is no set.seed() before the ridge fits, so their CV
# folds (and hence lambda.min) vary from run to run; re-seed for
# reproducibility.
ridge.fit <- cv.glmnet(x=X, y=Y, alpha=0)
lambda.hat <- ridge.fit$lambda.min
plot(x = log(ridge.fit$lambda), y = ridge.fit$cvm, xlab = 'log(lambda)', ylab = 'CV error',
main = 'CV Errors for Lambda using Ridge (Default Grid)', type = 'l')
abline(v = log(lambda.hat))
# Refine the lambda grid around the first-pass minimum and refit
lambda.new <- seq(lambda.hat, lambda.hat * .01, length = 100)
ridge.fit <- cv.glmnet(x=X, y=Y, alpha=0, lambda=lambda.new)
lambda.hat <- ridge.fit$lambda.min
plot(x = ridge.fit$lambda, y = ridge.fit$cvm, xlab = 'lambda', ylab = 'CV error',
main = 'CV Errors for Lambda using Ridge (Corrected Grid)', type = 'l')
abline(v = ridge.fit$lambda.min)
# Refitted lasso: ordinary least squares on the features the lasso kept
# ([-1] drops the intercept row of the coefficient matrix)
select.feats <- which(abs(coef(lasso.fit, s='lambda.1se'))[-1] > 1e-16)
refit.fit <- lm(Y ~ ., data = data.frame(X[, select.feats]))
###################################
##### Produce submission file #####
###################################
# Check training error (predictions and SalePrice are both on the
# log(x + 1) scale here, so rmsle() -- plain RMSE -- is the right metric)
yhat.train.ridge <- predict(ridge.fit, newx = X, s = 'lambda.min')
yhat.train.lasso <- predict(lasso.fit, newx = X, s = 'lambda.min')
yhat.train.refit <- predict(refit.fit, newdata = data.frame(X))
rmsle(yhat.train.ridge, train$SalePrice)
rmsle(yhat.train.lasso, train$SalePrice)
rmsle(yhat.train.refit, train$SalePrice)
# Predict house prices (yhat). BUG FIX: SalePrice was transformed with
# log(x + 1), so the correct inverse is expm1() -- exp() alone overstates
# every prediction by 1. The first three assignments are kept for their
# recorded Kaggle scores but are superseded by the ensemble below.
yhat <- expm1(predict(ridge.fit, newx = X.test, s = 'lambda.min')) # Kaggle score: .11771
yhat <- expm1(predict(lasso.fit, newx = X.test, s = 'lambda.min')) # Kaggle score: .12070
yhat <- expm1(predict(refit.fit, newdata = data.frame(X.test))) # Kaggle score: .12231
# Average ridge, lasso, and refitted-lasso predictions (simple ensemble)
yhat.ridge <- expm1(predict(ridge.fit, newx = X.test, s = 'lambda.min'))
yhat.lasso <- expm1(predict(lasso.fit, newx = X.test, s = 'lambda.min'))
yhat.refit <- expm1(predict(refit.fit, newdata = data.frame(X.test)))
yhat <- (yhat.ridge + yhat.lasso + yhat.refit) / 3
# Create submission.csv file
yhat # print predictions for a visual sanity check
any(is.na(yhat)) # sanity check before writing the file
prediction <- data.frame(test$Id, yhat)
colnames(prediction) <- c("Id", "SalePrice")
write.csv(prediction, "submission.csv", row.names = FALSE, quote = FALSE)
# Write design matrices to .csv for reuse elsewhere
# (TRUE/FALSE spelled out: T and F are ordinary variables and can be
# reassigned, unlike the reserved words)
write.csv(X, "train_mat.csv", row.names = FALSE, quote = FALSE)
write.csv(X.test, "test_mat.csv", row.names = FALSE, quote = FALSE)
##### Explore the training data set #####
# Missing values, duplicate data, etc.
# (Interactive sanity checks: results are printed, not stored.)
str(train) # Structure of the df: # of obs, # of variables, types of variables
any(is.na(train)) # TRUE if missing values exist, FALSE otherwise
colSums(sapply(train, is.na)) # Number of missing values per column
sum(is.na(train)) / (nrow(train) * ncol(train)) # Percentage of values that are missing
nrow(train) - nrow(unique(train)) # Number of duplicate rows
# Area graphs of numeric variables ***CONSIDER sqrt transform of sale price to correct heteroskedasticity***
ggplot(train, aes(SalePrice)) + geom_area(stat = "bin") # Sale price
ggplot(train, aes(GrLivArea)) + geom_area(stat = "bin") # Square footage
ggplot(train, aes(LotArea)) + geom_area(stat = "bin") # Lot area
ggplot(train, aes(TotalBsmtSF)) + geom_area(stat = "bin") # Basement square footage
ggplot(train, aes(LotFrontage)) + geom_area(stat = "bin") # Lot frontage
# Scatterplots with numerical variables against sale price
ggplot(train %>% filter(LotFrontage > 0), aes(x = LotFrontage, y = SalePrice)) + geom_point() + geom_smooth(method = lm) # Lot frontage
ggplot(train, aes(x = LotArea, y = SalePrice)) + geom_point() # Lot area
ggplot(train, aes(x = OverallQual, y = SalePrice)) + geom_point() # Overall quality
ggplot(train, aes(x = OverallCond, y = SalePrice)) + geom_point() # Overall condition
ggplot(train, aes(x = YearBuilt, y = SalePrice)) + geom_point() # Year built
ggplot(train, aes(x = YearRemodAdd, y = SalePrice)) + geom_point() # Year remodeled
ggplot(train, aes(x = MasVnrArea, y = SalePrice)) + geom_point() # Masonry veneer area
ggplot(train, aes(x = ExterQual, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = ExterCond, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = BsmtQual, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = BsmtCond, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = BsmtExposure, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = BsmtFinType1, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = BsmtFinSF1, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = BsmtFinType2, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = BsmtFinSF2, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = BsmtUnfSF, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = TotalBsmtSF, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = HeatingQC, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = X1stFlrSF, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = X2ndFlrSF, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = LowQualFinSF, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = GrLivArea, y = SalePrice)) + geom_point() # Square footage
ggplot(train, aes(x = BsmtFullBath, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = BsmtHalfBath, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = FullBath, y = SalePrice)) + geom_point() # Number of full baths
ggplot(train, aes(x = HalfBath, y = SalePrice)) + geom_point() # Number of half baths
ggplot(train, aes(x = BedroomAbvGr, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = KitchenAbvGr, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = KitchenQual, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = TotRmsAbvGrd, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = Functional, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = Fireplaces, y = SalePrice)) + geom_point() # Number of fireplaces
ggplot(train, aes(x = FireplaceQu, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = GarageYrBlt, y = SalePrice)) + geom_point() # Year garage was built
ggplot(train, aes(x = GarageFinish, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = GarageCars, y = SalePrice)) + geom_point() # Car capacity of garage
ggplot(train, aes(x = GarageArea, y = SalePrice)) + geom_point() # Area of garage
ggplot(train, aes(x = GarageQual, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = GarageCond, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = WoodDeckSF, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = OpenPorchSF, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = EnclosedPorch, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = X3SsnPorch, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = ScreenPorch, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = PoolArea, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = PoolQC, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = Fence, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = MiscVal, y = SalePrice)) + geom_point() #
ggplot(train, aes(x = MoSold, y = SalePrice)) + geom_point() # Month sold
ggplot(train, aes(x = YrSold, y = SalePrice)) + geom_point() # Year sold
# Box plots with categorical variables against sale price
ggplot(train, aes(x = MSSubClass, y = SalePrice)) + geom_boxplot() # Type of dwelling
ggplot(train, aes(x = MSZoning, y = SalePrice)) + geom_boxplot() # Zoning classification
ggplot(train, aes(x = Street, y = SalePrice)) + geom_boxplot() # Type of road access
ggplot(train, aes(x = Alley, y = SalePrice)) + geom_boxplot() # Type of alley access
ggplot(train, aes(x = LotShape, y = SalePrice)) + geom_boxplot() # Shape of property
ggplot(train, aes(x = LandContour, y = SalePrice)) + geom_boxplot() # Flatness of property
ggplot(train, aes(x = Utilities, y = SalePrice)) + geom_boxplot() # Utilities available
ggplot(train, aes(x = LotConfig, y = SalePrice)) + geom_boxplot() # Lot configuration
ggplot(train, aes(x = LandSlope, y = SalePrice)) + geom_boxplot() # Slope of property
ggplot(train, aes(x = Neighborhood, y = SalePrice)) + geom_boxplot() # Neighborhood
ggplot(train, aes(x = Condition1, y = SalePrice)) + geom_boxplot() # Proximity to various conditions
ggplot(train, aes(x = Condition2, y = SalePrice)) + geom_boxplot() # Proximity to various conditions (2)
ggplot(train, aes(x = BldgType, y = SalePrice)) + geom_boxplot() # Type of dwelling
ggplot(train, aes(x = HouseStyle, y = SalePrice)) + geom_boxplot() # Style of dwelling
ggplot(train, aes(x = RoofStyle, y = SalePrice)) + geom_boxplot() # Type of roof
ggplot(train, aes(x = RoofMatl, y = SalePrice)) + geom_boxplot() # Roof material
ggplot(train, aes(x = Exterior1st, y = SalePrice)) + geom_boxplot() # Exterior covering
ggplot(train, aes(x = Exterior2nd, y = SalePrice)) + geom_boxplot() # Exterior covering (2)
ggplot(train, aes(x = MasVnrType, y = SalePrice)) + geom_boxplot() # Masonry veneer type
ggplot(train, aes(x = Foundation, y = SalePrice)) + geom_boxplot() # Type of foundation
ggplot(train, aes(x = Heating, y = SalePrice)) + geom_boxplot() # Type of heating
ggplot(train, aes(x = CentralAir, y = SalePrice)) + geom_boxplot() # Central air conditioning
ggplot(train, aes(x = Electrical, y = SalePrice)) + geom_boxplot() # Electrical system
ggplot(train, aes(x = GarageType, y = SalePrice)) + geom_boxplot() # Garage type
ggplot(train, aes(x = PavedDrive, y = SalePrice)) + geom_boxplot() # Paved driveway
ggplot(train, aes(x = MiscFeature, y = SalePrice)) + geom_boxplot() # Miscellaneous feature
ggplot(train, aes(x = SaleType, y = SalePrice)) + geom_boxplot() # Type of sale
ggplot(train, aes(x = SaleCondition, y = SalePrice)) + geom_boxplot() # Condition of sale
##### Extra stuff #####
# Bin the sale price into groups
price.bin <- cut(Y$SalePrice, breaks = 50)
# Distinguish between categorical and numerical variables
train.cat <- select(train, which(sapply(train, is.factor)))
train.num <- select(train, which(sapply(train, is.numeric)))
|
#' Treatment Assignment for Regression Discontinuity
#'
#' Computes the binary treatment indicator, \code{t}, from the assignment
#' variable \code{x} and its cutoff.
#' This is an internal function and is typically not directly invoked by the user.
#' It can be accessed using the triple colon, as in rddapp:::treat_assign().
#'
#' @param x A numeric vector containing the assignment variable, \code{x}.
#' @param cut A numeric value giving the cutpoint that determines assignment
#'   to the treatment. The default is 0.
#' @param t.design A string specifying the treatment option according to design.
#'   Options are \code{"g"} (treated if \code{x} is greater than the cutoff),
#'   \code{"geq"} (treated if \code{x} is greater than or equal to the cutoff),
#'   \code{"l"} (treated if \code{x} is less than the cutoff),
#'   and \code{"leq"} (treated if \code{x} is less than or equal to the cutoff).
#'   The default is \code{"l"}.
#'
#' @return An integer vector the same length as \code{x} encoding the design,
#'   where 1 means the treated group and 0 means the control group.
treat_assign <- function(x, cut = 0, t.design = "l") {
  # Dispatch on the design code; each branch compares x against the cutoff
  # and converts the logical result to 0/1 (NA in x propagates to NA).
  # An unrecognised code falls through to the error in the default branch.
  switch(t.design,
         geq = as.integer(x >= cut),
         g   = as.integer(x > cut),
         leq = as.integer(x <= cut),
         l   = as.integer(x < cut),
         stop("Treatment design must be one of 'g', 'geq', 'l', 'leq'."))
}
| /R/treat_assign.R | no_license | felixthoemmes/rddapp | R | false | false | 1,533 | r | #' Treatment Assignment for Regression Discontinuity
#'
#' \code{treat_assign} computes the treatment variable, \code{t}, based on the cutoff of
#' assignment variable, \code{x}.
#' This is an internal function and is typically not directly invoked by the user.
#' It can be accessed using the triple colon, as in rddapp:::treat_assign().
#'
#' @param x A numeric vector containing the assignment variable, \code{x}.
#' @param cut A numeric value containing the cutpoint at which assignment to the treatment is determined. The default is 0.
#' @param t.design A string specifying the treatment option according to design.
#' Options are \code{"g"} (treatment is assigned if \code{x} is greater than its cutoff),
#' \code{"geq"} (treatment is assigned if \code{x} is greater than or equal to its cutoff),
#' \code{"l"} (treatment is assigned if \code{x} is less than its cutoff),
#' and \code{"leq"} (treatment is assigned if \code{x} is less than or equal to its cutoff).
#' The default is \code{"l"}.
#'
#' @return \code{treat_assign} returns the treatment variable as a vector according to the design,
#' where 1 means the treated group and 0 means the control group.
treat_assign <- function(x, cut = 0, t.design = "l") {
# Each comparison yields a logical vector the same length as x;
# as.integer() converts TRUE/FALSE to 1/0 (NA in x propagates to NA).
if(t.design == "geq")
return(as.integer(x >= cut))
if(t.design == "g")
return(as.integer(x > cut))
if(t.design == "leq")
return(as.integer(x <= cut))
if(t.design == "l")
return(as.integer(x < cut))
# Unrecognised design code: fail loudly rather than silently return NULL.
stop("Treatment design must be one of 'g', 'geq', 'l', 'leq'.")
}
|
library(ggmap)
# Connect to the "DWH" ODBC data source.
# NOTE(review): dbConnect/dbSendQuery/dbFetch come from DBI, which is not
# library()'d here -- presumably attached elsewhere in the session; confirm.
# NOTE(review): the result set is never cleared (dbClearResult) and the
# connection is never closed (dbDisconnect).
calling <- dbConnect(odbc::odbc(), "DWH")
# Fetch one subscriber's cell-level location records for February 2018
# (MSISDN redacted to 'XXXXXXXX'); n=-1 fetches all rows.
mobility_loc <- dbFetch(dbSendQuery(calling,
"
SELECT
S.SUSG_REF_NUM,
S.RECORDOPENINGTIME,
S.SERVEDMSISDN,
N.CELL_NAME,
N.LATITUDE ,
N.LONGITUDE
FROM DWH_PROD.SGSN_FACTS S
LEFT OUTER JOIN DWH_PROD.NETWORK_NODE N ON (S.NETWORK_NODE_ID = N.ID)
WHERE S.RECORDOPENINGTIME >= '2018-02-01'
AND S.RECORDOPENINGTIME < '2018-03-01'
AND SERVEDMSISDN = 'XXXXXXXX'
"
), n=-1)
library(googleway)
# Geocode demo; the key placeholder must be filled in before this works.
google_geocode(address = "Tallinn",
key = '')
head(mobility_loc)
# Get Map background
# NOTE(review): this Stamen map is immediately overwritten by the Google
# map below -- only the second get_map() result is used.
map <- get_map(location = c(lon = 24.67787, lat = 59.45713),
zoom = 10, source = "stamen", maptype = "toner-background")
# Get Google Map background (api_key is a blank placeholder -- fill in)
map <- get_map(location = c(lon = 24.67787, lat = 59.45713),
zoom = 10, source = "google", maptype = "hybrid", api_key="")
# API key
# NOTE(review): register_google() runs after the get_map(source="google")
# call above -- confirm the key is supplied via api_key= or move this up.
register_google(key = '')
# plot
# Draw the subscriber's movement path over the map, one colour per session
# reference; legend suppressed, view clipped to the Tallinn area.
ggmap(map) +
geom_path(data = mobility_loc, aes(x= as.numeric(LONGITUDE), y= as.numeric(LATITUDE), color= SUSG_REF_NUM ), alpha=0.3) +
coord_map(xlim = c(24.65, 24.85),ylim = c(59.40, 59.47)) +
theme(legend.position="none")
| /google_geocode_API_positioning.R | no_license | alexus987/R | R | false | false | 1,713 | r | library(ggmap)
# Open the "DWH" ODBC connection.
# NOTE(review): dbConnect/dbSendQuery/dbFetch are DBI functions and DBI is
# not explicitly loaded here -- confirm it is attached upstream.  Also the
# result set and connection are never released (dbClearResult/dbDisconnect).
calling <- dbConnect(odbc::odbc(), "DWH")
# Pull a single subscriber's February-2018 cell locations (MSISDN redacted);
# n=-1 retrieves every row of the result.
mobility_loc <- dbFetch(dbSendQuery(calling,
"
SELECT
S.SUSG_REF_NUM,
S.RECORDOPENINGTIME,
S.SERVEDMSISDN,
N.CELL_NAME,
N.LATITUDE ,
N.LONGITUDE
FROM DWH_PROD.SGSN_FACTS S
LEFT OUTER JOIN DWH_PROD.NETWORK_NODE N ON (S.NETWORK_NODE_ID = N.ID)
WHERE S.RECORDOPENINGTIME >= '2018-02-01'
AND S.RECORDOPENINGTIME < '2018-03-01'
AND SERVEDMSISDN = 'XXXXXXXX'
"
), n=-1)
library(googleway)
# Geocoding example; requires a real API key in place of ''.
google_geocode(address = "Tallinn",
key = '')
head(mobility_loc)
# Get Map background
# NOTE(review): this Stamen basemap is discarded -- `map` is reassigned by
# the Google basemap call right below.
map <- get_map(location = c(lon = 24.67787, lat = 59.45713),
zoom = 10, source = "stamen", maptype = "toner-background")
# Get Google Map background (blank api_key placeholder -- must be filled)
map <- get_map(location = c(lon = 24.67787, lat = 59.45713),
zoom = 10, source = "google", maptype = "hybrid", api_key="")
# API key
# NOTE(review): registering the key after the google get_map() call above
# looks out of order -- verify the key reaches that call.
register_google(key = '')
# plot
# Movement path coloured by session reference, clipped to Tallinn,
# with the (very large) legend hidden.
ggmap(map) +
geom_path(data = mobility_loc, aes(x= as.numeric(LONGITUDE), y= as.numeric(LATITUDE), color= SUSG_REF_NUM ), alpha=0.3) +
coord_map(xlim = c(24.65, 24.85),ylim = c(59.40, 59.47)) +
theme(legend.position="none")
|
# Plotting/reshaping libraries: reshape (melt), gplots (heatmap.2),
# gridExtra (grid.arrange), plus ggplot2 helpers.
library('plyr')
library('ggplot2')
library('plotrix')
library('cowplot')
library('reshape')
library('gplots')
library('gridExtra')

# Read a tab-delimited, header-less gene list and return it as a flat
# character vector, dropping any entries listed in `drop` (empty strings
# by default).  Replaces six copy-pasted read/clean blocks.
read_gene_list <- function(path, drop = c("")) {
  genes <- c(as.matrix(read.delim(path, header = FALSE, sep = "\t", dec = ".")))
  genes[!genes %in% drop]
}

# Candidate gene sets (FA/AJ/TJ/GJ/RAC/ECM -- presumably focal adhesion,
# adherens/tight/gap junction, Rac signalling and extracellular-matrix
# lists; confirm against the source files).
FA <- read_gene_list('FA.txt')
AJ <- read_gene_list('AJ.txt')
TJ <- read_gene_list('TJ.txt')
GJ <- read_gene_list('GJ.txt')
RAC <- read_gene_list('RAC.txt')
ECM <- read_gene_list('ECM.txt')
## Earlier candidate gene selections, kept commented out for reference:
#INT <- Reduce(intersect, list(ECM,AJ))
#INT <- c(INT, 'IQGAP1')
#INT <- c('CDC42','RAC1','IQGAP1','ARHGDIA','CTNNB1','APC','GSK3B','CTNNA1','DVL2')
#INT <- c('CDC42','RAC1','VCL','MYH9','RHOA','PTK2','SRC','PAK1','PXN')
#INT <- c('RHOB','RAC1','RHOC','RHOG','PTK2','IL8','DKK1','MAPK9','WNT3')
#INT <- c('WNT2','WNT2B','WNT4','WNT9A','WNT8B','WNT11','WNT10B','WNT9B','WNT3','WNT7B','WNT5A')
#INT <- c('ARPC1A','ARPC1A','ARPC2','ARPC3','ARPC4,ARPC4-TTLL3,TTLL3','ARPC5','ACTR2')
#INT <- FA
#INT <- c('DAG1','RHOB','ARPC3','RAC1','AKT2','WNT5A','ROR1')
#INT <- c('WNT3','WNT5A')
#INT <- c('WNT3','WNT5A','FZD2','ROR1','RAC1','RHOA','CTNNB1','DVL1','DVL2','DVL3','DKK1','PTPB1','LRP5','LRP6','GSK3B','CDC42','PXN','JNK','VCL')
#INT <- c('ABCB1','CTNNB1')
# Genes of interest for this run.
INT <- c('C3AR1','C5AR1')
# Expression tables at three timepoints (genes0/2/6): keep only genes in
# INT and columns 2,4,6,7 (gene_name plus three sample columns).
M1 <- as.data.frame(read.table('genes0.txt', header = TRUE))
M1 <- M1[M1$gene_name %in% INT,]
#M1 <- M1[,c(2,4:7)]
M1 <- M1[,c(2,4,6,7)]
M2 <- as.data.frame(read.table('genes2.txt', header = TRUE))
M2 <- M2[M2$gene_name %in% INT,]
#M2 <- M2[,c(2,4:7)]
M2 <- M2[,c(2,4,6,7)]
M3 <- as.data.frame(read.table('genes6.txt', header = TRUE))
M3 <- M3[M3$gene_name %in% INT,]
#M3 <- M3[,c(2,4:7)]
M3 <- M3[,c(2,4,6,7)]
# Inner-join the three timepoints on gene name (genes missing from any
# timepoint are dropped; the commented all=T variants kept them).
M <- merge(M1, M2, by='gene_name')
M <- merge(M, M3, by='gene_name')
#M <- merge(M1, M2, by="gene_name", all = T)
#M <- merge(M, M3, by="gene_name", all = T)
#M[is.na(M)] <- 0
#names(M) <- c('Gene',
# 'CCM1_0','CCM2_0','CCM3_0','WT_0',
# 'CCM1_2','CCM2_2','CCM3_2','WT_2',
# 'CCM1_6','CCM2_6','CCM3_6','WT_6')
# Rename to genotype_timepoint labels (CCM1/CCM3/WT at 0, 2 and 6).
names(M) <- c('Gene',
'CCM1_0','CCM3_0','WT_0',
'CCM1_2','CCM3_2','WT_2',
'CCM1_6','CCM3_6','WT_6')
# Commented-out normalisation experiments (ratio-to-WT, difference-to-WT,
# column reordering); currently the raw merged values are used.
#M[,c(2:10)] <- M[,c(2:10)] + 0.000001
#M[,2:4] <- M[,2:4]/M[,4]-1
#M[,5:7] <- M[,5:7]/M[,7]-1
#M[,8:10] <- M[,8:10]/M[,10]-1
#M <- MN[,c(1,2,5,8,3,6,9,4,7,10)]
#M <- M[,c(1,2,5,8,3,6,9,4,7,10)]
#M[,2:5] <- M[,2:5] - M[,5]
#M[,6:9] <- M[,6:9] - M[,9]
#M[,10:13] <- M[,10:13] - M[,13]
#x <- as.matrix(M[,c(2:13)])
x <- as.matrix(M[,c(2:10)])
row.names(x) <- M$Gene
#x <- x[,c(1,5,9,2,6,10,3,7,11,4,8,12)]
# Row-scaled heatmap with the cell values printed.
# NOTE(review): 'dendogram' is a misspelling of heatmap.2's 'dendrogram'
# argument, so it is most likely absorbed by '...' and ignored -- confirm.
heatmap.2(x, cellnote=round(x,2), notecol = 'black',
scale = "row", col = bluered(100), dendogram = 'none', Colv=FALSE, #Rowv = FALSE,
trace = "none", density.info = "none")
# Second heatmap (no cell values) kept only to capture the row ordering.
heat <- heatmap.2(x, scale = "row", col = bluered(100), Colv=FALSE, #Rowv = FALSE,
trace = "none", density.info = "none")
heat
# Reorder M to match the heatmap's row-dendrogram order.
ord_genes <- rownames(x)[heat$rowInd]
M <- M[match(ord_genes, M$Gene),]
# Long format for ggplot: one row per (gene, sample) pair.
# NOTE(review): reshape::melt() takes id.vars=, not by=; 'by' is likely
# ignored and the id variable (Gene) guessed -- confirm.
# NOTE(review): the transforms above are commented out, so 'Percent_Change'
# currently holds raw expression values, not percent changes.
ML <- melt(M, by='Gene')
names(ML) <- c('Gene','Type','Percent_Change')
ML$Gene <- factor(ML$Gene, levels = M$Gene)
# Nine colours, one per Type level in column order (CCM1_0, CCM3_0, WT_0,
# then the _2 and _6 triplets): reds = timepoint 0, blues = 2, greys = 6.
colors <- c("#FFCCCC", "#FF3333", "#990000", "#99CCFF", "#3399FF", "#004C99", "#E0E0E0", "#A0A0A0", "#404040")
# Dot plot of all genes on one axis, coloured by sample.
p <- ggplot() +
geom_point(data = ML, aes(x=Percent_Change, y=Gene, color=Type)) +
scale_color_manual(values=colors) #+ ylim(-1.2,2)
p
# One small dot plot per gene, arranged in a grid.
p <- list()
d <- list()
i <- 1
for(name in M$Gene){
d[[i]] <- ML[ML$Gene == name, ]
p[[i]] <- ggplot(data = d[[i]]) +
geom_point(aes(x=Percent_Change, y=Gene, color=Type)) +
scale_color_manual(values=colors)
i <- i+1
}
do.call("grid.arrange", c(p))
# Save the heatmap-ordered gene list for downstream use.
write(ord_genes, 'selected.txt')
| /Expression Analysis/intersections.R | no_license | aazhur/Biomechanics-Endothelial-Cells | R | false | false | 3,993 | r | library('plyr')
# Plotting/reshaping libraries (plyr is loaded just above this block):
# reshape (melt), gplots (heatmap.2), gridExtra (grid.arrange).
library('ggplot2')
library('plotrix')
library('cowplot')
library('reshape')
library('gplots')
library('gridExtra')

# Read a tab-delimited, header-less gene list and return it as a flat
# character vector, dropping any entries listed in `drop` (empty strings
# by default).  Replaces six copy-pasted read/clean blocks.
read_gene_list <- function(path, drop = c("")) {
  genes <- c(as.matrix(read.delim(path, header = FALSE, sep = "\t", dec = ".")))
  genes[!genes %in% drop]
}

# Candidate gene sets (FA/AJ/TJ/GJ/RAC/ECM -- presumably focal adhesion,
# adherens/tight/gap junction, Rac signalling and extracellular-matrix
# lists; confirm against the source files).
FA <- read_gene_list('FA.txt')
AJ <- read_gene_list('AJ.txt')
TJ <- read_gene_list('TJ.txt')
GJ <- read_gene_list('GJ.txt')
RAC <- read_gene_list('RAC.txt')
ECM <- read_gene_list('ECM.txt')
## Earlier candidate gene selections, kept commented out for reference:
#INT <- Reduce(intersect, list(ECM,AJ))
#INT <- c(INT, 'IQGAP1')
#INT <- c('CDC42','RAC1','IQGAP1','ARHGDIA','CTNNB1','APC','GSK3B','CTNNA1','DVL2')
#INT <- c('CDC42','RAC1','VCL','MYH9','RHOA','PTK2','SRC','PAK1','PXN')
#INT <- c('RHOB','RAC1','RHOC','RHOG','PTK2','IL8','DKK1','MAPK9','WNT3')
#INT <- c('WNT2','WNT2B','WNT4','WNT9A','WNT8B','WNT11','WNT10B','WNT9B','WNT3','WNT7B','WNT5A')
#INT <- c('ARPC1A','ARPC1A','ARPC2','ARPC3','ARPC4,ARPC4-TTLL3,TTLL3','ARPC5','ACTR2')
#INT <- FA
#INT <- c('DAG1','RHOB','ARPC3','RAC1','AKT2','WNT5A','ROR1')
#INT <- c('WNT3','WNT5A')
#INT <- c('WNT3','WNT5A','FZD2','ROR1','RAC1','RHOA','CTNNB1','DVL1','DVL2','DVL3','DKK1','PTPB1','LRP5','LRP6','GSK3B','CDC42','PXN','JNK','VCL')
#INT <- c('ABCB1','CTNNB1')
# Genes of interest for this run.
INT <- c('C3AR1','C5AR1')
# Expression tables at three timepoints (genes0/2/6): keep only genes in
# INT and columns 2,4,6,7 (gene_name plus three sample columns).
M1 <- as.data.frame(read.table('genes0.txt', header = TRUE))
M1 <- M1[M1$gene_name %in% INT,]
#M1 <- M1[,c(2,4:7)]
M1 <- M1[,c(2,4,6,7)]
M2 <- as.data.frame(read.table('genes2.txt', header = TRUE))
M2 <- M2[M2$gene_name %in% INT,]
#M2 <- M2[,c(2,4:7)]
M2 <- M2[,c(2,4,6,7)]
M3 <- as.data.frame(read.table('genes6.txt', header = TRUE))
M3 <- M3[M3$gene_name %in% INT,]
#M3 <- M3[,c(2,4:7)]
M3 <- M3[,c(2,4,6,7)]
# Inner-join the three timepoints on gene name (genes missing from any
# timepoint are dropped; the commented all=T variants kept them).
M <- merge(M1, M2, by='gene_name')
M <- merge(M, M3, by='gene_name')
#M <- merge(M1, M2, by="gene_name", all = T)
#M <- merge(M, M3, by="gene_name", all = T)
#M[is.na(M)] <- 0
#names(M) <- c('Gene',
# 'CCM1_0','CCM2_0','CCM3_0','WT_0',
# 'CCM1_2','CCM2_2','CCM3_2','WT_2',
# 'CCM1_6','CCM2_6','CCM3_6','WT_6')
# Rename to genotype_timepoint labels (CCM1/CCM3/WT at 0, 2 and 6).
names(M) <- c('Gene',
'CCM1_0','CCM3_0','WT_0',
'CCM1_2','CCM3_2','WT_2',
'CCM1_6','CCM3_6','WT_6')
# Commented-out normalisation experiments (ratio-to-WT, difference-to-WT,
# column reordering); currently the raw merged values are used.
#M[,c(2:10)] <- M[,c(2:10)] + 0.000001
#M[,2:4] <- M[,2:4]/M[,4]-1
#M[,5:7] <- M[,5:7]/M[,7]-1
#M[,8:10] <- M[,8:10]/M[,10]-1
#M <- MN[,c(1,2,5,8,3,6,9,4,7,10)]
#M <- M[,c(1,2,5,8,3,6,9,4,7,10)]
#M[,2:5] <- M[,2:5] - M[,5]
#M[,6:9] <- M[,6:9] - M[,9]
#M[,10:13] <- M[,10:13] - M[,13]
#x <- as.matrix(M[,c(2:13)])
x <- as.matrix(M[,c(2:10)])
row.names(x) <- M$Gene
#x <- x[,c(1,5,9,2,6,10,3,7,11,4,8,12)]
# Row-scaled heatmap with the cell values printed.
# NOTE(review): 'dendogram' is a misspelling of heatmap.2's 'dendrogram'
# argument, so it is most likely absorbed by '...' and ignored -- confirm.
heatmap.2(x, cellnote=round(x,2), notecol = 'black',
scale = "row", col = bluered(100), dendogram = 'none', Colv=FALSE, #Rowv = FALSE,
trace = "none", density.info = "none")
# Second heatmap (no cell values) kept only to capture the row ordering.
heat <- heatmap.2(x, scale = "row", col = bluered(100), Colv=FALSE, #Rowv = FALSE,
trace = "none", density.info = "none")
heat
# Reorder M to match the heatmap's row-dendrogram order.
ord_genes <- rownames(x)[heat$rowInd]
M <- M[match(ord_genes, M$Gene),]
# Long format for ggplot: one row per (gene, sample) pair.
# NOTE(review): reshape::melt() takes id.vars=, not by=; 'by' is likely
# ignored and the id variable (Gene) guessed -- confirm.
# NOTE(review): the transforms above are commented out, so 'Percent_Change'
# currently holds raw expression values, not percent changes.
ML <- melt(M, by='Gene')
names(ML) <- c('Gene','Type','Percent_Change')
ML$Gene <- factor(ML$Gene, levels = M$Gene)
# Nine colours, one per Type level in column order (CCM1_0, CCM3_0, WT_0,
# then the _2 and _6 triplets): reds = timepoint 0, blues = 2, greys = 6.
colors <- c("#FFCCCC", "#FF3333", "#990000", "#99CCFF", "#3399FF", "#004C99", "#E0E0E0", "#A0A0A0", "#404040")
# Dot plot of all genes on one axis, coloured by sample.
p <- ggplot() +
geom_point(data = ML, aes(x=Percent_Change, y=Gene, color=Type)) +
scale_color_manual(values=colors) #+ ylim(-1.2,2)
p
# One small dot plot per gene, arranged in a grid.
p <- list()
d <- list()
i <- 1
for(name in M$Gene){
d[[i]] <- ML[ML$Gene == name, ]
p[[i]] <- ggplot(data = d[[i]]) +
geom_point(aes(x=Percent_Change, y=Gene, color=Type)) +
scale_color_manual(values=colors)
i <- i+1
}
do.call("grid.arrange", c(p))
# Save the heatmap-ordered gene list for downstream use.
write(ord_genes, 'selected.txt')
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{stripe_update_invoice_item}
\alias{stripe_update_invoice_item}
\title{Update an Invoice Item.}
\usage{
stripe_update_invoice_item(api_key, item_id, args)
}
\arguments{
\item{api_key}{Your Stripe API Key}
\item{item_id}{The invoice item id you want to update}
\item{args}{Can include
\describe{
\item{amount}{\emph{optional} Amount in cents for item.}
\item{description}{\emph{optional} A string to attach to item for easy tracking.}
\item{metadata}{\emph{optional} key/value pairs that you can attach to an invoice item.}
}}
}
\value{
A data frame with the updated invoice item if successful.
}
\description{
Update the information or amount of an invoice item.
}
| /man/stripe_update_invoice_item.Rd | no_license | isabella232/RStripe | R | false | false | 735 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{stripe_update_invoice_item}
\alias{stripe_update_invoice_item}
\title{Update an Invoice Item.}
\usage{
stripe_update_invoice_item(api_key, item_id, args)
}
\arguments{
\item{api_key}{Your Stripe API Key}
\item{item_id}{The invoice item id you want to update}
\item{args}{Can include
\describe{
\item{amount}{\emph{optional} Amount in cents for item.}
\item{description}{\emph{optional} A string to attach to item for easy tracking.}
\item{metadata}{\emph{optional} key/value pairs that you can attach to an invoice item.}
}}
}
\value{
A data frame with the updated invoice item if successful.
}
\description{
Update the information or amount of an invoice item.
}
|
#' Get occurrence data
#'
#' Retrieve ALA occurrence data via the "occurrence download" web service. At least one of \code{taxon}, \code{wkt}, or \code{fq} must be supplied for a valid query. Note that the current service is limited to a maximum of 500000 records per request.
#'
#' @author Atlas of Living Australia \email{support@@ala.org.au}
#' @references \itemize{
#' \item \url{http://api.ala.org.au/}
#' \item Field definitions: \url{https://docs.google.com/spreadsheet/ccc?key=0AjNtzhUIIHeNdHhtcFVSM09qZ3c3N3ItUnBBc09TbHc}
#' \item WKT reference: \url{http://www.geoapi.org/3.0/javadoc/org/opengis/referencing/doc-files/WKT.html}
#' }
#' @param taxon string: (optional) taxonomic query of the form field:value (e.g. "genus:Macropus") or a free text search ("Alaba vibex")
#' @param wkt string: (optional) a WKT (well-known text) string providing a spatial polygon within which to search, e.g. "POLYGON((140 -37,151 -37,151 -26,140.131 -26,140 -37))"
#' @param fq string: (optional) character string or vector of strings, specifying filters to be applied to the original query. These are of the form "INDEXEDFIELD:VALUE" e.g. "kingdom:Fungi".
#' See \code{ala_fields("occurrence")} for all the fields that are queryable.
#' NOTE that fq matches are case-sensitive, but sometimes the entries in the fields are
#' not consistent in terms of case (e.g. kingdom names "Fungi" and "Plantae" but "ANIMALIA").
#' fq matches are ANDed by default (e.g. c("field1:abc","field2:def") will match records that have
#' field1 value "abc" and field2 value "def"). To obtain OR behaviour, use the form c("field1:abc
#' OR field2:def")
#' @param fields string vector: (optional) a vector of field names to return. Note that the columns of the returned data frame
#' are not guaranteed to retain the ordering of the field names given here. If not specified, a default list of fields will be returned. See \code{ala_fields("occurrence")} for valid field names. Field names can be passed as full names (e.g. "Radiation - lowest period (Bio22)") rather than id ("el871")
#' @param extra string vector: (optional) a vector of field names to include in addition to those specified in \code{fields}. This is useful if you would like the default list of fields (i.e. when \code{fields} parameter is not specified) plus some additional extras. See \code{ala_fields("occurrence")} for valid field names. Field names can be passed as full names (e.g. "Radiation - lowest period (Bio22)") rather than id ("el871")
#' @param qa string vector: (optional) list of record issues to include in the download. See \code{ala_fields("assertions")} for valid values, or use "none" to include no record issues
#' @param download_reason_id numeric or string: (required unless record_count_only is TRUE) a reason code for the download, either as a numeric ID (currently 0--11) or a string (see \code{\link{ala_reasons}} for a list of valid ID codes and names). The download_reason_id can be passed directly to this function, or alternatively set using \code{ala_config(download_reason_id=...)}
#' @param reason string: (optional) user-supplied description of the reason for the download. Providing this information is optional but will help the ALA to better support users by building a better understanding of user communities and their data requests
#' @param verbose logical: show additional progress information? [default is set by ala_config()]
#' @param record_count_only logical: if TRUE, return just the count of records that would be downloaded, but don't download them. Note that the record count is always re-retrieved from the ALA, regardless of the caching settings. If a cached copy of this query exists on the local machine, the actual data set size may therefore differ from this record count
#' @param use_layer_names logical: if TRUE, layer names will be used as layer column names in the returned data frame (e.g. "radiationLowestPeriodBio22"). Otherwise, layer id value will be used for layer column names (e.g. "el871")
#' @param use_data_table logical: if TRUE, attempt to read the data.csv file using the fread function from the data.table package. Requires data.table to be available. If this fails with an error or warning, or if use_data_table is FALSE, then read.table will be used (which may be slower)
#'
#' @return Data frame of occurrence results, with one row per occurrence record. The columns of the dataframe will depend on the requested fields
#' @seealso \code{\link{ala_reasons}} for download reasons; \code{\link{ala_config}}
#' @examples
#' x=occurrences(taxon="data_resource_uid:dr356",record_count_only=TRUE) ## count of records from this data provider
#' x=occurrences(taxon="data_resource_uid:dr356",download_reason_id=10) ## download records, with standard fields
#' \dontrun{
#' x=occurrences(taxon="data_resource_uid:dr356",download_reason_id=10,fields=ala_fields("occurrence")$name) ## download records, with all fields
#' x=occurrences(taxon="macropus",fields=c("longitude","latitude","common_name","taxon_name","el807"),download_reason_id=10) ## download records, with specified fields
#' x=occurrences(taxon="macropus",wkt="POLYGON((145 -37,150 -37,150 -30,145 -30,145 -37))",download_reason_id=10,qa="none") ## download records in polygon, with no quality assertion information
#'
#' y=occurrences(taxon="alaba vibex",fields=c("latitude","longitude","el874"),download_reason_id=10)
#' str(y)
#' # equivalent direct webservice call: http://biocache.ala.org.au/ws/occurrences/index/download?reasonTypeId=10&q=Alaba%20vibex&fields=latitude,longitude,el874&qa=none
#'
#' occurrences(taxon="Eucalyptus gunnii",fields=c("latitude","longitude"),qa="none",fq="basis_of_record:LivingSpecimen",download_reason_id=10)
#' # equivalent direct webservice call: http://biocache.ala.org.au/ws/occurrences/index/download?reasonTypeId=10&q=Eucalyptus%20gunnii&fields=latitude,longitude&qa=none&fq=basis_of_record:LivingSpecimen
#' }
#' @export occurrences
## NOTE - the all-fields example caused a segfault on rforge, so don't take it out of the dontrun block [this one: x=occurrences(taxon="data_resource_uid:dr356",download_reason_id=10,fields=ala_fields("occurrence")$name) ## download records, with all fields]
## TODO document fq alone as a query
## TODO: more extensive testing, particularly of the csv-conversion process
## TODO LATER: add params: lat, lon, radius (for specifying a search circle)
## Build and execute an ALA "occurrence download" request. At least one of
## taxon, fq or wkt must be supplied. Returns either the record count
## (when record_count_only=TRUE) or a list(data=<data.frame>, meta=<citation>)
## carrying the S3 class "occurrences".
occurrences=function(taxon,wkt,fq,fields,extra,qa,download_reason_id=ala_config()$download_reason_id,reason,verbose=ala_config()$verbose,record_count_only=FALSE,use_layer_names=TRUE,use_data_table=TRUE) {
    ## check input parms are sensible
    assert_that(is.flag(record_count_only))
    #taxon = clean_string(taxon) ## clean up the taxon name # no - because this can be an indexed query like field1:value1
    base_url=paste(ala_config()$base_url_biocache,"occurrences/index/download",sep="")
    this_query=list()
    ## have we specified a taxon?
    if (!missing(taxon)) {
        assert_that(is.string(taxon))
        this_query$q=taxon
    }
    ## wkt string (spatial polygon filter)
    if (!missing(wkt)) {
        assert_that(is.string(wkt))
        this_query$wkt=wkt
    }
    if (!missing(fq)) {
        assert_that(is.character(fq))
        ## can have multiple fq parameters, need to specify in url as fq=a:b&fq=c:d&fq=...
        check_fq(fq,type="occurrence") ## check that fq fields are valid
        fq=as.list(fq)
        names(fq)=rep("fq",length(fq))
        this_query=c(this_query,fq)
    }
    if (length(this_query)==0) {
        ## not a valid request!
        stop("invalid request: need at least one of taxon, fq, or wkt to be specified")
    }
    ## check the number of records
    if (record_count_only) {
        ## count via the search service with pageSize=0 so that no records are fetched,
        ## e.g. http://biocache.ala.org.au/ws/occurrences/search?q=*:*&pageSize=0&facet=off
        temp_query=this_query
        temp_query$pageSize=0
        temp_query$facet="off"
        this_url=parse_url(paste(ala_config()$base_url_biocache,"occurrences/search",sep=""))
        this_url$query=temp_query
        this_url=build_url(this_url)
        # ## don't need to check number of records if caching is on and we already have the file
        # cache_file_exists=file.exists(ala_cache_filename(this_url))
        # if ((ala_config()$caching %in% c("off","refresh")) | (!cache_file_exists & ala_config()$caching=="on")) {
        ## check
        # num_records=cached_get(url=this_url,type="json")$totalRecords
        # cat(sprintf('ALA4R occurrences: downloading dataset with %d records',num_records))
        #}
        ## caching is forced off here so the count is always current
        return(cached_get(url=this_url,type="json",caching="off",verbose=verbose)$totalRecords)
    }
    assert_that(is.flag(use_data_table))
    assert_that(is.flag(use_layer_names))
    ## a download reason is mandatory for the download service: accept either a
    ## numeric id or a string name, and validate it against ala_reasons()
    reason_ok=!is.na(download_reason_id)
    if (reason_ok) {
        valid_reasons=ala_reasons()
        download_reason_id=convert_reason(download_reason_id) ## convert from string to numeric if needed
        reason_ok=download_reason_id %in% valid_reasons$id
    }
    if (! reason_ok) {
        stop("download_reason_id must be a valid reason_id. See ala_reasons(). Set this value directly here or through ala_config(download_reason_id=...)")
    }
    if (!missing(fields)) {
        assert_that(is.character(fields))
        ## user has specified some fields
        fields=fields_name_to_id(fields=fields,fields_type="occurrence") ## replace long names with ids
        valid_fields=ala_fields(fields_type="occurrence")
        unknown=setdiff(fields,valid_fields$name)
        if (length(unknown)>0) {
            stop("invalid fields requested: ", str_c(unknown,collapse=", "), ". See ala_fields(\"occurrence\")")
        }
        this_query$fields=str_c(fields,collapse=",")
    }
    if (!missing(extra)) {
        assert_that(is.character(extra))
        ## extra fields are requested in addition to the default field set
        extra=fields_name_to_id(fields=extra,fields_type="occurrence") ## replace long names with ids
        valid_fields=ala_fields(fields_type="occurrence")
        unknown=setdiff(extra,valid_fields$name)
        if (length(unknown)>0) {
            stop("invalid extra fields requested: ", str_c(unknown,collapse=", "), ". See ala_fields(\"occurrence\")")
        }
        this_query$extra=str_c(extra,collapse=",")
    }
    if (!missing(qa)) {
        assert_that(is.character(qa))
        valid_fields=c("none",ala_fields(fields_type="assertions")$name) ## valid entries for qa
        unknown=setdiff(qa,valid_fields)
        if (length(unknown)>0) {
            stop("invalid qa fields requested: ", str_c(unknown,collapse=", "), ". See ala_fields(\"assertions\")")
        }
        this_query$qa=str_c(qa,collapse=",")
    }
    if (!missing(reason)) {
        assert_that(is.string(reason))
        this_query$reason=reason
    }
    this_query$reasonTypeId=download_reason_id
    this_query$esc="\\" ## force backslash-escaping of quotes rather than double-quote escaping
    this_query$sep="\t" ## tab-delimited
    this_query$file="data" ## to ensure that file is named "data.csv" within the zip file
    this_url=parse_url(base_url)
    this_url$query=this_query
    ## these downloads can potentially be large, so we want to download directly to file and then read the file
    thisfile=cached_get(url=build_url(this_url),type="binary_filename",verbose=verbose)
    if (!(file.info(thisfile)$size>0)) {
        ## empty file
        x=NULL
        ## actually this isn't a sufficient check, since even with empty data.csv file inside, the outer zip file will be > 0 bytes. Check again below on the actual data.csv file
    } else {
        ## if data.table is available, first try using this
        read_ok=FALSE
        ## NOTE(review): installed.packages() is a slow way to test availability;
        ## requireNamespace("data.table",quietly=TRUE) is the conventional check.
        if (use_data_table & is.element('data.table', installed.packages()[,1])) { ## if data.table package is available
            require(data.table) ## load it
            tryCatch({
                ## first need to extract data.csv from the zip file
                ## this may end up making fread() slower than direct read.table() ... needs testing
                tempsubdir=tempfile(pattern="dir")
                if (verbose) {
                    cat(sprintf(" ALA4R: unzipping downloaded occurrences data.csv file into %s\n",tempsubdir))
                }
                dir.create(tempsubdir)
                unzip(thisfile,files=c("data.csv"),junkpaths=TRUE,exdir=tempsubdir)
                ## first check if file is empty
                if (file.info(file.path(tempsubdir,"data.csv"))$size>0) {
                    x=fread(file.path(tempsubdir,"data.csv"),stringsAsFactors=FALSE,header=TRUE,verbose=verbose)
                    ## make sure names of x are valid, as per data.table
                    setnames(x,make.names(names(x)))
                    ## now coerce it back to data.frame (for now at least, unless we decide to not do this!)
                    x=as.data.frame(x)
                    if (!empty(x)) {
                        ## convert column data types
                        ## ALA supplies *all* values as quoted text, even numeric, and they appear here as character type
                        ## we will convert whatever looks like numeric or logical to those classes
                        x=colwise(convert_dt)(x)
                    }
                    read_ok=TRUE
                } else {
                    x=data.frame() ## empty result set
                    read_ok=TRUE
                }
            }, warning=function(e) {
                ## NOTE(review): this assignment is local to the handler function and
                ## does not change the outer read_ok; the fallback still works because
                ## read_ok is only ever set TRUE after a fully successful read above.
                if (verbose) {
                    warning("ALA4R: reading of csv as data.table failed, will fall back to read.table (may be slow). The warning message was: ",e)
                }
                read_ok=FALSE
            }
            , error=function(e) {
                ## NOTE(review): as for the warning handler - this read_ok is local.
                if (verbose) {
                    warning("ALA4R: reading of csv as data.table failed, will fall back to read.table (may be slow). The error message was: ",e)
                }
                read_ok=FALSE
            })
        }
        if (!read_ok) {
            ## fall back to base read.table, reading data.csv directly from the zip
            x=read.table(unz(thisfile,filename="data.csv"),header=TRUE,comment.char="",as.is=TRUE)
            if (!empty(x)) {
                ## convert column data types
                ## read.table handles quoted numerics but not quoted logicals
                x=colwise(convert_dt)(x,test_numeric=FALSE)
            }
        }
        if (!empty(x)) {
            if (nrow(x)==500000) {
                ## exactly 500000 rows almost certainly means the server truncated the result
                warning("Only 500000 data rows were returned from the ALA server: this might not be the full data set you need. Contact support@ala.org.au")
            }
            names(x)=str_replace_all(names(x),"^(el|cl)\\.([0-9]+)","\\1\\2") ## change e.g. el.xxx to elxxx
            ## TODO: unclear what fields like "cl.1050.b" represent - investigate
            if (use_layer_names) {
                names(x)=make.names(fields_id_to_name(names(x),fields_type="layers"))
            } else {
                names(x)=make.names(fields_name_to_id(names(x),fields_type="layers",make_names=TRUE)) ## use make_names because names here have dots instead of spaces (not tested)
            }
            names(x)=rename_variables(names(x),type="occurrence")
            names(x)=rename_variables(names(x),type="assertions")
            ## remove unwanted columns
            xcols=setdiff(names(x),unwanted_columns("occurrence"))
            x=subset(x,select=xcols)
            ## also read the citation info
            ## this file won't exist if there are no rows in the data.csv file, so only do it if nrow(x)>0
            xc=read.table(unz(thisfile,"citation.csv"),header=TRUE,comment.char="",as.is=TRUE)
        } else {
            if (ala_config()$warn_on_empty) {
                warning("no matching records were returned")
            }
            ## an empty result combined with a wkt filter may mean the wkt was malformed: warn if so
            if (!missing(wkt)) {
                wkt_ok=check_wkt(wkt)
                if (is.na(wkt_ok)) {
                    warning("WKT string may not be valid: ",wkt)
                } else if (!wkt_ok) {
                    warning("WKT string appears to be invalid: ",wkt)
                }
            }
            xc=NULL
        }
        x=list(data=x,meta=xc)
    }
    class(x) <- c('occurrences',class(x)) #add the occurrences class
    x
}
| /R/occurrences.R | no_license | jjvanderwal/ALA4R | R | false | false | 16,178 | r | #' Get occurrence data
#'
#' Retrieve ALA occurrence data via the "occurrence download" web service. At least one of \code{taxon}, \code{wkt}, or \code{fq} must be supplied for a valid query. Note that the current service is limited to a maximum of 500000 records per request.
#'
#' @author Atlas of Living Australia \email{support@@ala.org.au}
#' @references \itemize{
#' \item \url{http://api.ala.org.au/}
#' \item Field definitions: \url{https://docs.google.com/spreadsheet/ccc?key=0AjNtzhUIIHeNdHhtcFVSM09qZ3c3N3ItUnBBc09TbHc}
#' \item WKT reference: \url{http://www.geoapi.org/3.0/javadoc/org/opengis/referencing/doc-files/WKT.html}
#' }
#' @param taxon string: (optional) taxonomic query of the form field:value (e.g. "genus:Macropus") or a free text search ("Alaba vibex")
#' @param wkt string: (optional) a WKT (well-known text) string providing a spatial polygon within which to search, e.g. "POLYGON((140 -37,151 -37,151 -26,140.131 -26,140 -37))"
#' @param fq string: (optional) character string or vector of strings, specifying filters to be applied to the original query. These are of the form "INDEXEDFIELD:VALUE" e.g. "kingdom:Fungi".
#' See \code{ala_fields("occurrence")} for all the fields that are queryable.
#' NOTE that fq matches are case-sensitive, but sometimes the entries in the fields are
#' not consistent in terms of case (e.g. kingdom names "Fungi" and "Plantae" but "ANIMALIA").
#' fq matches are ANDed by default (e.g. c("field1:abc","field2:def") will match records that have
#' field1 value "abc" and field2 value "def"). To obtain OR behaviour, use the form c("field1:abc
#' OR field2:def")
#' @param fields string vector: (optional) a vector of field names to return. Note that the columns of the returned data frame
#' are not guaranteed to retain the ordering of the field names given here. If not specified, a default list of fields will be returned. See \code{ala_fields("occurrence")} for valid field names. Field names can be passed as full names (e.g. "Radiation - lowest period (Bio22)") rather than id ("el871")
#' @param extra string vector: (optional) a vector of field names to include in addition to those specified in \code{fields}. This is useful if you would like the default list of fields (i.e. when \code{fields} parameter is not specified) plus some additional extras. See \code{ala_fields("occurrence")} for valid field names. Field names can be passed as full names (e.g. "Radiation - lowest period (Bio22)") rather than id ("el871")
#' @param qa string vector: (optional) list of record issues to include in the download. See \code{ala_fields("assertions")} for valid values, or use "none" to include no record issues
#' @param download_reason_id numeric or string: (required unless record_count_only is TRUE) a reason code for the download, either as a numeric ID (currently 0--11) or a string (see \code{\link{ala_reasons}} for a list of valid ID codes and names). The download_reason_id can be passed directly to this function, or alternatively set using \code{ala_config(download_reason_id=...)}
#' @param reason string: (optional) user-supplied description of the reason for the download. Providing this information is optional but will help the ALA to better support users by building a better understanding of user communities and their data requests
#' @param verbose logical: show additional progress information? [default is set by ala_config()]
#' @param record_count_only logical: if TRUE, return just the count of records that would be downloaded, but don't download them. Note that the record count is always re-retrieved from the ALA, regardless of the caching settings. If a cached copy of this query exists on the local machine, the actual data set size may therefore differ from this record count
#' @param use_layer_names logical: if TRUE, layer names will be used as layer column names in the returned data frame (e.g. "radiationLowestPeriodBio22"). Otherwise, layer id value will be used for layer column names (e.g. "el871")
#' @param use_data_table logical: if TRUE, attempt to read the data.csv file using the fread function from the data.table package. Requires data.table to be available. If this fails with an error or warning, or if use_data_table is FALSE, then read.table will be used (which may be slower)
#'
#' @return Data frame of occurrence results, with one row per occurrence record. The columns of the dataframe will depend on the requested fields
#' @seealso \code{\link{ala_reasons}} for download reasons; \code{\link{ala_config}}
#' @examples
#' x=occurrences(taxon="data_resource_uid:dr356",record_count_only=TRUE) ## count of records from this data provider
#' x=occurrences(taxon="data_resource_uid:dr356",download_reason_id=10) ## download records, with standard fields
#' \dontrun{
#' x=occurrences(taxon="data_resource_uid:dr356",download_reason_id=10,fields=ala_fields("occurrence")$name) ## download records, with all fields
#' x=occurrences(taxon="macropus",fields=c("longitude","latitude","common_name","taxon_name","el807"),download_reason_id=10) ## download records, with specified fields
#' x=occurrences(taxon="macropus",wkt="POLYGON((145 -37,150 -37,150 -30,145 -30,145 -37))",download_reason_id=10,qa="none") ## download records in polygon, with no quality assertion information
#'
#' y=occurrences(taxon="alaba vibex",fields=c("latitude","longitude","el874"),download_reason_id=10)
#' str(y)
#' # equivalent direct webservice call: http://biocache.ala.org.au/ws/occurrences/index/download?reasonTypeId=10&q=Alaba%20vibex&fields=latitude,longitude,el874&qa=none
#'
#' occurrences(taxon="Eucalyptus gunnii",fields=c("latitude","longitude"),qa="none",fq="basis_of_record:LivingSpecimen",download_reason_id=10)
#' # equivalent direct webservice call: http://biocache.ala.org.au/ws/occurrences/index/download?reasonTypeId=10&q=Eucalyptus%20gunnii&fields=latitude,longitude&qa=none&fq=basis_of_record:LivingSpecimen
#' }
#' @export occurrences
## NOTE - the all-fields example caused a segfault on rforge, so don't take it out of the dontrun block [this one: x=occurrences(taxon="data_resource_uid:dr356",download_reason_id=10,fields=ala_fields("occurrence")$name) ## download records, with all fields]
## TODO document fq alone as a query
## TODO: more extensive testing, particularly of the csv-conversion process
## TODO LATER: add params: lat, lon, radius (for specifying a search circle)
## Build and execute an ALA "occurrence download" request. At least one of
## taxon, fq or wkt must be supplied. Returns either the record count
## (when record_count_only=TRUE) or a list(data=<data.frame>, meta=<citation>)
## carrying the S3 class "occurrences".
occurrences=function(taxon,wkt,fq,fields,extra,qa,download_reason_id=ala_config()$download_reason_id,reason,verbose=ala_config()$verbose,record_count_only=FALSE,use_layer_names=TRUE,use_data_table=TRUE) {
    ## check input parms are sensible
    assert_that(is.flag(record_count_only))
    #taxon = clean_string(taxon) ## clean up the taxon name # no - because this can be an indexed query like field1:value1
    base_url=paste(ala_config()$base_url_biocache,"occurrences/index/download",sep="")
    this_query=list()
    ## have we specified a taxon?
    if (!missing(taxon)) {
        assert_that(is.string(taxon))
        this_query$q=taxon
    }
    ## wkt string (spatial polygon filter)
    if (!missing(wkt)) {
        assert_that(is.string(wkt))
        this_query$wkt=wkt
    }
    if (!missing(fq)) {
        assert_that(is.character(fq))
        ## can have multiple fq parameters, need to specify in url as fq=a:b&fq=c:d&fq=...
        check_fq(fq,type="occurrence") ## check that fq fields are valid
        fq=as.list(fq)
        names(fq)=rep("fq",length(fq))
        this_query=c(this_query,fq)
    }
    if (length(this_query)==0) {
        ## not a valid request!
        stop("invalid request: need at least one of taxon, fq, or wkt to be specified")
    }
    ## check the number of records
    if (record_count_only) {
        ## count via the search service with pageSize=0 so that no records are fetched,
        ## e.g. http://biocache.ala.org.au/ws/occurrences/search?q=*:*&pageSize=0&facet=off
        temp_query=this_query
        temp_query$pageSize=0
        temp_query$facet="off"
        this_url=parse_url(paste(ala_config()$base_url_biocache,"occurrences/search",sep=""))
        this_url$query=temp_query
        this_url=build_url(this_url)
        # ## don't need to check number of records if caching is on and we already have the file
        # cache_file_exists=file.exists(ala_cache_filename(this_url))
        # if ((ala_config()$caching %in% c("off","refresh")) | (!cache_file_exists & ala_config()$caching=="on")) {
        ## check
        # num_records=cached_get(url=this_url,type="json")$totalRecords
        # cat(sprintf('ALA4R occurrences: downloading dataset with %d records',num_records))
        #}
        ## caching is forced off here so the count is always current
        return(cached_get(url=this_url,type="json",caching="off",verbose=verbose)$totalRecords)
    }
    assert_that(is.flag(use_data_table))
    assert_that(is.flag(use_layer_names))
    ## a download reason is mandatory for the download service: accept either a
    ## numeric id or a string name, and validate it against ala_reasons()
    reason_ok=!is.na(download_reason_id)
    if (reason_ok) {
        valid_reasons=ala_reasons()
        download_reason_id=convert_reason(download_reason_id) ## convert from string to numeric if needed
        reason_ok=download_reason_id %in% valid_reasons$id
    }
    if (! reason_ok) {
        stop("download_reason_id must be a valid reason_id. See ala_reasons(). Set this value directly here or through ala_config(download_reason_id=...)")
    }
    if (!missing(fields)) {
        assert_that(is.character(fields))
        ## user has specified some fields
        fields=fields_name_to_id(fields=fields,fields_type="occurrence") ## replace long names with ids
        valid_fields=ala_fields(fields_type="occurrence")
        unknown=setdiff(fields,valid_fields$name)
        if (length(unknown)>0) {
            stop("invalid fields requested: ", str_c(unknown,collapse=", "), ". See ala_fields(\"occurrence\")")
        }
        this_query$fields=str_c(fields,collapse=",")
    }
    if (!missing(extra)) {
        assert_that(is.character(extra))
        ## extra fields are requested in addition to the default field set
        extra=fields_name_to_id(fields=extra,fields_type="occurrence") ## replace long names with ids
        valid_fields=ala_fields(fields_type="occurrence")
        unknown=setdiff(extra,valid_fields$name)
        if (length(unknown)>0) {
            stop("invalid extra fields requested: ", str_c(unknown,collapse=", "), ". See ala_fields(\"occurrence\")")
        }
        this_query$extra=str_c(extra,collapse=",")
    }
    if (!missing(qa)) {
        assert_that(is.character(qa))
        valid_fields=c("none",ala_fields(fields_type="assertions")$name) ## valid entries for qa
        unknown=setdiff(qa,valid_fields)
        if (length(unknown)>0) {
            stop("invalid qa fields requested: ", str_c(unknown,collapse=", "), ". See ala_fields(\"assertions\")")
        }
        this_query$qa=str_c(qa,collapse=",")
    }
    if (!missing(reason)) {
        assert_that(is.string(reason))
        this_query$reason=reason
    }
    this_query$reasonTypeId=download_reason_id
    this_query$esc="\\" ## force backslash-escaping of quotes rather than double-quote escaping
    this_query$sep="\t" ## tab-delimited
    this_query$file="data" ## to ensure that file is named "data.csv" within the zip file
    this_url=parse_url(base_url)
    this_url$query=this_query
    ## these downloads can potentially be large, so we want to download directly to file and then read the file
    thisfile=cached_get(url=build_url(this_url),type="binary_filename",verbose=verbose)
    if (!(file.info(thisfile)$size>0)) {
        ## empty file
        x=NULL
        ## actually this isn't a sufficient check, since even with empty data.csv file inside, the outer zip file will be > 0 bytes. Check again below on the actual data.csv file
    } else {
        ## if data.table is available, first try using this
        read_ok=FALSE
        ## NOTE(review): installed.packages() is a slow way to test availability;
        ## requireNamespace("data.table",quietly=TRUE) is the conventional check.
        if (use_data_table & is.element('data.table', installed.packages()[,1])) { ## if data.table package is available
            require(data.table) ## load it
            tryCatch({
                ## first need to extract data.csv from the zip file
                ## this may end up making fread() slower than direct read.table() ... needs testing
                tempsubdir=tempfile(pattern="dir")
                if (verbose) {
                    cat(sprintf(" ALA4R: unzipping downloaded occurrences data.csv file into %s\n",tempsubdir))
                }
                dir.create(tempsubdir)
                unzip(thisfile,files=c("data.csv"),junkpaths=TRUE,exdir=tempsubdir)
                ## first check if file is empty
                if (file.info(file.path(tempsubdir,"data.csv"))$size>0) {
                    x=fread(file.path(tempsubdir,"data.csv"),stringsAsFactors=FALSE,header=TRUE,verbose=verbose)
                    ## make sure names of x are valid, as per data.table
                    setnames(x,make.names(names(x)))
                    ## now coerce it back to data.frame (for now at least, unless we decide to not do this!)
                    x=as.data.frame(x)
                    if (!empty(x)) {
                        ## convert column data types
                        ## ALA supplies *all* values as quoted text, even numeric, and they appear here as character type
                        ## we will convert whatever looks like numeric or logical to those classes
                        x=colwise(convert_dt)(x)
                    }
                    read_ok=TRUE
                } else {
                    x=data.frame() ## empty result set
                    read_ok=TRUE
                }
            }, warning=function(e) {
                ## NOTE(review): this assignment is local to the handler function and
                ## does not change the outer read_ok; the fallback still works because
                ## read_ok is only ever set TRUE after a fully successful read above.
                if (verbose) {
                    warning("ALA4R: reading of csv as data.table failed, will fall back to read.table (may be slow). The warning message was: ",e)
                }
                read_ok=FALSE
            }
            , error=function(e) {
                ## NOTE(review): as for the warning handler - this read_ok is local.
                if (verbose) {
                    warning("ALA4R: reading of csv as data.table failed, will fall back to read.table (may be slow). The error message was: ",e)
                }
                read_ok=FALSE
            })
        }
        if (!read_ok) {
            ## fall back to base read.table, reading data.csv directly from the zip
            x=read.table(unz(thisfile,filename="data.csv"),header=TRUE,comment.char="",as.is=TRUE)
            if (!empty(x)) {
                ## convert column data types
                ## read.table handles quoted numerics but not quoted logicals
                x=colwise(convert_dt)(x,test_numeric=FALSE)
            }
        }
        if (!empty(x)) {
            if (nrow(x)==500000) {
                ## exactly 500000 rows almost certainly means the server truncated the result
                warning("Only 500000 data rows were returned from the ALA server: this might not be the full data set you need. Contact support@ala.org.au")
            }
            names(x)=str_replace_all(names(x),"^(el|cl)\\.([0-9]+)","\\1\\2") ## change e.g. el.xxx to elxxx
            ## TODO: unclear what fields like "cl.1050.b" represent - investigate
            if (use_layer_names) {
                names(x)=make.names(fields_id_to_name(names(x),fields_type="layers"))
            } else {
                names(x)=make.names(fields_name_to_id(names(x),fields_type="layers",make_names=TRUE)) ## use make_names because names here have dots instead of spaces (not tested)
            }
            names(x)=rename_variables(names(x),type="occurrence")
            names(x)=rename_variables(names(x),type="assertions")
            ## remove unwanted columns
            xcols=setdiff(names(x),unwanted_columns("occurrence"))
            x=subset(x,select=xcols)
            ## also read the citation info
            ## this file won't exist if there are no rows in the data.csv file, so only do it if nrow(x)>0
            xc=read.table(unz(thisfile,"citation.csv"),header=TRUE,comment.char="",as.is=TRUE)
        } else {
            if (ala_config()$warn_on_empty) {
                warning("no matching records were returned")
            }
            ## an empty result combined with a wkt filter may mean the wkt was malformed: warn if so
            if (!missing(wkt)) {
                wkt_ok=check_wkt(wkt)
                if (is.na(wkt_ok)) {
                    warning("WKT string may not be valid: ",wkt)
                } else if (!wkt_ok) {
                    warning("WKT string appears to be invalid: ",wkt)
                }
            }
            xc=NULL
        }
        x=list(data=x,meta=xc)
    }
    class(x) <- c('occurrences',class(x)) #add the occurrences class
    x
}
|
\name{rev}
\alias{rev,timeSeries-method}
\alias{rev.timeSeries}
\title{Reverse a 'timeSeries'}
\description{
Reverses an uni- or multivariate \code{"timeSeries"} object.
}
\usage{
\S4method{rev}{timeSeries}(x)
}
\arguments{
\item{x}{
an uni- or multivariate \code{"timeSeries"} object.
}
}
\value{
a \code{"timeSeries"} object
}
\examples{
\dontshow{set.seed(1234)}
## Create Dummy "timeSeries" -
tS <- dummyMonthlySeries()
## Reverse Series -
rev(tS)
}
\keyword{chron}
| /man/base-rev.Rd | no_license | cran/timeSeries | R | false | false | 600 | rd | \name{rev}
\alias{rev,timeSeries-method}
\alias{rev.timeSeries}
\title{Reverse a 'timeSeries'}
\description{
Reverses an uni- or multivariate \code{"timeSeries"} object.
}
\usage{
\S4method{rev}{timeSeries}(x)
}
\arguments{
\item{x}{
an uni- or multivariate \code{"timeSeries"} object.
}
}
\value{
a \code{"timeSeries"} object
}
\examples{
\dontshow{set.seed(1234)}
## Create Dummy "timeSeries" -
tS <- dummyMonthlySeries()
## Reverse Series -
rev(tS)
}
\keyword{chron}
|
#' dist_to_pars
#'
#' @param dist One of gamma, stable, pvf
#' @param logfrailtypar log theta and log llambda
#' @param pvfm The pvfm
#'
#' @keywords internal
#' @return A list with 4 elements: aalpha, ggamma (the parameters of the Laplace transform), llambda (distance parameter) and dist (an integer distribution id: 0 = gamma, 1 = stable, 2 = pvf).
dist_to_pars <- function(dist, logfrailtypar, pvfm) {
  # Map a frailty distribution name plus log-scale parameters to the internal
  # parametrization used by the estimation routines.
  #
  # dist:          one of "gamma", "stable", "pvf"
  # logfrailtypar: numeric(2); [1] = log(theta), [2] = log(lambda)
  # pvfm:          the m parameter of the PVF family (only used for dist == "pvf")
  #
  # Returns a list with aalpha and ggamma (the Laplace transform parameters),
  # llambda (the distance parameter) and dist (integer distribution id:
  # 0 = gamma, 1 = stable, 2 = pvf).
  if (dist == "gamma") {
    # For the gamma frailty both Laplace transform parameters equal theta.
    aalpha <- ggamma <- exp(logfrailtypar[1])
    dist_id <- 0L
  } else if (dist == "stable") {
    # Positive stable: alpha is fixed at 1 and gamma = theta / (theta + 1),
    # which is guaranteed to lie in (0, 1).
    aalpha <- 1
    ggamma <- exp(logfrailtypar[1]) / (exp(logfrailtypar[1]) + 1)
    dist_id <- 1L
  } else if (dist == "pvf") {
    aalpha <- abs((pvfm + 1) / pvfm * exp(logfrailtypar[1]))
    ggamma <- (pvfm + 1) * exp(logfrailtypar[1])
    dist_id <- 2L
  } else {
    # Previously an unrecognized dist fell through and failed later with an
    # obscure "object 'aalpha' not found" error; fail fast instead.
    stop("unknown frailty distribution: ", dist)
  }
  list(aalpha = aalpha, ggamma = ggamma, llambda = exp(logfrailtypar[2]), dist = dist_id)
}
| /R/dynfrail_aux.R | no_license | cran/dynfrail | R | false | false | 1,217 | r | #' dist_to_pars
#'
#' @param dist One of gamma, stable, pvf
#' @param logfrailtypar log theta and log llambda
#' @param pvfm numeric: the m parameter of the PVF (power variance function) frailty family; only used when \code{dist} is "pvf"
#'
#' @keywords internal
#' @return A list with 4 elements: aalpha, ggamma (the parameters of the Laplace transform), llambda (distance parameter) and dist_id.
dist_to_pars <- function(dist, logfrailtypar, pvfm) {
if (dist == "gamma") {
aalpha <- ggamma <- exp(logfrailtypar[1])
dist_id <- 0L
}
# if (dist == "stable") {
# theta <- exp(logfrailtypar) + 1 # so theta >1
# bbeta <- 1 - 1/theta # so bbeta in (0,1), that's what's important
# alpha <- theta / (theta - 1) # alpha = 1/beta for scaling
# dist_id <- 1L
# }
if (dist == "stable") {
# theta <- exp(logfrailtypar) + 1 # so theta >1
# bbeta <- 1 - 1/theta
aalpha <- 1
#bbeta <- 1 - exp(logfrailtypar) / (exp(logfrailtypar) + 1)
ggamma <- exp(logfrailtypar[1]) / (exp(logfrailtypar[1]) + 1)
dist_id <- 1L
}
if (dist == "pvf") {
aalpha <- abs((pvfm + 1)/pvfm * exp(logfrailtypar[1]))
ggamma <- (pvfm + 1) * exp(logfrailtypar[1])
dist_id <- 2L
}
list(aalpha = aalpha, ggamma = ggamma, llambda = exp(logfrailtypar[2]), dist = dist_id)
}
|
library(DESeq)
library(waveslim)
library(brainwaver)
# Combine a vector of p-values with Fisher's method.
# NA entries are dropped first; the combined p-value comes from the
# chi-squared distribution with 2 * (number of retained p-values)
# degrees of freedom.
fisher <- function(x) {
  pvals <- x[!is.na(x)]
  statistic <- -2 * sum(log(pvals))
  degrees <- 2 * length(pvals)
  1 - pchisq(statistic, df = degrees)
}
## Differential-expression analysis without replicates: exon-level DESeq
## p-values are combined into gene-level p-values with Fisher's method
## (fisher(), defined above), then thresholded at the requested FDR using
## compute.FDR() from the brainwaver package.
##
## Expected command-line arguments (positional, each with a fixed prefix):
##   1: -filename=<counts file>   2: -fdr=<fdr>
##   3: -output1=<all genes>      4: -output2=<significant genes>
argument <- commandArgs(trailingOnly = TRUE)
## The substr() offsets strip the option prefixes shown above.
FileName <- substr(argument[1], 11, nchar(argument[1])) # -filename
fdr <- substr(argument[2], 6, nchar(argument[2]))       # -fdr
Output1 <- substr(argument[3], 10, nchar(argument[3]))  # -output1
Output2 <- substr(argument[4], 10, nchar(argument[4]))  # -output2
fdr <- as.numeric(fdr)

## Counts file layout: column 1 = gene, column 2 = exon, columns 3:4 = the
## two sample count columns.
countsTable <- read.delim(FileName, header = TRUE, stringsAsFactors = TRUE)
countsRead <- countsTable[, 3:4]
rownames(countsRead) <- paste(countsTable$gene, countsTable$exon, sep = ":")
conds <- rep(c("c1", "c2"))

## Standard DESeq workflow; method="blind" with sharingMode="fit-only" is the
## required setting when there are no replicates (estimateVarianceFunctions
## is deprecated and replaced by estimateDispersions).
cds <- newCountDataSet(countsRead, conds)
cds <- estimateSizeFactors(cds)
sizeFactors(cds)
cds <- estimateDispersions(cds, method = "blind", sharingMode = "fit-only")
res <- nbinomTest(cds, "c1", "c2")

## Per-exon p-values (column 7 of the nbinomTest result), keyed by gene.
q.value <- data.frame(gene = countsTable[, 1], p = rep(0, nrow(countsTable)))
q.value[, 2] <- res[, 7]

## Combine the exon-level p-values of each gene with Fisher's method.
genename <- unique(countsTable[, 1])
p.combined <- data.frame(gene = genename, pval = numeric(length(genename)))
for (i in seq_along(genename)) {
  trans <- q.value[q.value[, 1] == genename[i], ]
  p.combined[i, 2] <- fisher(trans[, 2])
}

## Apply the FDR threshold to the combined p-values and write the outputs.
thresh <- compute.FDR(p.combined$pval, fdr)
write.table(p.combined, file = Output1, sep = "\t")
## The original calls contained stray empty arguments (double commas) that
## only worked because the silently-skipped formals have defaults; removed.
write.table(paste("FDR=", fdr), file = Output2, row.names = FALSE, col.names = FALSE)
write.table(p.combined[p.combined$pval <= thresh, ], file = Output2, append = TRUE, sep = "\t")
| /rseqflow2/DE/Deseq_WithoutReplicate_FDR.r | no_license | falconer502/RseqFlow | R | false | false | 1,827 | r | library(DESeq)
library(waveslim)
library(brainwaver)
fisher <- function(x)
{
x <- x[!is.na(x)]
q <- -2*sum(log(x))
d <- 2*length(x)
1-pchisq(q, df=d)
}
argument<-commandArgs(trailingOnly = TRUE)
FileName<-substr(argument[1],11,nchar(argument[1])) # -filename
fdr<-substr(argument[2],6,nchar(argument[2])) #-fdr
Output1<-substr(argument[3],10,nchar(argument[3])) # -output1
Output2<-substr(argument[4],10,nchar(argument[4])) # -output2
fdr<-as.numeric(fdr)
countsTable<-read.delim(FileName,header=TRUE,stringsAsFactors=TRUE)
countsRead<-countsTable[,3:4]
rownames(countsRead)<-paste(countsTable$gene,countsTable$exon,sep = ":")
conds<-rep(c("c1", "c2"))
cds <- newCountDataSet( countsRead, conds )
cds <- estimateSizeFactors( cds )
sizeFactors(cds)
# estimateVarianceFunctions deprecated, replaced with estimateDispersions
#cds <- estimateVarianceFunctions( cds,method="blind" )
cds <- estimateDispersions( cds,method="blind",sharingMode="fit-only")
res <- nbinomTest( cds, "c1", "c2")
temp<- res[!(is.na(res$pval)), ]
#resSig<-temp[temp$pval<=pvalue, ]
#resSigDataFrame=data.frame(resSig$id,resSig$pval)
#write.table(resSigDataFrame,file="DE_Output_Significant_exon.txt",sep="\t")
q.value<-data.frame(gene=countsTable[,1],p=rep(0,nrow(countsTable)))
q.value[,2]<- res[,7]
genename<-unique(countsTable[,1])
p.combined<-data.frame(gene=genename,pval=numeric(length(genename)))
for(i in 1:length(genename)){
trans <- q.value[q.value[,1]==genename[i],]
p.combined[i,2]<- fisher(trans[,2])
}
thresh<-compute.FDR(p.combined$pval,fdr)
#resSig<-p.combined[p.combined$pval<=thresh, ]
write.table(p.combined,file=Output1,sep="\t")
write.table(paste("FDR=",fdr),,file=Output2,row.names = FALSE,col.names = FALSE)
write.table(p.combined[p.combined$pval<=thresh, ],file=Output2,,append = TRUE,sep="\t")
|
######################################################################################
#### Getting and Cleaning Data Course Project based on ####
#### Human Activity Recognition Using Smartphones Data Set ####
#### Davide Anguita, Alessandro Ghio, Luca Oneto, Xavier Parra and ####
#### Jorge L. Reyes-Ortiz. ####
#### A Public Domain Dataset for Human Activity Recognition Using Smartphones. ####
#### 21th European Symposium on Artificial Neural Networks, ####
#### Computational Intelligence and Machine Learning, ESANN 2013. ####
#### Bruges, Belgium 24-26 April 2013. ####
######################################################################################
#Data downloaded date: "Sun Jun 25 12:27:38 2017"
#Reading training and test data sets from .txt files
# Read the raw training and test measurement sets (561 feature columns each).
traindata = read.table("X_train.txt")
testdata = read.table("X_test.txt")
#################################################################################
##### 1. Merges the training and the test sets to create one data set. #####
#################################################################################
# Merging training and test sets (training rows first, then test rows)
data <- rbind(traindata, testdata) #merged data set
dim(data) #10299 rows and 561 columns
# Reading measurement labels from .txt file
features = read.table("features.txt")
# Reading training and test activity labels from .txt files
trainlabels = read.table("y_train.txt")
testlabels = read.table("y_test.txt")
# Merging the training and the test labels (same row order as the data above).
labels <- rbind(trainlabels, testlabels) #merged labels
names(labels)<- c("activity") #variable named activity
dim(labels) #10299 rows and 1 column
# Adding variable activity to data set
data <- cbind(data, labels)
dim(data) #10299 rows and 562 columns
#########################################################
#### 2. Extracts only the measurements on the ####
#### mean and standard deviation for each measurement.####
#########################################################
# Identifying measurements on the mean and standard deviation
measurements <- as.character(features$V2) #converting to character
# NOTE(review): the next two expressions are evaluated for their printed
# value only (interactive inspection); they do not change any object.
grepl("mean\\(\\)|std\\(\\)", measurements) #logical vector for measurements of interest
measurements[grepl("mean\\(\\)|std\\(\\)", measurements)] #selected only mean() or std() measurements
# Subsetting data with only mean() and std() measurements.
# NOTE(review): the logical index has length 561 while data has 562 columns
# (per the dim() comments above), so R recycles its first element for the
# final (activity) column - verify that including "activity" here is intended.
selecteddata<-data[,grepl("mean\\(\\)|std\\(\\)", measurements)]
dim(selecteddata)# 10299 rows and 67 columns
#######################################################################################
#### 3. Uses descriptive activity names to name the activities in the data set ####
#######################################################################################
# Reading activity labels from .txt file
activitylabels = read.table("activity_labels.txt")
# Assigning descriptive activity names to name the activities in the data set
selecteddata$activity <- as.factor(selecteddata$activity) #convert variable to factor
levels(selecteddata$activity) <- activitylabels$V2
#####################################################################################
#### 4. Appropriately labels the data set with descriptive variable names. ####
#####################################################################################
# Assigning descriptive variable names
varnames <- measurements[grepl("mean\\(\\)|std\\(\\)", measurements)]
names(selecteddata) <- c(varnames, "activity")
names(selecteddata)
###################################################################
#### 5. From the data set in step 4, creates a second, ####
#### independent tidy data set with the average ####
#### of each variable for each activity and each subject. ####
###################################################################
# Second data set with only the average of each variable
# for each activity and each subject (std() columns are dropped).
# NOTE(review): the next expression is printed for inspection only.
!grepl("std\\(\\)", names(selecteddata)) #logical condition for variables in the second data
finaldata <- selecteddata[,!grepl("std\\(\\)", names(selecteddata))]
dim(finaldata) #10299 rows and 34 columns
names(finaldata)
# Reading subject .txt files
trainsubject = read.table("subject_train.txt")
testsubject = read.table("subject_test.txt")
subject <- rbind(trainsubject, testsubject)
names(subject) <- c("subject")
dim(subject)
#### Adding subject ID to data
finaldata <- cbind(subject, finaldata)
dim(finaldata)
# NOTE(review): this removes column 2 of the cbind() result, i.e. the first
# measurement column rather than a duplicate - confirm this is intended.
finaldata <- finaldata[,-2]
names(finaldata)
#### Calculating mean by subject and activity
library(reshape2)
melteddata <- melt(finaldata, id=c("subject", "activity"))
tidydata <- dcast(melteddata, subject+activity ~ variable, mean)
# Creating .txt file with the tidy (per subject x activity averages) data set.
# NOTE(review): "row.name" relies on R's partial argument matching; the
# formal argument of write.table is "row.names".
write.table(tidydata, "tidydata.txt", row.name=FALSE)
| /run_analysis.R | no_license | EdelRodea/Getting-and-Cleaning-Data-Course-Project | R | false | false | 4,956 | r | ######################################################################################
#### Getting and Cleaning Data Course Project based on ####
#### Human Activity Recognition Using Smartphones Data Set ####
#### Davide Anguita, Alessandro Ghio, Luca Oneto, Xavier Parra and ####
#### Jorge L. Reyes-Ortiz. ####
#### A Public Domain Dataset for Human Activity Recognition Using Smartphones. ####
#### 21th European Symposium on Artificial Neural Networks, ####
#### Computational Intelligence and Machine Learning, ESANN 2013. ####
#### Bruges, Belgium 24-26 April 2013. ####
######################################################################################
#Data downloaded date: "Sun Jun 25 12:27:38 2017"
# ---- Load raw data ----------------------------------------------------------
# Read the UCI HAR training and test feature matrices (561 features per row).
traindata = read.table("X_train.txt")
testdata = read.table("X_test.txt")
#################################################################################
#####    1. Merges the training and the test sets to create one data set.   #####
#################################################################################
# Stack the test observations below the training observations.
data <- rbind(traindata, testdata) #merged data set
dim(data) #10299 rows and 561 columns
# Read the feature (measurement) labels; column V2 holds the feature names.
features = read.table("features.txt")
# Read the training and test activity labels (one integer code per row).
trainlabels = read.table("y_train.txt")
testlabels = read.table("y_test.txt")
# Merge the training and the test labels in the same row order as `data`.
labels <- rbind(trainlabels, testlabels) #merged labels
names(labels)<- c("activity") #variable named activity
dim(labels) #10299 rows and 1 columns
# Append the activity code as column 562 of the merged data set.
data <- cbind(data, labels)
dim(data) #10299 rows and 562 columns
#########################################################
####     2. Extracts only the measurements on the    ####
####mean and standard deviation for each measurement.####
#########################################################
# Identify the mean/std measurements: keep only names containing the literal
# "mean()" or "std()" (this intentionally excludes e.g. meanFreq()).
measurements <- as.character(features$V2) #converting to character
grepl("mean\\(\\)|std\\(\\)", measurements) #logical vector for measurements of interest (printed for inspection)
measurements[grepl("mean\\(\\)|std\\(\\)", measurements)] #selected only mean() or std() measurements
# Subset the merged data to the mean()/std() columns.
# NOTE(review): `measurements` has 561 elements but `data` now has 562 columns
# ("activity" was appended), so the logical index is recycled: column 562 is
# kept because element 1 of the grepl vector is TRUE. That is what retains
# "activity" here (67 columns = 66 features + activity) -- confirm this is
# intended rather than subsetting before the cbind.
selecteddata<-data[,grepl("mean\\(\\)|std\\(\\)", measurements)]
dim(selecteddata)# 10299 rows and 67 columns
#######################################################################################
####    3. Uses descriptive activity names to name the activities in the data set ####
#######################################################################################
# Read the activity id -> name lookup table (V2 holds the names).
activitylabels = read.table("activity_labels.txt")
# Convert the numeric activity codes to a factor, then relabel its levels with
# the descriptive names (factor levels 1..6 map onto activitylabels$V2 in order).
selecteddata$activity <- as.factor(selecteddata$activity) #convert variable as factor
levels(selecteddata$activity) <- activitylabels$V2
#####################################################################################
####    4. Appropriately labels the data set with descriptive variable names.   ####
#####################################################################################
# Assign descriptive variable names: the 66 mean()/std() feature names from
# features.txt, plus the "activity" column that was appended earlier.
varnames <- measurements[grepl("mean\\(\\)|std\\(\\)", measurements)]
names(selecteddata) <- c(varnames, "activity")
names(selecteddata)
###################################################################
####    5. From the data set in step 4, creates a second,      ####
####    independent tidy data set with the average             ####
####    of each variable for each activity and each subject.   ####
###################################################################
# Keep only the mean() measurements (drop the std() columns) plus "activity".
!grepl("std\\(\\)", names(selecteddata)) # logical condition for variables in the second data (printed for inspection)
finaldata <- selecteddata[, !grepl("std\\(\\)", names(selecteddata))]
dim(finaldata) # 10299 rows and 34 columns
names(finaldata)
# Read the subject identifiers and merge train + test in the same row order
# as the merged measurement data.
trainsubject <- read.table("subject_train.txt")
testsubject <- read.table("subject_test.txt")
subject <- rbind(trainsubject, testsubject)
names(subject) <- c("subject")
dim(subject)
# Add the subject ID as the first column.
finaldata <- cbind(subject, finaldata)
dim(finaldata)
# Drop column 2 (the first measurement column).
# NOTE(review): confirm this is intentional -- it discards "tBodyAcc-mean()-X".
finaldata <- finaldata[, -2]
names(finaldata)
# Average every remaining variable for each (subject, activity) pair.
library(reshape2)
melteddata <- melt(finaldata, id = c("subject", "activity"))
tidydata <- dcast(melteddata, subject + activity ~ variable, mean)
# Write the tidy data set. Use the documented argument name `row.names`:
# the original `row.name` only worked via R's partial argument matching.
write.table(tidydata, "tidydata.txt", row.names = FALSE)
|
# Harvest-model results: read the per-scenario .csv tables (extracted from the
# .mp files via Matlab) and plot adult female/male trajectories over time,
# saving one PNG per input file under figures/.
rm(list = ls())  # NOTE(review): clearing the workspace inside a script is discouraged
library(ggplot2)
library(reshape2)
#library(mptools)
sysInfo <- Sys.info()
runDate <- Sys.Date()
tBegin <- Sys.time()
# Load helper functions from the platform-specific location.
# (if/else instead of ifelse(): ifelse() is for vectorized values, not for
# choosing between two side-effecting source() calls.)
if (sysInfo[1] == 'Linux') {
  source('~/Documents/R/TomosFunctions.R')
} else {
  source('~/R/TomosFunctions.R')
}
F0 <- dir('data/Harvest Models/')
# Match the literal ".csv" -- the original pattern '.csv' treated '.' as a
# regex wildcard, so e.g. "Xcsv" would also have matched.
F1 <- F0[grep('.csv', F0, fixed = TRUE)]
# data were extracted from the .mp files using Matlab:
stageNames <- c('nef', 'pjf', 'bjf', 'saf', 'maf', 'adf',
                'nem', 'pjm', 'bjm', 'sam', 'mam', 'adm')
dat <- vector(mode = "list", length = length(F1))    # one data frame per file
plots <- vector(mode = "list", length = length(F1))  # one ggplot per file
for (k in seq_along(F1)) {  # seq_along() is safe if F1 is empty (1:length would give 1:0)
  datafile <- paste0('data/Harvest Models/', F1[k])
  dat0 <- read.csv(file = datafile, header = FALSE)
  colnames(dat0) <- stageNames
  dat[[k]] <- dat0[3:nrow(dat0), ]  # drop the first two rows
  dat[[k]]$time <- seq(from = 0, to = 200, by = 5)
  df1 <- dat[[k]]
  fname <- unlist(strsplit(F1[k], '.csv'))
  # last four characters of the base name identify the parameter set
  pname <- substr(fname, start = (nchar(fname) - 3),
                  stop = nchar(fname))
  # Map the file name to a human-readable treatment label.
  # NOTE(review): the first branch's label says "3000AdM" although the file
  # pattern is "...1000AM" -- confirm which is correct.
  if (grepl('3000AF_1000AM', F1[k])) {
    trtmt <- 'Hvt-3000AdF, 3000AdM'
  } else if (grepl('3000AF_300AM', F1[k])) {
    trtmt <- 'Hvt-3000AdF, 300AdM'
  } else if (grepl('3000AF_AM', F1[k])) {
    trtmt <- 'Hvt-3000AdF, 3000AdM'
  } else if (grepl('4000AF', F1[k])) {
    trtmt <- 'Hvt-4000AdF'
  } else if (grepl('6000AF__1.0', F1[k])) {
    trtmt <- 'Hvt-6000AdF'
  } else if (grepl('6000AF_1.04', F1[k])) {
    trtmt <- 'Mvt(+7)-6000AdF'
  } else if (grepl('3000AF_SA', F1[k])) {
    trtmt <- 'Hvt-3000AdF, 3000SubAd'
  } else {
    trtmt <- fname  # fallback so trtmt is never stale from a previous iteration
  }
  # Adult females as filled circles, adult males as triangles (shape 17);
  # the shaded rectangle marks the time window 20-120.
  plots[[k]] <- ggplot(data = df1) +
    labs(title = paste(trtmt, pname, sep = " : "),
         y = 'Adult',
         x = 'Time') +
    geom_rect(aes(xmin = 20, xmax = 120, ymin = 0, ymax = Inf),
              alpha = 0.1) +
    geom_point(aes(x = time,
                   y = adf),
               size = 5) +
    geom_point(aes(x = time,
                   y = adm),
               shape = 17,
               color = 'black',
               fill = 'black',
               size = 5)
  # Pass the plot explicitly: relying on ggsave()'s default last_plot() can
  # save the wrong plot when the object was assigned but never printed.
  ggsave(paste0('figures/',
                unlist(strsplit(F1[k], '.csv')), '.png'),
         plot = plots[[k]],
         dpi = 600)
}
| /extract_results_harvest.R | no_license | mteguchi/nGBR_greens | R | false | false | 2,667 | r |
rm(list = ls())
library(ggplot2)
library(reshape2)
#library(mptools)
sysInfo <- Sys.info()
runDate <- Sys.Date()
tBegin <- Sys.time()
# Change the directory structure according to the computer
ifelse(sysInfo[1] == 'Linux',
source('~/Documents/R/TomosFunctions.R'),
source('~/R/TomosFunctions.R'))
F0 <- dir('data/Harvest Models/')
F1 <- F0[grep('.csv', F0)]
# data were extracted from the .mp files using Matlab:
stageNames <- c('nef', 'pjf', 'bjf', 'saf', 'maf', 'adf',
'nem', 'pjm', 'bjm', 'sam', 'mam', 'adm')
# stageNames.title <- c('hatchling (F)', 'pelagic juvenile (F)',
# 'benthic juvenile (F)', 'subadult (F)',
# 'maturing adult (F)', 'adult (F)',
# 'hatchling (M)', 'pelagic juvenile (M)',
# 'benthic juvenile (M)', 'subadult (M)',
# 'maturing adult (M)', 'adult (M)')
dat <- vector(mode = "list",
length = length(F1))
plots <- vector(mode = "list",
length = length(F1) )
for (k in 1:length(F1)){
datafile <- paste0('data/Harvest Models/', F1[k])
dat0 <- read.csv(file = datafile, header = F)
colnames(dat0) <- stageNames
dat[[k]] <- dat0[3:dim(dat0)[1],]
dat[[k]]$time <- seq(from = 0, to = 200, by = 5)
df1 <- dat[[k]]
fname <- unlist(strsplit(F1[k], '.csv'))
pname <- substr(fname, start = (nchar(fname)-3),
stop = nchar(fname))
if (length(grep('3000AF_1000AM', F1[k])) == 1){
trtmt <- 'Hvt-3000AdF, 3000AdM'
} else if (length(grep('3000AF_300AM', F1[k])) == 1) {
trtmt <- 'Hvt-3000AdF, 300AdM'
} else if (length(grep('3000AF_AM', F1[k])) == 1){
trtmt <- 'Hvt-3000AdF, 3000AdM'
} else if (length(grep('4000AF', F1[k])) == 1){
trtmt <- 'Hvt-4000AdF'
} else if (length(grep('6000AF__1.0', F1[k])) == 1){
trtmt <- 'Hvt-6000AdF'
} else if (length(grep('6000AF_1.04', F1[k])) == 1){
trtmt <- 'Mvt(+7)-6000AdF'
} else if (length(grep('3000AF_SA', F1[k])) == 1){
trtmt <- 'Hvt-3000AdF, 3000SubAd'
}
plots[[k]] <- ggplot(data = df1) +
labs(title = paste(trtmt, pname, sep = " : "),
y = 'Adult',
x = 'Time') +
geom_rect(aes(xmin = 20, xmax = 120, ymin = 0, ymax = Inf),
alpha = 0.1) +
geom_point(aes(x = time,
y = adf),
size = 5) +
geom_point(aes(x = time,
y = adm),
shape = 17,
color = 'black',
fill = 'black',
size = 5)
ggsave(paste0('figures/',
unlist(strsplit(F1[k], '.csv')), '.png'),
dpi = 600)
}
|
## read the data files and standartise
require(rgdal)
require(raster)
require(maptools)
setwd("Data")
# Creating a directory to save modyfied files
save.dir=paste(getwd(),'Modyfied',sep='/')
dir.create(save.dir, showWarnings=FALSE)
## read data drom data package folder
setwd("Data package 20180309")
# there are two types of data in this folders: tiff and shapefile
## read all the tif data files
tiffiles = list.files(pattern ='*.tif$', full.names=FALSE, include.dirs=FALSE, no..=TRUE )
roads=raster("/Volumes/Second/roads/Roads_100")
plot(roads)
filenames=NULL
for (i in tiffiles) {
#create RasterLayer objects with corresponding names of the files
name = substr(basename(i), 1, nchar(basename(i)) - 4) #remove extension from the names
do.call('=',list(name, raster(i)))
filenames=c(filenames,name)
}
## read all the shapefiles from the folder
shapefiles = list.files(pattern ='*.shp$', full.names=FALSE, include.dirs=FALSE, no..=TRUE )
for (i in shapefiles) {
# read the shape file
name = substr(basename(i), 1, nchar(basename(i)) - 4) #remove extension from the names
do.call('=',list(name, readShapeSpatial(i)))
filenames=c(filenames,name)
}
raster.res=NULL
raster.ext=NULL
filenames=NULL
for (i in tiffiles) {
#create RasterLayer objects with corresponding names of the files
name = substr(basename(i), 1, nchar(basename(i)) - 4)
do.call('=',list(name, raster(i)))
#Saving objects resolutions and extents
filenames=c(filenames,name)
raster.res=c(raster.res, xres(get(i)))
raster.ext=rbind(raster.ext, as.vector(extent(get(i))))
}
colnames(raster.ext)=c('xmin','xmax','ymin','ymax')
# Printing raster info
print(cbind(filenames, raster.res, raster.ext), quote=FALSE)
setwd("..")
| /SDM/read data.R | no_license | thekoshkina/WSSSP-Modelling | R | false | false | 1,748 | r | ## read the data files and standartise
require(rgdal)
require(raster)
require(maptools)
setwd("Data")
# Creating a directory to save modyfied files
save.dir=paste(getwd(),'Modyfied',sep='/')
dir.create(save.dir, showWarnings=FALSE)
## read data drom data package folder
setwd("Data package 20180309")
# there are two types of data in this folders: tiff and shapefile
## read all the tif data files
tiffiles = list.files(pattern ='*.tif$', full.names=FALSE, include.dirs=FALSE, no..=TRUE )
roads=raster("/Volumes/Second/roads/Roads_100")
plot(roads)
filenames=NULL
for (i in tiffiles) {
#create RasterLayer objects with corresponding names of the files
name = substr(basename(i), 1, nchar(basename(i)) - 4) #remove extension from the names
do.call('=',list(name, raster(i)))
filenames=c(filenames,name)
}
## read all the shapefiles from the folder
shapefiles = list.files(pattern ='*.shp$', full.names=FALSE, include.dirs=FALSE, no..=TRUE )
for (i in shapefiles) {
# read the shape file
name = substr(basename(i), 1, nchar(basename(i)) - 4) #remove extension from the names
do.call('=',list(name, readShapeSpatial(i)))
filenames=c(filenames,name)
}
raster.res=NULL
raster.ext=NULL
filenames=NULL
for (i in tiffiles) {
#create RasterLayer objects with corresponding names of the files
name = substr(basename(i), 1, nchar(basename(i)) - 4)
do.call('=',list(name, raster(i)))
#Saving objects resolutions and extents
filenames=c(filenames,name)
raster.res=c(raster.res, xres(get(i)))
raster.ext=rbind(raster.ext, as.vector(extent(get(i))))
}
colnames(raster.ext)=c('xmin','xmax','ymin','ymax')
# Printing raster info
print(cbind(filenames, raster.res, raster.ext), quote=FALSE)
setwd("..")
|
###REG SIMPLE
#Ex : rendement mais
#Y : rendement
Y = c(16,
18,
23,
24,
28,
29,
26,
31,
32,
34) ; Y[1:3]
#X : quantit? engrais
X = c(20,
24,
28,
22,
32,
28,
32,
36,
41,
41) ; X[1:3]
library(ggplot2)
#ajout test de blabla
| /MonScript.r | no_license | Neutral666CreeP/TEST10 | R | false | false | 341 | r | ###REG SIMPLE
#Ex : rendement mais
#Y : rendement
Y = c(16,
18,
23,
24,
28,
29,
26,
31,
32,
34) ; Y[1:3]
#X : quantit? engrais
X = c(20,
24,
28,
22,
32,
28,
32,
36,
41,
41) ; X[1:3]
library(ggplot2)
#ajout test de blabla
|
library(ape)
testtree <- read.tree("1725_52.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1725_52_unrooted.txt") | /codeml_files/newick_trees_processed/1725_52/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
testtree <- read.tree("1725_52.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1725_52_unrooted.txt") |
# Draw "plot2" of the household power consumption assignment: a line plot of
# Global_active_power over the two days 2007-02-01 and 2007-02-02, written to
# plot2.png (480x480). No return value; called for its file side effect.
plot2 <- function(){
        # set working directory and read data in
        # NOTE(review): setwd() inside a function changes global state and is
        # not restored on exit; the absolute Windows path makes this machine-specific.
        setwd("C:/Users/Matteo/Documents/Courses/04 - Exploratory Data Analysis/R data & Code/Course Project 1")
        # read everything as character so the "?" missing markers do not break parsing
        data <- read.table("household_power_consumption.txt",header=TRUE,sep=";",colClasses = "character")
        # subset data for "2007-02-01" and "2007-02-02" (dates stored as d/m/yyyy)
        data_red <- data[(data$Date == "1/2/2007") | (data$Date == "2/2/2007"), ]
        # columns 3..9 are the numeric measurements
        data_red[,3:9] <- sapply(data_red[,3:9],as.numeric)
        # convert dates and times to Date/Time classes
        library(lubridate)
        data_red$Date <- dmy(data_red$Date)
        data_red$Time <- hms(data_red$Time)
        # make plot and save file; the data are plotted against row index,
        # with x ticks at 0/1441/2880 -- presumably the minute offsets of the
        # Thu/Fri/Sat day boundaries (TODO confirm against the data cadence)
        png(filename = "plot2.png",width=480,height=480)
        plot(data_red$Global_active_power,type="l",xaxt="n",xlab="",main="",ylab="Global active power (kilowatts)")
        axis(1,at=c(0,1441,2880),labels=c("Thu","Fri","Sat"), col.axis="black")
        dev.off()
}
# set working directory and read data in
setwd("C:/Users/Matteo/Documents/Courses/04 - Exploratory Data Analysis/R data & Code/Course Project 1")
data <- read.table("household_power_consumption.txt",header=TRUE,sep=";",colClasses = "character")
# subset data for "2007-02-01" and "2007-02-02"
data_red <- data[(data$Date == "1/2/2007") | (data$Date == "2/2/2007"), ]
data_red[,3:9] <- sapply(data_red[,3:9],as.numeric)
# convert dates and times to Date/Time classes
library(lubridate)
data_red$Date <- dmy(data_red$Date)
data_red$Time <- hms(data_red$Time)
# make plot and save file
png(filename = "plot2.png",width=480,height=480)
plot(data_red$Global_active_power,type="l",xaxt="n",xlab="",main="",ylab="Global active power (kilowatts)")
axis(1,at=c(0,1441,2880),labels=c("Thu","Fri","Sat"), col.axis="black")
dev.off()
} |
# DRA use Mi1 to define a coord trans -----------------------------------------
# Build a local PCA coordinate frame around the dorsal rim area (DRA): take the
# medulla portions of the DRA R7/R8 photoreceptors, find the 7 nearest Mi1
# columns, and run PCA on their skeleton nodes.
# - 2 col of DRA R7/8: keep only nodes inside the medulla mesh
DRAR7_me <- nlapply(DRAR7, function(x) subset(x, pointsinside(x, ME_msh))) #me portion
DRAR8_me <- nlapply(DRAR8, function(x) subset(x, pointsinside(x, ME_msh)))
# DRA_ref_com <- rbind(xyzmatrix(DRAR7_me), xyzmatrix(DRAR8_me)) %>% colMeans()
# reference point: center of mass of the FIRST R7/R8 pair only
DRA_ref_com <- rbind(xyzmatrix(DRAR7_me[[1]]), xyzmatrix(DRAR8_me[[1]])) %>% colMeans() #change back to polar col
# indices of the 7 Mi1 columns whose M5 positions are closest to the reference
ii <- sweep(xyz_M5_avg, 2, DRA_ref_com)^2 %>% rowSums() %>% sqrt() %>% order() %>% head(.,7)
DRA_Mi1 <- nlapply(Mi1[ii], function(x) subset(x, pointsinside(x,ME_msh,rval='distance') > 0))
# pca on all node coordinates of those Mi1 skeletons
node_xyz <- xyzmatrix(DRA_Mi1)
DRA_me_pca <- prcomp(node_xyz)
# Orient the frame: if PC1 points away from the reference direction
# (c(-0.84, 0.20, -0.49) -- presumably chosen by inspection, TODO confirm),
# flip ALL principal axes at once.
if (DRA_me_pca$rotation[,1] %*% c(-0.84, 0.20, -0.49) < 0) {
  DRA_me_pca$rotation <- - DRA_me_pca$rotation
}
# Enforce a right-handed frame: PC3 must agree with PC1 x PC2.
if (t(cross3D(DRA_me_pca$rotation[,1],DRA_me_pca$rotation[,2])) %*% DRA_me_pca$rotation[,3] < 0 ) {
  DRA_me_pca$rotation[,3] <- - DRA_me_pca$rotation[,3]
}
# Mi1_xform_DRA <- xEucl_neu(Mi1, DRA_me_pca$rotation, DRA_me_pca$center)
# Mi1_me_xform_DRA <- xEucl_neu(Mi1_me, DRA_me_pca$rotation, DRA_me_pca$center)
# Transform the DRA photoreceptors into the new frame.
DRAR7_xform <- xEucl_neu(DRAR7, DRA_me_pca$rotation, DRA_me_pca$center)
DRAR8_xform <- xEucl_neu(DRAR8, DRA_me_pca$rotation, DRA_me_pca$center)
# DRA Mi1: row indices (into anno_Mi1 / xyz_M5_avg) of columns annotated in CATMAID
anno_Mi1_DRA <- catmaid_query_by_annotation('DRA_column') # DRA col, light blue, or darker gray
anno_Mi1_DRA_excl <- catmaid_query_by_annotation('non_PR_column') # DRA col excluded
ind_Mi1_DRA <- which(anno_Mi1$skid %in% anno_Mi1_DRA$skid)
# hand-picked "home" column indices, removed from the DRA set below and used
# later as centers for the top-view selection
ind_Mi1_DRA_hcol <- c(133, 268, 42)
ind_Mi1_DRA <- ind_Mi1_DRA[!(ind_Mi1_DRA %in% ind_Mi1_DRA_hcol)] #exclude home column
ind_Mi1_DRA_excl <- which(anno_Mi1$skid %in% anno_Mi1_DRA_excl$skid)
# # DEBUG, id pale and yellow col Mi1 index
# nopen3d()
# plot3d(DRAR7, lwd =2)
# points3d(xyz_M5_avg, size = 10, col='gray')
# # identify3d(xyz_M5_avg)
# - meshes
# - transfomr medulla mesh
DRA_ME_msh_xform <- ME_msh
DRA_ME_msh_xform$vb[1:3,] <- sweep(t(ME_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
DRA_AOTU_msh_xform <- AOTU_msh
DRA_AOTU_msh_xform$vb[1:3,] <- sweep(t(AOTU_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
DRA_ME_L_msh_xform <- ME_L_msh
DRA_ME_L_msh_xform$vb[1:3,] <- sweep(t(ME_L_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
DRA_PLP_R_msh_xform <- PLP_R_msh
DRA_PLP_R_msh_xform$vb[1:3,] <- sweep(t(PLP_R_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
# - axes
DRA_axis_ori <- c(-110000, -50000, -40000)
DRA_axis_lat <- -DRA_me_pca$rotation[1,] * 15000
DRA_axis_dor <- DRA_me_pca$rotation[2,] * -15000
DRA_axis_post <- DRA_me_pca$rotation[3,] * 15000
# - scale bar
DRA_xAng <- -30 #rotation angle around x-axis, align with long axis of ME
# DRA_xAng <- 20 # align col
DRA_xAngRot <- (DRA_xAng - 90)/180*pi
DRA_xRot <- matrix(c(1,0,0,
0, cos(DRA_xAngRot), sin(DRA_xAngRot),
0, -sin(DRA_xAngRot), cos(DRA_xAngRot)), ncol = 3, byrow = T)
DRA_xAngRot2 <- DRA_xAng/180*pi
DRA_xRot2 <- matrix(c(1,0,0,
0, cos(DRA_xAngRot2), sin(DRA_xAngRot2),
0, -sin(DRA_xAngRot2), cos(DRA_xAngRot2)), ncol = 3, byrow = T)
# top view: rotation matrices and a scale bar for the top-view rendering.
# Angles are in degrees, converted to radians before building the matrices.
DRA_xAng_top <- 30
DRA_xAngRot_top <- (DRA_xAng_top)/180*pi
DRA_xRot_top <- matrix(c(1,0,0,
                     0, cos(DRA_xAngRot_top), sin(DRA_xAngRot_top),
                     0, -sin(DRA_xAngRot_top), cos(DRA_xAngRot_top)), ncol = 3, byrow = T)
# NOTE(review): DRA_yRot_top and DRA_zRot_top below reuse the x-axis rotation
# layout (first row 1,0,0). A rotation about y or z would put the unit entry on
# the second/third diagonal position instead (compare DRA_yRot further down,
# which uses the proper y-axis form). Confirm whether the naming or the
# construction is what is intended here.
DRA_yAng_top <- 90
DRA_yAngRot_top <- DRA_yAng_top/180*pi
DRA_yRot_top <- matrix(c(1,0,0,
                         0, cos(DRA_yAngRot_top), sin(DRA_yAngRot_top),
                         0, -sin(DRA_yAngRot_top), cos(DRA_yAngRot_top)), ncol = 3, byrow = T)
DRA_zAng_top <- 80
DRA_zAngRot_top <- (DRA_zAng_top)/180*pi
DRA_zRot_top <- matrix(c(1,0,0,
                         0, cos(DRA_zAngRot_top), sin(DRA_zAngRot_top),
                         0, -sin(DRA_zAngRot_top), cos(DRA_zAngRot_top)), ncol = 3, byrow = T)
# Scale bar: a 10000-long segment, axis-swapped (y<->z with sign flip) and then
# rotated by DRA_xRot2 into the viewing frame.
DRA_sbar <- matrix(c(30000,-5000,40000, 30000,5000,40000), ncol=3,byrow=T) %*%
  t(matrix(c(1,0,0,
             0,0,1,
             0,-1,0), ncol=3, byrow=T)) %*% t(DRA_xRot2)
# # -- side, anterior view
# DRA_sbar_side <- matrix(c(40000,-5000,20000, 40000,5000,20000), ncol=3,byrow=T) %*%
# t(matrix(c(1,0,0,
# 0,0,1,
# 0,-1,0), ncol=3, byrow=T)) %*% t(DRA_xRot2)
#
# #rotation angle around y-axis to level layer 5
# DRA_yAng <- 0
# DRA_yAngRot <- -DRA_yAng/180*pi
# DRA_yRot <- matrix(c(cos(DRA_yAngRot), 0, sin(DRA_yAngRot),
# 0,1,0,
# -sin(DRA_yAngRot), 0, cos(DRA_yAngRot)), ncol = 3, byrow = T)
#
# # DRA_sbar_rot90 <- DRA_sbar %*% t(matrix(c(1,0,0,
# # 0,0,1,
# # 0,-1,0), ncol=3, byrow=T)) %*% t(DRA_xRot) %*% t(DRA_yRot)
#
# DRA_sbar_rot90 <- matrix(c(30000,5000,10000, 30000,5000,20000), ncol=3,byrow=T)
# -- side, polar view
DRA_sbar_side = matrix(c(30000,5000,10000, 30000,5000,20000), ncol=3,byrow=T) %*% t(DRA_xRot2)
#rotation angle around y-axis to level layer 5
DRA_yAng <- -20
DRA_yAngRot <- -DRA_yAng/180*pi
DRA_yRot <- matrix(c(cos(DRA_yAngRot), 0, sin(DRA_yAngRot),
0,1,0,
-sin(DRA_yAngRot), 0, cos(DRA_yAngRot)), ncol = 3, byrow = T)
DRA_sbar_rot90 <- DRA_sbar %*% t(DRA_xRot) %*% t(DRA_yRot)
DRA_sbar_side_5 <- DRA_sbar_side %*% t(DRA_xRot) %*% t(DRA_yRot)
# col for top view --------------------------------------------------------
# For each of the three aMe12 neurons, map its tagged "columnar branch" nodes
# to the nearest Mi1 column (and each Mi1 column to its nearest branch node)
# by minimum squared distance.
aMe12_ind_Mi1 <- list()   # per aMe12 neuron: nearest Mi1 column for each branch point
Mi1_ind_aMe12 <- list()   # per aMe12 neuron: nearest branch point for each Mi1 column
for (ii_n in 1:3) {
  tar <- aMe12[[ii_n]]
  # skeleton rows of the nodes carrying the "columnar branch" tag
  ii_col = match(tar$tags$`columnar branch`, tar$d$PointNo)
  xyz_col = xyzmatrix(tar$d[ii_col,])
  aMe12_ind_Mi1[[ii_n]] <- apply(xyz_col, 1, function(x) order(rowSums(sweep(Mi1_M5_xyz,2,x)^2))[1] )
  Mi1_ind_aMe12[[ii_n]] <- apply(Mi1_M5_xyz, 1, function(x) order(rowSums(sweep(xyz_col,2,x)^2))[1] )
}
# Columns within a radius of each of the three DRA home columns.
# NOTE(review): the first radius is 60000 while the other two are 30000 --
# confirm the asymmetry is intended.
i1 <- sqrt(rowSums(sweep(xyz_M5_avg, 2, xyz_M5_avg[ind_Mi1_DRA_hcol[1],], '-')^2)) <60000
i2 <- sqrt(rowSums(sweep(xyz_M5_avg, 2, xyz_M5_avg[ind_Mi1_DRA_hcol[2],], '-')^2)) <30000
i3 <- sqrt(rowSums(sweep(xyz_M5_avg, 2, xyz_M5_avg[ind_Mi1_DRA_hcol[3],], '-')^2)) <30000
ind_Mi1_top <- which(i1|i2|i3)
# Partition for coloring: grey = neither DRA nor excluded; red = DRA columns.
ind_Mi1_top_grey <- ind_Mi1_top[!(ind_Mi1_top %in% c(ind_Mi1_DRA_excl, ind_Mi1_DRA))]
ind_Mi1_top_red <- ind_Mi1_top[ind_Mi1_top %in% c(ind_Mi1_DRA)]
# Split the grey set by whether an aMe12 columnar branch maps onto the column
# (presumably the pale/yellow distinction noted in the DEBUG comment earlier
# in this file -- confirm).
ind_Mi1_top_y <- ind_Mi1_top_grey[!(ind_Mi1_top_grey %in% unlist(aMe12_ind_Mi1))]
ind_Mi1_top_p <- ind_Mi1_top_grey[(ind_Mi1_top_grey %in% unlist(aMe12_ind_Mi1))]
# meshes ------------------------------------------------------------------
# - Mi1 M5 nodes
xyz_M5_avg_xform_DRA <- sweep(xyz_M5_avg, 2, DRA_me_pca$center) %*% DRA_me_pca$rotation
xyz_M5_avg_xform_DRA_yz <- xyz_M5_avg_xform_DRA
xyz_M5_avg_xform_DRA_yz[,1] <- 0
# Nnb <- 4
# ind_nb4 <- xyz_M5_avg_xform_DRA_yz^2 %>% rowSums() %>% order() %>% .[1:Nnb]
Nnb <- 16
ind_nb16 <- xyz_M5_avg_xform_DRA_yz^2 %>% rowSums() %>% order() %>% .[1:Nnb]
# Nnb <- 25
# ind_nb25 <- xyz_M5_avg_xform_DRA_yz^2 %>% rowSums() %>% order() %>% .[1:Nnb]
# nopen3d()
# points3d(xyz_M5_avg_xform_DRA_yz, col='grey', size = 20)
# points3d(xyz_M5_avg_xform_DRA, col='grey', size = 10)
# # points3d(ref_com4,col='red',size=10)
# points3d(xyz_M5_avg_xform_DRA_yz[ind_nb25,], size=25, col='blue')
#
# # # ind_16 <- identify3d(xyz_M5_avg_xform_yz)
# # Mi1_16 <- xEucl_neu(Mi1[ind_16], me_pca$rotation, me_pca$center)
#
# plot3d(DRAR7_xform, col='green', lwd=2)
# plot3d(DRAR8_xform, col='gold2', lwd=2)
# make new local mesh
# ref_com_avg <- colMeans(ref_com4)
xyz <- t(DRA_ME_msh_xform$vb[1:3,])
dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb16,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
xyz_msh <- xyz[dd < 1.2*max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb16,])), ]
DRA_ME_msh_local_large <- ashape3d(xyz_msh, alpha = 60000) %>% as.mesh3d()
xyz_msh <- xyz[dd < 0.8*max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb16,])), ]
DRA_ME_msh_local <- ashape3d(xyz_msh, alpha = 60000) %>% as.mesh3d()
# xyz_msh <- xyz[dd < max(dist(ref_com4)), ]
# ME_msh_local_2 <- ashape3d(xyz_msh, alpha = 50000) %>% as.mesh3d()
# - transfomr LO mesh
DRA_LO_msh_xform <- LO_msh
DRA_LO_msh_xform$vb[1:3,] <- sweep(t(LO_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
# ME layer middle DRAR7, polar view ----------------------------------------------------------------
# -- ME boundary
yz_msh <- t(DRA_ME_msh_local$vb)
# yz_dorsal <- data.frame(y = seq(20000, -20000, length.out = 20),
# z = seq(0, -0, length.out = 20))
#
# yz_posterior <- data.frame(y = seq(0, 0, length.out = 16),
# z = seq(-20000, 20000, length.out = 16))
yz_dorsal <- data.frame(y = seq(5000, -45000, length.out = 20),
z = seq(0, -0, length.out = 20))
yz_posterior <- data.frame(y = seq(0, 0, length.out = 16),
z = seq(-45000, 5000, length.out = 16))
# choose yz
nopen3d()
points3d(yz_dorsal)
points3d(yz_posterior, col='red')
# points3d(yz_msh[,1:3])
shade3d(DRA_ME_msh_local, alpha=0.1, col='gray')
# shade3d(DRA_ME_msh_xform, alpha=0.1, col='gold')
axes3d(c('x','y','z')); title3d('','','x','y','z')
# points on mesh: fit smooth top and bottom surfaces of the local medulla mesh
# with quadratic polynomials x = f(y, z), then evaluate each fit along the
# dorsal and posterior sample lines to obtain boundary curves for the layers.
xyz <- t(DRA_ME_msh_xform$vb[1:3,])
# distance of each mesh vertex (in the y-z plane) from the 16-column neighborhood center
dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb16,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
xyz_msh <- xyz[dd < max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb16,])), ]
# top surface: vertices above a hard-coded x cutoff.
# (raw = TRUE spelled out -- the original used `T`, which is reassignable.)
xyz_top <- xyz_msh[xyz_msh[,1] > 17000, ]
x <- xyz_top[,1]; y <- xyz_top[,2]; z <- xyz_top[,3]
fitlm <- lm(x ~ poly(y, z, degree = 2, raw = TRUE))
valfit <- predict(fitlm, yz_dorsal) # evaluate the fit on the dorsal line
top_bd_dorsal <- cbind(valfit, yz_dorsal)
valfit <- predict(fitlm, yz_posterior) # evaluate the fit on the posterior line
top_bd_posterior <- cbind(valfit, yz_posterior)
# bottom surface: vertices below a hard-coded x cutoff
xyz_bot <- xyz_msh[xyz_msh[,1] < -24000, ]
x <- xyz_bot[,1]; y <- xyz_bot[,2]; z <- xyz_bot[,3]
fitlm <- lm(x ~ poly(y, z, degree = 2, raw = TRUE))
valfit <- predict(fitlm, yz_dorsal) # evaluate the fit on the dorsal line
bot_bd_dorsal <- cbind(valfit, yz_dorsal)
valfit <- predict(fitlm, yz_posterior) # evaluate the fit on the posterior line
bot_bd_posterior <- cbind(valfit, yz_posterior)
# # 2D mesh as outline
# DRA_ME_msh_local_bd <- rbind(top_bd_dorsal, apply(bot_bd_dorsal, MARGIN = 2, rev))
# DRA_ME_msh_local_bd <- rbind(DRA_ME_msh_local_bd[-1,], DRA_ME_msh_local_bd[1,]) %>%
# cbind(DRA_ME_msh_local_bd, .) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# # -- layer Fischbach
# bp <- c(0, 0.55,0.4,0.5,0.2,0.35,0.4,0.6,0.3,0.7,0.4) %>% rev()
# bp_prob <- cumsum(bp)/sum(bp)
# x_qua <- apply(cbind(top_bd$valfit, bot_bd$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp_prob) )
# layers <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp_prob)) {
# cc <- cbind(x_qua[j,], yz)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# layers <- rbind(layers, c2)
# }
# layers[,1] <- layers[,1] + 3000
# --layers from 7-column
# bp7c <- c(0, 5.5,14.5,20,23,28,36,42,48,56,61) #layer 1 to 10
bp7c <- c(0, 5.5, 14.5, 20, 23, 28, 33, 39, 45, 56, 61) #layer 1 to 10, modify 6-7-8-9 by Mi1, C2, Tm5 and Tm20
bp7c <- max(bp7c) - bp7c %>% rev() #layer 10 to 1
bp7c_prob <- bp7c/max(bp7c)
x_qua <- apply(cbind(top_bd_dorsal$valfit, bot_bd_dorsal$valfit), MARGIN = 1,
function(x) quantile(x, probs = bp7c_prob) )
DRA_layers_ME_dorsal <- matrix(ncol = 3, nrow = 0)
for (j in 1:length(bp7c_prob)) {
cc <- cbind(x_qua[j,], yz_dorsal)
c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
t() %>%
matrix(., ncol = 3, byrow = T)
DRA_layers_ME_dorsal <- rbind(DRA_layers_ME_dorsal, c2)
}
# DRA_layers_ME_dorsal[,1] <- DRA_layers_ME_dorsal[,1] * 0.97 + 1100
x_qua <- apply(cbind(top_bd_posterior$valfit, bot_bd_posterior$valfit), MARGIN = 1,
function(x) quantile(x, probs = bp7c_prob) )
DRA_layers_ME_posterior <- matrix(ncol = 3, nrow = 0)
for (j in 1:length(bp7c_prob)) {
cc <- cbind(x_qua[j,], yz_posterior)
c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
t() %>%
matrix(., ncol = 3, byrow = T)
DRA_layers_ME_posterior <- rbind(DRA_layers_ME_posterior, c2)
}
bot2 <- DRA_layers_ME_posterior[(nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),]
top2 <- DRA_layers_ME_posterior[(nrow(DRA_layers_ME_posterior)-1):(nrow(DRA_layers_ME_posterior)), ]
DRA_layers_ME_posterior <- DRA_layers_ME_posterior[-c(
(nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),
(nrow(DRA_layers_ME_posterior)-1):(nrow(DRA_layers_ME_posterior))), ]
cc <- cbind(x_qua[2:10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])
c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
t() %>%
matrix(., ncol = 3, byrow = T)
DRA_layers_ME_posterior <- rbind(DRA_layers_ME_posterior, c2)
# add round corner
pt_corner <- rbind(bot2,
as.numeric(c(x_qua[2,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
ptArc <- round_corner(pt_corner)
cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
t() %>%
matrix(., ncol = 3, byrow = T)
DRA_layers_ME_posterior <- rbind(DRA_layers_ME_posterior, c2)
pt_corner <- rbind(top2,
as.numeric(c(x_qua[10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
ptArc <- round_corner(pt_corner)
cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
t() %>%
matrix(., ncol = 3, byrow = T)
DRA_layers_ME_posterior <- rbind(DRA_layers_ME_posterior, c2)
DRA_layers_ME_posterior_rot90 <- DRA_layers_ME_posterior %*% t(DRA_xRot2)
# bd_Mx <- x_qua[,ncol(x_qua)] %>% rev()
# bd_Mx <- bd_Mx * 0.97 + 1100
# bd_Mx <- bd_Mx[-length(bd_Mx)] + diff(bd_Mx)/2
# DRA_layers_ME_anno <- cbind(bd_Mx, yz_dorsal[nrow(yz_dorsal),])
# # ME layer side DRAR7, 2 - posterior ---------------------------------------------------
#
# # Nnb <- 9
# # ind_nb <- sweep(xyz_M5_avg_xform_DRA, 2,
# # xyz_M5_avg_xform_DRA[ind_Mi1_DRA_hcol[2],])^2 %>%
# # rowSums() %>%
# # order() %>%
# # .[1:Nnb]
# # ind_nb <- c(704, 644, 419, 217, 323, 537, 642,221)
#
# ind_nb <- c(521, 325, 656, 770, 268, 518, 516, 517, 269, 774, 322, 494, 425)
#
# # nopen3d()
# # points3d(xyz_M5_avg_xform_DRA_yz, col='grey', size = 20)
# # # points3d(xyz_M5_avg_xform_DRA, col='grey', size = 10)
# # # points3d(ref_com4,col='red',size=10)
# # points3d(xyz_M5_avg_xform_DRA_yz[ind_nb,], size=25, col='blue')
# # plot3d(DRAR7_xform, col='green', lwd=2)
# # plot3d(DRAR8_xform, col='gold2', lwd=2)
#
# # -- make local mesh
# xyz <- t(DRA_ME_msh_xform$vb[1:3,])
# dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
# xyz_msh <- xyz[dd < 1.2*max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb,])), ]
# DRA_ME_msh_local_2 <- ashape3d(xyz_msh, alpha = 60000) %>% as.mesh3d()
#
# # -- ME boundary
# yz_msh <- t(DRA_ME_msh_local_2$vb)
# yz_dorsal <- data.frame(y = seq(-20000, -50000, length.out = 20),
# z = seq(-0000, -60000, length.out = 20))
# yz_posterior <- data.frame(y = seq(-10000, -45000, length.out = 16),
# z = seq(-40000, -25000, length.out = 16))
#
# # choose yz
# nopen3d()
# plot3d(DRAR7_xform[[2]], lwd=2)
# # points3d(yz_msh[,1:3])
# shade3d(DRA_ME_msh_local_2, alpha=0.1, col='gray')
# points3d(yz_dorsal)
# points3d(yz_posterior, col='red')
# # shade3d(DRA_ME_msh_xform, alpha=0.1, col='gold')
# axes3d(c('x','y','z')); title3d('','','x','y','z')
#
# points3d(xyz_bot)
# # points3d(top_bd_dorsal_2)
# # points3d(bot_bd_dorsal_2)
#
# # points on mesh
# xyz <- t(DRA_ME_msh_xform$vb[1:3,])
# dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
# xyz_msh <- xyz[dd < max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb,])), ]
# # top
# xyz_top <- xyz_msh[xyz_msh[,1] > 30000, ]
# x <- xyz_top[,1]; y <- xyz_top[,2]; z <- xyz_top[,3]
# fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
# valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
# top_bd_dorsal_2 <- cbind(valfit, yz_dorsal)
# valfit <- predict(fitlm, yz_posterior) #generate values from the fit
# top_bd_posterior_2 <- cbind(valfit, yz_posterior)
# # bottom
# xyz_bot <- xyz_msh[xyz_msh[,1] < -18000 & xyz_msh[,1] > -28000, ]
# x <- xyz_bot[,1]; y <- xyz_bot[,2]; z <- xyz_bot[,3]
# fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
# valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
# bot_bd_dorsal_2 <- cbind(valfit, yz_dorsal)
# valfit <- predict(fitlm, yz_posterior) #generate values from the fit
# bot_bd_posterior_2 <- cbind(valfit, yz_posterior)
#
# x_qua <- apply(cbind(top_bd_dorsal_2$valfit, bot_bd_dorsal_2$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp7c_prob) )
# DRA_layers_ME_dorsal_2 <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp7c_prob)) {
# cc <- cbind(x_qua[j,], yz_dorsal)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_dorsal_2 <- rbind(DRA_layers_ME_dorsal_2, c2)
# }
# # manual translation
# DRA_layers_ME_dorsal_2[,1] <- DRA_layers_ME_dorsal_2[,1] * 1.3 - 12000
#
# x_qua <- apply(cbind(top_bd_posterior_2$valfit, bot_bd_posterior_2$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp7c_prob) )
# DRA_layers_ME_posterior_2 <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp7c_prob)) {
# cc <- cbind(x_qua[j,], yz_posterior)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_2 <- rbind(DRA_layers_ME_posterior_2, c2)
# }
# bot2 <- DRA_layers_ME_posterior_2[(nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),]
# top2 <- DRA_layers_ME_posterior_2[(nrow(DRA_layers_ME_posterior_2)-1):(nrow(DRA_layers_ME_posterior_2)), ]
# DRA_layers_ME_posterior_2 <- DRA_layers_ME_posterior_2[-c(
# (nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),
# (nrow(DRA_layers_ME_posterior_2)-1):(nrow(DRA_layers_ME_posterior_2))), ]
#
# cc <- cbind(x_qua[2:10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_2 <- rbind(DRA_layers_ME_posterior_2, c2)
#
# # add round corner
# pt_corner <- rbind(bot2,
# as.numeric(c(x_qua[2,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
# ptArc <- round_corner(pt_corner)
# cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_2 <- rbind(DRA_layers_ME_posterior_2, c2)
#
# pt_corner <- rbind(top2,
# as.numeric(c(x_qua[10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
# ptArc <- round_corner(pt_corner)
# cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_2 <- rbind(DRA_layers_ME_posterior_2, c2)
#
# # manual adjust
# DRA_layers_ME_posterior_2[,1] <- DRA_layers_ME_posterior_2[,1]*1.1 - 9000
#
# # DRA_layers_ME_posterior_2_rot90 <- DRA_layers_ME_posterior_2 %*% t(DRA_xRot2)
#
#
#
#
# # # for DRAR7 - 3
# # # -- ME boundary
# # yz_msh <- t(DRA_ME_msh_local_2$vb)
# # yz_dorsal <- data.frame(y = seq(-20000, 30000, length.out = 20),
# # z = seq(100000, 50000, length.out = 20))
# #
# # # bottom
# # xyz_bot <- xyz_msh[xyz_msh[,1] < -45000 & xyz_msh[,1] > -70000, ]
# # # top
# # xyz_top <- xyz_msh[xyz_msh[,1] > 21000, ]
# # ME layer side DRAR7, 3 - anterior ---------------------------------------
#
# Nnb <- 16
# ind_nb <- sweep(xyz_M5_avg_xform_DRA, 2,
# xyz_M5_avg_xform_DRA[ind_Mi1_DRA_hcol[3],])^2 %>%
# rowSums() %>%
# order() %>%
# .[1:Nnb]
#
# nopen3d()
# points3d(xyz_M5_avg_xform_DRA_yz, col='grey', size = 20)
# # points3d(xyz_M5_avg_xform_DRA, col='grey', size = 10)
# # points3d(ref_com4,col='red',size=10)
# points3d(xyz_M5_avg_xform_DRA_yz[ind_nb,], size=25, col='blue')
# plot3d(DRAR7_xform, col='green', lwd=2)
# plot3d(DRAR8_xform, col='gold2', lwd=2)
#
# # -- make local mesh
# xyz <- t(DRA_ME_msh_xform$vb[1:3,])
# dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
# xyz_msh <- xyz[dd < 1.2*max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb,])), ]
# DRA_ME_msh_local_3 <- ashape3d(xyz_msh, alpha = 60000) %>% as.mesh3d()
#
# # -- ME boundary
# # yz_msh <- t(DRA_ME_msh_local_3$vb)
# # yz_dorsal <- data.frame(y = seq(80000, 35000, length.out = 20),
# # z = seq(-40000, -0000, length.out = 20))
# #
# # yz_posterior <- data.frame(y = seq(30000, 70000, length.out = 16),
# # z = seq(-40000, -8000, length.out = 16))
#
# yz_msh <- t(DRA_ME_msh_local_3$vb)
# yz_dorsal <- data.frame(y = seq(-35000, 30000, length.out = 16),
# z = seq(-30000, 10000, length.out = 16))
#
# yz_posterior <- data.frame(y = seq( -15000,7500, length.out = 20),
# z = seq( 21300,-12000, length.out = 20))
#
# # choose yz
# nopen3d()
# shade3d(DRA_ME_msh_local_3, alpha=0.1, col='gray')
# plot3d(DRAR7_xform[[3]], lwd=2)
# points3d(yz_dorsal)
# points3d(yz_posterior, col='red')
# # points3d(yz_msh[,1:3])
# # shade3d(DRA_ME_msh_xform, alpha=0.1, col='gold')
# axes3d(c('x','y','z')); title3d('','','x','y','z')
# # points3d(xyz_bot)
# # points3d(top_bd_dorsal_3)
# # points3d(bot_bd_dorsal_3)
#
# # points on mesh
# xyz <- t(DRA_ME_msh_xform$vb[1:3,])
# dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
# xyz_msh <- xyz[dd < max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb,])), ]
# # top
# xyz_top <- xyz_msh[xyz_msh[,1] > 0, ]
# x <- xyz_top[,1]; y <- xyz_top[,2]; z <- xyz_top[,3]
# fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
# valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
# top_bd_dorsal_3 <- cbind(valfit, yz_dorsal)
# valfit <- predict(fitlm, yz_posterior) #generate values from the fit
# top_bd_posterior_3 <- cbind(valfit, yz_posterior)
# # bottom
# # xyz_bot <- xyz_msh[xyz_msh[,1] < -24000, ]
# xyz_bot <- xyz_msh[xyz_msh[,1] < -24000 & xyz_msh[,2] < 68000, ]
# x <- xyz_bot[,1]; y <- xyz_bot[,2]; z <- xyz_bot[,3]
# fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
# valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
# bot_bd_dorsal_3 <- cbind(valfit, yz_dorsal)
# valfit <- predict(fitlm, yz_posterior) #generate values from the fit
# bot_bd_posterior_3 <- cbind(valfit, yz_posterior)
#
# x_qua <- apply(cbind(top_bd_dorsal_3$valfit, bot_bd_dorsal_3$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp7c_prob) )
# DRA_layers_ME_dorsal_3 <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp7c_prob)) {
# cc <- cbind(x_qua[j,], yz_dorsal)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_dorsal_3 <- rbind(DRA_layers_ME_dorsal_3, c2)
# }
#
# # manual translation
# DRA_layers_ME_dorsal_3[,1] <- DRA_layers_ME_dorsal_3[,1] - 4000
#
#
# x_qua <- apply(cbind(top_bd_posterior_3$valfit, bot_bd_posterior_3$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp7c_prob) )
# DRA_layers_ME_posterior_3 <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp7c_prob)) {
# cc <- cbind(x_qua[j,], yz_posterior)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_3 <- rbind(DRA_layers_ME_posterior_3, c2)
# }
# bot2 <- DRA_layers_ME_posterior_3[(nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),]
# top2 <- DRA_layers_ME_posterior_3[(nrow(DRA_layers_ME_posterior_3)-1):(nrow(DRA_layers_ME_posterior_3)), ]
# DRA_layers_ME_posterior_3 <- DRA_layers_ME_posterior_3[-c(
# (nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),
# (nrow(DRA_layers_ME_posterior_3)-1):(nrow(DRA_layers_ME_posterior_3))), ]
#
# cc <- cbind(x_qua[2:10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_3 <- rbind(DRA_layers_ME_posterior_3, c2)
#
# # add round corner
# pt_corner <- rbind(bot2,
# as.numeric(c(x_qua[2,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
# ptArc <- round_corner(pt_corner)
# cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_3 <- rbind(DRA_layers_ME_posterior_3, c2)
#
# pt_corner <- rbind(top2,
# as.numeric(c(x_qua[10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
# ptArc <- round_corner(pt_corner)
# cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_3 <- rbind(DRA_layers_ME_posterior_3, c2)
#
# # manual adjust
# DRA_layers_ME_posterior_3[,1] <- DRA_layers_ME_posterior_3[,1] - 5500
#
# # DRA_layers_ME_posterior_3_rot90 <- DRA_layers_ME_posterior_3 %*% t(DRA_xRot2)
# # SAVE --------------------------------------------------------------------
#
# save(DRA_layers_ME_dorsal, DRA_layers_ME_dorsal_2, DRA_layers_ME_dorsal_3,
# DRA_layers_ME_posterior, DRA_layers_ME_posterior_2, DRA_layers_ME_posterior_3,
# DRA_layers_ME_posterior_rot90, file = "data/DRA_layers.RData")
# # Dm-DRA1 test view ---------------------------------------------------------------------
#
# type <- "Dm-DRA1"
# tb <- read.csv(paste("table_by_type_out_DRA/", "DRAR7R8_outgoing_", type, ".csv", sep = ''))
#
# skid <- tb$skid %>% na.omit()
# neu <- read.neurons.catmaid(skid, .progress='text' )
# neu_xform <- xEucl_neu(neu, DRA_me_pca$rotation, DRA_me_pca$center)
#
# ii <- 4 #4, 2, 1 for 3 seed col positions
#
#
# # - top
# nopen3d()
# par3d('windowRect' = c(100,100,1300,1300))
# plot3d(neu_xform[[ii]], col='gray40', lwd=2, soma = T, WithNodes = F, lit=F)
# pch3d(xyz_M5_avg_xform_DRA_yz[ind_Mi1_DRA_hcol,,drop=F], radius=5000,col="#b7252a",pch=16,alpha=0.7)
# pch3d(xyz_M5_avg_xform_DRA_yz[ind_Mi1_top_grey,], radius=5000,col="grey",pch=16,alpha=0.2)
# pch3d(xyz_M5_avg_xform_DRA_yz[ind_Mi1_top_red,], radius=5000,col="#b7252a",pch=16,alpha=0.3)
# segments3d(sweep(DRA_sbar,2,c(0,0,-20000)), lwd=2)
# text3d(colMeans(DRA_sbar)+c(0,0,23000), texts = "10 um", cex = 1.5)
# rgl.viewpoint(fov=0,zoom=1, userMatrix= t(translationMatrix(1e4,0,0)) %*%
# rotationMatrix(-80/180*pi,0,0,1) %*%
# rotationMatrix(-90/180*pi,0,1,0) %*%
# rotationMatrix(DRA_xAng/180*pi,1,0,0) )
#
# # add syn from R7
# conn_in_R7 <- catmaid_get_connectors_between(pre_skids = skid_R7s, post_skids = skid[ii])
# if (!is.null(conn_in_R7)) {
# conn_in_R7 <- conn_in_R7[, c("post_node_x", "post_node_y", "post_node_z")]
# conn_in_R7_xform <- sweep(as.matrix(conn_in_R7), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation
# pch3d(conn_in_R7_xform, pch=16, radius=1000, alpha=0.7, col=pal_syn['R7'])
# }
#
# # # save
# # rgl.snapshot(filename = paste("F5_", type, "_", skid[ii], "_top.png", sep = ''))
#
#
# # # - side
# # nopen3d()
# # par3d('windowRect' = c(100,100,1300,1300))
# # plot3d(neu_xform[[ii]], col='gray40', lwd=2, soma = T, WithNodes = F, lit=F)
# # segments3d(DRA_sbar_side, lwd=2)
# # plot3d(DRAR7_xform[[1]], col=pal_syn["R7"], lwd=2, soma = T, WithNodes = F, lit=F)
# # segments3d(DRA_layers_ME_dorsal, lwd=1)
# # rgl.viewpoint(fov=0,zoom=0.75, userMatrix= rotationMatrix(90/180*pi,0,0,1) %*%
# # rotationMatrix(0/180*pi,1,0,0) %*%
# # rotationMatrix(DRA_yAng/180*pi,0,1,0) ) # add 15 deg for the tilt
# # conn_in_R7 <- catmaid_get_connectors_between(pre_skids = skid_R7s, post_skids = skid[ii])
# # if (!is.null(conn_in_R7)) {
# # conn_in_R7 <- conn_in_R7[, c("post_node_x", "post_node_y", "post_node_z")]
# # conn_in_R7_xform <- sweep(as.matrix(conn_in_R7), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation
# # pch3d(conn_in_R7_xform, pch=16, radius=1000, alpha=0.7, col=pal_syn['R7'])
# # }
# #
# # # save
# # rgl.snapshot(filename = paste("F5_", type, "_", skid[ii], "_side_dorsal.png", sep = ''))
# #
#
#
# # rot 90
# ii_hc <- sweep(xyz_M5_avg_xform_DRA[ind_Mi1_DRA_hcol,], 2, colMeans(xyzmatrix(neu_xform[[ii]]$d)), '-') %>%
# .^2 %>% rowSums() %>% order()
# ii_hc[1]
#
# nopen3d()
# par3d('windowRect' = c(100,100,1300,1300))
# plot3d(neu_xform[[ii]], col='gray40', lwd=2, soma = T, WithNodes = F, lit=F)
# if (ii_hc[1] == 1) {
# segments3d(DRA_layers_ME_posterior_rot90, lwd=1)
# rgl.viewpoint(fov=0,zoom=1, userMatrix= rotationMatrix(90/180*pi,0,0,1) %*%
# rotationMatrix((DRA_xAng-90)/180*pi,1,0,0) %*%
# rotationMatrix(DRA_yAng/180*pi,0,1,0))
# } else if (ii_hc[1] == 2) {
# segments3d(DRA_layers_ME_posterior_2, lwd=1)
# rgl.viewpoint(fov=0,zoom=1,
# userMatrix= matrix(c(-0.084, 0.870, -0.485, -1044.4827,
# 0.996, 0.077, -0.034, 392.4445,
# 0.0077, -0.486, -0.8738, -13105.9336,
# 0,0,0,1), ncol = 4, byrow = T))
# } else if (ii_hc[1] == 3) {
# segments3d(DRA_layers_ME_posterior_3, lwd=1)
# rgl.viewpoint(fov=0,zoom=1,
# userMatrix= matrix(c(0.164, -0.752, -0.6374, -11506.360,
# 0.984, 0.1626, 0.062, 5351.796,
# 0.0569, -0.6379, 0.7679, 0.000,
# 0,0,0,1), ncol = 4, byrow = T))
# }
#
# conn_in_R7 <- catmaid_get_connectors_between(pre_skids = skid_R7s, post_skids = skid[ii])
# if (!is.null(conn_in_R7)) {
# conn_in_R7 <- conn_in_R7[, c("post_node_x", "post_node_y", "post_node_z")]
# conn_in_R7_xform <- sweep(as.matrix(conn_in_R7), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation
# pch3d(conn_in_R7_xform, pch=16, radius=1000, alpha=0.7, col=pal_syn['R7'])
# }
#
# # # save
# # rgl.snapshot(filename = paste("F5_", type, "_", skid[ii], "_side_posterial.png", sep = ''))
| /layer_DRA_polar.R | no_license | artxz/FAFB-photoreceptor-connectivity | R | false | false | 31,005 | r | # DRA use Mi1 to define a coord trans -----------------------------------------
# - 2 col of DRA R7/8
DRAR7_me <- nlapply(DRAR7, function(x) subset(x, pointsinside(x, ME_msh))) #me portion
DRAR8_me <- nlapply(DRAR8, function(x) subset(x, pointsinside(x, ME_msh)))
# DRA_ref_com <- rbind(xyzmatrix(DRAR7_me), xyzmatrix(DRAR8_me)) %>% colMeans()
DRA_ref_com <- rbind(xyzmatrix(DRAR7_me[[1]]), xyzmatrix(DRAR8_me[[1]])) %>% colMeans() #change back to polar col
ii <- sweep(xyz_M5_avg, 2, DRA_ref_com)^2 %>% rowSums() %>% sqrt() %>% order() %>% head(.,7)
DRA_Mi1 <- nlapply(Mi1[ii], function(x) subset(x, pointsinside(x,ME_msh,rval='distance') > 0))
# pca
node_xyz <- xyzmatrix(DRA_Mi1)
DRA_me_pca <- prcomp(node_xyz)
if (DRA_me_pca$rotation[,1] %*% c(-0.84, 0.20, -0.49) < 0) {
DRA_me_pca$rotation <- - DRA_me_pca$rotation
}
if (t(cross3D(DRA_me_pca$rotation[,1],DRA_me_pca$rotation[,2])) %*% DRA_me_pca$rotation[,3] < 0 ) {
DRA_me_pca$rotation[,3] <- - DRA_me_pca$rotation[,3]
}
# Mi1_xform_DRA <- xEucl_neu(Mi1, DRA_me_pca$rotation, DRA_me_pca$center)
# Mi1_me_xform_DRA <- xEucl_neu(Mi1_me, DRA_me_pca$rotation, DRA_me_pca$center)
DRAR7_xform <- xEucl_neu(DRAR7, DRA_me_pca$rotation, DRA_me_pca$center)
DRAR8_xform <- xEucl_neu(DRAR8, DRA_me_pca$rotation, DRA_me_pca$center)
# DRA Mi1
anno_Mi1_DRA <- catmaid_query_by_annotation('DRA_column') # DRA col, light blue, or darker gray
anno_Mi1_DRA_excl <- catmaid_query_by_annotation('non_PR_column') # DRA col excluded
ind_Mi1_DRA <- which(anno_Mi1$skid %in% anno_Mi1_DRA$skid)
ind_Mi1_DRA_hcol <- c(133, 268, 42)
ind_Mi1_DRA <- ind_Mi1_DRA[!(ind_Mi1_DRA %in% ind_Mi1_DRA_hcol)] #exclude home column
ind_Mi1_DRA_excl <- which(anno_Mi1$skid %in% anno_Mi1_DRA_excl$skid)
# # DEBUG, id pale and yellow col Mi1 index
# nopen3d()
# plot3d(DRAR7, lwd =2)
# points3d(xyz_M5_avg, size = 10, col='gray')
# # identify3d(xyz_M5_avg)
# - meshes
# - transfomr medulla mesh
DRA_ME_msh_xform <- ME_msh
DRA_ME_msh_xform$vb[1:3,] <- sweep(t(ME_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
DRA_AOTU_msh_xform <- AOTU_msh
DRA_AOTU_msh_xform$vb[1:3,] <- sweep(t(AOTU_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
DRA_ME_L_msh_xform <- ME_L_msh
DRA_ME_L_msh_xform$vb[1:3,] <- sweep(t(ME_L_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
DRA_PLP_R_msh_xform <- PLP_R_msh
DRA_PLP_R_msh_xform$vb[1:3,] <- sweep(t(PLP_R_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
# - axes
DRA_axis_ori <- c(-110000, -50000, -40000)
DRA_axis_lat <- -DRA_me_pca$rotation[1,] * 15000
DRA_axis_dor <- DRA_me_pca$rotation[2,] * -15000
DRA_axis_post <- DRA_me_pca$rotation[3,] * 15000
# - scale bar
# Two rotations about the x-axis used to orient scale bars and views:
# DRA_xRot rotates by (DRA_xAng - 90) degrees, DRA_xRot2 by DRA_xAng degrees.
DRA_xAng <- -30 # rotation angle (degrees) around x-axis, align with long axis of ME
# DRA_xAng <- 20 # align col
DRA_xAngRot <- (DRA_xAng - 90) / 180 * pi
DRA_xRot <- rbind(
  c(1, 0, 0),
  c(0, cos(DRA_xAngRot), sin(DRA_xAngRot)),
  c(0, -sin(DRA_xAngRot), cos(DRA_xAngRot))
)
DRA_xAngRot2 <- DRA_xAng / 180 * pi
DRA_xRot2 <- rbind(
  c(1, 0, 0),
  c(0, cos(DRA_xAngRot2), sin(DRA_xAngRot2)),
  c(0, -sin(DRA_xAngRot2), cos(DRA_xAngRot2))
)
# top view
# Rotation matrices for the top view, each built from an angle in degrees.
DRA_xAng_top <- 30
DRA_xAngRot_top <- (DRA_xAng_top)/180*pi
DRA_xRot_top <- matrix(c(1,0,0,
                         0, cos(DRA_xAngRot_top), sin(DRA_xAngRot_top),
                         0, -sin(DRA_xAngRot_top), cos(DRA_xAngRot_top)), ncol = 3, byrow = T)
DRA_yAng_top <- 90
DRA_yAngRot_top <- DRA_yAng_top/180*pi
# NOTE(review): despite the "y"/"z" names, DRA_yRot_top and DRA_zRot_top below
# are constructed with the same x-axis rotation layout as DRA_xRot_top
# (1,0,0 in the first row). Possibly a copy-paste left-over — confirm against
# the viewpoint code that consumes them before changing.
DRA_yRot_top <- matrix(c(1,0,0,
                         0, cos(DRA_yAngRot_top), sin(DRA_yAngRot_top),
                         0, -sin(DRA_yAngRot_top), cos(DRA_yAngRot_top)), ncol = 3, byrow = T)
DRA_zAng_top <- 80
DRA_zAngRot_top <- (DRA_zAng_top)/180*pi
DRA_zRot_top <- matrix(c(1,0,0,
                         0, cos(DRA_zAngRot_top), sin(DRA_zAngRot_top),
                         0, -sin(DRA_zAngRot_top), cos(DRA_zAngRot_top)), ncol = 3, byrow = T)
# 10 um scale-bar endpoints (nm): axis-swap (y <- z, z <- -y) then rotate by
# DRA_xAng about x via DRA_xRot2.
DRA_sbar <- matrix(c(30000,-5000,40000, 30000,5000,40000), ncol=3,byrow=T) %*%
  t(matrix(c(1,0,0,
             0,0,1,
             0,-1,0), ncol=3, byrow=T)) %*% t(DRA_xRot2)
# # -- side, anterior view
# DRA_sbar_side <- matrix(c(40000,-5000,20000, 40000,5000,20000), ncol=3,byrow=T) %*%
# t(matrix(c(1,0,0,
# 0,0,1,
# 0,-1,0), ncol=3, byrow=T)) %*% t(DRA_xRot2)
#
# #rotation angle around y-axis to level layer 5
# DRA_yAng <- 0
# DRA_yAngRot <- -DRA_yAng/180*pi
# DRA_yRot <- matrix(c(cos(DRA_yAngRot), 0, sin(DRA_yAngRot),
# 0,1,0,
# -sin(DRA_yAngRot), 0, cos(DRA_yAngRot)), ncol = 3, byrow = T)
#
# # DRA_sbar_rot90 <- DRA_sbar %*% t(matrix(c(1,0,0,
# # 0,0,1,
# # 0,-1,0), ncol=3, byrow=T)) %*% t(DRA_xRot) %*% t(DRA_yRot)
#
# DRA_sbar_rot90 <- matrix(c(30000,5000,10000, 30000,5000,20000), ncol=3,byrow=T)
# -- side, polar view
# Scale-bar endpoints (nm) for the polar side view, rotated by DRA_xRot2.
# (Fixed: use `<-` instead of `=` and TRUE instead of T, consistent with the
# rest of the file.)
DRA_sbar_side <- matrix(c(30000, 5000, 10000, 30000, 5000, 20000),
                        ncol = 3, byrow = TRUE) %*% t(DRA_xRot2)
# rotation angle around y-axis to level layer 5
DRA_yAng <- -20
DRA_yAngRot <- -DRA_yAng / 180 * pi
# Proper y-axis rotation matrix (by -DRA_yAng degrees).
DRA_yRot <- matrix(c(cos(DRA_yAngRot), 0, sin(DRA_yAngRot),
                     0, 1, 0,
                     -sin(DRA_yAngRot), 0, cos(DRA_yAngRot)), ncol = 3, byrow = TRUE)
# Scale bars for the 90-degree-rotated side views: x rotation then y rotation.
DRA_sbar_rot90 <- DRA_sbar %*% t(DRA_xRot) %*% t(DRA_yRot)
DRA_sbar_side_5 <- DRA_sbar_side %*% t(DRA_xRot) %*% t(DRA_yRot)
# col for top view --------------------------------------------------------
# Map each aMe12 neuron's tagged "columnar branch" nodes onto the nearest Mi1
# M5 column position (and vice versa), by squared Euclidean distance.
# aMe12_ind_Mi1[[n]] : for every tagged branch point of aMe12[[n]], the row
#                      index of the closest point in Mi1_M5_xyz.
# Mi1_ind_aMe12[[n]] : for every Mi1 M5 point, the index of the closest tagged
#                      branch point of aMe12[[n]].
# (Fixed: preallocate lists, seq_along instead of hard-coded 1:3, `<-` instead
# of `=`, and which.min instead of order(...)[1].)
aMe12_ind_Mi1 <- vector("list", length(aMe12))
Mi1_ind_aMe12 <- vector("list", length(aMe12))
for (ii_n in seq_along(aMe12)) {
  tar <- aMe12[[ii_n]]
  # node indices carrying the "columnar branch" tag
  ii_col <- match(tar$tags$`columnar branch`, tar$d$PointNo)
  xyz_col <- xyzmatrix(tar$d[ii_col, ])
  # which.min returns the first index of the minimum, matching order(...)[1]
  aMe12_ind_Mi1[[ii_n]] <- apply(xyz_col, 1, function(x) which.min(rowSums(sweep(Mi1_M5_xyz, 2, x)^2)))
  Mi1_ind_aMe12[[ii_n]] <- apply(Mi1_M5_xyz, 1, function(x) which.min(rowSums(sweep(xyz_col, 2, x)^2)))
}
# Columns shown in the top view: within 60 um of home column 1, or within
# 30 um of home columns 2/3 (Euclidean distance on M5 positions, in nm).
i1 <- sqrt(rowSums(sweep(xyz_M5_avg, 2, xyz_M5_avg[ind_Mi1_DRA_hcol[1],], '-')^2)) <60000
i2 <- sqrt(rowSums(sweep(xyz_M5_avg, 2, xyz_M5_avg[ind_Mi1_DRA_hcol[2],], '-')^2)) <30000
i3 <- sqrt(rowSums(sweep(xyz_M5_avg, 2, xyz_M5_avg[ind_Mi1_DRA_hcol[3],], '-')^2)) <30000
ind_Mi1_top <- which(i1|i2|i3)
# Partition: grey = not DRA and not excluded; red = DRA columns.
ind_Mi1_top_grey <- ind_Mi1_top[!(ind_Mi1_top %in% c(ind_Mi1_DRA_excl, ind_Mi1_DRA))]
ind_Mi1_top_red <- ind_Mi1_top[ind_Mi1_top %in% c(ind_Mi1_DRA)]
# Split grey columns by whether they are targeted by an aMe12 columnar branch
# (p = targeted, y = not). Presumably pale vs yellow candidates — TODO confirm
# against the plotting code that consumes these indices.
ind_Mi1_top_y <- ind_Mi1_top_grey[!(ind_Mi1_top_grey %in% unlist(aMe12_ind_Mi1))]
ind_Mi1_top_p <- ind_Mi1_top_grey[(ind_Mi1_top_grey %in% unlist(aMe12_ind_Mi1))]
# meshes ------------------------------------------------------------------
# - Mi1 M5 nodes
# Mi1 M5 positions in the DRA frame; the _yz copy zeroes the first (depth)
# coordinate so distances are measured in the tangential yz-plane only.
xyz_M5_avg_xform_DRA <- sweep(xyz_M5_avg, 2, DRA_me_pca$center) %*% DRA_me_pca$rotation
xyz_M5_avg_xform_DRA_yz <- xyz_M5_avg_xform_DRA
xyz_M5_avg_xform_DRA_yz[,1] <- 0
# Nnb <- 4
# ind_nb4 <- xyz_M5_avg_xform_DRA_yz^2 %>% rowSums() %>% order() %>% .[1:Nnb]
# The 16 columns closest to the frame origin (in the yz-plane).
Nnb <- 16
ind_nb16 <- xyz_M5_avg_xform_DRA_yz^2 %>% rowSums() %>% order() %>% .[1:Nnb]
# Nnb <- 25
# ind_nb25 <- xyz_M5_avg_xform_DRA_yz^2 %>% rowSums() %>% order() %>% .[1:Nnb]
# nopen3d()
# points3d(xyz_M5_avg_xform_DRA_yz, col='grey', size = 20)
# points3d(xyz_M5_avg_xform_DRA, col='grey', size = 10)
# # points3d(ref_com4,col='red',size=10)
# points3d(xyz_M5_avg_xform_DRA_yz[ind_nb25,], size=25, col='blue')
#
# # # ind_16 <- identify3d(xyz_M5_avg_xform_yz)
# # Mi1_16 <- xEucl_neu(Mi1[ind_16], me_pca$rotation, me_pca$center)
#
# plot3d(DRAR7_xform, col='green', lwd=2)
# plot3d(DRAR8_xform, col='gold2', lwd=2)
# make new local mesh
# Local alpha-shape meshes of the medulla around the 16-column neighbourhood:
# vertices whose yz-distance to the neighbourhood centre is below a multiple
# (1.2x for the large, 0.8x for the default) of the neighbourhood diameter.
# ref_com_avg <- colMeans(ref_com4)
xyz <- t(DRA_ME_msh_xform$vb[1:3,])
dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb16,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
xyz_msh <- xyz[dd < 1.2*max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb16,])), ]
DRA_ME_msh_local_large <- ashape3d(xyz_msh, alpha = 60000) %>% as.mesh3d()
xyz_msh <- xyz[dd < 0.8*max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb16,])), ]
DRA_ME_msh_local <- ashape3d(xyz_msh, alpha = 60000) %>% as.mesh3d()
# xyz_msh <- xyz[dd < max(dist(ref_com4)), ]
# ME_msh_local_2 <- ashape3d(xyz_msh, alpha = 50000) %>% as.mesh3d()
# - transform LO mesh into the DRA frame (same recipe as the ME mesh above).
DRA_LO_msh_xform <- LO_msh
DRA_LO_msh_xform$vb[1:3,] <- sweep(t(LO_msh$vb[1:3,]), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation %>% t()
# ME layer middle DRAR7, polar view ----------------------------------------------------------------
# Fit smooth top/bottom medulla boundary surfaces around the polar column and
# sample them along two manually chosen cut lines ("dorsal" and "posterior").
# -- ME boundary
yz_msh <- t(DRA_ME_msh_local$vb)
# yz_dorsal <- data.frame(y = seq(20000, -20000, length.out = 20),
#                         z = seq(0, -0, length.out = 20))
#
# yz_posterior <- data.frame(y = seq(0, 0, length.out = 16),
#                            z = seq(-20000, 20000, length.out = 16))
# Sample lines in the yz-plane: dorsal cut varies y at z = 0; posterior cut
# varies z at y = 0. Endpoints were chosen by eye against the local mesh.
yz_dorsal <- data.frame(y = seq(5000, -45000, length.out = 20),
                        z = seq(0, -0, length.out = 20))
yz_posterior <- data.frame(y = seq(0, 0, length.out = 16),
                           z = seq(-45000, 5000, length.out = 16))
# choose yz
# Interactive sanity check of the cut lines against the local mesh (rgl).
nopen3d()
points3d(yz_dorsal)
points3d(yz_posterior, col='red')
# points3d(yz_msh[,1:3])
shade3d(DRA_ME_msh_local, alpha=0.1, col='gray')
# shade3d(DRA_ME_msh_xform, alpha=0.1, col='gold')
axes3d(c('x','y','z')); title3d('','','x','y','z')
# points on mesh
# Restrict mesh vertices to the 16-column neighbourhood (yz-distance below the
# neighbourhood diameter).
xyz <- t(DRA_ME_msh_xform$vb[1:3,])
dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb16,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
xyz_msh <- xyz[dd < max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb16,])), ]
# top
# Quadratic surface fit x = f(y, z) to the distal (top) mesh vertices
# (x > 17000, hand-tuned threshold), then evaluate along both cut lines.
xyz_top <- xyz_msh[xyz_msh[,1] > 17000, ]
x <- xyz_top[,1]; y <- xyz_top[,2]; z <- xyz_top[,3]
fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
top_bd_dorsal <- cbind(valfit, yz_dorsal)
valfit <- predict(fitlm, yz_posterior) #generate values from the fit
top_bd_posterior <- cbind(valfit, yz_posterior)
# bottom
# Same fit for the proximal (bottom) boundary (x < -24000, hand-tuned).
xyz_bot <- xyz_msh[xyz_msh[,1] < -24000, ]
x <- xyz_bot[,1]; y <- xyz_bot[,2]; z <- xyz_bot[,3]
fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
bot_bd_dorsal <- cbind(valfit, yz_dorsal)
valfit <- predict(fitlm, yz_posterior) #generate values from the fit
bot_bd_posterior <- cbind(valfit, yz_posterior)
# # 2D mesh as outline
# DRA_ME_msh_local_bd <- rbind(top_bd_dorsal, apply(bot_bd_dorsal, MARGIN = 2, rev))
# DRA_ME_msh_local_bd <- rbind(DRA_ME_msh_local_bd[-1,], DRA_ME_msh_local_bd[1,]) %>%
# cbind(DRA_ME_msh_local_bd, .) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# # -- layer Fischbach
# bp <- c(0, 0.55,0.4,0.5,0.2,0.35,0.4,0.6,0.3,0.7,0.4) %>% rev()
# bp_prob <- cumsum(bp)/sum(bp)
# x_qua <- apply(cbind(top_bd$valfit, bot_bd$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp_prob) )
# layers <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp_prob)) {
# cc <- cbind(x_qua[j,], yz)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# layers <- rbind(layers, c2)
# }
# layers[,1] <- layers[,1] + 3000
# --layers from 7-column
# Medulla layer boundaries (M1..M10) as cumulative depth fractions, taken from
# 7-column data; interpolate them between the fitted top and bottom boundary
# surfaces along each cut line, emitting rgl segment pairs for segments3d().
# bp7c <- c(0, 5.5,14.5,20,23,28,36,42,48,56,61) #layer 1 to 10
bp7c <- c(0, 5.5, 14.5, 20, 23, 28, 33, 39, 45, 56, 61) #layer 1 to 10, modify 6-7-8-9 by Mi1, C2, Tm5 and Tm20
# NOTE: %>% binds tighter than binary `-`, so this is max(bp7c) - rev(bp7c),
# i.e. depths re-expressed from the other boundary (layer 10 to 1).
bp7c <- max(bp7c) - bp7c %>% rev() #layer 10 to 1
bp7c_prob <- bp7c/max(bp7c)
# For each sample point on the dorsal cut, interpolate layer depths between
# top and bottom boundary x-values; x_qua is (n_layers x n_samples).
x_qua <- apply(cbind(top_bd_dorsal$valfit, bot_bd_dorsal$valfit), MARGIN = 1,
               function(x) quantile(x, probs = bp7c_prob) )
# Build consecutive-point segment pairs per layer line (rbind-in-loop is slow
# but n is tiny here).
DRA_layers_ME_dorsal <- matrix(ncol = 3, nrow = 0)
for (j in 1:length(bp7c_prob)) {
  cc <- cbind(x_qua[j,], yz_dorsal)
  c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
    t() %>%
    matrix(., ncol = 3, byrow = T)
  DRA_layers_ME_dorsal <- rbind(DRA_layers_ME_dorsal, c2)
}
# DRA_layers_ME_dorsal[,1] <- DRA_layers_ME_dorsal[,1] * 0.97 + 1100
# Same construction along the posterior cut.
x_qua <- apply(cbind(top_bd_posterior$valfit, bot_bd_posterior$valfit), MARGIN = 1,
               function(x) quantile(x, probs = bp7c_prob) )
DRA_layers_ME_posterior <- matrix(ncol = 3, nrow = 0)
for (j in 1:length(bp7c_prob)) {
  cc <- cbind(x_qua[j,], yz_posterior)
  c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
    t() %>%
    matrix(., ncol = 3, byrow = T)
  DRA_layers_ME_posterior <- rbind(DRA_layers_ME_posterior, c2)
}
# Pull out the last segment of the second layer line (bot2) and of the last
# layer line (top2), remove them, and close the layer stack at the end of the
# posterior cut with a vertical edge plus rounded corners.
bot2 <- DRA_layers_ME_posterior[(nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),]
top2 <- DRA_layers_ME_posterior[(nrow(DRA_layers_ME_posterior)-1):(nrow(DRA_layers_ME_posterior)), ]
DRA_layers_ME_posterior <- DRA_layers_ME_posterior[-c(
  (nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),
  (nrow(DRA_layers_ME_posterior)-1):(nrow(DRA_layers_ME_posterior))), ]
# Vertical edge connecting layers 2..10 at the final posterior sample point.
cc <- cbind(x_qua[2:10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])
c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
  t() %>%
  matrix(., ncol = 3, byrow = T)
DRA_layers_ME_posterior <- rbind(DRA_layers_ME_posterior, c2)
# add round corner
# Bottom corner: arc (via project helper round_corner) joining the removed
# bot2 segment to the start of the vertical edge.
pt_corner <- rbind(bot2,
                   as.numeric(c(x_qua[2,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
ptArc <- round_corner(pt_corner)
cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
  t() %>%
  matrix(., ncol = 3, byrow = T)
DRA_layers_ME_posterior <- rbind(DRA_layers_ME_posterior, c2)
# Top corner: same construction for the removed top2 segment.
pt_corner <- rbind(top2,
                   as.numeric(c(x_qua[10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
ptArc <- round_corner(pt_corner)
cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
  t() %>%
  matrix(., ncol = 3, byrow = T)
DRA_layers_ME_posterior <- rbind(DRA_layers_ME_posterior, c2)
# Rotated copy for the 90-degree side view.
DRA_layers_ME_posterior_rot90 <- DRA_layers_ME_posterior %*% t(DRA_xRot2)
# bd_Mx <- x_qua[,ncol(x_qua)] %>% rev()
# bd_Mx <- bd_Mx * 0.97 + 1100
# bd_Mx <- bd_Mx[-length(bd_Mx)] + diff(bd_Mx)/2
# DRA_layers_ME_anno <- cbind(bd_Mx, yz_dorsal[nrow(yz_dorsal),])
# # ME layer side DRAR7, 2 - posterior ---------------------------------------------------
#
# # Nnb <- 9
# # ind_nb <- sweep(xyz_M5_avg_xform_DRA, 2,
# # xyz_M5_avg_xform_DRA[ind_Mi1_DRA_hcol[2],])^2 %>%
# # rowSums() %>%
# # order() %>%
# # .[1:Nnb]
# # ind_nb <- c(704, 644, 419, 217, 323, 537, 642,221)
#
# ind_nb <- c(521, 325, 656, 770, 268, 518, 516, 517, 269, 774, 322, 494, 425)
#
# # nopen3d()
# # points3d(xyz_M5_avg_xform_DRA_yz, col='grey', size = 20)
# # # points3d(xyz_M5_avg_xform_DRA, col='grey', size = 10)
# # # points3d(ref_com4,col='red',size=10)
# # points3d(xyz_M5_avg_xform_DRA_yz[ind_nb,], size=25, col='blue')
# # plot3d(DRAR7_xform, col='green', lwd=2)
# # plot3d(DRAR8_xform, col='gold2', lwd=2)
#
# # -- make local mesh
# xyz <- t(DRA_ME_msh_xform$vb[1:3,])
# dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
# xyz_msh <- xyz[dd < 1.2*max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb,])), ]
# DRA_ME_msh_local_2 <- ashape3d(xyz_msh, alpha = 60000) %>% as.mesh3d()
#
# # -- ME boundary
# yz_msh <- t(DRA_ME_msh_local_2$vb)
# yz_dorsal <- data.frame(y = seq(-20000, -50000, length.out = 20),
# z = seq(-0000, -60000, length.out = 20))
# yz_posterior <- data.frame(y = seq(-10000, -45000, length.out = 16),
# z = seq(-40000, -25000, length.out = 16))
#
# # choose yz
# nopen3d()
# plot3d(DRAR7_xform[[2]], lwd=2)
# # points3d(yz_msh[,1:3])
# shade3d(DRA_ME_msh_local_2, alpha=0.1, col='gray')
# points3d(yz_dorsal)
# points3d(yz_posterior, col='red')
# # shade3d(DRA_ME_msh_xform, alpha=0.1, col='gold')
# axes3d(c('x','y','z')); title3d('','','x','y','z')
#
# points3d(xyz_bot)
# # points3d(top_bd_dorsal_2)
# # points3d(bot_bd_dorsal_2)
#
# # points on mesh
# xyz <- t(DRA_ME_msh_xform$vb[1:3,])
# dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
# xyz_msh <- xyz[dd < max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb,])), ]
# # top
# xyz_top <- xyz_msh[xyz_msh[,1] > 30000, ]
# x <- xyz_top[,1]; y <- xyz_top[,2]; z <- xyz_top[,3]
# fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
# valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
# top_bd_dorsal_2 <- cbind(valfit, yz_dorsal)
# valfit <- predict(fitlm, yz_posterior) #generate values from the fit
# top_bd_posterior_2 <- cbind(valfit, yz_posterior)
# # bottom
# xyz_bot <- xyz_msh[xyz_msh[,1] < -18000 & xyz_msh[,1] > -28000, ]
# x <- xyz_bot[,1]; y <- xyz_bot[,2]; z <- xyz_bot[,3]
# fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
# valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
# bot_bd_dorsal_2 <- cbind(valfit, yz_dorsal)
# valfit <- predict(fitlm, yz_posterior) #generate values from the fit
# bot_bd_posterior_2 <- cbind(valfit, yz_posterior)
#
# x_qua <- apply(cbind(top_bd_dorsal_2$valfit, bot_bd_dorsal_2$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp7c_prob) )
# DRA_layers_ME_dorsal_2 <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp7c_prob)) {
# cc <- cbind(x_qua[j,], yz_dorsal)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_dorsal_2 <- rbind(DRA_layers_ME_dorsal_2, c2)
# }
# # manual translation
# DRA_layers_ME_dorsal_2[,1] <- DRA_layers_ME_dorsal_2[,1] * 1.3 - 12000
#
# x_qua <- apply(cbind(top_bd_posterior_2$valfit, bot_bd_posterior_2$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp7c_prob) )
# DRA_layers_ME_posterior_2 <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp7c_prob)) {
# cc <- cbind(x_qua[j,], yz_posterior)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_2 <- rbind(DRA_layers_ME_posterior_2, c2)
# }
# bot2 <- DRA_layers_ME_posterior_2[(nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),]
# top2 <- DRA_layers_ME_posterior_2[(nrow(DRA_layers_ME_posterior_2)-1):(nrow(DRA_layers_ME_posterior_2)), ]
# DRA_layers_ME_posterior_2 <- DRA_layers_ME_posterior_2[-c(
# (nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),
# (nrow(DRA_layers_ME_posterior_2)-1):(nrow(DRA_layers_ME_posterior_2))), ]
#
# cc <- cbind(x_qua[2:10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_2 <- rbind(DRA_layers_ME_posterior_2, c2)
#
# # add round corner
# pt_corner <- rbind(bot2,
# as.numeric(c(x_qua[2,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
# ptArc <- round_corner(pt_corner)
# cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_2 <- rbind(DRA_layers_ME_posterior_2, c2)
#
# pt_corner <- rbind(top2,
# as.numeric(c(x_qua[10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
# ptArc <- round_corner(pt_corner)
# cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_2 <- rbind(DRA_layers_ME_posterior_2, c2)
#
# # manual adjust
# DRA_layers_ME_posterior_2[,1] <- DRA_layers_ME_posterior_2[,1]*1.1 - 9000
#
# # DRA_layers_ME_posterior_2_rot90 <- DRA_layers_ME_posterior_2 %*% t(DRA_xRot2)
#
#
#
#
# # # for DRAR7 - 3
# # # -- ME boundary
# # yz_msh <- t(DRA_ME_msh_local_2$vb)
# # yz_dorsal <- data.frame(y = seq(-20000, 30000, length.out = 20),
# # z = seq(100000, 50000, length.out = 20))
# #
# # # bottom
# # xyz_bot <- xyz_msh[xyz_msh[,1] < -45000 & xyz_msh[,1] > -70000, ]
# # # top
# # xyz_top <- xyz_msh[xyz_msh[,1] > 21000, ]
# # ME layer side DRAR7, 3 - anterior ---------------------------------------
#
# Nnb <- 16
# ind_nb <- sweep(xyz_M5_avg_xform_DRA, 2,
# xyz_M5_avg_xform_DRA[ind_Mi1_DRA_hcol[3],])^2 %>%
# rowSums() %>%
# order() %>%
# .[1:Nnb]
#
# nopen3d()
# points3d(xyz_M5_avg_xform_DRA_yz, col='grey', size = 20)
# # points3d(xyz_M5_avg_xform_DRA, col='grey', size = 10)
# # points3d(ref_com4,col='red',size=10)
# points3d(xyz_M5_avg_xform_DRA_yz[ind_nb,], size=25, col='blue')
# plot3d(DRAR7_xform, col='green', lwd=2)
# plot3d(DRAR8_xform, col='gold2', lwd=2)
#
# # -- make local mesh
# xyz <- t(DRA_ME_msh_xform$vb[1:3,])
# dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
# xyz_msh <- xyz[dd < 1.2*max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb,])), ]
# DRA_ME_msh_local_3 <- ashape3d(xyz_msh, alpha = 60000) %>% as.mesh3d()
#
# # -- ME boundary
# # yz_msh <- t(DRA_ME_msh_local_3$vb)
# # yz_dorsal <- data.frame(y = seq(80000, 35000, length.out = 20),
# # z = seq(-40000, -0000, length.out = 20))
# #
# # yz_posterior <- data.frame(y = seq(30000, 70000, length.out = 16),
# # z = seq(-40000, -8000, length.out = 16))
#
# yz_msh <- t(DRA_ME_msh_local_3$vb)
# yz_dorsal <- data.frame(y = seq(-35000, 30000, length.out = 16),
# z = seq(-30000, 10000, length.out = 16))
#
# yz_posterior <- data.frame(y = seq( -15000,7500, length.out = 20),
# z = seq( 21300,-12000, length.out = 20))
#
# # choose yz
# nopen3d()
# shade3d(DRA_ME_msh_local_3, alpha=0.1, col='gray')
# plot3d(DRAR7_xform[[3]], lwd=2)
# points3d(yz_dorsal)
# points3d(yz_posterior, col='red')
# # points3d(yz_msh[,1:3])
# # shade3d(DRA_ME_msh_xform, alpha=0.1, col='gold')
# axes3d(c('x','y','z')); title3d('','','x','y','z')
# # points3d(xyz_bot)
# # points3d(top_bd_dorsal_3)
# # points3d(bot_bd_dorsal_3)
#
# # points on mesh
# xyz <- t(DRA_ME_msh_xform$vb[1:3,])
# dd <- sweep(xyz[,2:3], 2, colMeans(xyz_M5_avg_xform_DRA_yz[ind_nb,])[-1]) %>% .^2 %>% rowSums() %>% sqrt()
# xyz_msh <- xyz[dd < max(dist(xyz_M5_avg_xform_DRA_yz[ind_nb,])), ]
# # top
# xyz_top <- xyz_msh[xyz_msh[,1] > 0, ]
# x <- xyz_top[,1]; y <- xyz_top[,2]; z <- xyz_top[,3]
# fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
# valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
# top_bd_dorsal_3 <- cbind(valfit, yz_dorsal)
# valfit <- predict(fitlm, yz_posterior) #generate values from the fit
# top_bd_posterior_3 <- cbind(valfit, yz_posterior)
# # bottom
# # xyz_bot <- xyz_msh[xyz_msh[,1] < -24000, ]
# xyz_bot <- xyz_msh[xyz_msh[,1] < -24000 & xyz_msh[,2] < 68000, ]
# x <- xyz_bot[,1]; y <- xyz_bot[,2]; z <- xyz_bot[,3]
# fitlm <- lm(x ~ poly(y, z, degree = 2, raw = T))
# valfit <- predict(fitlm, yz_dorsal) #generate values from the fit
# bot_bd_dorsal_3 <- cbind(valfit, yz_dorsal)
# valfit <- predict(fitlm, yz_posterior) #generate values from the fit
# bot_bd_posterior_3 <- cbind(valfit, yz_posterior)
#
# x_qua <- apply(cbind(top_bd_dorsal_3$valfit, bot_bd_dorsal_3$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp7c_prob) )
# DRA_layers_ME_dorsal_3 <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp7c_prob)) {
# cc <- cbind(x_qua[j,], yz_dorsal)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_dorsal_3 <- rbind(DRA_layers_ME_dorsal_3, c2)
# }
#
# # manual translation
# DRA_layers_ME_dorsal_3[,1] <- DRA_layers_ME_dorsal_3[,1] - 4000
#
#
# x_qua <- apply(cbind(top_bd_posterior_3$valfit, bot_bd_posterior_3$valfit), MARGIN = 1,
# function(x) quantile(x, probs = bp7c_prob) )
# DRA_layers_ME_posterior_3 <- matrix(ncol = 3, nrow = 0)
# for (j in 1:length(bp7c_prob)) {
# cc <- cbind(x_qua[j,], yz_posterior)
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_3 <- rbind(DRA_layers_ME_posterior_3, c2)
# }
# bot2 <- DRA_layers_ME_posterior_3[(nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),]
# top2 <- DRA_layers_ME_posterior_3[(nrow(DRA_layers_ME_posterior_3)-1):(nrow(DRA_layers_ME_posterior_3)), ]
# DRA_layers_ME_posterior_3 <- DRA_layers_ME_posterior_3[-c(
# (nrow(yz_posterior)*2-3):(nrow(yz_posterior)*2-2),
# (nrow(DRA_layers_ME_posterior_3)-1):(nrow(DRA_layers_ME_posterior_3))), ]
#
# cc <- cbind(x_qua[2:10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_3 <- rbind(DRA_layers_ME_posterior_3, c2)
#
# # add round corner
# pt_corner <- rbind(bot2,
# as.numeric(c(x_qua[2,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
# ptArc <- round_corner(pt_corner)
# cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_3 <- rbind(DRA_layers_ME_posterior_3, c2)
#
# pt_corner <- rbind(top2,
# as.numeric(c(x_qua[10,nrow(yz_posterior)], yz_posterior[nrow(yz_posterior),])))
# ptArc <- round_corner(pt_corner)
# cc <- rbind(pt_corner[3,], ptArc, pt_corner[1,])
# c2 <- cbind(cc[-nrow(cc),], cc[-1,]) %>%
# t() %>%
# matrix(., ncol = 3, byrow = T)
# DRA_layers_ME_posterior_3 <- rbind(DRA_layers_ME_posterior_3, c2)
#
# # manual adjust
# DRA_layers_ME_posterior_3[,1] <- DRA_layers_ME_posterior_3[,1] - 5500
#
# # DRA_layers_ME_posterior_3_rot90 <- DRA_layers_ME_posterior_3 %*% t(DRA_xRot2)
# # SAVE --------------------------------------------------------------------
#
# save(DRA_layers_ME_dorsal, DRA_layers_ME_dorsal_2, DRA_layers_ME_dorsal_3,
# DRA_layers_ME_posterior, DRA_layers_ME_posterior_2, DRA_layers_ME_posterior_3,
# DRA_layers_ME_posterior_rot90, file = "data/DRA_layers.RData")
# # Dm-DRA1 test view ---------------------------------------------------------------------
#
# type <- "Dm-DRA1"
# tb <- read.csv(paste("table_by_type_out_DRA/", "DRAR7R8_outgoing_", type, ".csv", sep = ''))
#
# skid <- tb$skid %>% na.omit()
# neu <- read.neurons.catmaid(skid, .progress='text' )
# neu_xform <- xEucl_neu(neu, DRA_me_pca$rotation, DRA_me_pca$center)
#
# ii <- 4 #4, 2, 1 for 3 seed col positions
#
#
# # - top
# nopen3d()
# par3d('windowRect' = c(100,100,1300,1300))
# plot3d(neu_xform[[ii]], col='gray40', lwd=2, soma = T, WithNodes = F, lit=F)
# pch3d(xyz_M5_avg_xform_DRA_yz[ind_Mi1_DRA_hcol,,drop=F], radius=5000,col="#b7252a",pch=16,alpha=0.7)
# pch3d(xyz_M5_avg_xform_DRA_yz[ind_Mi1_top_grey,], radius=5000,col="grey",pch=16,alpha=0.2)
# pch3d(xyz_M5_avg_xform_DRA_yz[ind_Mi1_top_red,], radius=5000,col="#b7252a",pch=16,alpha=0.3)
# segments3d(sweep(DRA_sbar,2,c(0,0,-20000)), lwd=2)
# text3d(colMeans(DRA_sbar)+c(0,0,23000), texts = "10 um", cex = 1.5)
# rgl.viewpoint(fov=0,zoom=1, userMatrix= t(translationMatrix(1e4,0,0)) %*%
# rotationMatrix(-80/180*pi,0,0,1) %*%
# rotationMatrix(-90/180*pi,0,1,0) %*%
# rotationMatrix(DRA_xAng/180*pi,1,0,0) )
#
# # add syn from R7
# conn_in_R7 <- catmaid_get_connectors_between(pre_skids = skid_R7s, post_skids = skid[ii])
# if (!is.null(conn_in_R7)) {
# conn_in_R7 <- conn_in_R7[, c("post_node_x", "post_node_y", "post_node_z")]
# conn_in_R7_xform <- sweep(as.matrix(conn_in_R7), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation
# pch3d(conn_in_R7_xform, pch=16, radius=1000, alpha=0.7, col=pal_syn['R7'])
# }
#
# # # save
# # rgl.snapshot(filename = paste("F5_", type, "_", skid[ii], "_top.png", sep = ''))
#
#
# # # - side
# # nopen3d()
# # par3d('windowRect' = c(100,100,1300,1300))
# # plot3d(neu_xform[[ii]], col='gray40', lwd=2, soma = T, WithNodes = F, lit=F)
# # segments3d(DRA_sbar_side, lwd=2)
# # plot3d(DRAR7_xform[[1]], col=pal_syn["R7"], lwd=2, soma = T, WithNodes = F, lit=F)
# # segments3d(DRA_layers_ME_dorsal, lwd=1)
# # rgl.viewpoint(fov=0,zoom=0.75, userMatrix= rotationMatrix(90/180*pi,0,0,1) %*%
# # rotationMatrix(0/180*pi,1,0,0) %*%
# # rotationMatrix(DRA_yAng/180*pi,0,1,0) ) # add 15 deg for the tilt
# # conn_in_R7 <- catmaid_get_connectors_between(pre_skids = skid_R7s, post_skids = skid[ii])
# # if (!is.null(conn_in_R7)) {
# # conn_in_R7 <- conn_in_R7[, c("post_node_x", "post_node_y", "post_node_z")]
# # conn_in_R7_xform <- sweep(as.matrix(conn_in_R7), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation
# # pch3d(conn_in_R7_xform, pch=16, radius=1000, alpha=0.7, col=pal_syn['R7'])
# # }
# #
# # # save
# # rgl.snapshot(filename = paste("F5_", type, "_", skid[ii], "_side_dorsal.png", sep = ''))
# #
#
#
# # rot 90
# ii_hc <- sweep(xyz_M5_avg_xform_DRA[ind_Mi1_DRA_hcol,], 2, colMeans(xyzmatrix(neu_xform[[ii]]$d)), '-') %>%
# .^2 %>% rowSums() %>% order()
# ii_hc[1]
#
# nopen3d()
# par3d('windowRect' = c(100,100,1300,1300))
# plot3d(neu_xform[[ii]], col='gray40', lwd=2, soma = T, WithNodes = F, lit=F)
# if (ii_hc[1] == 1) {
# segments3d(DRA_layers_ME_posterior_rot90, lwd=1)
# rgl.viewpoint(fov=0,zoom=1, userMatrix= rotationMatrix(90/180*pi,0,0,1) %*%
# rotationMatrix((DRA_xAng-90)/180*pi,1,0,0) %*%
# rotationMatrix(DRA_yAng/180*pi,0,1,0))
# } else if (ii_hc[1] == 2) {
# segments3d(DRA_layers_ME_posterior_2, lwd=1)
# rgl.viewpoint(fov=0,zoom=1,
# userMatrix= matrix(c(-0.084, 0.870, -0.485, -1044.4827,
# 0.996, 0.077, -0.034, 392.4445,
# 0.0077, -0.486, -0.8738, -13105.9336,
# 0,0,0,1), ncol = 4, byrow = T))
# } else if (ii_hc[1] == 3) {
# segments3d(DRA_layers_ME_posterior_3, lwd=1)
# rgl.viewpoint(fov=0,zoom=1,
# userMatrix= matrix(c(0.164, -0.752, -0.6374, -11506.360,
# 0.984, 0.1626, 0.062, 5351.796,
# 0.0569, -0.6379, 0.7679, 0.000,
# 0,0,0,1), ncol = 4, byrow = T))
# }
#
# conn_in_R7 <- catmaid_get_connectors_between(pre_skids = skid_R7s, post_skids = skid[ii])
# if (!is.null(conn_in_R7)) {
# conn_in_R7 <- conn_in_R7[, c("post_node_x", "post_node_y", "post_node_z")]
# conn_in_R7_xform <- sweep(as.matrix(conn_in_R7), 2, DRA_me_pca$center) %*% DRA_me_pca$rotation
# pch3d(conn_in_R7_xform, pch=16, radius=1000, alpha=0.7, col=pal_syn['R7'])
# }
#
# # # save
# # rgl.snapshot(filename = paste("F5_", type, "_", skid[ii], "_side_posterial.png", sep = ''))
|
gsc.sub <- function(
    expression,
    genes,
    g,
    rwr.r,
    rwr.cutoff
) {
    # Compute a single compactness score for a gene set on a cellular network.
    #
    # This function is run a very large number of times, so it is kept lean:
    # no input checks are performed here (all checks belong in compactness.sig).
    #
    # Args:
    #   expression: named numeric vector of expression values, indexed by the
    #     vertex names appearing in `g` (assumed; confirm against caller).
    #   genes: identifiers of the gene set, resolvable by get.vertex.numbers().
    #   g: igraph network object.
    #   rwr.r: random-walk-with-restart restart probability.
    #   rwr.cutoff: per-gene convergence cutoff for the RWR iteration.
    #
    # Returns: a single numeric compactness score (mean rank of the gene-set
    #   vertices under the converged RWR probabilities).
    edge.attr <- "edge.attr"
    genes.vnum <- get.vertex.numbers(g, genes)
    genes.vnum.size <- length(genes.vnum)
    genes.seq <- seq_len(genes.vnum.size)  # safe even when the gene set is empty (1:0 footgun)

    # score edges in g: product of the expression values at the two endpoints;
    # zeros are replaced by the smallest non-zero value so no edge weight vanishes
    el <- get.edgelist(g, names = TRUE)
    expression[expression == 0] <- min(expression[expression != 0], na.rm = TRUE)
    edge.weights <- expression[el[, 1]] * expression[el[, 2]]

    # extract column-normalised adjacency matrix
    g <- set.edge.attribute(g, edge.attr, value = edge.weights)
    W <- get.adjacency.norm(g, edge.attr)

    # p: a matrix of random walk probabilities (one column per gene-set vertex)
    # plast: p at the previous time point
    # p0: p at t=0 (unit mass on each gene-set vertex)
    plast <- p <- p0 <- as(sparseMatrix(
        dims = c(vcount(g), genes.vnum.size),
        i = genes.vnum, j = genes.seq,
        x = rep(1, genes.vnum.size)
    ), "dgeMatrix")

    # run first RWR iteration
    p <- (1 - rwr.r) * W %*% plast
    p[cbind(genes.vnum, genes.seq)] <- p[cbind(genes.vnum, genes.seq)] + rwr.r

    # iterate until the total probability change drops below the cutoff
    rwr.cutoff.total <- rwr.cutoff * genes.vnum.size
    while (sum(abs(p - plast)) > rwr.cutoff.total) {
        plast <- p
        p <- (1 - rwr.r) * W %*% plast
        p[cbind(genes.vnum, genes.seq)] <- p[cbind(genes.vnum, genes.seq)] + rwr.r
    }

    # rank vertices by proximity (higher probability = lower rank) and keep
    # only the rows belonging to the gene set
    d <- apply(-p, 2, rank)[genes.vnum, ]

    # compute average distance and output
    mean(d)
}
| /R/gsc.sub.R | no_license | alexjcornish/DiseaseCellTypes | R | false | false | 1,816 | r | gsc.sub <- function(
expression,
genes,
g,
rwr.r,
rwr.cutoff
) {
# run the compactness function on a cellular network created using expression and genes
# this function will be run a large number of times and therefore is important to reduce the time required
# because of this, no checks are run within the function
# all checks should be run within the compactness.sig function
# a single compactness score is output
edge.attr <- "edge.attr"
genes.vnum <- get.vertex.numbers(g, genes)
genes.vnum.size <- length(genes.vnum)
# score edges in g
el <- get.edgelist(g, names=TRUE)
expression[expression == 0] <- min(expression[expression != 0], na.rm=T)
edge.weights <- (expression[el[, 1]] * expression[el[, 2]])
# extract column-normalised adjacency matrix
g <- set.edge.attribute(g, edge.attr, value=edge.weights)
W <- get.adjacency.norm(g, edge.attr)
# p: a matrix of random walk probabilities
# plast: p in the previous time point
# p0: p at t=0
plast <- p <- p0 <- as(sparseMatrix(dims=c(vcount(g), genes.vnum.size), i=genes.vnum, j=1:genes.vnum.size, x=rep(1, genes.vnum.size)), "dgeMatrix")
# run first RWR iteration
p <- (1 - rwr.r) * W %*% plast
p[cbind(genes.vnum, 1:genes.vnum.size)] <- p[cbind(genes.vnum, 1:genes.vnum.size)] + rwr.r
# run remaining RWR iterations
rwr.cutoff.total <- rwr.cutoff * genes.vnum.size
while (sum(abs(p - plast)) > rwr.cutoff.total) {
plast <- p
p <- (1 - rwr.r) * W %*% plast
p[cbind(genes.vnum, 1:genes.vnum.size)] <- p[cbind(genes.vnum, 1:genes.vnum.size)] + rwr.r
}
# rank distances
d <- apply(-p, 2, rank)[genes.vnum, ]
# compute average distance and output
mean(d)
}
|
#' Secure your Shiny UI
#'
#' This function is used to secure your Shiny app's UI. Make sure to pass
#' your Shiny app's UI as the first argument to \code{secure_ui()} at
#' the bottom of your Shiny app's \code{ui.R} file.
#'
#' @param ui UI of the application.
#' @param sign_in_page_ui Either \code{NULL}, the default (See \code{\link{sign_in_ui_default}}), or the HTML, CSS, and JavaScript
#' to use for the UI of the Sign In page.
#' @param custom_admin_ui Either \code{NULL}, the default, or a list of 2 Shiny module UI functions
#' to add additional \code{shinydashboard} tabs to the \code{polished} Admin Panel. The list must be in the form:
#' \preformatted{
#' list(
#' "menu_items" = <your_custom_admin_menu_ui("custom_admin")>,
#' "tab_items" = <your_custom_admin_tabs_ui("custom_admin")>
#' )
#' }
#' @param custom_admin_button_ui Either \code{admin_button_ui()}, the default, or your custom
#' UI to take Admins from the custom Shiny app to the \code{polished} Admin Panel.
#' @param admin_ui_options list of HTML elements to customize branding of the \code{polished} Admin Panel. Valid
#' list element names are \code{title}, \code{sidebar_branding}, and \code{browser_tab_icon}. See
#' \code{\link{default_admin_ui_options}}, the default.
#' @param account_module_ui the UI portion for the user's account module.
#' @param splash_module_ui the UI portion for the splash page module.
#'
#' @return Secured Shiny app UI
#'
#' @export
#'
#' @importFrom shiny fluidPage fluidRow column actionButton parseQueryString
#' @importFrom htmltools tagList h1 tags
#' @importFrom digest digest
#' @importFrom uuid UUIDgenerate
#'
#'
secure_ui <- function(
  ui,
  sign_in_page_ui = NULL,
  custom_admin_ui = NULL,
  custom_admin_button_ui = admin_button_ui(),
  admin_ui_options = default_admin_ui_options(),
  account_module_ui = NULL,
  splash_module_ui = NULL
) {

  # evaluate the admin button UI once, outside the per-request handler
  custom_admin_button_ui <- force(custom_admin_button_ui)

  # Shiny calls this handler once per HTTP request
  function(request) {

    # `ui` may itself be a function of the request (ui.R "function(request)" style)
    if (is.function(ui)) {
      ui <- ui(request)
    } else {
      ui <- force(ui)
    }

    if (isTRUE(.global_sessions$get_admin_mode())) {
      # admin mode short-circuits everything else: go to Admin Panel
      return(tagList(
        admin_module_ui(
          "admin",
          custom_admin_ui,
          options = admin_ui_options,
          include_go_to_shiny_app_button = FALSE
        ),
        tags$script(src = "polish/js/polished_session.js?version=2"),
        tags$script(paste0("polished_session('", uuid::UUIDgenerate(), "')"))
      ))
    }

    query <- shiny::parseQueryString(request$QUERY_STRING)
    page_query <- query$page

    # hash the "polished" cookie (if present) for the session lookup below
    cookie_string <- request$HTTP_COOKIE
    hashed_cookie <- NULL
    if (!is.null(cookie_string)) {
      polished_cookie <- get_cookie(cookie_string, "polished")
      hashed_cookie <- digest::digest(polished_cookie)
    }

    # if a token exists attempt to sign in the user using the token. This is used to automatically
    # sign a user in via an email link without requiring the user to enter their email
    # and password.
    if (!is.null(query$token)) {
      query_cookie <- query$token

      return(
        tagList(
          tags$script(src = "https://cdn.jsdelivr.net/npm/js-cookie@2/src/js.cookie.min.js"),
          tags$script(paste0("
            Cookies.set(
              'polished',
              '", query_cookie, "',
              { expires: 365 } // set cookie to expire in 1 year
            )
          window.location.href = window.location.origin + window.location.pathname;
          "))
        )
      )
    }

    # look up the signed-in user for this hashed cookie; on failure fall
    # through with user == NULL (treated as signed out)
    user <- NULL
    if (!is.null(hashed_cookie) && length(hashed_cookie) > 0) {
      tryCatch({
        user <- .global_sessions$find(hashed_cookie, paste0("ui-", page_query))
      }, error = function(error) {
        print("sign_in_ui_1")
        print(error)
      })
    }

    # UI to optionally add Sentry.io error monitoring
    sentry_ui_out <- function(x) NULL
    sentry_dsn <- getOption("polished")$sentry
    if (!is.null(sentry_dsn)) {
      sentry_ui_out <- sentry_ui(
        sentry_dsn = sentry_dsn,
        app_uid = paste0(getOption("polished")$app_name, "@", getOption("polished")$app_uid),
        user = user,
        r_env = if (Sys.getenv("R_CONFIG_ACTIVE") == "") "default" else Sys.getenv("R_CONFIG_ACTIVE")
      )
    }

    page_out <- NULL

    if (is.null(user)) {

      if (!is.null(splash_module_ui) && is.null(page_query)) {

        page_out <- tagList(
          splash_module_ui,
          tags$script(src = "polish/js/router.js?version=3"),
          sentry_ui_out("splash")
        )

      } else if (identical(page_query, "sign_in")) {
        # go to the sign in page
        if (is.null(sign_in_page_ui)) {
          # go to default sign in page
          page_out <- tagList(
            sign_in_ui_default(),
            tags$script(src = "polish/js/router.js?version=3"),
            sentry_ui_out("sign_in_default")
          )
        } else {
          # go to custom sign in page
          page_out <- tagList(
            sign_in_page_ui,
            tags$script(src = "polish/js/router.js?version=3"),
            sentry_ui_out("sign_in_custom")
          )
        }

      } else {

        if (isFALSE(.global_sessions$is_auth_required)) {
          # auth is not required, so allow the user to go directly to the custom shiny app
          # go to Shiny app without admin button. User is not an admin
          page_out <- tagList(
            ui,
            tags$script(src = "polish/js/router.js?version=3"),
            tags$script(src = "polish/js/polished_session.js?version=2"),
            tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
            sentry_ui_out("shiny_app")
          )
        } else {
          # send a random uuid as the polished_session. This will trigger a session
          # reload and a redirect to the sign in page
          page_out <- tagList(
            tags$script(src = "polish/js/router.js?version=3"),
            tags$script(src = "polish/js/polished_session.js?version=2"),
            tags$script(paste0("polished_session('", uuid::UUIDgenerate(), "')"))
          )
        }

      }
    } else {
      # user is not NULL
      if (identical(page_query, "sign_in")) {
        # send signed in session to polished_session. This will trigger
        # a redirect to the app
        page_out <- tagList(
          tags$script(src = "polish/js/router.js?version=3"),
          tags$script(src = "polish/js/polished_session.js?version=2"),
          tags$script(paste0("polished_session('", user$hashed_cookie, "')"))
        )
      } else if (isTRUE(user$email_verified) ||
                 isFALSE(.global_sessions$is_email_verification_required)) {

        if (identical(page_query, "account")) {
          # serve the account module UI
          if (is.null(account_module_ui)) {
            stop("`account_module_ui` cannot be NULL", call. = FALSE)
          } else {
            page_out <- tagList(
              account_module_ui,
              tags$script(src = "polish/js/router.js?version=3"),
              tags$script(src = "polish/js/polished_session.js?version=2"),
              tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
              sentry_ui_out("account")
            )
          }

        } else if (isTRUE(user$is_admin)) {

          if (identical(page_query, "admin_panel")) {
            # go to Admin Panel
            page_out <- tagList(
              admin_module_ui("admin", custom_admin_ui, options = admin_ui_options),
              tags$script(src = "polish/js/router.js?version=3"),
              tags$script(src = "polish/js/polished_session.js?version=2"),
              tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
              sentry_ui_out("admin_panel")
            )
          } else if (is.null(page_query)) {
            # go to Shiny app with admin button. User is an admin.
            page_out <- tagList(
              ui,
              custom_admin_button_ui,
              tags$script(src = "polish/js/router.js?version=3"),
              tags$script(src = "polish/js/polished_session.js?version=2"),
              tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
              sentry_ui_out("shiny_app")
            )
          }

        } else {
          # go to Shiny app without admin button. User is not an admin
          page_out <- tagList(
            ui,
            tags$script(src = "polish/js/router.js?version=3"),
            tags$script(src = "polish/js/polished_session.js?version=2"),
            tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
            sentry_ui_out("shiny_app")
          )
        } # end is_admin check

      } else {
        # email is not verified.
        # go to email verification page
        page_out <- tagList(
          verify_email_module_ui(
            "verify"
          ),
          tags$script(src = "polish/js/router.js?version=3"),
          tags$script(src = "polish/js/polished_session.js?version=2"),
          tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
          sentry_ui_out("email_verification")
        )
      }

    }

    page_out
  } # end request handler function
}
| /R/secure_ui.R | no_license | cmh2jb/polished | R | false | false | 9,288 | r | #' Secure your Shiny UI
#'
#' This function is used to secure your Shiny app's UI. Make sure to pass
#' your Shiny app's UI as the first argument to \code{secure_ui()} at
#' the bottom of your Shiny app's \code{ui.R} file.
#'
#' @param ui UI of the application.
#' @param sign_in_page_ui Either \code{NULL}, the default (See \code{\link{sign_in_ui_default}}), or the HTML, CSS, and JavaScript
#' to use for the UI of the Sign In page.
#' @param custom_admin_ui Either \code{NULL}, the default, or a list of 2 Shiny module UI functions
#' to add additional \code{shinydashboard} tabs to the \code{polished} Admin Panel. The list must be in the form:
#' \preformatted{
#' list(
#' "menu_items" = <your_custom_admin_menu_ui("custom_admin")>,
#' "tab_items" = <your_custom_admin_tabs_ui("custom_admin")>
#' )
#' }
#' @param custom_admin_button_ui Either \code{admin_button_ui()}, the default, or your custom
#' UI to take Admins from the custom Shiny app to the \code{polished} Admin Panel.
#' @param admin_ui_options list of HTML elements to customize branding of the \code{polished} Admin Panel. Valid
#' list element names are \code{title}, \code{sidebar_branding}, and \code{browser_tab_icon}. See
#' \code{\link{default_admin_ui_options}}, the default.
#' @param account_module_ui the UI portion for the user's account module.
#' @param splash_module_ui the UI portion for the splash page module.
#'
#' @return Secured Shiny app UI
#'
#' @export
#'
#' @importFrom shiny fluidPage fluidRow column actionButton parseQueryString
#' @importFrom htmltools tagList h1 tags
#' @importFrom digest digest
#' @importFrom uuid UUIDgenerate
#'
#'
secure_ui <- function(
  ui,
  sign_in_page_ui = NULL,
  custom_admin_ui = NULL,
  custom_admin_button_ui = admin_button_ui(),
  admin_ui_options = default_admin_ui_options(),
  account_module_ui = NULL,
  splash_module_ui = NULL
) {

  # evaluate the admin button UI once, outside the per-request handler
  custom_admin_button_ui <- force(custom_admin_button_ui)

  # Shiny calls this handler once per HTTP request
  function(request) {

    # `ui` may itself be a function of the request (ui.R "function(request)" style)
    if (is.function(ui)) {
      ui <- ui(request)
    } else {
      ui <- force(ui)
    }

    if (isTRUE(.global_sessions$get_admin_mode())) {
      # admin mode short-circuits everything else: go to Admin Panel
      return(tagList(
        admin_module_ui(
          "admin",
          custom_admin_ui,
          options = admin_ui_options,
          include_go_to_shiny_app_button = FALSE
        ),
        tags$script(src = "polish/js/polished_session.js?version=2"),
        tags$script(paste0("polished_session('", uuid::UUIDgenerate(), "')"))
      ))
    }

    query <- shiny::parseQueryString(request$QUERY_STRING)
    page_query <- query$page

    # hash the "polished" cookie (if present) for the session lookup below
    cookie_string <- request$HTTP_COOKIE
    hashed_cookie <- NULL
    if (!is.null(cookie_string)) {
      polished_cookie <- get_cookie(cookie_string, "polished")
      hashed_cookie <- digest::digest(polished_cookie)
    }

    # if a token exists attempt to sign in the user using the token. This is used to automatically
    # sign a user in via an email link without requiring the user to enter their email
    # and password.
    if (!is.null(query$token)) {
      query_cookie <- query$token

      return(
        tagList(
          tags$script(src = "https://cdn.jsdelivr.net/npm/js-cookie@2/src/js.cookie.min.js"),
          tags$script(paste0("
            Cookies.set(
              'polished',
              '", query_cookie, "',
              { expires: 365 } // set cookie to expire in 1 year
            )
          window.location.href = window.location.origin + window.location.pathname;
          "))
        )
      )
    }

    # look up the signed-in user for this hashed cookie; on failure fall
    # through with user == NULL (treated as signed out)
    user <- NULL
    if (!is.null(hashed_cookie) && length(hashed_cookie) > 0) {
      tryCatch({
        user <- .global_sessions$find(hashed_cookie, paste0("ui-", page_query))
      }, error = function(error) {
        print("sign_in_ui_1")
        print(error)
      })
    }

    # UI to optionally add Sentry.io error monitoring
    sentry_ui_out <- function(x) NULL
    sentry_dsn <- getOption("polished")$sentry
    if (!is.null(sentry_dsn)) {
      sentry_ui_out <- sentry_ui(
        sentry_dsn = sentry_dsn,
        app_uid = paste0(getOption("polished")$app_name, "@", getOption("polished")$app_uid),
        user = user,
        r_env = if (Sys.getenv("R_CONFIG_ACTIVE") == "") "default" else Sys.getenv("R_CONFIG_ACTIVE")
      )
    }

    page_out <- NULL

    if (is.null(user)) {

      if (!is.null(splash_module_ui) && is.null(page_query)) {

        page_out <- tagList(
          splash_module_ui,
          tags$script(src = "polish/js/router.js?version=3"),
          sentry_ui_out("splash")
        )

      } else if (identical(page_query, "sign_in")) {
        # go to the sign in page
        if (is.null(sign_in_page_ui)) {
          # go to default sign in page
          page_out <- tagList(
            sign_in_ui_default(),
            tags$script(src = "polish/js/router.js?version=3"),
            sentry_ui_out("sign_in_default")
          )
        } else {
          # go to custom sign in page
          page_out <- tagList(
            sign_in_page_ui,
            tags$script(src = "polish/js/router.js?version=3"),
            sentry_ui_out("sign_in_custom")
          )
        }

      } else {

        if (isFALSE(.global_sessions$is_auth_required)) {
          # auth is not required, so allow the user to go directly to the custom shiny app
          # go to Shiny app without admin button. User is not an admin
          page_out <- tagList(
            ui,
            tags$script(src = "polish/js/router.js?version=3"),
            tags$script(src = "polish/js/polished_session.js?version=2"),
            tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
            sentry_ui_out("shiny_app")
          )
        } else {
          # send a random uuid as the polished_session. This will trigger a session
          # reload and a redirect to the sign in page
          page_out <- tagList(
            tags$script(src = "polish/js/router.js?version=3"),
            tags$script(src = "polish/js/polished_session.js?version=2"),
            tags$script(paste0("polished_session('", uuid::UUIDgenerate(), "')"))
          )
        }

      }
    } else {
      # user is not NULL
      if (identical(page_query, "sign_in")) {
        # send signed in session to polished_session. This will trigger
        # a redirect to the app
        page_out <- tagList(
          tags$script(src = "polish/js/router.js?version=3"),
          tags$script(src = "polish/js/polished_session.js?version=2"),
          tags$script(paste0("polished_session('", user$hashed_cookie, "')"))
        )
      } else if (isTRUE(user$email_verified) ||
                 isFALSE(.global_sessions$is_email_verification_required)) {

        if (identical(page_query, "account")) {
          # serve the account module UI
          if (is.null(account_module_ui)) {
            stop("`account_module_ui` cannot be NULL", call. = FALSE)
          } else {
            page_out <- tagList(
              account_module_ui,
              tags$script(src = "polish/js/router.js?version=3"),
              tags$script(src = "polish/js/polished_session.js?version=2"),
              tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
              sentry_ui_out("account")
            )
          }

        } else if (isTRUE(user$is_admin)) {

          if (identical(page_query, "admin_panel")) {
            # go to Admin Panel
            page_out <- tagList(
              admin_module_ui("admin", custom_admin_ui, options = admin_ui_options),
              tags$script(src = "polish/js/router.js?version=3"),
              tags$script(src = "polish/js/polished_session.js?version=2"),
              tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
              sentry_ui_out("admin_panel")
            )
          } else if (is.null(page_query)) {
            # go to Shiny app with admin button. User is an admin.
            page_out <- tagList(
              ui,
              custom_admin_button_ui,
              tags$script(src = "polish/js/router.js?version=3"),
              tags$script(src = "polish/js/polished_session.js?version=2"),
              tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
              sentry_ui_out("shiny_app")
            )
          }

        } else {
          # go to Shiny app without admin button. User is not an admin
          page_out <- tagList(
            ui,
            tags$script(src = "polish/js/router.js?version=3"),
            tags$script(src = "polish/js/polished_session.js?version=2"),
            tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
            sentry_ui_out("shiny_app")
          )
        } # end is_admin check

      } else {
        # email is not verified.
        # go to email verification page
        page_out <- tagList(
          verify_email_module_ui(
            "verify"
          ),
          tags$script(src = "polish/js/router.js?version=3"),
          tags$script(src = "polish/js/polished_session.js?version=2"),
          tags$script(paste0("polished_session('", user$hashed_cookie, "')")),
          sentry_ui_out("email_verification")
        )
      }

    }

    page_out
  } # end request handler function
}
|
percauthors <- function(Table, Sums) {
  # Fraction of the total author count contributed by each row of `Table`.
  #
  # Args:
  #   Table: matrix or data frame whose second column holds per-row author counts.
  #   Sums: numeric vector whose second element is the overall author total.
  #
  # Returns: the second column of `Table` divided by `Sums[2]`.
  Table[, 2] / Sums[2]
}
| /LotkasLaw/R/percauthors.R | no_license | ingted/R-Examples | R | false | false | 110 | r | percauthors <- function(Table, Sums)
{
justy <- Table[,2:2]
newcol <- justy/Sums[2]
return(newcol)
}
|
# ==============================================================================
# osa-xlsx-wrangle-lookup
# Alexander Nielson
# 2/25/2020
# ==============================================================================
#lib
library(readxl)
library(openxlsx)
library(tidyverse)
library(stringi)
library(tidyr)
osa_file_name <- "ucoa.xlsx"
# fund =========================================================================
# Read the fund sheet (sheet 1 of the UCOA workbook), left-pad fund numbers to
# three digits, and split `full_description` ("Primary - Secondary") into a
# primary (upper-cased) and a secondary level.
osa_fund <-
  read_xlsx(osa_file_name, sheet = excel_sheets(osa_file_name)[[1]]) %>%
  mutate(number = number %>% str_pad(3, pad = "0")) %>%
  mutate(
    # keep everything up to the first " - " when present, otherwise the whole
    # description.
    # NOTE(review): bare "[:space:]" outside a bracket set is a literal
    # character set {:, s, p, a, c, e}, not the POSIX space class -- the
    # intended pattern is probably "[[:space:]]"; confirm against the workbook.
    level_primary =
      ifelse(
        str_detect(full_description, "^(.*)[:space:]-[:space:]"),
        stri_extract_first(full_description, regex = "^(.*)[:space:]-[:space:]"),
        full_description
      ),
    level_secondary = stri_extract_first(full_description, regex = "[:space:]-[:space:](.*)$"),
    # strip the leftover separator dash, trim, and upper-case the primary level
    level_primary = str_replace(level_primary, "-", "") %>% str_trim() %>% str_to_upper(),
    level_secondary = str_replace(level_secondary, "-", "") %>% str_trim()
  )
# add additional fund numbers.
# Build the locally-assigned fund ranges that are not listed in the workbook,
# with the same columns as the sheet so they can be row-bound below.
# `number` is character at this point, so each `%in%` below silently coerces
# the numeric range on the right to character before comparing.
extra_fund_numbers <-
  c(202:298, 302:398, 402:448, 452:498, 502:598, 602:698, 702:798) %>%
  as.character() %>%
  enframe(name = NULL) %>%
  rename("number" = "value") %>%
  mutate(
    short_description = NA,
    full_description = case_when(
      number %in% (202:298) ~ "Special Revenue Funds (as assigned by local government)",
      number %in% (302:398) ~ "Debt Service Funds (as assigned by local government)",
      number %in% (402:448) ~ "Capital Projects Funds (as assigned by local government)",
      number %in% (452:498) ~ "Permanent Funds (as assigned by local government)",
      number %in% (502:598) ~ "Enterprise Funds (as assigned by local government)",
      number %in% (602:698) ~ "Internal Service Funds (as assigned by local government)",
      number %in% (702:798) ~ "Trust and Agency Funds (as assigned by local government)"
    ),
    detail = NA,
    # NOTE(review): "Capital Projects Funds " below carries a trailing space,
    # unlike its siblings -- probably unintended; confirm before changing it.
    level_primary = case_when(
      number %in% (202:298) ~ "Special Revenue Funds",
      number %in% (302:398) ~ "Debt Service Funds",
      number %in% (402:448) ~ "Capital Projects Funds ",
      number %in% (452:498) ~ "Permanent Funds",
      number %in% (502:598) ~ "Enterprise Funds",
      number %in% (602:698) ~ "Internal Service Funds",
      number %in% (702:798) ~ "Trust and Agency Funds"
    ),
    # e.g. "Enterprise Funds 42": primary label plus the last two digits
    level_secondary = paste(level_primary, str_extract(number, "[:digit:][:digit:]$"))
  ) %>%
  mutate(level_primary = str_to_upper(level_primary))
# Append the generated ranges and derive two-digit codes from the fund number.
# NOTE(review): code_primary (chars 1-2) and code_secondary (chars 2-3)
# overlap on the middle digit, and str_sub() treats a start of 0 as 1 --
# confirm both are intentional.
osa_fund <-
  osa_fund %>%
  bind_rows(extra_fund_numbers) %>%
  mutate(code_primary = str_sub(number, 0, 2),
         code_secondary = str_sub(number, 2, 3)) %>%
  select(-short_description, -detail) %>%
  rename(description = full_description)
# funct ========================================================================
# Read the function sheet (sheet 2) and derive three two-digit codes plus
# three description levels from each six-digit function number.
osa_funct <-
  read_xlsx(osa_file_name, sheet = excel_sheets(osa_file_name)[[2]])
# quicker than str_pad to the entire column: only the first number ("0")
# needs padding out to six digits
osa_funct$number[[1]] <- "000000"
osa_funct <- osa_funct %>%
  mutate(
    number = as.character(number),
    # NOTE(review): str_sub() treats a start of 0 as 1; (1, 2) would read the
    # same but be clearer.
    code_primary = str_sub(number, 0, 2),
    code_secondary = str_sub(number, 3, 4),
    code_tertiary = str_sub(number, 5, 6),
    temp_description = full_description
  ) %>%
  # split "Primary - Secondary - Tertiary" into the three level columns.
  # NOTE(review): bare "[:space:]" outside [[ ]] is a literal character set,
  # not the POSIX space class -- probably intended "[[:space:]]".
  separate(
    temp_description,
    into = c("level_primary", "level_secondary", "level_tertiary"),
    sep = "[:space:]-[:space:]"
  ) %>%
  select(-short_description, -detail) %>%
  rename(description = full_description)
# account ======================================================================
# Read the account sheet (sheet 3): derive four two-digit codes, map the
# leading two digits to a fixed top-level category, and split the description
# into the remaining three levels.
osa_account <-
  read_xlsx(osa_file_name, sheet = excel_sheets(osa_file_name)[[3]]) %>%
  mutate(
    number = as.character(number),
    code_primary = str_sub(number, 0, 2),
    code_secondary = str_sub(number, 3, 4),
    code_tertiary = str_sub(number, 5, 6),
    code_quaternary = str_sub(number, 7, 8),
    temp_description = full_description,
    # top-level category keyed on the first two digits of the account number;
    # any other prefix falls through case_when() to NA
    level_primary = case_when(
      str_extract(number, "^[:digit:][:digit:]") == "10" ~ "ASSETS",
      str_extract(number, "^[:digit:][:digit:]") == "11" ~ "DEFERRED OUTFLOWS OF RESOURCES",
      str_extract(number, "^[:digit:][:digit:]") == "20" ~ "LIABILITIES",
      str_extract(number, "^[:digit:][:digit:]") == "21" ~ "DEFERRED INFLOWS OF RESOURCES",
      str_extract(number, "^[:digit:][:digit:]") == "22" ~ "FUND BALANCE",
      str_extract(number, "^[:digit:][:digit:]") == "23" ~ "NET POSITION",
      str_extract(number, "^[:digit:][:digit:]") == "30" ~ "REVENUES",
      str_extract(number, "^[:digit:][:digit:]") == "40" ~ "EXPENDITURES"
    )
  ) %>%
  # remaining levels come from the " - "-separated description
  separate(
    temp_description,
    into = c("level_secondary", "level_tertiary", "level_quaternary"),
    sep = "[:space:]-[:space:]"
  ) %>%
  select(
    number,
    full_description,
    code_primary,
    code_secondary,
    code_tertiary,
    code_quaternary,
    level_primary,
    level_secondary,
    level_tertiary,
    level_quaternary
  ) %>%
  rename(description = full_description)
# consolidate ==================================================================
# one named list holding the three lookup tables
osa_lookup <-
  list(fund = osa_fund,
       funct = osa_funct,
       account = osa_account)
# export the lookup tables =====================================================
# writes one xlsx workbook with a sheet per table (the old comment said "csv",
# but the output is xlsx via openxlsx)
osa_names <- names(osa_lookup)
wb <- createWorkbook()
for (name in osa_names) {
  sheet_name <-
    name #str_trunc(name, 30, side = c("right"), ellipsis = "...")
  tempdata <- osa_lookup %>% pluck(name)
  addWorksheet(wb, sheet_name)
  writeData(wb, sheet_name, tempdata, colNames = TRUE)
}
saveWorkbook(wb, file = "osa_lookup.xlsx", overwrite = TRUE)
# empty garbage ================================================================
# drop every intermediate object so only the written workbook remains
rm(
  name,
  osa_file_name,
  osa_names,
  sheet_name,
  wb,
  tempdata,
  osa_lookup,
  osa_fund,
  osa_funct,
  osa_account,
  extra_fund_numbers
)
| /osa/osa-xlsx-wrangle-lookup.R | no_license | alexnielson/Transparency-UCOA | R | false | false | 5,872 | r | # ==============================================================================
# osa-xlsx-wrangle-lookup
# Alexander Nielson
# 2/25/2020
# ==============================================================================
#lib
library(readxl)
library(openxlsx)
library(tidyverse)
library(stringi)
library(tidyr)
osa_file_name <- "ucoa.xlsx"
# fund =========================================================================
osa_fund <-
read_xlsx(osa_file_name, sheet = excel_sheets(osa_file_name)[[1]]) %>%
mutate(number = number %>% str_pad(3, pad = "0")) %>%
mutate(
level_primary =
ifelse(
str_detect(full_description, "^(.*)[:space:]-[:space:]"),
stri_extract_first(full_description, regex = "^(.*)[:space:]-[:space:]"),
full_description
),
level_secondary = stri_extract_first(full_description, regex = "[:space:]-[:space:](.*)$"),
level_primary = str_replace(level_primary, "-", "") %>% str_trim() %>% str_to_upper(),
level_secondary = str_replace(level_secondary, "-", "") %>% str_trim()
)
# add additional fund numbers.
extra_fund_numbers <-
c(202:298, 302:398, 402:448, 452:498, 502:598, 602:698, 702:798) %>%
as.character() %>%
enframe(name = NULL) %>%
rename("number" = "value") %>%
mutate(
short_description = NA,
full_description = case_when(
number %in% (202:298) ~ "Special Revenue Funds (as assigned by local government)",
number %in% (302:398) ~ "Debt Service Funds (as assigned by local government)",
number %in% (402:448) ~ "Capital Projects Funds (as assigned by local government)",
number %in% (452:498) ~ "Permanent Funds (as assigned by local government)",
number %in% (502:598) ~ "Enterprise Funds (as assigned by local government)",
number %in% (602:698) ~ "Internal Service Funds (as assigned by local government)",
number %in% (702:798) ~ "Trust and Agency Funds (as assigned by local government)"
),
detail = NA,
level_primary = case_when(
number %in% (202:298) ~ "Special Revenue Funds",
number %in% (302:398) ~ "Debt Service Funds",
number %in% (402:448) ~ "Capital Projects Funds ",
number %in% (452:498) ~ "Permanent Funds",
number %in% (502:598) ~ "Enterprise Funds",
number %in% (602:698) ~ "Internal Service Funds",
number %in% (702:798) ~ "Trust and Agency Funds"
),
level_secondary = paste(level_primary, str_extract(number, "[:digit:][:digit:]$"))
) %>%
mutate(level_primary = str_to_upper(level_primary))
osa_fund <-
osa_fund %>%
bind_rows(extra_fund_numbers) %>%
mutate(code_primary = str_sub(number, 0, 2),
code_secondary = str_sub(number, 2, 3)) %>%
select(-short_description, -detail) %>%
rename(description = full_description)
# funct ========================================================================
osa_funct <-
read_xlsx(osa_file_name, sheet = excel_sheets(osa_file_name)[[2]])
# quicker than str_pad to the entire column
osa_funct$number[[1]] <- "000000"
osa_funct <- osa_funct %>%
mutate(
number = as.character(number),
code_primary = str_sub(number, 0, 2),
code_secondary = str_sub(number, 3, 4),
code_tertiary = str_sub(number, 5, 6),
temp_description = full_description
) %>%
separate(
temp_description,
into = c("level_primary", "level_secondary", "level_tertiary"),
sep = "[:space:]-[:space:]"
) %>%
select(-short_description, -detail) %>%
rename(description = full_description)
# account ======================================================================
osa_account <-
read_xlsx(osa_file_name, sheet = excel_sheets(osa_file_name)[[3]]) %>%
mutate(
number = as.character(number),
code_primary = str_sub(number, 0, 2),
code_secondary = str_sub(number, 3, 4),
code_tertiary = str_sub(number, 5, 6),
code_quaternary = str_sub(number, 7, 8),
temp_description = full_description,
level_primary = case_when(
str_extract(number, "^[:digit:][:digit:]") == "10" ~ "ASSETS",
str_extract(number, "^[:digit:][:digit:]") == "11" ~ "DEFERRED OUTFLOWS OF RESOURCES",
str_extract(number, "^[:digit:][:digit:]") == "20" ~ "LIABILITIES",
str_extract(number, "^[:digit:][:digit:]") == "21" ~ "DEFERRED INFLOWS OF RESOURCES",
str_extract(number, "^[:digit:][:digit:]") == "22" ~ "FUND BALANCE",
str_extract(number, "^[:digit:][:digit:]") == "23" ~ "NET POSITION",
str_extract(number, "^[:digit:][:digit:]") == "30" ~ "REVENUES",
str_extract(number, "^[:digit:][:digit:]") == "40" ~ "EXPENDITURES"
)
) %>%
separate(
temp_description,
into = c("level_secondary", "level_tertiary", "level_quaternary"),
sep = "[:space:]-[:space:]"
) %>%
select(
number,
full_description,
code_primary,
code_secondary,
code_tertiary,
code_quaternary,
level_primary,
level_secondary,
level_tertiary,
level_quaternary
) %>%
rename(description = full_description)
# consolidate ==================================================================
osa_lookup <-
list(fund = osa_fund,
funct = osa_funct,
account = osa_account)
# export to a csv document =====================================================
osa_names <- names(osa_lookup)
wb <- createWorkbook()
for (name in osa_names) {
sheet_name <-
name #str_trunc(name, 30, side = c("right"), ellipsis = "...")
tempdata <- osa_lookup %>% pluck(name)
addWorksheet(wb, sheet_name)
writeData(wb, sheet_name, tempdata, colNames = TRUE)
}
saveWorkbook(wb, file = "osa_lookup.xlsx", overwrite = TRUE)
# empty garbage ================================================================
rm(
name,
osa_file_name,
osa_names,
sheet_name,
wb,
tempdata,
osa_lookup,
osa_fund,
osa_funct,
osa_account,
extra_fund_numbers
)
|
###The purpose of this script:
###1. To perform Genome-Wide Association Analysis with a simple linear model on a
###   population with ~1800 lines, 21M SNPs, on plant height (PH).
###2. Because of limited computing resources, each chromosome is read in small
###   segments, which also enables parallel computing.

## Chromosome range comes from the command line:
##   Rscript <script> <first_chromosome> <last_chromosome>
Args <- commandArgs(TRUE)
ch_s <- as.numeric(Args[1])
ch_e <- as.numeric(Args[2])

## Phenotype file: one row per line of the panel; PH = plant height
Pheno <- read.table('/XXX/pheno/AmesDP_pheno', header = TRUE, sep = "\t")
Y_ori <- as.vector(Pheno$PH)

## SNP count per chromosome (used to work out how many segments to read)
snpnum <- read.table('/XXX/geno/AmesDP_snpnum', header = TRUE, sep = "\t")
SNP_num <- integer(10)
for (i in 1:10) {
  SNP_num[i] <- nrow(snpnum[snpnum$CHROM == i, ])
}

## segment size: SNPs per read.table() call
n_rows <- 10000

for (ch in ch_s:ch_e) {
  ch_hmp_file <- paste('/XXX/geno/AmesDP_ch', ch, '.hmp', sep = '')
  ## split the genotype of each chromosome into segments.
  ## ceiling() (rather than the old floor() + 1) avoids attempting to read an
  ## empty extra segment when the SNP count is an exact multiple of n_rows.
  snp_num <- SNP_num[ch]
  L <- ceiling(snp_num / n_rows)
  for (j in seq_len(L)) {
    n_s <- (j - 1) * n_rows
    ## Skip the file's header line plus all previously-read SNP rows, and read
    ## the segment without a header.  (The old header = TRUE combined with
    ## skip = n_s made read.table() consume the first SNP row of every segment
    ## after the first as a column-header line, silently dropping one SNP per
    ## segment.)
    ch_hmp <- read.table(ch_hmp_file, header = FALSE, sep = "\t",
                         skip = n_s + 1, nrows = n_rows)
    output_dir <- '/XXX/'
    if (!file.exists(output_dir)) { dir.create(output_dir, recursive = TRUE) }
    ## NOTE(review): `group` is never defined in this script -- presumably it
    ## is supplied by a wrapper that sources this file; confirm, or pass it as
    ## a third command-line argument.
    simple_GWAS <- paste(output_dir, group, '_simple_ch', ch, '.hmp', sep = '')
    N <- nrow(ch_hmp)
    ## preallocate result vectors (growing them inside the loop is O(N^2))
    P_values <- numeric(N)
    F_values <- numeric(N)
    ## first 4 columns of the .hmp segment identify each SNP
    result_df <- ch_hmp[, 1:4]
    ## loop through each SNP to perform the test
    for (i in seq_len(N)) {
      all_samples <- as.vector(as.matrix(ch_hmp[i, ]))
      ## drop the 11 hapmap metadata columns; the rest are genotype calls
      X_ori <- all_samples[-(1:11)]
      X <- X_ori
      Y <- Y_ori
      ## remove missing genotype calls ("N") together with their phenotypes
      bad_samples <- which(X_ori == "N")
      if (length(bad_samples) > 0) {
        X <- X_ori[-bad_samples]
        Y <- Y_ori[-bad_samples]
      }
      X <- as.factor(X)
      ## a monomorphic SNP (a single genotype class) cannot be tested;
      ## lm()/anova() would error out, so record NA instead
      if (nlevels(X) < 2) {
        P_values[i] <- NA
        F_values[i] <- NA
        next
      }
      ## simple linear model Y ~ genotype; build the ANOVA table once and take
      ## both statistics from it (the old code called anova() twice per SNP
      ## and used `F` as a variable name, shadowing the FALSE alias)
      aov_tab <- anova(lm(Y ~ X))
      P_values[i] <- -log10(aov_tab[1, 5])
      F_values[i] <- aov_tab[1, 4]
    }
    result_df$P <- P_values
    result_df$F <- F_values
    ## append this segment's results to the per-chromosome output file
    write.table(result_df, file = simple_GWAS, append = TRUE, sep = "\t",
                col.names = FALSE)
  }
}
| /R/1Simple_linear_model_GWAS.r | no_license | jwang1133/Bioinformatics | R | false | false | 2,464 | r | ###The purpose of this script:
###1. To perform Genome-Wide Association Analysis with simple linear model on a population with ~1800 lines, 21M SNPs and on plant height traits
###2. Because of the limited computing resources we have, we read into small segments from each chromosome to facilitate the computing, this also enable the parallel computing
##Pass the chromosome information from command line
Args <- commandArgs(TRUE)
ch_s <- as.numeric(Args[1])
ch_e <- as.numeric(Args[2])
##Read into the phenotype file, and put the phenotype into a vector
Pheno <- read.table('/XXX/pheno/AmesDP_pheno', header = T, sep = "\t")
Y_ori <- as.vector(Pheno$PH)
##create a vector which store the snp number for each of the chromosome
snpnum <- read.table('/XXX/geno/AmesDP_snpnum', header = T, sep = "\t")
SNP_num <- rep (0, 10)
for (i in 1:10){
data <- snpnum[snpnum$CHROM==i, ]
SNP_num[i] <- nrow(data)
}
##define the size of each segments
n_rows <- 10000
for (ch in ch_s:ch_e) {
ch_hmp_file <- paste('/XXX/geno/AmesDP_ch', ch, '.hmp', sep = '')
##split the genotype of each chromsome into segments
snp_num <- SNP_num[ch]
L <- floor(snp_num/n_rows) + 1
for (j in 1:L){
n_s <- (j - 1) * n_rows
n_e <- j * n_rows + 1
ch_hmp <- read.table(ch_hmp_file, header = T, sep = "\t" , skip = n_s, nrows = n_rows)
output_dir <- '/XXX/'
if (!file.exists(output_dir)) {dir.create(output_dir, recursive = T) }
simple_GWAS <- paste(output_dir, group, '_simple_ch', ch,'.hmp', sep = '')
N <- nrow(ch_hmp)
M <- nrow(Pheno)
P_values <- c()
F_values <- c()
##create dataframe to store the GWAS results
result_df <- ch_hmp[,c(1:4)]
##loop through each SNP to perform the test
for (i in 1:N) {
all_samples <- as.vector(as.matrix(ch_hmp[i,]))
##remove the first 11 value in the SNP vector
X_ori <- all_samples[-c(1:11)]
X <- X_ori
Y <- Y_ori
bad_samples <- which(X_ori == "N")
##remove missing values
if (length(bad_samples) > 0) {
X <- X_ori[-bad_samples];
Y <- Y_ori[-bad_samples];
}
##Run linear model for each of the SNP
X <- as.factor(X);
Y_X_lm <- lm(Y ~ X);
P <- -log10(anova(Y_X_lm)[1,5])
F <- anova(Y_X_lm)[1,4]
P_values[i] <- P
F_values[i] <- F
}
result_df$P <- P_values
result_df$F <- F_values
##write out the results
write.table(result_df, file = simple_GWAS, append = T, sep = "\t", col.names=FALSE)
}
}
|
#' remakeRandomData
#'
#' Given an object output by \code{makeRandomData}, make another data set with the same
#' distribution.
#'
#' @param n A \code{numeric} sample size
#' @param object An object of class \code{"makeRandomData"}
#' @param setA Value(s) to set treatment variable to. If \code{NULL} then treatment
#' is simulated according to observed data functions. If \code{length(setA)==1} then all
#' values are set to this single value. Otherwise, if setA is a vector of length n, A is
#' set according to this vector.
#' @param setW A matrix of proper size with values of covariates set to fixed levels.
#' Useful for plotting methods. Assumes proper dimensions. If \code{NULL} simulates W
#' according to the stored distributions.
#' @param ... Not used.
#' @return A list with the following entries
#' \item{W}{A matrix of covariates}
#' \item{A}{A vector of binary treatments}
#' \item{Y}{A vector of continuously valued outcomes}
#' \item{Q0}{The true outcome regression evaluated at the simulated (A, W)}
#' \item{g0}{The true propensity score; identically 1 (= plogis(Inf)) when \code{setA} is supplied}
#' \item{err}{The simulated outcome errors before scaling by \code{object$errMult}}
#' @export
remakeRandomData <- function(n, object, setA = NULL, setW = NULL, ...){
  # number of covariates, taken from the stored covariate matrix
  D <- ncol(object$W)
  #----------------------------------------------------------------------
  # Simulate W
  #----------------------------------------------------------------------
  # initialize empties (NOTE(review): distW is allocated here but never used
  # again in this function)
  W <- matrix(nrow = n, ncol = D)
  distW <- vector(mode = "list", length = D)
  if(is.null(setW)){
    # each covariate's stored sampler may condition on the previous column
    for(d in 1:D){
      if(d == 1){
        W[,d] <- do.call(object$distW[[d]]$fn, args = c(list(n=n), object$distW[[d]]$parm))
      }else{
        W[,d] <- do.call(object$distW[[d]]$fn, args = c(list(n=n, x = W[,d-1]), object$distW[[d]]$parm))
      }
    }
  }else{
    W <- as.matrix(setW)
  }
  #----------------------------------------------------------------------
  # Simulate propensity -- only if setA == NULL
  #----------------------------------------------------------------------
  if(is.null(setA)){
    # counts of stored propensity terms, by interaction order
    Mg1 <- length(object$fnG0$uni)
    Mg2 <- length(object$fnG0$biv)
    Mg3 <- length(object$fnG0$tri)
    Mg4 <- length(object$fnG0$quad)
    # accumulate the logit propensity term by term
    logitg0 <- rep(0, n)
    # univariate terms
    if(Mg1 > 0){
      for(m in 1:Mg1){
        fOut <- do.call(object$fnG0$uni[[m]]$fn, args = c(list(x = W[,object$fnG0$uni[[m]]$whichColsW]), object$fnG0$uni[[m]]$parm))
        # add to current logitg0
        logitg0 <- logitg0 + fOut
      }
    }
    # two-way interactions
    if(Mg2 > 0){
      for(m in 1:Mg2){
        # call function with stored parameters
        fOut <- do.call(object$fnG0$biv[[m]]$fn,
                        args = c(list(x1 = W[,object$fnG0$biv[[m]]$whichColsW[1]],
                                      x2 = W[,object$fnG0$biv[[m]]$whichColsW[2]]),
                                 object$fnG0$biv[[m]]$parm))
        # add to current logitg0
        logitg0 <- logitg0 + fOut
      }
    }
    # three-way interactions
    if(Mg3 > 0){
      for(m in 1:Mg3){
        # call function with stored parameters
        fOut <- do.call(object$fnG0$tri[[m]]$fn,
                        args = c(list(x1 = W[,object$fnG0$tri[[m]]$whichColsW[1]],
                                      x2 = W[,object$fnG0$tri[[m]]$whichColsW[2]],
                                      x3 = W[,object$fnG0$tri[[m]]$whichColsW[3]]),
                                 object$fnG0$tri[[m]]$parm))
        # add to current logitg0
        logitg0 <- logitg0 + fOut
      }
    }
    # four-way interactions
    if(Mg4 > 0){
      for(m in 1:Mg4){
        # call function with stored parameters
        fOut <- do.call(object$fnG0$quad[[m]]$fn,
                        args = c(list(x1 = W[,object$fnG0$quad[[m]]$whichColsW[1]],
                                      x2 = W[,object$fnG0$quad[[m]]$whichColsW[2]],
                                      x3 = W[,object$fnG0$quad[[m]]$whichColsW[3]],
                                      x4 = W[,object$fnG0$quad[[m]]$whichColsW[4]]),
                                 object$fnG0$quad[[m]]$parm))
        # add to current logitg0
        logitg0 <- logitg0 + fOut
      }
    }
    # correct for positivity violations: truncate the propensity to
    # [minG0, 1 - minG0] on the probability scale
    logitg0[plogis(logitg0) < object$minG0] <- qlogis(object$minG0)
    logitg0[plogis(logitg0) > 1 - object$minG0] <- qlogis(1 - object$minG0)
    # simulate A ~ Bernoulli(expit(logitg0))
    A <- rbinom(n, 1, plogis(logitg0))
  }else{
    # treatment fixed by the caller: recycle a scalar, otherwise use as given
    if(length(setA)==1){
      A <- rep(setA, n)
    }else{
      A <- setA
    }
    # sentinel; the returned g0 becomes plogis(Inf) = 1 in this branch
    logitg0 <- Inf
  }
  # design matrix with A as the first column
  AW <- cbind(A, W)
  #----------------------------------------------------------------------
  # Simulate Y
  #----------------------------------------------------------------------
  # counts of stored outcome-regression terms, by interaction order
  MQ1 <- length(object$fnQ0$uni)
  MQ2 <- length(object$fnQ0$biv)
  MQ3 <- length(object$fnQ0$tri)
  MQ4 <- length(object$fnQ0$quad)
  # accumulate the true outcome regression Q0 term by term
  Q0 <- rep(0, n)
  # main terms
  if(MQ1 > 0){
    for(m in 1:MQ1){
      fOut <- do.call(object$fnQ0$uni[[m]]$fn, args = c(list(x = AW[,object$fnQ0$uni[[m]]$whichColsAW]), object$fnQ0$uni[[m]]$parm))
      # add
      Q0 <- Q0 + fOut
    }
  }
  # two-way interactions
  if(MQ2 > 0){
    for(m in 1:MQ2){
      # call function with stored parameters
      fOut <- do.call(object$fnQ0$biv[[m]]$fn,
                      args = c(list(x1 = AW[,object$fnQ0$biv[[m]]$whichColsAW[1]],
                                    x2 = AW[,object$fnQ0$biv[[m]]$whichColsAW[2]]),
                               object$fnQ0$biv[[m]]$parm))
      # add
      Q0 <- Q0 + fOut
    }
  }
  # three-way interactions
  if(MQ3 > 0){
    for(m in 1:MQ3){
      # call function with stored parameters
      fOut <- do.call(object$fnQ0$tri[[m]]$fn,
                      args = c(list(x1 = AW[,object$fnQ0$tri[[m]]$whichColsAW[1]],
                                    x2 = AW[,object$fnQ0$tri[[m]]$whichColsAW[2]],
                                    x3 = AW[,object$fnQ0$tri[[m]]$whichColsAW[3]]),
                               object$fnQ0$tri[[m]]$parm))
      # add
      Q0 <- Q0 + fOut
    }
  }
  # four-way interactions
  if(MQ4 > 0){
    for(m in 1:MQ4){
      # call function with stored parameters
      fOut <- do.call(object$fnQ0$quad[[m]]$fn,
                      args = c(list(x1 = AW[,object$fnQ0$quad[[m]]$whichColsAW[1]],
                                    x2 = AW[,object$fnQ0$quad[[m]]$whichColsAW[2]],
                                    x3 = AW[,object$fnQ0$quad[[m]]$whichColsAW[3]],
                                    x4 = AW[,object$fnQ0$quad[[m]]$whichColsAW[4]]),
                               object$fnQ0$quad[[m]]$parm))
      # add
      Q0 <- Q0 + fOut
    }
  }
  # evaluate the stored error-generating function at (AW, n)
  errOut <- do.call(object$distErrY$fn, args = c(list(AW=AW, n=n), object$distErrY$parm))
  # compute Y = true regression + scaled error
  Y <- Q0 + errOut * object$errMult
  return(list(W = W, A = A, Y = Y, Q0 = Q0, g0 = plogis(logitg0), err = errOut))
} | /R/remakeRandomData.R | no_license | jlstiles/haltmle.sim | R | false | false | 6,925 | r | #' remakeRandomData
#'
#' Given an object output by \code{makeRandomData}, make another data set with the same
#' distribution.
#'
#' @param n A \code{numeric} sample size
#' @param object An object of class \code{"makeRandomData"}
#' @param setA Value(s) to set treatment variable to. If \code{NULL} then treatment
#' is simulated according to observed data functions. If \code{length(A)==1} then all
#' values are set to this single value. Otherwise, if A is a vector of length n, A is set
#' according to this vector.
#' @param setW A matrix of proper size with values of covariates set to fixed levels. Useful for
#' plotting methods. Assumes proper dimensions. If \code{NULL} simulates W according to distributions.
#' @return An object of class \code{"makeRandomData"} with the following entries
#' \item{W}{A matrix of covariates}
#' \item{A}{A vector of binary treatments}
#' \item{Y}{A vector of continuously valued outcome}
#' \item{distW}{A list containing relevant information needed to reproduce data sets}
#' \item{fnG0}{A list of lists containing relevant information needed to reproduce data sets}
#' \item{fnQ0}{A list of lists containing relevant information needed to reproduce data sets}
#' \item{distErrY}{A list containing relevant information needed to reproduce data sets}
#' @export
remakeRandomData <- function(n, object, setA = NULL, setW = NULL, ...){
# draw random number of covariates
D <- ncol(object$W)
#----------------------------------------------------------------------
# Simulate W
#----------------------------------------------------------------------
# initialize empties
W <- matrix(nrow = n, ncol = D)
distW <- vector(mode = "list", length = D)
if(is.null(setW)){
for(d in 1:D){
if(d == 1){
W[,d] <- do.call(object$distW[[d]]$fn, args = c(list(n=n), object$distW[[d]]$parm))
}else{
W[,d] <- do.call(object$distW[[d]]$fn, args = c(list(n=n, x = W[,d-1]), object$distW[[d]]$parm))
}
}
}else{
W <- as.matrix(setW)
}
#----------------------------------------------------------------------
# Simulate propensity -- only if setA == NULL
#----------------------------------------------------------------------
if(is.null(setA)){
# draw random number of main terms
Mg1 <- length(object$fnG0$uni)
Mg2 <- length(object$fnG0$biv)
Mg3 <- length(object$fnG0$tri)
Mg4 <- length(object$fnG0$quad)
# initialize empty
logitg0 <- rep(0, n)
# univariate
if(Mg1 > 0){
for(m in 1:Mg1){
fOut <- do.call(object$fnG0$uni[[m]]$fn, args = c(list(x = W[,object$fnG0$uni[[m]]$whichColsW]), object$fnG0$uni[[m]]$parm))
# add to current logitg0
logitg0 <- logitg0 + fOut
}
}
# two-way interactions
if(Mg2 > 0){
for(m in 1:Mg2){
# call function with parameters
fOut <- do.call(object$fnG0$biv[[m]]$fn,
args = c(list(x1 = W[,object$fnG0$biv[[m]]$whichColsW[1]],
x2 = W[,object$fnG0$biv[[m]]$whichColsW[2]]),
object$fnG0$biv[[m]]$parm))
# add to current logitg0
logitg0 <- logitg0 + fOut
}
}
#trivariate
if(Mg3 > 0){
for(m in 1:Mg3){
# call function with parameters
fOut <- do.call(object$fnG0$tri[[m]]$fn,
args = c(list(x1 = W[,object$fnG0$tri[[m]]$whichColsW[1]],
x2 = W[,object$fnG0$tri[[m]]$whichColsW[2]],
x3 = W[,object$fnG0$tri[[m]]$whichColsW[3]]),
object$fnG0$tri[[m]]$parm))# save output in list
# add to current logitg0
logitg0 <- logitg0 + fOut
}
}
#quadravariant
if(Mg4 > 0){
for(m in 1:Mg4){
# call function with parameters
fOut <- do.call(object$fnG0$quad[[m]]$fn,
args = c(list(x1 = W[,object$fnG0$quad[[m]]$whichColsW[1]],
x2 = W[,object$fnG0$quad[[m]]$whichColsW[2]],
x3 = W[,object$fnG0$quad[[m]]$whichColsW[3]],
x4 = W[,object$fnG0$quad[[m]]$whichColsW[4]]),
object$fnG0$quad[[m]]$parm))# save output in list
# add to current logitg0
logitg0 <- logitg0 + fOut
}
}
# correct for positivity violations
logitg0[plogis(logitg0) < object$minG0] <- qlogis(object$minG0)
logitg0[plogis(logitg0) > 1 - object$minG0] <- qlogis(1 - object$minG0)
# simulate A
A <- rbinom(n, 1, plogis(logitg0))
}else{
if(length(setA)==1){
A <- rep(setA, n)
}else{
A <- setA
}
logitg0 <- Inf
}
# matrix with A and W
AW <- cbind(A, W)
#----------------------------------------------------------------------
# Simulate Y
#----------------------------------------------------------------------
# draw random number of main terms between 2 and D + 1, where we set
# 2 to be the minimum so that we ensure there is confounding.
MQ1 <- length(object$fnQ0$uni)
MQ2 <- length(object$fnQ0$biv)
MQ3 <- length(object$fnQ0$tri)
MQ4 <- length(object$fnQ0$quad)
# empty
Q0 <- rep(0, n)
# main terms
if(MQ1 > 0){
for(m in 1:MQ1){
fOut <- do.call(object$fnQ0$uni[[m]]$fn, args = c(list(x = AW[,object$fnQ0$uni[[m]]$whichColsAW]), object$fnQ0$uni[[m]]$parm))
# add
Q0 <- Q0 + fOut
}
}
# two-way interactions
if(MQ2 > 0){
for(m in 1:MQ2){
# call function with parameters
fOut <- do.call(object$fnQ0$biv[[m]]$fn,
args = c(list(x1 = AW[,object$fnQ0$biv[[m]]$whichColsAW[1]],
x2 = AW[,object$fnQ0$biv[[m]]$whichColsAW[2]]),
object$fnQ0$biv[[m]]$parm))
# add
Q0 <- Q0 + fOut
}
}
# three-way interactions
if(MQ3 > 0){
for(m in 1:MQ3){
# call function with parameters
fOut <- do.call(object$fnQ0$tri[[m]]$fn,
args = c(list(x1 = AW[,object$fnQ0$tri[[m]]$whichColsAW[1]],
x2 = AW[,object$fnQ0$tri[[m]]$whichColsAW[2]],
x3 = AW[,object$fnQ0$tri[[m]]$whichColsAW[3]]),
object$fnQ0$tri[[m]]$parm))
# add
Q0 <- Q0 + fOut
}
}
# four-way interactions
if(MQ4 > 0){
for(m in 1:MQ4){
# call function with parameters
fOut <- do.call(object$fnQ0$quad[[m]]$fn,
args = c(list(x1 = AW[,object$fnQ0$quad[[m]]$whichColsAW[1]],
x2 = AW[,object$fnQ0$quad[[m]]$whichColsAW[2]],
x3 = AW[,object$fnQ0$quad[[m]]$whichColsAW[3]],
x4 = AW[,object$fnQ0$quad[[m]]$whichColsAW[4]]),
object$fnQ0$quad[[m]]$parm))
# add
Q0 <- Q0 + fOut
}
}
# Drawing an error function
# evaluate error function
errOut <- do.call(object$distErrY$fn, args = c(list(AW=AW, n=n), object$distErrY$parm))
# compute Y
Y <- Q0 + errOut * object$errMult
return(list(W = W, A = A, Y = Y, Q0 = Q0, g0 = plogis(logitg0), err = errOut))
} |
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/vital_rate_regression_functions.R
\name{get_survival_params}
\alias{get_survival_params}
\title{Estimate survival regression coefficients using INLA}
\usage{
get_survival_params(dataframe, crowd_mat, alpha)
}
\arguments{
\item{dataframe}{Time series dataframe of genet sizes at t0 and t1 with
neighborhood crowding covariate for each observation.}
\item{crowd_mat}{Matrix of crowding indices for each observation. Matrix
dimensions are nrow = number of observations,
ncol = number of species.}
\item{alpha}{Vector of length(species) of vital rate specific alpha values.}
}
\value{
Dataframe with named regression coefficients.
}
\description{
Estimate survival regression coefficients using INLA
}
\author{
Andrew Tredennick
}
| /man/get_survival_params.Rd | no_license | atredennick/community_synchrony | R | false | false | 817 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/vital_rate_regression_functions.R
\name{get_survival_params}
\alias{get_survival_params}
\title{Estimate survival regression coefficients using INLA}
\usage{
get_survival_params(dataframe, crowd_mat, alpha)
}
\arguments{
\item{dataframe}{Time series dataframe of genet sizes at t0 and t1 with
neighborhood crowding covariate for each observation.}
\item{crowd_mat}{Matrix of crowding indices for each observation. Matrix
dimensions are nrow = number of observations,
ncol = number of species.}
\item{alpha}{Vector of length(species) of vital rate specific alpha values.}
}
\value{
Dataframe with named regression coefficients.
}
\description{
Estimate survival regression coefficients using INLA
}
\author{
Andrew Tredennick
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{TpsConnect2}
\alias{TpsConnect2}
\title{Connects to a local or remote TPS.}
\usage{
TpsConnect2(ip, type)
}
\arguments{
\item{ip}{TPS2 host name or IP.}
\item{type}{TPS type (0: 1st generation TPS, 1: 2nd generation TPS).}
}
\description{
\code{TpsConnect2} connects to a local or remote TPS (TPS1: type = 0,
TPS2: type = 1)
}
\seealso{
Other TPS functions:
\code{\link{TpsChangeIonMode}()},
\code{\link{TpsConnect}()},
\code{\link{TpsDisconnect}()},
\code{\link{TpsGetActiveFilament}()},
\code{\link{TpsGetLastSetValue}()},
\code{\link{TpsGetModuleCodes}()},
\code{\link{TpsGetModuleLimits}()},
\code{\link{TpsGetMonitorValue}()},
\code{\link{TpsGetNbrModules}()},
\code{\link{TpsGetStatus}()},
\code{\link{TpsGetTargetValue}()},
\code{\link{TpsInitialize}()},
\code{\link{TpsLoadSetFile}()},
\code{\link{TpsSaveSetFile}()},
\code{\link{TpsSetActiveFilament}()},
\code{\link{TpsSetAllVoltages}()},
\code{\link{TpsSetTargetValue}()},
\code{\link{TpsShutdown}()}
}
\concept{TPS functions}
| /man/TpsConnect2.Rd | no_license | pasturm/TofDaqR | R | false | true | 1,088 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{TpsConnect2}
\alias{TpsConnect2}
\title{Connects to a local or remote TPS.}
\usage{
TpsConnect2(ip, type)
}
\arguments{
\item{ip}{TPS2 host name or IP.}
\item{type}{TPS type (0: 1st generation TPS, 1: 2nd generation TPS).}
}
\description{
\code{TpsConnect2} connects to a local or remote TPS (TPS1: type = 0,
TPS2: type = 1)
}
\seealso{
Other TPS functions:
\code{\link{TpsChangeIonMode}()},
\code{\link{TpsConnect}()},
\code{\link{TpsDisconnect}()},
\code{\link{TpsGetActiveFilament}()},
\code{\link{TpsGetLastSetValue}()},
\code{\link{TpsGetModuleCodes}()},
\code{\link{TpsGetModuleLimits}()},
\code{\link{TpsGetMonitorValue}()},
\code{\link{TpsGetNbrModules}()},
\code{\link{TpsGetStatus}()},
\code{\link{TpsGetTargetValue}()},
\code{\link{TpsInitialize}()},
\code{\link{TpsLoadSetFile}()},
\code{\link{TpsSaveSetFile}()},
\code{\link{TpsSetActiveFilament}()},
\code{\link{TpsSetAllVoltages}()},
\code{\link{TpsSetTargetValue}()},
\code{\link{TpsShutdown}()}
}
\concept{TPS functions}
|
\name{intensity.lpp} %DontDeclareMethods
\alias{intensity.lpp}
\title{
Empirical Intensity of Point Pattern on Linear Network
}
\description{
Computes the average number of points per unit length
in a point pattern on a linear network
}
\usage{
\method{intensity}{lpp}(X, ...)
}
\arguments{
\item{X}{
A point pattern on a linear network (object of class \code{"lpp"}).
}
\item{\dots}{
Ignored.
}
}
\details{
  This is a method for the generic function \code{\link{intensity}}.
It computes the empirical intensity of a point pattern
on a linear network (object of class \code{"lpp"}),
i.e. the average density of points per unit length.
If the point pattern is multitype, the intensities of the
different types are computed separately.
}
\value{
A numeric value (giving the intensity) or numeric vector
(giving the intensity for each possible type).
}
\seealso{
\code{\link{intensity}},
\code{\link{intensity.ppp}}
}
\examples{
intensity(chicago)
}
\author{Adrian Baddeley
\email{Adrian.Baddeley@uwa.edu.au}
\url{http://www.maths.uwa.edu.au/~adrian/}
and Rolf Turner
\email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{nonparametric}
| /man/intensity.lpp.Rd | no_license | benjaminbenwu/spatstat | R | false | false | 1,194 | rd | \name{intensity.lpp} %DontDeclareMethods
\alias{intensity.lpp}
\title{
Empirical Intensity of Point Pattern on Linear Network
}
\description{
Computes the average number of points per unit length
in a point pattern on a linear network
}
\usage{
\method{intensity}{lpp}(X, ...)
}
\arguments{
\item{X}{
A point pattern on a linear network (object of class \code{"lpp"}).
}
\item{\dots}{
Ignored.
}
}
\details{
This is a method for the generic function \code{\link{intensity}}.
It computes the empirical intensity of a point pattern
on a linear network (object of class \code{"lpp"}),
i.e. the average density of points per unit length.
If the point pattern is multitype, the intensities of the
different types are computed separately.
}
\value{
A numeric value (giving the intensity) or numeric vector
(giving the intensity for each possible type).
}
\seealso{
\code{\link{intensity}},
\code{\link{intensity.ppp}}
}
\examples{
intensity(chicago)
}
\author{Adrian Baddeley
\email{Adrian.Baddeley@uwa.edu.au}
\url{http://www.maths.uwa.edu.au/~adrian/}
and Rolf Turner
\email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{nonparametric}
|
# Example script auto-extracted (genthat) from the twoway package docs.
library(twoway)
### Name: anova.twoway
### Title: ANOVA summary for a two-way table, including Tukey Additivity
### Test
### Aliases: anova.twoway
### ** Examples
# Fit a two-way (row + column effects) decomposition of the sentence
# reaction-time table, then summarise it with an ANOVA that includes
# Tukey's one-degree-of-freedom test for non-additivity.
data(sentRT)
sent.2way <- twoway(sentRT)
anova(sent.2way)
| /data/genthat_extracted_code/twoway/examples/anova.twoway.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 230 | r | library(twoway)
### Name: anova.twoway
### Title: ANOVA summary for a two-way table, including Tukey Additivity
### Test
### Aliases: anova.twoway
### ** Examples
data(sentRT)
sent.2way <- twoway(sentRT)
anova(sent.2way)
|
#' Shift points around Alaska and Hawaii to the elided area
#'
#' This function will take a SpatialPoints object or a data frame of coordinates
#' and shift the points around Alaska and Hawaii to the elided area from this package.
#'
#' @param sp An object of SpatialPoints class or a data frame with x (lon) and y (lat)
#' @return An elided version of the original SpatialPoints class or a data frame
#' depending on what was passed in.
#' @export
points_elided <- function(sp) {
ret <- "sp"
if (inherits(sp, "data.frame")) {
class(sp) <- "data.frame"
sp <- setNames(sp, c("lon", "lat"))
sp::coordinates(sp) <- ~lon+lat
sp::proj4string(sp) <- us_longlat_proj
ret <- "df"
}
orig_proj <- sp::proj4string(sp)
# convert it to Albers equal area
sp <- sp::spTransform(sp, sp::CRS("+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs"))
ak_bb <- readRDS(system.file("extdata/alaska_bb.rda", package="albersusa"))
ak_poly <- as(raster::extent(as.vector(t(ak_bb))), "SpatialPolygons")
sp::proj4string(ak_poly) <- "+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs"
# Determine which points fall in the Alaska bounding box, subset and remove
# from the original points
ak_l <- sp::over(sp, ak_poly)
ak <- sp[!is.na(ak_l),]
sp <- sp[is.na(ak_l),]
if (length(ak)) {
# Elide the points, the key here is to set "bb" to what the original
# transformation's bounding box was!
ak <- maptools::elide(
ak,
scale=max(apply(ak_bb, 1, diff)) / 2.3,
rotate = -50,
bb = ak_bb
) # NEED the bb option here
ak <- maptools::elide(ak, shift = c(-1298669, -3018809)) # bb doesn't matter
sp::proj4string(ak) <- sp::proj4string(sp)
}
hi_bb <- readRDS(system.file("extdata/hawaii_bb.rda", package="albersusa"))
#hi_bb <- readRDS("inst/extdata/hawaii_bb.rda")
# hi_bb <- matrix(c(-160.2471, 18.9117, -154.8066, 22.2356), 2, 2)
# rownames(hi_bb) <- c("x", "y")
# colnames(hi_bb) <- c("min", "max")
hi_poly <- as(raster::extent(as.vector(t(hi_bb))), "SpatialPolygons")
# sp::proj4string(hi_poly) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# hi_poly <- sp::spTransform(hi_poly, CRSobj = "+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs")
sp::proj4string(hi_poly) <- "+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs"
# Determine which points fall in the Alaska bounding box, subset and remove
# from the original points
hi_l <- sp::over(sp, hi_poly)
hi <- sp[!is.na(hi_l),]
sp <- sp[is.na(hi_l),]
if (length(hi)) {
hi <- maptools::elide(
hi,
rotate = -35,
bb = hi_bb
) # NEED the bb option here
hi <- maptools::elide(hi, shift = c(5400000, -1400000)) # bb doesn't matter
sp::proj4string(hi) <- sp::proj4string(sp)
}
# Bring them back together with original projection
if (length(ak) && length(hi)) {
sp <- rbind(sp, ak, hi)
} else if (length(ak)) {
sp <- ak
} else if (length(hi)) {
sp <- hi
}
sp <- sp::spTransform(sp, CRS(orig_proj))
return(if (ret == "sp") sp else as.data.frame(sp))
} | /R/points_elided.R | no_license | reuning/albersusa | R | false | false | 3,245 | r | #' Shift points around Alaska and Hawaii to the elided area
#'
#' This function will take a SpatialPoints object or a data frame of coordinates
#' and shift the points around Alaska and Hawaii to the elided area from this package.
#'
#' @param sp An object of SpatialPoints class or a data frame with x (lon) and y (lat)
#' @return An elided version of the original SpatialPoints class or a data frame
#' depending on what was passed in.
#' @export
points_elided <- function(sp) {
  # Remember whether the caller passed a plain data frame so the same kind
  # of object can be returned at the end.
  ret <- "sp"
  if (inherits(sp, "data.frame")) {
    class(sp) <- "data.frame"
    sp <- setNames(sp, c("lon", "lat"))
    sp::coordinates(sp) <- ~lon+lat
    sp::proj4string(sp) <- us_longlat_proj
    ret <- "df"
  }
  orig_proj <- sp::proj4string(sp)
  # Work in the same Lambert azimuthal equal-area projection the elided
  # composite US map uses.
  sp <- sp::spTransform(sp, sp::CRS("+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs"))
  ak_bb <- readRDS(system.file("extdata/alaska_bb.rda", package="albersusa"))
  ak_poly <- as(raster::extent(as.vector(t(ak_bb))), "SpatialPolygons")
  sp::proj4string(ak_poly) <- "+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs"
  # Determine which points fall inside the Alaska bounding box; split them
  # off from the remaining (non-Alaska) points.
  ak_l <- sp::over(sp, ak_poly)
  ak <- sp[!is.na(ak_l),]
  sp <- sp[is.na(ak_l),]
  if (length(ak) > 0) {
    # Elide the Alaska points. The key is setting "bb" to the bounding box
    # of the original transformation so scale/rotate match the map's elision.
    ak <- maptools::elide(
      ak,
      scale=max(apply(ak_bb, 1, diff)) / 2.3,
      rotate = -50,
      bb = ak_bb
    ) # the bb option is required here
    ak <- maptools::elide(ak, shift = c(-1298669, -3018809)) # bb doesn't matter
    sp::proj4string(ak) <- sp::proj4string(sp)
  }
  hi_bb <- readRDS(system.file("extdata/hawaii_bb.rda", package="albersusa"))
  hi_poly <- as(raster::extent(as.vector(t(hi_bb))), "SpatialPolygons")
  sp::proj4string(hi_poly) <- "+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs"
  # Same split for points inside the Hawaii bounding box.
  hi_l <- sp::over(sp, hi_poly)
  hi <- sp[!is.na(hi_l),]
  sp <- sp[is.na(hi_l),]
  if (length(hi) > 0) {
    hi <- maptools::elide(
      hi,
      rotate = -35,
      bb = hi_bb
    ) # the bb option is required here
    hi <- maptools::elide(hi, shift = c(5400000, -1400000)) # bb doesn't matter
    sp::proj4string(hi) <- sp::proj4string(sp)
  }
  # Recombine whichever pieces are non-empty. The previous version assigned
  # `sp <- ak` (or `sp <- hi`) when only one of the two was non-empty, which
  # silently discarded every point outside Alaska/Hawaii; binding all
  # non-empty pieces fixes that while still avoiding rbind() on empty sets.
  pieces <- list(sp, ak, hi)
  pieces <- pieces[vapply(pieces, length, integer(1)) > 0]
  if (length(pieces) > 1) {
    sp <- do.call(rbind, pieces)
  } else if (length(pieces) == 1) {
    sp <- pieces[[1]]
  }
  # Return in the caller's original CRS; CRS() is namespaced explicitly so
  # the function also works when the sp package is not attached.
  sp <- sp::spTransform(sp, sp::CRS(orig_proj))
  return(if (ret == "sp") sp else as.data.frame(sp))
}
# Standard testthat entry point: executed by R CMD check to run every test
# file under tests/testthat/ for the Dknitprintr package.
library(testthat)
library(Dknitprintr)
test_check("Dknitprintr")
| /tests/testthat.R | no_license | dmenne/dknitprintr | R | false | false | 65 | r | library(testthat)
library(Dknitprintr)
test_check("Dknitprintr")
|
###################################################################################
# HighFreq.R -- check whether high-frequency (minute-level) data for the last
# 6 months gives different correlation results than the daily measures.
# Loads daily correlation measures for the last ~180 days and compares them
# against realized (intraday) correlations.
#
# NOTE(review): this script assumes `DCCcorr_measures`, `WINcorr_measures`
# and `log_dr` already exist in the workspace (created by earlier scripts in
# this project) -- confirm they are loaded before sourcing this file.
require(highfrequency)
require(quantmod)
require(qmao)
# Keep only the dates for which both daily correlation measures exist.
corr_meas <- merge(DCCcorr_measures,WINcorr_measures,by="date") # drop dates for which we don't have one of the measures
# Last 134 trading days (~6 months) of the merged daily measures.
recent_corr <- tail(corr_meas,134)
# import data
Sys.timezone(location = TRUE) # auto-prints the local timezone; value is not stored
stock_dat <- read.csv("sp500.csv",skip =1, header = TRUE, stringsAsFactors = FALSE)
bond_dat <- read.csv("treasury10y.csv",skip =1,header = TRUE, stringsAsFactors = FALSE)
# Parse minute-level timestamps and build xts series of opening quotes.
stock_date <- strptime(as.character(stock_dat$Date),format="%m/%d/%Y %H:%M")
stock_open <- xts(stock_dat$OPEN,stock_date)
bond_date <- strptime(as.character(bond_dat$Date), format = "%m/%d/%Y %H:%M") # "%m/%d/%Y" SLOW to get everything
bond_open <- xts(bond_dat$OPEN, bond_date)
# Log returns; tail(..., -1) drops the leading NA created by diff().
stock_log_dr <- tail(diff(log(stock_open)),-1) # Stock daily returns (remove 1st ob)
# Bond quote is offset by 100 before taking logs and the sign is flipped --
# presumably because the series is a yield, so a yield drop maps to a
# positive bond return. TODO confirm against the data source.
bond_log_dr <- tail(-diff(log(bond_open+100)),-1) # Remove first observation of log daily bond returns
bond_log_dr <- bond_log_dr[!is.na(index(bond_log_dr))]
# Align both series on common timestamps and drop two specific dates.
minute_returns <- merge.xts(stock_log_dr,bond_log_dr,join = "inner")
minute_returns <- ExcludeDates(minute_returns, exclude = c("2016-11-11", "2016-10-10"))
## Estimate daily correlations and volatilities using high frequency (minute level) data
# Compute daily realized variances
realized_var_stock <- rCov(minute_returns[,1], align.by = "minutes", align.period= NULL)
realized_var_bond <- rCov(minute_returns[,2], align.by = "minutes", align.period = NULL)
# rCov(cor = TRUE) yields one 2x2 correlation matrix per day; extract the
# off-diagonal element of each and re-index by day.
realized_cov <- rCov(minute_returns, cor = TRUE, align.by = "minutes", align.period = NULL)
realized_corr <- rapply(realized_cov, function(x) head(x, 1)[2])
realized_corr <- xts(realized_corr,index(realized_var_bond))
# Combine realized correlation with the 30/90/180/360-day daily measures
# (columns 4:7 of recent_corr, per the legend below) for plotting.
corr_plot <- merge.xts(realized_corr, xts(recent_corr[,4:7],index(realized_var_bond)))
coly = c(1,2,3,4,5)
pdf("HighFreqComparison.pdf")
plot.zoo(corr_plot, plot.type = "single", col =coly, ylab ="", xlab = "")
abline(h =0, col = "1")
title("High frequency comparison")
legend("topright", c("High Frequency realized correlation","30 days","90 days","180 days", "360 days"), col = coly, lty=rep(1,5), bty="n")
dev.off()
## Normalize data by their standard deviations
# `log_dr` (daily log returns) comes from an earlier script -- see note above.
daily_returns<- tail(log_dr,134)
daily_returns[,1] <- daily_returns[,1]/as.vector(sqrt(realized_var_stock))
daily_returns[,2] <- daily_returns[,2]/as.vector(sqrt(realized_var_bond))
plot(realized_cov[2,1])
| /HighFreq.R | no_license | gianlucaRinaldi/2142Breaks | R | false | false | 2,567 | r | ###################################################################################
# Here we see if high frequency data for last 6 months gives different results
# load daily measures for last 180 days
require(highfrequency)
require(quantmod)
require(qmao)
corr_meas <- merge(DCCcorr_measures,WINcorr_measures,by="date") # drop dates for which we don't have one of the measures
recent_corr <- tail(corr_meas,134)
# import data
Sys.timezone(location = TRUE)
stock_dat <- read.csv("sp500.csv",skip =1, header = TRUE, stringsAsFactors = FALSE)
bond_dat <- read.csv("treasury10y.csv",skip =1,header = TRUE, stringsAsFactors = FALSE)
stock_date <- strptime(as.character(stock_dat$Date),format="%m/%d/%Y %H:%M")
stock_open <- xts(stock_dat$OPEN,stock_date)
bond_date <- strptime(as.character(bond_dat$Date), format = "%m/%d/%Y %H:%M") # "%m/%d/%Y" SLOW to get everything
bond_open <- xts(bond_dat$OPEN, bond_date)
stock_log_dr <- tail(diff(log(stock_open)),-1) # Stock daily returns (remove 1st ob)
bond_log_dr <- tail(-diff(log(bond_open+100)),-1) # Remove first observation of log daily bond returns
bond_log_dr <- bond_log_dr[!is.na(index(bond_log_dr))]
minute_returns <- merge.xts(stock_log_dr,bond_log_dr,join = "inner")
minute_returns <- ExcludeDates(minute_returns, exclude = c("2016-11-11", "2016-10-10"))
## Estimate daily correlations and volatilities using high frequency (minute level) data
# Compute daily realized variances
realized_var_stock <- rCov(minute_returns[,1], align.by = "minutes", align.period= NULL)
realized_var_bond <- rCov(minute_returns[,2], align.by = "minutes", align.period = NULL)
realized_cov <- rCov(minute_returns, cor = TRUE, align.by = "minutes", align.period = NULL)
realized_corr <- rapply(realized_cov, function(x) head(x, 1)[2])
realized_corr <- xts(realized_corr,index(realized_var_bond))
corr_plot <- merge.xts(realized_corr, xts(recent_corr[,4:7],index(realized_var_bond)))
coly = c(1,2,3,4,5)
pdf("HighFreqComparison.pdf")
plot.zoo(corr_plot, plot.type = "single", col =coly, ylab ="", xlab = "")
abline(h =0, col = "1")
title("High frequency comparison")
legend("topright", c("High Frequency realized correlation","30 days","90 days","180 days", "360 days"), col = coly, lty=rep(1,5), bty="n")
dev.off()
## Normalize data by their standard deviations
daily_returns<- tail(log_dr,134)
daily_returns[,1] <- daily_returns[,1]/as.vector(sqrt(realized_var_stock))
daily_returns[,2] <- daily_returns[,2]/as.vector(sqrt(realized_var_bond))
plot(realized_cov[2,1])
|
# - Load all required packages -
library(shiny)
library(shinyTime)
library(rdrop2)
library(gridExtra)
library(ggrepel)
library(lubridate)
library(tidyverse)
# --
# - 20190926: How Dropbox access was set up -
# Based on the rdrop2 package; the critical info is here:
# <https://github.com/karthik/rdrop2#accessing-dropbox-on-shiny-and-remote-servers>
# Do the "Authentication" part on that page once, i.e.:
#   library(rdrop2)
#   drop_auth()  # then accept in the browser
# NB: this creates the .httr-oauth file, which works locally but not always
# on shinyapps.io -- so delete it! Instead continue with:
#   token <- drop_auth()
#   saveRDS(token, file = "token.rds")
# which leaves token.rds in this app folder.
# To make things work smoothly on shinyapps.io, the key info is here:
# <https://github.com/karthik/rdrop2/issues/61>
# i.e. call drop_auth(rdstoken = "token.rds") once in the general part of the
# app (done below), then use all drop_* functions without the dtoken argument.
# --
# - 20190926: Uploading the app to shinyapps.io -
# That's where the rsconnect folder comes from; see
# <https://docs.rstudio.com/shinyapps.io/getting-started.html#deploying-applications>
# --
# - 20190926: the app-friendly timeInput from the shinyTime package -
# See the ShinyTime folder or shinyTimeExample() for a demo. Pitfall: the
# minute.steps argument rounds via a time function that converts to UTC,
# which shifted times by 2 hours from CET. Don't use it; stick to:
#   timeInput(inputId = "time", label = "Start Time",
#             value = now(tzone = "CET"), seconds = FALSE)
# --
# - load helper functions used by the ui/server files -
source("calendarFunctions.R")
# --
# - Dropbox authentication via the saved token (see note above) -
drop_auth(rdstoken = "token.rds")
# --
# - load the ui definition (expected to define `ui`) -
source("Calendar_ui.R")
# --
# - load the server function (expected to define `server`) -
source("Calendar_server.R")
# --
# - run the app -
shinyApp(ui = ui, server = server)
# --
| /calendar/app_calendar.R | no_license | TBrach/shiny_apps | R | false | false | 2,106 | r | # - Load all required packages -
library(shiny)
library(shinyTime)
library(rdrop2)
library(gridExtra)
library(ggrepel)
library(lubridate)
library(tidyverse)
# --
# - 20190926: To access your dropbox account this had to be done: -
# It's based on the rdrop2 package, the critical infor is given here: <https://github.com/karthik/rdrop2#accessing-dropbox-on-shiny-and-remote-servers>
# So you have to do once the part that is under "Authentication" on that page, i.e.:
# library(rdrop2)
# drop_auth() # then accept in browser
# NB: this gives you the .httr-oauth file, that works locally but not always on shinyapps.io. So delete it!
# instead continue with:
# token <- drop_auth()
# saveRDS(token, file = "token.rds")
# so now you have token.rds in your app folder
# To make things work smoothly on shinyapps.io, I got the key info here: <https://github.com/karthik/rdrop2/issues/61>
# you do 1.) drop_auth(rdstoken = "token.rds") in the general part of the app here, then use all drop_* functions without
# dtoken argument.
# --
# - 20190926: Uploading app to shinyapps.io -
# Thats' were the rsconnect folder comes from.
# see <https://docs.rstudio.com/shinyapps.io/getting-started.html#deploying-applications>
# --
# - 20190926: the app friendly timeInput from shinyTime package -
# This cost me some nerves, see now I use: see ShinyTime folder or shinyTimeExample() if you want
# The problem was that it always changed the time to UTC, so 2 hours wrong from CET.
# The problem is the minute.steps argument, don't use it, even if it appears interesting, it rounds and must call
# a time function that changes the tz. So just stick to:
# timeInput(inputId = "time", label = "Start Time", value = now(tzone = "CET"), seconds = FALSE),
# --
# - load functions -
source("calendarFunctions.R")
# --
# - do the dropbox authentication -
# <https://github.com/karthik/rdrop2/issues/61>
drop_auth(rdstoken = "token.rds")
# --
# - load the ui files -
source("Calendar_ui.R")
# --
# - load the server function -
source("Calendar_server.R")
# --
# - run the app -
shinyApp(ui = ui, server = server)
# --
|
# deseq2v4.r -- differential-expression analysis with DESeq2.
# For each mutant/overexpression sample set, compare against the control
# columns, write normalized-count and DEG tables to CSV, and produce MA,
# sample-clustering and PCA plots per coefficient.
library("pasilla")
library("DESeq2")
library("pheatmap")
library("amap")
library("ggplot2")
library("BiocParallel")
register(MulticoreParam(4)) # use 4 workers for DESeq2's parallel steps
library("gplots")
library("RColorBrewer")
# Gene-id -> functional-description lookup table.
funaa<-read.csv("d:\\RNA3\\A7\\v4fun.csv" ,header = T)
names(funaa)<-c("Gene","desc")
setwd("d:\\RNA2")
fn<-c("gene") # file-name prefix used in every output file name
#for (fn in fileNamebase){
##define file and file name
# NOTE(review): the first read below is immediately overwritten by the
# second -- presumably a leftover from an earlier version of the script.
alldata <- read.csv(paste(fn,"_count_matrix.csv",sep="") ,row.names=1)
alldata <- read.csv("count-a7-a7l.csv" ,row.names=1)
coldata <- read.table("sample-group.txt", row.names=1,header = T)
## Because the control replicates are not very reproducible, the choice of
## control could affect the expression estimates, so run the analysis with
## two sets of controls?
###group1:ZJMA,ZJMD,2611Y4:19,22,26,
###group2:2611Y7,2611Y5,2503Y2:23,27,28
###group3:ZJMB.ZJMC,2503Y4,2503Y5: 24,25,20:21, this group may be unreliable
###A7OE:15,17,18
###A7RNAi:10,11 A7RNAi2:12,13
###VP:ALl
###SRDX:
#### Reassemble the count matrix: one sub-matrix of columns per sample group.
vp<-alldata[,c(1:4)]
srdx<-alldata[,c(5:9)]
rnai1<-alldata[,c(10,11)]
rnai2<-alldata[,c(12,13)]
a7loe<-alldata[,c(29:31)]
a7lrnai<-alldata[,c(32:34)]
oe<-alldata[,c(15,17,18)]
ck<-alldata[,c(23,27,28)]
ck2<-alldata[,c(19,22,26)]
ck3<-alldata[,c(20,21,25)]
rnaia7<-alldata[,c(10,12:13)]
###
# NOTE(review): cts/group built here are overwritten inside the loop below,
# so this combined matrix appears to be dead code kept for reference.
cts<-data.frame(vp,srdx,rnai1,rnai2,oe,a7lrnai,ck)
group<-c(rep("VP",length(vp)),
rep("SRDX",length(srdx)),
rep("RNAi1",length(rnai1)),
rep("RNAi2",length(rnai2)),
rep("OE",length(oe)),
rep("A7LRNAi",length(a7lrnai)),
rep("CK",length(ck)))
### End of matrix reassembly.
###vp,srdx,rnai1,rnai2,a7lrnai,a7loe,oe
###"vp","srdx","rnai1","rnai2","a7lrnai","a7loe","oe
# Sample sets to test, each compared against the `ck` control columns.
sample<-list(oe,rnaia7,vp,srdx )
samplenames<-list("OE","a7rnai","vp","srdx")
for (s in 1:length(sample)){
# Contrast matrix: current sample group's columns plus the control columns.
cts<-data.frame(sample[s],ck)
group<-c(rep(samplenames[[s]],length(sample[[s]])),
rep("CK1",length(ck)))
{
# Condition table for DESeq2: one row per column of cts.
aa<-data.frame(group)
rownames(aa)<-colnames(cts)
all(rownames(aa) %in% colnames(cts)) # sanity check; value is not stored
dds <- DESeqDataSetFromMatrix(countData = cts,
colData = aa,
design = ~ group)
dds <- DESeq(dds)
degs<-resultsNames(dds) # lists the coefficients
# One result set (and one batch of files/plots) per coefficient, skipping
# the intercept at position 1.
for(i in 2:length(degs)){
print(degs[i])
res <- results(dds, name=degs[i])
# or to shrink log fold changes association with condition:
#res <- lfcShrink(dds, coef=degs[i], type="apeglm")
# MA plot for this contrast.
pdf(file=paste(degs[i],paste(fn,"_MA.pdf",sep="")), pointsize=10)
plotMA(res, ylim=c(-2,2))
dev.off()
### normalized counts
normalized_counts <- counts(dds, normalized=TRUE)
### ???? what is going wrong here ???? (original author's note)
# Per-gene median absolute deviation, used below to order the rlog matrix.
normalized_counts_mad <- apply(normalized_counts, 1, mad)
#normalized_counts <- normalized_counts[order(normalized_counts_mad, decreasing=T), ]
# Join the per-gene test results with the normalized counts on gene id.
{aa<-as.data.frame(res)
Gene<-rownames(aa)
da<-data.frame(Gene,aa)
bb<-as.data.frame(normalized_counts)
Gene<-rownames(bb)
db<-data.frame(Gene,bb)
aadd<-merge(db,da,by="Gene")}
write.csv(as.data.frame(aadd),
file=paste(degs[i],paste(fn,"_all_count_exp.csv",sep="")),row.names = FALSE)
# Differentially expressed genes: adjusted p < 0.05 and |log2FC| > 1.
diff_gene_deseq2 <- subset(aadd,padj < 0.05 & (log2FoldChange >1 | log2FoldChange < -1))
row.names(diff_gene_deseq2)<-diff_gene_deseq2[,1]
# Attach functional descriptions from the lookup table loaded at the top.
deg<-merge(diff_gene_deseq2,subset(funaa , funaa$Gene%in%diff_gene_deseq2[,1]),by="Gene",all=T)
write.csv(as.data.frame(deg), file=paste(degs[i],paste(fn,"_deff_count_exp.csv",sep="")),row.names = FALSE)
write.table(as.data.frame(diff_gene_deseq2[1]), file=paste(degs[i],paste(fn,"_DEG_ID.txt",sep="")),quote=F,row.names = F)
# Regularized-log transform for clustering/PCA, ordered by the MAD above.
rld <- rlog(dds, blind=FALSE)
rlogMat <- assay(rld)
rlogMat <- rlogMat[order(normalized_counts_mad, decreasing=T), ]
hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
# Sample-to-sample Pearson correlation heatmap with hierarchical clustering.
pearson_cor <- as.matrix(cor(rlogMat, method="pearson"))
hc <- hcluster(t(rlogMat), method="pearson")
pdf(file=paste(degs[i],paste(fn,"_cluster.pdf",sep="")), pointsize=10)
heatmap.2(pearson_cor, Rowv=as.dendrogram(hc), symm=T, trace="none",
col=hmcol, margins=c(11,11), main="The pearson correlation of each
sample")
dev.off()
# PCA of the samples on the rlog values.
pca_data <- plotPCA(rld, intgroup=c("group"), returnData=T, ntop=50000)
pdf(file=paste(degs[i],paste(fn,"_pca.pdf",sep="")), pointsize=10)
plot(pca_data[,1:2],pch=19)
text(pca_data[,1],pca_data[,2]+1,row.names(pca_data),cex=0.5 )
dev.off()
}
}
}
| /deseq2v4.r | no_license | lhaclove/MyCode | R | false | false | 4,238 | r | library("pasilla")
library("DESeq2")
library("pheatmap")
library("amap")
library("ggplot2")
library("BiocParallel")
register(MulticoreParam(4))
library("gplots")
library("RColorBrewer")
funaa<-read.csv("d:\\RNA3\\A7\\v4fun.csv" ,header = T)
names(funaa)<-c("Gene","desc")
setwd("d:\\RNA2")
fn<-c("gene")
#for (fn in fileNamebase){
##define file and file name
alldata <- read.csv(paste(fn,"_count_matrix.csv",sep="") ,row.names=1)
alldata <- read.csv("count-a7-a7l.csv" ,row.names=1)
coldata <- read.table("sample-group.txt", row.names=1,header = T)
##因为对照重复性不好,因此可能会因为对照的选择影响表达量,因此使用两组对照做分析?
###group1:ZJMA,ZJMD,2611Y4:19,22,26,
###group2:2611Y7,2611Y5,2503Y2:23,27,28
###group3:ZJMB.ZJMC,2503Y4,2503Y5: 24,25,20:21,该组可能不好
###A7OE:15,17,18
###A7RNAi:10,11 A7RNAi2:12,13
###VP:ALl
###SRDX:
####重组数据矩阵
vp<-alldata[,c(1:4)]
srdx<-alldata[,c(5:9)]
rnai1<-alldata[,c(10,11)]
rnai2<-alldata[,c(12,13)]
a7loe<-alldata[,c(29:31)]
a7lrnai<-alldata[,c(32:34)]
oe<-alldata[,c(15,17,18)]
ck<-alldata[,c(23,27,28)]
ck2<-alldata[,c(19,22,26)]
ck3<-alldata[,c(20,21,25)]
rnaia7<-alldata[,c(10,12:13)]
###
cts<-data.frame(vp,srdx,rnai1,rnai2,oe,a7lrnai,ck)
group<-c(rep("VP",length(vp)),
rep("SRDX",length(srdx)),
rep("RNAi1",length(rnai1)),
rep("RNAi2",length(rnai2)),
rep("OE",length(oe)),
rep("A7LRNAi",length(a7lrnai)),
rep("CK",length(ck)))
###重组结束
###vp,srdx,rnai1,rnai2,a7lrnai,a7loe,oe
###"vp","srdx","rnai1","rnai2","a7lrnai","a7loe","oe
sample<-list(oe,rnaia7,vp,srdx )
samplenames<-list("OE","a7rnai","vp","srdx")
for (s in 1:length(sample)){
cts<-data.frame(sample[s],ck)
group<-c(rep(samplenames[[s]],length(sample[[s]])),
rep("CK1",length(ck)))
{
aa<-data.frame(group)
rownames(aa)<-colnames(cts)
all(rownames(aa) %in% colnames(cts))
dds <- DESeqDataSetFromMatrix(countData = cts,
colData = aa,
design = ~ group)
dds <- DESeq(dds)
degs<-resultsNames(dds) # lists the coefficients
for(i in 2:length(degs)){
print(degs[i])
res <- results(dds, name=degs[i])
# or to shrink log fold changes association with condition:
#res <- lfcShrink(dds, coef=degs[i], type="apeglm")
pdf(file=paste(degs[i],paste(fn,"_MA.pdf",sep="")), pointsize=10)
plotMA(res, ylim=c(-2,2))
dev.off()
###normaliz counts
normalized_counts <- counts(dds, normalized=TRUE)
###????出现什么问题了????
normalized_counts_mad <- apply(normalized_counts, 1, mad)
#normalized_counts <- normalized_counts[order(normalized_counts_mad, decreasing=T), ]
{aa<-as.data.frame(res)
Gene<-rownames(aa)
da<-data.frame(Gene,aa)
bb<-as.data.frame(normalized_counts)
Gene<-rownames(bb)
db<-data.frame(Gene,bb)
aadd<-merge(db,da,by="Gene")}
write.csv(as.data.frame(aadd),
file=paste(degs[i],paste(fn,"_all_count_exp.csv",sep="")),row.names = FALSE)
diff_gene_deseq2 <- subset(aadd,padj < 0.05 & (log2FoldChange >1 | log2FoldChange < -1))
row.names(diff_gene_deseq2)<-diff_gene_deseq2[,1]
deg<-merge(diff_gene_deseq2,subset(funaa , funaa$Gene%in%diff_gene_deseq2[,1]),by="Gene",all=T)
write.csv(as.data.frame(deg), file=paste(degs[i],paste(fn,"_deff_count_exp.csv",sep="")),row.names = FALSE)
write.table(as.data.frame(diff_gene_deseq2[1]), file=paste(degs[i],paste(fn,"_DEG_ID.txt",sep="")),quote=F,row.names = F)
rld <- rlog(dds, blind=FALSE)
rlogMat <- assay(rld)
rlogMat <- rlogMat[order(normalized_counts_mad, decreasing=T), ]
hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
pearson_cor <- as.matrix(cor(rlogMat, method="pearson"))
hc <- hcluster(t(rlogMat), method="pearson")
pdf(file=paste(degs[i],paste(fn,"_cluster.pdf",sep="")), pointsize=10)
heatmap.2(pearson_cor, Rowv=as.dendrogram(hc), symm=T, trace="none",
col=hmcol, margins=c(11,11), main="The pearson correlation of each
sample")
dev.off()
pca_data <- plotPCA(rld, intgroup=c("group"), returnData=T, ntop=50000)
pdf(file=paste(degs[i],paste(fn,"_pca.pdf",sep="")), pointsize=10)
plot(pca_data[,1:2],pch=19)
text(pca_data[,1],pca_data[,2]+1,row.names(pca_data),cex=0.5 )
dev.off()
}
}
}
|
# process stransky1 and stransky2 data
cat('processing stransky1 and stransky2 ...\n\n')
rm(list = ls())
source('functions/setup_functions.R')
# specify dataset name and set file names appropriately
ds1_name <- 'stransky1'
file1_expr <- paste0('../../data/processed/', ds1_name, '.RData')
file1_clinical <- paste0('../../data/clinical/', ds1_name, '.RData')
ds2_name <- 'stransky2'
file2_expr <- paste0('../../data/processed/', ds2_name, '.RData')
file2_clinical <- paste0('../../data/clinical/', ds2_name, '.RData')
# load expression and platform data
load("../../data/original/E.TABM.147.combine.reps.RData")
load("../../data/platforms/GPL91.RData")
# extract clinical information for stage
stage = rep(NA,length(E.TABM.147.p$Characteristics..DiseaseStaging.))
stage[grep("T[a1]",E.TABM.147.p$Characteristics..DiseaseStaging.)] <- 'nmi'
stage[grep("T[2-4]",E.TABM.147.p$Characteristics..DiseaseStaging.)] <- 'mi'
# extract clinical information for grade
tmp = E.TABM.147.p$Characteristics..TumorGrading.
grade = rep(NA, length(tmp))
grade[tmp == "G1" | tmp == "G1-G2"] <- "lg"
grade[tmp == "G2" | tmp == "G3"] <- "hg"
# extract clinical information for tumor
tumor = as.character(E.TABM.147.p$Characteristics..DiseaseState.)
tumor[tumor == "bladder carcinoma"] = "tumor"
# create clinical data table
stransky_clinical <- create_clinical_table(id = paste0("P", E.TABM.147.p$Source.Name),
tumor = tumor, grade = grade,
stage = stage)
# Dataset contains two platforms
# samples 1 - 31 are on U95Av (GPL91)
# samples 32 - 62 are on U95Av2 (GPL8300)
# However, both platforms are identical in this dataset, so
# we will just use GPL91
# get gene-level expression values
stransky.expr <- get_expression(E.TABM.147.expr, GPL91)
# split into 2 datasets
i1 <- 1:31
i2 <- 32:62
stransky1.expr <- stransky.expr[,i1]
stransky2.expr <- stransky.expr[,i2]
stransky1_clinical <- stransky_clinical[i1,]
stransky2_clinical <- stransky_clinical[i2,]
stransky2_clinical$tumor <- NULL # remove since all are tumors
# generate boxplots
generate_boxplot(stransky1.expr, 'stransky1')
generate_boxplot(stransky2.expr, 'stransky2')
# save expression and clinical data
save(stransky1.expr, file = file1_expr)
save(stransky2.expr, file = file2_expr)
save(stransky1_clinical, file = file1_clinical)
save(stransky2_clinical, file = file2_clinical)
| /setup/R/process/process_stransky.R | no_license | cjkelly1738/BC-BET | R | false | false | 2,430 | r | # process stransky1 and stransky2 data
cat('processing stransky1 and stransky2 ...\n\n')
rm(list = ls())
source('functions/setup_functions.R')
# specify dataset name and set file names appropriately
ds1_name <- 'stransky1'
file1_expr <- paste0('../../data/processed/', ds1_name, '.RData')
file1_clinical <- paste0('../../data/clinical/', ds1_name, '.RData')
ds2_name <- 'stransky2'
file2_expr <- paste0('../../data/processed/', ds2_name, '.RData')
file2_clinical <- paste0('../../data/clinical/', ds2_name, '.RData')
# load expression and platform data
load("../../data/original/E.TABM.147.combine.reps.RData")
load("../../data/platforms/GPL91.RData")
# extract clinical information for stage
stage = rep(NA,length(E.TABM.147.p$Characteristics..DiseaseStaging.))
stage[grep("T[a1]",E.TABM.147.p$Characteristics..DiseaseStaging.)] <- 'nmi'
stage[grep("T[2-4]",E.TABM.147.p$Characteristics..DiseaseStaging.)] <- 'mi'
# extract clinical information for grade
tmp = E.TABM.147.p$Characteristics..TumorGrading.
grade = rep(NA, length(tmp))
grade[tmp == "G1" | tmp == "G1-G2"] <- "lg"
grade[tmp == "G2" | tmp == "G3"] <- "hg"
# extract clinical information for tumor
tumor = as.character(E.TABM.147.p$Characteristics..DiseaseState.)
tumor[tumor == "bladder carcinoma"] = "tumor"
# create clinical data table
stransky_clinical <- create_clinical_table(id = paste0("P", E.TABM.147.p$Source.Name),
tumor = tumor, grade = grade,
stage = stage)
# Dataset contains two platforms
# samples 1 - 31 are on U95Av (GPL91)
# samples 32 - 62 are on U95Av2 (GPL8300)
# However, both platforms are identical in this dataset, so
# we will just use GPL91
# get gene-level expression values
stransky.expr <- get_expression(E.TABM.147.expr, GPL91)
# split into 2 datasets
i1 <- 1:31
i2 <- 32:62
stransky1.expr <- stransky.expr[,i1]
stransky2.expr <- stransky.expr[,i2]
stransky1_clinical <- stransky_clinical[i1,]
stransky2_clinical <- stransky_clinical[i2,]
stransky2_clinical$tumor <- NULL # remove since all are tumors
# generate boxplots
generate_boxplot(stransky1.expr, 'stransky1')
generate_boxplot(stransky2.expr, 'stransky2')
# save expression and clinical data
save(stransky1.expr, file = file1_expr)
save(stransky2.expr, file = file2_expr)
save(stransky1_clinical, file = file1_clinical)
save(stransky2_clinical, file = file2_clinical)
|
# Spatial data analysis
# Hugo Andres Dorado
library(sf)
library(tidyverse)

# Read the departments-of-Peru polygon layer and inspect it.
peru <- read_sf("datos/Peru_departamento.shp")
print(peru)

# Plot every attribute, then just the department-name column,
# then the same map with ggplot2.
plot(peru)
plot(peru["NOMBDEP"])
ggplot() + geom_sf(data = peru, aes(fill = NOMBDEP))

# Add each department's area in square kilometres (st_area() returns m^2).
peru <- peru %>% mutate(areakm2 = as.numeric(st_area(geometry) / (1000 * 1000)))
print(peru)

# Sort areas from smallest to largest, and look at one department.
arrange(peru, areakm2)
peru %>% filter(NOMBDEP == "PASCO")

plot(st_geometry(peru))

# Highlight departments larger than 50,000 km^2 on top of the base map.
# (Use TRUE, not T: T is an ordinary variable and can be reassigned.)
departamentos <- peru %>% filter(areakm2 > 50000)
plot(st_geometry(departamentos), add = TRUE, col = "red")

# Exercise with raster data (to be completed).
# Removed a stray trailing `filter` token that only auto-printed the
# dplyr::filter function object.
| /Scripts/Datos_espaciales.R | no_license | hdorado/Curso_Machine_learning_AG | R | false | false | 637 | r |
# Analisis de datos espaciales
# Hugo Andres Dorado
library(sf)
library(tidyverse)
peru <- read_sf("datos/Peru_departamento.shp")
print(peru)
plot(peru)
plot(peru["NOMBDEP"])
ggplot() + geom_sf(data = peru, aes(fill = NOMBDEP))
#
peru <- peru %>% mutate(areakm2 =as.numeric( st_area(geometry) /(1000*1000)))
print(peru)
# Ordenar a areas de menor a mayor
arrange(peru,areakm2)
peru %>% filter(NOMBDEP == "PASCO")
plot(st_geometry(peru))
departamentos <- peru %>% filter(areakm2 >50000)
plot(st_geometry(departamentos),add=T,col="red")
# Ejercicio con raster
filter
|
#' Nominal votes in the Brazilian Federal Senate
#' @description This is a dataset of the nominal votes in the Brazilian
#' Federal Senate, from all those available on the API from 1991 onwards.
#' @note These data can easily be grouped by legislature if so desired, using the
#' \code{legislature} variable.
#' @return
#' \itemize{
#' \item{\code{vote_date: }}{\code{POSIXct}, date the vote took place.}
#' \item{\code{bill_id: }}{id of the bill in the Senate API database.}
#' \item{\code{bill: }}{bill type, year and number.}
#' \item{\code{legislature: }}{legislature number.}
#' \item{\code{senator_id: }}{unique id of the senator.}
#' \item{\code{senator_name: }}{the senator's name.}
#' \item{\code{senator_party: }}{political party the senator was in when the vote took place.}
#' \item{\code{senator_state: }}{state the senator represented when the vote took place.}
#' }
#'
#' @format A data frame with 60691 rows and 8 variables
"senate_nominal_votes"
#> [1] "senate_nominal_votes"
| /R/sen_nominal_votes.R | no_license | DATAUNIRIO/congressbr | R | false | false | 1,001 | r | #' Nominal votes in the Brazilian Federal Senate
#' @description This is a dataset of the nominal votes in the Brazilian
#' Federal Senate, from all those available on the API from 1991 onwards.
#' @note These data can easily be grouped by legislature if so desired, using the
#' \code{legislature} variable.
#' @return
#' \itemize{
#' \item{\code{vote_date: }}{\code{POSIXct}, date the vote took place.}
#' \item{\code{bill_id: }}{id of the bill in the Senate API database.}
#' \item{\code{bill: }}{bill type, year and number.}
#' \item{\code{legislature: }}{legislature number.}
#' \item{\code{senator_id: }}{unique id of the senator.}
#' \item{\code{senator_name: }}{the senator's name.}
#' \item{\code{senator_party: }}{political party the senator was in when the vote took place.}
#' \item{\code{senator_state: }}{state the senator represented when the vote took place.}
#' }
#'
#' @format A data frame with 60691 rows and 8 variables
"senate_nominal_votes"
#> [1] "senate_nominal_votes"
|
#' Calculates distance covariance and distance correlation matrices
#'
#' @param X A data.frame or matrix.
#' @param Y Either NULL or a data.frame or a matrix with the same number of rows as X. If only X is provided, distance covariances/correlations are calculated between all groups in X. If X and Y are provided, distance covariances/correlations are calculated between all groups in X and all groups of Y.
#' @param calc.dcov logical; specifies if the distance covariance matrix is calculated.
#' @param calc.dcor logical; specifies if the distance correlation matrix is calculated.
#' @param calc.cor If set as "pearson", "spearman" or "kendall", a corresponding correlation matrix is additionally calculated.
#' @param calc.pvalue.cor logical; IF TRUE, a p-value based on the Pearson or Spearman correlation matrix is calculated (not implemented for calc.cor ="kendall") using Hmisc::rcorr.
#' @param return.data logical; specifies if the dcmatrix object should contain the original data.
#' @param test specifies the type of test that is performed, "permutation" performs a Monte Carlo Permutation test. "gamma" performs a test based on a gamma approximation of the test statistic under the null. "conservative" performs a conservative two-moment approximation. "bb3" performs a quite precise three-moment approximation and is recommended when computation time is not an issue.
#' @param adjustp If setting this parameter to "holm", "hochberg", "hommel", "bonferroni", "BH", "BY" or "fdr", corresponding adjusted p-values are additionally returned for the distance covariance test.
#' @param b specifies the number of random permutations used for the permutation test. Ignored for all other tests.
#' @param affine logical; indicates if the affinely transformed distance covariance should be calculated or not.
#' @param standardize logical; indicates if each sample should be scaled to unit variance before computing the distance covariance (ignored when affine = TRUE).
#' @param bias.corr logical; specifies if the bias corrected version of the sample distance covariance \insertCite{huo2016fast}{dcortools} should be calculated.
#' @param group.X A vector, each entry specifying the group membership of the respective column in X. Each group is handled as one sample for calculating the distance covariance/correlation matrices. If NULL, every sample is handled as an individual group.
#' @param group.Y A vector, each entry specifying the group membership of the respective column in Y. Each group is handled as one sample for calculating the distance covariance/correlation matrices. If NULL, every sample is handled as an individual group.
#' @param metr.X Either a single metric or a list providing a metric for each group in X (see examples).
#' @param metr.Y see metr.X.
#' @param use "all" uses all observations, "complete.obs" excludes NA's, "pairwise.complete.obs" uses pairwise complete observations for each comparison.
#' @param algorithm specifies the algorithm used for calculating the distance covariance.
#'
#' "fast" uses an O(n log n) algorithm if the observations are one-dimensional and metr.X and metr.Y are either "euclidean" or "discrete", see also \insertCite{huo2016fast;textual}{dcortools}.
#'
#' "memsave" uses a memory saving version of the standard algorithm with computational complexity O(n^2) but requiring only O(n) memory.
#'
#' "standard" uses the classical algorithm. User-specified metrics always use the classical algorithm.
#'
#' "auto" chooses the best algorithm for the specific setting using a rule of thumb.
#'
#' "memsave" is typically very inefficient for dcmatrix and should only be applied in exceptional cases.
#'
#' @param fc.discrete logical; if TRUE, the "discrete" metric is applied automatically on samples of type "factor" or "character".
#' @param calc.dcov.pw logical; If TRUE, a distance covariance matrix between the univariate observations/columns is additionally calculated. Not meaningful if group.X and group.Y are not specified.
#' @param calc.dcor.pw logical; If TRUE, a distance correlation matrix between the univariate observations/columns is additionally calculated. Not meaningful if group.X and group.Y are not specified.
#' @param test.pw specifies a test (see argument "test") that is performed between all single observations/columns.
#' @param metr.pw.X Either a single metric or a list providing a metric for each single observation/column in X (see metr.X).
#' @param metr.pw.Y See metr.pw.X.
#' @return S3 object of class "dcmatrix" with the following components
#' \item{X, Y}{original data (if return.data = TRUE).}
#' \item{dcov, dcor}{distance covariance/correlation matrices between the groups specified in group.X/group.Y (if calc.dcov/calc.dcor = TRUE).}
#' \item{corr}{correlation matrix between the univariate observations/columns (if calc.cor is "pearson", "spearman" or "kendall").}
#' \item{pvalue}{matrix of p-values based on a corresponding distance covariance test based on the entries in dcov (if argument test is not "none").}
#' \item{pvalue.adj}{matrix of p-values adjusted for multiple comparisons using the method specified in argument adjustp.}
#' \item{pvalue.cor}{matrix of p-values based on "pearson"/"spearman" correlation (if calc.cor is "pearson" or "spearman" and calc.pvalue.cor = TRUE).}
#' \item{dcov.pw, dcor.pw}{distance covariance/correlation matrices between the univariate observations (if calc.dcov.pw/calc.dcor.pw = TRUE).}
#' \item{pvalue.pw}{matrix of p-values based on a corresponding distance covariance test based on the entries in dcov.pw (if argument test is not "none").}
#' @export
#' @references
#' \insertRef{berschneider2018complex}{dcortools}
#'
#' \insertRef{bottcher2017detecting}{dcortools}
#'
#' \insertRef{dueck2014affinely}{dcortools}
#'
#' \insertRef{huang2017statistically}{dcortools}
#'
#' \insertRef{huo2016fast}{dcortools}
#'
#' \insertRef{lyons2013distance}{dcortools}
#'
#' \insertRef{sejdinovic2013equivalence}{dcortools}
#'
#' \insertRef{szekely2007}{dcortools}
#'
#' \insertRef{szekely2009brownian}{dcortools}
#'
#'@examples
#' X <- matrix(rnorm(1000), ncol = 10)
#' dcm <- dcmatrix(X, test="bb3",calc.cor = "pearson", calc.pvalue.cor = T, adjustp = "BH")
#' dcm <- dcmatrix(X, test="bb3",calc.cor = "pearson", calc.pvalue.cor = T, adjustp = "BH", group.X = c(rep(1, 5), rep(2, 5)), calc.dcor.pw = T, test.pw = "bb3")
#'
#' Y <- matrix(rnorm(600), ncol = 6)
#' Y[,6] <- rbinom(100, 4, 0.3)
#' dcm <- dcmatrix(X, Y, test="bb3",calc.cor = "pearson", calc.pvalue.cor = T, adjustp = "BH")
#' dcm <- dcmatrix(X, Y, test="bb3",calc.cor = "pearson", calc.pvalue.cor = T, adjustp = "BH", group.X = c(rep("group1", 5), rep("group2", 5)), group.Y = c(rep("group1", 5), "group2"), metr.X = "gaussauto", metr.Y = list("group1" = "gaussauto", "group2" = "discrete"))
# User-facing entry point (see roxygen block above).  Delegates the actual
# computation to the internal .dcmatrixmain(); when any of the ".pw"
# (pairwise / per-column) options are requested, a second pass is run with
# the group arguments dropped and its results attached under ".pw" names.
dcmatrix <- function (X,
                      Y = NULL,
                      calc.dcov = TRUE,
                      calc.dcor = TRUE,
                      calc.cor = "none",
                      calc.pvalue.cor = FALSE,
                      return.data = TRUE,
                      test = "none",
                      adjustp = "none",
                      b = 499,
                      affine = FALSE,
                      standardize = FALSE,
                      bias.corr = FALSE,
                      group.X = NULL,
                      group.Y = NULL,
                      metr.X = "euclidean",
                      metr.Y = "euclidean",
                      use="all",
                      algorithm ="auto",
                      fc.discrete = FALSE,
                      calc.dcor.pw = FALSE,
                      calc.dcov.pw = FALSE,
                      test.pw = "none",
                      metr.pw.X = "euclidean",
                      metr.pw.Y = "euclidean"
) {
  # Main pass: group-wise distance covariance/correlation and chosen test.
  output <- .dcmatrixmain(X, Y, calc.dcov, calc.dcor, calc.cor, calc.pvalue.cor, return.data, test, adjustp, b, affine, standardize, bias.corr, group.X, group.Y, metr.X, metr.Y, use, algorithm, fc.discrete)
  output$call <- match.call()
  if (calc.dcor.pw | calc.dcov.pw | test.pw != "none") {
    # Pairwise pass over raw columns: groups are dropped (group.X/Y = NULL)
    # and calc.cor = "no" (not in the accepted set) disables the correlation
    # matrix for this pass.
    output2 <- .dcmatrixmain(X, Y, calc.dcov = calc.dcov.pw, calc.cor = "no", calc.dcor = calc.dcor.pw, calc.pvalue.cor = FALSE, return.data, test = test.pw, adjustp, b, affine, standardize, bias.corr, group.X = NULL, group.Y = NULL, metr.X = metr.pw.X, metr.Y = metr.pw.Y, use, algorithm, fc.discrete)
    # Attach pairwise results under ".pw" names.
    output$dcor.pw <- output2$dcor
    output$dcov.pw <- output2$dcov
    output$pvalue.pw <- output2$pvalue
    output$metr.pw.X <- metr.pw.X
    output$metr.pw.Y <- metr.pw.Y
    output$dX.pw <- output2$dX
    output$dY.pw <- output2$dY
    # Label pairwise matrices with the original column names (rows = X
    # columns, columns = Y columns, or X columns again when Y is NULL).
    names.X <- names.Y <- colnames(X)
    if (!is.null(Y))
      names.Y <- colnames(Y)
    if (!is.null(output$dcor.pw)) {
      rownames(output$dcor.pw) <- names.X
      colnames(output$dcor.pw) <- names.Y
    }
    if (!is.null(output$dcov.pw)) {
      rownames(output$dcov.pw) <- names.X
      colnames(output$dcov.pw) <- names.Y
    }
    if (!is.null(output$pvalue.pw)) {
      rownames(output$pvalue.pw) <- names.X
      colnames(output$pvalue.pw) <- names.Y
    }
  }
  # Record the pairwise settings on the returned object.
  output$test.pw <- test.pw
  output$calc.dcor.pw <- calc.dcor.pw
  output$calc.dcov.pw <- calc.dcov.pw
  return(output)
}
# Internal workhorse for dcmatrix(): computes distance covariance/correlation
# matrices (and optional p-values / Pearson-type correlations) between the
# column groups of X (and Y).  Relies on package-internal helpers
# (prep.fast / prep.memsave / prep.standard, preptoterms.*, sampleterms.*,
# normalize.sample, scale.sample, vector_prod_sum, calcmom).
# Returns a partially populated list that dcmatrix() finishes and classes.
.dcmatrixmain <- function (X,
                           Y = NULL,
                           calc.dcov = TRUE,
                           calc.dcor = TRUE,
                           calc.cor = "none",
                           calc.pvalue.cor = FALSE,
                           return.data = TRUE,
                           test = "none",
                           adjustp = "none",
                           b = 499,
                           affine = FALSE,
                           standardize = FALSE,
                           bias.corr = FALSE,
                           group.X = NULL,
                           group.Y = NULL,
                           metr.X = "euclidean",
                           metr.Y = "euclidean",
                           use = "all",
                           algorithm = "auto",
                           fc.discrete = FALSE) {
  output <- list()
  if (return.data) {
    output$X <- X
    output$Y <- Y
  } else {
    output <- NULL  # entries are re-created below via output$... assignment
  }
  withY <- ifelse(is.null(Y), FALSE, TRUE)
  ## Decode the requested test into flags.
  dogamma <- docons <- dobb3 <- doperm <- donotest <- FALSE
  if (test == "none")
    donotest <- TRUE
  else if (test == "gamma")
    dogamma <- TRUE
  else if (test == "conservative")
    docons <- TRUE
  else if (test == "bb3")
    dobb3 <- TRUE
  else if (test == "permutation")
    doperm <- TRUE
  else
    stop ("Test must be one of \"none\", \"permutation\", \"gamma\", \"bb3\" or \"conservative\"")
  ## Missing-value strategy.
  use.all <- use.pw <- FALSE
  if (use == "complete.obs") {
    cc <- which(complete.cases(X))
    if (withY) {
      cc <- intersect(cc, which(complete.cases(Y)))
      Y <- Y[cc, ]
    }
    X <- X[cc, ]
    use.all <- TRUE
  } else if (use == "all") {
    use.all <- TRUE
  } else if (use == "pairwise.complete.obs") {
    use.pw <- TRUE
  } else {
    stop("use must be one of \"all\", \"complete.obs\" or \"pairwise.complete.obs\"")
  }
  if (is.vector(X)) {
    X <- as.matrix(X)
  }
  p <- ncol(X)
  n <- nrow(X)
  ## Group handling for X: default = one group per column.
  if (is.null(group.X)) {
    if (is.null(colnames(X)))
      group.X <- 1:p
    else
      group.X <- colnames(X)
  }
  names.X <- names.Y <- unique(group.X)
  dX <- dY <- length(unique(group.X))
  ms.grpX <- ms.grpY <- NULL
  groupslistX <- lapply(1:dX, function(t) which(group.X == names.X[t]))
  pX <- sapply(1:dX, function(t) length(groupslistX[[t]]))
  prepX <- as.list(rep(NA, dX))
  dvarX <- rep(NA, dX)
  ## Normalize metr.X into a list with one entry per group.  A length-2
  ## vector whose second element is numeric is a (metric, parameter) pair
  ## and is replicated for all groups.
  lmX <- length(metr.X)
  if (lmX == 1) {
    metr.X <- as.list(replicate(dX, metr.X))
  } else if (lmX == 2) {
    ischar <- suppressWarnings(is.na(as.numeric(metr.X[2])))
    if (!ischar)
      metr.X <- lapply(1:dX, function(u) metr.X)
  }
  if (is.character(metr.X) & lmX == dX) {
    ischar <- suppressWarnings(is.na(as.numeric(metr.X[2])))
    if (ischar)
      metr.X <- as.list(metr.X)
  }
  ## Flag groups that contain missing values (excluded from computation
  ## unless use = "pairwise.complete.obs").
  if (use.all) {
    # BUGFIX: previously tested column t instead of the columns of group t.
    ms.X <- sapply(1:dX, function(t) any(!complete.cases(X[, groupslistX[[t]], drop = FALSE])))
    ms.grpX <- which(ms.X)
  } else {
    ms.X <- rep(FALSE, dX)
    ms.grpX <- numeric(0)
  }
  ## Affine normalization or standardization of each group of X.
  if (affine) {
    for (j in 1 : dX) {
      if (use.all) {
        X[, groupslistX[[j]]] <- normalize.sample(X[, groupslistX[[j]]], n, pX[j])
      } else {
        cc <- complete.cases(X[, groupslistX[[j]], drop = FALSE])
        ncc <- sum(cc)  # BUGFIX: was length(cc), which always equals n
        X[cc, groupslistX[[j]]] <- normalize.sample(X[cc, groupslistX[[j]]], ncc, pX[j])
      }
    }
  } else if (standardize) {
    for (j in 1 : dX) {  # BUGFIX: loop over groups was missing (j was unbound)
      if (use.all) {
        X[, groupslistX[[j]]] <- scale.sample(X[, groupslistX[[j]]], n, pX[j])
      } else {
        cc <- complete.cases(X[, groupslistX[[j]], drop = FALSE])
        ncc <- sum(cc)
        X[cc, groupslistX[[j]]] <- scale.sample(X[cc, groupslistX[[j]]], ncc, pX[j])
      }
    }
  }
  ## Analogous setup for Y.
  if (withY) {
    lmY <- length(metr.Y)
    if (is.vector(Y)) {
      Y <- as.matrix(Y)
    }
    q <- ncol(Y)
    m <- nrow(Y)
    if (m != n)
      stop("X and Y must have same number of rows (samples)")
    if (is.null(group.Y)) {
      if (is.null(colnames(Y)))
        group.Y <- 1 : q
      else
        group.Y <- colnames(Y)
    }
    names.Y <- unique(group.Y)
    dY <- length(unique(group.Y))
    groupslistY <- lapply(1:dY, function(t) which(group.Y == names.Y[t]))
    pY <- sapply(1:dY, function(t) length(groupslistY[[t]]))
    prepY <- as.list(rep(NA, dY))
    dvarY <- rep(NA, dY)
    if (use.all) {
      # BUGFIX: previously tested column t instead of the columns of group t.
      ms.Y <- sapply(1:dY, function(t) any(!complete.cases(Y[, groupslistY[[t]], drop = FALSE])))
      ms.grpY <- which(ms.Y)
    } else {
      ms.Y <- rep(FALSE, dY)
      ms.grpY <- numeric(0)
    }
    if (lmY == 1) {
      metr.Y <- as.list(replicate(dY, metr.Y))
    } else if (lmY == 2) {  # BUGFIX: condition previously tested lmX
      ischar <- suppressWarnings(is.na(as.numeric(metr.Y[2])))
      if (!ischar)
        metr.Y <- lapply(1:dY, function(u) metr.Y)
    }
    if (is.character(metr.Y) & lmY == dY) {
      ischar <- suppressWarnings(is.na(as.numeric(metr.Y[2])))
      if (ischar)
        metr.Y <- as.list(metr.Y)
    }
    if (affine) {
      for (j in 1 : dY) {
        if (use.all) {
          Y[, groupslistY[[j]]] <- normalize.sample(Y[, groupslistY[[j]]], n, pY[j])
        } else {
          cc <- complete.cases(Y[, groupslistY[[j]], drop = FALSE])
          ncc <- sum(cc)  # BUGFIX: was length(cc)
          # BUGFIX: previously indexed groupslistX instead of groupslistY.
          Y[cc, groupslistY[[j]]] <- normalize.sample(Y[cc, groupslistY[[j]]], ncc, pY[j])
        }
      }
    } else if (standardize) {
      for (j in 1 : dY) {  # BUGFIX: loop missing; branch also indexed X/groupslistX
        if (use.all) {
          Y[, groupslistY[[j]]] <- scale.sample(Y[, groupslistY[[j]]], n, pY[j])
        } else {
          cc <- complete.cases(Y[, groupslistY[[j]], drop = FALSE])
          ncc <- sum(cc)
          Y[cc, groupslistY[[j]]] <- scale.sample(Y[cc, groupslistY[[j]]], ncc, pY[j])
        }
      }
    }
  }
  ## Algorithm selection: the O(n log n) "fast" path requires univariate
  ## groups and euclidean/discrete metrics.
  if (algorithm == "auto") {
    gofast <- (((p == length(names.X))) * (n > 200)) & (!dobb3) * all(metr.X %in% c("euclidean", "discrete"))
    if (withY)
      gofast <- gofast * (q == length(names.Y)) * all(metr.Y %in% c("euclidean", "discrete"))
    if (gofast) {
      algorithm <- "fast"
    } else {
      algorithm <- "standard"
    }
  }
  alg.fast <- alg.standard <- alg.memsave <- FALSE
  if (algorithm == "fast") {
    alg.fast <- TRUE
    if (doperm)
      terms.smp <- function(terms, smp) {sampleterms.fast.matr(terms, smp)}
  } else if (algorithm == "standard") {
    alg.standard <- TRUE
    if (doperm)
      terms.smp <- function(terms, smp, ndisc = NULL) {sampleterms.standard(terms, smp)}
  } else if (algorithm == "memsave") {
    alg.memsave <- TRUE
    if (doperm)
      terms.smp <- function(terms, smp, ndisc = NULL) {sampleterms.memsave(terms, smp)}
  } else
    stop ("Algorithm must be one of \"fast\", \"standard\", \"memsave\" or \"auto\"")
  if (!alg.standard & dobb3)
    stop("bb3 p-value calculation is only possible with algorithm=\"standard\"!")
  ## Closures turning the distance terms into (bias-corrected) dcov^2, dcov
  ## and dcor.
  if (bias.corr == TRUE) {
    termstodcov2 <- function(aijbij, Sab, Tab, n) {
      aijbij / n / (n - 3) - 2 * Sab / n / (n - 2) / (n - 3) + Tab / n / (n - 1) / (n - 2) / (n - 3)
    }
    # Bias-corrected dcov^2 can be negative; keep the sign under the root.
    dcov2todcov <- function(dcov2) {
      sqrt(abs(dcov2)) * sign(dcov2)
    }
    dcov2todcor <- function(dcov2, dvarX, dvarY) {
      (sqrt(abs(dcov2)) * sign(dcov2)) / sqrt(sqrt(dvarX * dvarY))
    }
  } else {
    termstodcov2 <- function(aijbij, Sab, Tab, n) {
      aijbij / n / n - 2 * Sab / n / n / n + Tab / n / n / n / n
    }
    dcov2todcov <- function(dcov2) {
      sqrt(dcov2)
    }
    dcov2todcor <- function(dcov2, dvarX, dvarY) {
      sqrt(dcov2) / sqrt(sqrt(dvarX * dvarY))
    }
  }
  ## Test-specific p-value closures; unused arguments are absorbed by "...".
  if (dogamma) {
    testfunc <- function(terms, ...) {
      n <- terms$ncc
      Saa <- vector_prod_sum(terms$aidot, terms$aidot)
      Sbb <- vector_prod_sum(terms$bidot, terms$bidot)
      Sab <- vector_prod_sum(terms$aidot, terms$bidot)
      dvarX <- terms$aijaij / n / (n - 3) - 2 * Saa / n / (n - 2) / (n - 3) + terms$adotdot * terms$adotdot / n / (n - 1) / (n - 2) / (n - 3)
      dvarY <- terms$bijbij / n / (n - 3) - 2 * Sbb / n / (n - 2) / (n - 3) + terms$bdotdot * terms$bdotdot / n / (n - 1) / (n - 2) / (n - 3)
      dcov2 <- terms$aijbij / n / (n - 3) - 2 * Sab / n / (n - 2) / (n - 3) + terms$adotdot * terms$bdotdot / n / (n - 1) / (n - 2) / (n - 3)
      U1 <- dvarX * dvarY
      U2 <- terms$adotdot / n / (n - 1)
      U3 <- terms$bdotdot / n / (n - 1)
      alph <- 1 / 2 * (U2 ^ 2 * U3 ^ 2) / U1
      beta <- 1 / 2 * (U2 * U3) / U1
      stat <- n * dcov2 + U2 * U3
      pval <- pgamma(stat, alph, beta, lower.tail = FALSE)
      return(pval)
    }
  } else if (doperm) {
    testfunc <- function(dcov2, smp, terms, ...) {
      n <- terms$ncc
      if (is.na(dcov2))
        return(NA)
      Tab <- terms$adotdot * terms$bdotdot
      reps <- lapply(1:b, function(t) {
        terms.sample <- terms.smp(terms, smp[[t]])
        return(termstodcov2(terms.sample$aijbij, terms.sample$Sab, Tab, n))
      })
      pval <- (1 + length(which(reps >= dcov2))) / (1 + b)
      return(pval)
    }
  } else if (docons) {
    testfunc <- function(terms, moms.X, moms.Y, ...) {
      n <- terms$ncc
      est.m2 <- sum((moms.X * moms.Y)) / n ^ 10
      est.m1 <- terms$adotdot * terms$bdotdot / n ^ 3 / (n - 1)
      est.var <- (est.m2 - est.m1 ^ 2)
      alpha <- sqrt(est.var / 2 / est.m1 ^ 2)
      stat <- terms$aijbij / n - 2 * vector_prod_sum(terms$aidot, terms$bidot) / n ^ 2 + terms$adotdot * terms$bdotdot / n ^ 3
      pval <- pchisq(stat * sqrt(2) / sqrt(est.var), df = 1 / alpha, lower.tail = FALSE)
      return(pval)
    }
  } else if (dobb3) {
    testfunc <- function(terms, moms.X, moms.Y, ...) {
      n <- terms$ncc
      est.m2 <- sum((moms.X$vc * moms.Y$vc)) / n ^ 10
      est.m1 <- terms$adotdot * terms$bdotdot / n ^ 3 / (n - 1)
      est.var <- (est.m2 - est.m1 ^ 2)
      est.skw <- moms.X$skw * moms.Y$skw
      beta <- est.skw / sqrt(8)
      stat <- terms$aijbij / n - 2 * vector_prod_sum(terms$aidot, terms$bidot) / n ^ 2 + terms$adotdot * terms$bdotdot / n ^ 3
      centstat <- (stat - est.m1) / sqrt(est.var)
      pval <- pchisq((centstat * sqrt(2) + 1 / beta) / beta , df = 1 / beta ^ 2, lower.tail = FALSE)
      return(pval)
    }
  } else if (donotest) {
    testfunc <- function(...) {}
  }
  if (!calc.dcov) {
    dcov2todcov <- function(...) {}
  }
  if (!calc.dcor) {
    dcov2todcor <- function(...) {}
  }
  ## One shared set of permutations when all observations are used; with
  ## pairwise-complete observations they are redrawn per comparison below.
  if (doperm & use.all) {
    perms <- lapply(1:b, function(t) sample(1:n))
  } else {
    perms <- NULL
  }
  extendoutput <- doperm | ((dobb3 | docons) * use.pw)
  ## Automatically switch factor/character groups to the discrete metric.
  if (fc.discrete) {
    for (j in 1:dX) {
      if (is.factor(X[, groupslistX[[j]]]) | is.character(X[, groupslistX[[j]]]))
        metr.X[[j]] <- "discrete"
    }
    if (withY) {
      for (j in 1:dY) {
        if (is.factor(Y[, groupslistY[[j]]]) | is.character(Y[, groupslistY[[j]]]))
          metr.Y[[j]] <- "discrete"
      }
    }
  }
  ## Optional classical correlation matrix (always column-wise).
  if (calc.cor %in% c("spearman", "kendall", "pearson")) {
    # NOTE: use = "all" relies on partial matching to cor's "all.obs".
    output$corr <- cor(X, Y, use = use, method = calc.cor)
    if (calc.pvalue.cor) {
      if (calc.cor %in% c("spearman", "pearson")) {
        if (!withY) {
          corrp <- Hmisc::rcorr(X, type = calc.cor)
          output$pvalue.cor <- corrp$P
          diag(output$pvalue.cor) <- 0
          if (use.all)
            output$pvalue.cor[which(corrp$n < n, arr.ind = TRUE)] <- NA
        } else {
          corrp <- Hmisc::rcorr(X, Y, type = calc.cor)  # BUGFIX: type was not forwarded
          # BUGFIX: index the joint matrix by column counts (p, q), not
          # group counts (dX, dY).
          output$pvalue.cor <- corrp$P[1:p, (p + 1):(p + q)]
          if (use.all)
            output$pvalue.cor[which(corrp$n[1:p, (p + 1):(p + q)] < n, arr.ind = TRUE)] <- NA
        }
      } else
        warning("P-Value calculation for Kendall correlation not implemented")
    }
  }
  if (alg.fast) {
    discrete.X <- (metr.X == "discrete")
    if (withY)
      discrete.Y <- (metr.Y == "discrete")
  }
  ## Result containers.
  if (calc.dcov) {
    output$dcov <- matrix(nrow = dX, ncol = dY)
    rownames(output$dcov) <- names.X
    colnames(output$dcov) <- names.Y
  }
  if (calc.dcor) {
    output$dcor <- matrix(nrow = dX, ncol = dY)
    rownames(output$dcor) <- names.X
    colnames(output$dcor) <- names.Y
    if (!withY)
      diag(output$dcor) <- 1
  }
  if (!donotest) {
    output$pvalue <- matrix(nrow = dX, ncol = dY)
    rownames(output$pvalue) <- names.X
    colnames(output$pvalue) <- names.Y
    if (!withY)
      diag(output$pvalue) <- 0
  }
  momsX <- momsY <- NULL
  if ((docons | dobb3) & !use.pw) {
    momsX <- as.list(rep(NA, dX))
    if (withY)
      momsY <- as.list(rep(NA, dY))
  }
  ## Precompute per-group distance terms and distance variances for X.
  for (j in setdiff(1:dX, ms.grpX)) {
    if (alg.fast) {
      # BUGFIX: previously indexed column j instead of the group's columns.
      prepX[[j]] <- prep.fast(X[, groupslistX[[j]]], n, discrete = discrete.X[j], pairwise = use.pw)
    } else if (alg.memsave) {
      prepX[[j]] <- prep.memsave(X[, groupslistX[[j]]], n, pX[j], metr.X = metr.X[[j]], pairwise = use.pw)
    } else if (alg.standard) {
      prepX[[j]] <- prep.standard(X[, groupslistX[[j]]], n, pX[j], metr.X = metr.X[[j]], pairwise = use.pw)
    }
    Saa <- vector_prod_sum(prepX[[j]]$aidot, prepX[[j]]$aidot)
    if ((docons | dobb3) & !use.pw) {
      momsX[[j]] <- calcmom(aijaij = prepX[[j]]$aijaij, Saa = Saa, adotdot = prepX[[j]]$adotdot, aidot = prepX[[j]]$aidot, distX = prepX[[j]]$distX, n = n, dobb3 = dobb3)
    }
    dvarX[j] <- termstodcov2(prepX[[j]]$aijaij, Saa, prepX[[j]]$adotdot * prepX[[j]]$adotdot, prepX[[j]]$ncc)
  }
  if (!withY & calc.dcov)
    # BUGFIX: plain sqrt() produced NaN for negative bias-corrected variances;
    # use the same signed root as the off-diagonal entries.
    diag(output$dcov) <- dcov2todcov(dvarX)
  if (withY) {
    for (j in setdiff(1:dY, ms.grpY)) {
      if (alg.fast) {
        # BUGFIX: previously indexed column j instead of the group's columns.
        prepY[[j]] <- prep.fast(Y[, groupslistY[[j]]], n, discrete = discrete.Y[j], pairwise = use.pw)
      } else if (alg.memsave) {
        prepY[[j]] <- prep.memsave(Y[, groupslistY[[j]]], n, pY[j], metr.X = metr.Y[[j]], pairwise = use.pw)
      } else if (alg.standard) {
        prepY[[j]] <- prep.standard(Y[, groupslistY[[j]]], n, pY[j], metr.X = metr.Y[[j]], pairwise = use.pw)
      }
      Sbb <- vector_prod_sum(prepY[[j]]$aidot, prepY[[j]]$aidot)
      if ((docons | dobb3) & !use.pw) {
        momsY[[j]] <- calcmom(aijaij = prepY[[j]]$aijaij, Saa = Sbb, adotdot = prepY[[j]]$adotdot, aidot = prepY[[j]]$aidot, distX = prepY[[j]]$distX, n = n, dobb3 = dobb3)
      }
      dvarY[j] <- termstodcov2(prepY[[j]]$aijaij, Sbb, prepY[[j]]$adotdot * prepY[[j]]$adotdot, prepY[[j]]$ncc)
    }
  }
  ## Main computation: all group pairs (upper triangle when Y is absent).
  if (!withY) {
    if (dX > 1) {
      for (i in setdiff(1:(dX - 1), ms.grpX)) {
        for (j in setdiff((i + 1):dX, ms.grpX)) {
          if (alg.fast) {
            terms <- preptoterms.fast(prepX[[i]], prepX[[j]], n, pairwise = use.pw, discrete.X[[i]], discrete.X[[j]], perm = extendoutput)
          } else if (alg.memsave) {
            terms <- preptoterms.memsave(prepX[[i]], prepX[[j]], metr.X[[i]], metr.X[[j]], n, pairwise = use.pw, perm = extendoutput)
          } else if (alg.standard) {
            terms <- preptoterms.standard(prepX[[i]], prepX[[j]], n, pairwise = use.pw, perm = extendoutput)
          }
          dcov2XY <- termstodcov2(terms$aijbij, vector_prod_sum(terms$aidot, terms$bidot), terms$adotdot * terms$bdotdot, terms$ncc)
          output$dcov[i, j] <- output$dcov[j, i] <- dcov2todcov(dcov2 = dcov2XY)
          if (use.pw) {
            # Pairwise-complete case: variances/moments/permutations depend
            # on the complete cases of this specific pair.
            Saa <- vector_prod_sum(terms$aidot, terms$aidot)
            Sbb <- vector_prod_sum(terms$bidot, terms$bidot)
            dvX <- termstodcov2(terms$aijaij, Saa, terms$adotdot * terms$adotdot, terms$ncc)
            dvY <- termstodcov2(terms$bijbij, Sbb, terms$bdotdot * terms$bdotdot, terms$ncc)
            if (docons | dobb3) {
              moms.X <- calcmom(aijaij = terms$aijaij, Saa = Saa, adotdot = terms$adotdot, distX = terms$distX, n = terms$ncc, aidot = terms$aidot, dobb3 = dobb3)
              moms.Y <- calcmom(aijaij = terms$bijbij, Saa = Sbb, adotdot = terms$bdotdot, distX = terms$distY, n = terms$ncc, aidot = terms$bidot, dobb3 = dobb3)
            }
            if (doperm) {
              perms <- lapply(1:b, function(t) sample(1:terms$ncc))
            }
          } else {
            dvX <- dvarX[i]
            dvY <- dvarX[j]
            if (docons | dobb3) {
              moms.X <- momsX[[i]]
              moms.Y <- momsX[[j]]
            }
          }
          output$dcor[i, j] <- output$dcor[j, i] <- dcov2todcor(dcov2 = dcov2XY, dvX, dvY)
          output$pvalue[i, j] <- output$pvalue[j, i] <- testfunc(dcov2 = dcov2XY, terms = terms, moms.X = moms.X, moms.Y = moms.Y, n = n, smp = perms, prepX[[i]], prepX[[j]])
        }
      }
    }
  } else {
    for (i in setdiff(1:dX, ms.grpX)) {
      for (j in setdiff(1:dY, ms.grpY)) {
        if (alg.fast) {
          terms <- preptoterms.fast(prepX[[i]], prepY[[j]], n, pairwise = use.pw, discrete.X[[i]], discrete.Y[[j]], perm = extendoutput)
        } else if (alg.memsave) {
          terms <- preptoterms.memsave(prepX[[i]], prepY[[j]], metr.X[[i]], metr.Y[[j]], n, pairwise = use.pw, perm = extendoutput)
        } else if (alg.standard) {
          terms <- preptoterms.standard(prepX[[i]], prepY[[j]], n, pairwise = use.pw, perm = extendoutput)
        }
        dcov2XY <- termstodcov2(terms$aijbij, vector_prod_sum(terms$aidot, terms$bidot), terms$adotdot * terms$bdotdot, terms$ncc)
        output$dcov[i, j] <- dcov2todcov(dcov2 = dcov2XY)
        if (use.pw) {
          Saa <- vector_prod_sum(terms$aidot, terms$aidot)
          Sbb <- vector_prod_sum(terms$bidot, terms$bidot)
          dvX <- termstodcov2(terms$aijaij, Saa, terms$adotdot * terms$adotdot, terms$ncc)
          dvY <- termstodcov2(terms$bijbij, Sbb, terms$bdotdot * terms$bdotdot, terms$ncc)
          if (docons | dobb3) {
            moms.X <- calcmom(aijaij = terms$aijaij, Saa = Saa, adotdot = terms$adotdot, distX = terms$distX, aidot = terms$aidot, n = terms$ncc, dobb3 = dobb3)
            moms.Y <- calcmom(aijaij = terms$bijbij, Saa = Sbb, adotdot = terms$bdotdot, distX = terms$distY, aidot = terms$bidot, n = terms$ncc, dobb3 = dobb3)
          }
          if (doperm) {
            perms <- lapply(1:b, function(t) sample(1:terms$ncc))
          }
        } else {
          dvX <- dvarX[i]
          dvY <- dvarY[j]
          if (docons | dobb3) {
            moms.X <- momsX[[i]]
            moms.Y <- momsY[[j]]
          }
        }
        output$dcor[i, j] <- dcov2todcor(dcov2 = dcov2XY, dvX, dvY)
        output$pvalue[i, j] <- testfunc(dcov2 = dcov2XY, terms = terms, moms.X = moms.X, moms.Y = moms.Y, smp = perms, prepX[[i]], prepY[[j]])
      }
    }
  }
  ## Optional multiple-testing adjustment; for the symmetric (no-Y) case only
  ## the lower triangle is adjusted and mirrored back.
  if (adjustp %in% c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr")) {
    if (withY) {
      output$pvalue.adj <- matrix(p.adjust(output$pvalue, method = adjustp), ncol = dY)
    } else {
      ind <- which(lower.tri(output$pvalue), arr.ind = TRUE)
      pvec <- as.vector(output$pvalue[ind])
      pvec <- p.adjust(pvec, method = adjustp)
      output$pvalue.adj <- diag(0, dX)
      ind2 <- ind[, 2:1]
      output$pvalue.adj[ind] <- output$pvalue.adj[ind2] <- pvec
    }
  } else if (adjustp != "none")
    warning ("adjustp should be one of \"holm\", \"hochberg\", \"hommel\", \"bonferroni\", \"BH\", \"BY\", \"fdr\" \n
             No p-value correction performed")
  ## Attach settings/metadata used by dcmatrix() and the print methods.
  class(output) <- "dcmatrix"
  output$withY <- withY
  output$dX <- dX
  output$n <- n
  output$b <- b
  output$test <- test
  output$calc.dcov <- calc.dcov
  output$calc.dcor <- calc.dcor
  output$bias.corr <- bias.corr
  output$affine <- affine
  output$calc.cor <- calc.cor
  output$group.X <- group.X
  output$names.X <- names.X
  output$groupslistX <- groupslistX
  if (withY) {
    output$group.Y <- group.Y
    output$dY <- dY
    output$names.Y <- names.Y
    output$groupslistY <- groupslistY
  }
  return(output)
} | /R/dc_matrix2.R | no_license | edelmand21/dcortools | R | false | false | 29,588 | r | #' Calculates distance covariance and distance correlation matrices
#'
#' @param X A data.frame or matrix.
#' @param Y Either NULL or a data.frame or a matrix with the same number of rows as X. If only X is provided, distance covariances/correlations are calculated between all groups in X. If X and Y are provided, distance covariances/correlations are calculated between all groups in X and all groups of Y.
#' @param calc.dcov logical; specifies if the distance covariance matrix is calculated.
#' @param calc.dcor logical; specifies if the distance correlation matrix is calculated.
#' @param calc.cor If set as "pearson", "spearman" or "kendall", a corresponding correlation matrix is additionally calculated.
#' @param calc.pvalue.cor logical; IF TRUE, a p-value based on the Pearson or Spearman correlation matrix is calculated (not implemented for calc.cor ="kendall") using Hmisc::rcorr.
#' @param return.data logical; specifies if the dcmatrix object should contain the original data.
#' @param test specifies the type of test that is performed, "permutation" performs a Monte Carlo Permutation test. "gamma" performs a test based on a gamma approximation of the test statistic under the null. "conservative" performs a conservative two-moment approximation. "bb3" performs a quite precise three-moment approximation and is recommended when computation time is not an issue.
#' @param adjustp If setting this parameter to "holm", "hochberg", "hommel", "bonferroni", "BH", "BY" or "fdr", corresponding adjusted p-values are additionally returned for the distance covariance test.
#' @param b specifies the number of random permutations used for the permutation test. Ignored for all other tests.
#' @param affine logical; indicates if the affinely transformed distance covariance should be calculated or not.
#' @param bias.corr logical; specifies if the bias corrected version of the sample distance covariance \insertCite{huo2016fast}{dcortools} should be calculated.
#' @param group.X A vector, each entry specifying the group membership of the respective column in X. Each group is handled as one sample for calculating the distance covariance/correlation matrices. If NULL, every sample is handled as an individual group.
#' @param group.Y A vector, each entry specifying the group membership of the respective column in Y. Each group is handled as one sample for calculating the distance covariance/correlation matrices. If NULL, every sample is handled as an individual group.
#' @param metr.X Either a single metric or a list providing a metric for each group in X (see examples).
#' @param metr.Y see metr.X.
#' @param use "all" uses all observations, "complete.obs" excludes NA's, "pairwise.complete.obs" uses pairwise complete observations for each comparison.
#' @param algorithm specifies the algorithm used for calculating the distance covariance.
#'
#' "fast" uses an O(n log n) algorithm if the observations are one-dimensional and metr.X and metr.Y are either "euclidean" or "discrete", see also \insertCite{huo2016fast;textual}{dcortools}.
#'
#' "memsave" uses a memory saving version of the standard algorithm with computational complexity O(n^2) but requiring only O(n) memory.
#'
#' "standard" uses the classical algorithm. User-specified metrics always use the classical algorithm.
#'
#' "auto" chooses the best algorithm for the specific setting using a rule of thumb.
#'
#' "memsave" is typically very inefficient for dcmatrix and should only be applied in exceptional cases.
#'
#' @param fc.discrete: logical; If TRUE, "discrete" metric is applied automatically on samples of type "factor" or "character".
#' @param calc.dcov.pw logical; If TRUE, a distance covariance matrix between the univariate observations/columns is additionally calculated. Not meaningful if group.X and group.Y are not specified.
#' @param calc.dcor.pw logical; If TRUE, a distance correlation matrix between the univariate observations/columns is additionally calculated. Not meaningful if group.X and group.Y are not specified.
#' @param test.pw specifies a test (see argument "test") that is performed between all single observations/columns.
#' @param metr.pw.X Either a single metric or a list providing a metric for each single observation/column in X (see metr.X).
#' @param metr.pw.Y See metr.pw.Y.
#' @return S3 object of class "dcmatrix" with the following components
#' \item{X, Y}{original data (if return.data = TRUE).}
#' \item{dcov, dcor}{distance covariance/correlation matrices between the groups specified in group.X/group.Y (if calc.dcov/calc.dcor = TRUE).}
#' \item{corr}{correlation matrix between the univariate observations/columns (if calc.cor is "pearson", "spearman" or "kendall").}
#' \item{pvalue}{matrix of p-values based on a corresponding distance covariance test based on the entries in dcov (if argument test is not "none").}
#' \item{pvalue.adj}{matrix of p-values adjusted for multiple comparisons using the method specified in argument adjustp.}
#' \item{pvalue.cor}{matrix of p-values based on "pearson"/"spearman" correlation (if calc.cor is "pearson" or "spearman" and calc.pvalue.cor = TRUE).}
#' \item{dcov.pw, dcor.pw}{distance covariance/correlation matrices between the univariate observations (if calc.dcov.pw/calc.dcor.pw = TRUE).}
#' \item{pvalue.pw}{matrix of p-values based on a corresponding distance covariance test based on the entries in dcov.pw (if argument test is not "none").}
#' @export
#' @references
#' \insertRef{berschneider2018complex}{dcortools}
#'
#' \insertRef{bottcher2017detecting}{dcortools}
#'
#' \insertRef{dueck2014affinely}{dcortools}
#'
#' \insertRef{huang2017statistically}{dcortools}
#'
#' \insertRef{huo2016fast}{dcortools}
#'
#' \insertRef{lyons2013distance}{dcortools}
#'
#' \insertRef{sejdinovic2013equivalence}{dcortools}
#'
#' \insertRef{szekely2007}{dcortools}
#'
#' \insertRef{szekely2009brownian}{dcortools}
#'
#'@examples
#' X <- matrix(rnorm(1000), ncol = 10)
#' dcm <- dcmatrix(X, test="bb3",calc.cor = "pearson", calc.pvalue.cor = T, adjustp = "BH")
#' dcm <- dcmatrix(X, test="bb3",calc.cor = "pearson", calc.pvalue.cor = T, adjustp = "BH", group.X = c(rep(1, 5), rep(2, 5)), calc.dcor.pw = T, test.pw = "bb3")
#'
#' Y <- matrix(rnorm(600), ncol = 6)
#' Y[,6] <- rbinom(100, 4, 0.3)
#' dcm <- dcmatrix(X, Y, test="bb3",calc.cor = "pearson", calc.pvalue.cor = T, adjustp = "BH")
#' dcm <- dcmatrix(X, Y, test="bb3",calc.cor = "pearson", calc.pvalue.cor = T, adjustp = "BH", group.X = c(rep("group1", 5), rep("group2", 5)), group.Y = c(rep("group1", 5), "group2"), metr.X = "gaussauto", metr.Y = list("group1" = "gaussauto", "group2" = "discrete"))
dcmatrix <- function (X,
                      Y = NULL,
                      calc.dcov = TRUE,
                      calc.dcor = TRUE,
                      calc.cor = "none",
                      calc.pvalue.cor = FALSE,
                      return.data = TRUE,
                      test = "none",
                      adjustp = "none",
                      b = 499,
                      affine = FALSE,
                      standardize = FALSE,
                      bias.corr = FALSE,
                      group.X = NULL,
                      group.Y = NULL,
                      metr.X = "euclidean",
                      metr.Y = "euclidean",
                      use = "all",
                      algorithm = "auto",
                      fc.discrete = FALSE,
                      calc.dcor.pw = FALSE,
                      calc.dcov.pw = FALSE,
                      test.pw = "none",
                      metr.pw.X = "euclidean",
                      metr.pw.Y = "euclidean"
                      ) {
  # First pass: grouped analysis as requested by the caller.
  res <- .dcmatrixmain(X, Y, calc.dcov, calc.dcor, calc.cor, calc.pvalue.cor,
                       return.data, test, adjustp, b, affine, standardize,
                       bias.corr, group.X, group.Y, metr.X, metr.Y, use,
                       algorithm, fc.discrete)
  res$call <- match.call()
  # Optional second pass: column-by-column ("pairwise") statistics, computed
  # without any grouping and with the pairwise metrics metr.pw.X / metr.pw.Y.
  if (calc.dcor.pw || calc.dcov.pw || test.pw != "none") {
    pw <- .dcmatrixmain(X, Y, calc.dcov = calc.dcov.pw, calc.cor = "no",
                        calc.dcor = calc.dcor.pw, calc.pvalue.cor = FALSE,
                        return.data, test = test.pw, adjustp, b, affine,
                        standardize, bias.corr, group.X = NULL, group.Y = NULL,
                        metr.X = metr.pw.X, metr.Y = metr.pw.Y, use,
                        algorithm, fc.discrete)
    res$dcor.pw   <- pw$dcor
    res$dcov.pw   <- pw$dcov
    res$pvalue.pw <- pw$pvalue
    res$metr.pw.X <- metr.pw.X
    res$metr.pw.Y <- metr.pw.Y
    res$dX.pw     <- pw$dX
    res$dY.pw     <- pw$dY
    # Label the pairwise matrices with the original column names
    # (rows always come from X; columns come from Y when Y is supplied).
    row.nms <- col.nms <- colnames(X)
    if (!is.null(Y))
      col.nms <- colnames(Y)
    for (slot in c("dcor.pw", "dcov.pw", "pvalue.pw")) {
      if (!is.null(res[[slot]])) {
        rownames(res[[slot]]) <- row.nms
        colnames(res[[slot]]) <- col.nms
      }
    }
  }
  res$test.pw <- test.pw
  res$calc.dcor.pw <- calc.dcor.pw
  res$calc.dcov.pw <- calc.dcov.pw
  res
}
# Internal engine behind dcmatrix(). Computes distance covariance /
# correlation matrices (and, optionally, classical correlations and
# p-values) between groups of columns of X, or between groups of columns of
# X and groups of columns of Y. All arguments are documented in dcmatrix();
# this function is not exported and relies on the package-internal helpers
# prep.fast/prep.memsave/prep.standard, preptoterms.*, sampleterms.*,
# normalize.sample, scale.sample, calcmom and vector_prod_sum.
.dcmatrixmain <- function (X,
                       Y = NULL,
                       calc.dcov = TRUE,
                       calc.dcor = TRUE,
                       calc.cor = "none",
                       calc.pvalue.cor = FALSE,
                       return.data = TRUE,
                       test = "none",
                       adjustp = "none",
                       b = 499,
                       affine = FALSE,
                       standardize=FALSE,
                       bias.corr = FALSE,
                       group.X = NULL,
                       group.Y = NULL,
                       metr.X = "euclidean",
                       metr.Y = "euclidean",
                       use="all",
                       algorithm ="auto",
                       fc.discrete = FALSE) {
  output <- list()
  if(return.data) {
    output$X <- X
    output$Y <- Y
  } else {
    output <- NULL
  }
  withY <- !is.null(Y)
  ## Translate the requested test into a set of boolean flags.
  dogamma <- docons <- dobb3 <- doperm <- donotest <- FALSE
  if (test == "none")
    donotest <- TRUE
  else if (test == "gamma")
    dogamma <- TRUE
  else if (test == "conservative")
    docons <- TRUE
  else if (test == "bb3")
    dobb3 <- TRUE
  else if (test == "permutation")
    doperm <- TRUE
  else
    stop ("Test must be one of \"none\", \"permutation\", \"gamma\", \"bb3\" or \"conservative\"")
  ## Missing-value strategy: drop incomplete rows ("complete.obs"), use all
  ## rows ("all"), or handle missingness per pair ("pairwise.complete.obs").
  use.all <- use.pw <- FALSE
  if (use == "complete.obs") {
    cc <- which(complete.cases(X))
    if (withY) {
      cc <- intersect(cc,which(complete.cases(Y)))
      Y <- Y[cc,]
    }
    X <- X[cc,]
    use.all <- TRUE
  } else if (use == "all") {
    use.all <- TRUE
  } else if (use == "pairwise.complete.obs") {
    use.pw <- TRUE
  } else {
    stop("use must be one of \"all\", \"complete.obs\" or \"pairwise.complete.obs\"")
  }
  if (is.vector(X)) {
    X <- as.matrix(X)
  }
  p <- ncol(X)
  n <- nrow(X)
  ## Default grouping: one group per column (named by colnames if present).
  if (is.null(group.X)) {
    if (is.null(colnames(X)))
      group.X <- 1:p
    else
      group.X <- colnames(X)
  }
  names.X <- names.Y <- unique(group.X)
  dX <- dY <- length(unique(group.X))
  ms.grpX <- ms.grpY <- NULL
  groupslistX <- lapply(1:dX, function(t) which(group.X == names.X[t]))
  pX <- sapply(1:dX, function(t) length(groupslistX[[t]]))
  prepX <- as.list(rep(NA,dX))
  dvarX <- rep(NA,dX)
  ## Recycle a single metric (possibly a metric with one numeric parameter,
  ## given as a length-2 vector) to all groups; coerce character vectors of
  ## per-group metrics into lists.
  lmX <- length(metr.X)
  if (lmX ==1) {
    metr.X <- as.list(replicate(dX, metr.X))
  } else if (lmX == 2) {
    ischar <- suppressWarnings(is.na(as.numeric(metr.X[2])))
    if (!ischar)
      metr.X <- lapply(1:dX, function(u) metr.X)
  }
  if (is.character(metr.X) & lmX == dX) {
    ischar <- suppressWarnings(is.na(as.numeric(metr.X[2])))
    if (ischar)
      metr.X <- as.list(metr.X)
  }
  ## When use = "all", groups containing missing values are skipped entirely
  ## (their rows/columns in the result matrices stay NA).
  if (use.all) {
    ## BUG FIX: the original tested complete.cases(X[,t]) with the group
    ## index t, which is wrong as soon as a group spans several columns.
    ms.X <- sapply(1:dX, function(t) any(!complete.cases(X[, groupslistX[[t]], drop = FALSE])))
    ms.grpX <- which(ms.X)
  } else {
    ms.X <- rep(FALSE,dX)
    ms.grpX <- numeric(0)
  }
  ## Normalize samples if calculation of affinely invariant distance
  ## covariance is desired; otherwise optionally standardize them.
  if (affine) {
    for (j in 1 : dX) {
      if (use.all) {
        X[,groupslistX[[j]]] <- normalize.sample(X[,groupslistX[[j]]], n, pX[j])
      } else {
        cc <- complete.cases(X[,groupslistX[[j]]])
        ncc <- sum(cc)  # BUG FIX: was length(cc), which is always n
        X[cc,groupslistX[[j]]] <- normalize.sample(X[cc,groupslistX[[j]]], ncc, pX[j])
      }
    }
  } else if (standardize) {
    ## BUG FIX: this branch referenced an undefined loop index `j`; the
    ## loop over the groups was missing, so standardize = TRUE errored out.
    for (j in 1 : dX) {
      if (use.all) {
        X[,groupslistX[[j]]] <- scale.sample(X[,groupslistX[[j]]], n, pX[j])
      } else {
        cc <- complete.cases(X[,groupslistX[[j]]])
        ncc <- sum(cc)
        X[cc,groupslistX[[j]]] <- scale.sample(X[cc,groupslistX[[j]]], ncc, pX[j])
      }
    }
  }
  ## Same bookkeeping for Y, when a second sample is supplied.
  if (withY) {
    lmY <- length(metr.Y)
    if (is.vector(Y)) {
      Y <- as.matrix(Y)
    }
    q <- ncol(Y)
    m <- nrow(Y)
    if (is.null(group.Y)) {
      if (is.null(colnames(Y)))
        group.Y <- 1 : q
      else
        group.Y <- colnames(Y)
    }
    names.Y <- unique(group.Y)
    dY <- length(unique(group.Y))
    groupslistY <- lapply(1:dY, function(t) which(group.Y == names.Y[t]))
    pY <- sapply(1:dY, function(t) length(groupslistY[[t]]))
    prepY <- as.list(rep(NA,dY))
    dvarY <- rep(NA,dY)
    if (use.all) {
      ## BUG FIX: test the columns of each group (and avoid computing the
      ## same complete.cases twice for ms.Y and ms.grpY).
      ms.Y <- sapply(1:dY, function(t) any(!complete.cases(Y[, groupslistY[[t]], drop = FALSE])))
      ms.grpY <- which(ms.Y)
    } else {
      ms.Y <- rep(FALSE,dY)
      ms.grpY <- numeric(0)
    }
    if (lmY ==1) {
      metr.Y <- as.list(replicate(dY, metr.Y))
    } else if (lmY == 2) {  # BUG FIX: was lmX == 2
      ischar <- suppressWarnings(is.na(as.numeric(metr.Y[2])))
      if (!ischar)
        metr.Y <- lapply(1:dY, function(u) metr.Y)
    }
    if (is.character(metr.Y) & lmY == dY) {
      ischar <- suppressWarnings(is.na(as.numeric(metr.Y[2])))
      if (ischar)
        metr.Y <- as.list(metr.Y)
    }
    if (m != n)
      stop("X and Y must have same number of rows (samples)")
    if (affine) {
      for (j in 1 : dY) {
        if (use.all) {
          Y[,groupslistY[[j]]] <- normalize.sample(Y[,groupslistY[[j]]], n, pY[j])
        } else {
          cc <- complete.cases(Y[,groupslistY[[j]]])
          ncc <- sum(cc)
          ## BUG FIX: the original indexed Y with groupslistX here.
          Y[cc,groupslistY[[j]]] <- normalize.sample(Y[cc,groupslistY[[j]]], ncc, pY[j])
        }
      }
    } else if (standardize) {
      ## BUG FIX: the loop over the groups was missing and the complete
      ## cases were taken from X instead of Y.
      for (j in 1 : dY) {
        if (use.all) {
          Y[,groupslistY[[j]]] <- scale.sample(Y[,groupslistY[[j]]], n, pY[j])
        } else {
          cc <- complete.cases(Y[,groupslistY[[j]]])
          ncc <- sum(cc)
          Y[cc,groupslistY[[j]]] <- scale.sample(Y[cc,groupslistY[[j]]], ncc, pY[j])
        }
      }
    }
  }
  ## Rule of thumb for "auto": the O(n log n) algorithm only applies to
  ## univariate groups with euclidean/discrete metrics, and pays off for
  ## n > 200; bb3 requires the standard algorithm.
  if (algorithm == "auto") {
    gofast <- (((p == length(names.X))) * (n>200)) & (!dobb3) * all(metr.X %in% c("euclidean", "discrete"))
    if (withY)
      gofast <- gofast * (q == length(names.Y)) * all(metr.Y %in% c("euclidean", "discrete"))
    if (gofast) {
      algorithm <- "fast"
    } else {
      algorithm <- "standard"
    }
  }
  alg.fast <- alg.standard <- alg.memsave <- FALSE
  if (algorithm == "fast") {
    alg.fast <- TRUE
    if (doperm)
      terms.smp <- function(terms, smp) {sampleterms.fast.matr(terms, smp)}
  } else if (algorithm == "standard") {
    alg.standard <- TRUE
    if (doperm)
      terms.smp <- function(terms, smp, ndisc = NULL) {sampleterms.standard(terms, smp)}
  } else if (algorithm == "memsave") {
    alg.memsave <- TRUE
    if (doperm)
      terms.smp <- function(terms, smp, ndisc = NULL) {sampleterms.memsave(terms, smp)}
  } else
    stop ("Algorithm must be one of \"fast\", \"standard\", \"memsave\" or \"auto\"")
  if (!alg.standard & dobb3)
    stop("bb3 p-value calculation is only possible with algorithm=\"standard\"!")
  ## Converters from the raw sums (aijbij, Sab, Tab) to (bias-corrected)
  ## squared distance covariance, and from dcov^2 to dcov/dcor.
  if (bias.corr == TRUE) {
    termstodcov2 <- function(aijbij,Sab,Tab,n) {
      aijbij/ n / (n - 3) - 2 * Sab / n / (n - 2) / (n - 3) + Tab / n / (n - 1) / (n - 2) / (n - 3)
    }
    dcov2todcov <- function(dcov2) {
      ## The bias-corrected estimate can be negative; keep the sign.
      sqrt(abs(dcov2)) * sign(dcov2)
    }
    dcov2todcor <- function(dcov2, dvarX, dvarY) {
      (sqrt(abs(dcov2)) * sign(dcov2)) / sqrt(sqrt(dvarX * dvarY))
    }
  } else {
    termstodcov2 <- function(aijbij, Sab, Tab, n) {
      aijbij / n / n - 2 * Sab / n / n / n + Tab / n / n / n / n
    }
    dcov2todcov <- function(dcov2) {
      sqrt(dcov2)
    }
    dcov2todcor <- function(dcov2, dvarX, dvarY) {
      sqrt(dcov2) / sqrt(sqrt(dvarX * dvarY))
    }
  }
  ## p-value machinery for the selected test; all variants share the same
  ## call signature (extra arguments are swallowed by ...).
  if (dogamma) {
    testfunc <- function(terms, ...) {
      n <- terms$ncc
      Saa <- vector_prod_sum(terms$aidot,terms$aidot)
      Sbb <- vector_prod_sum(terms$bidot,terms$bidot)
      Sab <- vector_prod_sum(terms$aidot,terms$bidot)
      dvarX <- terms$aijaij / n / (n - 3) - 2 * Saa/ n / (n - 2) / (n - 3) + terms$adotdot * terms$adotdot / n / (n - 1) / (n - 2) / (n - 3)
      dvarY <- terms$bijbij / n / (n - 3) - 2 * Sbb / n / (n - 2) / (n - 3) + terms$bdotdot * terms$bdotdot / n / (n - 1) / (n - 2) / (n - 3)
      dcov2 <- terms$aijbij / n / (n - 3) - 2 * Sab / n / (n - 2) / (n - 3) + terms$adotdot * terms$bdotdot / n / (n - 1) / (n - 2) / (n - 3)
      U1 <- dvarX * dvarY
      U2 <- terms$adotdot / n / (n - 1)
      U3 <- terms$bdotdot / n / (n - 1)
      alph <- 1 / 2 * (U2 ^ 2 * U3 ^ 2) / U1
      beta <- 1 / 2 * (U2 * U3) / U1
      stat <- n * dcov2 + U2 * U3
      pval <- pgamma(stat, alph, beta, lower.tail = FALSE)
      return(pval)
    }
  } else if (doperm) {
    testfunc <- function(dcov2, smp, terms, ...) {
      n <- terms$ncc
      if (is.na(dcov2))
        return(NA)
      Tab <- terms$adotdot * terms$bdotdot
      reps <- lapply(1:b, function(t) {
        terms.sample <- terms.smp(terms,smp[[t]])
        return(termstodcov2(terms.sample$aijbij, terms.sample$Sab, Tab, n))
      })
      pval <- (1 + length(which(reps >= dcov2))) / (1 + b)
      return(pval)
    }
  } else if (docons) {
    testfunc <- function(terms, moms.X, moms.Y,...) {
      n <- terms$ncc
      est.m2 <- sum((moms.X * moms.Y)) / n ^ 10
      est.m1 <- terms$adotdot * terms$bdotdot / n ^ 3 / (n - 1)
      est.var <- (est.m2 - est.m1 ^ 2)
      alpha <- sqrt(est.var / 2 / est.m1 ^ 2)
      stat <- terms$aijbij / n - 2 * vector_prod_sum(terms$aidot,terms$bidot) / n ^ 2 + terms$adotdot * terms$bdotdot / n ^ 3
      pval <- pchisq(stat * sqrt(2) / sqrt(est.var), df = 1 / alpha, lower.tail = FALSE)
      return(pval)
    }
  } else if (dobb3) {
    testfunc <- function(terms, moms.X, moms.Y,...) {
      n <- terms$ncc
      est.m2 <- sum((moms.X$vc * moms.Y$vc)) / n ^ 10
      est.m1 <- terms$adotdot * terms$bdotdot / n ^ 3 / (n - 1)
      est.var <- (est.m2 - est.m1 ^ 2)
      est.skw <- moms.X$skw * moms.Y$skw
      beta <- est.skw / sqrt(8)
      stat <- terms$aijbij / n - 2 * vector_prod_sum(terms$aidot,terms$bidot) / n ^ 2 + terms$adotdot * terms$bdotdot / n ^ 3
      centstat <- (stat - est.m1) / sqrt(est.var)
      pval <- pchisq((centstat * sqrt(2) + 1 / beta) / beta , df = 1 / beta ^ 2, lower.tail = FALSE)
      return(pval)
    }
  } else if (donotest) {
    testfunc <- function(...) {}
  }
  ## Turn unused converters into no-ops so the main loops can call them
  ## unconditionally.
  if (!calc.dcov) {
    dcov2todcov <- function(...) {}
  }
  if (!calc.dcor) {
    dcov2todcor <- function(...) {}
  }
  ## With use = "all" the same permutations can be reused for every pair;
  ## with pairwise complete observations they are redrawn per pair below.
  if (doperm & use.all) {
    perms <- lapply(1:b, function(t) sample(1:n))
  } else {
    perms <- NULL
  }
  extendoutput <- doperm| ((dobb3|docons)*use.pw)
  ## Optionally force the "discrete" metric for factor/character columns.
  if (fc.discrete) {
    for (j in 1:dX) {
      if (is.factor(X[,groupslistX[[j]]]) | is.character(X[,groupslistX[[j]]]))
        metr.X[[j]] <- "discrete"
    }
    if (withY) {
      for (j in 1:dY) {
        if (is.factor(Y[,groupslistY[[j]]]) | is.character(Y[,groupslistY[[j]]]))
          metr.Y[[j]] <- "discrete"
      }
    }
  }
  ## Classical (column-wise) correlations and their p-values.
  if (calc.cor %in% c("spearman","kendall", "pearson")) {
    output$corr <- cor(X,Y, use = use, method = calc.cor)
    if (calc.pvalue.cor) {
      if (calc.cor %in% c("spearman", "pearson")) {
        if (!withY) {
          corrp <- Hmisc::rcorr(X, type = calc.cor)
          output$pvalue.cor <- corrp$P
          diag(output$pvalue.cor) <- 0
          if (use.all)
            output$pvalue.cor[which(corrp$n<n,arr.ind=TRUE)] <- NA
        } else {
          corrp <- Hmisc::rcorr(X,Y)
          ## BUG FIX: rcorr works on columns, so index with the column
          ## counts p/q, not the group counts dX/dY (they differ whenever
          ## group.X/group.Y are supplied).
          output$pvalue.cor <- corrp$P[1:p,(p+1):(p+q)]
          if (use.all)
            output$pvalue.cor[which(corrp$n[1:p,(p+1):(p+q)]<n,arr.ind=TRUE)] <- NA
        }
      } else
        warning("P-Value calculation for Kendall correlation not implemented")
    }
  }
  if (alg.fast) {
    discrete.X <- (metr.X == "discrete")
    if (withY)
      discrete.Y <- (metr.Y == "discrete")
  }
  ## Allocate the result matrices (entries for skipped groups stay NA).
  if (calc.dcov) {
    output$dcov <- matrix(nrow = dX, ncol = dY)
    rownames(output$dcov) <- names.X
    colnames(output$dcov) <- names.Y
  }
  if (calc.dcor) {
    output$dcor <- matrix(nrow = dX, ncol = dY)
    rownames(output$dcor) <- names.X
    colnames(output$dcor) <- names.Y
    if (!withY)
      diag(output$dcor) <- 1
  }
  if (!donotest) {
    output$pvalue <- matrix(nrow = dX, ncol = dY)
    rownames(output$pvalue) <- names.X
    colnames(output$pvalue) <- names.Y
    if (!withY)
      diag(output$pvalue) <- 0
  }
  ## Precompute per-group terms, distance variances and (for bb3 /
  ## conservative tests) moments.
  momsX <- momsY <- NULL
  if ((docons | dobb3) & !use.pw) {
    momsX <- as.list(rep(NA,dX))
    if (withY)
      momsY <- as.list(rep(NA,dY))
  }
  for (j in setdiff(1:dX,ms.grpX)) {
    if (alg.fast) {
      prepX[[j]] <- prep.fast(X[,j], n, discrete = discrete.X[j], pairwise = use.pw)
    } else if (alg.memsave) {
      prepX[[j]] <- prep.memsave(X[,groupslistX[[j]]], n, pX[j], metr.X = metr.X[[j]], pairwise = use.pw)
    } else if (alg.standard) {
      prepX[[j]] <- prep.standard(X[,groupslistX[[j]]], n, pX[j], metr.X = metr.X[[j]], pairwise = use.pw)
    }
    Saa <- vector_prod_sum(prepX[[j]]$aidot,prepX[[j]]$aidot)
    if ((docons | dobb3) & !use.pw) {
      momsX[[j]] <- calcmom(aijaij = prepX[[j]]$aijaij, Saa = Saa, adotdot = prepX[[j]]$adotdot, aidot = prepX[[j]]$aidot, distX = prepX[[j]]$distX, n = n, dobb3 = dobb3)
    }
    dvarX[j] <- termstodcov2(prepX[[j]]$aijaij, Saa, prepX[[j]]$adotdot*prepX[[j]]$adotdot, prepX[[j]]$ncc)
  }
  if (!withY & calc.dcov)
    diag(output$dcov) <- sqrt(dvarX)
  if (withY) {
    for (j in setdiff(1:dY,ms.grpY)) {
      if (alg.fast) {
        prepY[[j]] <- prep.fast(Y[,j], n, discrete = discrete.Y[j], pairwise = use.pw)
      } else if (alg.memsave) {
        prepY[[j]] <- prep.memsave(Y[,groupslistY[[j]]], n, pY[j], metr.X = metr.Y[[j]], pairwise = use.pw)
      } else if (alg.standard) {
        prepY[[j]] <- prep.standard(Y[,groupslistY[[j]]], n, pY[j], metr.X = metr.Y[[j]], pairwise = use.pw)
      }
      Sbb <- vector_prod_sum(prepY[[j]]$aidot, prepY[[j]]$aidot)
      if ((docons | dobb3) & !use.pw) {
        momsY[[j]] <- calcmom(aijaij = prepY[[j]]$aijaij, Saa = Sbb, adotdot = prepY[[j]]$adotdot, aidot = prepY[[j]]$aidot, distX = prepY[[j]]$distX, n = n, dobb3 = dobb3)
      }
      dvarY[j] <- termstodcov2(prepY[[j]]$aijaij, Sbb, prepY[[j]]$adotdot*prepY[[j]]$adotdot, prepY[[j]]$ncc)
    }
  }
  if (!withY) {
    ## X-vs-X: fill the symmetric matrices over the upper triangle.
    if (dX > 1) {
      for (i in setdiff(1:(dX-1),ms.grpX)) {
        for (j in setdiff((i+1):dX,ms.grpX)) {
          if (alg.fast) {
            terms <- preptoterms.fast(prepX[[i]], prepX[[j]], n, pairwise = use.pw, discrete.X[[i]], discrete.X[[j]], perm = extendoutput)
          } else if (alg.memsave) {
            terms <- preptoterms.memsave(prepX[[i]], prepX[[j]], metr.X[[i]], metr.X[[j]], n, pairwise = use.pw, perm = extendoutput)
          } else if (alg.standard) {
            terms <- preptoterms.standard(prepX[[i]], prepX[[j]], n, pairwise = use.pw, perm = extendoutput)
          }
          dcov2XY <- termstodcov2(terms$aijbij, vector_prod_sum(terms$aidot, terms$bidot), terms$adotdot * terms$bdotdot, terms$ncc)
          output$dcov[i,j] <- output$dcov[j,i] <- dcov2todcov(dcov2 = dcov2XY)
          if (use.pw) {
            ## Pairwise complete observations: variances, moments and
            ## permutations must be recomputed for each pair.
            Saa <- vector_prod_sum(terms$aidot, terms$aidot)
            Sbb <- vector_prod_sum(terms$bidot, terms$bidot)
            dvX <- termstodcov2(terms$aijaij, Saa, terms$adotdot * terms$adotdot, terms$ncc)
            dvY <- termstodcov2(terms$bijbij, Sbb, terms$bdotdot * terms$bdotdot, terms$ncc)
            if (docons | dobb3) {
              moms.X <- calcmom(aijaij = terms$aijaij, Saa = Saa, adotdot = terms$adotdot, distX = terms$distX, n = terms$ncc, aidot = terms$aidot, dobb3 = dobb3)
              moms.Y <- calcmom(aijaij = terms$bijbij, Saa = Sbb, adotdot = terms$bdotdot, distX = terms$distY, n = terms$ncc, aidot = terms$bidot, dobb3 = dobb3)
            }
            if (doperm) {
              perms <- lapply(1:b, function(t) sample(1:terms$ncc))
            }
          } else {
            dvX <- dvarX[i]
            dvY <- dvarX[j]
            if (docons | dobb3) {
              moms.X <- momsX[[i]]
              moms.Y <- momsX[[j]]
            }
          }
          output$dcor[i,j] <- output$dcor[j,i] <- dcov2todcor(dcov2 = dcov2XY, dvX, dvY)
          output$pvalue[i,j] <- output$pvalue[j,i] <- testfunc(dcov2 = dcov2XY, terms = terms, moms.X = moms.X, moms.Y = moms.Y, n = n, smp = perms, prepX[[i]], prepX[[j]])
        }
      }
    }
  } else {
    ## X-vs-Y: fill the full (dX x dY) matrices.
    for (i in setdiff(1:dX,ms.grpX)) {
      for (j in setdiff(1:dY,ms.grpY)) {
        if (alg.fast) {
          terms <- preptoterms.fast(prepX[[i]], prepY[[j]], n, pairwise = use.pw, discrete.X[[i]], discrete.Y[[j]], perm = extendoutput)
        } else if (alg.memsave) {
          terms <- preptoterms.memsave(prepX[[i]], prepY[[j]], metr.X[[i]], metr.Y[[j]], n, pairwise = use.pw, perm = extendoutput)
        } else if (alg.standard) {
          terms <- preptoterms.standard(prepX[[i]], prepY[[j]], n, pairwise = use.pw, perm = extendoutput)
        }
        dcov2XY <- termstodcov2(terms$aijbij, vector_prod_sum(terms$aidot, terms$bidot), terms$adotdot * terms$bdotdot, terms$ncc)
        output$dcov[i,j] <- dcov2todcov(dcov2 = dcov2XY)
        if (use.pw) {
          Saa <- vector_prod_sum(terms$aidot, terms$aidot)
          Sbb <- vector_prod_sum(terms$bidot, terms$bidot)
          dvX <- termstodcov2(terms$aijaij, Saa, terms$adotdot * terms$adotdot, terms$ncc)
          dvY <- termstodcov2(terms$bijbij, Sbb, terms$bdotdot * terms$bdotdot, terms$ncc)
          if (docons | dobb3) {
            moms.X <- calcmom(aijaij = terms$aijaij, Saa = Saa, adotdot = terms$adotdot, distX = terms$distX, aidot = terms$aidot, n = terms$ncc, dobb3 = dobb3)
            moms.Y <- calcmom(aijaij = terms$bijbij, Saa = Sbb, adotdot = terms$bdotdot, distX = terms$distY, aidot = terms$bidot, n = terms$ncc, dobb3 = dobb3)
          }
          if (doperm) {
            perms <- lapply(1:b, function(t) sample(1:terms$ncc))
          }
        } else {
          dvX <- dvarX[i]
          dvY <- dvarY[j]
          if (docons | dobb3) {
            moms.X <- momsX[[i]]
            moms.Y <- momsY[[j]]
          }
        }
        output$dcor[i,j] <- dcov2todcor(dcov2 = dcov2XY, dvX, dvY)
        output$pvalue[i,j] <- testfunc(dcov2 = dcov2XY, terms = terms, moms.X = moms.X, moms.Y = moms.Y, smp = perms, prepX[[i]], prepY[[j]])
      }
    }
  }
  ## Multiple-testing correction; for the symmetric X-vs-X case only the
  ## lower triangle is adjusted and then mirrored.
  if (adjustp %in% c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr")) {
    if (withY) {
      output$pvalue.adj <- matrix(p.adjust(output$pvalue,method = adjustp), ncol = dY)
    } else {
      ind <- which(lower.tri(output$pvalue), arr.ind=TRUE)
      pvec <- as.vector(output$pvalue[ind])
      pvec <- p.adjust(pvec, method = adjustp)
      output$pvalue.adj <- diag(0,dX)
      ind2 <- ind[,2:1]
      output$pvalue.adj[ind] <- output$pvalue.adj[ind2] <- pvec
    }
  } else if (adjustp != "none")
    warning ("adjustp should be one of \"holm\", \"hochberg\", \"hommel\", \"bonferroni\", \"BH\", \"BY\", \"fdr\" \n
             No p-value correction performed")
  ## Bookkeeping for print/summary methods and for dcmatrix().
  class(output) <- "dcmatrix"
  output$withY <- withY
  output$dX <- dX
  output$n <- n
  output$b <- b
  output$test <- test
  output$calc.dcov <- calc.dcov
  output$calc.dcor <- calc.dcor
  output$bias.corr <- bias.corr
  output$affine <- affine
  output$calc.cor <- calc.cor
  output$group.X <- group.X
  output$names.X <- names.X
  output$groupslistX <- groupslistX
  if (withY) {
    output$group.Y <- group.Y
    output$dY <- dY
    output$names.Y <- names.Y
    output$groupslistY <- groupslistY
  }
  return(output)
}
# Build the "helpdesk" hex sticker and the GitHub social-preview card with
# magick + bunny. Reads input/raw_image.png and writes logo.png, hex.png and
# helpdesk_ghcard.png into input/.
library(magick)
library(bunny)

# Source artwork placed at the centre of the sticker.
raw_img <- magick::image_read("input/raw_image.png")

# Hexagon background and border in the brand colours.
sticker_bg  <- image_canvas_hex(border_color = "#0d4448", border_size = 2, fill_color = "#ede6f2")
sticker_rim <- image_canvas_hexborder(border_color = "#0d4448", border_size = 4)

# Compose artwork and title inside the hex, then draw the border on top.
sticker <- sticker_bg %>%
  bunny::image_compose(raw_img, gravity = "center", offset = '-50+50') %>%
  magick::image_annotate("helpdesk", size = 200, gravity = "north",
                         location = '-0+300', font = "Aller", color = "#0d4448") %>%
  bunny::image_compose(sticker_rim, gravity = "center", operator = "Over")

# Export a small logo and a full-size hex.
sticker %>%
  magick::image_scale("200x200") %>%
  magick::image_write(here::here("input", "logo.png"), density = 600)
sticker %>%
  magick::image_scale("1200x1200") %>%
  magick::image_write(here::here("input", "hex.png"), density = 600)

# GitHub card: hex on the right, title and repo slug on the left.
card_hex <- sticker %>%
  image_scale("400x400")
octocat <- bunny::github %>%
  image_scale("50x50")
card <- image_canvas_ghcard("#ede6f2") %>%
  image_compose(card_hex, gravity = "East", offset = "+0+0") %>%
  image_annotate("Are You Being Served?", gravity = "West", location = "+50-30",
                 color = "#0d4448", size = 60, font = "Aller", weight = 700) %>%
  image_compose(octocat, gravity = "West", offset = "+50+40") %>%
  image_annotate("yonicd/helpdesk", gravity = "West", location = "+110+45",
                 size = 50, font = "Ubuntu Mono") %>%
  image_border_ghcard("#ede6f2")
card %>%
  image_write(here::here("input", "helpdesk_ghcard.png"))
| /input/bunny.R | permissive | yonicd/helpdesk | R | false | false | 1,438 | r | library(magick)
# Hex-sticker / GitHub-card build script (duplicate copy).
# Uses the bunny and magick packages; writes logo.png, hex.png and
# helpdesk_ghcard.png into input/.
library(bunny)
# Source artwork placed at the centre of the sticker.
x <- magick::image_read("input/raw_image.png")
# Hexagon background and border in the brand colours.
hex_canvas <- image_canvas_hex(border_color="#0d4448", border_size = 2, fill_color = "#ede6f2")
hex_border <- image_canvas_hexborder(border_color="#0d4448", border_size = 4)
# Compose artwork and title inside the hex, then draw the border on top.
img_hex <- hex_canvas %>%
  bunny::image_compose(x, gravity = "center", offset = '-50+50')%>%
  magick::image_annotate("helpdesk", size=200, gravity = "north", location = '-0+300',font = "Aller", color = "#0d4448")%>%
  bunny::image_compose(hex_border, gravity = "center", operator = "Over")
# Small logo export.
img_hex%>%
  magick::image_scale("200x200") %>%
  magick::image_write(here::here("input", "logo.png"), density = 600)
# Full-size hex export.
img_hex%>%
  magick::image_scale("1200x1200") %>%
  magick::image_write(here::here("input", "hex.png"), density = 600)
# GitHub social-preview card: hex on the right, title and repo slug left.
img_hex_gh <- img_hex %>%
  image_scale("400x400")
gh_logo <- bunny::github %>%
  image_scale("50x50")
gh <- image_canvas_ghcard("#ede6f2") %>%
  image_compose(img_hex_gh, gravity = "East", offset = "+0+0") %>%
  image_annotate("Are You Being Served?", gravity = "West", location = "+50-30",
                 color="#0d4448", size=60, font="Aller", weight = 700) %>%
  image_compose(gh_logo, gravity="West", offset = "+50+40") %>%
  image_annotate("yonicd/helpdesk", gravity="West", location="+110+45",
                 size=50, font="Ubuntu Mono") %>%
  image_border_ghcard("#ede6f2")
gh %>%
  image_write(here::here("input", "helpdesk_ghcard.png"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.