content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
#########################################################################################################################
#########################################################################################################################
###### PROJECT: DBNs
###### NAME: CreateStyle.R
###### AUTHOR: Daniel Ruiz-Perez, PhD Student
###### AFFILIATION: Florida International University
######
###### DESCRIPTION: This file creates the style.xml file needed to visualize it in Cytoscape. It takes as input a network
###### , a file with the mean abundance and a base style file to update.
#########################################################################################################################
#########################################################################################################################

library(scales)
library(stringr)

# Avoid scientific notation in the numeric values written into the XML style.
options("scipen" = 100, "digits" = 4)

# Network to style. Earlier alternatives kept commented out for reference.
# nameOfNetwork = "DemoFigure.graphml" # list.files(pattern = ".*\\.graphml")
# nameOfNetwork = "/Networks/human_ibd_microbiota_genes_metabolites_dbn_sample_alignment_sr14d_top100x100_filtered_dbnIntraBoot100boots.graphml"
nameOfNetwork = "/Networks/human_ibd_microbiota_genes_metabolites_dbn_sample_alignment_sr14d_top100x100_host_genes_reference_dbnIntraBoot.graphml"
# NOTE(review): `folder` must be defined in the calling environment before sourcing this script.
nameOfNetwork = paste(folder, nameOfNetwork, sep = "/")

# Mean abundances, one value per node, used to drive node transparency below.
abundances = unlist(read.csv('meanAbundanceiHMPAlignmentHost.txt', sep = "\t"))

# READ STYLE BASE: split the base style at the nodeSizeLocked dependency so new
# node visual properties can be inserted at that point.
f = readChar("styleBase.xml", file.info("styleBase.xml")$size)
styleSplit = strsplit(f, "<dependency name=\"nodeSizeLocked\" value=\"true\"/>", fixed = TRUE)
before = unlist(styleSplit)[1]
after = unlist(styleSplit)[2]

# READ NETWORK as one element per line (handles both CRLF and LF endings).
network = readChar(nameOfNetwork, file.info(nameOfNetwork)$size)
network = unlist(strsplit(x = network, "\r\n"))
network = unlist(strsplit(x = network, "\n"))

# CLASSIFY ATTRIBUTES BASED ON TYPE: node ids look like "<prefix>__name_ti" or
# "<prefix>__name_ti+1"; strip the markup and timepoint suffix, then bucket by prefix.
namesAll = str_extract(network[grep("<node id=", network)], "((s|g|m|hg.)__)?.+\\_ti(\\+1)?")
namesAll = gsub("<node id=\"", "", namesAll)
namesAll = gsub("_ti", "", namesAll)
namesAll = gsub("\\+1", "", namesAll)
namesAll = unique(namesAll)
prefix = substr(namesAll, 1, 3)
taxa = namesAll[which(prefix %in% "s__")]
host = c(namesAll[which(prefix %in% "hgi")],
         namesAll[which(prefix %in% "hgr")],
         namesAll[which(prefix %in% "hgs")])
genes = namesAll[which(prefix %in% "g__")]
metabolites = namesAll[which(prefix %in% "m__")]
clinical = c("Week sample obtained_ti", "Week sample obtained_ti+1")
# Renamed from `all` to avoid masking base::all().
allNodes = c(host, taxa, genes, metabolites, clinical)

################## NODE COLOR -> timepoint
aux = "\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#ff9232\">\n <discreteMapping attributeType=\"string\" attributeName=\"name\">"
for (i in seq_along(allNodes)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"#4d93a8\" attributeValue=\"",
              paste(allNodes[i], "_ti", sep = ""), "\"/>", sep = "")
}
aux = paste(aux, "\n </discreteMapping>\n </visualProperty>", sep = "")
f = paste(before, "<dependency name=\"nodeSizeLocked\" value=\"true\"/>", aux, after, sep = "")

############### TRANSPARENCY -> abundance
# Rescale each data type's abundances independently onto the [90, 255] alpha range.
abundancesScaled = c(
  rescale(abundances[1:length(host)], c(90, 255)),
  rescale(abundances[(1 + length(host)):(length(taxa) + length(host))], c(90, 255)),
  rescale(abundances[(1 + length(host) + length(taxa)):(length(genes) + length(taxa) + length(host))], c(90, 255)),
  rescale(abundances[(1 + length(host) + length(taxa) + length(genes)):(length(genes) + length(taxa) + length(host) + length(metabolites))], c(90, 255)))
abundancesScaled = c(200, abundancesScaled)
aux = paste(aux, "\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\">\n <discreteMapping attributeType=\"string\" attributeName=\"name\">", sep = "")
# NOTE(review): this loop iterates over namesAll but indexes allNodes and
# abundancesScaled, whose ordering/length can differ from namesAll -- confirm
# the positional alignment is intended before relying on these alpha values.
for (i in seq_along(namesAll)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"", round(abundancesScaled[i], 1),
              "\" attributeValue=\"", allNodes[i], "_ti", "\"/>", sep = "")
  aux = paste(aux, "\n <discreteMappingEntry value=\"", round(abundancesScaled[i], 1),
              "\" attributeValue=\"", allNodes[i], "_ti+1", "\"/>", sep = "")
}
aux = paste(aux, "\n </discreteMapping>\n </visualProperty>", sep = "")
f = paste(before, "<dependency name=\"nodeSizeLocked\" value=\"true\"/>", aux, after, sep = "")

############### SHAPE -> type of data
aux = paste(aux, "\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\">\n <discreteMapping attributeType=\"string\" attributeName=\"name\">", sep = "")
for (i in seq_along(genes)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"DIAMOND\" attributeValue=\"", genes[i], "_ti", "\"/>", sep = "")
  aux = paste(aux, "\n <discreteMappingEntry value=\"DIAMOND\" attributeValue=\"", genes[i], "_ti+1", "\"/>", sep = "")
}
for (i in seq_along(host)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"HEXAGON\" attributeValue=\"", host[i], "_ti", "\"/>", sep = "")
  aux = paste(aux, "\n <discreteMappingEntry value=\"HEXAGON\" attributeValue=\"", host[i], "_ti+1", "\"/>", sep = "")
}
# Clinical node names already carry their own timepoint suffix, so none is appended.
# (The original emitted each clinical entry twice; the duplicate is dropped here.)
for (i in seq_along(clinical)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"", clinical[i], "", "\"/>", sep = "")
}
for (i in seq_along(metabolites)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"", metabolites[i], "_ti", "\"/>", sep = "")
  aux = paste(aux, "\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"", metabolites[i], "_ti+1", "\"/>", sep = "")
}
aux = paste(aux, "\n </discreteMapping>\n </visualProperty>", sep = "")
f = paste(before, "<dependency name=\"nodeSizeLocked\" value=\"true\"/>", aux, after, sep = "")

# Re-split the updated style at the arrowColorMatchesEdge dependency so edge
# visual properties can be inserted there.
styleSplit = strsplit(f, "<dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>", fixed = TRUE)
before = unlist(styleSplit)[1]
after = unlist(styleSplit)[2]

################## EDGE LINE TYPE -> intra-inter
# Intra-timepoint edges (ti-ti or ti+1-ti+1) are dashed; inter-timepoint edges stay solid.
aux = "\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\">\n <discreteMapping attributeType=\"string\" attributeName=\"shared name\">"
for (i in seq_along(allNodes)) {
  for (j in i:length(allNodes)) {
    aux = paste(aux, "\n <discreteMappingEntry value=\"EQUAL_DASH\" attributeValue=\"", allNodes[i], "_ti+1 (-) ", allNodes[j], "_ti+1", "\"/>", sep = "")
    aux = paste(aux, "\n <discreteMappingEntry value=\"EQUAL_DASH\" attributeValue=\"", allNodes[i], "_ti (-) ", allNodes[j], "_ti", "\"/>", sep = "")
  }
}
aux = paste(aux, "\n </discreteMapping>\n </visualProperty>", sep = "")

################## EDGE TRANSPARENCY -> bootstrap score
weights = str_extract(network[grep("key=\"key_bootScore", network)], "\\-*\\d+\\.+\\d+")
weights = weights[!is.na(weights)]
maxAbsWeight = max(abs(as.numeric(weights)))
minAbsWeight = min(abs(as.numeric(weights)))
aux = paste(aux, "\n<visualProperty name=\"EDGE_TRANSPARENCY\" default=\"2.0\">\n")
aux = paste(aux, " <continuousMapping attributeType=\"float\" attributeName=\"bootScore\">\n")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"50.0\" greaterValue=\"50.0\" equalValue=\"50.0\" attrValue=\"", minAbsWeight, "\"/>\n", sep = "")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"50.0\" greaterValue=\"255.0\" equalValue=\"255.0\" attrValue=\"", maxAbsWeight, "\"/>\n", sep = "")
aux = paste(aux, " </continuousMapping>\n")
aux = paste(aux, " </visualProperty>\n")

######## Make self loops (node_ti -> node_ti+1) invisible.
aux = paste(aux, "\n<visualProperty default=\"true\" name=\"EDGE_VISIBLE\">")
aux = paste(aux, "\n<discreteMapping attributeName=\"name\" attributeType=\"string\">")
for (i in seq_along(allNodes)) {
  aux = paste(aux, "\n <discreteMappingEntry attributeValue=\"", allNodes[i], "_ti (-) ", allNodes[i], "_ti+1\"", " value=\"false\"/>", sep = "")
}
aux = paste(aux, " \n</discreteMapping>\n")
aux = paste(aux, " </visualProperty>\n")

################## EDGE WIDTH -> edge coefficient (weights assumed normalized to [-1, 1])
weights = str_extract(network[grep("key=\"key_weight", network)], "\\-*\\d+\\.+\\d+")
weights = weights[!is.na(weights)]
maxAbsWeight = max(abs(as.numeric(weights)))        # retained for the non-normalized variant
medianAbsWeight = median(abs(as.numeric(weights)))  # retained for the non-normalized variant
# maxAbsWeight = 150 # when we hide intra edges we need this
aux = paste(aux, "\n<visualProperty name=\"EDGE_WIDTH\" default=\"2.0\">\n")
aux = paste(aux, " <continuousMapping attributeType=\"float\" attributeName=\"weight\">\n")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"15.0\" greaterValue=\"15.0\" equalValue=\"15.0\" attrValue=\"-", 1, "\"/>\n", sep = "")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"1.0\" greaterValue=\"1.0\" equalValue=\"1.0\" attrValue=\"0.0\"/>\n")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"15.0\" greaterValue=\"15.0\" equalValue=\"15.0\" attrValue=\"", 1, "\"/>\n", sep = "")
aux = paste(aux, " </continuousMapping>\n")
aux = paste(aux, " </visualProperty>\n")

# Write the final style, re-inserting the dependency the style was split on.
fileConn <- file("style.xml")
writeLines(paste(before, "<dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>", aux, after, sep = ""), fileConn)
close(fileConn)
/Visualization/CreateStyleHeter.R
permissive
K-Tanjirou/PALM-Public-Respository
R
false
false
14,546
r
#########################################################################################################################
#########################################################################################################################
###### PROJECT: DBNs
###### NAME: CreateStyle.R
###### AUTHOR: Daniel Ruiz-Perez, PhD Student
###### AFFILIATION: Florida International University
######
###### DESCRIPTION: This file creates the style.xml file needed to visualize it in Cytoscape. It takes as input a network
###### , a file with the mean abundance and a base style file to update.
#########################################################################################################################
#########################################################################################################################

library(scales)
library(stringr)

# Avoid scientific notation in the numeric values written into the XML style.
options("scipen" = 100, "digits" = 4)

# Network to style. Earlier alternatives kept commented out for reference.
# nameOfNetwork = "DemoFigure.graphml" # list.files(pattern = ".*\\.graphml")
# nameOfNetwork = "/Networks/human_ibd_microbiota_genes_metabolites_dbn_sample_alignment_sr14d_top100x100_filtered_dbnIntraBoot100boots.graphml"
nameOfNetwork = "/Networks/human_ibd_microbiota_genes_metabolites_dbn_sample_alignment_sr14d_top100x100_host_genes_reference_dbnIntraBoot.graphml"
# NOTE(review): `folder` must be defined in the calling environment before sourcing this script.
nameOfNetwork = paste(folder, nameOfNetwork, sep = "/")

# Mean abundances, one value per node, used to drive node transparency below.
abundances = unlist(read.csv('meanAbundanceiHMPAlignmentHost.txt', sep = "\t"))

# READ STYLE BASE: split the base style at the nodeSizeLocked dependency so new
# node visual properties can be inserted at that point.
f = readChar("styleBase.xml", file.info("styleBase.xml")$size)
styleSplit = strsplit(f, "<dependency name=\"nodeSizeLocked\" value=\"true\"/>", fixed = TRUE)
before = unlist(styleSplit)[1]
after = unlist(styleSplit)[2]

# READ NETWORK as one element per line (handles both CRLF and LF endings).
network = readChar(nameOfNetwork, file.info(nameOfNetwork)$size)
network = unlist(strsplit(x = network, "\r\n"))
network = unlist(strsplit(x = network, "\n"))

# CLASSIFY ATTRIBUTES BASED ON TYPE: node ids look like "<prefix>__name_ti" or
# "<prefix>__name_ti+1"; strip the markup and timepoint suffix, then bucket by prefix.
namesAll = str_extract(network[grep("<node id=", network)], "((s|g|m|hg.)__)?.+\\_ti(\\+1)?")
namesAll = gsub("<node id=\"", "", namesAll)
namesAll = gsub("_ti", "", namesAll)
namesAll = gsub("\\+1", "", namesAll)
namesAll = unique(namesAll)
prefix = substr(namesAll, 1, 3)
taxa = namesAll[which(prefix %in% "s__")]
host = c(namesAll[which(prefix %in% "hgi")],
         namesAll[which(prefix %in% "hgr")],
         namesAll[which(prefix %in% "hgs")])
genes = namesAll[which(prefix %in% "g__")]
metabolites = namesAll[which(prefix %in% "m__")]
clinical = c("Week sample obtained_ti", "Week sample obtained_ti+1")
# Renamed from `all` to avoid masking base::all().
allNodes = c(host, taxa, genes, metabolites, clinical)

################## NODE COLOR -> timepoint
aux = "\n <visualProperty name=\"NODE_FILL_COLOR\" default=\"#ff9232\">\n <discreteMapping attributeType=\"string\" attributeName=\"name\">"
for (i in seq_along(allNodes)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"#4d93a8\" attributeValue=\"",
              paste(allNodes[i], "_ti", sep = ""), "\"/>", sep = "")
}
aux = paste(aux, "\n </discreteMapping>\n </visualProperty>", sep = "")
f = paste(before, "<dependency name=\"nodeSizeLocked\" value=\"true\"/>", aux, after, sep = "")

############### TRANSPARENCY -> abundance
# Rescale each data type's abundances independently onto the [90, 255] alpha range.
abundancesScaled = c(
  rescale(abundances[1:length(host)], c(90, 255)),
  rescale(abundances[(1 + length(host)):(length(taxa) + length(host))], c(90, 255)),
  rescale(abundances[(1 + length(host) + length(taxa)):(length(genes) + length(taxa) + length(host))], c(90, 255)),
  rescale(abundances[(1 + length(host) + length(taxa) + length(genes)):(length(genes) + length(taxa) + length(host) + length(metabolites))], c(90, 255)))
abundancesScaled = c(200, abundancesScaled)
aux = paste(aux, "\n <visualProperty name=\"NODE_TRANSPARENCY\" default=\"255\">\n <discreteMapping attributeType=\"string\" attributeName=\"name\">", sep = "")
# NOTE(review): this loop iterates over namesAll but indexes allNodes and
# abundancesScaled, whose ordering/length can differ from namesAll -- confirm
# the positional alignment is intended before relying on these alpha values.
for (i in seq_along(namesAll)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"", round(abundancesScaled[i], 1),
              "\" attributeValue=\"", allNodes[i], "_ti", "\"/>", sep = "")
  aux = paste(aux, "\n <discreteMappingEntry value=\"", round(abundancesScaled[i], 1),
              "\" attributeValue=\"", allNodes[i], "_ti+1", "\"/>", sep = "")
}
aux = paste(aux, "\n </discreteMapping>\n </visualProperty>", sep = "")
f = paste(before, "<dependency name=\"nodeSizeLocked\" value=\"true\"/>", aux, after, sep = "")

############### SHAPE -> type of data
aux = paste(aux, "\n <visualProperty name=\"NODE_SHAPE\" default=\"ELLIPSE\">\n <discreteMapping attributeType=\"string\" attributeName=\"name\">", sep = "")
for (i in seq_along(genes)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"DIAMOND\" attributeValue=\"", genes[i], "_ti", "\"/>", sep = "")
  aux = paste(aux, "\n <discreteMappingEntry value=\"DIAMOND\" attributeValue=\"", genes[i], "_ti+1", "\"/>", sep = "")
}
for (i in seq_along(host)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"HEXAGON\" attributeValue=\"", host[i], "_ti", "\"/>", sep = "")
  aux = paste(aux, "\n <discreteMappingEntry value=\"HEXAGON\" attributeValue=\"", host[i], "_ti+1", "\"/>", sep = "")
}
# Clinical node names already carry their own timepoint suffix, so none is appended.
# (The original emitted each clinical entry twice; the duplicate is dropped here.)
for (i in seq_along(clinical)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"TRIANGLE\" attributeValue=\"", clinical[i], "", "\"/>", sep = "")
}
for (i in seq_along(metabolites)) {
  aux = paste(aux, "\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"", metabolites[i], "_ti", "\"/>", sep = "")
  aux = paste(aux, "\n <discreteMappingEntry value=\"RECTANGLE\" attributeValue=\"", metabolites[i], "_ti+1", "\"/>", sep = "")
}
aux = paste(aux, "\n </discreteMapping>\n </visualProperty>", sep = "")
f = paste(before, "<dependency name=\"nodeSizeLocked\" value=\"true\"/>", aux, after, sep = "")

# Re-split the updated style at the arrowColorMatchesEdge dependency so edge
# visual properties can be inserted there.
styleSplit = strsplit(f, "<dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>", fixed = TRUE)
before = unlist(styleSplit)[1]
after = unlist(styleSplit)[2]

################## EDGE LINE TYPE -> intra-inter
# Intra-timepoint edges (ti-ti or ti+1-ti+1) are dashed; inter-timepoint edges stay solid.
aux = "\n <visualProperty name=\"EDGE_LINE_TYPE\" default=\"SOLID\">\n <discreteMapping attributeType=\"string\" attributeName=\"shared name\">"
for (i in seq_along(allNodes)) {
  for (j in i:length(allNodes)) {
    aux = paste(aux, "\n <discreteMappingEntry value=\"EQUAL_DASH\" attributeValue=\"", allNodes[i], "_ti+1 (-) ", allNodes[j], "_ti+1", "\"/>", sep = "")
    aux = paste(aux, "\n <discreteMappingEntry value=\"EQUAL_DASH\" attributeValue=\"", allNodes[i], "_ti (-) ", allNodes[j], "_ti", "\"/>", sep = "")
  }
}
aux = paste(aux, "\n </discreteMapping>\n </visualProperty>", sep = "")

################## EDGE TRANSPARENCY -> bootstrap score
weights = str_extract(network[grep("key=\"key_bootScore", network)], "\\-*\\d+\\.+\\d+")
weights = weights[!is.na(weights)]
maxAbsWeight = max(abs(as.numeric(weights)))
minAbsWeight = min(abs(as.numeric(weights)))
aux = paste(aux, "\n<visualProperty name=\"EDGE_TRANSPARENCY\" default=\"2.0\">\n")
aux = paste(aux, " <continuousMapping attributeType=\"float\" attributeName=\"bootScore\">\n")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"50.0\" greaterValue=\"50.0\" equalValue=\"50.0\" attrValue=\"", minAbsWeight, "\"/>\n", sep = "")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"50.0\" greaterValue=\"255.0\" equalValue=\"255.0\" attrValue=\"", maxAbsWeight, "\"/>\n", sep = "")
aux = paste(aux, " </continuousMapping>\n")
aux = paste(aux, " </visualProperty>\n")

######## Make self loops (node_ti -> node_ti+1) invisible.
aux = paste(aux, "\n<visualProperty default=\"true\" name=\"EDGE_VISIBLE\">")
aux = paste(aux, "\n<discreteMapping attributeName=\"name\" attributeType=\"string\">")
for (i in seq_along(allNodes)) {
  aux = paste(aux, "\n <discreteMappingEntry attributeValue=\"", allNodes[i], "_ti (-) ", allNodes[i], "_ti+1\"", " value=\"false\"/>", sep = "")
}
aux = paste(aux, " \n</discreteMapping>\n")
aux = paste(aux, " </visualProperty>\n")

################## EDGE WIDTH -> edge coefficient (weights assumed normalized to [-1, 1])
weights = str_extract(network[grep("key=\"key_weight", network)], "\\-*\\d+\\.+\\d+")
weights = weights[!is.na(weights)]
maxAbsWeight = max(abs(as.numeric(weights)))        # retained for the non-normalized variant
medianAbsWeight = median(abs(as.numeric(weights)))  # retained for the non-normalized variant
# maxAbsWeight = 150 # when we hide intra edges we need this
aux = paste(aux, "\n<visualProperty name=\"EDGE_WIDTH\" default=\"2.0\">\n")
aux = paste(aux, " <continuousMapping attributeType=\"float\" attributeName=\"weight\">\n")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"15.0\" greaterValue=\"15.0\" equalValue=\"15.0\" attrValue=\"-", 1, "\"/>\n", sep = "")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"1.0\" greaterValue=\"1.0\" equalValue=\"1.0\" attrValue=\"0.0\"/>\n")
aux = paste(aux, " <continuousMappingPoint lesserValue=\"15.0\" greaterValue=\"15.0\" equalValue=\"15.0\" attrValue=\"", 1, "\"/>\n", sep = "")
aux = paste(aux, " </continuousMapping>\n")
aux = paste(aux, " </visualProperty>\n")

# Write the final style, re-inserting the dependency the style was split on.
fileConn <- file("style.xml")
writeLines(paste(before, "<dependency name=\"arrowColorMatchesEdge\" value=\"false\"/>", aux, after, sep = ""), fileConn)
close(fileConn)
# Suppress R CMD check notes for NSE column names used in dplyr pipelines below.
utils::globalVariables(c("STATE", "MONTH","year"))

#' Read source data file
#'
#' @details This function looks for a CSV file called \code{filename} and checks whether it exists or not;
#' if found it loads the data using \code{readr::read_csv} and converts it to a dplyr dataframe using \code{dplyr::tbl_df}.
#' If no data file with that name exists, the function returns an error.
#'
#' @param filename a string and, optionally a path, representing a CSV file name
#'
#' @import dplyr
#'
#' @importFrom readr read_csv
#'
#' @return a dataframe
#'
#' @examples
#' \dontrun{
#' fars_read("accident_2013.csv")}
#'
#' @export
fars_read <- function(filename) {
        if(!file.exists(filename))
                stop("file '", filename, "' does not exist")
        # read_csv chatters about column types; keep the console quiet.
        data <- suppressMessages({
                readr::read_csv(filename, progress = FALSE)
        })
        # NOTE(review): tbl_df() is deprecated in current dplyr; tibble::as_tibble()
        # is the modern replacement -- confirm before upgrading the dependency.
        dplyr::tbl_df(data)
}

#' Standard data file name
#'
#' @details This function returns a standard name for a given year for the source zip files
#' from the US National Highway Traffic Safety Administration's Fatality Analysis Reporting System
#'
#' @param year an integer year value (YYYY)
#'
#' @return a string representing a standard file name for a given year
#'
#' @examples
#' \dontrun{
#' make_filename(2013)
#' # Creates the standard file name for the 2013 dataset
#' }
#'
#' @export
make_filename <- function(year) {
        year <- as.integer(year)
        sprintf("inst/ext_data/accident_%d.csv.bz2", year)
}

#' Data date range
#'
#' This function returns the month and year of the data in a range of annual data files
#'
#' @details This function iterates over a range of year values and uses the \code{\link{fars_read}} and \code{\link{make_filename}}
#' functions to find and report the content of the MONTH and YEAR columns in each data file.
#' The data files have to be in the same working directory. An invalid year produces a
#' warning and a \code{NULL} element in the returned list rather than an error.
#'
#' @param years a vector of integer year values (YYYY)
#'
#' @inheritParams fars_read
#'
#' @inheritParams make_filename
#'
#' @import dplyr
#'
#' @import magrittr
#'
#' @return A list of tibbles of the MONTH and year values for each data file in the \code{years} range
#'
#' @examples
#' \dontrun{
#' fars_read_years(c(2013:2015))
#' }
#'
#' @export
fars_read_years <- function(years) {
        lapply(years, function(year) {
                file <- make_filename(year)
                tryCatch({
                        dat <- fars_read(file)
                        dplyr::mutate(dat, year = year) %>%
                                dplyr::select(MONTH, year)
                }, error = function(e) {
                        # A missing file for this year is reported, not fatal.
                        warning("invalid year: ", year)
                        return(NULL)
                })
        })
}

#' Summary statistics
#'
#' This function provides summary monthly statistics for each year in a range
#'
#' @details This function uses the output from \code{\link{fars_read_years}}
#' to generate summary accident statistics by year and \code{MONTH}.
#'
#' @inheritParams fars_read_years
#'
#' @return table of summary statistics (one row per month, one column per year)
#'
#' @import dplyr
#'
#' @importFrom tidyr spread
#'
#' @importFrom utils installed.packages
#'
#' @examples
#' \dontrun{
#' fars_summarize_years(c(2013:2015))
#' }
#'
#' @export
fars_summarize_years <- function(years) {
        dat_list <- fars_read_years(years)
        dplyr::bind_rows(dat_list) %>%
                dplyr::group_by(year, MONTH) %>%
                dplyr::summarize(n = n()) %>%
                tidyr::spread(year, n)
}

#' Map Accidents
#'
#' This function maps accidents in an individual U.S. State in a given year
#'
#' @details For a given year value, this function reads the relevant data file
#' using the \code{\link{make_filename}} and \code{\link{fars_read}} functions.
#' It checks that the state exists and that any accidents were reported that year in that state.
#' The function also removes erroneous longitude and latitude entries in the raw data
#' (\code{LONGITUD > 900} and \code{LATITUDE > 90}) and uses the \code{maps} package to
#' draw the relevant map and the \code{graphics} package to plot dots.
#'
#' @param state.num the unique identification of a U.S. state
#' @param year relevant data year
#' @inheritParams fars_read
#' @inheritParams make_filename
#'
#' @import maps
#' @import dplyr
#' @importFrom graphics points
#'
#' @return a long/lat plot of reported accidents in the U.S. state and year of choice against a state boundary map
#'
#' @examples
#' \dontrun{
#' fars_map_state("12","2013")
#' }
#'
#' @export
fars_map_state <- function(state.num, year) {
        filename <- make_filename(year)
        data <- fars_read(filename)
        state.num <- as.integer(state.num)
        if(!(state.num %in% unique(data$STATE)))
                stop("invalid STATE number: ", state.num)
        data.sub <- dplyr::filter(data, STATE == state.num)
        if(nrow(data.sub) == 0L) {
                message("no accidents to plot")
                return(invisible(NULL))
        }
        # Sentinel coordinates in the raw data are converted to NA before plotting.
        is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
        is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
        with(data.sub, {
                maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
                          xlim = range(LONGITUD, na.rm = TRUE))
                graphics::points(LONGITUD, LATITUDE, pch = 46)
        })
}

#' Testing
#'
#' This function runs the tests in the tests/ directory. It has no arguments.
#'
#' @importFrom testthat test_that test_dir expect_is
#'
#' @return test results from the testthat package
#'
#' @examples
#' \dontrun{
#' testing()
#' }
#'
#' @export
testing<- function(){
        #create test output
        test_output=fars_summarize_years(c(2013:2015))
        #run test files in tests/ directory
        testthat::test_dir("tests/testthat")
}
/R/fars_functions.R
no_license
barnabe/building_an_r_package
R
false
false
5,788
r
utils::globalVariables(c("STATE", "MONTH", "year"))

#' Read source data file
#'
#' @details This function looks for a CSV file called \code{filename} and
#' checks whether it exists or not. If found, it loads the data using
#' \code{readr::read_csv} and converts it to a dplyr data frame using
#' \code{dplyr::tbl_df}. If no data file with that name exists, the function
#' returns an error.
#'
#' @param filename a string and, optionally a path, representing a CSV file
#'   name
#'
#' @import dplyr
#'
#' @importFrom readr read_csv
#'
#' @return a dataframe
#'
#' @examples
#' \dontrun{
#' fars_read("accident_2013.csv")}
#'
#' @export
fars_read <- function(filename) {
  if (!file.exists(filename))
    stop("file '", filename, "' does not exist")
  data <- suppressMessages({
    readr::read_csv(filename, progress = FALSE)
  })
  dplyr::tbl_df(data)
}

#' Standard data file name
#'
#' @details This function returns a standard name for a given year for the
#' source zip files from the US National Highway Traffic Safety
#' Administration's Fatality Analysis Reporting System.
#'
#' @param year an integer year value (YYYY)
#'
#' @return a string representing a standard file name for a given year
#'
#' @examples
#' \dontrun{
#' make_filename(2013)
#' # returns the standard file name for the 2013 dataset
#' }
#'
#' @export
make_filename <- function(year) {
  year <- as.integer(year)
  sprintf("inst/ext_data/accident_%d.csv.bz2", year)
}

#' Data date range
#'
#' This function returns the month and year of the data in a range of annual
#' data files.
#'
#' @details This function iterates over a range of year values and uses the
#' \code{\link{fars_read}} and \code{\link{make_filename}} functions to find
#' and report the content of the MONTH and year columns in each data file.
#' The data files have to be in the same working directory.
#'
#' @param years a vector of integer year values (YYYY)
#'
#' @import dplyr
#' @import magrittr
#'
#' @return A list of tibbles of the MONTH and year values for each data file
#'   in the \code{years} range; \code{NULL} (with a warning) for any year
#'   whose data file cannot be read
#'
#' @examples
#' \dontrun{
#' fars_read_years(c(2013:2015))
#' }
#'
#' @export
fars_read_years <- function(years) {
  lapply(years, function(year) {
    file <- make_filename(year)
    tryCatch({
      dat <- fars_read(file)
      dplyr::mutate(dat, year = year) %>%
        dplyr::select(MONTH, year)
    }, error = function(e) {
      # A missing or unreadable file is reported but does not abort the
      # whole range: the offending year simply yields NULL.
      warning("invalid year: ", year)
      return(NULL)
    })
  })
}

#' Summary statistics
#'
#' This function provides summary monthly statistics for each year in a range.
#'
#' @details This function uses the output from \code{\link{fars_read_years}}
#' to generate summary accident statistics by year and \code{MONTH}.
#'
#' @inheritParams fars_read_years
#'
#' @return a table of monthly accident counts with one column per year
#'
#' @import dplyr
#'
#' @importFrom tidyr spread
#'
#' @examples
#' \dontrun{
#' fars_summarize_years(c(2013:2015))
#' }
#'
#' @export
fars_summarize_years <- function(years) {
  dat_list <- fars_read_years(years)
  dplyr::bind_rows(dat_list) %>%
    dplyr::group_by(year, MONTH) %>%
    dplyr::summarize(n = dplyr::n()) %>%
    tidyr::spread(year, n)
}

#' Map Accidents
#'
#' This function maps accidents in an individual U.S. state in a given year.
#'
#' @details For a given year value, this function reads the relevant data file
#' using the \code{\link{make_filename}} and \code{\link{fars_read}} functions.
#' It checks that the state exists and that any accidents were reported that
#' year in that state. The function also removes erroneous longitude and
#' latitude entries in the raw data (\code{LONGITUD > 900} and
#' \code{LATITUDE > 90}) and uses the \code{maps} package to draw the relevant
#' map and the \code{graphics} package to plot dots.
#'
#' @param state.num the unique identification number of a U.S. state
#' @param year relevant data year
#'
#' @import maps
#' @import dplyr
#' @importFrom graphics points
#'
#' @return a long/lat plot of reported accidents in the U.S. state and year of
#'   choice against a state boundary map; invisibly \code{NULL} when there are
#'   no accidents to plot
#'
#' @examples
#' \dontrun{
#' fars_map_state("12", "2013")
#' }
#'
#' @export
fars_map_state <- function(state.num, year) {
  filename <- make_filename(year)
  data <- fars_read(filename)
  state.num <- as.integer(state.num)

  if (!(state.num %in% unique(data$STATE)))
    stop("invalid STATE number: ", state.num)
  data.sub <- dplyr::filter(data, STATE == state.num)
  if (nrow(data.sub) == 0L) {
    message("no accidents to plot")
    return(invisible(NULL))
  }
  # Sentinel coordinates in the raw data are recoded to NA so that they do
  # not distort the plotting range below.
  is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
  is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
  with(data.sub, {
    maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
              xlim = range(LONGITUD, na.rm = TRUE))
    graphics::points(LONGITUD, LATITUDE, pch = 46)
  })
}

#' Testing
#'
#' This function runs the tests in the tests/ directory. It has no arguments.
#'
#' @importFrom testthat test_that test_dir expect_is
#'
#' @return test results from the testthat package
#'
#' @examples
#' \dontrun{
#' testing()
#' }
#'
#' @export
testing <- function() {
  # The previous version first computed an unused summary object via
  # fars_summarize_years(), which required the data files to be present just
  # to launch the test suite; that dead computation has been removed.
  testthat::test_dir("tests/testthat")
}
### Sparse lasso front end.
###
### X          - design matrix; a base matrix is converted to a sparse matrix
###              of class dgCMatrix (Matrix package); a dgCMatrix may also be
###              supplied directly
### y          - numeric response vector; cannot be given in sparse format
### lambda1    - vector of penalty values; all elements have to be positive
### wObs       - weights for the observations (defaults to 1 for every row)
### wLambda1   - per-coefficient weights associated with lambda1; currently
###              NOT implemented, so supplying them raises an error
### betaStart  - starting position for the beta vector (defaults to all zero)
### maxIterInner    - maximum number of iterations in the inner loop
### maxActivateVars - how many variables should be activated at the same time
### accuracy        - convergence tolerance for the algorithm
###
### NOTE(review): the previous header also described `graph`, `lambda2` and
### `maxIterOuter`, none of which are parameters of this function; that
### misleading documentation has been removed.
lasso <- function(X, y, lambda1, wObs = NULL, wLambda1 = NULL,
                  betaStart = NULL, maxIterInner = 10000,
                  maxActivateVars = 10, accuracy = 1e-6) {
  ## Coerce a dense matrix to the sparse class expected by the C routine.
  if (is.matrix(X)) {
    X <- as(X, "dgCMatrix")
  }

  ## y must have exactly one entry per row of X.
  if (length(y) != dim(X)[1]) {
    stop("y has incorrect length")
  }

  ## All penalties must be strictly positive; non-positive entries are
  ## raised to a small positive value with a warning.
  if (sum(lambda1 <= 0) > 0) {
    warning("Lambda1 has to have only positive elements; increasing to 1e-3")
    lambda1[lambda1 <= 0] <- 1e-3
  }

  if (is.null(wObs)) {
    wObs <- rep(1, dim(X)[1])
  }
  if (is.null(wLambda1)) {
    wLambda1 <- rep(1, dim(X)[2])
  } else {
    stop("weights for lambda1 are currently not implemented")
  }
  if (is.null(betaStart)) {
    betaStart <- rep(0, dim(X)[2])
  }

  ## The compiled routine works with penalties scaled by the number of
  ## observations.
  n <- dim(X)[1]
  lambda1 <- n * lambda1

  maxIterInner <- as.integer(maxIterInner)
  maxActivateVars <- as.integer(maxActivateVars)

  res <- .Call("LassoWrapper", X, y, wObs,
               betaStart, wLambda1, maxIterInner, accuracy,
               maxActivateVars, lambda1)
  return(res)
}
/R/lasso.R
no_license
Sandy4321/FusedLasso-1
R
false
false
2,274
r
### Sparse lasso front end.
###
### X          - design matrix; a base matrix is converted to a sparse matrix
###              of class dgCMatrix (Matrix package); a dgCMatrix may also be
###              supplied directly
### y          - numeric response vector; cannot be given in sparse format
### lambda1    - vector of penalty values; all elements have to be positive
### wObs       - weights for the observations (defaults to 1 for every row)
### wLambda1   - per-coefficient weights associated with lambda1; currently
###              NOT implemented, so supplying them raises an error
### betaStart  - starting position for the beta vector (defaults to all zero)
### maxIterInner    - maximum number of iterations in the inner loop
### maxActivateVars - how many variables should be activated at the same time
### accuracy        - convergence tolerance for the algorithm
###
### NOTE(review): the previous header also described `graph`, `lambda2` and
### `maxIterOuter`, none of which are parameters of this function; that
### misleading documentation has been removed.
lasso <- function(X, y, lambda1, wObs = NULL, wLambda1 = NULL,
                  betaStart = NULL, maxIterInner = 10000,
                  maxActivateVars = 10, accuracy = 1e-6) {
  ## Coerce a dense matrix to the sparse class expected by the C routine.
  if (is.matrix(X)) {
    X <- as(X, "dgCMatrix")
  }

  ## y must have exactly one entry per row of X.
  if (length(y) != dim(X)[1]) {
    stop("y has incorrect length")
  }

  ## All penalties must be strictly positive; non-positive entries are
  ## raised to a small positive value with a warning.
  if (sum(lambda1 <= 0) > 0) {
    warning("Lambda1 has to have only positive elements; increasing to 1e-3")
    lambda1[lambda1 <= 0] <- 1e-3
  }

  if (is.null(wObs)) {
    wObs <- rep(1, dim(X)[1])
  }
  if (is.null(wLambda1)) {
    wLambda1 <- rep(1, dim(X)[2])
  } else {
    stop("weights for lambda1 are currently not implemented")
  }
  if (is.null(betaStart)) {
    betaStart <- rep(0, dim(X)[2])
  }

  ## The compiled routine works with penalties scaled by the number of
  ## observations.
  n <- dim(X)[1]
  lambda1 <- n * lambda1

  maxIterInner <- as.integer(maxIterInner)
  maxActivateVars <- as.integer(maxActivateVars)

  res <- .Call("LassoWrapper", X, y, wObs,
               betaStart, wLambda1, maxIterInner, accuracy,
               maxActivateVars, lambda1)
  return(res)
}
# MRInput class

#' MRInput Class
#'
#' @description An object containing the four vectors of summary statistics
#' required to calculate Mendelian randomization estimates.
#'
#' @slot betaX A numeric vector of beta-coefficient values for genetic
#' associations with the first variable (often referred to as the exposure,
#' risk factor, or modifiable phenotype).
#' @slot betaY A numeric vector of beta-coefficient values for genetic
#' associations with the second variable (often referred to as the outcome).
#' For a disease outcome, the beta coefficients are log odds estimates from
#' logistic regression analyses.
#' @slot betaXse The standard errors associated with the beta-coefficients
#' in \code{betaX}.
#' @slot betaYse The standard errors associated with the beta-coefficients
#' in \code{betaY}.
#' @slot correlation The matrix of correlations between genetic variants. If
#' this variable is not provided, then we assume that genetic variants are
#' uncorrelated.
#' @slot exposure The name of the exposure variable.
#' @slot outcome The name of the outcome variable.
#' @slot snps The names of the genetic variants (SNPs) included in the
#' analysis. The slots \code{exposure}, \code{outcome}, and \code{snps} are
#' not required, but may be useful for keeping track of various
#' \code{MRInput} objects. They are also used by the \code{mr_plot} function.
#' @slot effect_allele The name of the effect allele for each SNP. The
#' beta-coefficients are the associations with the exposure and outcome per
#' additional copy of the effect allele.
#' @slot other_allele The name of the non-effect allele.
#' @slot eaf The expected allele frequencies (numeric). The slots
#' \code{effect_allele}, \code{other_allele}, and \code{eaf} are neither
#' required, nor currently used in the MendelianRandomization package. They
#' are included for future compatibility with the MR-Base suite of functions.
#'
#' @details The beta-coefficients are assumed to be estimated for
#' uncorrelated (independent) genetic variants, although a correlation
#' matrix can be specified if the variants are correlated in their
#' distributions. We also assume that the beta-coefficients for associations
#' with the exposure and with the outcome are uncorrelated (corresponding to
#' a two-sample Mendelian randomization analysis), although correlation
#' between associations with the exposure and with the outcome generally
#' have little impact on causal estimates or standard errors.
#'
#' Estimates can either be specified by the user, or extracted from the
#' PhenoScanner tool.
#'
#' @seealso \code{extract.pheno.csv()} for a description of how the above
#' values can be extracted from PhenoScanner
#' \url{http://www.phenoscanner.medschl.cam.ac.uk/}.

setClass("MRInput",
         representation(betaX = "numeric",
                        betaY = "numeric",
                        betaXse = "numeric",
                        betaYse = "numeric",
                        exposure = "character",
                        outcome = "character",
                        snps = "character",
                        effect_allele = "character",
                        other_allele = "character",
                        eaf = "numeric",
                        correlation = "matrix"),
         prototype = prototype(betaX = ldlc,
                               betaY = chdlodds,
                               betaXse = ldlcse,
                               betaYse = chdloddsse,
                               exposure = "LDL-c",
                               outcome = "CHD",
                               snps = "snp",
                               effect_allele = lipid_effect,
                               other_allele = lipid_other,
                               eaf = lipid_eaf,
                               correlation = calc.rho)
)

# Ensure the four association vectors all have the same length.
# NOTE(review): the previous validity function compared betaX with betaY
# twice (and never checked betaXse against betaX), and reported the problem
# via cat() while returning NULL, so validObject() never actually rejected
# an invalid object. setValidity() expects TRUE for valid objects and a
# character string describing the problem otherwise; the check now covers
# all four vectors and returns the message string.
setValidity("MRInput", function(object) {
  lens <- c(length(object@betaX), length(object@betaY),
            length(object@betaXse), length(object@betaYse))
  if (length(unique(lens)) != 1L) {
    "Vectors do not all have the same length."
  } else {
    TRUE
  }
})

#--------------------------------------------------------------------------------------------

#' WeightedMedian Class
#'
#' @description An object containing the estimate produced using the
#' median-based method as well as various statistics.
#'
#' @slot Type The type of median that has been calculated, \code{"simple"},
#' \code{"weighted"}, or \code{"penalized"}.
#' @slot Exposure The name of the exposure variable.
#' @slot Outcome The name of the outcome variable.
#' @slot Estimate The causal point estimate from the median-based method.
#' @slot StdError The standard error associated with \code{Estimate}
#' (obtained from bootstrapping).
#' @slot CILower The lower bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot CIUpper The upper bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot Alpha The significance level used in constructing the confidence
#' interval (default is 0.05).
#' @slot Pvalue P-value associated with the causal estimate from the Wald
#' method.
#' @slot SNPs The number of SNPs that were used in the calculation.

setClass("WeightedMedian",
         representation(Type = "character",
                        Exposure = "character",
                        Outcome = "character",
                        Estimate = "numeric",
                        StdError = "numeric",
                        CILower = "numeric",
                        CIUpper = "numeric",
                        Alpha = "numeric",
                        Pvalue = "numeric",
                        SNPs = "numeric")
)

#--------------------------------------------------------------------------------------------

#' IVW Class
#'
#' @description An object containing the estimate produced using the
#' inverse-variance weighted (IVW) method as well as various statistics.
#'
#' @slot Model The model used for estimation: random-effects
#' (\code{"random"}) or fixed-effect (\code{"fixed"}). The default option
#' (\code{"default"}) is to use a fixed-effect model when there are three or
#' fewer genetic variants, and a random-effects model when there are four or
#' more. The (multiplicative) random-effects model allows for heterogeneity
#' between the causal estimates targeted by the genetic variants by allowing
#' over-dispersion in the regression model. Under-dispersion is not
#' permitted (in case of under-dispersion, the residual standard error is
#' set to 1, as in a fixed-effect analysis).
#' @slot Exposure The name of the exposure variable.
#' @slot Outcome The name of the outcome variable.
#' @slot Correlation The matrix of correlations between genetic variants.
#' @slot Robust Whether robust regression was used in the regression model
#' relating the genetic associations with the outcome and those with the
#' exposure.
#' @slot Penalized Whether weights in the regression model were penalized
#' for variants with heterogeneous causal estimates.
#' @slot Estimate The causal point estimate from the inverse-variance
#' weighted method.
#' @slot StdError The standard error associated with \code{Estimate}.
#' @slot CILower The lower bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot CIUpper The upper bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot Alpha The significance level used in constructing the confidence
#' interval (default is 0.05).
#' @slot Pvalue P-value associated with the causal estimate.
#' @slot SNPs The number of SNPs that were used in the calculation.
#' @slot RSE The estimated residual standard error from the regression model.
#' @slot Heter.Stat Heterogeneity statistic (Cochran's Q statistic) and
#' associated p-value: the null hypothesis is that all genetic variants
#' estimate the same causal parameter; rejection of the null is an
#' indication that one or more variants may be pleiotropic.

setClass("IVW",
         representation(Model = "character",
                        Exposure = "character",
                        Outcome = "character",
                        Robust = "logical",
                        Penalized = "logical",
                        Correlation = "matrix",
                        Estimate = "numeric",
                        StdError = "numeric",
                        CILower = "numeric",
                        CIUpper = "numeric",
                        Alpha = "numeric",
                        Pvalue = "numeric",
                        SNPs = "numeric",
                        RSE = "numeric",
                        Heter.Stat = "numeric")
)

#--------------------------------------------------------------------------------------------

#' Egger Class
#'
#' @description An object containing the estimate produced using the
#' MR-Egger method as well as various statistics.
#'
#' The MR-Egger model uses a random-effects model; a fixed-effect model does
#' not make sense as pleiotropy leads to heterogeneity between the causal
#' estimates targeted by the genetic variants. The (multiplicative)
#' random-effects model allows over-dispersion in the regression model.
#' Under-dispersion is not permitted (in case of under-dispersion, the
#' residual standard error is set to 1).
#'
#' @slot Model Model always takes the value \code{random}, as only
#' random-effects analyses are permitted.
#' @slot Exposure The name of the exposure variable.
#' @slot Outcome The name of the outcome variable.
#' @slot Correlation The matrix of correlations between genetic variants.
#' @slot Robust Whether robust regression was used in the regression model
#' relating the genetic associations with the outcome and those with the
#' exposure.
#' @slot Penalized Whether weights in the regression model were penalized
#' for variants with heterogeneous causal estimates.
#' @slot Estimate The causal point estimate from the MR-Egger method.
#' @slot StdError.Est The standard error associated with \code{Estimate}.
#' @slot Pvalue.Est P-value associated with the causal estimate from the
#' Wald method.
#' @slot CILower.Est The lower bound of the confidence interval for
#' \code{Estimate} based on \code{StdError.Est}.
#' @slot CIUpper.Est The upper bound of the confidence interval for
#' \code{Estimate} based on \code{StdError.Est}.
#' @slot Intercept The intercept estimate from the MR-Egger method. Under
#' the InSIDE assumption, the intercept represents the average pleiotropic
#' effect (average direct effect on the outcome) of a genetic variant. If
#' the intercept differs from zero, this is evidence that the genetic
#' variants are not all valid instruments; specifically, there is
#' directional pleiotropy.
#' @slot StdError.Int The standard error associated with \code{Intercept}.
#' @slot Pvalue.Int P-value associated with the intercept from the Wald
#' method.
#' @slot CILower.Int The lower bound of the confidence interval for
#' \code{Intercept} based on \code{StdError.Int}.
#' @slot CIUpper.Int The upper bound of the confidence interval for
#' \code{Estimate} based on \code{StdError.Int}.
#' @slot Alpha The significance level used in constructing the confidence
#' interval (default is 0.05).
#' @slot SNPs The number of SNPs that were used in the calculation.
#' @slot Causal.pval P-value associated with the causal estimate.
#' @slot Pleio.pval P-value associated with the intercept (p-value for the
#' MR-Egger intercept test of directional pleiotropy).
#' @slot RSE The estimated residual standard error from the regression model.
#' @slot Heter.Stat Heterogeneity statistic (Cochran's Q statistic) and
#' associated p-value: the null hypothesis is that the MR-Egger regression
#' model describes the associations with the outcome with no excess
#' heterogeneity.
#' @slot I.sq A measure of heterogeneity between the genetic associations
#' with the exposure (see Bowden IJE 2016: "Assessing the suitability of
#' summary data for Mendelian randomization analyses using MR-Egger
#' regression: The role of the I2 statistic."). Low values of \code{I.sq}
#' relate both to large differences in precision between MR-Egger and IVW
#' estimates, and to more weak instrument bias (in a two-sample setting,
#' this is attenuation of MR-Egger estimate towards the null).

setClass("Egger",
         representation(Model = "character",
                        Exposure = "character",
                        Outcome = "character",
                        Robust = "logical",
                        Penalized = "logical",
                        Correlation = "matrix",
                        Estimate = "numeric",
                        StdError.Est = "numeric",
                        CILower.Est = "numeric",
                        CIUpper.Est = "numeric",
                        Pvalue.Est = "numeric",
                        Intercept = "numeric",
                        StdError.Int = "numeric",
                        CILower.Int = "numeric",
                        CIUpper.Int = "numeric",
                        Pvalue.Int = "numeric",
                        Pleio.pval = "numeric",
                        Causal.pval = "numeric",
                        Alpha = "numeric",
                        SNPs = "numeric",
                        RSE = "numeric",
                        Heter.Stat = "numeric",
                        I.sq = "numeric")
)

#--------------------------------------------------------------------------------------------

#' MRAll Class
#'
#' @description An object containing the estimates produced using the
#' \code{mr_allmethods} function.
#'
#' @slot Data The \code{mr_input} object that was used as an input to the
#' \code{mr_allmethods} function. This includes the original data, so that a
#' call to \code{mr_plot} can plot the original data and the various causal
#' estimates.
#' @slot Values A data.frame object comprising estimates from the various
#' methods called by the \code{mr_allmethods} function. The first column
#' gives the names of the methods, then the causal estimates, standard
#' errors, 95\% confidence intervals, and p-values.
#' @slot Method A string indicating whether all methods are implemented
#' (\code{"all"}, the default option), or just main methods
#' (\code{"main"}), or only a subset of methods (\code{"ivw"},
#' \code{"egger"}, or \code{"median"}).

setClass("MRAll",
         representation(Data = "MRInput",
                        Values = "data.frame",
                        Method = "character"),
         contains = "data.frame"
)

#--------------------------------------------------------------------------------------------

#' MaxLik Class
#'
#' @description An object containing the estimate produced using the
#' maximum-likelihood method as well as various statistics.
#'
#' @slot Model The model used for estimation: fixed-effect (\code{"fixed"})
#' or random-effects (\code{"random"}).
#' @slot Exposure The name of the exposure variable.
#' @slot Outcome The name of the outcome variable.
#' @slot Correlation The matrix of correlations between genetic variants.
#' @slot Psi The correlations between genetic associations with the exposure
#' and with the outcome.
#' @slot Estimate The causal point estimate from the maximum-likelihood
#' method.
#' @slot StdError The standard error associated with \code{Estimate}.
#' @slot CILower The lower bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot CIUpper The upper bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot Alpha The significance level used in constructing the confidence
#' interval (default is 0.05).
#' @slot Pvalue P-value associated with the causal estimate.
#' @slot SNPs The number of SNPs that were used in the calculation.
#' @slot RSE The estimated residual standard error from the regression model.
#' @slot Heter.Stat Heterogeneity statistic (likelihood ratio statistic) and
#' associated p-value: the null hypothesis is that all genetic variants
#' estimate the same causal parameter; rejection of the null is an
#' indication that one or more variants may be pleiotropic.

setClass("MaxLik",
         representation(Model = "character",
                        Exposure = "character",
                        Outcome = "character",
                        Correlation = "matrix",
                        Psi = "numeric",
                        Estimate = "numeric",
                        StdError = "numeric",
                        CILower = "numeric",
                        CIUpper = "numeric",
                        Alpha = "numeric",
                        Pvalue = "numeric",
                        SNPs = "numeric",
                        RSE = "numeric",
                        Heter.Stat = "numeric")
)

#--------------------------------------------------------------------------------------------
/R/AllClasses.R
no_license
lovemun/MendelianRandomization
R
false
false
17,063
r
# MRInput class

#' MRInput Class
#'
#' @description An object containing the four vectors of summary statistics
#' required to calculate Mendelian randomization estimates.
#'
#' @slot betaX A numeric vector of beta-coefficient values for genetic
#' associations with the first variable (often referred to as the exposure,
#' risk factor, or modifiable phenotype).
#' @slot betaY A numeric vector of beta-coefficient values for genetic
#' associations with the second variable (often referred to as the outcome).
#' For a disease outcome, the beta coefficients are log odds estimates from
#' logistic regression analyses.
#' @slot betaXse The standard errors associated with the beta-coefficients
#' in \code{betaX}.
#' @slot betaYse The standard errors associated with the beta-coefficients
#' in \code{betaY}.
#' @slot correlation The matrix of correlations between genetic variants. If
#' this variable is not provided, then we assume that genetic variants are
#' uncorrelated.
#' @slot exposure The name of the exposure variable.
#' @slot outcome The name of the outcome variable.
#' @slot snps The names of the genetic variants (SNPs) included in the
#' analysis. The slots \code{exposure}, \code{outcome}, and \code{snps} are
#' not required, but may be useful for keeping track of various
#' \code{MRInput} objects. They are also used by the \code{mr_plot} function.
#' @slot effect_allele The name of the effect allele for each SNP. The
#' beta-coefficients are the associations with the exposure and outcome per
#' additional copy of the effect allele.
#' @slot other_allele The name of the non-effect allele.
#' @slot eaf The expected allele frequencies (numeric). The slots
#' \code{effect_allele}, \code{other_allele}, and \code{eaf} are neither
#' required, nor currently used in the MendelianRandomization package. They
#' are included for future compatibility with the MR-Base suite of functions.
#'
#' @details The beta-coefficients are assumed to be estimated for
#' uncorrelated (independent) genetic variants, although a correlation
#' matrix can be specified if the variants are correlated in their
#' distributions. We also assume that the beta-coefficients for associations
#' with the exposure and with the outcome are uncorrelated (corresponding to
#' a two-sample Mendelian randomization analysis), although correlation
#' between associations with the exposure and with the outcome generally
#' have little impact on causal estimates or standard errors.
#'
#' Estimates can either be specified by the user, or extracted from the
#' PhenoScanner tool.
#'
#' @seealso \code{extract.pheno.csv()} for a description of how the above
#' values can be extracted from PhenoScanner
#' \url{http://www.phenoscanner.medschl.cam.ac.uk/}.

setClass("MRInput",
         representation(betaX = "numeric",
                        betaY = "numeric",
                        betaXse = "numeric",
                        betaYse = "numeric",
                        exposure = "character",
                        outcome = "character",
                        snps = "character",
                        effect_allele = "character",
                        other_allele = "character",
                        eaf = "numeric",
                        correlation = "matrix"),
         prototype = prototype(betaX = ldlc,
                               betaY = chdlodds,
                               betaXse = ldlcse,
                               betaYse = chdloddsse,
                               exposure = "LDL-c",
                               outcome = "CHD",
                               snps = "snp",
                               effect_allele = lipid_effect,
                               other_allele = lipid_other,
                               eaf = lipid_eaf,
                               correlation = calc.rho)
)

# Ensure the four association vectors all have the same length.
# NOTE(review): the previous validity function compared betaX with betaY
# twice (and never checked betaXse against betaX), and reported the problem
# via cat() while returning NULL, so validObject() never actually rejected
# an invalid object. setValidity() expects TRUE for valid objects and a
# character string describing the problem otherwise; the check now covers
# all four vectors and returns the message string.
setValidity("MRInput", function(object) {
  lens <- c(length(object@betaX), length(object@betaY),
            length(object@betaXse), length(object@betaYse))
  if (length(unique(lens)) != 1L) {
    "Vectors do not all have the same length."
  } else {
    TRUE
  }
})

#--------------------------------------------------------------------------------------------

#' WeightedMedian Class
#'
#' @description An object containing the estimate produced using the
#' median-based method as well as various statistics.
#'
#' @slot Type The type of median that has been calculated, \code{"simple"},
#' \code{"weighted"}, or \code{"penalized"}.
#' @slot Exposure The name of the exposure variable.
#' @slot Outcome The name of the outcome variable.
#' @slot Estimate The causal point estimate from the median-based method.
#' @slot StdError The standard error associated with \code{Estimate}
#' (obtained from bootstrapping).
#' @slot CILower The lower bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot CIUpper The upper bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot Alpha The significance level used in constructing the confidence
#' interval (default is 0.05).
#' @slot Pvalue P-value associated with the causal estimate from the Wald
#' method.
#' @slot SNPs The number of SNPs that were used in the calculation.

setClass("WeightedMedian",
         representation(Type = "character",
                        Exposure = "character",
                        Outcome = "character",
                        Estimate = "numeric",
                        StdError = "numeric",
                        CILower = "numeric",
                        CIUpper = "numeric",
                        Alpha = "numeric",
                        Pvalue = "numeric",
                        SNPs = "numeric")
)

#--------------------------------------------------------------------------------------------

#' IVW Class
#'
#' @description An object containing the estimate produced using the
#' inverse-variance weighted (IVW) method as well as various statistics.
#'
#' @slot Model The model used for estimation: random-effects
#' (\code{"random"}) or fixed-effect (\code{"fixed"}). The default option
#' (\code{"default"}) is to use a fixed-effect model when there are three or
#' fewer genetic variants, and a random-effects model when there are four or
#' more. The (multiplicative) random-effects model allows for heterogeneity
#' between the causal estimates targeted by the genetic variants by allowing
#' over-dispersion in the regression model. Under-dispersion is not
#' permitted (in case of under-dispersion, the residual standard error is
#' set to 1, as in a fixed-effect analysis).
#' @slot Exposure The name of the exposure variable.
#' @slot Outcome The name of the outcome variable.
#' @slot Correlation The matrix of correlations between genetic variants.
#' @slot Robust Whether robust regression was used in the regression model
#' relating the genetic associations with the outcome and those with the
#' exposure.
#' @slot Penalized Whether weights in the regression model were penalized
#' for variants with heterogeneous causal estimates.
#' @slot Estimate The causal point estimate from the inverse-variance
#' weighted method.
#' @slot StdError The standard error associated with \code{Estimate}.
#' @slot CILower The lower bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot CIUpper The upper bound of the confidence interval for
#' \code{Estimate} based on \code{StdError}.
#' @slot Alpha The significance level used in constructing the confidence
#' interval (default is 0.05).
#' @slot Pvalue P-value associated with the causal estimate.
#' @slot SNPs The number of SNPs that were used in the calculation.
#' @slot RSE The estimated residual standard error from the regression model.
#' @slot Heter.Stat Heterogeneity statistic (Cochran's Q statistic) and
#' associated p-value: the null hypothesis is that all genetic variants
#' estimate the same causal parameter; rejection of the null is an
#' indication that one or more variants may be pleiotropic.

setClass("IVW",
         representation(Model = "character",
                        Exposure = "character",
                        Outcome = "character",
                        Robust = "logical",
                        Penalized = "logical",
                        Correlation = "matrix",
                        Estimate = "numeric",
                        StdError = "numeric",
                        CILower = "numeric",
                        CIUpper = "numeric",
                        Alpha = "numeric",
                        Pvalue = "numeric",
                        SNPs = "numeric",
                        RSE = "numeric",
                        Heter.Stat = "numeric")
)

#--------------------------------------------------------------------------------------------

#' Egger Class
#'
#' @description An object containing the estimate produced using the
#' MR-Egger method as well as various statistics.
#'
#' The MR-Egger model uses a random-effects model; a fixed-effect model does
#' not make sense as pleiotropy leads to heterogeneity between the causal
#' estimates targeted by the genetic variants. The (multiplicative)
#' random-effects model allows over-dispersion in the regression model.
#' Under-dispersion is not permitted (in case of under-dispersion, the
#' residual standard error is set to 1).
#'
#' @slot Model Model always takes the value \code{random}, as only
#' random-effects analyses are permitted.
#' @slot Exposure The name of the exposure variable.
#' @slot Outcome The name of the outcome variable.
#' @slot Correlation The matrix of correlations between genetic variants.
#' @slot Robust Whether robust regression was used in the regression model
#' relating the genetic associations with the outcome and those with the
#' exposure.
#' @slot Penalized Whether weights in the regression model were penalized
#' for variants with heterogeneous causal estimates.
#' @slot Estimate The causal point estimate from the MR-Egger method.
#' @slot StdError.Est The standard error associated with \code{Estimate}.
#' @slot Pvalue.Est P-value associated with the causal estimate from the
#' Wald method.
#' @slot CILower.Est The lower bound of the confidence interval for
#' \code{Estimate} based on \code{StdError.Est}.
#' @slot CIUpper.Est The upper bound of the confidence interval for
#' \code{Estimate} based on \code{StdError.Est}.
#' @slot Intercept The intercept estimate from the MR-Egger method. Under
#' the InSIDE assumption, the intercept represents the average pleiotropic
#' effect (average direct effect on the outcome) of a genetic variant. If
#' the intercept differs from zero, this is evidence that the genetic
#' variants are not all valid instruments; specifically, there is
#' directional pleiotropy.
#' @slot StdError.Int The standard error associated with \code{Intercept}.
#' @slot Pvalue.Int P-value associated with the intercept from the Wald
#' method.
#' @slot CILower.Int The lower bound of the confidence interval for
#' \code{Intercept} based on \code{StdError.Int}.
#' @slot CIUpper.Int The upper bound of the confidence interval for
#' \code{Estimate} based on \code{StdError.Int}.
#' @slot Alpha The significance level used in constructing the confidence
#' interval (default is 0.05).
#' @slot SNPs The number of SNPs that were used in the calculation.
#' @slot Causal.pval P-value associated with the causal estimate.
#' @slot Pleio.pval P-value associated with the intercept (p-value for the
#' MR-Egger intercept test of directional pleiotropy).
#' @slot RSE The estimated residual standard error from the regression model.
#' @slot Heter.Stat Heterogeneity statistic (Cochran's Q statistic) and
#' associated p-value: the null hypothesis is that the MR-Egger regression
#' model describes the associations with the outcome with no excess
#' heterogeneity.
#' @slot I.sq A measure of heterogeneity between the genetic associations
#' with the exposure (see Bowden IJE 2016: "Assessing the suitability of
#' summary data for Mendelian randomization analyses using MR-Egger
#' regression: The role of the I2 statistic."). Low values of \code{I.sq}
#' relate both to large differences in precision between MR-Egger and IVW
#' estimates, and to more weak instrument bias (in a two-sample setting,
#' this is attenuation of MR-Egger estimate towards the null).
setClass("Egger", representation(Model = "character", Exposure = "character", Outcome = "character", Robust = "logical", Penalized = "logical", Correlation = "matrix", Estimate = "numeric", StdError.Est = "numeric", CILower.Est = "numeric", CIUpper.Est = "numeric", Pvalue.Est = "numeric", Intercept = "numeric", StdError.Int = "numeric", CILower.Int = "numeric", CIUpper.Int = "numeric", Pvalue.Int = "numeric", Pleio.pval = "numeric", Causal.pval = "numeric", Alpha = "numeric", SNPs = "numeric", RSE = "numeric", Heter.Stat = "numeric", I.sq = "numeric") ) #-------------------------------------------------------------------------------------------- #' MRAll Class #' #' @description An object containing the estimates produced using the \code{mr_allmethods} function. #' #' @slot Data The \code{mr_input} object that was used as an input to the \code{mr_allmethods} function. This includes the original data, so that a call to \code{mr_plot} can plot the original data and the various causal estimates. #' @slot Values A data.frame object comprising estimates from the various methods called by the \code{mr_allmethods} function. The first column gives the names of the methods, then the causal estimates, standard errors, 95\% confidence intervals, and p-values. #' @slot Method A string indicating whether all methods are implemented (\code{"all"}, the default option), or just main methods (\code{"main"}), or only a subset of methods (\code{"ivw"}, \code{"egger"}, or \code{"median"}). setClass("MRAll", representation(Data = "MRInput", Values = "data.frame", Method = "character"), contains = "data.frame" ) #-------------------------------------------------------------------------------------------- #' MaxLik Class #' #' @description An object containing the estimate produced using the maximum-likelihood method as well as various statistics. #' #' @slot Model The model used for estimation: fixed-effect (\code{"fixed"}) or random-effects (\code{"random"}). 
#' @slot Exposure The name of the exposure variable. #' @slot Outcome The name of the outcome variable. #' @slot Correlation The matrix of correlations between genetic variants. #' @slot Psi The correlations between genetic associations with the exposure and with the outcome. #' @slot Estimate The causal point estimate from the inverse-variance weighted method. #' @slot StdError The standard error associated with \code{Estimate}. #' @slot CILower The lower bound of the confidence interval for \code{Estimate} based on \code{StdError}. #' @slot CIUpper The upper bound of the confidence interval for \code{Estimate} based on \code{StdError}. #' @slot Alpha The significance level used in constructing the confidence interval (default is 0.05). #' @slot Pvalue P-value associated with the causal estimate. #' @slot SNPs The number of SNPs that were used in the calculation. #' @slot RSE The estimated residual standard error from the regression model. #' @slot Heter.Stat Heterogeneity statistic (likelihood ratio statistic) and associated p-value: the null hypothesis is that all genetic variants estimate the same causal parameter; rejection of the null is an indication that one or more variants may be pleiotropic. setClass("MaxLik", representation(Model = "character", Exposure = "character", Outcome = "character", Correlation = "matrix", Psi = "numeric", Estimate = "numeric", StdError = "numeric", CILower = "numeric", CIUpper = "numeric", Alpha = "numeric", Pvalue = "numeric", SNPs = "numeric", RSE = "numeric", Heter.Stat = "numeric") ) #--------------------------------------------------------------------------------------------
\name{imgStdNErosionDilation} \alias{imgStdNErosionDilation} \title{Fixed mask NErosionDilation} \description{ This function applies erosion n times and then dilation n times, with a 0-squared matrix with a given dimension. } \usage{imgStdNErosionDilation(imgdata, n, dim=3)} \arguments{ \item{imgdata}{The image} \item{n}{Times to apply each operation} \item{dim}{mask's dimension (default = 3)} } \value{ return an imagedata object } \examples{ \dontrun{ x <- readJpeg(system.file("samples", "violet.jpg", package="biOps")) y <- imgStdNErosionDilation(x, 4, 5) } } \note{ This function accepts binary images only and will treat gray scale ones as binary images. } \seealso{ \code{\link{imgNErosionDilation}} } \keyword{math}
/08_grad_project/Deliverables/biOps/man/imgStdNErosionDilation.Rd
permissive
blairg23/pattern-recognition
R
false
false
740
rd
\name{imgStdNErosionDilation} \alias{imgStdNErosionDilation} \title{Fixed mask NErosionDilation} \description{ This function applies erosion n times and then dilation n times, with a 0-squared matrix with a given dimension. } \usage{imgStdNErosionDilation(imgdata, n, dim=3)} \arguments{ \item{imgdata}{The image} \item{n}{Times to apply each operation} \item{dim}{mask's dimension (default = 3)} } \value{ return an imagedata object } \examples{ \dontrun{ x <- readJpeg(system.file("samples", "violet.jpg", package="biOps")) y <- imgStdNErosionDilation(x, 4, 5) } } \note{ This function accepts binary images only and will treat gray scale ones as binary images. } \seealso{ \code{\link{imgNErosionDilation}} } \keyword{math}
#' Print method for \code{Roc} objects
#'
#' Delegates all work to the object's \code{summary} method with printing
#' switched on (\code{print.it = TRUE}); whatever that call returns is
#' returned here. The \code{summary.Roc} method is defined elsewhere in the
#' package -- presumably it both prints and invisibly returns a summary table
#' (confirm against summary.Roc).
#'
#' @param x A \code{Roc} object.
#' @param digits Number of digits used when printing (default 2).
#' @param ... Further arguments forwarded to the \code{summary} method.
#'
#' NOTE(review): roxygen's @S3method tag is deprecated in modern roxygen2
#' (use @export + @method); kept as-is to avoid changing the NAMESPACE build.
#' @S3method print Roc
print.Roc <- function(x,digits=2,...){
  summary(x,digits=digits,print.it=TRUE,...)
}
/R/print.Roc.R
no_license
cran/ModelGood
R
false
false
111
r
#' @S3method print Roc print.Roc <- function(x,digits=2,...){ summary(x,digits=digits,print.it=TRUE,...) }
#' @title Transform data
#' @description Calculates the log fold change by subtracting each control
#' (mock) column from its paired bait column.
#' @param df a data.frame whose numeric columns come in bait/control pairs,
#' ordered bait first: log2(bait1), log2(control1), log2(bait2), ...
#' @return \code{df} with one extra column \code{repN} per pair, where
#' \code{repN = log2(baitN) - log2(controlN)}, i.e. the log fold change.
#' @author flassen
#' @note assumes log2(bait1), log2(control1), log2(bait2)..
#' @export
logFC <- function(df){
  # Restrict to numeric columns; drop = FALSE keeps a data.frame even if
  # only one numeric column exists, so ncol() below stays valid.
  is_num <- sapply(df, is.numeric)
  dfNum <- df[, is_num, drop = FALSE]

  # Numeric columns must split evenly into bait/control pairs.
  pairs <- ncol(dfNum) / 2
  stopifnot(pairs == round(pairs))

  # seq_len() is safe when there are zero pairs (1:0 would iterate).
  for (i in seq_len(pairs)){
    colMock <- dfNum[, (i * 2)]       # control/mock column of pair i
    colBait <- dfNum[, (i * 2) - 1]   # bait column of pair i
    # log2(bait) - log2(control) == log2(bait / control)
    dfNum[[paste0('rep', i)]] <- colBait - colMock
  }

  # Select the newly created columns by exact name rather than grepl('rep'),
  # so pre-existing columns whose names merely contain "rep" are not pulled
  # in; drop = FALSE preserves the data.frame (and the "rep1" column name)
  # when there is only a single pair.
  new_cols <- paste0('rep', seq_len(pairs))
  return(cbind(df, dfNum[, new_cols, drop = FALSE]))
}
/R/logFC.R
no_license
frhl/pRoteomics
R
false
false
635
r
#' @title Transform data #' @description Calculates log fold change by substracting #' a case column with a bait column #' @param df a data.frame with bait and controls #' @author flassen #' @note assummes log2(bait1), log2(control1), log2(bait2).. #' @export logFC <- function(df){ dfNum <- sapply(df, is.numeric) dfNum <- df[, dfNum] pairs <- (ncol(dfNum))/2 stopifnot(pairs==round(pairs)) for (i in 1:pairs){ colMock = dfNum[, (i*2)] colBait = dfNum[, (i*2)-1] # log2(bait) - log2(control) dfNum[[paste0('rep',i)]] = colBait - colMock } return(cbind(df, dfNum[,grepl('rep', colnames(dfNum))])) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitting_estimation_functions.R \name{logdVK_HR} \alias{logdVK_HR} \title{Compute censored exponent measure} \usage{ logdVK_HR(x, K, par) } \arguments{ \item{x}{Numeric vector with \eqn{d} positive elements where the censored exponent measure is to be evaluated.} \item{K}{Integer vector, subset of \eqn{\{1, \dots, d\}}{{1, ..., d}}. The index set that is not censored.} \item{par}{Numeric vector with \eqn{\frac{d(d - 1)}{2}}{d x (d - 1) / 2} elements. It represents the upper triangular portion of a variogram matrix \eqn{\Gamma}.} } \value{ Numeric. The censored exponent measure of the HR distribution. } \description{ Computes the censored exponent measure density of HR distribution. }
/man/logdVK_HR.Rd
no_license
Black-Swan-ICL/graphicalExtremes
R
false
true
772
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitting_estimation_functions.R \name{logdVK_HR} \alias{logdVK_HR} \title{Compute censored exponent measure} \usage{ logdVK_HR(x, K, par) } \arguments{ \item{x}{Numeric vector with \eqn{d} positive elements where the censored exponent measure is to be evaluated.} \item{K}{Integer vector, subset of \eqn{\{1, \dots, d\}}{{1, ..., d}}. The index set that is not censored.} \item{par}{Numeric vector with \eqn{\frac{d(d - 1)}{2}}{d x (d - 1) / 2} elements. It represents the upper triangular portion of a variogram matrix \eqn{\Gamma}.} } \value{ Numeric. The censored exponent measure of the HR distribution. } \description{ Computes the censored exponent measure density of HR distribution. }
# Read one new data sheet from ./data-raw (parsed by data_save_bb, defined
# elsewhere in the package) and prepend its rows to the existing data set.
add_bb_dat <- function(file.name = NULL, edata = NULL, sheet.name = NULL,
                       skip.row = NULL, max.row = NULL, cols = NULL,
                       yrs = NULL, start.date = NULL, var.names = NULL) {
  # Files are always resolved relative to the package's data-raw directory.
  src_path <- paste0("./data-raw/", file.name)

  # Delegate parsing of the sheet to data_save_bb(); arguments after
  # path.name are passed positionally, matching its signature.
  fresh_rows <- data_save_bb(path.name = src_path,
                             sheet.name, skip.row, max.row,
                             cols, yrs, start.date, var.names)

  # Newly read rows go on top of the previously accumulated data (edata).
  rbind(fresh_rows, edata)
}
/R/add_bb_dat.R
no_license
rfaridi/macrobd
R
false
false
401
r
add_bb_dat <- function(file.name=NULL, edata=NULL, sheet.name=NULL, skip.row=NULL, max.row=NULL, cols=NULL, yrs=NULL, start.date=NULL, var.names=NULL) { dat <- edata path <- paste0("./data-raw/",file.name) new.dat <- data_save_bb(path.name=path, sheet.name,skip.row,max.row, cols,yrs,start.date,var.names) dat <- rbind(new.dat,dat) return(dat) }
# variance partitioning

#' Calculate the variance partitioning coefficient
#'
#' @param object An object created by \code{\link{study_parameters}}
#'
#' @details For partially nested studies, the VPC is calculated for the treatment group.
#'
#' @return a \code{data.frame} with class \code{plcp_VPC} containing the
#' percentage of variance per level and time point. The column
#' \code{between_clusters} is also the intraclass correlation for level three,
#' i.e. the correlation between two subjects belonging to the same cluster at
#' a specific time point. With random slopes in the model the variances per time point
#' will be a quadratic function of time. \code{tot_var} is the
#' percentage increase or decrease in total variance relative to baseline variance.
#'
#' The \code{plot} method returns a \code{ggplot2::ggplot} object.
#' @seealso \code{\link{plot.plcp_VPC}}
#'
#' @references Goldstein, H., Browne, W., & Rasbash, J. (2002).
#' Partitioning variation in multilevel models.
#' \emph{Understanding Statistics: Statistical Issues in Psychology, Education,
#' and the Social Sciences, 1}(4), 223-231.
#' @examples
#' paras <- study_parameters(n1 = 11,
#'                           n2 = 10,
#'                           n3 = 3,
#'                           T_end = 10,
#'                           icc_pre_subject = 0.5,
#'                           icc_pre_cluster = 0,
#'                           icc_slope = 0.05,
#'                           var_ratio = 0.03)
#'
#' res <- get_VPC(paras)
#' res
#'
#' # Plot
#' plot(res)
#' @export
get_VPC <- function(object) {
    UseMethod("get_VPC")
}

#' @rdname get_VPC
#' @export
get_VPC.plcp <- function(object) {
    # NA variance components are treated as zero so the formulas below stay
    # defined for models that omit some random effects.
    paras <- NA_to_zero(object)

    # Random-effect SDs: u* = subject level (L2), v* = cluster level (L3).
    u0 <- paras$sigma_subject_intercept
    u1 <- paras$sigma_subject_slope
    v0 <- paras$sigma_cluster_intercept
    v1 <- paras$sigma_cluster_slope
    v01 <- v0 * v1 * paras$cor_cluster  # cluster intercept-slope covariance
    error <- paras$sigma_error
    u01 <- paras$cor_subject * u0 * u1  # subject intercept-slope covariance
    time <- get_time_vector(paras)

    # Marginal variance at each time point; quadratic in time whenever the
    # random slopes are non-zero.
    tot_var <- (u0^2 + 2*u01*time + u1^2*time^2 + v0^2 + 2*v01*time + v1^2*time^2 + error^2)

    # Share of the total variance contributed by each level.
    lvl3 <- (v0^2 + 2*v01*time + v1^2*time^2)/tot_var
    lvl2 <- (u0^2 + 2*u01*time + u1^2*time^2)/tot_var
    lvl1 <- error^2/tot_var
    # Total variance relative to the first (baseline) time point.
    tot_var <- tot_var/tot_var[1]

    res <- data.frame(time,
                      between_clusters = lvl3*100,
                      between_subjects = lvl2*100,
                      within_subjects = lvl1*100,
                      tot_var = (tot_var-1)*100)
    class(res) <- append("plcp_VPC", class(res))
    res
}

#' @export
get_VPC.plcp_multi <- function(object) {
    # A grid of designs was supplied; fall back to the first row only.
    warning("Multiple study designs used, only the first is shown")
    get_VPC.plcp(object[1, ])
}

#' Plot method for \code{get_VPC}-objects
#'
#' @param x An object created with \code{\link{get_VPC}}
#' @param ... Optional arguments, currently ignored.
#'
#' @export
plot.plcp_VPC <- function(x, ...)
{
    check_installed("ggplot2")
    res <- x
    res$tot_var <- NULL
    # Wide -> long: one row per (time, level) with the variance share in
    # 'proportion'; columns 2:4 are the three level columns created above.
    res <- stats::reshape(res,
                          direction = "long",
                          varying = 2:4,
                          times = c("between_clusters",
                                    "between_subjects",
                                    "within_subjects"),
                          v.names = "proportion",
                          timevar = "level")
    res$level <- factor(res$level,
                        levels = c("between_clusters",
                                   "between_subjects",
                                   "within_subjects"),
                        labels = c("between-clusters (L3)",
                                   "between-subjects (L2)",
                                   "within-subjects (L1)"))
    p <- ggplot2::ggplot(res,
                         ggplot2::aes_string("time", "proportion",
                                             color = "level", fill = "level")) +
        ggplot2::geom_line() +
        ggplot2::geom_point() +
        ggplot2::labs(title = "Variance partitioning",
                      x = "Time point",
                      y = "Percentage of total variance (%)")
    # Nicer palette when the optional ggsci package is installed.
    if(requireNamespace("ggsci", quietly = TRUE)) {
        p <- p + ggsci::scale_fill_d3() + ggsci::scale_color_d3()
    }
    p
}

#' Print method for \code{get_VPC}-objects
#' @param x Object created with \code{\link{get_VPC}}
#' @param digits Number of digits to print
#' @param ... Optional arguments
#' @method print plcp_VPC
#' @export
print.plcp_VPC <- function(x, digits = 2, ...) {
    cat("# Percentage (%) of total variance at each level and time point\n")
    print.data.frame(x, digits = digits, scientific = FALSE, ...)
    # Return the object invisibly, as print methods should.
    invisible(x)
}

# Standard deviations -----------------------------------------------------

#' Calculate the model implied standard deviations per time point
#'
#' @param object An object created by \code{\link{study_parameters}}
#' @param treatment \code{character}; either \code{"treatment"} or \code{"control"}.
#' Indicates for which group SDs should be calculated for. This only makes a difference
#' for 3-level partially nested designs.
#' @param n Optional; selects row n if \code{object} is a \code{data.frame} of
#' parameters
#'
#' @seealso \code{\link{get_VPC}}, \code{\link{get_correlation_matrix}}
#' @return \code{data.frame} with class \code{plcp_sds} containing the model
#' implied standard deviations per time point.
#'
#' @export
#'
#' @examples
#' paras <- study_parameters(n1 = 11,
#'                           n2 = 10,
#'                           n3 = 6,
#'                           T_end = 10,
#'                           icc_pre_subject = 0.5,
#'                           icc_pre_cluster = 0,
#'                           icc_slope = 0.05,
#'                           var_ratio = 0.03)
#'
#' get_sds(paras)
#'
#' # plot
#' plot(get_sds(paras))
#'
get_sds <- function(object, treatment = "treatment", n = 1) {
    # Shared argument validation for all methods, before dispatch.
    if(!treatment %in% c("treatment", "control")) stop("Wrong 'treatment', allowed options are: 'treatment' or 'control'", call. = FALSE)
    UseMethod("get_sds")
}

#' @export
get_sds.plcp <- function(object, treatment = "treatment", n = NULL) {
    .p <- NA_to_zero(object)
    .p <- prepare_paras(.p)
    # Pick the parameter set for the requested arm; the two arms differ only
    # for partially nested designs (see prepare_paras).
    if(treatment == "treatment") {
        .p <- .p$treatment
    } else if(treatment == "control") {
        .p <- .p$control
    }
    .p$retention <- NULL
    .p$n2 <- NULL
    # Keep exactly the arguments expected by get_sds_ so do.call() matches
    # them by name.
    .p <- .p[c("sigma_subject_intercept",
               "cor_subject",
               "cor_cluster",
               "sigma_subject_slope",
               "sigma_cluster_slope",
               "sigma_cluster_intercept",
               "sigma_error",
               "n1",
               "T_end")]
    res <- do.call(get_sds_, .p)
    class(res) <- append(c("plcp_sds"), class(res))
    res
}

#' @export
get_sds.plcp_multi <- function(object, treatment = "treatment", n = 1) {
    # Grid of designs: compute SDs for the n:th row only.
    get_sds.plcp(as.plcp(object[n, ]), treatment = treatment)
}

# Internal workhorse: model-implied SDs per time point computed directly
# from the raw variance components.
get_sds_ <- function(sigma_subject_intercept,
                     cor_subject,
                     sigma_subject_slope,
                     sigma_cluster_intercept,
                     sigma_cluster_slope,
                     cor_cluster,
                     sigma_error,
                     n1,
                     T_end) {
    time <- seq(0, T_end, length.out = n1)
    u0 <- sigma_subject_intercept
    u1 <- sigma_subject_slope
    u01 <- u0 * u1 * cor_subject
    v0 <- sigma_cluster_intercept
    v1 <- sigma_cluster_slope
    v01 <- v0 * v1 * cor_cluster
    error <- sigma_error
    # Full model: random slopes at both subject and cluster level.
    sds <- sqrt((u0^2 + 2*u01*time + u1^2*time^2 + v0^2 + 2 * v01*time + v1^2*time^2 + error^2))
    # Cluster-level random slope removed.
    sds_lvl2 <- sqrt((u0^2 + 2*u01*time + u1^2*time^2 + v0^2 + error^2))
    res <- data.frame(time = time,
                      SD_with_random_slopes = sds,
                      SD_no_cluster_random_slope = sds_lvl2,
                      SD_no_random_slopes = sqrt(u0^2 + v0^2 + error^2))
    res
}

#' Plot method for \code{get_sds}-objects
#' @param x An object of class \code{plcp_sds}.
#' @param ... Optional arguments.
#' @export
plot.plcp_sds <- function(x, ...)
{
    check_installed("ggplot2")
    .res <- x
    cs <- .res$SD_no_random_slopes[1]  # NOTE(review): currently unused
    res <- .res
    res$time <- round(res$time,1)
    p <- ggplot2::ggplot(res, ggplot2::aes_string("time", "SD_with_random_slopes")) +
        ggplot2::geom_hline(ggplot2::aes_string(color = "'Random slopes = 0'",
                                                yintercept = "SD_no_random_slopes")) +
        ggplot2::geom_line(ggplot2::aes(color = "With random slopes")) +
        ggplot2::geom_point(ggplot2::aes(color = "With random slopes")) +
        ggplot2::scale_x_continuous(breaks = unique(res$time)) +
        ggplot2::labs(y = "SD",
                      x = "Time point",
                      title = "SD per time point",
                      color = "Model")
    # facet_grid(~cor_cluster + cor_subject, labeller = label_both)
    # if(!is.nulls(facet)) p + facet_wrap(facet)
    p
}

#' Print method for \code{get_sds}-objects
#' @param x An object of class \code{plcp_sds}.
#' @param ... Optional arguments.
#' @export
#' @method print plcp_sds
print.plcp_sds <- function(x, ...) {
    print.data.frame(x, digits = 2, ...)
}

# # CORRELATIONS LVL 2 #

#' Calculate the subject-level (ICC) correlations among time points
#'
#' @param object An object created by \code{\link{study_parameters}}
#'
#' @return A \code{n1} x \code{n1} \code{matrix} with the marginal subject-level
#' correlations between time points.
#' @details The correlation between time point \eqn{T_i} and \eqn{T_{i+1}} within
#' the same subject is also called the intraclass correlation (ICC) at level two.
#' If the random slopes are non-zero this ICC changes over time.
#' @export
#'
#' @examples
#' paras <- study_parameters(n1 = 11,
#'                           n2 = 10,
#'                           n3 = 3,
#'                           T_end = 10,
#'                           icc_pre_subject = 0.5,
#'                           icc_pre_cluster = 0,
#'                           icc_slope = 0.05,
#'                           var_ratio = 0.03)
#' get_correlation_matrix(paras)
get_correlation_matrix <- function(object) {
    UseMethod("get_correlation_matrix")
}

#' @export
get_correlation_matrix.plcp <- function(object) {
    paras <- NA_to_zero(object)
    u0 <- paras$sigma_subject_intercept
    u1 <- paras$sigma_subject_slope
    v0 <- paras$sigma_cluster_intercept
    v1 <- paras$sigma_cluster_slope
    v01 <- v0 * v1 * paras$cor_cluster
    error <- paras$sigma_error
    u01 <- paras$cor_subject * u0 * u1
    time <- get_time_vector(paras)
    n1 <- paras$n1
    n2 <- paras$n2                        # NOTE(review): unused
    sx2 <- sum( (time - mean(time))^2)/n1 # NOTE(review): unused
    # Design matrix for intercept + slope; the same Z applies at both levels.
    X <- matrix(c(rep(1, n1), time), ncol = 2)
    Z <- X
    # Random-effect covariance matrices: D = subject level, D2 = cluster level.
    D <- matrix(c(u0^2, u01, u01, u1^2), ncol = 2)
    D2 <- matrix(c(v0^2, v01, v01, v1^2), ncol = 2)
    # Marginal covariance of the repeated measures, then scaled to
    # correlations.
    V <- Z %*% D %*% t(Z) + Z %*% D2 %*% t(Z) + error^2*diag(n1)
    V <- cov2cor(V)
    time_rounded <- round(time, 1)
    dimnames(V) <- list(time_rounded, time_rounded)
    class(V) <- append(class(V), "plcp_ICC2")
    V
}

#' @rdname get_correlation_matrix
#' @export
get_correlation_matrix.plcp_multi <- function(object) {
    warning("Multiple study designs used, only the first is shown")
    get_correlation_matrix.plcp(object[1, ])
}

#' Plot method for \code{get_correlation_matrix}-objects
#'
#' @param x An object created with \code{\link{get_correlation_matrix}}
#' @param ... Optional arguments, currently ignored.
#'
#' @export
plot.plcp_ICC2 <- function(x, ...)
{
    check_installed("ggplot2")
    res <- as.data.frame(x)
    breaks <- 1:ncol(res)
    # Correlation matrix -> long format: one row per (time1, time2) cell.
    res <- reshape(res,
                   varying = breaks,
                   v.names = "cor",
                   idvar = "time1",
                   timevar = "time2",
                   direction = "long")
    res$time1 <- res$time1  # NOTE(review): no-op assignment, kept as-is
    res$time2 <- res$time2  # NOTE(review): no-op assignment, kept as-is
    res$cor2 <- round(res$cor, 2)
    break_labs <- as.numeric(dimnames(x)[[1]])
    p <- ggplot2::ggplot(res, ggplot2::aes_string("time1", "time2", color = "cor", fill = "cor")) +
        ggplot2::geom_tile() +
        ggplot2::geom_text(ggplot2::aes_string(label = "cor2"),
                           hjust = "center",
                           color = "black") +
        ggplot2::scale_x_continuous(breaks = breaks, labels = break_labs) +
        ggplot2::scale_y_continuous(breaks = breaks, labels = break_labs) +
        ggplot2::labs(color = "Correlation",
                      fill = "Correlation",
                      x = "Time",
                      y = "Time",
                      title = "Subject-level correlation matrix") +
        ggplot2::theme_minimal()
    # Viridis colors when the optional viridis package is installed.
    if(requireNamespace("viridis", quietly = TRUE)) {
        p <- p + viridis::scale_fill_viridis() + viridis::scale_color_viridis()
    }
    p
}

#' Print method for \code{get_correlation_matrix}-objects
#'
#' @param x An object created by \code{\link{get_correlation_matrix}}
#'
#' @param ... Optional arguments
#'
#' @method print plcp_ICC2
#' @export
print.plcp_ICC2 <- function(x, ...) {
    # Strip the plcp_ICC2 class so the default matrix print method is used.
    x <- unclass(x)
    print(round(x, 2), ...)
}
/R/VPC.r
no_license
R-forks-to-learn/powerlmm
R
false
false
13,007
r
# variance partitioning #' Calculate the variance partitioning coefficient #' #' @param object An object created by \code{\link{study_parameters}} #' #' @details For partially nested studies, the VPC is calculated for the treatment group. #' #' @return a \code{data.frame} with class \code{plcp_VPC} containing the #' percentage of variance per level and time point. The column #' \code{between_clusters} is also the intraclass correlation for level three, #' i.e. the correlation between two subjects belonging to the same cluster at #' a specific time point. With random slopes in the model the variances per time point #' will be a quadratic function of time. \code{tot_var} is the #' percentage increase or decrease in total variance relative to baseline variance. #' #' The \code{plot} method returns a \code{ggplot2::ggplot} object. #' @seealso \code{\link{plot.plcp_VPC}} #' #' @references Goldstein, H., Browne, W., & Rasbash, J. (2002). #' Partitioning variation in multilevel models. #' \emph{Understanding Statistics: Statistical Issues in Psychology, Education, #' and the Social Sciences, 1}(4), 223-231. 
#' @examples #' paras <- study_parameters(n1 = 11, #' n2 = 10, #' n3 = 3, #' T_end = 10, #' icc_pre_subject = 0.5, #' icc_pre_cluster = 0, #' icc_slope = 0.05, #' var_ratio = 0.03) #' #' res <- get_VPC(paras) #' res #' #' # Plot #' plot(res) #' @export get_VPC <- function(object) { UseMethod("get_VPC") } #' @rdname get_VPC #' @export get_VPC.plcp <- function(object) { paras <- NA_to_zero(object) u0 <- paras$sigma_subject_intercept u1 <- paras$sigma_subject_slope v0 <- paras$sigma_cluster_intercept v1 <- paras$sigma_cluster_slope v01 <- v0 * v1 * paras$cor_cluster error <- paras$sigma_error u01 <- paras$cor_subject * u0 * u1 time <- get_time_vector(paras) tot_var <- (u0^2 + 2*u01*time + u1^2*time^2 + v0^2 + 2*v01*time + v1^2*time^2 + error^2) lvl3 <- (v0^2 + 2*v01*time + v1^2*time^2)/tot_var lvl2 <- (u0^2 + 2*u01*time + u1^2*time^2)/tot_var lvl1 <- error^2/tot_var tot_var <- tot_var/tot_var[1] res <- data.frame(time, between_clusters = lvl3*100, between_subjects = lvl2*100, within_subjects = lvl1*100, tot_var = (tot_var-1)*100) class(res) <- append("plcp_VPC", class(res)) res } #' @export get_VPC.plcp_multi <- function(object) { warning("Multiple study designs used, only the first is shown") get_VPC.plcp(object[1, ]) } #' Plot method for \code{get_VPC}-objects #' #' @param x An object created with \code{\link{get_VPC}} #' @param ... Optional arguments, currently ignored. #' #' @export plot.plcp_VPC <- function(x, ...) 
{ check_installed("ggplot2") res <- x res$tot_var <- NULL res <- stats::reshape(res, direction = "long", varying = 2:4, times = c("between_clusters", "between_subjects", "within_subjects"), v.names = "proportion", timevar = "level") res$level <- factor(res$level, levels = c("between_clusters", "between_subjects", "within_subjects"), labels = c("between-clusters (L3)", "between-subjects (L2)", "within-subjects (L1)")) p <- ggplot2::ggplot(res, ggplot2::aes_string("time", "proportion", color = "level", fill = "level")) + ggplot2::geom_line() + ggplot2::geom_point() + ggplot2::labs(title = "Variance partitioning", x = "Time point", y = "Percentage of total variance (%)") if(requireNamespace("ggsci", quietly = TRUE)) { p <- p + ggsci::scale_fill_d3() + ggsci::scale_color_d3() } p } #' Print method for \code{get_vpc}-objects #' @param x Object created with \code{link{get_VPC}} #' @param digits Number of digits to print #' @param ... Optional arguments #' @method print plcp_VPC #' @export print.plcp_VPC <- function(x, digits = 2, ...) { cat("# Percentage (%) of total variance at each level and time point\n") print.data.frame(x, digits = digits, scientific = FALSE, ...) invisible(x) } # Standard deviations ----------------------------------------------------- #' Calculate the model implied standard deviations per time point #' #' @param object An object created by \code{\link{study_parameters}} #' @param treatment \code{character}; either \code{"treatment"} or \code{"control"}. #' Indicates for which group SDs should be calculated for. This only makes a difference #' for 3-level partially nested designs. #' @param n Optional; selects row n if \code{object} is a \code{data.frame} of #' parameters #' #' @seealso \code{\link{get_VPC}}, \code{\link{get_correlation_matrix}} #' @return \code{data.frame} with class \code{plcp_sds} containing the model #' implied standard deviations per time point. 
#' #' @export #' #' @examples #' paras <- study_parameters(n1 = 11, #' n2 = 10, #' n3 = 6, #' T_end = 10, #' icc_pre_subject = 0.5, #' icc_pre_cluster = 0, #' icc_slope = 0.05, #' var_ratio = 0.03) #' #' get_sds(paras) #' #' # plot #' plot(get_sds(paras)) #' get_sds <- function(object, treatment = "treatment", n = 1) { if(!treatment %in% c("treatment", "control")) stop("Wrong 'treatment', allowed options are: 'treatment' or 'control'", call. = FALSE) UseMethod("get_sds") } #' @export get_sds.plcp <- function(object, treatment = "treatment", n = NULL) { .p <- NA_to_zero(object) .p <- prepare_paras(.p) if(treatment == "treatment") { .p <- .p$treatment } else if(treatment == "control") { .p <- .p$control } .p$retention <- NULL .p$n2 <- NULL .p <- .p[c("sigma_subject_intercept", "cor_subject", "cor_cluster", "sigma_subject_slope", "sigma_cluster_slope", "sigma_cluster_intercept", "sigma_error", "n1", "T_end")] res <- do.call(get_sds_, .p) class(res) <- append(c("plcp_sds"), class(res)) res } #' @export get_sds.plcp_multi <- function(object, treatment = "treatment", n = 1) { get_sds.plcp(as.plcp(object[n, ]), treatment = treatment) } get_sds_ <- function(sigma_subject_intercept, cor_subject, sigma_subject_slope, sigma_cluster_intercept, sigma_cluster_slope, cor_cluster, sigma_error, n1, T_end) { time <- seq(0, T_end, length.out = n1) u0 <- sigma_subject_intercept u1 <- sigma_subject_slope u01 <- u0 * u1 * cor_subject v0 <- sigma_cluster_intercept v1 <- sigma_cluster_slope v01 <- v0 * v1 * cor_cluster error <- sigma_error sds <- sqrt((u0^2 + 2*u01*time + u1^2*time^2 + v0^2 + 2 * v01*time + v1^2*time^2 + error^2)) sds_lvl2 <- sqrt((u0^2 + 2*u01*time + u1^2*time^2 + v0^2 + error^2)) res <- data.frame(time = time, SD_with_random_slopes = sds, SD_no_cluster_random_slope = sds_lvl2, SD_no_random_slopes = sqrt(u0^2 + v0^2 + error^2)) res } #' Plot method for \code{get_sds}-objects #' @param x An object of class \code{plcp_sds}. #' @param ... Optional arguments. 
#' @export plot.plcp_sds <- function(x, ...) { check_installed("ggplot2") .res <- x cs <- .res$SD_no_random_slopes[1] res <- .res res$time <- round(res$time,1) p <- ggplot2::ggplot(res, ggplot2::aes_string("time", "SD_with_random_slopes")) + ggplot2::geom_hline(ggplot2::aes_string(color = "'Random slopes = 0'", yintercept = "SD_no_random_slopes")) + ggplot2::geom_line(ggplot2::aes(color = "With random slopes")) + ggplot2::geom_point(ggplot2::aes(color = "With random slopes")) + ggplot2::scale_x_continuous(breaks = unique(res$time)) + ggplot2::labs(y = "SD", x = "Time point", title = "SD per time point", color = "Model") # facet_grid(~cor_cluster + cor_subject, labeller = label_both) # if(!is.nulls(facet)) p + facet_wrap(facet) p } #' Print method for \code{get_sds}-objects #' @param x An object of class \code{plcp_sds}. #' @param ... Optional arguments. #' @export #' @method print plcp_sds print.plcp_sds <- function(x, ...) { print.data.frame(x, digits = 2, ...) } # # CORRELATIONS LVL 2 # #' Calculate the subject-level (ICC) correlations among time points #' #' @param object An object created by \code{\link{study_parameters}} #' #' @return A \code{n1} x \code{n1} \code{matrix} with the marginal subject-level #' correlations between time points. #' @details The correlation between time point \eqn{T_i} and \eqn{T_{i+1}} within #' the same subject is also called the intraclass correlation (ICC) at level two. #' If the random slopes are non-zero this ICC change over time. 
#' @export #' #' @examples #' paras <- study_parameters(n1 = 11, #' n2 = 10, #' n3 = 3, #' T_end = 10, #' icc_pre_subject = 0.5, #' icc_pre_cluster = 0, #' icc_slope = 0.05, #' var_ratio = 0.03) #' get_correlation_matrix(paras) get_correlation_matrix <- function(object) { UseMethod("get_correlation_matrix") } #' @export get_correlation_matrix.plcp <- function(object) { paras <- NA_to_zero(object) u0 <- paras$sigma_subject_intercept u1 <- paras$sigma_subject_slope v0 <- paras$sigma_cluster_intercept v1 <- paras$sigma_cluster_slope v01 <- v0 * v1 * paras$cor_cluster error <- paras$sigma_error u01 <- paras$cor_subject * u0 * u1 time <- get_time_vector(paras) n1 <- paras$n1 n2 <- paras$n2 sx2 <- sum( (time - mean(time))^2)/n1 X <- matrix(c(rep(1, n1), time), ncol = 2) Z <- X D <- matrix(c(u0^2, u01, u01, u1^2), ncol = 2) D2 <- matrix(c(v0^2, v01, v01, v1^2), ncol = 2) V <- Z %*% D %*% t(Z) + Z %*% D2 %*% t(Z) + error^2*diag(n1) V <- cov2cor(V) time_rounded <- round(time, 1) dimnames(V) <- list(time_rounded, time_rounded) class(V) <- append(class(V), "plcp_ICC2") V } #' @rdname get_correlation_matrix #' @export get_correlation_matrix.plcp_multi <- function(object) { warning("Multiple study designs used, only the first is shown") get_correlation_matrix.plcp(object[1, ]) } #' Plot method for \code{get_correlation_matrix}-objects #' #' @param x An object created with \code{\link{get_correlation_matrix}} #' @param ... Optional arguments, currently ignored. #' #' @export plot.plcp_ICC2 <- function(x, ...) 
{ check_installed("ggplot2") res <- as.data.frame(x) breaks <- 1:ncol(res) res <- reshape(res, varying = breaks, v.names = "cor", idvar = "time1", timevar = "time2", direction = "long") res$time1 <- res$time1 res$time2 <- res$time2 res$cor2 <- round(res$cor, 2) break_labs <- as.numeric(dimnames(x)[[1]]) p <- ggplot2::ggplot(res, ggplot2::aes_string("time1", "time2", color = "cor", fill = "cor")) + ggplot2::geom_tile() + ggplot2::geom_text(ggplot2::aes_string(label = "cor2"), hjust = "center", color = "black") + ggplot2::scale_x_continuous(breaks = breaks, labels = break_labs) + ggplot2::scale_y_continuous(breaks = breaks, labels = break_labs) + ggplot2::labs(color = "Correlation", fill = "Correlation", x = "Time", y = "Time", title = "Subject-level correlation matrix") + ggplot2::theme_minimal() if(requireNamespace("viridis", quietly = TRUE)) { p <- p + viridis::scale_fill_viridis() + viridis::scale_color_viridis() } p } #' Print method for \code{get_correlation_matrix}-objects #' #' @param x An object created by \code{\link{get_correlation_matrix}} #' #' @param ... Optional arguments #' #' @method print plcp_ICC2 #' @export print.plcp_ICC2 <- function(x, ...) { x <- unclass(x) print(round(x, 2), ...) }
library(tidyverse) library(scales) library(extrafont) library(directlabels) loadfonts() theme_bgs <- function(){ theme_bw() + theme(text = element_text(family = 'Segoe UI'), plot.title = element_text(face = 'plain', size = 14), plot.subtitle = element_text(family = 'Segoe UI Semibold'), panel.border = element_rect(colour = 'grey85'), panel.grid.minor = element_line(colour = "grey98", size = 0.25), axis.title = element_text(family = 'Segoe UI Semibold', size = 12), axis.text = element_text(size = 12), axis.ticks = element_blank(), legend.justification = 'top', legend.title = element_text(family = 'Segoe UI Semibold'), strip.background = element_rect(fill = 'grey92'), strip.text = element_text(family = 'Segoe UI Semibold')) } theme_set(theme_bgs()) ineq_data <- read_delim("WID_Data_Metadata//WID_Data_18052019-130037.csv", delim = ";", skip = 8, col_names = c("category", "year", "Wealth", "Income")) cite_caption <- "Piketty, Thomas; Saez, Emmanuel and Zucman, Gabriel (2016).\nDistributional National Accounts: Methods and Estimates for the United States." 
# Line chart of top-10% income and wealth shares over time, with direct
# series labels instead of a legend, saved as a high-resolution PNG.

p1 <- ineq_data %>%
  # Long format: one row per (year, variable), variable in {Wealth, Income}.
  gather(variable, value, Wealth, Income) %>%
  ggplot(aes(year, value, colour = variable))+
  geom_line(size = 1.2)+
  geom_point(show.legend = FALSE)+
  scale_y_continuous("Share of Total (%)", labels = percent)+
  scale_x_continuous(breaks = seq(0, 3000, 15), name = "Year")+
  scale_colour_brewer(palette = 'Set1', name = "")+
  ggtitle("Rising Inequality Since 1980\nShare of Income and Wealth for Top 10% in USA")+
  labs(caption = cite_caption)+
  theme(plot.title = element_text(size = 25),
        axis.text = element_text(size = 22),
        legend.text = element_text(size = 22),
        legend.title = element_text(size = 24),
        axis.title = element_text(size = 24),
        plot.caption = element_text(size = 16),
        # Legend suppressed; the series are labelled directly below via
        # directlabels, so guides() here has no visible effect.
        legend.position = 'none')+
  guides(colour = guide_legend(override.aes = list(size = 3)))

# Place series names on the plot itself using directlabels' smart.grid method.
p2 <- p1 +
  geom_dl(aes(label = variable),
          method = list("smart.grid", fontfamily = "Segoe UI", cex = 2))

ggsave(filename = "inequality_data_vis.png",
       plot = p2,
       width = 12, height = 8, units = "in",
       dpi = 700)
/wid_data_vis.R
permissive
bgstieber/APRA-PD-DAS-2019
R
false
false
2,555
r
# Visualisation of top-10% income and wealth shares (World Inequality Database).
# Loads plotting packages, registers system fonts, and defines/sets a custom
# ggplot2 theme before reading the WID extract.

library(tidyverse)
library(scales)
library(extrafont)
library(directlabels)

# Make previously imported system fonts (extrafont) available to the device.
loadfonts()

# House ggplot2 theme: theme_bw base with Segoe UI typography, muted panel
# borders/grids, and semibold facet strips and titles.
theme_bgs <- function(){
  theme_bw() +
    theme(text = element_text(family = 'Segoe UI'),
          plot.title = element_text(face = 'plain', size = 14),
          plot.subtitle = element_text(family = 'Segoe UI Semibold'),
          panel.border = element_rect(colour = 'grey85'),
          panel.grid.minor = element_line(colour = "grey98", size = 0.25),
          axis.title = element_text(family = 'Segoe UI Semibold', size = 12),
          axis.text = element_text(size = 12),
          axis.ticks = element_blank(),
          legend.justification = 'top',
          legend.title = element_text(family = 'Segoe UI Semibold'),
          strip.background = element_rect(fill = 'grey92'),
          strip.text = element_text(family = 'Segoe UI Semibold'))
}

# Apply the theme globally for all subsequent plots.
theme_set(theme_bgs())

# WID export: semicolon-delimited, 8 metadata lines skipped, no header row
# (column names supplied explicitly).
ineq_data <- read_delim("WID_Data_Metadata//WID_Data_18052019-130037.csv",
                        delim = ";", skip = 8,
                        col_names = c("category", "year", "Wealth", "Income"))

# Source citation used as the plot caption.
cite_caption <- "Piketty, Thomas; Saez, Emmanuel and Zucman, Gabriel (2016).\nDistributional National Accounts: Methods and Estimates for the United States."
# Line chart of top-10% income and wealth shares over time, with direct
# series labels instead of a legend, saved as a high-resolution PNG.

p1 <- ineq_data %>%
  # Long format: one row per (year, variable), variable in {Wealth, Income}.
  gather(variable, value, Wealth, Income) %>%
  ggplot(aes(year, value, colour = variable))+
  geom_line(size = 1.2)+
  geom_point(show.legend = FALSE)+
  scale_y_continuous("Share of Total (%)", labels = percent)+
  scale_x_continuous(breaks = seq(0, 3000, 15), name = "Year")+
  scale_colour_brewer(palette = 'Set1', name = "")+
  ggtitle("Rising Inequality Since 1980\nShare of Income and Wealth for Top 10% in USA")+
  labs(caption = cite_caption)+
  theme(plot.title = element_text(size = 25),
        axis.text = element_text(size = 22),
        legend.text = element_text(size = 22),
        legend.title = element_text(size = 24),
        axis.title = element_text(size = 24),
        plot.caption = element_text(size = 16),
        # Legend suppressed; the series are labelled directly below via
        # directlabels, so guides() here has no visible effect.
        legend.position = 'none')+
  guides(colour = guide_legend(override.aes = list(size = 3)))

# Place series names on the plot itself using directlabels' smart.grid method.
p2 <- p1 +
  geom_dl(aes(label = variable),
          method = list("smart.grid", fontfamily = "Segoe UI", cex = 2))

ggsave(filename = "inequality_data_vis.png",
       plot = p2,
       width = 12, height = 8, units = "in",
       dpi = 700)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_strata.R \name{make_strata} \alias{make_strata} \title{Create or Modify Stratification Variables} \usage{ make_strata(x, breaks = 4, nunique = 5, pool = 0.1, depth = 20) } \arguments{ \item{x}{An input vector.} \item{breaks}{A single number giving the number of bins desired to stratify a numeric stratification variable.} \item{nunique}{An integer for the number of unique value threshold in the algorithm.} \item{pool}{A proportion of data used to determine if a particular group is too small and should be pooled into another group. We do not recommend decreasing this argument below its default of 0.1 because of the dangers of stratifying groups that are too small.} \item{depth}{An integer that is used to determine the best number of percentiles that should be used. The number of bins are based on \code{min(5, floor(n / depth))} where \code{n = length(x)}. If \code{x} is numeric, there must be at least 40 rows in the data set (when \code{depth = 20}) to conduct stratified sampling.} } \value{ A factor vector. } \description{ This function can create strata from numeric data and make non-numeric data more conducive for stratification. } \details{ For numeric data, if the number of unique levels is less than \code{nunique}, the data are treated as categorical data. For categorical inputs, the function will find levels of \code{x} than occur in the data with percentage less than \code{pool}. The values from these groups will be randomly assigned to the remaining strata (as will data points that have missing values in \code{x}). For numeric data with more unique values than \code{nunique}, the data will be converted to being categorical based on percentiles of the data. The percentile groups will have no more than 20 percent of the data in each group. Again, missing values in \code{x} are randomly assigned to groups. 
} \examples{ set.seed(61) x1 <- rpois(100, lambda = 5) table(x1) table(make_strata(x1)) set.seed(554) x2 <- rpois(100, lambda = 1) table(x2) table(make_strata(x2)) # small groups are randomly assigned x3 <- factor(x2) table(x3) table(make_strata(x3)) # `oilType` data from `caret` x4 <- rep(LETTERS[1:7], c(37, 26, 3, 7, 11, 10, 2)) table(x4) table(make_strata(x4)) table(make_strata(x4, pool = 0.1)) table(make_strata(x4, pool = 0.0)) # not enough data to stratify x5 <- rnorm(20) table(make_strata(x5)) set.seed(483) x6 <- rnorm(200) quantile(x6, probs = (0:10) / 10) table(make_strata(x6, breaks = 10)) }
/man/make_strata.Rd
permissive
tidymodels/rsample
R
false
true
2,544
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make_strata.R \name{make_strata} \alias{make_strata} \title{Create or Modify Stratification Variables} \usage{ make_strata(x, breaks = 4, nunique = 5, pool = 0.1, depth = 20) } \arguments{ \item{x}{An input vector.} \item{breaks}{A single number giving the number of bins desired to stratify a numeric stratification variable.} \item{nunique}{An integer for the number of unique value threshold in the algorithm.} \item{pool}{A proportion of data used to determine if a particular group is too small and should be pooled into another group. We do not recommend decreasing this argument below its default of 0.1 because of the dangers of stratifying groups that are too small.} \item{depth}{An integer that is used to determine the best number of percentiles that should be used. The number of bins are based on \code{min(5, floor(n / depth))} where \code{n = length(x)}. If \code{x} is numeric, there must be at least 40 rows in the data set (when \code{depth = 20}) to conduct stratified sampling.} } \value{ A factor vector. } \description{ This function can create strata from numeric data and make non-numeric data more conducive for stratification. } \details{ For numeric data, if the number of unique levels is less than \code{nunique}, the data are treated as categorical data. For categorical inputs, the function will find levels of \code{x} than occur in the data with percentage less than \code{pool}. The values from these groups will be randomly assigned to the remaining strata (as will data points that have missing values in \code{x}). For numeric data with more unique values than \code{nunique}, the data will be converted to being categorical based on percentiles of the data. The percentile groups will have no more than 20 percent of the data in each group. Again, missing values in \code{x} are randomly assigned to groups. 
} \examples{ set.seed(61) x1 <- rpois(100, lambda = 5) table(x1) table(make_strata(x1)) set.seed(554) x2 <- rpois(100, lambda = 1) table(x2) table(make_strata(x2)) # small groups are randomly assigned x3 <- factor(x2) table(x3) table(make_strata(x3)) # `oilType` data from `caret` x4 <- rep(LETTERS[1:7], c(37, 26, 3, 7, 11, 10, 2)) table(x4) table(make_strata(x4)) table(make_strata(x4, pool = 0.1)) table(make_strata(x4, pool = 0.0)) # not enough data to stratify x5 <- rnorm(20) table(make_strata(x5)) set.seed(483) x6 <- rnorm(200) quantile(x6, probs = (0:10) / 10) table(make_strata(x6, breaks = 10)) }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MSEplotting.R
\name{Tplot3}
\alias{Tplot3}
\title{Test Trade-Off Plot}
\usage{
Tplot3(MSEobj, ..., lims = c(0.2, 0.2, 0.8, 0.8))
}
\arguments{
\item{MSEobj}{An object of class MSE}

\item{...}{Names of PM methods to plot}

\item{lims}{Numeric vector of satisficing limits. Recycled to number of PM methods}
}
\value{
produces a plot
}
\description{
Test Trade-Off Plot
}
\examples{
\dontrun{
Tplot3(myMSE)
}
}
\author{
A. Hordyk
}
/man/Tplot3.Rd
no_license
Lijiuqi/DLMtool
R
false
true
510
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MSEplotting.R
\name{Tplot3}
\alias{Tplot3}
\title{Test Trade-Off Plot}
\usage{
Tplot3(MSEobj, ..., lims = c(0.2, 0.2, 0.8, 0.8))
}
\arguments{
\item{MSEobj}{An object of class MSE}

\item{...}{Names of PM methods to plot}

\item{lims}{Numeric vector of satisficing limits. Recycled to number of PM methods}
}
\value{
produces a plot
}
\description{
Test Trade-Off Plot
}
\examples{
\dontrun{
Tplot3(myMSE)
}
}
\author{
A. Hordyk
}
##### Tissue:plasma partition coefficients following Berezhkovskiy (2004),
##### J Pharm Sci, doi: 10.1002/jps.20073.

# Adipose tissue:plasma partition coefficient.
# ionization: one of "monoproticAcid", "monoproticBase", "diproticAcid",
#   "diproticBase", "zwitterionic"; any other value is treated as neutral.
# pH, pKa1, pKa2 correct logP for the ionised fraction; Fup/Fut are the
# unbound fractions in plasma/tissue; Vnl/Vph/Vw and PVnl/PVph/PVw are the
# neutral-lipid/phospholipid/water volume fractions of tissue and plasma.
# Returns the partition coefficient Kp.
f.adiposeBerezhkovskiy <- function(ionization, pH, pKa1, pKa2, logP, Fup, Fut,
                                   Vnl, Vph, Vw, PVnl, PVph, PVw) {
  neutral <- 1.115 * logP - 1.35
  # log D (vegetable oil:water) at the given pH.
  # NOTE(review): in the zwitterionic branch the pH terms cancel
  # (exponent reduces to pKa1 - pKa2) -- confirm against the publication.
  logDvow <- switch(ionization,
    monoproticAcid = neutral - log10(1 + 10^(pH - pKa1)),
    monoproticBase = neutral - log10(1 + 10^(pKa1 - pH)),
    diproticAcid   = neutral - log10(1 + 10^(pH - pKa1 + pH - pKa2)),
    diproticBase   = neutral - log10(1 + 10^(pKa1 - pH + pKa2 - pH)),
    zwitterionic   = neutral - log10(1 + 10^(-pKa2 + pH + pKa1 - pH)),
    neutral  # default: no ionisation correction
  )
  Dvow <- 10^logDvow
  # Neutral lipid plus 30% of phospholipid partition with D; the remaining
  # 70% of phospholipid and water scale with the unbound fractions.
  (Dvow * (Vnl + 0.3 * Vph) + 0.7 * Vph + Vw / Fut) /
    (Dvow * (PVnl + 0.3 * PVph) + 0.7 * PVph + PVw / Fup)
}

# Rest-of-body tissue:plasma partition coefficient: identical formula but
# logP is used directly, with no ionisation correction.
f.restBerezhkovskiy <- function(logP, Fup, Fut, Vnl, Vph, Vw,
                                PVnl, PVph, PVw) {
  P <- 10^logP
  (P * (Vnl + 0.3 * Vph) + 0.7 * Vph + Vw / Fut) /
    (P * (PVnl + 0.3 * PVph) + 0.7 * PVph + PVw / Fup)
}
/functions/partionBerezhkovskiy.R
no_license
wfsrqivive/pbktool
R
false
false
1,079
r
##### partition coefficients according to the method of Berezhkovskiy 2004. doi: 10.1002/jps.20073.

# Adipose tissue:plasma partition coefficient.
# ionization: one of "monoproticAcid", "monoproticBase", "diproticAcid",
#   "diproticBase", "zwitterionic"; any other value is treated as neutral.
# pH, pKa1, pKa2 correct logP for the ionised fraction; Fup/Fut are the
# unbound fractions in plasma/tissue; Vnl/Vph/Vw and PVnl/PVph/PVw are the
# neutral-lipid/phospholipid/water volume fractions of tissue and plasma.
# Returns the partition coefficient Kp.
f.adiposeBerezhkovskiy<-function(ionization, pH, pKa1, pKa2, logP, Fup, Fut, Vnl, Vph, Vw,PVnl, PVph, PVw){
  # log D (vegetable oil:water) at the given pH.
  if (ionization == "monoproticAcid" ) {
    logDvow<- 1.115*logP-1.35-log10(1+10^(pH-pKa1))
  } else if (ionization == "monoproticBase") {
    logDvow<- 1.115*logP-1.35-log10(1+10^(pKa1-pH))
  } else if (ionization == "diproticAcid") {
    logDvow<- 1.115*logP-1.35-log10(1+10^(pH-pKa1+pH-pKa2))
  } else if (ionization == "diproticBase") {
    logDvow<- 1.115*logP-1.35-log10(1+10^(pKa1-pH+pKa2-pH))
  } else if (ionization == "zwitterionic") {
    # NOTE(review): the pH terms cancel here (exponent reduces to
    # pKa1 - pKa2) -- confirm against the original publication.
    logDvow<- 1.115*logP-1.35-log10(1+10^(-pKa2+pH+pKa1-pH))
  } else
    # Neutral compound: no ionisation correction.
    logDvow<- 1.115*logP-1.35
  # Neutral lipid plus 30% of phospholipid partition with D; the remaining
  # 70% of phospholipid and water scale with the unbound fractions.
  Kp<-(10^logDvow*(Vnl+0.3*Vph)+0.7*Vph+Vw/Fut)/(10^logDvow*(PVnl+0.3*PVph)+0.7*PVph+PVw/Fup)
  return(Kp)
}

# Rest-of-body tissue:plasma partition coefficient: identical formula but
# logP is used directly, with no ionisation correction.
f.restBerezhkovskiy<-function(logP, Fup, Fut, Vnl, Vph, Vw,PVnl, PVph, PVw){
  Kp<-(10^logP*(Vnl+0.3*Vph)+0.7*Vph+Vw/Fut)/(10^logP*(PVnl+0.3*PVph)+0.7*PVph+PVw/Fup)
  return(Kp)
}
# Fuzz/regression harness input (generated for valgrind testing): call
# meteor's internal ET0_ThornthwaiteWilmott with zero-length doy/latitude
# vectors and a 14-day all-zero temperature vector, then print the
# structure of whatever comes back.
testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831135-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
183
r
# Fuzz/regression harness input (generated for valgrind testing): call
# meteor's internal ET0_ThornthwaiteWilmott with zero-length doy/latitude
# vectors and a 14-day all-zero temperature vector, then print the
# structure of whatever comes back.
testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
# Nightly ETL job: pull suspension incidents from DeansList and membership
# from PowerSchool (via Silo/BigQuery), compute in-school (ISS) and
# out-of-school (OSS) suspension counts and rates by school/month/school
# year, save the results for the Shiny dashboard, and trigger its restart.
#
# FIX: two joins against `adm` used by = "school_name" only, although `adm`
# carries one row per (SY, school_name); they now join on both keys, matching
# every other `adm` join in this script and preventing cross-year fan-out.

setwd("/jobs/idea/suspensions")
readRenviron("/config/.Renviron")

# Load packages ####
library(dplyr)
library(silounloadr)
library(lubridate)
library(purrr)
library(stringr)
library(futile.logger)
library(forcats)
library(janitor)

# set up logging
if(!dir.exists("logs")) dir.create("logs")
flog.threshold(TRACE)
flog.appender(appender.tee("logs/suspensions.logs"))

flog.info("Connect to Silo/BQ")
bigrquery::set_service_token("/config/bq/kipp-chicago-silo-2-aa786970aefd.json")

flog.info("Get DL Supsesions")
susps <- get_deanslist("suspensions") %>%
  filter(issuets_date >= "2017-08-21 00:00") %>%
  collect(n = Inf) %>%
  clean_names()

flog.info("Get Membership")
ps_md <- get_ps('membership')

# Daily enrollment counts per school.
membership <- ps_md %>%
  filter(calendardate >= "2016-08-21 00:00") %>%
  group_by(schoolid, calendardate) %>%
  summarize(N = n())

flog.info("Calcualte ADM")
# Average daily membership (ADM) per school per school year; the
# denominator for every suspension rate below.
adm <- membership %>%
  collect() %>%
  mutate(SY = sprintf("SY%s",
                      calc_academic_year(calendardate,
                                         date_parser = lubridate::ymd_hms,
                                         format = 'short'))) %>%
  group_by(schoolid, SY) %>%
  summarize(adm = round(mean(N), 0))

# Lookup table mapping PowerSchool school ids to display names.
schools <- tibble::tribble(
  ~schoolid, ~school_name, ~combined_name, ~school_full_name,
  78102,   "KAP",  "Ascend",  "KIPP Ascend Primary",
  7810,    "KAMS", "Ascend",  "KIPP Ascend Middle",
  400146,  "KCCP", "Academy", "KIPP Academy Chicago/KIPP Create",
  400163,  "KBCP", "Bloom",   "KIPP Bloom Collge Prep",
  4001802, "KOP",  "One",     "KIPP One Primary",
  400180,  "KOA",  "One",     "KIPP One Academy"
)

adm <- adm %>% inner_join(schools, by="schoolid")

flog.info("Extracting penalites nested field")
# The DeansList `penalties` column holds one JSON blob per suspension;
# unpack it into a tidy table keyed by suspensionid.
penalties <- susps$penalties %>%
  purrr::map_df(~jsonlite::fromJSON(.x)) %>%
  clean_names() %>%
  select(suspensionid, startdate, enddate, numdays, penaltyname) %>%
  mutate(startdate = ymd(startdate),
         enddate = ymd(enddate),
         diff_days = enddate - startdate,
         numdays = as.integer(numdays)) %>%
  arrange(startdate) %>%
  #filter(!is.na(startdate)) %>%
  mutate(suspensionid = as.integer(suspensionid))

flog.info('Filtering to OSSs')
oss <- susps %>%
  inner_join(penalties %>%
               filter(str_detect(penaltyname, "Out of School Suspension")),
             by = "suspensionid") %>%
  mutate(SY = sprintf("SY%s",
                      calc_academic_year(startdate,
                                         date_parser = lubridate::ymd,
                                         format = 'short')))

flog.info('Calculating OSS Rates')
# Fall back to the day after the issue timestamp when a penalty lacks a
# start date, and build an in-data-order ordered month factor for plotting.
oss_2 <- oss %>%
  mutate(startdate = if_else(is.na(startdate),
                             ymd_hms(oss$issuets_date) + days(1),
                             ymd_hms(sprintf("%s 00:00:00", startdate)))) %>%
  arrange(startdate) %>%
  #filter(startdate >= ymd("2017-08-24")) %>%
  mutate(month_1 = month(startdate, label = TRUE, abbr = TRUE),
         month = forcats::fct_inorder(as.character(month_1), ordered = TRUE)) %>%
  select(student_number = studentschoolid,
         student_first = studentfirst,
         student_last = studentlast,
         school_name, month, startdate, infraction, category,
         reporteddetails, gradelevelshort, numdays, adminsummary, SY) %>%
  distinct()

# Monthly and cumulative OSS rates per school per school year.
oss_rates <- oss_2 %>%
  mutate(month_year = floor_date(startdate, unit = "month")) %>%
  group_by(SY, school_name, month, month_year) %>%
  summarize(N_susps = n()) %>%
  group_by(SY, school_name) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  inner_join(adm, by = c("SY", "school_name")) %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100)

flog.info('Filtering and calculating ISSs')
iss <- susps %>%
  inner_join(penalties %>% filter(penaltyname == "In School Suspension"),
             by = "suspensionid") %>%
  mutate(SY = sprintf("SY%s",
                      calc_academic_year(issuets_date,
                                         date_parser = lubridate::ymd_hms,
                                         format = 'short')))

iss_2 <- iss %>%
  mutate(startdate = if_else(is.na(startdate),
                             ymd_hms(iss$issuets_date) + days(1),
                             ymd_hms(sprintf("%s 00:00:00", startdate)))) %>%
  arrange(startdate) %>%
  # filter(startdate >= ymd("2017-08-24")) %>%
  mutate(month_1 = month(startdate, label = TRUE, abbr = TRUE),
         month = forcats::fct_inorder(as.character(month_1), ordered = TRUE)) %>%
  select(student_number = studentschoolid,
         student_first = studentfirst,
         student_last = studentlast,
         school_name, month, startdate, infraction, category,
         reporteddetails, gradelevelshort, numdays, adminsummary, SY) %>%
  distinct()

iss_rates <- iss_2 %>%
  group_by(SY, school_name, month) %>%
  summarize(N_susps = n()) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  # FIX: was by = "school_name" only, which duplicated each month's row
  # once per school year present in `adm`.
  inner_join(adm, by = c("SY", "school_name")) %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100)

flog.info("Calculating regional rates")
# Per-school monthly series (all schools, including KOP).
oss_w_kop <- oss_2 %>%
  mutate(month_year = floor_date(startdate, unit = "month")) %>%
  group_by(SY, school_name, month, month_year) %>%
  summarize(N_susps = n()) %>%
  group_by(SY, school_name) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  inner_join(adm, by = c("SY", "school_name")) %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100,
         month = as.character(month),
         month_year = as.Date(month_year)) %>%
  arrange(month_year) %>%
  mutate(month = forcats::fct_inorder(month, ordered = T))

# Latest month's cumulative figures per school.
oss_max <- oss_w_kop %>%
  group_by(SY, school_name) %>%
  filter(month_year == max(month_year))

# Region-wide cumulative rate, all grades.
oss_kcs <- oss_2 %>%
  mutate(month_year = floor_date(startdate, unit = "month")) %>%
  group_by(SY, school_name, month, month_year) %>%
  summarize(N_susps = n()) %>%
  group_by(SY, school_name) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  inner_join(adm, by = c("SY", "school_name")) %>%
  ungroup() %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100,
         month = as.character(month),
         month_year = as.Date(month_year)) %>%
  arrange(month_year) %>%
  ungroup() %>%
  mutate(month = forcats::fct_inorder(month, ordered = T)) %>%
  filter(month == max(month)) %>%
  ungroup() %>%
  summarize(cum_susps = sum(cum_susps),
            adm = sum(adm),
            cum_susp_rate = cum_susps/adm*100) %>%
  mutate(school_name = "Region\n(All grades)")

# Region-wide cumulative rate, grades 3-8 only (K-2 excluded).
oss_kcs_no_k2 <- oss_2 %>%
  filter(!gradelevelshort %in% c("K", "1st", "2nd")) %>%
  mutate(month_year = floor_date(startdate, unit = "month")) %>%
  group_by(SY, school_name, month, month_year) %>%
  summarize(N_susps = n()) %>%
  group_by(SY, school_name) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  # FIX: was by = "school_name" only; join on both keys to match the other
  # regional aggregations and avoid cross-year fan-out.
  inner_join(adm, by = c("SY", "school_name")) %>%
  ungroup() %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100,
         month = as.character(month),
         month_year = as.Date(month_year)) %>%
  arrange(month_year) %>%
  ungroup() %>%
  mutate(month = forcats::fct_inorder(month, ordered = T)) %>%
  filter(month == max(month)) %>%
  ungroup() %>%
  summarize(cum_susps = sum(cum_susps),
            adm = sum(adm),
            cum_susp_rate = cum_susps/adm*100) %>%
  mutate(school_name = "Region\n(3-8)")

oss_regional <- bind_rows(oss_max, oss_kcs, oss_kcs_no_k2) %>%
  mutate(regional = grepl("Region", school_name))

oss <- oss_2
iss <- iss_2

save(susps, penalties, oss, oss_rates, oss_regional, iss, iss_rates, adm,
     file = "/data/dl_suspensions.Rda")

flog.info("Telling Shiny Server to restart")
system('touch /srv/shiny-server/war/restart.txt')
/airflow/jobs/idea/suspensions/dl_suspensions.R
no_license
www3838438/idea2
R
false
false
7,725
r
# Nightly ETL job: pull suspension incidents from DeansList and membership
# from PowerSchool (via Silo/BigQuery), compute in-school (ISS) and
# out-of-school (OSS) suspension counts and rates by school/month/school
# year, save the results for the Shiny dashboard, and trigger its restart.
#
# FIX: two joins against `adm` used by = "school_name" only, although `adm`
# carries one row per (SY, school_name); they now join on both keys, matching
# every other `adm` join in this script and preventing cross-year fan-out.

setwd("/jobs/idea/suspensions")
readRenviron("/config/.Renviron")

# Load packages ####
library(dplyr)
library(silounloadr)
library(lubridate)
library(purrr)
library(stringr)
library(futile.logger)
library(forcats)
library(janitor)

# set up logging
if(!dir.exists("logs")) dir.create("logs")
flog.threshold(TRACE)
flog.appender(appender.tee("logs/suspensions.logs"))

flog.info("Connect to Silo/BQ")
bigrquery::set_service_token("/config/bq/kipp-chicago-silo-2-aa786970aefd.json")

flog.info("Get DL Supsesions")
susps <- get_deanslist("suspensions") %>%
  filter(issuets_date >= "2017-08-21 00:00") %>%
  collect(n = Inf) %>%
  clean_names()

flog.info("Get Membership")
ps_md <- get_ps('membership')

# Daily enrollment counts per school.
membership <- ps_md %>%
  filter(calendardate >= "2016-08-21 00:00") %>%
  group_by(schoolid, calendardate) %>%
  summarize(N = n())

flog.info("Calcualte ADM")
# Average daily membership (ADM) per school per school year; the
# denominator for every suspension rate below.
adm <- membership %>%
  collect() %>%
  mutate(SY = sprintf("SY%s",
                      calc_academic_year(calendardate,
                                         date_parser = lubridate::ymd_hms,
                                         format = 'short'))) %>%
  group_by(schoolid, SY) %>%
  summarize(adm = round(mean(N), 0))

# Lookup table mapping PowerSchool school ids to display names.
schools <- tibble::tribble(
  ~schoolid, ~school_name, ~combined_name, ~school_full_name,
  78102,   "KAP",  "Ascend",  "KIPP Ascend Primary",
  7810,    "KAMS", "Ascend",  "KIPP Ascend Middle",
  400146,  "KCCP", "Academy", "KIPP Academy Chicago/KIPP Create",
  400163,  "KBCP", "Bloom",   "KIPP Bloom Collge Prep",
  4001802, "KOP",  "One",     "KIPP One Primary",
  400180,  "KOA",  "One",     "KIPP One Academy"
)

adm <- adm %>% inner_join(schools, by="schoolid")

flog.info("Extracting penalites nested field")
# The DeansList `penalties` column holds one JSON blob per suspension;
# unpack it into a tidy table keyed by suspensionid.
penalties <- susps$penalties %>%
  purrr::map_df(~jsonlite::fromJSON(.x)) %>%
  clean_names() %>%
  select(suspensionid, startdate, enddate, numdays, penaltyname) %>%
  mutate(startdate = ymd(startdate),
         enddate = ymd(enddate),
         diff_days = enddate - startdate,
         numdays = as.integer(numdays)) %>%
  arrange(startdate) %>%
  #filter(!is.na(startdate)) %>%
  mutate(suspensionid = as.integer(suspensionid))

flog.info('Filtering to OSSs')
oss <- susps %>%
  inner_join(penalties %>%
               filter(str_detect(penaltyname, "Out of School Suspension")),
             by = "suspensionid") %>%
  mutate(SY = sprintf("SY%s",
                      calc_academic_year(startdate,
                                         date_parser = lubridate::ymd,
                                         format = 'short')))

flog.info('Calculating OSS Rates')
# Fall back to the day after the issue timestamp when a penalty lacks a
# start date, and build an in-data-order ordered month factor for plotting.
oss_2 <- oss %>%
  mutate(startdate = if_else(is.na(startdate),
                             ymd_hms(oss$issuets_date) + days(1),
                             ymd_hms(sprintf("%s 00:00:00", startdate)))) %>%
  arrange(startdate) %>%
  #filter(startdate >= ymd("2017-08-24")) %>%
  mutate(month_1 = month(startdate, label = TRUE, abbr = TRUE),
         month = forcats::fct_inorder(as.character(month_1), ordered = TRUE)) %>%
  select(student_number = studentschoolid,
         student_first = studentfirst,
         student_last = studentlast,
         school_name, month, startdate, infraction, category,
         reporteddetails, gradelevelshort, numdays, adminsummary, SY) %>%
  distinct()

# Monthly and cumulative OSS rates per school per school year.
oss_rates <- oss_2 %>%
  mutate(month_year = floor_date(startdate, unit = "month")) %>%
  group_by(SY, school_name, month, month_year) %>%
  summarize(N_susps = n()) %>%
  group_by(SY, school_name) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  inner_join(adm, by = c("SY", "school_name")) %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100)

flog.info('Filtering and calculating ISSs')
iss <- susps %>%
  inner_join(penalties %>% filter(penaltyname == "In School Suspension"),
             by = "suspensionid") %>%
  mutate(SY = sprintf("SY%s",
                      calc_academic_year(issuets_date,
                                         date_parser = lubridate::ymd_hms,
                                         format = 'short')))

iss_2 <- iss %>%
  mutate(startdate = if_else(is.na(startdate),
                             ymd_hms(iss$issuets_date) + days(1),
                             ymd_hms(sprintf("%s 00:00:00", startdate)))) %>%
  arrange(startdate) %>%
  # filter(startdate >= ymd("2017-08-24")) %>%
  mutate(month_1 = month(startdate, label = TRUE, abbr = TRUE),
         month = forcats::fct_inorder(as.character(month_1), ordered = TRUE)) %>%
  select(student_number = studentschoolid,
         student_first = studentfirst,
         student_last = studentlast,
         school_name, month, startdate, infraction, category,
         reporteddetails, gradelevelshort, numdays, adminsummary, SY) %>%
  distinct()

iss_rates <- iss_2 %>%
  group_by(SY, school_name, month) %>%
  summarize(N_susps = n()) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  # FIX: was by = "school_name" only, which duplicated each month's row
  # once per school year present in `adm`.
  inner_join(adm, by = c("SY", "school_name")) %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100)

flog.info("Calculating regional rates")
# Per-school monthly series (all schools, including KOP).
oss_w_kop <- oss_2 %>%
  mutate(month_year = floor_date(startdate, unit = "month")) %>%
  group_by(SY, school_name, month, month_year) %>%
  summarize(N_susps = n()) %>%
  group_by(SY, school_name) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  inner_join(adm, by = c("SY", "school_name")) %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100,
         month = as.character(month),
         month_year = as.Date(month_year)) %>%
  arrange(month_year) %>%
  mutate(month = forcats::fct_inorder(month, ordered = T))

# Latest month's cumulative figures per school.
oss_max <- oss_w_kop %>%
  group_by(SY, school_name) %>%
  filter(month_year == max(month_year))

# Region-wide cumulative rate, all grades.
oss_kcs <- oss_2 %>%
  mutate(month_year = floor_date(startdate, unit = "month")) %>%
  group_by(SY, school_name, month, month_year) %>%
  summarize(N_susps = n()) %>%
  group_by(SY, school_name) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  inner_join(adm, by = c("SY", "school_name")) %>%
  ungroup() %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100,
         month = as.character(month),
         month_year = as.Date(month_year)) %>%
  arrange(month_year) %>%
  ungroup() %>%
  mutate(month = forcats::fct_inorder(month, ordered = T)) %>%
  filter(month == max(month)) %>%
  ungroup() %>%
  summarize(cum_susps = sum(cum_susps),
            adm = sum(adm),
            cum_susp_rate = cum_susps/adm*100) %>%
  mutate(school_name = "Region\n(All grades)")

# Region-wide cumulative rate, grades 3-8 only (K-2 excluded).
oss_kcs_no_k2 <- oss_2 %>%
  filter(!gradelevelshort %in% c("K", "1st", "2nd")) %>%
  mutate(month_year = floor_date(startdate, unit = "month")) %>%
  group_by(SY, school_name, month, month_year) %>%
  summarize(N_susps = n()) %>%
  group_by(SY, school_name) %>%
  mutate(cum_susps = cumsum(N_susps)) %>%
  # FIX: was by = "school_name" only; join on both keys to match the other
  # regional aggregations and avoid cross-year fan-out.
  inner_join(adm, by = c("SY", "school_name")) %>%
  ungroup() %>%
  mutate(susp_rate = N_susps/adm*100,
         cum_susp_rate = cum_susps/adm*100,
         month = as.character(month),
         month_year = as.Date(month_year)) %>%
  arrange(month_year) %>%
  ungroup() %>%
  mutate(month = forcats::fct_inorder(month, ordered = T)) %>%
  filter(month == max(month)) %>%
  ungroup() %>%
  summarize(cum_susps = sum(cum_susps),
            adm = sum(adm),
            cum_susp_rate = cum_susps/adm*100) %>%
  mutate(school_name = "Region\n(3-8)")

oss_regional <- bind_rows(oss_max, oss_kcs, oss_kcs_no_k2) %>%
  mutate(regional = grepl("Region", school_name))

oss <- oss_2
iss <- iss_2

save(susps, penalties, oss, oss_rates, oss_regional, iss, iss_rates, adm,
     file = "/data/dl_suspensions.Rda")

flog.info("Telling Shiny Server to restart")
system('touch /srv/shiny-server/war/restart.txt')
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggtree-package.R, R/ggtree.R \docType{package} \name{ggtree} \alias{ggtree} \alias{package-ggtree} \alias{ggtree-package} \title{visualizing phylogenetic tree and heterogenous associated data based on grammar of graphics \code{ggtree} provides functions for visualizing phylogenetic tree and its associated data in R.} \usage{ ggtree(tr, mapping = NULL, layout = "rectangular", open.angle = 0, mrsd = NULL, as.Date = FALSE, yscale = "none", yscale_mapping = NULL, ladderize = TRUE, right = FALSE, branch.length = "branch.length", ...) } \arguments{ \item{tr}{phylo object} \item{mapping}{aesthetic mapping} \item{layout}{one of 'rectangular', 'slanted', 'fan', 'circular', 'radial', 'equal_angle' or 'daylight'} \item{open.angle}{open angle, only for 'fan' layout} \item{mrsd}{most recent sampling date} \item{as.Date}{logical whether using Date class in time tree} \item{yscale}{y scale} \item{yscale_mapping}{yscale mapping for category variable} \item{ladderize}{logical (default \code{TRUE}). Should the tree be re-organized to have a 'ladder' aspect?} \item{right}{logical. If \code{ladderize = TRUE}, should the ladder have the smallest clade on the right-hand side? See \code{\link[ape]{ladderize}} for more information.} \item{branch.length}{variable for scaling branch, if 'none' draw cladogram} \item{...}{additional parameter} } \value{ tree } \description{ If you use ggtree in published research, please cite: Guangchuang Yu, David Smith, Huachen Zhu, Yi Guan, Tommy Tsan-Yuk Lam. ggtree: an R package for visualization and annotation of phylogenetic trees with their covariates and other associated data. Methods in Ecology and Evolution 2017, 8(1):28-36, doi:10.1111/2041-210X.12628 drawing phylogenetic tree from phylo object } \examples{ require(ape) tr <- rtree(10) ggtree(tr) } \seealso{ \code{\link[ape]{ladderize}} } \author{ Yu Guangchuang }
/man/ggtree.Rd
no_license
pagnini/ggtree
R
false
true
1,960
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggtree-package.R, R/ggtree.R \docType{package} \name{ggtree} \alias{ggtree} \alias{package-ggtree} \alias{ggtree-package} \title{visualizing phylogenetic tree and heterogenous associated data based on grammar of graphics \code{ggtree} provides functions for visualizing phylogenetic tree and its associated data in R.} \usage{ ggtree(tr, mapping = NULL, layout = "rectangular", open.angle = 0, mrsd = NULL, as.Date = FALSE, yscale = "none", yscale_mapping = NULL, ladderize = TRUE, right = FALSE, branch.length = "branch.length", ...) } \arguments{ \item{tr}{phylo object} \item{mapping}{aesthetic mapping} \item{layout}{one of 'rectangular', 'slanted', 'fan', 'circular', 'radial', 'equal_angle' or 'daylight'} \item{open.angle}{open angle, only for 'fan' layout} \item{mrsd}{most recent sampling date} \item{as.Date}{logical whether using Date class in time tree} \item{yscale}{y scale} \item{yscale_mapping}{yscale mapping for category variable} \item{ladderize}{logical (default \code{TRUE}). Should the tree be re-organized to have a 'ladder' aspect?} \item{right}{logical. If \code{ladderize = TRUE}, should the ladder have the smallest clade on the right-hand side? See \code{\link[ape]{ladderize}} for more information.} \item{branch.length}{variable for scaling branch, if 'none' draw cladogram} \item{...}{additional parameter} } \value{ tree } \description{ If you use ggtree in published research, please cite: Guangchuang Yu, David Smith, Huachen Zhu, Yi Guan, Tommy Tsan-Yuk Lam. ggtree: an R package for visualization and annotation of phylogenetic trees with their covariates and other associated data. Methods in Ecology and Evolution 2017, 8(1):28-36, doi:10.1111/2041-210X.12628 drawing phylogenetic tree from phylo object } \examples{ require(ape) tr <- rtree(10) ggtree(tr) } \seealso{ \code{\link[ape]{ladderize}} } \author{ Yu Guangchuang }
#Plot Maps Together
# Stack the 2012 and 2014 legislative map data and draw a faceted
# choropleth of the share of bills passed, saved to PDF.
# FIX: removed a stray Markdown code fence (```) at the end of the file,
# left over from an Rmd extraction -- it is a parse error in an .R script.

map_df_12 %<>% mutate(year=2012)
map_df_14 %<>% mutate(year=2014)

map_df <- rbind(map_df_12, map_df_14)

# Create map
# NOTE(review): the title says 2014 but the facets show 2012 and 2014 --
# confirm the intended title text.
map_df %>%
  ggplot(aes(x=long, y=lat, group=group)) +
  geom_polygon(aes(fill = pct_passed), color = NA) +
  geom_path(data = map_state , colour = "white", size=.1) +
  coord_quickmap() +
  labs(title="Legislative Concentration 2014", x="", y="") +
  facet_wrap(~year, ncol = 2) +
  theme_void() +
  scale_fill_viridis(alpha = 1, begin = 0.5, end = 1, direction = -1,
                     discrete = FALSE, option = "C",
                     # trans=log10_trans(),
                     name = "Key:",
                     labels = scales::percent,
                     limits = c(0, 0.5))

ggsave("LegislativeMapCombined.pdf", #plot name
       plot = last_plot(), #save last plot outputted
       width=8, height=6, units="in" #dimensions of saved plot
)
/code/08_Final_PlotMapsTogether.R
no_license
chosam2322/239TFinal
R
false
false
886
r
# Plot 2012 and 2014 legislative concentration maps side by side.

# Tag each year's map data with its year, then stack them for faceting.
map_df_12 %<>% mutate(year = 2012)
map_df_14 %<>% mutate(year = 2014)
map_df <- rbind(map_df_12, map_df_14)

# Create map: one choropleth panel per year, state outlines drawn in white.
# NOTE(review): the title mentions only 2014 although both years are
# faceted -- confirm whether "2012 & 2014" was intended.
map_df %>%
  ggplot(aes(x = long, y = lat, group = group)) +
  geom_polygon(aes(fill = pct_passed), color = NA) +
  geom_path(data = map_state, colour = "white", size = .1) +
  coord_quickmap() +
  labs(title = "Legislative Concentration 2014", x = "", y = "") +
  facet_wrap(~year, ncol = 2) +
  theme_void() +
  scale_fill_viridis(alpha = 1, begin = 0.5, end = 1, direction = -1,
                     discrete = FALSE, option = "C",
                     # trans = log10_trans(),
                     name = "Key:",
                     labels = scales::percent,
                     limits = c(0, 0.5))

# Save the last plot.  The stray markdown fence (```) that followed this call
# was removed -- it is not valid R and aborted the script when sourced.
ggsave("LegislativeMapCombined.pdf",       # plot name
       plot = last_plot(),                 # save last plot outputted
       width = 8, height = 6, units = "in" # dimensions of saved plot
)
#' @export
#' @title checkDataElementDisaggValidity(data,dataset)
#'
#' @description Utility function to produce a data frame of
#' invalid data elements based on current
#' DATIM form specification
#'
#' @param data D2 compliant data frame object
#' @param datasets Should be a character vector of data set UIDs.
#' Alternatively, if left missing, user will be prompted.
#' @param return_violations Boolean to return violations only.
#' @return Returns a data frame of "dataElementName","categoryOptionComboName",
#' "dataElement","categoryOptionCombo"
#' of invalid data elements which are present in the data;
#' \code{TRUE} when all combinations are valid, and \code{FALSE} when
#' violations exist but \code{return_violations = FALSE}.
#'
checkDataElementDisaggValidity <- function(data, datasets = NA,
                                           return_violations = TRUE) {
  # Valid dataElement / categoryOptionCombo pairs for the requested datasets.
  des <- getValidDataElements(datasets) %>%
    dplyr::select(dataElement = dataelementuid,
                  categoryOptionCombo = categoryoptioncombouid)
  # Rows of `data` whose pair does not appear in the valid set.
  data_des_cocs_bad <- dplyr::anti_join(data, des,
                                        by = c("dataElement", "categoryOptionCombo"))
  if (NROW(data_des_cocs_bad) > 0) {
    warning("Invalid data element / category option combos found!")
    if (return_violations) {
      return(data_des_cocs_bad)
    }
    # Previously the function fell off the end here, returning NULL invisibly;
    # return FALSE explicitly so the result is always TRUE / FALSE / data frame.
    return(FALSE)
  } else {
    return(TRUE)
  }
}
/R/checkDataElementDisaggValidity.R
permissive
oitigeorge/datim-validation
R
false
false
1,137
r
#' @export
#' @title checkDataElementDisaggValidity(data,dataset)
#'
#' @description Utility function to produce a data frame of
#' invalid data elements based on current
#' DATIM form specification
#'
#' @param data D2 compliant data frame object
#' @param datasets Should be a character vector of data set UIDs.
#' Alternatively, if left missing, user will be prompted.
#' @param return_violations Boolean to return violations only.
#' @return Returns a data frame of "dataElementName","categoryOptionComboName",
#' "dataElement","categoryOptionCombo"
#' of invalid data elements which are present in the data;
#' \code{TRUE} when all combinations are valid, and \code{FALSE} when
#' violations exist but \code{return_violations = FALSE}.
#'
checkDataElementDisaggValidity <- function(data, datasets = NA,
                                           return_violations = TRUE) {
  # Valid dataElement / categoryOptionCombo pairs for the requested datasets.
  des <- getValidDataElements(datasets) %>%
    dplyr::select(dataElement = dataelementuid,
                  categoryOptionCombo = categoryoptioncombouid)
  # Rows of `data` whose pair does not appear in the valid set.
  data_des_cocs_bad <- dplyr::anti_join(data, des,
                                        by = c("dataElement", "categoryOptionCombo"))
  if (NROW(data_des_cocs_bad) > 0) {
    warning("Invalid data element / category option combos found!")
    if (return_violations) {
      return(data_des_cocs_bad)
    }
    # Previously the function fell off the end here, returning NULL invisibly;
    # return FALSE explicitly so the result is always TRUE / FALSE / data frame.
    return(FALSE)
  } else {
    return(TRUE)
  }
}
#' Check if the GxE model went well
#'
#' @description
#' \code{check_model.fit_model_GxE} runs diagnostic tests on the outputs of
#' \code{\link{model_GxE}}.
#' Run this step before continuing with the analysis; otherwise the results
#' may be misinterpreted.
#'
#' @param x outputs from \code{\link{model_GxE}}
#'
#' @details
#' S3 method.
#' The different test apply to the model are explained in the book \href{https://priviere.github.io/PPBstats_book/intro-agro.html#section-freq}{here}.
#'
#' @return It returns a list with the following elements:
#'
#' \itemize{
#'  \item model_GxE the output from the model
#'  \item data_ggplot a list containing information for ggplot:
#'  \itemize{
#'   \item data_ggplot_residuals a list containing :
#'    \itemize{
#'     \item data_ggplot_normality
#'     \item data_ggplot_skewness_test
#'     \item data_ggplot_kurtosis_test
#'     \item data_ggplot_shapiro_test
#'     \item data_ggplot_qqplot
#'    }
#'   \item data_ggplot_variability_repartition_pie
#'   \item data_ggplot_var_intra
#'  }
#' }
#'
#' @author Pierre Riviere
#'
#' @seealso
#' \itemize{
#'  \item \code{\link{check_model}}
#'  \item \code{\link{plot.check_model_GxE}}
#'  \item \code{\link{mean_comparisons}}
#'  \item \code{\link{mean_comparisons.check_model_GxE}}
#'  \item \code{\link{biplot_data}}
#'  \item \code{\link{biplot_data.check_model_GxE}}
#'  \item \code{\link{parameter_groups}}
#' }
#'
#' @export
#'
check_model.fit_model_GxE <- function(x) {
  # Extract the fitted ANOVA model and compute the frequentist diagnostics.
  anova_fit <- x$ANOVA$model
  diagnostics <- check_freq_anova(anova_fit)

  # Bundle the original model output with the ggplot-ready diagnostic data.
  out <- list("model_GxE" = x, "data_ggplot" = diagnostics)
  class(out) <- c("PPBstats", "check_model_GxE")
  return(out)
}
/R/check_model.fit_model_GxE.R
no_license
gaelleVF/PPBstats-PPBmelange
R
false
false
1,689
r
#' Check if the GxE model went well
#'
#' @description
#' \code{check_model.fit_model_GxE} runs diagnostic tests on the outputs of
#' \code{\link{model_GxE}}.
#' Run this step before continuing with the analysis; otherwise the results
#' may be misinterpreted.
#'
#' @param x outputs from \code{\link{model_GxE}}
#'
#' @details
#' S3 method.
#' The different test apply to the model are explained in the book \href{https://priviere.github.io/PPBstats_book/intro-agro.html#section-freq}{here}.
#'
#' @return It returns a list with the following elements:
#'
#' \itemize{
#'  \item model_GxE the output from the model
#'  \item data_ggplot a list containing information for ggplot:
#'  \itemize{
#'   \item data_ggplot_residuals a list containing :
#'    \itemize{
#'     \item data_ggplot_normality
#'     \item data_ggplot_skewness_test
#'     \item data_ggplot_kurtosis_test
#'     \item data_ggplot_shapiro_test
#'     \item data_ggplot_qqplot
#'    }
#'   \item data_ggplot_variability_repartition_pie
#'   \item data_ggplot_var_intra
#'  }
#' }
#'
#' @author Pierre Riviere
#'
#' @seealso
#' \itemize{
#'  \item \code{\link{check_model}}
#'  \item \code{\link{plot.check_model_GxE}}
#'  \item \code{\link{mean_comparisons}}
#'  \item \code{\link{mean_comparisons.check_model_GxE}}
#'  \item \code{\link{biplot_data}}
#'  \item \code{\link{biplot_data.check_model_GxE}}
#'  \item \code{\link{parameter_groups}}
#' }
#'
#' @export
#'
check_model.fit_model_GxE <- function(x) {
  # Extract the fitted ANOVA model and compute the frequentist diagnostics.
  anova_fit <- x$ANOVA$model
  diagnostics <- check_freq_anova(anova_fit)

  # Bundle the original model output with the ggplot-ready diagnostic data.
  out <- list("model_GxE" = x, "data_ggplot" = diagnostics)
  class(out) <- c("PPBstats", "check_model_GxE")
  return(out)
}
library(MASS)

set.seed(2221)

# Sample sizes (number of children added to the adult data) to test.
sample.n <- 1:40
# Preallocate the result vector instead of growing it inside the loop.
selected <- logical(length(sample.n))

for (i in sample.n) {
  # Draw i children without replacement and pool them with the adult data.
  sample.kid <- titanic.kid[sample(nrow(titanic.kid), i, replace = FALSE), ]
  new.data <- rbind(sample.kid, titanic.adult)
  new.data <- na.omit(new.data)

  # Full logistic model, then stepwise (both directions) selection by AIC.
  titanic.glm <- glm(survived ~ pclass + sex + parch + sibsp + embarked + fare + kid,
                     data = new.data, family = "binomial")
  glm.stepAIC <- step(titanic.glm, direction = c("both"))

  # Check whether "kid" is selected in the model.  Inspect the fitted terms
  # instead of grepl() on the deparsed call, which would also match any other
  # variable whose name merely contains the substring "kid".
  selected[i] <- "kid" %in% attr(terms(glm.stepAIC), "term.labels")
}

selected
rm(selected)
/titanic_kid_4_logit_stepwise.R
no_license
ichbinangela/iss
R
false
false
603
r
library(MASS)

set.seed(2221)

# Sample sizes (number of children added to the adult data) to test.
sample.n <- 1:40
# Preallocate the result vector instead of growing it inside the loop.
selected <- logical(length(sample.n))

for (i in sample.n) {
  # Draw i children without replacement and pool them with the adult data.
  sample.kid <- titanic.kid[sample(nrow(titanic.kid), i, replace = FALSE), ]
  new.data <- rbind(sample.kid, titanic.adult)
  new.data <- na.omit(new.data)

  # Full logistic model, then stepwise (both directions) selection by AIC.
  titanic.glm <- glm(survived ~ pclass + sex + parch + sibsp + embarked + fare + kid,
                     data = new.data, family = "binomial")
  glm.stepAIC <- step(titanic.glm, direction = c("both"))

  # Check whether "kid" is selected in the model.  Inspect the fitted terms
  # instead of grepl() on the deparsed call, which would also match any other
  # variable whose name merely contains the substring "kid".
  selected[i] <- "kid" %in% attr(terms(glm.stepAIC), "term.labels")
}

selected
rm(selected)
library(compiler)

# Iterated Local Search (ILS), byte-compiled for speed.
#
# Minimises `fn` either over the continuous box [lower, upper]^dim
# (comb = FALSE) or over permutations of 1:dim (comb = TRUE).
#
# Arguments:
#   dim             problem dimension / permutation length
#   fn              objective function, called as fn(x)
#   numIter         number of outer ILS iterations
#   lower, upper    box bounds; scalars are recycled to length `dim`
#   pertubationStep relative size of the outer perturbation jump (box case)
#   doLocalSearch   run a local search after each perturbation?
#   localSearchStep relative neighbourhood size for the local search (box case)
#   numIterLocal    neighbours sampled per local search (box case)
#   comb            TRUE for the permutation (combinatorial) variant
#   maximization    TRUE to maximise `fn` (handled by negation internally)
#   verbose         print the best solution and its value at the end?
#   history         also return the per-iteration best-so-far trajectories?
#
# Returns a list with `sol` (best solution found) and `fsol` (its objective
# value); when history = TRUE it additionally contains solHistory,
# fsolHistory, bestSolHistory and bestfSolHistory.
ils <- cmpfun( function( dim, fn, numIter=100, lower=0, upper=1, pertubationStep=0.1,
                         doLocalSearch=TRUE, localSearchStep=0.01, numIterLocal=50,
                         comb=FALSE, maximization=FALSE, verbose=TRUE, history=TRUE, ...){

  # Recycle scalar bounds so there is one bound per coordinate (box case only).
  if( comb == FALSE ){
    if( length(lower) != dim ){ lower <- rep( lower, dim ) }
    if( length(upper) != dim ){ upper <- rep( upper, dim ) }
  }

  # Maximisation is implemented by minimising the negated objective.
  if( maximization ){
    originalFn <- fn
    fn <- function(x) -originalFn(x)
  }

  # Random starting point: uniform in the box, or a random permutation.
  generateRandomSolution <- function(){
    if( comb == FALSE ){
      sapply( 1:dim, function(i) runif( 1, min=lower[i], max=upper[i] ) )
    } else{
      sample(dim)
    }
  }

  # Perturbation move.  Box case: bounded random jump, clipped to the box.
  # Permutation case: reverse the segment strictly after the first of two
  # random cut positions up to the second (a 2-opt style reversal).
  pertubation <- function( x, step=pertubationStep ){
    if( comb == FALSE ){
      x <- x + (upper-lower) * sapply( 1:dim, function(i) runif( 1, min=-step, max=step ) )
      x <- ifelse( x < lower, lower, x )
      x <- ifelse( x > upper, upper, x )
    } else{
      indexChange <- sample(dim,2)
      if( indexChange[1] > indexChange[2] ){
        indexChange <- c(indexChange[2], indexChange[1])
      }
      # Swap inwards from both ends, reversing positions (first+1)..second.
      while( indexChange[2] > indexChange[1] ){
        aux <- x[indexChange[1] + 1]
        x[indexChange[1] + 1] <- x[indexChange[2]]
        x[indexChange[2]] <- aux
        indexChange[1] <- indexChange[1] + 1
        indexChange[2] <- indexChange[2] - 1
      }
      x
    }
  }

  # Best-of-sampled-neighbourhood local search around `x`.
  localSearch <- function( x, step = localSearchStep ){
    bestLocal <- x
    bestFLocal <- fn( x )
    # Keep `new` if it improves on the best neighbour seen so far.
    testLocal <- function( new ){
      fnew <- fn( new )
      if( fnew < bestFLocal ){
        bestLocal <<- new
        bestFLocal <<- fnew
      }
    }
    if( comb == FALSE ){
      # Box case: sample numIterLocal small perturbations of x.
      for( i in 1:numIterLocal ){
        new <- pertubation( x, step )
        testLocal( new )
      }
    } else{
      # Permutation case: try one random swap per position.
      for( i in 1:dim ){
        new <- x
        index <- sample(dim,1)
        aux <- new[i]
        new[i] <- new[index]
        new[index] <- aux
        testLocal( new )
      }
    }
    bestLocal
  }

  # Initial solution (optionally refined by local search).
  sol <- generateRandomSolution()
  if( doLocalSearch ){ sol <- localSearch( sol ) }
  fsol <- fn( sol )

  if( history ){
    solHistory <- vector( "list", numIter )
    fsolHistory <- rep(0, numIter)
    solHistory[[1]] <- sol
    fsolHistory[1] <- fsol
  }

  # Main ILS loop: perturb the incumbent, optionally refine locally, accept
  # only improvements.  Guarded so that numIter = 1 performs no improvement
  # step; the original `for(i in 2:numIter)` iterated over 2:1 in that case,
  # running two spurious iterations and extending the history out of range.
  if( numIter > 1 ){
    for( i in 2:numIter ){
      x <- pertubation( sol )
      if( doLocalSearch ){ x <- localSearch( x ) }
      fx <- fn( x )
      if( fx < fsol ){
        sol <- x
        fsol <- fx
      }
      if( history ){
        solHistory[[i]] <- sol
        fsolHistory[i] <- fsol
      }
    }
  }

  if( verbose ){
    print("Best solution:", quote=FALSE); print( sol )
    print("Best function evaluation:", quote=FALSE); print( fsol )
  }

  if( history ){
    ob <- list( sol=sol, fsol=fsol, solHistory=solHistory, fsolHistory=fsolHistory,
                bestSolHistory=solHistory, bestfSolHistory=fsolHistory )
  } else{
    ob <- list( sol=sol, fsol=fsol )
  }
  ob
} )
/IC/Inteligência Computacional Apliacada-20170602T134258Z-001/Inteligência Computacional Apliacada/Computação Evolucionária/Algoritmos/Codigos-R-Metaheuristicas/Algoritmos/ils.R
no_license
GerardoFonteles/Mestrado
R
false
false
3,025
r
library(compiler)

# Iterated Local Search (ILS), byte-compiled for speed.
#
# Minimises `fn` either over the continuous box [lower, upper]^dim
# (comb = FALSE) or over permutations of 1:dim (comb = TRUE).
#
# Arguments:
#   dim             problem dimension / permutation length
#   fn              objective function, called as fn(x)
#   numIter         number of outer ILS iterations
#   lower, upper    box bounds; scalars are recycled to length `dim`
#   pertubationStep relative size of the outer perturbation jump (box case)
#   doLocalSearch   run a local search after each perturbation?
#   localSearchStep relative neighbourhood size for the local search (box case)
#   numIterLocal    neighbours sampled per local search (box case)
#   comb            TRUE for the permutation (combinatorial) variant
#   maximization    TRUE to maximise `fn` (handled by negation internally)
#   verbose         print the best solution and its value at the end?
#   history         also return the per-iteration best-so-far trajectories?
#
# Returns a list with `sol` (best solution found) and `fsol` (its objective
# value); when history = TRUE it additionally contains solHistory,
# fsolHistory, bestSolHistory and bestfSolHistory.
ils <- cmpfun( function( dim, fn, numIter=100, lower=0, upper=1, pertubationStep=0.1,
                         doLocalSearch=TRUE, localSearchStep=0.01, numIterLocal=50,
                         comb=FALSE, maximization=FALSE, verbose=TRUE, history=TRUE, ...){

  # Recycle scalar bounds so there is one bound per coordinate (box case only).
  if( comb == FALSE ){
    if( length(lower) != dim ){ lower <- rep( lower, dim ) }
    if( length(upper) != dim ){ upper <- rep( upper, dim ) }
  }

  # Maximisation is implemented by minimising the negated objective.
  if( maximization ){
    originalFn <- fn
    fn <- function(x) -originalFn(x)
  }

  # Random starting point: uniform in the box, or a random permutation.
  generateRandomSolution <- function(){
    if( comb == FALSE ){
      sapply( 1:dim, function(i) runif( 1, min=lower[i], max=upper[i] ) )
    } else{
      sample(dim)
    }
  }

  # Perturbation move.  Box case: bounded random jump, clipped to the box.
  # Permutation case: reverse the segment strictly after the first of two
  # random cut positions up to the second (a 2-opt style reversal).
  pertubation <- function( x, step=pertubationStep ){
    if( comb == FALSE ){
      x <- x + (upper-lower) * sapply( 1:dim, function(i) runif( 1, min=-step, max=step ) )
      x <- ifelse( x < lower, lower, x )
      x <- ifelse( x > upper, upper, x )
    } else{
      indexChange <- sample(dim,2)
      if( indexChange[1] > indexChange[2] ){
        indexChange <- c(indexChange[2], indexChange[1])
      }
      # Swap inwards from both ends, reversing positions (first+1)..second.
      while( indexChange[2] > indexChange[1] ){
        aux <- x[indexChange[1] + 1]
        x[indexChange[1] + 1] <- x[indexChange[2]]
        x[indexChange[2]] <- aux
        indexChange[1] <- indexChange[1] + 1
        indexChange[2] <- indexChange[2] - 1
      }
      x
    }
  }

  # Best-of-sampled-neighbourhood local search around `x`.
  localSearch <- function( x, step = localSearchStep ){
    bestLocal <- x
    bestFLocal <- fn( x )
    # Keep `new` if it improves on the best neighbour seen so far.
    testLocal <- function( new ){
      fnew <- fn( new )
      if( fnew < bestFLocal ){
        bestLocal <<- new
        bestFLocal <<- fnew
      }
    }
    if( comb == FALSE ){
      # Box case: sample numIterLocal small perturbations of x.
      for( i in 1:numIterLocal ){
        new <- pertubation( x, step )
        testLocal( new )
      }
    } else{
      # Permutation case: try one random swap per position.
      for( i in 1:dim ){
        new <- x
        index <- sample(dim,1)
        aux <- new[i]
        new[i] <- new[index]
        new[index] <- aux
        testLocal( new )
      }
    }
    bestLocal
  }

  # Initial solution (optionally refined by local search).
  sol <- generateRandomSolution()
  if( doLocalSearch ){ sol <- localSearch( sol ) }
  fsol <- fn( sol )

  if( history ){
    solHistory <- vector( "list", numIter )
    fsolHistory <- rep(0, numIter)
    solHistory[[1]] <- sol
    fsolHistory[1] <- fsol
  }

  # Main ILS loop: perturb the incumbent, optionally refine locally, accept
  # only improvements.  Guarded so that numIter = 1 performs no improvement
  # step; the original `for(i in 2:numIter)` iterated over 2:1 in that case,
  # running two spurious iterations and extending the history out of range.
  if( numIter > 1 ){
    for( i in 2:numIter ){
      x <- pertubation( sol )
      if( doLocalSearch ){ x <- localSearch( x ) }
      fx <- fn( x )
      if( fx < fsol ){
        sol <- x
        fsol <- fx
      }
      if( history ){
        solHistory[[i]] <- sol
        fsolHistory[i] <- fsol
      }
    }
  }

  if( verbose ){
    print("Best solution:", quote=FALSE); print( sol )
    print("Best function evaluation:", quote=FALSE); print( fsol )
  }

  if( history ){
    ob <- list( sol=sol, fsol=fsol, solHistory=solHistory, fsolHistory=fsolHistory,
                bestSolHistory=solHistory, bestfSolHistory=fsolHistory )
  } else{
    ob <- list( sol=sol, fsol=fsol )
  }
  ob
} )
# Package-level documentation block; the "_PACKAGE" sentinel below tells
# roxygen2 to attach this documentation to the package itself.
#' @details
#' This package is a port of the original Python lime package implementing the
#' prediction explanation framework laid out Ribeiro *et al.* (2016). The
#' package supports models from `caret` and `mlr` natively, but see
#' [the docs][model_support] for how to make it work for any model.
#'
#' **Main functions:**
#'
#' Use of `lime` is mainly through two functions. First you create an
#' `explainer` object using the [lime()] function based on the training data and
#' the model, and then you can use the [explain()] function along with new data
#' and the explainer to create explanations for the model output.
#'
#' Along with these two functions, `lime` also provides the [plot_features()]
#' and [plot_text_explanations()] function to visualise the explanations
#' directly.
#'
#' @references Ribeiro, M.T., Singh, S., Guestrin, C. *"Why Should I Trust You?": Explaining the Predictions of Any Classifier*. 2016, <https://arxiv.org/abs/1602.04938>
#'
#' @aliases lime-package
#' @useDynLib lime
#' @importFrom Rcpp sourceCpp
'_PACKAGE'
/R/lime-package.r
permissive
thomasp85/lime
R
false
false
1,056
r
# Package-level documentation block; the "_PACKAGE" sentinel below tells
# roxygen2 to attach this documentation to the package itself.
#' @details
#' This package is a port of the original Python lime package implementing the
#' prediction explanation framework laid out Ribeiro *et al.* (2016). The
#' package supports models from `caret` and `mlr` natively, but see
#' [the docs][model_support] for how to make it work for any model.
#'
#' **Main functions:**
#'
#' Use of `lime` is mainly through two functions. First you create an
#' `explainer` object using the [lime()] function based on the training data and
#' the model, and then you can use the [explain()] function along with new data
#' and the explainer to create explanations for the model output.
#'
#' Along with these two functions, `lime` also provides the [plot_features()]
#' and [plot_text_explanations()] function to visualise the explanations
#' directly.
#'
#' @references Ribeiro, M.T., Singh, S., Guestrin, C. *"Why Should I Trust You?": Explaining the Predictions of Any Classifier*. 2016, <https://arxiv.org/abs/1602.04938>
#'
#' @aliases lime-package
#' @useDynLib lime
#' @importFrom Rcpp sourceCpp
'_PACKAGE'
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/arrhenius.R \name{arrhenius} \alias{arrhenius} \title{Compute the arrhenius version of a soil respiration} \usage{ arrhenius(param, data_in) } \arguments{ \item{param}{input parameter vector - is a data frame with the following columns: 1. name 2. changeable 3. value 4. minVal 5. maxVal} } \value{ rSoil at each of the Plots with a given value } \description{ \code{arrhenius} returns the predicted soil respiration values }
/man/arrhenius.Rd
no_license
jmzobitz/SoilModeling
R
false
true
504
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/arrhenius.R \name{arrhenius} \alias{arrhenius} \title{Compute the arrhenius version of a soil respiration} \usage{ arrhenius(param, data_in) } \arguments{ \item{param}{input parameter vector - is a data frame with the following columns: 1. name 2. changeable 3. value 4. minVal 5. maxVal} } \value{ rSoil at each of the Plots with a given value } \description{ \code{arrhenius} returns the predicted soil respiration values }
#' Query bibliography
#'
#' Search entries in a bibliography.
#'
#' @param x Character. Search terms used to filter bibliography (by author, year, title, and journal
#'    fields); Regex is supported.
#' @param bib_file Character. Path to Bib(La)TeX-file. See details.
#' @param cache Logical. If \code{cache = TRUE} cached bibliography is used, if available. If
#'    \code{cache = FALSE} bibliography is re-imported on every function call.
#' @param use_betterbiblatex Logical. If \code{use_betterbiblatex = TRUE} references are imported from Zotero/Juris-M.
#'    Requires that the \href{https://github.com/retorquere/zotero-better-bibtex}{Better Bib(La)TeX} is installed and
#'    Zotero/Juris-M is running.
#' @param encoding Character. Encoding of the Bib(La)TeX-file.
#'
#' @details The path to the BibTeX-file can be set in the global options and is set to
#'    \code{references.bib} when the package is loaded. Once the path is changed in the
#'    RStudio addin, the global option is updated. If \code{use_betterbiblatex = TRUE} references
#'    are imported from Zotero/Juris-M rather than from the Bib(La)TeX-file. The Bib(La)TeX-file
#'    is then updated to include the inserted reference.
#'
#' @return Returns list of class \code{\link[RefManageR]{BibEntry}} including all matching bibliography entries.
#' @seealso \code{\link{md_cite}}, \code{\link{insert_citation}}
#'
#' @import assertthat

query_bib <- function(
  x
  , bib_file = getOption("citr.bibliography_path")
  , cache = TRUE
  , use_betterbiblatex = getOption("citr.use_betterbiblatex")
  , encoding = getOption("citr.encoding")
) {
  assert_that(is.string(x))
  assert_that(is.string(bib_file))
  assert_that(is.flag(cache))
  assert_that(is.flag(use_betterbiblatex))

  if(use_betterbiblatex && !betterbiblatex_available()) {
    # Message fixed: the original string lacked a space before the file path
    # and contained an embedded line break.
    message(
      "Could not connect to Zotero's Better-BibTeX extension; importing references from "
      , bib_file
      , ". Is Zotero up and running?"
    )
  }

  # Use cached bibliography, if available
  if(is.null(getOption("citr.bibliography_cache")) || !cache) {
    # Scalar condition: use && (short-circuit) rather than elementwise &.
    if(use_betterbiblatex && betterbiblatex_available()) {
      bib <- load_betterbiblatex_bib()
    } else {
      bib <- RefManageR::ReadBib(file = bib_file, check = FALSE, .Encoding = encoding)
    }
    if(cache) options(citr.bibliography_cache = bib)
  } else {
    bib <- getOption("citr.bibliography_cache")
  }

  # Create searchable text strings for references; whitespace in the query
  # acts as a wildcard between terms.
  pasted_bib <- paste_references(bib)
  entries <- bib[grepl(gsub("\\s", ".+", x), pasted_bib, ignore.case = TRUE)]
  if(length(entries) > 0) entries else NULL
}


# Build one human-readable "Author (Year). Title. Journal" string per entry,
# following APA-like author abbreviation rules (et al. for 6+ authors).
paste_references <- function(bib) {
  assert_that(methods::is(bib, "bibentry"))

  author_names <- sapply(bib, function(x) {
    author_names <- x$author$family
    # Fall back to given names when no family names are present.
    if(is.null(author_names)) {
      author_names <- unlist(x$author$given)
    }

    author_names <- if(is.list(author_names)) {
      unlist(lapply(author_names, paste, collapse = " "))
    } else if(is.null(author_names)) {
      ""
    } else {
      paste(author_names, collapse = " ")
    }

    n_authors <- length(author_names)
    if(n_authors == 1) {
      author_names
    } else if(n_authors == 2) {
      paste(author_names, collapse = " & ")
    } else if(n_authors > 2 & n_authors < 6) {
      paste(
        paste(author_names[-n_authors], collapse = ", ")
        , author_names[n_authors]
        , sep = ", & "
      )
    } else if(n_authors >= 6) {
      paste(author_names[1], "et al.")
    }
  })

  year <- sapply(bib, function(x) {
    if(!is.null(x$year)) x$year else {
      # BibLaTeX entries may carry a `date` field instead of `year`.
      if(!is.null(x$date)) {
        date <- unlist(x$date)
        regmatches(date, regexpr("[0-9]{4}", date))
      } else {
        "n.d."
      }
    }
  })

  # Strip Bib(La)TeX braces/escapes from the display strings.
  author_names <- gsub("\\}|\\{", "", author_names)

  titles <- gsub("\\}|\\{|\\\\", "", bib$title)
  titles <- gsub("\\n", " ", titles)

  # If author_names is empty, display the title in the author position.
  no_authors <- author_names == ""
  author_names[no_authors] <- titles[no_authors]
  titles <- paste0(" ", titles, ".")
  titles[no_authors] <- ""

  journals <- gsub("\\}|\\{|\\\\", "", bib$journal)
  journals <- paste0(" ", journals, ifelse(journals != "", ".", NULL))
  journals[journals == " NULL."] <- ""

  paste0(author_names, " (", year, ").", titles, journals)
}


# TRUE when the Better-BibTeX HTTP endpoint answers its readiness probe.
betterbiblatex_available <- function() {
  tryCatch(
    rawToChar(curl::curl_fetch_memory(url = "http://localhost:23119/better-bibtex/cayw?probe=probe")$content) == "ready"
    , error = function(e) FALSE
  )
}


# Import the complete library from Zotero's Better-BibTeX HTTP endpoint.
load_betterbiblatex_bib <- function() {
  betterbibtex_url <- "http://localhost:23119/better-bibtex/library?library.biblatex"
  betterbibtex_bib <- rawToChar(curl::curl_fetch_memory(url = betterbibtex_url)$content)
  betterbibtex_bib <- strsplit(betterbibtex_bib, "@comment\\{jabref-meta")[[1]][1] # Remove jab-ref comments
  # Split the dump into individual @type{...} entries.
  betterbibtex_entries <- strsplit(gsub("(@\\w+\\{)", "~!citr!~\\1", betterbibtex_bib), "~!citr!~")[[1]]

  # Read in batches of 100 because bibtex::read.bib does not work with large
  # files.  ceiling() fixes the off-by-one of the original `%/% 100 + 1`,
  # which produced an extra batch -- and a descending, out-of-range index
  # sequence -- whenever the number of entries was an exact multiple of 100.
  bib <- c()
  no_batches <- ceiling(length(betterbibtex_entries) / 100)
  for(i in seq_len(no_batches)) {
    # Proper temp file instead of a random file name in the working directory;
    # unlink() at exit cleans up even if ReadBib() errors.
    tmp_bib_file <- tempfile(fileext = ".bib")
    on.exit(unlink(tmp_bib_file), add = TRUE)
    batch <- betterbibtex_entries[((i - 1) * 100 + 1):min(i * 100, length(betterbibtex_entries))]
    writeLines(batch, con = tmp_bib_file)
    bib <- c(bib, RefManageR::ReadBib(file = tmp_bib_file, check = FALSE))
  }

  class(bib) <- c("BibEntry", "bibentry")
  bib
}
/R/query_bib.R
no_license
mbojan/citr
R
false
false
5,568
r
#' Query bibliography
#'
#' Search entries in a bibliography.
#'
#' @param x Character. Search terms used to filter bibliography (by author, year, title, and journal
#'    fields); Regex is supported.
#' @param bib_file Character. Path to Bib(La)TeX-file. See details.
#' @param cache Logical. If \code{cache = TRUE} cached bibliography is used, if available. If
#'    \code{cache = FALSE} bibliography is re-imported on every function call.
#' @param use_betterbiblatex Logical. If \code{use_betterbiblatex = TRUE} references are imported from Zotero/Juris-M.
#'    Requires that the \href{https://github.com/retorquere/zotero-better-bibtex}{Better Bib(La)TeX} is installed and
#'    Zotero/Juris-M is running.
#' @param encoding Character. Encoding of the Bib(La)TeX-file.
#'
#' @details The path to the BibTeX-file can be set in the global options and is set to
#'    \code{references.bib} when the package is loaded. Once the path is changed in the
#'    RStudio addin, the global option is updated. If \code{use_betterbiblatex = TRUE} references
#'    are imported from Zotero/Juris-M rather than from the Bib(La)TeX-file. The Bib(La)TeX-file
#'    is then updated to include the inserted reference.
#'
#' @return Returns list of class \code{\link[RefManageR]{BibEntry}} including all matching bibliography entries.
#' @seealso \code{\link{md_cite}}, \code{\link{insert_citation}}
#'
#' @import assertthat

query_bib <- function(
  x
  , bib_file = getOption("citr.bibliography_path")
  , cache = TRUE
  , use_betterbiblatex = getOption("citr.use_betterbiblatex")
  , encoding = getOption("citr.encoding")
) {
  assert_that(is.string(x))
  assert_that(is.string(bib_file))
  assert_that(is.flag(cache))
  assert_that(is.flag(use_betterbiblatex))

  if(use_betterbiblatex && !betterbiblatex_available()) {
    # Message fixed: the original string lacked a space before the file path
    # and contained an embedded line break.
    message(
      "Could not connect to Zotero's Better-BibTeX extension; importing references from "
      , bib_file
      , ". Is Zotero up and running?"
    )
  }

  # Use cached bibliography, if available
  if(is.null(getOption("citr.bibliography_cache")) || !cache) {
    # Scalar condition: use && (short-circuit) rather than elementwise &.
    if(use_betterbiblatex && betterbiblatex_available()) {
      bib <- load_betterbiblatex_bib()
    } else {
      bib <- RefManageR::ReadBib(file = bib_file, check = FALSE, .Encoding = encoding)
    }
    if(cache) options(citr.bibliography_cache = bib)
  } else {
    bib <- getOption("citr.bibliography_cache")
  }

  # Create searchable text strings for references; whitespace in the query
  # acts as a wildcard between terms.
  pasted_bib <- paste_references(bib)
  entries <- bib[grepl(gsub("\\s", ".+", x), pasted_bib, ignore.case = TRUE)]
  if(length(entries) > 0) entries else NULL
}


# Build one human-readable "Author (Year). Title. Journal" string per entry,
# following APA-like author abbreviation rules (et al. for 6+ authors).
paste_references <- function(bib) {
  assert_that(methods::is(bib, "bibentry"))

  author_names <- sapply(bib, function(x) {
    author_names <- x$author$family
    # Fall back to given names when no family names are present.
    if(is.null(author_names)) {
      author_names <- unlist(x$author$given)
    }

    author_names <- if(is.list(author_names)) {
      unlist(lapply(author_names, paste, collapse = " "))
    } else if(is.null(author_names)) {
      ""
    } else {
      paste(author_names, collapse = " ")
    }

    n_authors <- length(author_names)
    if(n_authors == 1) {
      author_names
    } else if(n_authors == 2) {
      paste(author_names, collapse = " & ")
    } else if(n_authors > 2 & n_authors < 6) {
      paste(
        paste(author_names[-n_authors], collapse = ", ")
        , author_names[n_authors]
        , sep = ", & "
      )
    } else if(n_authors >= 6) {
      paste(author_names[1], "et al.")
    }
  })

  year <- sapply(bib, function(x) {
    if(!is.null(x$year)) x$year else {
      # BibLaTeX entries may carry a `date` field instead of `year`.
      if(!is.null(x$date)) {
        date <- unlist(x$date)
        regmatches(date, regexpr("[0-9]{4}", date))
      } else {
        "n.d."
      }
    }
  })

  # Strip Bib(La)TeX braces/escapes from the display strings.
  author_names <- gsub("\\}|\\{", "", author_names)

  titles <- gsub("\\}|\\{|\\\\", "", bib$title)
  titles <- gsub("\\n", " ", titles)

  # If author_names is empty, display the title in the author position.
  no_authors <- author_names == ""
  author_names[no_authors] <- titles[no_authors]
  titles <- paste0(" ", titles, ".")
  titles[no_authors] <- ""

  journals <- gsub("\\}|\\{|\\\\", "", bib$journal)
  journals <- paste0(" ", journals, ifelse(journals != "", ".", NULL))
  journals[journals == " NULL."] <- ""

  paste0(author_names, " (", year, ").", titles, journals)
}


# TRUE when the Better-BibTeX HTTP endpoint answers its readiness probe.
betterbiblatex_available <- function() {
  tryCatch(
    rawToChar(curl::curl_fetch_memory(url = "http://localhost:23119/better-bibtex/cayw?probe=probe")$content) == "ready"
    , error = function(e) FALSE
  )
}


# Import the complete library from Zotero's Better-BibTeX HTTP endpoint.
load_betterbiblatex_bib <- function() {
  betterbibtex_url <- "http://localhost:23119/better-bibtex/library?library.biblatex"
  betterbibtex_bib <- rawToChar(curl::curl_fetch_memory(url = betterbibtex_url)$content)
  betterbibtex_bib <- strsplit(betterbibtex_bib, "@comment\\{jabref-meta")[[1]][1] # Remove jab-ref comments
  # Split the dump into individual @type{...} entries.
  betterbibtex_entries <- strsplit(gsub("(@\\w+\\{)", "~!citr!~\\1", betterbibtex_bib), "~!citr!~")[[1]]

  # Read in batches of 100 because bibtex::read.bib does not work with large
  # files.  ceiling() fixes the off-by-one of the original `%/% 100 + 1`,
  # which produced an extra batch -- and a descending, out-of-range index
  # sequence -- whenever the number of entries was an exact multiple of 100.
  bib <- c()
  no_batches <- ceiling(length(betterbibtex_entries) / 100)
  for(i in seq_len(no_batches)) {
    # Proper temp file instead of a random file name in the working directory;
    # unlink() at exit cleans up even if ReadBib() errors.
    tmp_bib_file <- tempfile(fileext = ".bib")
    on.exit(unlink(tmp_bib_file), add = TRUE)
    batch <- betterbibtex_entries[((i - 1) * 100 + 1):min(i * 100, length(betterbibtex_entries))]
    writeLines(batch, con = tmp_bib_file)
    bib <- c(bib, RefManageR::ReadBib(file = tmp_bib_file, check = FALSE))
  }

  class(bib) <- c("BibEntry", "bibentry")
  bib
}
#' @title Key-Value Storage
#'
#' @usage NULL
#' @name Dictionary
#' @format [R6::R6Class] object.
#'
#' @description
#' A simple key-value store for [R6::R6] generator objects.
#' On retrieval of an object, the following applies:
#'
#' * R6 Factories (objects of class `R6ClassGenerator`) are initialized (with additional arguments).
#' * Functions are called (with additional arguments) and must return an instance of a [R6::R6] object.
#' * Other objects are returned as-is.
#'
#' @section Construction:
#' ```
#' d = Dictionary$new()
#' ```
#'
#' @section Methods:
#' * `get(key, ...)`\cr
#'   (`character(1)`, ...) -> `any`\cr
#'   Retrieves object with key `key` from the dictionary.
#'   Additional arguments are passed to the stored object during construction.
#'
#' * `mget(keys, ...)`\cr
#'   (`character()`, ...) -> named `list()`\cr
#'   Retrieves objects with keys `keys` from the dictionary, returns them in a list named with `keys`.
#'   Additional arguments are passed to the stored objects during construction.
#'
#' * `has(keys)`\cr
#'   `character()` -> `logical()`\cr
#'   Returns a logical vector with `TRUE` at its i-th position, if the i-th key exists.
#'
#' * `keys(pattern)`\cr
#'   `character(1)` -> `character()`\cr
#'   Returns all keys which comply to the regular expression `pattern`.
#'
#' * `add(key, value, ..., required_args = character())`\cr
#'   (`character(1)`, `any`, ..., `character()`) -> `self`\cr
#'   Adds object `value` to the dictionary with key `key`, potentially overwriting a previously stored item.
#'   Additional arguments in `...` are used as default arguments for `value` during construction.
#'   If the object is not constructable without additional arguments, the require argument names should be provided in `required_args`.
#'
#' * `remove(key)`\cr
#'   `character()` -> `self`\cr
#'   Removes object with key `key` from the dictionary.
#'
#' * `required_args(key)`\cr
#'   (`character(1)`) -> `character()`\cr
#'   Returns the names of arguments required to construct the object.
#'
#' @family Dictionary
#' @export
Dictionary = R6Class("Dictionary",
  cloneable = FALSE,
  public = list(
    # Environment mapping key -> list(value, pars, required_args).
    items = NULL,

    # construct, set container type (string)
    initialize = function() {
      # emptyenv() parent: lookups never fall through to other scopes.
      self$items = new.env(parent = emptyenv())
    },

    # One-line representation, e.g. "<Dictionary>".
    format = function() {
      sprintf("<%s>", class(self)[1L])
    },

    # Console summary: key count, keys, and the public interface.
    print = function() {
      keys = self$keys()
      catf(sprintf("%s with %i stored values", format(self), length(keys)))
      catf(str_indent("Keys:", keys))
      catf(str_indent("\nPublic:", str_r6_interface(self)))
    },

    # All stored keys, optionally filtered by a regular expression.
    keys = function(pattern = NULL) {
      keys = ls(self$items, all.names = TRUE)
      if (!is.null(pattern))
        keys = keys[grepl(assert_string(pattern), keys)]
      keys
    },

    # Named logical vector: does each key exist in the store?
    has = function(keys) {
      assert_character(keys, any.missing = FALSE)
      set_names(map_lgl(keys, exists, envir = self$items, inherits = FALSE), keys)
    },

    # Retrieve and construct a single stored object (see dictionary_retrieve).
    get = function(key, ...) {
      dictionary_retrieve(self, key, ...)
    },

    # Retrieve multiple objects as a list named by `keys`.
    mget = function(keys, ...) {
      set_names(lapply(keys, self$get, ...), keys)
    },

    # Store `value` under `key`; `...` become construction defaults.
    # Silently overwrites any existing entry with the same key.
    add = function(key, value, ..., required_args = character()) {
      assert_id(key)
      assert_character(required_args, any.missing = FALSE)
      assign(x = key, value = list(value = value, pars = list(...), required_args = required_args), envir = self$items)
      invisible(self)
    },

    # Delete the entry for `key`; errors (with a suggestion) if absent.
    remove = function(key) {
      if (!self$has(key))
        stopf("Element with key '%s' not found!%s", key, did_you_mean(key, self$keys()))
      rm(list = key, envir = self$items)
      invisible(self)
    },

    # Names of arguments the stored object needs for construction.
    required_args = function(key) {
      assert_id(key)
      self$items[[key]][["required_args"]]
    }
  )
)

# Internal: look up `key` and construct/return the stored value.
# R6 generators are instantiated via $new(), plain functions are called (and
# must yield an R6 object, checked by assert_r6); any other object is
# returned as-is.  Arguments in `...` override the defaults stored by add().
dictionary_retrieve = function(self, key, ...) {
  obj = get0(key, envir = self$items, inherits = FALSE, ifnotfound = NULL)
  if (is.null(obj))
    stopf("Element with key '%s' not found!%s", key, did_you_mean(key, self$keys()))

  value = obj$value
  pars = insert_named(obj$pars, list(...))
  # All required_args must be supplied (either stored or passed via ...).
  if (any(obj$required_args %nin% names(pars)))
    stopf("Need the arguments %s to construct '%s'", str_collapse(obj$required_args, quote = "'"), key)

  if (inherits(value, "R6ClassGenerator")) {
    value = do.call(value$new, pars)
  } else if (is.function(value)) {
    value = assert_r6(do.call(value, pars))
  }
  return(value)
}

# Conversion to a keyed data.table listing all stored keys.
#' @export
as.data.table.Dictionary = function(x, ...) {
  setkeyv(as.data.table(list(key = x$keys())), "key")[]
}
/R/Dictionary.R
permissive
kdpsingh/mlr3
R
false
false
4,506
r
#' @title Key-Value Storage
#'
#' @usage NULL
#' @name Dictionary
#' @format [R6::R6Class] object.
#'
#' @description
#' A simple key-value store for [R6::R6] generator objects.
#' On retrieval of an object, the following applies:
#'
#' * R6 Factories (objects of class `R6ClassGenerator`) are initialized (with additional arguments).
#' * Functions are called (with additional arguments) and must return an instance of a [R6::R6] object.
#' * Other objects are returned as-is.
#'
#' @section Construction:
#' ```
#' d = Dictionary$new()
#' ```
#'
#' @section Methods:
#' * `get(key, ...)`\cr
#'   (`character(1)`, ...) -> `any`\cr
#'   Retrieves object with key `key` from the dictionary.
#'   Additional arguments are passed to the stored object during construction.
#'
#' * `mget(keys, ...)`\cr
#'   (`character()`, ...) -> named `list()`\cr
#'   Retrieves objects with keys `keys` from the dictionary, returns them in a list named with `keys`.
#'   Additional arguments are passed to the stored objects during construction.
#'
#' * `has(keys)`\cr
#'   `character()` -> `logical()`\cr
#'   Returns a logical vector with `TRUE` at its i-th position, if the i-th key exists.
#'
#' * `keys(pattern)`\cr
#'   `character(1)` -> `character()`\cr
#'   Returns all keys which comply to the regular expression `pattern`.
#'
#' * `add(key, value, ..., required_args = character())`\cr
#'   (`character(1)`, `any`, ..., `character()`) -> `self`\cr
#'   Adds object `value` to the dictionary with key `key`, potentially overwriting a previously stored item.
#'   Additional arguments in `...` are used as default arguments for `value` during construction.
#'   If the object is not constructable without additional arguments, the required argument names should be provided in `required_args`.
#'
#' * `remove(key)`\cr
#'   `character()` -> `self`\cr
#'   Removes object with key `key` from the dictionary.
#'
#' * `required_args(key)`\cr
#'   (`character(1)`) -> `character()`\cr
#'   Returns the names of arguments required to construct the object.
#'
#' @family Dictionary
#' @export
Dictionary = R6Class("Dictionary",
  cloneable = FALSE,
  public = list(
    # Environment used as a hash map: one binding per stored key.
    items = NULL,

    # construct, set container type (string)
    initialize = function() {
      # parent = emptyenv() prevents lookups from leaking into globals
      self$items = new.env(parent = emptyenv())
    },

    # Short single-line representation, e.g. "<Dictionary>".
    format = function() {
      sprintf("<%s>", class(self)[1L])
    },

    print = function() {
      keys = self$keys()
      catf(sprintf("%s with %i stored values", format(self), length(keys)))
      catf(str_indent("Keys:", keys))
      catf(str_indent("\nPublic:", str_r6_interface(self)))
    },

    # All stored keys, optionally filtered by a regular expression.
    keys = function(pattern = NULL) {
      # all.names = TRUE also returns keys starting with a dot
      keys = ls(self$items, all.names = TRUE)
      if (!is.null(pattern))
        keys = keys[grepl(assert_string(pattern), keys)]
      keys
    },

    # Vectorized existence check; returns a named logical vector.
    has = function(keys) {
      assert_character(keys, any.missing = FALSE)
      set_names(map_lgl(keys, exists, envir = self$items, inherits = FALSE), keys)
    },

    # Retrieve and construct a single object (see dictionary_retrieve).
    get = function(key, ...) {
      dictionary_retrieve(self, key, ...)
    },

    # Retrieve multiple objects; result is a list named by `keys`.
    mget = function(keys, ...) {
      set_names(lapply(keys, self$get, ...), keys)
    },

    # Store `value` under `key`; `...` become default construction args.
    # Silently overwrites a previously stored item with the same key.
    add = function(key, value, ..., required_args = character()) {
      assert_id(key)
      assert_character(required_args, any.missing = FALSE)
      assign(x = key, value = list(value = value, pars = list(...), required_args = required_args), envir = self$items)
      invisible(self)
    },

    remove = function(key) {
      if (!self$has(key))
        stopf("Element with key '%s' not found!%s", key, did_you_mean(key, self$keys()))
      rm(list = key, envir = self$items)
      invisible(self)
    },

    # Names of arguments that must be supplied to construct the object.
    required_args = function(key) {
      assert_id(key)
      self$items[[key]][["required_args"]]
    }
  )
)

# Internal worker for Dictionary$get(): looks up the stored record,
# merges user-supplied args over the stored defaults, and constructs
# the object according to its type (R6 generator / function / plain value).
dictionary_retrieve = function(self, key, ...) {
  obj = get0(key, envir = self$items, inherits = FALSE, ifnotfound = NULL)
  if (is.null(obj))
    stopf("Element with key '%s' not found!%s", key, did_you_mean(key, self$keys()))

  value = obj$value
  # user-supplied arguments take precedence over stored defaults
  pars = insert_named(obj$pars, list(...))
  if (any(obj$required_args %nin% names(pars)))
    stopf("Need the arguments %s to construct '%s'", str_collapse(obj$required_args, quote = "'"), key)

  if (inherits(value, "R6ClassGenerator")) {
    value = do.call(value$new, pars)
  } else if (is.function(value)) {
    # factory functions must return an R6 object; assert_r6 enforces that
    value = assert_r6(do.call(value, pars))
  }
  return(value)
}

#' @export
as.data.table.Dictionary = function(x, ...) {
  # one-column keyed table of all stored keys; [] forces printing
  setkeyv(as.data.table(list(key = x$keys())), "key")[]
}
library(XLConnect)
library(reshape2)
library(dplyr)
library(lubridate)
library(weatherData)
library(data.table)

# Merge a time-of-day column into a POSIX date column.
#
# Copies hour/minute/second from column `time` onto column `date`,
# localises the combined timestamp to America/New_York, and drops the
# now-redundant time column.
#
# Args:
#   df:   data.frame holding both columns
#   date: name of the date column (character scalar)
#   time: name of the time column (character scalar)
# Returns: df with `date` carrying the combined timestamp, `time` removed.
fix_datetime <- function(df, date, time) {
  lubridate::hour(df[, date])   <- lubridate::hour(df[, time])
  lubridate::minute(df[, date]) <- lubridate::minute(df[, time])
  lubridate::second(df[, date]) <- lubridate::second(df[, time])
  df[, date] <- lubridate::force_tz(df[, date], tzone = "America/New_York")
  # BUG FIX: df[-which(...)] drops *every* column when `time` is absent,
  # because which() returns integer(0). A logical mask is safe either way.
  df <- df[!(names(df) %in% time)]
  return(df)
}

# --- Read and tidy the main water-quality worksheet ----
wb <- loadWorkbook("data/ShepCrk Water Quality Data - Main.xlsx")
data <- readWorksheet(wb, sheet = "sheet1", startRow = 4, endRow = 314,
                      startCol = 2, endCol = 36)
data <- fix_datetime(data, "Date", "Time")
colnames(data) <- c("Site", "Date", "Flow_Condition", "E_coli",
                    "Temperature_C", "Turbidity", "SSC", "Alkalinity",
                    "DOC", "TOC", "Chloride", "Bromide", "Nitrate",
                    "Orthophosphate", "Sulfate", "Ammoniacal_nitrogen",
                    "DIN", "TKN", "TDP", "TP", "Aluminum_dissolved",
                    "Calcium", "Copper_dissolved", "Iron_dissolved",
                    "Magnesium", "Manganese_dissolved", "Potassium",
                    "Sodium", "Zinc_dissolved", "Aluminum_total",
                    "Copper_total", "Iron_total", "Manganese_total",
                    "Zinc_total")

# BUG FIX: data[-which(is.na(...)), ] silently returns ZERO rows when no
# dates are missing; subset with a logical condition instead.
data <- data[!is.na(data$Date), ]
# columns 4:34 are the measured analytes; coerce them all to numeric
data[, c(4:34)] <- sapply(data[, c(4:34)], as.numeric)

subbasins <- read.csv("data/subbasins.csv", stringsAsFactors = FALSE)
subbasins[, c(2:20)] <- sapply(subbasins[, c(2:20)], as.numeric)

data <- inner_join(data, subbasins, by = c("Site" = "site_id"))
data <- arrange(data, Date)

# # KLUK <- getWeatherForDate("KLUK", "2005-04-19", "2007-01-11",
# #                           opt_detailed = TRUE, opt_all_columns = TRUE)
# save(KLUK, file = "data/KLUK.RData")
# NOTE(review): data(KLUK) loads the packaged/lazy-loaded dataset, not
# data/KLUK.RData written above -- confirm that is the intended source.
data(KLUK)

# Use foverlap joins from data.table to left join wq data to weather data
# between the hourly date ranges
# need to create start and end columns
KLUK <- as.data.table(KLUK)
KLUK$start <- KLUK$Time
KLUK$end <- KLUK$Time + 3600  # each weather record covers one hour

data <- as.data.table(data)
data$start <- data$Date
# Each sample's interval runs until the next sample; the final row gets a
# one-day window.
data$end <- data[2:nrow(data), start]
data[nrow(data), end := start + 3600*24]

setkey(KLUK, start, end)
data <- foverlaps(data, KLUK, type = "any", mult = "first", nomatch = 0L)
data <- data[, -c(1:2, 12, 15:18, 72:73), with = FALSE]

# BUG FIX: data[, ][i, col := v] updates a shallow COPY of the table
# (the result of data[, ]) and discards it; := must target `data` itself.
data[PrecipitationIn == "N/A", PrecipitationIn := NA]
data[, Wind_Direction := as.factor(Wind_Direction)]
data[, Wind_SpeedMPH := as.numeric(Wind_SpeedMPH)]
data[, Gust_SpeedMPH := as.numeric(Gust_SpeedMPH)]
data[, PrecipitationIn := as.numeric(PrecipitationIn)]
data[, Humidity := as.numeric(Humidity)]
data[, Conditions := as.factor(Conditions)]
data[, Site := as.factor(Site)]
data[, Flow_Condition := as.factor(Flow_Condition)]

# The STV approximates the 90th percentile of the water quality distribution
# and is intended to be a value that should not be exceeded by more than 10
# percent of the samples taken.
# http://water.epa.gov/scitech/swguidance/standards/criteria/health/recreation/upload/factsheet2012.pdf
data$limit <- ifelse(data$E_coli < 410, 0, 1)
# BUG FIX: factor(levels = c("No" = 0, "Yes" = 1)) ignores the names on
# `levels`; the readable labels must be supplied via `labels`.
data$limit <- factor(data$limit, levels = c(0, 1), labels = c("No", "Yes"))
data <- droplevels(data[!is.na(data$E_coli), ])

# --- Legacy per-site import code, retained for reference ----
# con_base <- readWorksheet(wb1, sheet = "CON", startRow = 3, endRow = 54,
#                           startCol = 2, endCol = 43)
# con_base$Flow.condition <- "Base"
# con_storm <- readWorksheet(wb1, sheet = "CON", startRow = 63, endRow = 185,
#                            startCol = 2, endCol = 43)
# colnames(con_storm) <- colnames(con_base)
# con_storm$Flow.condition = "Storm"
#
# con <- rbind(con_base, con_storm)
# con <- fix_datetime(con, "Sampling.Date", "Time")
# rm("con_base", "con_storm")
#
# variables <- c("site", "date", "temperature (degrees C)",
#                "Spec_Cond (mS)",
#                "DO_sat (% sat)",
#                "DO (mg/L)",
#                "pH (pH)",
#                "ORP (mV)",
#                "Turbidity (NTU)",
#                "Chlor (ug/L)",
#                "SSC (mg/L)",
#                "Alkalinity (mg CaCO3/L)",
#                "DOC_R5 (mg/L)",
#                "DOC_Awberc (mg/L)",
#                "TOC_R5 (mg/L)",
#                "TOC_Awberc (mg/L)",
#                "Fluoride (mg/L)",
#                "Chloride (mg/L)",
#                "Bromide (mg/L)",
#                "Nitrate (mg/L)",
#                "Orthophosphate (mg/L)",
#                "Sulfate (mg/L)",
#                "Ammoniacal_nitrogen (mg/L)",
#                "DIN (mg/L)",
#                "TKN (mg/L)",
#                "TDP (mg/L)",
#                "TP (mg/L)",
#                "Aluminum_dissolved",
#                "Calcium (mg/L)",
#                "Copper_dissolved (ug/L)",
#                "Iron_dissolved (mg/L)",
#                "Magnesium (mg/L)",
#                "Manganese_dissolved (mg/L)",
#                "Potassium (mg/L)",
#                "Sodium (mg/L)",
#                "Zinc_dissolved (ug/L)",
#                "Aluminum_total (mg/L)",
#                "Copper_total (mg/L)",
#                "Iron_total (mg/L)",
#                "Manganese_total (mg/L)",
#                "Zinc_total (ug/L)",
#                "Flow_condition")
#
# colnames(con) <- variables
#
# dri_base <- readWorksheet(wb1, sheet = "DRI", startRow = 6, endRow = 57,
#                           startCol = 2, endCol = 43)
# dri_base$Flow.Condition <- "Base"
# dri_storm <- readWorksheet(wb1, sheet = "DRI", startRow = 65, endRow = 186,
#                            startCol = 2, endCol = 43)
# dri_storm$Flow.Condition = "Storm"
#
# colnames(dri_storm) <- colnames(dri_base)
# dri <- rbind(dri_base, dri_storm)
# dri <- fix_datetime(dri, "Sampling.Date", "Time")
# rm("dri_base", "dri_storm")
#
# colnames(dri) <- variables
#
# ref7_base <- readWorksheet(wb1, sheet = "REF7", startRow = 6, endRow = 55,
#                            startCol = 2, endCol = 43)
# ref7_base$Flow.Condition <- "Base"
# ref7_base$Time <- ymd_hms(ref7_base$Time)
# ref7_storm <- readWorksheet(wb1, sheet = "REF7", startRow = 63, endRow = 179,
#                             startCol = 2, endCol = 43)
# ref7_storm$Flow.Condition = "Storm"
#
# colnames(ref7_storm) <- colnames(ref7_base)
# ref7 <- rbind(ref7_base, ref7_storm)
# ref7 <- fix_datetime(ref7, "Sampling.Date", "Time")
# rm("ref7_base", "ref7_storm")
#
# colnames(ref7) <- variables
#
# urb_base <- readWorksheet(wb1, sheet = "URB", startRow = 6, endRow = 57,
#                           startCol = 2, endCol = 43)
# urb_base$Flow.Condition <- "Base"
# urb_storm <- readWorksheet(wb1, sheet = "URB", startRow = 66, endRow = 192,
#                            startCol = 2, endCol = 43)
# urb_storm$Flow.Condition = "Storm"
#
# colnames(urb_storm) <- colnames(urb_base)
# urb <- rbind(urb_base, urb_storm)
# urb$Time <- ymd_hms(urb$Time)
# urb <- fix_datetime(urb, "Sampling.Date", "Time")
# rm("urb_base", "urb_storm")
#
# colnames(urb) <- variables
#
# roa_base <- readWorksheet(wb1, sheet = "ROA", startRow = 6, endRow = 57,
#                           startCol = 2, endCol = 43)
# roa_base$Flow.Condition <- "Base"
# roa_storm <- readWorksheet(wb1, sheet = "ROA", startRow = 64, endRow = 181,
#                            startCol = 2, endCol = 43)
# roa_storm$Flow.Condition = "Storm"
#
# colnames(roa_storm) <- colnames(roa_base)
# roa_base$Time <- ymd_hms(roa_base$Time)
# roa_storm$Time <- ymd_hms(roa_storm$Time)
# roa <- rbind(roa_base, roa_storm)
# roa <- fix_datetime(roa, "Sampling.Date", "Time")
# rm("roa_base", "roa_storm")
#
# colnames(roa) <- variables
#
# pwr_base <- readWorksheet(wb1, sheet = "PWR", startRow = 6, endRow = 50,
#                           startCol = 2, endCol = 43)
# pwr_base$Flow.Condition <- "Base"
# pwr_storm <- readWorksheet(wb1, sheet = "PWR", startRow = 57, endRow = 166,
#                            startCol = 2, endCol = 43)
# pwr_storm$Flow.Condition = "Storm"
#
# colnames(pwr_storm) <- colnames(pwr_base)
# pwr_base$Time <- ymd_hms(pwr_base$Time)
# pwr_storm$Time <- ymd_hms(pwr_storm$Time)
# pwr <- rbind(pwr_base, pwr_storm)
# pwr <- fix_datetime(pwr, "Sampling.Date", "Time")
# rm("pwr_base", "pwr_storm")
#
# colnames(pwr) <- variables
#
# shep_creek <- rbind(con, dri, pwr, ref7, roa, urb)
# shep_creek_melt <- melt(shep_creek, id.vars = c("site", "date"))
#
# shep_creek_cast <- dcast(shep_creek, site + date ~ variable, value.var = "value")
/R/clean_data.R
no_license
jentjr/ShepherdCreek
R
false
false
9,179
r
library(XLConnect) library(reshape2) library(dplyr) library(lubridate) library(weatherData) library(data.table) fix_datetime <- function(df, date, time) { lubridate::hour(df[,paste(date)]) <- lubridate::hour(df[,paste(time)]) lubridate::minute(df[,paste(date)]) <- lubridate::minute(df[,paste(time)]) lubridate::second(df[,paste(date)]) <- lubridate::second(df[,paste(time)]) df[,paste(date)] <- lubridate::force_tz(df[,paste(date)], tzone = "America/New_York") df <- df[-which(names(df) %in% paste(time))] return(df) } wb <- loadWorkbook("data/ShepCrk Water Quality Data - Main.xlsx") data <- readWorksheet(wb, sheet = "sheet1", startRow = 4, endRow = 314, startCol = 2, endCol = 36) data <- fix_datetime(data, "Date", "Time") colnames(data) <- c("Site", "Date", "Flow_Condition", "E_coli", "Temperature_C", "Turbidity", "SSC", "Alkalinity", "DOC", "TOC", "Chloride", "Bromide", "Nitrate", "Orthophosphate", "Sulfate", "Ammoniacal_nitrogen", "DIN", "TKN", "TDP", "TP", "Aluminum_dissolved", "Calcium", "Copper_dissolved", "Iron_dissolved", "Magnesium", "Manganese_dissolved", "Potassium", "Sodium", "Zinc_dissolved", "Aluminum_total", "Copper_total", "Iron_total", "Manganese_total", "Zinc_total") data <- data[-which(is.na(data$Date)), ] data[, c(4:34)] <- sapply(data[, c(4:34)], as.numeric) subbasins <- read.csv("data/subbasins.csv", stringsAsFactors = FALSE) subbasins[, c(2:20)] <- sapply(subbasins[, c(2:20)], as.numeric) data <- inner_join(data, subbasins, by = c("Site" = "site_id")) data <- arrange(data, Date) # # KLUK <- getWeatherForDate("KLUK", "2005-04-19", "2007-01-11", # # opt_detailed = TRUE, opt_all_columns = TRUE) # save(KLUK, file = "data/KLUK.RData") data(KLUK) # Use foverlap joins from data.table to left join wq data to weather data # between the hourly date ranges # need to create start and end columns KLUK <- as.data.table(KLUK) KLUK$start <- KLUK$Time KLUK$end <- KLUK$Time + 3600 data <- as.data.table(data) data$start <- data$Date data$end <- data[2:nrow(data), 
start] data[nrow(data), end:=start + 3600*24] setkey(KLUK, start, end) data <- foverlaps(data, KLUK, type = "any", mult = "first", nomatch = 0L) data <- data[,-c(1:2, 12, 15:18,72:73), with=FALSE] data[, ][PrecipitationIn == "N/A", PrecipitationIn := NA] data[, Wind_Direction := as.factor(Wind_Direction)] data[, Wind_SpeedMPH := as.numeric(Wind_SpeedMPH)] data[, Gust_SpeedMPH := as.numeric(Gust_SpeedMPH)] data[, PrecipitationIn := as.numeric(PrecipitationIn)] data[, Humidity := as.numeric(Humidity)] data[, Conditions := as.factor(Conditions)] data[, Site := as.factor(Site)] data[, Flow_Condition :=as.factor(Flow_Condition)] # The STV approximates the 90th percentile of the water quality distribution # and is intended to be a value that should not be exceeded by more than 10 # percent of the samples taken. # http://water.epa.gov/scitech/swguidance/standards/criteria/health/recreation/upload/factsheet2012.pdf data$limit <- ifelse(data$E_coli < 410, 0, 1) data$limit <- factor(data$limit, levels=c("No"=0, "Yes"=1)) data <- droplevels(data[!is.na(data$E_coli),]) # con_base <- readWorksheet(wb1, sheet = "CON", startRow = 3, endRow = 54, # startCol = 2, endCol = 43) # con_base$Flow.condition <- "Base" # con_storm <- readWorksheet(wb1, sheet = "CON", startRow = 63, endRow = 185, # startCol = 2, endCol = 43) # colnames(con_storm) <- colnames(con_base) # con_storm$Flow.condition = "Storm" # # con <- rbind(con_base, con_storm) # con <- fix_datetime(con, "Sampling.Date", "Time") # rm("con_base", "con_storm") # # variables <- c("site", "date", "temperature (degrees C)", # "Spec_Cond (mS)", # "DO_sat (% sat)", # "DO (mg/L)", # "pH (pH)", # "ORP (mV)", # "Turbidity (NTU)", # "Chlor (ug/L)", # "SSC (mg/L)", # "Alkalinity (mg CaCO3/L)", # "DOC_R5 (mg/L)", # "DOC_Awberc (mg/L)", # "TOC_R5 (mg/L)", # "TOC_Awberc (mg/L)", # "Fluoride (mg/L)", # "Chloride (mg/L)", # "Bromide (mg/L)", # "Nitrate (mg/L)", # "Orthophosphate (mg/L)", # "Sulfate (mg/L)", # "Ammoniacal_nitrogen (mg/L)", # 
"DIN (mg/L)", # "TKN (mg/L)", # "TDP (mg/L)", # "TP (mg/L)", # "Aluminum_dissolved", # "Calcium (mg/L)", # "Copper_dissolved (ug/L)", # "Iron_dissolved (mg/L)", # "Magnesium (mg/L)", # "Manganese_dissolved (mg/L)", # "Potassium (mg/L)", # "Sodium (mg/L)", # "Zinc_dissolved (ug/L)", # "Aluminum_total (mg/L)", # "Copper_total (mg/L)", # "Iron_total (mg/L)", # "Manganese_total (mg/L)", # "Zinc_total (ug/L)", # "Flow_condition") # # colnames(con) <- variables # # dri_base <- readWorksheet(wb1, sheet = "DRI", startRow = 6, endRow = 57, # startCol = 2, endCol = 43) # dri_base$Flow.Condition <- "Base" # dri_storm <- readWorksheet(wb1, sheet = "DRI", startRow = 65, endRow = 186, # startCol = 2, endCol = 43) # dri_storm$Flow.Condition = "Storm" # # colnames(dri_storm) <- colnames(dri_base) # dri <- rbind(dri_base, dri_storm) # dri <- fix_datetime(dri, "Sampling.Date", "Time") # rm("dri_base", "dri_storm") # # colnames(dri) <- variables # # ref7_base <- readWorksheet(wb1, sheet = "REF7", startRow = 6, endRow = 55, # startCol = 2, endCol = 43) # ref7_base$Flow.Condition <- "Base" # ref7_base$Time <- ymd_hms(ref7_base$Time) # ref7_storm <- readWorksheet(wb1, sheet = "REF7", startRow = 63, endRow = 179, # startCol = 2, endCol = 43) # ref7_storm$Flow.Condition = "Storm" # # colnames(ref7_storm) <- colnames(ref7_base) # ref7 <- rbind(ref7_base, ref7_storm) # ref7 <- fix_datetime(ref7, "Sampling.Date", "Time") # rm("ref7_base", "ref7_storm") # # colnames(ref7) <- variables # # urb_base <- readWorksheet(wb1, sheet = "URB", startRow = 6, endRow = 57, # startCol = 2, endCol = 43) # urb_base$Flow.Condition <- "Base" # urb_storm <- readWorksheet(wb1, sheet = "URB", startRow = 66, endRow = 192, # startCol = 2, endCol = 43) # urb_storm$Flow.Condition = "Storm" # # colnames(urb_storm) <- colnames(urb_base) # urb <- rbind(urb_base, urb_storm) # urb$Time <- ymd_hms(urb$Time) # urb <- fix_datetime(urb, "Sampling.Date", "Time") # rm("urb_base", "urb_storm") # # colnames(urb) <- variables # # 
roa_base <- readWorksheet(wb1, sheet = "ROA", startRow = 6, endRow = 57, # startCol = 2, endCol = 43) # roa_base$Flow.Condition <- "Base" # roa_storm <- readWorksheet(wb1, sheet = "ROA", startRow = 64, endRow = 181, # startCol = 2, endCol = 43) # roa_storm$Flow.Condition = "Storm" # # colnames(roa_storm) <- colnames(roa_base) # roa_base$Time <- ymd_hms(roa_base$Time) # roa_storm$Time <- ymd_hms(roa_storm$Time) # roa <- rbind(roa_base, roa_storm) # roa <- fix_datetime(roa, "Sampling.Date", "Time") # rm("roa_base", "roa_storm") # # colnames(roa) <- variables # # pwr_base <- readWorksheet(wb1, sheet = "PWR", startRow = 6, endRow = 50, # startCol = 2, endCol = 43) # pwr_base$Flow.Condition <- "Base" # pwr_storm <- readWorksheet(wb1, sheet = "PWR", startRow = 57, endRow = 166, # startCol = 2, endCol = 43) # pwr_storm$Flow.Condition = "Storm" # # colnames(pwr_storm) <- colnames(pwr_base) # pwr_base$Time <- ymd_hms(pwr_base$Time) # pwr_storm$Time <- ymd_hms(pwr_storm$Time) # pwr <- rbind(pwr_base, pwr_storm) # pwr <- fix_datetime(pwr, "Sampling.Date", "Time") # rm("pwr_base", "pwr_storm") # # colnames(pwr) <- variables # # shep_creek <- rbind(con, dri, pwr, ref7, roa, urb) # shep_creek_melt <- melt(shep_creek, id.vars = c("site", "date")) # # shep_creek_cast <- dcast(shep_creek, site + date ~ variable, value.var = "value")
#' Extract published books
#'
#' Extracts published books from Lattes list
#'
#' This function extracts relevant information on published books from a
#' Lattes list.
#'
#' @param x Lattes list (generated internally in [lattes_to_list()])
#' @param ID a unique identifier for each CV being processed. Used to build
#'   placeholder ISBNs for items that do not report one.
#'
#' @return data frame containing parsed information on published books
get_books <- function(x, ID = stats::runif(1)){
  # Published books live under this fixed path of the parsed Lattes list;
  # hoist it so the long path is written only once.
  books <- x$`PRODUCAO-BIBLIOGRAFICA`$`LIVROS-E-CAPITULOS`$`LIVROS-PUBLICADOS-OU-ORGANIZADOS`
  n.items <- length(books)

  # Pre-allocate the output so each row can be filled in place.
  out.df <- data.frame(Authors   = character(n.items),
                       Bookname  = character(n.items),
                       Publisher = character(n.items),
                       Edition   = character(n.items),
                       City      = character(n.items),
                       Pages     = character(n.items),
                       Year      = character(n.items),
                       ISBN      = character(n.items),
                       stringsAsFactors = FALSE)

  # seq_len() is empty when n.items == 0, so no guard `if (n.items)` is
  # needed (1:n.items would wrongly iterate over c(1, 0)).
  for (i in seq_len(n.items)){
    item <- lapply(books[[i]], as.list)
    out.df$Bookname[i]  <- item$`DADOS-BASICOS-DO-LIVRO`$`TITULO-DO-LIVRO`
    out.df$Publisher[i] <- item$`DETALHAMENTO-DO-LIVRO`$`NOME-DA-EDITORA`
    out.df$Edition[i]   <- item$`DETALHAMENTO-DO-LIVRO`$`NUMERO-DA-EDICAO-REVISAO`
    out.df$City[i]      <- paste0(item$`DETALHAMENTO-DO-LIVRO`$`CIDADE-DA-EDITORA`, ", ",
                                  item$`DADOS-BASICOS-DO-LIVRO`$`PAIS-DE-PUBLICACAO`)
    out.df$Pages[i]     <- item$`DETALHAMENTO-DO-LIVRO`$`NUMERO-DE-PAGINAS`
    out.df$Year[i]      <- item$`DADOS-BASICOS-DO-LIVRO`$ANO

    # Missing ISBNs get a unique "zNotAvailable" placeholder so downstream
    # de-duplication by ISBN does not collapse distinct books.
    isbn <- item$`DETALHAMENTO-DO-LIVRO`$ISBN
    out.df$ISBN[i] <- if (isbn %in% c("", " ")) {
      paste0("zNotAvailable no.", ID, "-", i)
    } else {
      isbn
    }

    out.df$Authors[i] <- get_authors(item)
  }
  return(out.df)
}
/R/get_books.R
no_license
fcampelo/ChocoLattes
R
false
false
2,165
r
#' Extract published books #' #' Extracts published books from Lattes list #' #' This function extracts relevant information on published books from a Lattes list #' #' @param x Lattes list (generated internally in [lattes_to_list()]) #' @param ID a unique identifier for each CV being processed. #' #' @return data frame containing parsed information on published books get_books <- function(x, ID = stats::runif(1)){ n.items <- length(x$`PRODUCAO-BIBLIOGRAFICA`$`LIVROS-E-CAPITULOS`$`LIVROS-PUBLICADOS-OU-ORGANIZADOS`) out.df <- data.frame(Authors = character(n.items), Bookname = character(n.items), Publisher = character(n.items), Edition = character(n.items), City = character(n.items), Pages = character(n.items), Year = character(n.items), ISBN = character(n.items), stringsAsFactors = FALSE) if (n.items){ for (i in 1:n.items){ item <- lapply(x$`PRODUCAO-BIBLIOGRAFICA`$`LIVROS-E-CAPITULOS`$`LIVROS-PUBLICADOS-OU-ORGANIZADOS`[[i]], as.list) out.df$Bookname[i] <- item$`DADOS-BASICOS-DO-LIVRO`$`TITULO-DO-LIVRO` out.df$Publisher[i] <- item$`DETALHAMENTO-DO-LIVRO`$`NOME-DA-EDITORA` out.df$Edition[i] <- item$`DETALHAMENTO-DO-LIVRO`$`NUMERO-DA-EDICAO-REVISAO` out.df$City[i] <- paste0(item$`DETALHAMENTO-DO-LIVRO`$`CIDADE-DA-EDITORA`, ", ", item$`DADOS-BASICOS-DO-LIVRO`$`PAIS-DE-PUBLICACAO`) out.df$Pages[i] <- item$`DETALHAMENTO-DO-LIVRO`$`NUMERO-DE-PAGINAS` out.df$Year[i] <- item$`DADOS-BASICOS-DO-LIVRO`$ANO out.df$ISBN[i] <- ifelse(item$`DETALHAMENTO-DO-LIVRO`$ISBN == "" | item$`DETALHAMENTO-DO-LIVRO`$ISBN == " ", paste0("zNotAvailable no.", ID, "-", i), item$`DETALHAMENTO-DO-LIVRO`$ISBN) out.df$Authors[i] <- get_authors(item) } } return(out.df) }
# Auto-generated fuzz/regression harness (RcppDeepState-style valgrind test)
# for the compiled routine DLMtool::LBSPRgen. The argument values below are
# deliberately extreme/denormal random draws used to probe for memory errors;
# do NOT "sanitise" them -- they are the test fixture.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311,
    L50 = 0, L95 = 0,
    LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199,
    -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558,
    1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89,
    -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044,
    1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296,
    -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149,
    1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186,
    -1.29599553894715e-227, 3.20314220604904e+207, 584155875194299,
    1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163,
    -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256,
    4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089,
    -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136,
    4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62,
    -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43,
    -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244,
    -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668,
    -1.47333345730049e-67, -2.92763930406321e+21),
    LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN),
    Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239,
    Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
    Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59,
    6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39,
    1.10811931066926e+139), .Dim = c(1L, 6L)),
    SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314,
    nage = 682962941L, nlen = 1623851345L,
    rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71,
    -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43,
    2.71954993859316e-126))
# Invoke the compiled routine with the fuzzed argument list; under valgrind
# this surfaces out-of-bounds reads/writes triggered by the inputs.
result <- do.call(DLMtool::LBSPRgen, testlist)
# Print the result structure so the run produces inspectable output.
str(result)
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615833786-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
2,048
r
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875194299, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 )) result <- do.call(DLMtool::LBSPRgen,testlist) str(result)
# Tests for the sliding-window matrix layout produced by
# time_series_prediction_format() (defined in the package under test).
context("Test ensemble preprocessing")

# Build a small daily xts series with Close = 1..n starting 2012-01-01,
# so the expected window/horizon values can be spelled out by hand.
create_test_xts <- function(n) {
  dates <- seq(from = as.Date("2012-01-01"), by = "days", length.out = n)
  xts(x = data.frame(Close = 1:n), order.by = dates)
}

test_that("El formato debe ser correcto para la prediccion", {
  max_window <- 5
  max_horizon <- 5
  df <- create_test_xts(20)

  x <- time_series_prediction_format(df,
    max_window = max_window,
    max_horizon = max_horizon
  )

  # First max_window columns are lag features, the remainder are horizons.
  window_names <- names(x)[1:max_window]
  pred_names <- names(x)[(max_window + 1):(max_window + max_horizon)]

  # Window columns count down towards the present: w4, w3, ..., w0;
  # horizon columns count forward: h1, ..., h5.
  expected_window_names <- paste0("w", (max_window - 1):0)
  expected_pred_names <- paste0("h", 1:max_horizon)

  expect_equal(window_names, expected_window_names)
  expect_equal(pred_names, expected_pred_names)

  values <- x[c(window_names, pred_names)]
  # 20 observations / (5 window + 5 horizon) -> two non-overlapping rows.
  expected <- rbind(1:10, 11:20)
  expect_equivalent(as.matrix(values), expected)
})

test_that("El formato debe ser correcto para la prediccion 2", {
  max_window <- 5
  max_horizon <- 1
  df <- create_test_xts(20)

  x <- time_series_prediction_format(df,
    max_window = max_window,
    max_horizon = max_horizon
  )

  window_names <- names(x)[1:max_window]
  pred_names <- names(x)[(max_window + 1):(max_window + max_horizon)]

  expected_window_names <- paste0("w", (max_window - 1):0)
  expected_pred_names <- paste0("h", 1:max_horizon)

  expect_equal(window_names, expected_window_names)
  expect_equal(pred_names, expected_pred_names)

  values <- x[c(window_names, pred_names)]
  # Row width is now 6, so only the last 18 of 20 observations fit; rows
  # appear anchored to the series end (first row starts at 3, not 1) --
  # presumably by design of the formatter.
  expected <- rbind(3:8, 9:14, 15:20)
  expect_equivalent(as.matrix(values), expected)
})
/tests/testthat/test-test-preprocessing.R
no_license
jmayalag/seer
R
false
false
1,649
r
context("Test ensemble preprocessing") create_test_xts <- function(n) { dates <- seq(from = as.Date("2012-01-01"), by = "days", length.out = n) xts(x = data.frame(Close = 1:n), order.by = dates) } test_that("El formato debe ser correcto para la prediccion", { max_window <- 5 max_horizon <- 5 df <- create_test_xts(20) x <- time_series_prediction_format(df, max_window = max_window, max_horizon = max_horizon ) window_names <- names(x)[1:max_window] pred_names <- names(x)[(max_window + 1):(max_window + max_horizon)] expected_window_names <- paste0("w", (max_window - 1):0) expected_pred_names <- paste0("h", 1:max_horizon) expect_equal(window_names, expected_window_names) expect_equal(pred_names, expected_pred_names) values <- x[c(window_names, pred_names)] expected <- rbind(1:10, 11:20) expect_equivalent(as.matrix(values), expected) }) test_that("El formato debe ser correcto para la prediccion 2", { max_window <- 5 max_horizon <- 1 df <- create_test_xts(20) x <- time_series_prediction_format(df, max_window = max_window, max_horizon = max_horizon ) window_names <- names(x)[1:max_window] pred_names <- names(x)[(max_window + 1):(max_window + max_horizon)] expected_window_names <- paste0("w", (max_window - 1):0) expected_pred_names <- paste0("h", 1:max_horizon) expect_equal(window_names, expected_window_names) expect_equal(pred_names, expected_pred_names) values <- x[c(window_names, pred_names)] expected <- rbind(3:8, 9:14, 15:20) expect_equivalent(as.matrix(values), expected) })
# Tests for the Bayesian bootstrap implementation. A fixed seed makes the
# random draws (rnorm, rudirichlet, resampling) reproducible across runs.
context("Deterministic Bayesian bootstrap tests")
library(doParallel)

set.seed(123)

test_that("rudirichlet produces a valid output", {
  rand_mat <- rudirichlet(10, 15)
  # Dirichlet draws: every entry in [0, 1] and each row sums to 1.
  expect_true(all(rand_mat >= 0 & rand_mat <= 1))
  expect_equivalent(rowSums(rand_mat), rep(1, 10))
})

# TODO: Why does this pass when using test(), but not when
# checking the package?
test_that("bayesboot produces a valid output", {
  x <- rnorm(10)

  # Statistic applied to resampled data (use.weights = FALSE).
  b1 <- bayesboot(x, mean, R = 100, R2 = 90, use.weights = FALSE)
  expect_equal(class(b1), c("bayesboot", "data.frame"))
  expect_equal(nrow(b1), 100)
  expect_equal(ncol(b1), 1)

  # Weighted statistic (use.weights = TRUE); R2 is then irrelevant.
  b2 <- bayesboot(x, weighted.mean, R = 50, R2 = NULL, use.weights = TRUE)
  expect_equal(class(b2), c("bayesboot", "data.frame"))
  expect_equal(nrow(b2), 50)
  expect_equal(ncol(b2), 1)

  # Multi-dimensional statistic: regression coefficients -> two columns.
  d <- data.frame(x = 1:10, y = rnorm(10))
  boot_stat <- function(d) {
    coef(lm(y ~ x, data = d))
  }
  b3 <- bayesboot(d, boot_stat, R = 75, R2 = 1000, use.weights = FALSE)
  expect_equal(class(b3), c("bayesboot", "data.frame"))
  expect_equal(nrow(b3), 75)
  expect_equal(ncol(b3), 2)

  # Same, but with the weights passed directly into lm().
  boot_stat <- function(d, w) {
    coef(lm(y ~ x, data = d, weights = w))
  }
  b4 <- bayesboot(d, boot_stat, R = 130, use.weights = TRUE)
  expect_equal(class(b4), c("bayesboot", "data.frame"))
  expect_equal(nrow(b4), 130)
  expect_equal(ncol(b4), 2)

  # A "stranger" bootstrap analysis with the data being chars. in a list.
  # And the statistic being the most common answer.
  data_list <- list("Yes", "Yes", "No", "Yes", "No", "Yes", "Maybe")
  boot_stat <- function(d) {
    t <- table(as.character(d))
    c(most_common_answer = names(t)[which.max(t)])
  }
  b5 <- bayesboot(data_list, boot_stat, R = 50, R2 = 20)
  expect_equal(class(b5), c("bayesboot", "data.frame"))
  expect_equal(nrow(b5), 50)
  expect_equal(ncol(b5), 1)

  # Another strange bootstrap with a statistic that outputs NAs
  # (y has length 5 and is recycled over x's 15 rows; NA should warn).
  d <- data.frame(x = 1:15, y = c(1, 2, 3, 4, NA))
  expect_warning({
    b6 <- bayesboot(d, use.weights = TRUE, R = 100, function(d, w) {
      c(weighted.mean(d$x, w), weighted.mean(d$y, w))
    })
  })

  # summary()/plot() should work on numeric results and warn on
  # non-numeric ones (b5 holds character answers).
  expect_output(summary(b1), ".")
  expect_output(summary(b2), ".")
  expect_output(summary(b3), ".")
  expect_output(summary(b4), ".")
  expect_warning(summary(b5))
  expect_true({
    plot(b1)
    plot(b2)
    plot(b3)
    plot(b4)
    TRUE
  })
  expect_warning(plot(b5))
})

test_that("bayesboot can do paralell processing", {
  library(doParallel)
  library(foreach)

  x <- rnorm(10)
  # Run the bootstrap across two workers and check the result shape is
  # identical to the sequential case.
  registerDoParallel(cores = 2)
  b1 <- bayesboot(x, mean, R = 1000, R2 = 1000, .parallel = TRUE)
  expect_equal(class(b1), c("bayesboot", "data.frame"))
  expect_equal(nrow(b1), 1000)
  expect_equal(ncol(b1), 1)
  stopImplicitCluster()
  # Reset to a single-core backend so later tests are unaffected.
  registerDoParallel(cores = 1)
  stopImplicitCluster()
})
/tests/testthat/test-bayesboot-deterministic.R
no_license
xclu/bayesboot
R
false
false
2,782
r
context("Deterministic Bayesian bootstrap tests") library(doParallel) set.seed(123) test_that("rudirichlet produces a valid output", { rand_mat <- rudirichlet(10, 15) expect_true(all(rand_mat >= 0 & rand_mat <= 1)) expect_equivalent(rowSums(rand_mat), rep(1, 10)) }) # TODO: Why does this pass when using test(), but not when # checking the package? test_that("bayesboot produces a valid output", { x <- rnorm(10) b1 <- bayesboot(x, mean, R = 100, R2 = 90, use.weights = FALSE) expect_equal(class(b1), c("bayesboot", "data.frame")) expect_equal(nrow(b1), 100) expect_equal(ncol(b1), 1) b2 <- bayesboot(x, weighted.mean, R = 50, R2 = NULL, use.weights = TRUE) expect_equal(class(b2), c("bayesboot", "data.frame")) expect_equal(nrow(b2), 50) expect_equal(ncol(b2), 1) d <- data.frame(x = 1:10, y = rnorm(10)) boot_stat <- function(d) { coef(lm(y ~ x, data = d)) } b3 <- bayesboot(d, boot_stat, R = 75, R2 = 1000, use.weights = FALSE) expect_equal(class(b3), c("bayesboot", "data.frame")) expect_equal(nrow(b3), 75) expect_equal(ncol(b3), 2) boot_stat <- function(d, w) { coef(lm(y ~ x, data = d, weights = w)) } b4 <- bayesboot(d, boot_stat, R = 130, use.weights = TRUE) expect_equal(class(b4), c("bayesboot", "data.frame")) expect_equal(nrow(b4), 130) expect_equal(ncol(b4), 2) # A "stranger" bootstrap analysis with the data being chars. in a list. # And the statistc being the most common answer. 
data_list <- list("Yes", "Yes", "No", "Yes", "No", "Yes", "Maybe") boot_stat <- function(d) { t <- table(as.character(d)) c(most_common_answer = names(t)[ which.max(t)]) } b5 <- bayesboot(data_list, boot_stat, R = 50, R2 = 20) expect_equal(class(b5), c("bayesboot", "data.frame")) expect_equal(nrow(b5), 50) expect_equal(ncol(b5), 1) # Another strange bootstrap with a statistic that outputs NAs d <- data.frame(x = 1:15, y = c(1, 2, 3, 4, NA)) expect_warning({ b6 <- bayesboot(d, use.weights = TRUE, R = 100, function(d, w) { c(weighted.mean(d$x, w), weighted.mean(d$y, w)) }) }) expect_output(summary(b1), ".") expect_output(summary(b2), ".") expect_output(summary(b3), ".") expect_output(summary(b4), ".") expect_warning(summary(b5)) expect_true({ plot(b1) plot(b2) plot(b3) plot(b4) TRUE }) expect_warning(plot(b5)) }) test_that("bayesboot can do paralell processing", { library(doParallel) library(foreach) x <- rnorm(10) registerDoParallel(cores = 2) b1 <- bayesboot(x, mean, R = 1000, R2 = 1000, .parallel = TRUE) expect_equal(class(b1), c("bayesboot", "data.frame")) expect_equal(nrow(b1), 1000) expect_equal(ncol(b1), 1) stopImplicitCluster() registerDoParallel(cores = 1) stopImplicitCluster() })
#'
#' H2O Model Related Functions
#'
#' @importFrom graphics strwidth par legend polygon arrows points grid
#' @importFrom grDevices dev.copy dev.off png rainbow adjustcolor
NULL

#-----------------------------------------------------------------------------------------------------------------------
# Helper Functions
#-----------------------------------------------------------------------------------------------------------------------

#'
#' Used to verify data, x, y and turn into the appropriate things
#'
#' Resolves `x`/`y` given either as column names or as 1-based indices against
#' the columns of `data`, and computes the complementary ignored-column list.
#'
#' @param data H2OFrame
#' @param x features (column names or indices)
#' @param y response (column name or index; must be absent for autoencoders)
#' @param autoencoder autoencoder flag (unsupervised: no response)
#' @return list(x, y, x_i, x_ignore, y_i) for supervised calls,
#'         list(x, x_i, x_ignore) for autoencoders.
.verify_dataxy <- function(data, x, y, autoencoder = FALSE) {
  if (!is.character(x) && !is.numeric(x))
    stop('`x` must be column names or indices')
  if (!autoencoder)
    if (!is.character(y) && !is.numeric(y))
      stop('`y` must be a column name or index')

  cc <- colnames(chk.H2OFrame(data))
  if (is.character(x)) {
    if (!all(x %in% cc))
      stop("Invalid column names: ", paste(x[!(x %in% cc)], collapse = ','))
    x_i <- match(x, cc)
  } else {
    # BUGFIX: the range check previously compared against attr(x, 'ncol'),
    # which is NULL for a plain numeric vector, so out-of-range indices
    # silently slipped through (any(logical(0)) is FALSE) and became NA names.
    if (any(x < 1L | x > length(cc)))
      stop('out of range explanatory variable ', paste(x[x < 1L | x > length(cc)], collapse = ','))
    x_i <- x
    x <- cc[x_i]
  }

  x_ignore <- c()
  if (!autoencoder) {
    if (is.character(y)) {
      if (!(y %in% cc))
        stop(y, ' is not a column name')
      y_i <- which(y == cc)
    } else {
      if (y < 1L || y > length(cc))
        stop('response variable index ', y, ' is out of range')
      y_i <- y
      y <- cc[y]
    }
    # The response must not also be listed as a predictor.
    # (The old extra `!autoencoder &&` here was redundant inside this branch.)
    if (y %in% x) {
      warning('removing response variable from the explanatory variables')
      x <- setdiff(x, y)
    }
    x_ignore <- setdiff(setdiff(cc, x), y)
    if (length(x_ignore) == 0L) x_ignore <- ''
    return(list(x = x, y = y, x_i = x_i, x_ignore = x_ignore, y_i = y_i))
  } else {
    x_ignore <- setdiff(cc, x)
    if (!missing(y)) stop("`y` should not be specified for autoencoder=TRUE, remove `y` input")
    return(list(x = x, x_i = x_i, x_ignore = x_ignore))
  }
}

# Resolve a set of columns (names or indices) against `data`, mirroring
# .verify_dataxy but without a response column.
# An empty string selects all columns.
.verify_datacols <- function(data, cols) {
  if (!is.character(cols) && !is.numeric(cols))
    stop('`cols` must be column names or indices')

  cc <- colnames(chk.H2OFrame(data))
  if (length(cols) == 1L && cols == '') cols <- cc
  if (is.character(cols)) {
    if (!all(cols %in% cc))
      stop("Invalid column names: ", paste(cols[which(!cols %in% cc)], collapse = ", "))
    cols_ind <- match(cols, cc)
  } else {
    if (any(cols < 1L | cols > length(cc)))
      stop('out of range explanatory variable ', paste(cols[cols < 1L | cols > length(cc)], collapse = ','))
    cols_ind <- cols
    cols <- cc[cols_ind]
  }
  cols_ignore <- setdiff(cc, cols)
  if (length(cols_ignore) == 0L) cols_ignore <- ''
  list(cols = cols, cols_ind = cols_ind, cols_ignore = cols_ignore)
}

# Build a confusion matrix from a list of per-class counts `cm`.
# Appends a totals row and a per-row error column (plus the overall error in
# the bottom-right cell, rounded to 3 decimals).
.build_cm <- function(cm, actual_names = NULL, predict_names = actual_names, transpose = TRUE) {
  categories <- length(cm)
  cf_matrix <- matrix(unlist(cm), nrow = categories)
  if (transpose)
    cf_matrix <- t(cf_matrix)
  cf_total <- apply(cf_matrix, 2L, sum)
  # Per-row error = 1 - correct/row-total; last element is the overall error.
  cf_error <- c(1 - diag(cf_matrix) / apply(cf_matrix, 1L, sum), 1 - sum(diag(cf_matrix)) / sum(cf_matrix))
  cf_matrix <- rbind(cf_matrix, cf_total)
  cf_matrix <- cbind(cf_matrix, round(cf_error, 3L))
  if (!is.null(actual_names))
    dimnames(cf_matrix) <- list(Actual = c(actual_names, "Totals"), Predicted = c(predict_names, "Error"))
  cf_matrix
}

# Start a model-build job for `algo` and block until the model is ready.
.h2o.modelJob <- function(algo, params, h2oRestApiVersion = .h2o.__REST_API_VERSION, verbose = FALSE) {
  # BUGFIX: the first check previously tested validation_frame but evaluated
  # training_frame — a copy-paste error that skipped forcing the training
  # frame whenever no validation frame was supplied.
  if (!is.null(params$training_frame))
    .eval.frame(params$training_frame)
  if (!is.null(params$validation_frame))
    .eval.frame(params$validation_frame)
  if ("stopping_metric" %in% names(params)) {
    if (params$stopping_metric == "r2")
      stop("r2 cannot be used as an early stopping_metric yet. Check this JIRA https://0xdata.atlassian.net/browse/PUBDEV-5381 for progress.")
  }
  if (algo == "pca" && is.null(params$k)) # make sure to set k=1 for default for pca
    params$k <- 1
  job <- .h2o.startModelJob(algo, params, h2oRestApiVersion)
  .h2o.getFutureModel(job, verbose = verbose)
}

# Kick off a (non-blocking) model build and return an H2OModelFuture handle.
.h2o.startModelJob <- function(algo, params, h2oRestApiVersion) {
  .key.validate(params$key)
  #---------- Params ----------#
  param_values <- .h2o.makeModelParams(algo, params, h2oRestApiVersion)
  #---------- Build! ----------#
  res <- .h2o.__remoteSend(method = "POST", .h2o.__MODEL_BUILDERS(algo), .params = param_values, h2oRestApiVersion = h2oRestApiVersion)
  .h2o.processResponseWarnings(res)
  #---------- Output ----------#
  job_key <- res$job$key$name
  dest_key <- res$job$dest$name
  new("H2OModelFuture", job_key = job_key, model_id = dest_key)
}

# Fetch the backend's parameter schema for `algo` and type-check/unify the
# user-supplied params against it.
.h2o.makeModelParams <- function(algo, params, h2oRestApiVersion) {
  #---------- Force evaluate temporary ASTs ----------#
  ALL_PARAMS <- .h2o.__remoteSend(method = "GET", h2oRestApiVersion = h2oRestApiVersion, .h2o.__MODEL_BUILDERS(algo))$model_builders[[algo]]$parameters
  #---------- Check user parameter types ----------#
  param_values <- .h2o.checkAndUnifyModelParameters(algo = algo, allParams = ALL_PARAMS, params = params)
  #---------- Validate parameters ----------#
  #.h2o.validateModelParameters(algo, param_values, h2oRestApiVersion)
  return(param_values)
}

# Surface any WARN-level messages from a model-build response as R warnings.
.h2o.processResponseWarnings <- function(res) {
  if (length(res$messages) != 0L) {
    warn <- lapply(res$messages, function(y) {
      # is.list() instead of class(y) == "list": the latter errors in an
      # `if` condition when the object carries more than one class (R >= 4.2).
      if (is.list(y) && y$message_type == "WARN")
        paste0(y$message, ".\n")
      else ""
    })
    if (any(nzchar(warn))) warning(warn)
  }
}

# Kick off a (non-blocking) segment-models build; returns a future handle.
.h2o.startSegmentModelsJob <- function(algo, segment_params, params, h2oRestApiVersion) {
  #---------- Params ----------#
  param_values <- .h2o.makeModelParams(algo, params, h2oRestApiVersion)
  param_values$segment_models_id <- segment_params$segment_models_id
  param_values$segment_columns <- .collapse.char(segment_params$segment_columns)
  param_values$parallelism <- segment_params$parallelism
  #---------- Build! ----------#
  job <- .h2o.__remoteSend(method = "POST", .h2o.__SEGMENT_MODELS_BUILDERS(algo), .params = param_values, h2oRestApiVersion = h2oRestApiVersion)
  job_key <- job$key$name
  dest_key <- job$dest$name
  new("H2OSegmentModelsFuture", job_key = job_key, segment_models_id = dest_key)
}

# Blocking wrapper around .h2o.startSegmentModelsJob.
.h2o.segmentModelsJob <- function(algo, segment_params, params, h2oRestApiVersion) {
  .key.validate(segment_params$segment_models_id)
  sm <- .h2o.startSegmentModelsJob(algo, segment_params, params, h2oRestApiVersion)
  .h2o.getFutureSegmentModels(sm)
}

# Wait for a segment-models future to finish and fetch the result.
.h2o.getFutureSegmentModels <- function(object) {
  .h2o.__waitOnJob(object@job_key)
  h2o.get_segment_models(object@segment_models_id)
}

#
# Validate given parameters against algorithm parameters validation
# REST end-point. Stop execution in case of validation error.
#
.h2o.validateModelParameters <- function(algo, params, h2oRestApiVersion = .h2o.__REST_API_VERSION) {
  validation <- .h2o.__remoteSend(method = "POST", paste0(.h2o.__MODEL_BUILDERS(algo), "/parameters"), .params = params, h2oRestApiVersion = h2oRestApiVersion)
  if (length(validation$messages) != 0L) {
    error <- lapply(validation$messages, function(x) {
      if (x$message_type == "ERRR")
        paste0(x$message, ".\n")
      else ""
    })
    if (any(nzchar(error))) stop(error)
    warn <- lapply(validation$messages, function(i) {
      if (i$message_type == "WARN")
        paste0(i$message, ".\n")
      else ""
    })
    if (any(nzchar(warn))) warning(warn)
  }
}

# Start a model build and block until it completes.
.h2o.createModel <- function(algo, params, h2oRestApiVersion = .h2o.__REST_API_VERSION) {
  .h2o.getFutureModel(.h2o.startModelJob(algo, params, h2oRestApiVersion))
}

# Progress callback used while polling a model-build job (verbose mode only).
.h2o.pollModelUpdates <- function(job) {
  cat(paste0("\nScoring History for Model ", job$dest$name, " at ", Sys.time(), "\n"))
  print(paste0("Model Build is ", job$progress * 100, "% done..."))
  if (!is.null(job$progress_msg)) {
    # print(tail(h2o.getModel(job$dest$name)@model$scoring_history))
  } else {
    print("Scoring history is not available yet...")
    # Catch 404 with scoring history. Can occur when nfolds >= 2
  }
}

# Wait for a model future to finish and fetch the built model.
.h2o.getFutureModel <- function(object, verbose = FALSE) {
  # BUGFIX: this previously used ifelse(verbose, .h2o.pollModelUpdates, as.null).
  # ifelse() replicates its branches with rep(), which cannot replicate a
  # closure, so verbose = TRUE raised an error instead of enabling polling.
  pollUpdates <- if (verbose) .h2o.pollModelUpdates else as.null
  .h2o.__waitOnJob(object@job_key, pollUpdates = pollUpdates)
  h2o.getModel(object@model_id)
}

# Normalize user-facing x/y/frame arguments into the flat REST parameter list
# (ignored_columns / response_column), for supervised and unsupervised algos.
.h2o.prepareModelParameters <- function(algo, params, is_supervised) {
  if (!is.null(params$training_frame))
    params$training_frame <- chk.H2OFrame(params$training_frame)
  if (!is.null(params$validation_frame))
    params$validation_frame <- chk.H2OFrame(params$validation_frame)

  # Check if specified model request is for supervised algo
  isSupervised <- if (!is.null(is_supervised)) is_supervised else .isSupervised(algo, params)

  if (isSupervised) {
    if (!is.null(params$x)) { x <- params$x; params$x <- NULL }
    if (!is.null(params$y)) { y <- params$y; params$y <- NULL }
    args <- .verify_dataxy(params$training_frame, x, y)
    # Special columns must not be reported as ignored.
    # (Each condition was previously duplicated: `!is.null(a) && !is.null(a)`.)
    if (!is.null(params$offset_column))
      args$x_ignore <- args$x_ignore[!(params$offset_column == args$x_ignore)]
    if (!is.null(params$weights_column))
      args$x_ignore <- args$x_ignore[!(params$weights_column == args$x_ignore)]
    if (!is.null(params$fold_column))
      args$x_ignore <- args$x_ignore[!(params$fold_column == args$x_ignore)]
    params$ignored_columns <- args$x_ignore
    params$response_column <- args$y
  } else {
    if (!is.null(params$x)) {
      x <- params$x
      params$x <- NULL
      args <- .verify_datacols(params$training_frame, x)
      params$ignored_columns <- args$cols_ignore
    }
  }
  # Note: Magic copied from start .h2o.startModelJob
  params <- lapply(params, function(x) { if (is.integer(x)) x <- as.numeric(x); x })
  params
}

# Fetch the backend parameter schema for `algo`.
.h2o.getModelParameters <- function(algo, h2oRestApiVersion = .h2o.__REST_API_VERSION) {
  .h2o.__remoteSend(method = "GET", .h2o.__MODEL_BUILDERS(algo), h2oRestApiVersion = h2oRestApiVersion)$model_builders[[algo]]$parameters
}

# Type-check each user parameter against the backend schema (allParams) and
# transform it into its wire representation. Uses `<<-` to update `params`
# from inside the lapply() closure; errors are accumulated and raised at once.
.h2o.checkAndUnifyModelParameters <- function(algo, allParams, params, hyper_params = list()) {
  # First verify all parameters
  error <- lapply(allParams, function(i) {
    e <- ""
    name <- i$name
    # R treats integer as not numeric
    if (is.integer(params[[name]])) {
      params[[name]] <- as.numeric(params[[name]])
    }
    if (i$required && !((name %in% names(params)) || (name %in% names(hyper_params)))) {
      e <- paste0("argument \"", name, "\" is missing, with no default\n")
    } else if (name %in% names(params)) {
      e <- .h2o.checkParam(i, params[[name]])
      if (!nzchar(e)) {
        params[[name]] <<- .h2o.transformParam(i, params[[name]])
      }
    }
    e
  })
  if (any(nzchar(error))) stop(error)

  #---------- Create parameter list to pass ----------#
  param_values <- lapply(params, function(i) {
    if (is.H2OFrame(i)) h2o.getId(i) else i
  })
  param_values
}

# Long precision: TRUE when `v` (number or string) represents an integer that
# fits in a signed 64-bit long. Non-numeric strings yield FALSE.
.is.int64 <- function(v) {
  number <- suppressWarnings(as.numeric(v))
  if (is.na(number))
    FALSE
  else
    number > -2^63 && number < 2^63 && (floor(number) == ceiling(number))
}

# Precise int in double precision: TRUE when `v` is an integer exactly
# representable in an IEEE-754 double (|v| < 2^53).
.is.int53 <- function(v) {
  number <- suppressWarnings(as.numeric(v))
  if (is.na(number))
    FALSE
  else
    number > -2^53 && number < 2^53 && (floor(number) == ceiling(number))
}

# Check definition of given parameters in given list of parameters
# Returns error message or empty string
# Note: this function has no side-effects!
# Check a single parameter value against its backend schema entry (paramDef).
# Returns an error message, or "" when the value is acceptable.
# The Java-to-R type translation comes from the package-level `.type.map`
# lookup table; mapping[1,1] is the R type, mapping[1,2] whether it is scalar.
# Note: this function has no side-effects.
.h2o.checkParam <- function(paramDef, paramValue) {
  e <- ""
  # Fetch mapping for given Java to R types
  mapping <- .type.map[paramDef$type,]
  type <- mapping[1L, 1L]
  scalar <- mapping[1L, 2L]
  name <- paramDef$name
  if (is.na(type))
    stop("Cannot find type ", paramDef$type, " in .type.map")
  if (scalar) { # scalar == TRUE
    # Model references travel over the wire as their string id.
    if (type == "H2OModel") type <- "character"
    if (name == "seed") {
      # "seed" is special: a 64-bit long that may be given either as a number
      # or as a string (to avoid double-precision loss beyond 53 bits).
      if (is.character(paramValue) && !.is.int64(paramValue))
        e <- paste0("\"seed\" must be of type long or string long, but got a string which cannot be converted to long.\n")
      else if (is.numeric(paramValue)) {
        if (!.is.int64(paramValue)) {
          e <- paste0("\"seed\" must be of type long or string long, but got a number which cannot be converted to long.\n")
        } else if (!.is.int53(paramValue)) {
          # Value fits a long but not a double: warn about precision loss.
          warning("R can handle only 53-bit integer without loss. If you need to use a less/larger number than the integer, pass seed parameter as the string number. Otherwise, the seed could be inconsistent. (For example, if you need to use autogenerated seed like -8664354335142703762 from H2O server.)")
        }
      }
    } else {
      if (!inherits(paramValue, type)) {
        e <- paste0(e, "\"", name , "\" must be of type ", type, ", but got ", class(paramValue), ".\n")
      } else if ((length(paramDef$values) > 1L) && (is.null(paramValue) || !(tolower(paramValue) %in% tolower(paramDef$values)))) {
        # Enum-like parameter: value must be one of the declared levels
        # (case-insensitive).
        e <- paste0(e, "\"", name,"\" must be in")
        for (fact in paramDef$values)
          e <- paste0(e, " \"", fact, "\",")
        e <- paste(e, "but got", paramValue)
      }
    }
  } else { # scalar == FALSE
    if (!inherits(paramValue, type))
      e <- paste0("vector of ", name, " must be of type ", type, ", but got ", class(paramValue), ".\n")
  }
  e
}

# Convert a checked parameter value into its REST wire representation:
# - +/-Inf become the strings "Infinity"/"-Infinity" (JSON-safe),
# - H2OFrames are replaced by their ids,
# - vectors are collapsed into the bracketed string form the backend expects
#   (with per-element quoting for strings, tuples for StringPair[]/KeyValue[]).
.h2o.transformParam <- function(paramDef, paramValue, collapseArrays = TRUE) {
  # Fetch mapping for given Java to R types
  mapping <- .type.map[paramDef$type,]
  type <- mapping[1L, 1L]
  scalar <- mapping[1L, 2L]
  name <- paramDef$name
  if (scalar) { # scalar == TRUE
    if (inherits(paramValue, 'numeric') && paramValue == Inf) {
      paramValue <- "Infinity"
    } else if (inherits(paramValue, 'numeric') && paramValue == -Inf) {
      paramValue <- "-Infinity"
    }
  } else { # scalar == FALSE
    if (inherits(paramValue, 'numeric')) {
      # Replace infinite entries element-wise before serialization.
      k = which(paramValue == Inf | paramValue == -Inf)
      if (length(k) > 0)
        for (n in k)
          if (paramValue[n] == Inf)
            paramValue[n] <- "Infinity"
          else
            paramValue[n] <- "-Infinity"
    }
    if (collapseArrays) {
      # A vector that contains frames is mapped to quoted frame ids
      # (NULL stays NULL, all-NA entries stay NA).
      if(any(sapply(paramValue, function(x) !is.null(x) && is.H2OFrame(x))))
        paramValue <- lapply( paramValue, function(x) { if (is.null(x)) NULL else if (all(is.na(x))) NA else paste0('"',h2o.getId(x),'"') })
      if (type == "character")
        paramValue <- .collapse.char(paramValue)
      else if (paramDef$type == "StringPair[]")
        paramValue <- .collapse(sapply(paramValue, .collapse.tuple.string))
      else if (paramDef$type == "KeyValue[]") {
        f <- function(i) { .collapse.tuple.key_value(paramValue[i]) }
        paramValue <- .collapse(sapply(seq(length(paramValue)), f))
      } else
        paramValue <- .collapse(paramValue)
    }
  }
  # A bare frame argument is always sent as its id.
  if( is.H2OFrame(paramValue) )
    paramValue <- h2o.getId(paramValue)
  paramValue
}

# Wrap a string in literal double quotes for the wire format.
.escape.string <- function(xi) {
  paste0("\"", xi, "\"")
}

# Serialize a pair as a tuple with both members quoted.
.collapse.tuple.string <- function(x) {
  .collapse.tuple(x, .escape.string)
}

# Serialize a named single-element list as {key: "name", value: <value>}.
.collapse.tuple.key_value <- function(x) {
  .collapse.tuple(list(
    key = .escape.string(names(x)),
    value = x[[1]]
  ), identity)
}

# Serialize a (possibly named) list as "{n1: v1,n2: v2,...}"; unnamed
# elements get letter names a, b, c, ... `escape` formats each value.
.collapse.tuple <- function(x, escape) {
  names <- names(x)
  if (is.null(names)) names <- letters[1:length(x)]
  r <- c()
  for (i in 1:length(x)) {
    s <- paste0(names[i], ": ", escape(x[i]))
    r <- c(r, s)
  }
  paste0("{", paste0(r, collapse = ","), "}")
}

# Validate a given set of hyper parameters
# against algorithm definition.
# Transform all parameters in the same way as normal algorithm
# would do.
# Validate every entry of `hyper_params` against the backend schema
# (allParams), reject non-gridable parameters, and rewrite each accepted
# hyper value into its wire form. `hyper_params[[name]] <<- ...` updates the
# argument from inside the lapply() closure; all errors are raised together.
.h2o.checkAndUnifyHyperParameters <- function(algo, allParams, hyper_params, do_hyper_params_check) {
  errors <- lapply(allParams, function(paramDef) {
    e <- ""
    name <- paramDef$name
    hyper_names <- names(hyper_params)
    # First reject all non-gridable hyper parameters
    if (!paramDef$gridable && (name %in% hyper_names)) {
      e <- paste0("argument \"", name, "\" is not gridable\n")
    } else if (name %in% hyper_names) { # Check all specified hyper parameters
      # Hyper values for `name` parameter
      hyper_vals <- hyper_params[[name]]
      # Collect all possible verification errors
      if (do_hyper_params_check) {
        he <- lapply(hyper_vals, function(hv) {
          # Transform all integer values to numeric
          hv <- if (is.integer(hv)) as.numeric(hv) else hv
          .h2o.checkParam(paramDef, hv)
        })
        e <- paste(he, collapse='')
      }
      # If there is no error then transform hyper values
      if (!nzchar(e)) {
        is_scalar <- .type.map[paramDef$type,][1L, 2L]
        transf_fce <- function(hv) {
          # R does not treat integers as numeric
          if (is.integer(hv)) {
            hv <- as.numeric(hv)
          }
          mapping <- .type.map[paramDef$type,]
          type <- mapping[1L, 1L]
          # Note: we apply this transformation also for types
          # reported by the backend as scalar because of PUBDEV-1955
          if (is.list(hv)) {
            hv <- as.vector(hv, mode=type)
          }
          # Force evaluation of frames and fetch frame_id as
          # a side effect
          if (is.H2OFrame(hv) )
            hv <- h2o.getId(hv)
          .h2o.transformParam(paramDef, hv, collapseArrays = FALSE)
        }
        transf_hyper_vals <- if (is_scalar) sapply(hyper_vals,transf_fce) else lapply(hyper_vals, transf_fce)
        hyper_params[[name]] <<- transf_hyper_vals
      }
    }
    e
  })
  if(any(nzchar(errors))) stop(errors)
  hyper_params
}

#' Predict on an H2O Model
#'
#' Obtains predictions from various fitted H2O model objects.
#'
#' This method dispatches on the type of H2O model to select the correct
#' prediction/scoring algorithm.
#' The order of the rows in the results is the same as the order in which the
#' data was loaded, even if some rows fail (for example, due to missing
#' values or unseen factor levels).
#'
#' @param object a fitted \linkS4class{H2OModel} object for which prediction is
#'        desired
#' @param newdata An H2OFrame object in which to look for
#'        variables with which to predict.
#' @param ... additional arguments to pass on.
#' @return Returns an H2OFrame object with probabilities and
#'         default predictions.
#' @seealso \code{\link{h2o.deeplearning}}, \code{\link{h2o.gbm}},
#'          \code{\link{h2o.glm}}, \code{\link{h2o.randomForest}} for model
#'          generation in h2o.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv"
#' insurance <- h2o.importFile(f)
#' predictors <- colnames(insurance)[1:4]
#' response <- "Claims"
#' insurance['Group'] <- as.factor(insurance['Group'])
#' insurance['Age'] <- as.factor(insurance['Age'])
#' splits <- h2o.splitFrame(data = insurance, ratios = 0.8, seed = 1234)
#' train <- splits[[1]]
#' valid <- splits[[2]]
#' insurance_gbm <- h2o.gbm(x = predictors, y = response,
#'                          training_frame = train,
#'                          validation_frame = valid,
#'                          distribution = "huber",
#'                          huber_alpha = 0.9, seed = 1234)
#' h2o.predict(insurance_gbm, newdata = insurance)
#' }
#' @export
predict.H2OModel <- function(object, newdata, ...) {
  # S3 entry point; delegates to the h2o.predict method for H2OModel.
  h2o.predict.H2OModel(object, newdata, ...)
}

#' Predict on an H2O Model
#'
#' @param object a fitted model object for which prediction is desired.
#' @param newdata An H2OFrame object in which to look for
#'        variables with which to predict.
#' @param ... additional arguments to pass on.
#' @return Returns an H2OFrame object with probabilities and
#'         default predictions.
#' @export
h2o.predict <- function(object, newdata, ...){
  UseMethod("h2o.predict", object)
}

#' Use H2O Transformation model and apply the underlying transformation
#'
#' @param model A trained model representing the transformation strategy
#' @param ... Transformation model-specific parameters
#' @return Returns an H2OFrame object with data transformed.
#' @export
setGeneric("h2o.transform", function(model, ...) {
  # Type check happens before S4 dispatch so any non-model argument fails
  # with a clear message instead of a "unable to find method" error.
  if(!is(model, "H2OModel")) {
    stop(paste("Argument 'model' must be an H2O Model. Received:", class(model)))
  }
  standardGeneric("h2o.transform")
})

#' Applies target encoding to a given dataset
#'
#' @param model A trained model representing the transformation strategy
#' @param data An H2OFrame with data to be transformed
#' @param blending Use blending during the transformation. Respects model settings when not set.
#' @param inflection_point Blending parameter. Only effective when blending is enabled.
#'        By default, model settings are respected, if not overridden by this setting.
#' @param smoothing Blending parameter. Only effective when blending is enabled.
#'        By default, model settings are respected, if not overridden by this setting.
#' @param noise An amount of random noise added to the encoding, this helps prevent overfitting.
#'        By default, model settings are respected, if not overridden by this setting.
#' @param as_training Must be set to True when encoding the training frame. Defaults to False.
#' @param ... Mainly used for backwards compatibility, to allow deprecated parameters.
#' @return Returns an H2OFrame object with data transformed.
#' @export
setMethod("h2o.transform", signature("H2OTargetEncoderModel"), function(model, data, blending = NULL, inflection_point = -1, smoothing = -1, noise = NULL, as_training = FALSE, ...) {
  # Handle deprecated arguments passed through `...` before building the
  # REST request; unknown arguments are an error.
  varargs <- list(...)
  for (arg in names(varargs)) {
    if (arg %in% c('data_leakage_handling', 'seed')) {
      warning(paste0("argument '", arg, "' is deprecated and will be ignored; please define it instead on model creation using `h2o.targetencoder`."))
      argval <- varargs[[arg]]
      if (arg == 'data_leakage_handling' && argval != "None") {
        warning(paste0("Deprecated `data_leakage_handling=",argval,"` is replaced by `as_training=True`. ", "Please update your code."))
        as_training <- TRUE
      }
    } else if (arg == 'use_blending') {
      warning("argument 'use_blending' is deprecated; please use 'blending' instead.")
      if (missing(blending)) blending <- varargs$use_blending
      else warning("ignoring 'use_blending' as 'blending' was also provided.")
    } else {
      stop(paste("unused argument", arg, "=", varargs[[arg]]))
    }
  }
  # Assemble the request; blending falls back to the model's training-time
  # setting, and the blending hyper-parameters are only sent when enabled.
  params <- list()
  params$model <- model@model_id
  params$frame <- h2o.getId(data)
  if (is.null(blending)){
    params$blending <- model@allparameters$blending
  } else {
    params$blending <- blending
  }
  if (params$blending) {
    params$inflection_point <- inflection_point
    params$smoothing <- smoothing
  }
  if (!is.null(noise)){
    params$noise <- noise
  }
  params$as_training <- as_training
  res <- .h2o.__remoteSend(
    "TargetEncoderTransform",
    method = "GET",
    h2oRestApiVersion = 3,.params = params
  )
  h2o.getFrame(res$name)
})

#'
#' Transform words (or sequences of words) to vectors using a word2vec model.
#'
#' @param model A word2vec model.
#' @param words An H2OFrame made of a single column containing source words.
#' @param aggregate_method Specifies how to aggregate sequences of words. If method is `NONE`
#'        then no aggregation is performed and each input word is mapped to a single word-vector.
#'        If method is 'AVERAGE' then input is treated as sequences of words delimited by NA.
#'        Each word of a sequences is internally mapped to a vector and vectors belonging to
#'        the same sentence are averaged and returned in the result.
#' @examples
#' \dontrun{
#' h2o.init()
#'
#' # Build a simple word2vec model
#' data <- as.character(as.h2o(c("a", "b", "a")))
#' w2v_model <- h2o.word2vec(data, sent_sample_rate = 0, min_word_freq = 0, epochs = 1, vec_size = 2)
#'
#' # Transform words to vectors without aggregation
#' sentences <- as.character(as.h2o(c("b", "c", "a", NA, "b")))
#' h2o.transform(w2v_model, sentences) # -> 5 rows total, 2 rows NA ("c" is not in the vocabulary)
#'
#' # Transform words to vectors and return average vector for each sentence
#' h2o.transform(w2v_model, sentences, aggregate_method = "AVERAGE") # -> 2 rows
#' }
#' @export
setMethod("h2o.transform", signature("H2OWordEmbeddingModel"), function(model, words, aggregate_method = c("NONE", "AVERAGE")) {
  if (!is(model, "H2OModel")) stop(paste("The argument 'model' must be a word2vec model. Received:", class(model)))
  if (missing(words)) stop("`words` must be specified")
  if (!is.H2OFrame(words)) stop("`words` must be an H2OFrame")
  if (ncol(words) != 1) stop("`words` frame must contain a single string column")
  # Default to the first choice when the caller did not pick one.
  if (length(aggregate_method) > 1)
    aggregate_method <- aggregate_method[1]
  res <- .h2o.__remoteSend(method="GET", "Word2VecTransform", model = model@model_id, words_frame = h2o.getId(words), aggregate_method = aggregate_method)
  key <- res$vectors_frame$name
  h2o.getFrame(key)
})

#'
#' @rdname predict.H2OModel
#' @export
h2o.predict.H2OModel <- function(object, newdata, ...) {
  if (missing(newdata)) {
    stop("predictions with a missing `newdata` argument is not implemented yet")
  }
  # Send keys to create predictions; v4 of the API returns an async job.
  url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method = "POST", h2oRestApiVersion = 4)
  job_key <- res$key$name
  dest_key <- res$dest$name
  .h2o.__waitOnJob(job_key)
  h2o.getFrame(dest_key)
}

#' Predict the Leaf Node Assignment on an H2O Model
#'
#' Obtains leaf node assignment from fitted H2O model objects.
#'
#' For every row in the test set, return the leaf placements of the row in all the trees in the model.
#' Placements can be represented either by paths to the leaf nodes from the tree root or by H2O's internal identifiers.
#' The order of the rows in the results is the same as the order in which the
#' data was loaded
#'
#' @param object a fitted \linkS4class{H2OModel} object for which prediction is
#'        desired
#' @param newdata An H2OFrame object in which to look for
#'        variables with which to predict.
#' @param type choice of either "Path" when tree paths are to be returned (default); or "Node_ID" when the output
#'        should be the leaf node IDs.
#' @param ... additional arguments to pass on.
#' @return Returns an H2OFrame object with categorical leaf assignment identifiers for
#'         each tree in the model.
#' @seealso \code{\link{h2o.gbm}} and \code{\link{h2o.randomForest}} for model
#'          generation in h2o.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate)
#' h2o.predict(prostate_gbm, prostate)
#' h2o.predict_leaf_node_assignment(prostate_gbm, prostate)
#' }
#' @export
predict_leaf_node_assignment.H2OModel <- function(object, newdata, type = c("Path", "Node_ID"), ...)
{
  if (missing(newdata)) {
    stop("predictions with a missing `newdata` argument is not implemented yet")
  }
  params <- list(leaf_node_assignment = TRUE)
  # Only forward `type` when explicitly supplied; the backend applies its
  # own default ("Path") otherwise.
  if (!missing(type)) {
    if (!(type %in% c("Path", "Node_ID"))) {
      stop("type must be one of: Path, Node_ID")
    }
    params$leaf_node_assignment_type <- type
  }
  url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method = "POST", .params = params)
  res <- res$predictions_frame
  h2o.getFrame(res$name)
}

#' @rdname predict_leaf_node_assignment.H2OModel
#' @export
h2o.predict_leaf_node_assignment <- predict_leaf_node_assignment.H2OModel

# Manual n-fold cross-validation driver (internal, not exported).
# NOTE(review): several apparent defects left untouched here —
#  * `missing(model) & missing(model.type)` uses elementwise `&` on scalars
#    (works, but `&&` is the intended operator);
#  * `output` is filled via `<<-` inside the lapply() closure but never
#    returned, and the per-fold models in `xval` are discarded: the function
#    returns only the full-data `model`;
#  * `strategy` is accepted but never used.
h2o.crossValidate <- function(model, nfolds, model.type = c("gbm", "glm", "deeplearning"), params, strategy = c("mod1", "random")) {
  output <- data.frame()

  if( nfolds < 2 ) stop("`nfolds` must be greater than or equal to 2")
  if( missing(model) & missing(model.type) ) stop("must declare `model` or `model.type`")
  else if( missing(model) ) {
    if(model.type == "gbm") model.type = "h2o.gbm"
    else if(model.type == "glm") model.type = "h2o.glm"
    else if(model.type == "deeplearning") model.type = "h2o.deeplearning"

    model <- do.call(model.type, c(params))
  }
  output[1, "fold_num"] <- -1
  output[1, "model_key"] <- model@model_id
  # output[1, "model"] <- model@model$mse_valid

  data <- params$training_frame
  data <- eval(data)
  data.len <- nrow(data)

  # nfold_vec <- h2o.sample(fr, 1:nfolds)
  nfold_vec <- sample(rep(1:nfolds, length.out = data.len), data.len)

  fnum_id <- as.h2o(nfold_vec)
  fnum_id <- h2o.cbind(fnum_id, data)

  xval <- lapply(1:nfolds, function(i) {
    # Train on all folds except i, validate on fold i.
    params$training_frame <- data[fnum_id[,1] != i, ]
    params$validation_frame <- data[fnum_id[,1] == i, ]
    fold <- do.call(model.type, c(params))
    output[(i+1), "fold_num"] <<- i - 1
    output[(i+1), "model_key"] <<- fold@model_id
    # output[(i+1), "cv_err"] <<- mean(as.vector(fold@model$mse_valid))
    fold
  })
  model
}

#' Predict class probabilities at each stage of an H2O Model
#'
#' The output structure is analogous to the output of \link{h2o.predict_leaf_node_assignment}. For each tree t and
#' class c there will be a column Tt.Cc (eg. T3.C1 for tree 3 and class 1). The value will be the corresponding
#' predicted probability of this class by combining the raw contributions of trees T1.Cc,..,TtCc. Binomial models build
#' the trees just for the first class and values in columns Tx.C1 thus correspond to the probability p0.
#'
#' @param object a fitted \linkS4class{H2OModel} object for which prediction is
#'        desired
#' @param newdata An H2OFrame object in which to look for
#'        variables with which to predict.
#' @param ... additional arguments to pass on.
#' @return Returns an H2OFrame object with predicted probability for each tree in the model.
#' @seealso \code{\link{h2o.gbm}} and \code{\link{h2o.randomForest}} for model
#'          generation in h2o.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate)
#' h2o.predict(prostate_gbm, prostate)
#' h2o.staged_predict_proba(prostate_gbm, prostate)
#' }
#' @export
staged_predict_proba.H2OModel <- function(object, newdata, ...) {
  if (missing(newdata)) {
    stop("predictions with a missing `newdata` argument is not implemented yet")
  }
  url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method = "POST", predict_staged_proba=TRUE)
  res <- res$predictions_frame
  h2o.getFrame(res$name)
}

#' @rdname staged_predict_proba.H2OModel
#' @export
h2o.staged_predict_proba <- staged_predict_proba.H2OModel

#' Predict feature contributions - SHAP values on an H2O Model (only DRF, GBM and XGBoost models).
#'
#' Returned H2OFrame has shape (#rows, #features + 1) - there is a feature contribution column for each input
#' feature, the last column is the model bias (same value for each row). The sum of the feature contributions
#' and the bias term is equal to the raw prediction of the model. Raw prediction of tree-based model is the sum
#' of the predictions of the individual trees before the inverse link function is applied to get the actual
#' prediction. For Gaussian distribution the sum of the contributions is equal to the model prediction.
#'
#' Note: Multinomial classification models are currently not supported.
#'
#' @param object a fitted \linkS4class{H2OModel} object for which prediction is
#'        desired
#' @param newdata An H2OFrame object in which to look for
#'        variables with which to predict.
#' @param ... additional arguments to pass on.
#' @return Returns an H2OFrame contain feature contributions for each input row.
#' @seealso \code{\link{h2o.gbm}} and \code{\link{h2o.randomForest}} for model
#'          generation in h2o.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate_gbm <- h2o.gbm(3:9, "AGE", prostate)
#' h2o.predict(prostate_gbm, prostate)
#' h2o.predict_contributions(prostate_gbm, prostate)
#' }
#' @export
predict_contributions.H2OModel <- function(object, newdata, ...)
{
  if (missing(newdata)) {
    stop("predictions with a missing `newdata` argument is not implemented yet")
  }
  # v4 of the API returns an async job that must be awaited.
  url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method = "POST", predict_contributions=TRUE, h2oRestApiVersion = 4)
  job_key <- res$key$name
  dest_key <- res$dest$name
  .h2o.__waitOnJob(job_key)
  h2o.getFrame(dest_key)
}

#' @rdname predict_contributions.H2OModel
#' @export
h2o.predict_contributions <- predict_contributions.H2OModel

#' Retrieve the number of occurrences of each feature for given observations
#' on their respective paths in a tree ensemble model.
#' Available for GBM, Random Forest and Isolation Forest models.
#'
#' @param object a fitted \linkS4class{H2OModel} object for which prediction is
#'        desired
#' @param newdata An H2OFrame object in which to look for
#'        variables with which to predict.
#' @param ... additional arguments to pass on.
#' @return Returns an H2OFrame contain per-feature frequencies on the predict path for each input row.
#' @seealso \code{\link{h2o.gbm}} and \code{\link{h2o.randomForest}} for model
#'          generation in h2o.
feature_frequencies.H2OModel <- function(object, newdata, ...) {
  if (missing(newdata)) {
    stop("predictions with a missing `newdata` argument is not implemented yet")
  }
  url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method = "POST", feature_frequencies=TRUE)
  res <- res$predictions_frame
  h2o.getFrame(res$name)
}

#' @rdname feature_frequencies.H2OModel
#' @export
h2o.feature_frequencies <- feature_frequencies.H2OModel

#' Model Performance Metrics in H2O
#'
#' Given a trained h2o model, compute its performance on the given
#' dataset. However, if the dataset does not contain the response/target column, no performance will be returned.
#' Instead, a warning message will be printed.
#'
#'
#' @param model An \linkS4class{H2OModel} object
#' @param newdata An H2OFrame.
#' The model will make predictions
#' on this dataset, and subsequently score them. The dataset should
#' match the dataset that was used to train the model, in terms of
#' column names, types, and dimensions. If newdata is passed in, then train, valid, and xval are ignored.
#' @param train A logical value indicating whether to return the training metrics (constructed during training).
#'
#' Note: when the trained h2o model uses balance_classes, the training metrics constructed during training will be from the balanced training dataset.
#' For more information visit: \url{https://0xdata.atlassian.net/browse/TN-9}
#' @param valid A logical value indicating whether to return the validation metrics (constructed during training).
#' @param xval A logical value indicating whether to return the cross-validation metrics (constructed during training).
#' @param data (DEPRECATED) An H2OFrame. This argument is now called `newdata`.
#' @return Returns an object of the \linkS4class{H2OModelMetrics} subclass.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate)
#' h2o.performance(model = prostate_gbm, newdata=prostate)
#'
#' ## If model uses balance_classes
#' ## the results from train = TRUE will not match the results from newdata = prostate
#' prostate_gbm_balanced <- h2o.gbm(3:9, "CAPSULE", prostate, balance_classes = TRUE)
#' h2o.performance(model = prostate_gbm_balanced, newdata = prostate)
#' h2o.performance(model = prostate_gbm_balanced, train = TRUE)
#' }
#' @export
h2o.performance <- function(model, newdata=NULL, train=FALSE, valid=FALSE, xval=FALSE, data=NULL) {
  # data is now deprecated and the new arg name is newdata
  if (!is.null(data)) {
    warning("The `data` argument is DEPRECATED; use `newdata` instead as `data` will eventually be removed")
    if (is.null(newdata)) newdata <- data
    else stop("Do not use both `data` and `newdata`; just use `newdata`")
  }

  # Some parameter checking: each flag must be a single non-NA logical,
  # and at most one of the three source flags may be requested at once.
  if(!is(model, "H2OModel")) stop("`model` must an H2OModel object")
  if(!is.null(newdata) && !is.H2OFrame(newdata)) stop("`newdata` must be an H2OFrame object")
  if(!is.logical(train) || length(train) != 1L || is.na(train)) stop("`train` must be TRUE or FALSE")
  if(!is.logical(valid) || length(valid) != 1L || is.na(valid)) stop("`valid` must be TRUE or FALSE")
  if(!is.logical(xval) || length(xval) != 1L || is.na(xval)) stop("`xval` must be TRUE or FALSE")
  if(sum(valid, xval, train) > 1) stop("only one of `train`, `valid`, and `xval` can be TRUE")

  missingNewdata <- missing(newdata) || is.null(newdata)
  if( !missingNewdata ) {
    # Without the response column the backend cannot score; bail out early.
    if (!is.null(model@parameters$y) && !(model@parameters$y %in% names(newdata))) {
      print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.")
      return(NULL)
    }
    newdata.id <- h2o.getId(newdata)
    parms <- list()
    parms[["model"]] <- model@model_id
    parms[["frame"]] <- newdata.id
    res <- .h2o.__remoteSend(method = "POST", .h2o.__MODEL_METRICS(model@model_id,newdata.id), .params = parms)
    ####
    # FIXME need to do the client-side filtering... PUBDEV-874: https://0xdata.atlassian.net/browse/PUBDEV-874
    # The endpoint may return metrics for several frames; keep only the one
    # matching the frame we just scored.
    model_metrics <- Filter(function(mm) { mm$frame$name==newdata.id}, res$model_metrics)[[1]] # filter on newdata.id, R's builtin Filter function
    #
    ####
    # Strip bookkeeping fields; the remainder becomes the metrics payload.
    metrics <- model_metrics[!(names(model_metrics) %in% c("__meta", "names", "domains", "model_category"))]
    model_category <- model_metrics$model_category
    # Dispatch to the matching S4 metrics class, e.g. "H2OBinomialMetrics".
    Class <- paste0("H2O", model_category, "Metrics")
    metrics$frame <- list()
    metrics$frame$name <- newdata.id
    new(Class = Class, algorithm = model@algorithm, on_train = missingNewdata, metrics = metrics)
  }
  else if( train || (!train && !valid && !xval) ) return(model@model$training_metrics) # no newdata, train, valid, and xval are false (all defaults), return the training metrics
  else if( valid ) {
    if( is.null(model@model$validation_metrics@metrics) ) return(NULL) # no newdata, but valid is true, return the validation metrics
    else return(model@model$validation_metrics)
  } else { #if xval
    if( is.null(model@model$cross_validation_metrics@metrics) ) return(NULL) # no newdata, but xval is true, return the crosss_validation metrics
    else return(model@model$cross_validation_metrics)
  }
}

#' Create Model Metrics from predicted and actual values in H2O
#'
#' Given predicted values (target for regression, class-1 probabilities or binomial
#' or per-class probabilities for multinomial), compute a model metrics object
#'
#' @param predicted An H2OFrame containing predictions
#' @param actuals An H2OFrame containing actual values
#' @param domain Vector with response factors for classification.
#' @param distribution Distribution for regression.
#' @param weights (optional) An H2OFrame containing observation weights.
#' @return Returns an object of the \linkS4class{H2OModelMetrics} subclass.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate)
#' pred <- h2o.predict(prostate_gbm, prostate)[, 3] ## class-1 probability
#' h2o.make_metrics(pred, prostate$CAPSULE)
#' }
#' @export
h2o.make_metrics <- function(predicted, actuals, domain=NULL, distribution=NULL, weights=NULL) {
  predicted <- .validate.H2OFrame(predicted, required=TRUE)
  actuals <- .validate.H2OFrame(actuals, required=TRUE)
  weights <- .validate.H2OFrame(weights, required=FALSE)
  params <- list()
  params$predictions_frame <- h2o.getId(predicted)
  params$actuals_frame <- h2o.getId(actuals)
  if (!is.null(weights)) {
    params$weights_frame <- h2o.getId(weights)
  }
  params$domain <- domain
  params$distribution <- distribution
  # Infer the domain from the actuals' factor levels when not supplied.
  if (is.null(domain) && !is.null(h2o.levels(actuals)))
    domain <- h2o.levels(actuals)
  ## pythonify the domain: serialize as a JSON-style string array, e.g. ["a","b"]
  if (!is.null(domain)) {
    # BUG FIX: the previous loop `for (d in 2:length(domain))` mis-handled a
    # single-level domain -- 2:1 counts downward, appending domain[2] (NA) and
    # then duplicating domain[1]. Building via collapse is correct for any
    # length >= 1 and produces identical output for length >= 2.
    params[["domain"]] <- paste0('["', paste0(domain, collapse = '","'), '"]')
  }
  url <- paste0("ModelMetrics/predictions_frame/",params$predictions_frame,"/actuals_frame/",params$actuals_frame)
  res <- .h2o.__remoteSend(method = "POST", url, .params = params)
  model_metrics <- res$model_metrics
  # Strip bookkeeping fields; the remainder is the metrics payload.
  metrics <- model_metrics[!(names(model_metrics) %in% c("__meta", "names", "domains", "model_category"))]
  # Pick the metrics class from the shape of the returned payload.
  name <- "H2ORegressionMetrics"
  if (!is.null(metrics$AUC)) name <- "H2OBinomialMetrics"
  else if (!is.null(distribution) && distribution == "ordinal") name <- "H2OOrdinalMetrics"
  else if (!is.null(metrics$hit_ratio_table)) name <- "H2OMultinomialMetrics"
  new(Class = name, metrics = metrics)
}

#' Retrieve the AUC
#'
#' Retrieves the AUC value from an \linkS4class{H2OBinomialMetrics}.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training AUC value is returned. If more
#' than one parameter is set to TRUE, then a named vector of AUCs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OBinomialMetrics} object.
#' @param train Retrieve the training AUC
#' @param valid Retrieve the validation AUC
#' @param xval Retrieve the cross-validation AUC
#' @seealso \code{\link{h2o.giniCoef}} for the Gini coefficient,
#' \code{\link{h2o.mse}} for MSE, and \code{\link{h2o.metric}} for the
#' various threshold metrics. See \code{\link{h2o.performance}} for
#' creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.auc(perf)
#' }
#' @export
h2o.auc <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the AUC directly.
  if (is(object, "H2OModelMetrics")) return(object@metrics$AUC)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # Default: no flag set -> training AUC, when available.
    if (!train && !valid && !xval) {
      auc <- parts$tm@metrics$AUC
      if (!is.null(auc)) return(auc)
    }
    values <- c()
    labels <- c()
    if (train) {
      values <- c(values, parts$tm@metrics$AUC)
      labels <- c(labels, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      values <- c(values, parts$vm@metrics$AUC)
      labels <- c(labels, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      values <- c(values, parts$xm@metrics$AUC)
      labels <- c(labels, "xval")
    }
    if (!is.null(values)) {
      names(values) <- labels
      # A single value unwraps; multiple values come back as a named vector.
      if (length(values) == 1) return(values[[1]])
      return(values)
    }
  }
  warning(paste0("No AUC for ", class(object)))
  invisible(NULL)
}

#' Internal function that calculates a precise AUC from given
#' probabilities and actual responses.
#'
#' Note: The underlying implementation is not distributed and can
#' only handle limited size of data. For internal use only.
#'
#' @param probs An \linkS4class{H2OFrame} holding vector of probabilities.
#' @param acts An \linkS4class{H2OFrame} holding vector of actuals.
.h2o.perfect_auc <- function(probs, acts) {
  # Rapids expression evaluated server-side; the scalar result sits at [1, 1].
  .newExpr("perfectAUC", probs, acts)[1, 1]
}

#' Retrieve the AUCPR (Area Under Precision Recall Curve)
#'
#' Retrieves the AUCPR value from an \linkS4class{H2OBinomialMetrics}.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training AUCPR value is returned. If more
#' than one parameter is set to TRUE, then a named vector of AUCPRs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OBinomialMetrics} object.
#' @param train Retrieve the training aucpr
#' @param valid Retrieve the validation aucpr
#' @param xval Retrieve the cross-validation aucpr
#' @seealso \code{\link{h2o.giniCoef}} for the Gini coefficient,
#' \code{\link{h2o.mse}} for MSE, and \code{\link{h2o.metric}} for the
#' various threshold metrics. See \code{\link{h2o.performance}} for
#' creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.aucpr(perf)
#' }
#' @export
h2o.aucpr <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object stores the value under the pr_auc slot.
  if (is(object, "H2OModelMetrics")) return(object@metrics$pr_auc)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # No flag set -> training value, when available.
    if (!train && !valid && !xval) {
      pr <- parts$tm@metrics$pr_auc
      if (!is.null(pr)) return(pr)
    }
    out <- c()
    out_names <- c()
    if (train) {
      out <- c(out, parts$tm@metrics$pr_auc)
      out_names <- c(out_names, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      out <- c(out, parts$vm@metrics$pr_auc)
      out_names <- c(out_names, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      out <- c(out, parts$xm@metrics$pr_auc)
      out_names <- c(out_names, "xval")
    }
    if (!is.null(out)) {
      names(out) <- out_names
      # Single value unwraps; several come back as a named vector.
      if (length(out) == 1) return(out[[1]])
      return(out)
    }
  }
  warning(paste0("No aucpr for ", class(object)))
  invisible(NULL)
}

#' @rdname h2o.aucpr
#' @export
h2o.pr_auc <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Deprecated alias: forward to h2o.aucpr after signalling deprecation.
  .Deprecated("h2o.aucpr")
  h2o.aucpr(object, train, valid, xval)
}

#' Retrieve the mean per class error
#'
#' Retrieves the mean per class error from an \linkS4class{H2OBinomialMetrics}.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training mean per class error value is returned. If more
#' than one parameter is set to TRUE, then a named vector of mean per class errors are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OBinomialMetrics} object.
#' @param train Retrieve the training mean per class error
#' @param valid Retrieve the validation mean per class error
#' @param xval Retrieve the cross-validation mean per class error
#' @seealso \code{\link{h2o.mse}} for MSE, and \code{\link{h2o.metric}} for the
#' various threshold metrics. See \code{\link{h2o.performance}} for
#' creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.mean_per_class_error(perf)
#' h2o.mean_per_class_error(model, train=TRUE)
#' }
#' @export
h2o.mean_per_class_error <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects expose the value directly.
  if (is(object, "H2OModelMetrics")) return(object@metrics$mean_per_class_error)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # With no flag set, fall back to the training metric when present.
    if (!train && !valid && !xval) {
      err <- parts$tm@metrics$mean_per_class_error
      if (!is.null(err)) return(err)
    }
    collected <- c()
    tags <- c()
    if (train) {
      collected <- c(collected, parts$tm@metrics$mean_per_class_error)
      tags <- c(tags, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      collected <- c(collected, parts$vm@metrics$mean_per_class_error)
      tags <- c(tags, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      collected <- c(collected, parts$xm@metrics$mean_per_class_error)
      tags <- c(tags, "xval")
    }
    if (!is.null(collected)) {
      names(collected) <- tags
      # Single value unwraps; several come back as a named vector.
      if (length(collected) == 1) return(collected[[1]])
      return(collected)
    }
  }
  warning(paste0("No mean per class error for ", class(object)))
  invisible(NULL)
}

#'
#' Retrieve the Akaike information criterion (AIC) value
#'
#' Retrieves the AIC value.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training AIC value is returned. If more
#' than one parameter is set to TRUE, then a named vector of AICs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}.
#' @param train Retrieve the training AIC
#' @param valid Retrieve the validation AIC
#' @param xval Retrieve the cross-validation AIC
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' p_sid <- h2o.runif(prostate)
#' prostate_train <- prostate[p_sid > .2,]
#' prostate_glm <- h2o.glm(x = 3:7, y = 2, training_frame = prostate_train)
#' aic_basic <- h2o.aic(prostate_glm)
#' print(aic_basic)
#' }
#' @export
h2o.aic <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects carry the AIC directly.
  if (is(object, "H2OModelMetrics")) return(object@metrics$AIC)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # Default: no flag set -> training AIC, when present.
    if (!train && !valid && !xval) {
      aic <- parts$tm@metrics$AIC
      if (!is.null(aic)) return(aic)
    }
    vals <- c()
    nms <- c()
    if (train) {
      vals <- c(vals, parts$tm@metrics$AIC)
      nms <- c(nms, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      vals <- c(vals, parts$vm@metrics$AIC)
      nms <- c(nms, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      vals <- c(vals, parts$xm@metrics$AIC)
      nms <- c(nms, "xval")
    }
    if (!is.null(vals)) {
      names(vals) <- nms
      # Single value unwraps; several come back as a named vector.
      if (length(vals) == 1) return(vals[[1]])
      return(vals)
    }
  }
  warning(paste0("No AIC for ", class(object)))
  invisible(NULL)
}

#'
#' Retrieve the R2 value
#'
#' Retrieves the R2 value from an H2O model.
#' Will return R^2 for GLM Models and will return NaN otherwise.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training R2 value is returned. If more
#' than one parameter is set to TRUE, then a named vector of R2s are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param train Retrieve the training R2
#' @param valid Retrieve the validation set R2 if a validation set was passed in during model build time.
#' @param xval Retrieve the cross-validation R2
#' @examples
#' \dontrun{
#' library(h2o)
#'
#' h <- h2o.init()
#' fr <- as.h2o(iris)
#'
#' m <- h2o.glm(x = 2:5, y = 1, training_frame = fr)
#'
#' h2o.r2(m)
#' }
#' @export
h2o.r2 <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects carry the value under the r2 slot.
  if (is(object, "H2OModelMetrics")) return(object@metrics$r2)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # Default: no flag set -> training R2, when present.
    if (!train && !valid && !xval) {
      r2 <- parts$tm@metrics$r2
      if (!is.null(r2)) return(r2)
    }
    res <- c()
    res_names <- c()
    if (train) {
      res <- c(res, parts$tm@metrics$r2)
      res_names <- c(res_names, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      res <- c(res, parts$vm@metrics$r2)
      res_names <- c(res_names, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      res <- c(res, parts$xm@metrics$r2)
      res_names <- c(res_names, "xval")
    }
    if (!is.null(res)) {
      names(res) <- res_names
      # Single value unwraps; several come back as a named vector.
      if (length(res) == 1) return(res[[1]])
      return(res)
    }
  }
  warning(paste0("No R2 for ", class(object)))
  invisible(NULL)
}

#'
#' Retrieve the Mean Residual Deviance value
#'
#' Retrieves the Mean Residual Deviance value from an H2O model.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training Mean Residual Deviance value is returned. If more
#' than one parameter is set to TRUE, then a named vector of Mean Residual Deviances are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param train Retrieve the training Mean Residual Deviance
#' @param valid Retrieve the validation Mean Residual Deviance
#' @param xval Retrieve the cross-validation Mean Residual Deviance
#' @examples
#' \dontrun{
#' library(h2o)
#'
#' h <- h2o.init()
#' fr <- as.h2o(iris)
#'
#' m <- h2o.deeplearning(x = 2:5, y = 1, training_frame = fr)
#'
#' h2o.mean_residual_deviance(m)
#' }
#' @export
h2o.mean_residual_deviance <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects carry the value directly.
  if (is(object, "H2OModelMetrics")) return(object@metrics$mean_residual_deviance)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # Default: no flag set -> training value, when present.
    if (!train && !valid && !xval) {
      mrd <- parts$tm@metrics$mean_residual_deviance
      if (!is.null(mrd)) return(mrd)
    }
    gathered <- c()
    gathered_names <- c()
    if (train) {
      gathered <- c(gathered, parts$tm@metrics$mean_residual_deviance)
      gathered_names <- c(gathered_names, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      gathered <- c(gathered, parts$vm@metrics$mean_residual_deviance)
      gathered_names <- c(gathered_names, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      gathered <- c(gathered, parts$xm@metrics$mean_residual_deviance)
      gathered_names <- c(gathered_names, "xval")
    }
    if (!is.null(gathered)) {
      names(gathered) <- gathered_names
      # Single value unwraps; several come back as a named vector.
      if (length(gathered) == 1) return(gathered[[1]])
      return(gathered)
    }
  }
  warning(paste0("No mean residual deviance for ", class(object)))
  invisible(NULL)
}

#' Retrieve HGLM ModelMetrics
#'
#' @param object an H2OModel object or H2OModelMetrics.
#' @export
h2o.HGLMMetrics <- function(object) {
  # Only the training metrics of a model are exposed for HGLM.
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    return(parts$tm@metrics)
  }
  warning(paste0("No HGLM Metric for ",class(object)))
  invisible(NULL)
}

#' Retrieve the GINI Coefficient
#'
#' Retrieves the GINI coefficient from an \linkS4class{H2OBinomialMetrics}.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training GINI value is returned. If more
#' than one parameter is set to TRUE, then a named vector of GINIs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object an \linkS4class{H2OBinomialMetrics} object.
#' @param train Retrieve the training GINI Coefficient
#' @param valid Retrieve the validation GINI Coefficient
#' @param xval Retrieve the cross-validation GINI Coefficient
#' @seealso \code{\link{h2o.auc}} for AUC, \code{\link{h2o.giniCoef}} for the
#' GINI coefficient, and \code{\link{h2o.metric}} for the various
#' threshold metrics. See \code{\link{h2o.performance}} for creating
#' H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.giniCoef(perf)
#' }
#' @export
h2o.giniCoef <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects carry the coefficient under the Gini slot.
  if (is(object, "H2OModelMetrics")) return(object@metrics$Gini)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # Default: no flag set -> training Gini, when present.
    if (!train && !valid && !xval) {
      gini <- parts$tm@metrics$Gini
      if (!is.null(gini)) return(gini)
    }
    coll <- c()
    coll_names <- c()
    if (train) {
      coll <- c(coll, parts$tm@metrics$Gini)
      coll_names <- c(coll_names, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      coll <- c(coll, parts$vm@metrics$Gini)
      coll_names <- c(coll_names, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      coll <- c(coll, parts$xm@metrics$Gini)
      coll_names <- c(coll_names, "xval")
    }
    if (!is.null(coll)) {
      names(coll) <- coll_names
      # Single value unwraps; several come back as a named vector.
      if (length(coll) == 1) return(coll[[1]])
      return(coll)
    }
  }
  warning(paste0("No Gini for ",class(object)))
  invisible(NULL)
}

#'
#' Return the coefficients that can be applied to the non-standardized data.
#'
#' Note: standardize = True by default. If set to False, then coef() returns the coefficients that are fit directly.
#'
#' @param object an \linkS4class{H2OModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "cylinders"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_glm <- h2o.glm(balance_classes = TRUE,
#'                     seed = 1234,
#'                     x = predictors,
#'                     y = response,
#'                     training_frame = train,
#'                     validation_frame = valid)
#' h2o.coef(cars_glm)
#' }
#' @export
h2o.coef <- function(object) {
  # Coefficients only exist for GLM, GAM and CoxPH models.
  if (!is(object, "H2OModel") || !(object@algorithm %in% c("glm", "gam", "coxph"))) {
    stop("Can only extract coefficients from GAM, GLM and CoxPH models")
  }
  tbl <- object@model$coefficients_table
  # Multinomial/ordinal GLM and GAM carry one coefficient column per class.
  multi_family <- object@algorithm %in% c("glm", "gam") &&
    object@allparameters$family %in% c("multinomial", "ordinal")
  if (multi_family) {
    grabCoeff(tbl, "coefs_class", FALSE)
  } else {
    structure(tbl$coefficients, names = tbl$names)
  }
}

#'
#' Return coefficients fitted on the standardized data (requires standardize = True, which is on by default). These coefficients can be used to evaluate variable importance.
#'
#' @param object an \linkS4class{H2OModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "cylinders"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_glm <- h2o.glm(balance_classes = TRUE,
#'                     seed = 1234,
#'                     x = predictors,
#'                     y = response,
#'                     training_frame = train,
#'                     validation_frame = valid)
#' h2o.coef(cars_glm)
#' }
#' @export
h2o.coef_norm <- function(object) {
  # Standardized coefficients only exist for GLM and GAM models.
  if (!is(object, "H2OModel") || !(object@algorithm %in% c("glm", "gam"))) {
    stop("Can only extract coefficients from GAMs/GLMs")
  }
  tbl <- object@model$coefficients_table
  # Multinomial/ordinal families carry one standardized column per class.
  if (object@allparameters$family %in% c("multinomial", "ordinal")) {
    grabCoeff(tbl, "std_coefs_class", TRUE)
  } else {
    structure(tbl$standardized_coefficients, names = tbl$names)
  }
}

# Extract per-class coefficient vectors from a multinomial/ordinal
# coefficients table.
#
# The table layout is: names column, then one plain-coefficient column per
# class, then one standardized-coefficient column per class. `standardize`
# selects which half of the columns to read; `nameStart` is the prefix used
# for the output element names ("<nameStart>_0", "<nameStart>_1", ...).
# Returns a named list whose first element, "coefficient_names", holds the
# coefficient names, followed by one numeric vector per class.
grabCoeff <- function(tempTable, nameStart, standardize = FALSE) {
  n_cols <- length(tempTable)
  half <- (n_cols - 1) / 2
  # Pick the plain or the standardized half of the table.
  if (standardize) {
    col_range <- (half + 2):n_cols
  } else {
    col_range <- 2:(half + 1)
  }
  out <- list(coefficient_names = tempTable$names)
  for (k in seq_along(col_range)) {
    out[[paste(nameStart, k - 1, sep = "_")]] <- tempTable[, col_range[k]]
  }
  out
}

#' Retrieves Mean Squared Error Value
#'
#' Retrieves the mean squared error value from an \linkS4class{H2OModelMetrics}
#' object.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training MSE value is returned. If more
#' than one parameter is set to TRUE, then a named vector of MSEs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' This function only supports \linkS4class{H2OBinomialMetrics},
#' \linkS4class{H2OMultinomialMetrics}, and \linkS4class{H2ORegressionMetrics} objects.
#'
#' @param object An \linkS4class{H2OModelMetrics} object of the correct type.
#' @param train Retrieve the training MSE
#' @param valid Retrieve the validation MSE
#' @param xval Retrieve the cross-validation MSE
#' @seealso \code{\link{h2o.auc}} for AUC, \code{\link{h2o.mse}} for MSE, and
#' \code{\link{h2o.metric}} for the various threshold metrics. See
#' \code{\link{h2o.performance}} for creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.mse(perf)
#' }
#' @export
h2o.mse <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the MSE directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$MSE )
  if( is(object, "H2OModel") ) {
    metrics <- NULL # break out special for clustering vs the rest
    model.parts <- .model.parts(object)
    # Default: no flag set -> training MSE, when present.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$MSE
      if ( !is.null(metric) ) return(metric)
    }
    v <- c()
    v_names <- c()
    if ( train ) {
      # Clustering models report per-cluster within-SS instead of a scalar MSE.
      # NOTE(review): the clustering branches assign `v <-` rather than append
      # with c(v, ...); if more than one flag were combined, earlier values get
      # clobbered and names(v) below can mismatch in length -- confirm intended.
      if( is(object, "H2OClusteringModel") ) v <- model.parts$tm@metrics$centroid_stats$within_cluster_sum_of_squares
      else v <- c(v,model.parts$tm@metrics$MSE)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        if( is(object, "H2OClusteringModel") ) v <- model.parts$vm@metrics$centroid_stats$within_cluster_sum_of_squares
        else v <- c(v,model.parts$vm@metrics$MSE)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        if( is(object, "H2OClusteringModel") ) v <- model.parts$xm@metrics$centroid_stats$within_cluster_sum_of_squares
        else v <- c(v,model.parts$xm@metrics$MSE)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value unwraps; several come back as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No MSE for ",class(object)))
  invisible(NULL)
}

#' Retrieves Root Mean Squared Error Value
#'
#' Retrieves the root mean squared error value from an \linkS4class{H2OModelMetrics}
#' object.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training RMSE value is returned. If more
#' than one parameter is set to TRUE, then a named vector of RMSEs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' This function only supports \linkS4class{H2OBinomialMetrics},
#' \linkS4class{H2OMultinomialMetrics}, and \linkS4class{H2ORegressionMetrics} objects.
#'
#' @param object An \linkS4class{H2OModelMetrics} object of the correct type.
#' @param train Retrieve the training RMSE
#' @param valid Retrieve the validation RMSE
#' @param xval Retrieve the cross-validation RMSE
#' @seealso \code{\link{h2o.auc}} for AUC, \code{\link{h2o.mse}} for RMSE, and
#' \code{\link{h2o.metric}} for the various threshold metrics. See
#' \code{\link{h2o.performance}} for creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.rmse(perf)
#' }
#' @export
h2o.rmse <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the RMSE directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$RMSE )
  if( is(object, "H2OModel") ) {
    metrics <- NULL # break out special for clustering vs the rest
    model.parts <- .model.parts(object)
    # Default: no flag set -> training RMSE, when present.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$RMSE
      if ( !is.null(metric) ) return(metric)
    }
    v <- c()
    v_names <- c()
    if ( train ) {
      # Clustering models report per-cluster within-SS instead of a scalar RMSE.
      # NOTE(review): same pattern as h2o.mse -- the clustering branches assign
      # `v <-` rather than append with c(v, ...); combined flags would clobber
      # earlier values and names(v) can mismatch in length -- confirm intended.
      if( is(object, "H2OClusteringModel") ) v <- model.parts$tm@metrics$centroid_stats$within_cluster_sum_of_squares
      else v <- c(v,model.parts$tm@metrics$RMSE)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        if( is(object, "H2OClusteringModel") ) v <- model.parts$vm@metrics$centroid_stats$within_cluster_sum_of_squares
        else v <- c(v,model.parts$vm@metrics$RMSE)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        if( is(object, "H2OClusteringModel") ) v <- model.parts$xm@metrics$centroid_stats$within_cluster_sum_of_squares
        else v <- c(v,model.parts$xm@metrics$RMSE)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value unwraps; several come back as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No RMSE for ",class(object)))
  invisible(NULL)
}

#'
#' Retrieve the Mean Absolute Error Value
#'
#' Retrieves the mean absolute error (MAE) value from an H2O model.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training MAE value is returned.
#' If more
#' than one parameter is set to TRUE, then a named vector of MAEs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param train Retrieve the training MAE
#' @param valid Retrieve the validation set MAE if a validation set was passed in during model build time.
#' @param xval Retrieve the cross-validation MAE
#' @examples
#' \dontrun{
#' library(h2o)
#'
#' h <- h2o.init()
#' fr <- as.h2o(iris)
#'
#' m <- h2o.deeplearning(x = 2:5, y = 1, training_frame = fr)
#'
#' h2o.mae(m)
#' }
#' @export
h2o.mae <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects carry the value under the mae slot.
  if (is(object, "H2OModelMetrics")) return(object@metrics$mae)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # Default: no flag set -> training MAE, when present.
    if (!train && !valid && !xval) {
      mae <- parts$tm@metrics$mae
      if (!is.null(mae)) return(mae)
    }
    picked <- c()
    picked_names <- c()
    if (train) {
      picked <- c(picked, parts$tm@metrics$mae)
      picked_names <- c(picked_names, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      picked <- c(picked, parts$vm@metrics$mae)
      picked_names <- c(picked_names, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      picked <- c(picked, parts$xm@metrics$mae)
      picked_names <- c(picked_names, "xval")
    }
    if (!is.null(picked)) {
      names(picked) <- picked_names
      # Single value unwraps; several come back as a named vector.
      if (length(picked) == 1) return(picked[[1]])
      return(picked)
    }
  }
  warning(paste0("No MAE for ", class(object)))
  invisible(NULL)
}

#'
#' Retrieve the Root Mean Squared Log Error
#'
#' Retrieves the root mean squared log error (RMSLE) value from an H2O model.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training rmsle value is returned. If more
#' than one parameter is set to TRUE, then a named vector of rmsles are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param train Retrieve the training rmsle
#' @param valid Retrieve the validation set rmsle if a validation set was passed in during model build time.
#' @param xval Retrieve the cross-validation rmsle
#' @examples
#' \dontrun{
#' library(h2o)
#'
#' h <- h2o.init()
#' fr <- as.h2o(iris)
#'
#' m <- h2o.deeplearning(x = 2:5, y = 1, training_frame = fr)
#'
#' h2o.rmsle(m)
#' }
#' @export
h2o.rmsle <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the value directly.
  if (is(object, "H2OModelMetrics")) return(object@metrics$rmsle)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # With no flag set, default to the training metric when available.
    if (!train && !valid && !xval) {
      default_metric <- parts$tm@metrics$rmsle
      if (!is.null(default_metric)) return(default_metric)
    }
    vals <- c()
    labels <- c()
    if (train) {
      vals <- c(vals, parts$tm@metrics$rmsle)
      labels <- c(labels, "train")
    }
    if (valid) {
      # No validation metrics: warn and return invisibly.
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      vals <- c(vals, parts$vm@metrics$rmsle)
      labels <- c(labels, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      vals <- c(vals, parts$xm@metrics$rmsle)
      labels <- c(labels, "xval")
    }
    if (!is.null(vals)) {
      names(vals) <- labels
      # A single requested value is unwrapped to a scalar.
      if (length(vals) == 1) return(vals[[1]])
      return(vals)
    }
  }
  warning(paste0("No rmsle for ", class(object)))
  invisible(NULL)
}
#' Retrieve the Log Loss Value
#'
#' Retrieves the log loss output for a \linkS4class{H2OBinomialMetrics} or
#' \linkS4class{H2OMultinomialMetrics} object
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training Log Loss value is returned. If more
#' than one parameter is set to TRUE, then a named vector of Log Losses are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object a \linkS4class{H2OModelMetrics} object of the correct type.
#' @param train Retrieve the training Log Loss
#' @param valid Retrieve the validation Log Loss
#' @param xval Retrieve the cross-validation Log Loss
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_splits <- h2o.splitFrame(data = cars, ratios = .8, seed = 1234)
#' train <- cars_splits[[1]]
#' valid <- cars_splits[[2]]
#' car_drf <- h2o.randomForest(x = predictors,
#'                             y = response,
#'                             training_frame = train,
#'                             validation_frame = valid)
#' h2o.logloss(car_drf, train = TRUE, valid = TRUE)
#' }
#' @export
h2o.logloss <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the value directly.
  if (is(object, "H2OModelMetrics")) return(object@metrics$logloss)
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # With no flag set, default to the training metric when available.
    if (!train && !valid && !xval) {
      default_metric <- parts$tm@metrics$logloss
      if (!is.null(default_metric)) return(default_metric)
    }
    vals <- c()
    labels <- c()
    if (train) {
      vals <- c(vals, parts$tm@metrics$logloss)
      labels <- c(labels, "train")
    }
    if (valid) {
      # No validation metrics: warn and return invisibly.
      if (is.null(parts$vm)) return(invisible(.warn.no.validation()))
      vals <- c(vals, parts$vm@metrics$logloss)
      labels <- c(labels, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) return(invisible(.warn.no.cross.validation()))
      vals <- c(vals, parts$xm@metrics$logloss)
      labels <- c(labels, "xval")
    }
    if (!is.null(vals)) {
      names(vals) <- labels
      # A single requested value is unwrapped to a scalar.
      if (length(vals) == 1) return(vals[[1]])
      return(vals)
    }
  }
  warning(paste("No log loss for", class(object)))
  invisible(NULL)
}
#'
#' Retrieve the variable importance.
#'
#' @param object An \linkS4class{H2OModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate_complete.csv.zip"
#' pros <- h2o.importFile(f)
#' response <- "GLEASON"
#' predictors <- c("ID", "AGE", "CAPSULE", "DCAPS", "PSA", "VOL", "DPROS")
#' model <- h2o.glm(x = predictors, y = response, training_frame = pros)
#' h2o.varimp(model)
#' }
#' @export
h2o.varimp <- function(object) {
  # Only H2OModel objects carry variable importances.
  if (is(object, "H2OModel")) {
    vi <- object@model$variable_importances
    if (is.null(vi) && !is.null(object@model$standardized_coefficient_magnitudes)) {
      # GLM-style models expose standardized coefficient magnitudes instead of
      # a variable-importance table; reshape them into the same 4-column layout
      # (variable, relative_importance, scaled_importance, percentage).
      tvi <- object@model$standardized_coefficient_magnitudes
      vi <- data.frame(
        variable            = tvi$names,
        relative_importance = tvi$coefficients,
        scaled_importance   = tvi$coefficients / max(tvi$coefficients),
        percentage          = tvi$coefficients / sum(tvi$coefficients)
      )
    }
    # Neither a true importance table nor GLM coefficients: warn and bail.
    if (is.null(vi)) {
      warning("This model doesn't have variable importances", call. = FALSE)
      return(invisible(NULL))
    }
    vi
  } else {
    warning(paste0("No variable importances for ", class(object)))
    return(NULL)
  }
}
#'
#' Retrieve per-variable split information for a given Isolation Forest model.
#' Output will include:
#' - count - The number of times a variable was used to make a split.
#' - aggregated_split_ratios - The split ratio is defined as "abs(#left_observations - #right_observations) / #before_split".
#'   Even splits (#left_observations approx the same as #right_observations) contribute
#'   less to the total aggregated split ratio value for the given feature;
#'   highly imbalanced splits (eg. #left_observations >> #right_observations) contribute more.
#' - aggregated_split_depths - The sum of all depths of a variable used to make a split.
#' (If a variable is used
#' on level N of a tree, then it contributes with N to the total aggregate.)
#' @param object An Isolation Forest model represented by \linkS4class{H2OModel} object.
#' @export
h2o.varsplits <- function(object) {
  if (is(object, "H2OModel")) {
    vi <- object@model$variable_splits
    if (is.null(vi)) {
      warning("This model doesn't have variable splits information, only Isolation Forest can be used with h2o.varsplits().", call. = FALSE)
      return(invisible(NULL))
    }
    vi
  } else {
    # Fixed message: previously said "No variable importances for ..."
    # (copy-paste from h2o.varimp); this accessor reports split information.
    warning(paste0("No variable splits for ", class(object)))
    return(NULL)
  }
}
#'
#' Retrieve Model Score History
#'
#' @param object An \linkS4class{H2OModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_gbm <- h2o.gbm(x = predictors, y = response,
#'                     training_frame = train,
#'                     validation_frame = valid,
#'                     seed = 1234)
#' h2o.scoreHistory(cars_gbm)
#' }
#' @export
h2o.scoreHistory <- function(object) {
  if (is(object, "H2OModel")) {
    # NULL scoring history simply yields NULL.
    object@model$scoring_history
  } else {
    warning(paste0("No score history for ", class(object)))
    NULL
  }
}
#'
#' Retrieve GLM Model Score History buried in GAM model
#' @param object An \linkS4class{H2OModel} object.
#' @export
h2o.scoreHistoryGAM <- function(object) {
  # GAM models keep the underlying GLM scoring history in a dedicated slot.
  return(object@model$glm_scoring_history)
}
#'
#' Retrieve actual number of trees for tree algorithms
#'
#' @param object An \linkS4class{H2OModel} object.
#' @export
h2o.get_ntrees_actual <- function(object) {
  if (is(object, "H2OModel")) {
    # Only tree-based algorithms expose a tree count in the model summary.
    # (Was a vectorized `|` chain; `%in%` expresses the scalar membership test.)
    if (object@algorithm %in% c("gbm", "drf", "isolationforest", "xgboost")) {
      sh <- object@model$model_summary['number_of_trees'][,1]
      if (is.null(sh)) return(NULL)
      sh
    } else {
      warning("No actual number of trees for this model")
      return(NULL)
    }
  } else {
    warning(paste0("No actual number of trees for ", class(object)))
    return(NULL)
  }
}
#'
#' Retrieve the respective weight matrix
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param matrix_id An integer, ranging from 1 to number of layers + 1, that specifies the weight matrix to return.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/chicago/chicagoCensus.csv"
#' census <- h2o.importFile(f)
#' census[, 1] <- as.factor(census[, 1])
#' dl_model <- h2o.deeplearning(x = c(1:3), y = 4, training_frame = census,
#'                              hidden = c(17, 191),
#'                              epochs = 1,
#'                              balance_classes = FALSE,
#'                              export_weights_and_biases = TRUE)
#' h2o.weights(dl_model, matrix_id = 1)
#' }
#' @export
h2o.weights <- function(object, matrix_id=1){
  if (!is(object, "H2OModel")) {
    warning(paste0("No weights for ", class(object)))
    return(NULL)
  }
  sh <- object@model$weights[[matrix_id]]
  if (is.null(sh)) return(NULL)
  # The model only stores a reference; fetch the actual H2OFrame by name.
  # (A dead `sh` expression whose value was discarded has been removed.)
  h2o.getFrame(sh$name)
}
#'
#' Return the respective bias vector
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param vector_id An integer, ranging from 1 to number of layers + 1, that specifies the bias vector to return.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "http://h2o-public-test-data.s3.amazonaws.com/smalldata/chicago/chicagoCensus.csv"
#' census <- h2o.importFile(f)
#' census[, 1] <- as.factor(census[, 1])
#'
#' dl_model <- h2o.deeplearning(x = c(1:3), y = 4, training_frame = census,
#'                              hidden = c(17, 191),
#'                              epochs = 1,
#'                              balance_classes = FALSE,
#'                              export_weights_and_biases = TRUE)
#' h2o.biases(dl_model, vector_id = 1)
#' }
#' @export
h2o.biases <- function(object, vector_id=1){
  if (!is(object, "H2OModel")) {
    warning(paste0("No biases for ", class(object)))
    return(NULL)
  }
  sh <- object@model$biases[[vector_id]]
  if (is.null(sh)) return(NULL)
  # The model only stores a reference; fetch the actual H2OFrame by name.
  # (A dead `sh` expression whose value was discarded has been removed.)
  h2o.getFrame(sh$name)
}
#'
#' Retrieve the Hit Ratios
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training Hit Ratios value is returned. If more
#' than one parameter is set to TRUE, then a named list of Hit Ratio tables are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param train Retrieve the training Hit Ratio
#' @param valid Retrieve the validation Hit Ratio
#' @param xval Retrieve the cross-validation Hit Ratio
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/iris/iris_wheader.csv"
#' iris <- h2o.importFile(f)
#' iris_split <- h2o.splitFrame(data = iris, ratios = 0.8, seed = 1234)
#' train <- iris_split[[1]]
#' valid <- iris_split[[2]]
#'
#' iris_xgb <- h2o.xgboost(x = 1:4, y = 5, training_frame = train, validation_frame = valid)
#' hrt_iris <- h2o.hit_ratio_table(iris_xgb, valid = TRUE)
#' hrt_iris
#' }
#' @export
h2o.hit_ratio_table <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the table directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$hit_ratio_table )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # No flag set: fall back to the training table when one exists.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$hit_ratio_table
      if ( !is.null(metric) ) return(metric)
    }
    # NOTE(review): unlike the vector-valued accessors, `v` is a list here, and
    # is.null(list()) is FALSE — so an H2OModel with no matching table returns
    # an empty list rather than falling through to the warning below.
    v <- list()
    v_names <- c()
    if ( train ) {
      v[[length(v)+1]] <- model.parts$tm@metrics$hit_ratio_table
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v[[length(v)+1]] <- model.parts$vm@metrics$hit_ratio_table
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v[[length(v)+1]] <- model.parts$xm@metrics$hit_ratio_table
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  # if o is a data.frame, then the hrt was passed in -- just for pretty printing
  if( is(object, "data.frame") ) return(object)
  # warn if we got something unexpected...
  warning( paste0("No hit ratio table for ", class(object)) )
  invisible(NULL)
}
#' H2O Model Metric Accessor Functions
#'
#' A series of functions that retrieve model metric details.
#'
#' Many of these functions have an optional thresholds parameter. Currently
#' only increments of 0.1 are allowed. If not specified, the functions will
#' return all possible values. Otherwise, the function will return the value for
#' the indicated threshold.
#'
#' Currently, these functions are only supported by
#' \linkS4class{H2OBinomialMetrics} objects.
#'
#' @param object An \linkS4class{H2OModelMetrics} object of the correct type.
#' @param thresholds (Optional) A value or a list of values between 0.0 and 1.0.
#'        If not set, then all thresholds will be returned.
#'        If "max", then the threshold maximizing the metric will be used.
#' @param metric (Optional) the metric to retrieve.
#'        If not set, then all metrics will be returned.
#' @param transform (Optional) a list describing a transformer for the given metric, if any.
#'        e.g. transform=list(op=foo_fn, name="foo") will rename the given metric to "foo"
#'        and apply function foo_fn to the metric values.
#' @return Returns either a single value, or a list of values.
#' @seealso \code{\link{h2o.auc}} for AUC, \code{\link{h2o.giniCoef}} for the
#'          GINI coefficient, and \code{\link{h2o.mse}} for MSE. See
#'          \code{\link{h2o.performance}} for creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.F1(perf)
#' }
#' @export
h2o.metric <- function(object, thresholds, metric, transform=NULL) {
  if (!is(object, "H2OModelMetrics")) stop(paste0("No ", metric, " for ",class(object)," .Should be a H2OModelMetrics object!"))
  if (is(object, "H2OBinomialMetrics")){
    # Columns of the per-threshold table, minus the bookkeeping columns.
    avail_metrics <- names(object@metrics$thresholds_and_metric_scores)
    avail_metrics <- avail_metrics[!(avail_metrics %in% c('threshold', 'idx'))]
    if (missing(thresholds)) {
      if (missing(metric)) {
        # Neither thresholds nor metric requested: return the whole table.
        metrics <- object@metrics$thresholds_and_metric_scores
      } else {
        # Map user-facing metric names through the alias table where needed.
        h2o_metric <- sapply(metric, function(m) ifelse(m %in% avail_metrics, m, ifelse(m %in% names(.h2o.metrics_aliases), .h2o.metrics_aliases[m], m)))
        metrics <- object@metrics$thresholds_and_metric_scores[, c("threshold", h2o_metric)]
        if (!missing(transform)) {
          # Optional transformer: `op` rewrites the values, `name` relabels the column.
          if ('op' %in% names(transform)) {
            metrics[h2o_metric] <- transform$op(metrics[h2o_metric])
          }
          if ('name' %in% names(transform)) {
            names(metrics) <- c("threshold", transform$name)
          }
        }
      }
    } else if (thresholds == 'max' && missing(metric)) {
      metrics <- object@metrics$max_criteria_and_metric_scores
    } else {
      if (missing(metric)) {
        h2o_metric <- avail_metrics
      } else {
        h2o_metric <- unlist(lapply(metric, function(m) ifelse(m %in% avail_metrics, m, ifelse(m %in% names(.h2o.metrics_aliases), .h2o.metrics_aliases[m], m))))
      }
      # "max" resolves to the threshold(s) maximizing the requested metric(s).
      if (thresholds == 'max') thresholds <- h2o.find_threshold_by_max_metric(object, h2o_metric)
      metrics <- lapply(thresholds, function(t,o,m) h2o.find_row_by_threshold(o, t)[, m], object, h2o_metric)
      if (!missing(transform) && 'op' %in% names(transform)) {
        metrics <- lapply(metrics, transform$op)
      }
    }
    return(metrics)
  } else {
    stop(paste0("No ", metric, " for ",class(object)))
  }
}
#' @rdname h2o.metric
#' @export
h2o.F0point5 <- function(object, thresholds){
  h2o.metric(object, thresholds, "f0point5")
}
#' @rdname h2o.metric
#' @export
h2o.F1 <- function(object, thresholds){
  h2o.metric(object, thresholds, "f1")
}
#' @rdname h2o.metric
#' @export
h2o.F2 <- function(object, thresholds){
  h2o.metric(object, thresholds, "f2")
}
#' @rdname h2o.metric
#' @export
h2o.accuracy <- function(object, thresholds){
  h2o.metric(object, thresholds, "accuracy")
}
#' @rdname h2o.metric
#' @export
h2o.error <- function(object, thresholds){
  # Error rate is derived from accuracy via the transform hook (1 - accuracy).
  h2o.metric(object, thresholds, "accuracy", transform=list(name="error", op=function(acc) 1 - acc))
}
#' @rdname h2o.metric
#' @export
h2o.maxPerClassError <- function(object, thresholds){
  # Reported as the complement of the minimum per-class accuracy.
  h2o.metric(object, thresholds, "min_per_class_accuracy", transform=list(name="max_per_class_error", op=function(mpc_acc) 1 - mpc_acc))
}
#' @rdname h2o.metric
#' @export
h2o.mean_per_class_accuracy <- function(object, thresholds){
  h2o.metric(object, thresholds, "mean_per_class_accuracy")
}
#' @rdname h2o.metric
#' @export
h2o.mcc <- function(object, thresholds){
  h2o.metric(object, thresholds, "absolute_mcc")
}
#' @rdname h2o.metric
#' @export
h2o.precision <- function(object, thresholds){
  h2o.metric(object, thresholds, "precision")
}
#' @rdname h2o.metric
#' @export
h2o.tpr <- function(object, thresholds){
  h2o.metric(object, thresholds, "tpr")
}
#' @rdname h2o.metric
#' @export
h2o.fpr <- function(object, thresholds){
  h2o.metric(object, thresholds, "fpr")
}
#' @rdname h2o.metric
#' @export
h2o.fnr <- function(object, thresholds){
  h2o.metric(object, thresholds, "fnr")
}
#' @rdname h2o.metric
#' @export
h2o.tnr <- function(object, thresholds){
  h2o.metric(object, thresholds, "tnr")
}
#' @rdname h2o.metric
#' @export
h2o.recall <- function(object, thresholds){
  # recall == true positive rate
  h2o.metric(object, thresholds, "tpr")
}
#' @rdname h2o.metric
#' @export
h2o.sensitivity <- function(object, thresholds){
  # sensitivity == true positive rate
  h2o.metric(object, thresholds, "tpr")
}
#' @rdname h2o.metric
#' @export
h2o.fallout <- function(object, thresholds){
  # fallout == false positive rate
  h2o.metric(object, thresholds, "fpr")
}
#' @rdname h2o.metric
#' @export
h2o.missrate <- function(object, thresholds){
  # miss rate == false negative rate
  h2o.metric(object, thresholds, "fnr")
}
#' @rdname h2o.metric
#' @export
h2o.specificity <- function(object, thresholds){
  # specificity == true negative rate
  h2o.metric(object, thresholds, "tnr")
}
#' Find the threshold, given the max metric
#'
#' @rdname h2o.find_threshold_by_max_metric
#' @param object H2OBinomialMetrics
#' @param metric "F1," for example
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_gbm <- h2o.gbm(x = predictors, y = response,
#'                     training_frame = train, validation_frame = valid,
#'                     build_tree_one_node = TRUE , seed = 1234)
#' perf <- h2o.performance(cars_gbm, cars)
#' h2o.find_threshold_by_max_metric(perf, "fnr")
#' }
#' @export
h2o.find_threshold_by_max_metric <- function(object, metric) {
  if(!is(object, "H2OBinomialMetrics")) stop(paste0("No ", metric, " for ",class(object)))
  max_metrics <- object@metrics$max_criteria_and_metric_scores
  # Resolve user-facing aliases to the internal metric names before lookup.
  h2o_metric <- sapply(metric, function(m) ifelse(m %in% names(.h2o.metrics_aliases), .h2o.metrics_aliases[m], m))
  # Rows of the max-criteria table are labelled "max <metric>".
  max_metrics[match(paste0("max ", h2o_metric), max_metrics$metric), "threshold"]
}
#' Find the row for a given threshold.
#' No duplicate thresholds allowed
#'
#' @rdname h2o.find_row_by_threshold
#' @param object H2OBinomialMetrics
#' @param threshold number between 0 and 1
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_gbm <- h2o.gbm(x = predictors, y = response,
#'                     training_frame = train, validation_frame = valid,
#'                     build_tree_one_node = TRUE , seed = 1234)
#' perf <- h2o.performance(cars_gbm, cars)
#' h2o.find_row_by_threshold(perf, 0.5)
#' }
#' @export
h2o.find_row_by_threshold <- function(object, threshold) {
  if(!is(object, "H2OBinomialMetrics")) stop(paste0("No ", threshold, " for ",class(object)))
  tmp <- object@metrics$thresholds_and_metric_scores
  if( is.null(tmp) ) return(NULL)
  # Floating-point-safe match of the requested threshold.
  res <- tmp[abs(as.numeric(tmp$threshold) - threshold) < 1e-8,] # relax the tolerance
  if( nrow(res) == 0L ) {
    # couldn't find any threshold within 1e-8 of the requested value, warn and return closest threshold
    row_num <- which.min(abs(tmp$threshold - threshold))
    closest_threshold <- tmp$threshold[row_num]
    warning( paste0("Could not find exact threshold: ", threshold, " for this set of metrics; using closest threshold found: ", closest_threshold, ". Run `h2o.predict` and apply your desired threshold on a probability column.") )
    return( tmp[row_num,] )
  }
  # With several matches (shouldn't happen — thresholds are unique), keep the first.
  else if( nrow(res) > 1L ) res <- res[1L,]
  res
}
#'
#' Retrieve the Model Centers
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.centers(km)
#' }
#' @export
h2o.centers <- function(object) {
  # Drop the first column (cluster id/label), keep only the coordinates.
  as.data.frame(object@model$centers[,-1])
}
#'
#' Retrieve the Model Centers STD
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.centersSTD(km)
#' }
#' @export
h2o.centersSTD <- function(object) {
  # Drop the first column (cluster id/label), keep only the coordinates.
  as.data.frame(object@model$centers_std)[,-1]
}
#'
#' Get the Within SS
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @export
h2o.withinss <- function(object) {
  # Within sum of squares is reported as the clustering model's MSE.
  h2o.mse(object)
}
#'
#' Get the total within cluster sum of squares.
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training tot_withinss value is returned. If more
#' than one parameter is set to TRUE, then a named vector of tot_withinss' are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training total within cluster sum of squares
#' @param valid Retrieve the validation total within cluster sum of squares
#' @param xval Retrieve the cross-validation total within cluster sum of squares
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.tot_withinss(km, train = TRUE)
#' }
#' @export
h2o.tot_withinss <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # No flag set: return the training metric.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$tot_withinss )
  v <- c()
  v_names <- c()
  if ( train ) {
    v <- c(v,model.parts$tm@metrics$tot_withinss)
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    # NOTE(review): unlike h2o.rmse and friends, a missing validation metric
    # only warns and is skipped — there is no early return here. Confirm this
    # asymmetry is intentional.
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v <- c(v,model.parts$vm@metrics$tot_withinss)
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v <- c(v,model.parts$xm@metrics$tot_withinss)
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#' Get the between cluster sum of squares
#'
#' Get the between cluster sum of squares.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training betweenss value is returned. If more
#' than one parameter is set to TRUE, then a named vector of betweenss' are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training between cluster sum of squares
#' @param valid Retrieve the validation between cluster sum of squares
#' @param xval Retrieve the cross-validation between cluster sum of squares
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.betweenss(km, train = TRUE)
#' }
#' @export
h2o.betweenss <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # No flag set: return the training metric.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$betweenss )
  v <- c()
  v_names <- c()
  if ( train ) {
    v <- c(v,model.parts$tm@metrics$betweenss)
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    # NOTE(review): warns but does not return early (see h2o.tot_withinss).
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v <- c(v,model.parts$vm@metrics$betweenss)
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v <- c(v,model.parts$xm@metrics$betweenss)
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#'
#' Get the total sum of squares.
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training totss value is returned. If more
#' than one parameter is set to TRUE, then a named vector of totss' are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training total sum of squares
#' @param valid Retrieve the validation total sum of squares
#' @param xval Retrieve the cross-validation total sum of squares
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.totss(km, train = TRUE)
#' }
#' @export
h2o.totss <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # No flag set: return the training metric.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$totss )
  v <- c()
  v_names <- c()
  if ( train ) {
    v <- c(v,model.parts$tm@metrics$totss)
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    # NOTE(review): warns but does not return early (see h2o.tot_withinss).
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v <- c(v,model.parts$vm@metrics$totss)
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v <- c(v,model.parts$xm@metrics$totss)
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#'
#' Retrieve the number of iterations.
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial",
#'                         nfolds = 0, alpha = 0.5, lambda_search = FALSE)
#' h2o.num_iterations(prostate_glm)
#' }
#' @export
h2o.num_iterations <- function(object) {
  # Number of fitting iterations as recorded in the model summary.
  object@model$model_summary$number_of_iterations
}
#'
#' Retrieve centroid statistics
#'
#' Retrieve the centroid statistics.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training centroid stats value is returned. If more
#' than one parameter is set to TRUE, then a named list of centroid stats data frames are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training centroid statistics
#' @param valid Retrieve the validation centroid statistics
#' @param xval Retrieve the cross-validation centroid statistics
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.centroid_stats(km, train = TRUE)
#' }
#' @export
h2o.centroid_stats <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # No flag set: return the training statistics.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$centroid_stats )
  # Collected as a list because each entry is a whole stats table.
  v <- list()
  v_names <- c()
  if ( train ) {
    v[[length(v)+1]] <- model.parts$tm@metrics$centroid_stats
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    # NOTE(review): warns but does not return early — the flag is just skipped.
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v[[length(v)+1]] <- model.parts$vm@metrics$centroid_stats
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v[[length(v)+1]] <- model.parts$xm@metrics$centroid_stats
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#'
#' Retrieve the cluster sizes
#'
#' Retrieve the cluster sizes.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training cluster sizes value is returned. If more
#' than one parameter is set to TRUE, then a named list of cluster size vectors are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training cluster sizes
#' @param valid Retrieve the validation cluster sizes
#' @param xval Retrieve the cross-validation cluster sizes
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.cluster_sizes(km, train = TRUE)
#' }
#' @export
h2o.cluster_sizes <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # Cluster sizes are the `size` column of the centroid statistics.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$centroid_stats$size )
  v <- list()
  v_names <- c()
  if ( train ) {
    v[[length(v)+1]] <- model.parts$tm@metrics$centroid_stats$size
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    # NOTE(review): warns but does not return early — the flag is just skipped.
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v[[length(v)+1]] <- model.parts$vm@metrics$centroid_stats$size
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v[[length(v)+1]] <- model.parts$xm@metrics$centroid_stats$size
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#'
#' Retrieve the null deviance
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training null deviance value is returned. If more
#' than one parameter is set to TRUE, then a named vector of null deviances are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param train Retrieve the training null deviance
#' @param valid Retrieve the validation null deviance
#' @param xval Retrieve the cross-validation null deviance
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial", nfolds = 0,
#'                         alpha = 0.5, lambda_search = FALSE)
#' h2o.null_deviance(prostate_glm, train = TRUE)
#' }
#' @export
h2o.null_deviance <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the value directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$null_deviance )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # No flag set: fall back to the training metric when one exists.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$null_deviance
      if ( !is.null(metric) ) return(metric)
    }
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$null_deviance)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics: warn and return invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$null_deviance)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$null_deviance)
        v_names <- c(v_names,"xval")
      }
    }
    # Single value unwraps to a scalar; several values come back named.
    if ( !is.null(v) ) {
      names(v) <- v_names
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No null deviance for ", class(object)))
  invisible(NULL)
}
#' Retrieve the residual deviance
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training residual deviance value is returned. If more
#' than one parameter is set to TRUE, then a named vector of residual deviances are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param train Retrieve the training residual deviance
#' @param valid Retrieve the validation residual deviance
#' @param xval Retrieve the cross-validation residual deviance
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial",
#'                         nfolds = 0, alpha = 0.5, lambda_search = FALSE)
#' h2o.residual_deviance(prostate_glm, train = TRUE)
#' }
#' @export
h2o.residual_deviance <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the value directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$residual_deviance )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # No flag set: fall back to the training metric when one exists.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$residual_deviance
      if ( !is.null(metric) ) return(metric)
    }
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$residual_deviance)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics: warn and return invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$residual_deviance)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$residual_deviance)
        v_names <- c(v_names,"xval")
      }
    }
    # Single value unwraps to a scalar; several values come back named.
    if ( !is.null(v) ) {
      names(v) <- v_names
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No residual deviance for ", class(object)))
  invisible(NULL)
}
#' Retrieve the residual degrees of freedom
#'
#' If "train", "valid", and "xval" parameters
are FALSE (default), then the training residual degrees of freedom value is returned. If more
#' than one parameter is set to TRUE, then a named vector of residual degrees of freedom are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param train Retrieve the training residual degrees of freedom
#' @param valid Retrieve the validation residual degrees of freedom
#' @param xval Retrieve the cross-validation residual degrees of freedom
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial",
#'                         nfolds = 0, alpha = 0.5, lambda_search = FALSE)
#' h2o.residual_dof(prostate_glm, train = TRUE)
#' }
#' @export
h2o.residual_dof <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the value directly.
  if (is(object, "H2OModelMetrics")) return(object@metrics$residual_degrees_of_freedom)
  if (is(object, "H2OModel")) {
    model.parts <- .model.parts(object)
    # Default: fall back to the training metric when present.
    if (!train && !valid && !xval) {
      metric <- model.parts$tm@metrics$residual_degrees_of_freedom
      if (!is.null(metric)) return(metric)
    }
    vals <- c()
    tags <- c()
    if (train) {
      vals <- c(vals, model.parts$tm@metrics$residual_degrees_of_freedom)
      tags <- c(tags, "train")
    }
    if (valid) {
      if (is.null(model.parts$vm)) return(invisible(.warn.no.validation()))
      vals <- c(vals, model.parts$vm@metrics$residual_degrees_of_freedom)
      tags <- c(tags, "valid")
    }
    if (xval) {
      if (is.null(model.parts$xm)) return(invisible(.warn.no.cross.validation()))
      vals <- c(vals, model.parts$xm@metrics$residual_degrees_of_freedom)
      tags <- c(tags, "xval")
    }
    if (!is.null(vals)) {
      names(vals) <- tags
      if (length(vals) == 1L) return(vals[[1L]]) else return(vals)
    }
  }
  warning(paste0("No residual dof for ", class(object)))
  invisible(NULL)
}

#' Retrieve the null degrees of freedom
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training null degrees of freedom value is returned. If more
#' than one parameter is set to TRUE, then a named vector of null degrees of freedom are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param train Retrieve the training null degrees of freedom
#' @param valid Retrieve the validation null degrees of freedom
#' @param xval Retrieve the cross-validation null degrees of freedom
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial", nfolds = 0,
#'                         alpha = 0.5, lambda_search = FALSE)
#' h2o.null_dof(prostate_glm, train = TRUE)
#' }
#' @export
h2o.null_dof <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the value directly.
  if (is(object, "H2OModelMetrics")) return(object@metrics$null_degrees_of_freedom)
  if (is(object, "H2OModel")) {
    model.parts <- .model.parts(object)
    # Default: fall back to the training metric when present.
    if (!train && !valid && !xval) {
      metric <- model.parts$tm@metrics$null_degrees_of_freedom
      if (!is.null(metric)) return(metric)
    }
    vals <- c()
    tags <- c()
    if (train) {
      vals <- c(vals, model.parts$tm@metrics$null_degrees_of_freedom)
      tags <- c(tags, "train")
    }
    if (valid) {
      if (is.null(model.parts$vm)) return(invisible(.warn.no.validation()))
      vals <- c(vals, model.parts$vm@metrics$null_degrees_of_freedom)
      tags <- c(tags, "valid")
    }
    if (xval) {
      if (is.null(model.parts$xm)) return(invisible(.warn.no.cross.validation()))
      vals <- c(vals, model.parts$xm@metrics$null_degrees_of_freedom)
      tags <- c(tags, "xval")
    }
    if (!is.null(vals)) {
      names(vals) <- tags
      if (length(vals) == 1L) return(vals[[1L]]) else return(vals)
    }
  }
  warning(paste0("No null dof for ", class(object)))
  invisible(NULL)
}

#' Access H2O Gains/Lift Tables
#'
#' Retrieve either a single or many Gains/Lift tables from H2O objects.
#'
#' The \linkS4class{H2OModelMetrics} version of this function will only take
#' \linkS4class{H2OBinomialMetrics} objects.
#'
#' @param object Either an \linkS4class{H2OModel} object or an
#'        \linkS4class{H2OModelMetrics} object.
#' @param newdata An H2OFrame object that can be scored on.
#'        Requires a valid response column.
#' @param valid Retrieve the validation metric.
#' @param xval Retrieve the cross-validation metric.
#' @param \dots further arguments to be passed to/from this method.
#' @return Calling this function on \linkS4class{H2OModel} objects returns a
#'         Gains/Lift table corresponding to the \code{\link{predict}} function.
#' @seealso \code{\link{predict}} for generating prediction frames,
#'          \code{\link{h2o.performance}} for creating
#'          \linkS4class{H2OModelMetrics}.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, distribution = "bernoulli",
#'                  training_frame = prostate, validation_frame = prostate, nfolds = 3)
#' h2o.gainsLift(model) ## extract training metrics
#' h2o.gainsLift(model, valid = TRUE) ## extract validation metrics (here: the same)
#' h2o.gainsLift(model, xval = TRUE) ## extract cross-validation metrics
#' h2o.gainsLift(model, newdata = prostate) ## score on new data (here: the same)
#' # Generating a ModelMetrics object
#' perf <- h2o.performance(model, prostate)
#' h2o.gainsLift(perf) ## extract from existing metrics object
#' }
#' @export
setGeneric("h2o.gainsLift", function(object, ...) {})

#' @rdname h2o.gainsLift
#' @export
setMethod("h2o.gainsLift", "H2OModel", function(object, newdata, valid=FALSE, xval=FALSE,...) {
  model.parts <- .model.parts(object)
  if (missing(newdata)) {
    # Pull the table from the stored validation/xval/training metrics.
    if (valid) {
      if (is.null(model.parts$vm)) return(invisible(.warn.no.validation()))
      return(h2o.gainsLift(model.parts$vm))
    }
    if (xval) {
      if (is.null(model.parts$xm)) return(invisible(.warn.no.cross.validation()))
      return(h2o.gainsLift(model.parts$xm))
    }
    return(h2o.gainsLift(model.parts$tm))
  }
  if (valid) stop("Cannot have both `newdata` and `valid=TRUE`", call.=FALSE)
  if (xval) stop("Cannot have both `newdata` and `xval=TRUE`", call.=FALSE)
  # Score newdata server-side, then wrap the result in the matching
  # H2O*Metrics class so the metrics method can extract the table.
  url <- paste0("Predictions/models/", object@model_id, "/frames/", h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method="POST")
  metrics <- new(sub("Model", "Metrics", class(object)),
                 algorithm = object@algorithm,
                 metrics = res$model_metrics[[1L]])
  h2o.gainsLift(metrics, ...)
})

#' @rdname h2o.gainsLift
#' @export
setMethod("h2o.gainsLift", "H2OModelMetrics", function(object) {
  # Gains/Lift is only defined for binomial classification metrics.
  if (!is(object, "H2OBinomialMetrics")) {
    warning(paste0("No Gains/Lift table for ", class(object)))
    return(NULL)
  }
  object@metrics$gains_lift_table
})

#' Kolmogorov-Smirnov metric for binomial models
#'
#' Retrieves a Kolmogorov-Smirnov metric for given binomial model. The number returned is in range between 0 and 1.
#' K-S metric represents the degree of separation between the positive (1) and negative (0) cumulative distribution
#' functions. Detailed metrics per each group are to be found in the gains-lift table.
#'
#' The \linkS4class{H2OModelMetrics} version of this function will only take
#' \linkS4class{H2OBinomialMetrics} objects.
#'
#' @param object Either an \linkS4class{H2OModel} object or an
#'        \linkS4class{H2OModelMetrics} object.
#' @return Kolmogorov-Smirnov metric, a number between 0 and 1.
#' @seealso \code{\link{h2o.gainsLift}} to see detailed K-S metrics per group #' #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' data <- h2o.importFile( #' path = "https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip") #' model <- h2o.gbm(x = c("Origin", "Distance"), y = "IsDepDelayed", #' training_frame = data, ntrees = 1) #' h2o.kolmogorov_smirnov(model) #' } #' @export setGeneric("h2o.kolmogorov_smirnov", function(object) {}) #' @rdname h2o.kolmogorov_smirnov #' @export setMethod("h2o.kolmogorov_smirnov", "H2OModelMetrics", function(object) { gains_lift <- h2o.gainsLift(object = object) if(is.null(gains_lift)){ warning(paste0("No Gains/Lift table for ",class(object))) return(NULL) } else { return(max(gains_lift$kolmogorov_smirnov)) } }) #' @rdname h2o.kolmogorov_smirnov #' @export setMethod("h2o.kolmogorov_smirnov", "H2OModel", function(object) { gains_lift <- h2o.gainsLift(object = object) if(is.null(gains_lift)){ warning(paste0("No Gains/Lift table for ",class(object))) return(NULL) } else { return(max(gains_lift$kolmogorov_smirnov)) } }) #' Access H2O Confusion Matrices #' #' Retrieve either a single or many confusion matrices from H2O objects. #' #' The \linkS4class{H2OModelMetrics} version of this function will only take #' \linkS4class{H2OBinomialMetrics} or \linkS4class{H2OMultinomialMetrics} #' objects. If no threshold is specified, all possible thresholds are selected. #' #' @param object Either an \linkS4class{H2OModel} object or an #' \linkS4class{H2OModelMetrics} object. #' @param newdata An H2OFrame object that can be scored on. #' Requires a valid response column. #' @param thresholds (Optional) A value or a list of valid values between 0.0 and 1.0. #' This value is only used in the case of #' \linkS4class{H2OBinomialMetrics} objects. 
#' @param metrics (Optional) A metric or a list of valid metrics ("min_per_class_accuracy", "absolute_mcc", "tnr", "fnr", "fpr", "tpr", "precision", "accuracy", "f0point5", "f2", "f1").
#'        This value is only used in the case of
#'        \linkS4class{H2OBinomialMetrics} objects.
#' @param valid Retrieve the validation metric.
#' @param ... Extra arguments for extracting train or valid confusion matrices.
#' @return Calling this function on \linkS4class{H2OModel} objects returns a
#'         confusion matrix corresponding to the \code{\link{predict}} function.
#'         If used on an \linkS4class{H2OBinomialMetrics} object, returns a list
#'         of matrices corresponding to the number of thresholds specified.
#' @seealso \code{\link{predict}} for generating prediction frames,
#'          \code{\link{h2o.performance}} for creating
#'          \linkS4class{H2OModelMetrics}.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' h2o.confusionMatrix(model, prostate)
#' # Generating a ModelMetrics object
#' perf <- h2o.performance(model, prostate)
#' h2o.confusionMatrix(perf)
#' }
#' @export
setGeneric("h2o.confusionMatrix", function(object, ...) {})

#' @rdname h2o.confusionMatrix
#' @export
setMethod("h2o.confusionMatrix", "H2OModel", function(object, newdata, valid=FALSE, ...) {
  model.parts <- .model.parts(object)
  if (missing(newdata)) {
    # No new data: read the confusion matrix off the stored metrics.
    if (valid) {
      if (is.null(model.parts$vm)) return(invisible(.warn.no.validation()))
      return(h2o.confusionMatrix(model.parts$vm, ...))
    }
    return(h2o.confusionMatrix(model.parts$tm, ...))
  }
  if (valid) stop("Cannot have both `newdata` and `valid=TRUE`", call.=FALSE)
  # Score on newdata server-side and wrap the result in the matching metrics class.
  url <- paste0("Predictions/models/", object@model_id, "/frames/", h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method="POST")
  # FIXME: don't think model metrics come out of Predictions anymore!!!
  metrics <- new(sub("Model", "Metrics", class(object)),
                 algorithm = object@algorithm,
                 metrics = res$model_metrics[[1L]])
  h2o.confusionMatrix(metrics, ...)
})

# Friendly metric names accepted by users, mapped to the canonical rate names.
.h2o.metrics_aliases <- list(
  fallout = 'fpr',
  missrate = 'fnr',
  recall = 'tpr',
  sensitivity = 'tpr',
  specificity = 'tnr'
)

# Metrics for which an optimal threshold can be looked up.
.h2o.maximizing_metrics <- c('absolute_mcc', 'accuracy', 'precision', 'f0point5', 'f1', 'f2',
                             'mean_per_class_accuracy', 'min_per_class_accuracy',
                             'fpr', 'fnr', 'tpr', 'tnr', names(.h2o.metrics_aliases))

#' @rdname h2o.confusionMatrix
#' @export
setMethod("h2o.confusionMatrix", "H2OModelMetrics", function(object, thresholds=NULL, metrics=NULL) {
  if (!is(object, "H2OBinomialMetrics")) {
    # Multinomial/ordinal metrics carry a single precomputed table.
    if (is(object, "H2OMultinomialMetrics") || is(object, "H2OOrdinalMetrics"))
      return(object@metrics$cm$table)
    warning(paste0("No Confusion Matrices for ", class(object)))
    return(NULL)
  }
  # H2OBinomial case: default to the max-F1 threshold when nothing is requested.
  if (is.null(metrics) && is.null(thresholds)) {
    metrics = c("f1")
  }
  # Normalize both arguments to lists.
  if (is(metrics, "list")) metrics_list = metrics
  else {
    if (is.null(metrics)) metrics_list = list()
    else metrics_list = list(metrics)
  }
  if (is(thresholds, "list")) thresholds_list = thresholds
  else {
    if (is.null(thresholds)) thresholds_list = list()
    else thresholds_list = list(thresholds)
  }
  # Validate the requested thresholds and metrics.
  # (Anonymous functions here: the original bound them to `f` as a side effect.)
  if (!all(sapply(thresholds_list, function(x) is.numeric(x) && x >= 0 && x <= 1)))
    stop("All thresholds must be numbers between 0 and 1 (inclusive).")
  if (!all(sapply(metrics_list, function(x) x %in% .h2o.maximizing_metrics)))
    stop(paste("The only allowable metrics are ", paste(.h2o.maximizing_metrics, collapse=', ')))
  # Make one big list that combines the thresholds and metric-optimal thresholds.
  metrics_thresholds = lapply(metrics_list, function(x) h2o.find_threshold_by_max_metric(object, x))
  thresholds_list <- append(thresholds_list, metrics_thresholds)
  first_metrics_thresholds_offset <- length(thresholds_list) - length(metrics_thresholds)
  d <- object@metrics$domain
  m <- lapply(seq_along(thresholds_list), function(i) {
    t <- thresholds_list[[i]]
    row <- h2o.find_row_by_threshold(object, t)
    if (is.null(row)) NULL
    else {
      tns <- row$tns; fps <- row$fps; fns <- row$fns; tps <- row$tps
      rnames <- c(d, "Totals")
      cnames <- c(d, "Error", "Rate")
      col1 <- c(tns, fns, tns+fns)
      col2 <- c(fps, tps, fps+tps)
      col3 <- c(fps/(fps+tns), fns/(fns+tps), (fps+fns)/(fps+tns+fns+tps))
      col4 <- c(paste0(" =", fps, "/", fps+tns),
                paste0(" =", fns, "/", fns+tps),
                paste0(" =", fns+fps, "/", fps+tns+fns+tps))
      fmts <- c("%i", "%i", "%f", "%s")
      tbl <- data.frame(col1, col2, col3, col4)
      colnames(tbl) <- cnames
      rownames(tbl) <- rnames
      header <- "Confusion Matrix (vertical: actual; across: predicted) "
      if (t %in% metrics_thresholds) {
        m <- metrics_list[i - first_metrics_thresholds_offset]
        if (length(m) > 1) m <- m[[1]]
        header <- paste(header, "for max", m, "@ threshold =", t)
      } else {
        header <- paste(header, "@ threshold =", row$threshold)
      }
      attr(tbl, "header") <- header
      attr(tbl, "formats") <- fmts
      oldClass(tbl) <- c("H2OTable", "data.frame")
      tbl
    }
  })
  if (length(m) == 1L) return(m[[1L]])
  m
})

#' Plot an H2O Model
#'
#' Plots training set (and validation set if available) scoring history for an H2O Model
#'
#' This method dispatches on the type of H2O model to select the correct
#' scoring history. The \code{timestep} and \code{metric} arguments are restricted to what is
#' available in the scoring history for a particular type of model.
#'
#' @param x A fitted \linkS4class{H2OModel} object for which the scoring history plot is desired.
#' @param timestep A unit of measurement for the x-axis.
#' @param metric A unit of measurement for the y-axis.
#' @param ... additional arguments to pass on.
#' @return Returns a scoring history plot.
#' @seealso \code{\link{h2o.deeplearning}}, \code{\link{h2o.gbm}},
#'          \code{\link{h2o.glm}}, \code{\link{h2o.randomForest}} for model
#'          generation in h2o.
#' @examples
#' \dontrun{
#' if (requireNamespace("mlbench", quietly=TRUE)) {
#' library(h2o)
#' h2o.init()
#'
#' df <- as.h2o(mlbench::mlbench.friedman1(10000, 1))
#' rng <- h2o.runif(df, seed = 1234)
#' train <- df[rng < 0.8,]
#' valid <- df[rng >= 0.8,]
#'
#' gbm <- h2o.gbm(x = 1:10, y = "y", training_frame = train, validation_frame = valid,
#'                ntrees = 500, learn_rate = 0.01, score_each_iteration = TRUE)
#' plot(gbm)
#' plot(gbm, timestep = "duration", metric = "deviance")
#' plot(gbm, timestep = "number_of_trees", metric = "deviance")
#' plot(gbm, timestep = "number_of_trees", metric = "rmse")
#' plot(gbm, timestep = "number_of_trees", metric = "mae")
#' }
#' }
#' @export
plot.H2OModel <- function(x, timestep = "AUTO", metric = "AUTO", ...) {
  df <- as.data.frame(x@model$scoring_history)
  # Accept upper-case metric/timestep names by lower-casing anything but "AUTO".
  if (metric != "AUTO") {
    metric <- tolower(metric)
  }
  if (timestep != "AUTO") {
    timestep <- tolower(timestep)
  }
  if (x@algorithm == "glm") {
    # GLM scoring history is different from the other algos and only has one
    # timestep option (`iteration`); binomial and regression output match.
    timestep <- "iteration"
    if (metric == "AUTO") {
      metric <- "log_likelihood"
    } else if (!(metric %in% c("log_likelihood", "objective"))) {
      stop("for GLM, metric must be one of: log_likelihood, objective")
    }
    graphics::plot(df$iteration, df[, c(metric)], type = "l",
                   xlab = timestep, ylab = metric,
                   main = "Validation Scoring History", ...)
  } else if (x@algorithm == "glrm") {
    timestep <- "iteration"
    if (metric == "AUTO") {
      metric <- "objective"
    } else if (!(metric %in% c("step_size", "objective"))) {
      stop("for GLRM, metric must be one of: step_size, objective")
    }
    graphics::plot(df$iteration, df[, c(metric)], type = "l",
                   xlab = timestep, ylab = metric,
                   main = "Objective Function Value per Iteration", ...)
  } else if (x@algorithm %in% c("deeplearning", "drf", "gbm")) {
    # Pick/validate the metric according to the model category.
    if (is(x, "H2OBinomialModel")) {
      if (metric == "AUTO") {
        metric <- "logloss"
      } else if (!(metric %in% c("logloss", "auc", "classification_error", "rmse"))) {
        stop("metric for H2OBinomialModel must be one of: logloss, auc, classification_error, rmse")
      }
    } else if (is(x, "H2OMultinomialModel") || is(x, "H2OOrdinalModel")) {
      if (metric == "AUTO") {
        metric <- "classification_error"
      } else if (!(metric %in% c("logloss", "classification_error", "rmse"))) {
        stop("metric for H2OMultinomialModel/H2OOrdinalModel must be one of: logloss, classification_error, rmse")
      }
    } else if (is(x, "H2ORegressionModel")) {
      if (metric == "AUTO") {
        metric <- "rmse"
      } else if (!(metric %in% c("rmse", "deviance", "mae"))) {
        stop("metric for H2ORegressionModel must be one of: rmse, mae, or deviance")
      }
    } else {
      stop("Must be one of: H2OBinomialModel, H2OMultinomialModel, H2OOrdinalModel or H2ORegressionModel")
    }
    # Pick/validate the timestep according to the algorithm.
    if (x@algorithm %in% c("gbm", "drf")) {
      if (timestep == "AUTO") {
        timestep <- "number_of_trees"
      } else if (!(timestep %in% c("duration", "number_of_trees"))) {
        stop("timestep for gbm or drf must be one of: duration, number_of_trees")
      }
    } else { # x@algorithm == "deeplearning"
      # Drop the first row of DL scoring history since it contains NAs & NaNs.
      if (df$samples[1] == 0) {
        df <- df[-1, ]
      }
      if (timestep == "AUTO") {
        timestep <- "epochs"
      } else if (!(timestep %in% c("epochs", "samples", "duration"))) {
        stop("timestep for deeplearning must be one of: epochs, samples, duration")
      }
    }
    training_metric <- sprintf("training_%s", metric)
    validation_metric <- sprintf("validation_%s", metric)
    if (timestep == "duration") {
      # base::trimws is not implemented for earlier versions of R, so roll our own.
      trim <- function(ss) gsub("^\\s+|\\s+$", "", ss)
      tt <- trim(df[2, c("duration")])
      # Parse units of measurement out of the duration strings, e.g. "1.234 sec".
      dur_colname <- sprintf("duration_%s", strsplit(tt, " ")[[1]][2])
      df[, c(dur_colname)] <- apply(as.matrix(df[, c("duration")]), 1,
                                    function(v) as.numeric(strsplit(trim(v), " ")[[1]][1]))
      timestep <- dur_colname
    }
    if (validation_metric %in% names(df)) {
      # Training and validation scoring history: sync up the y axes.
      ylim <- range(c(df[, c(training_metric)], df[, c(validation_metric)]))
      if (sum(is.na(ylim)) > 1) {
        ylim <- c(0.0, 1.0)
      }
      graphics::plot(df[, c(timestep)], df[, c(training_metric)], type = "l",
                     xlab = "", ylab = "", axes = FALSE, main = "Scoring History",
                     col = "blue", ylim = ylim, ...)
      graphics::par(new = TRUE)
      graphics::plot(df[, c(timestep)], df[, c(validation_metric)], type = "l",
                     xlab = timestep, ylab = metric, col = "orange", ylim = ylim, ...)
      graphics::legend("topright", legend = c("Training", "Validation"),
                       col = c("blue", "orange"), lty = c(1, 1))
    } else {
      # Training scoring history only.
      ylim <- range(c(df[, c(training_metric)]))
      if (sum(is.na(ylim)) > 1) {
        ylim <- c(0.0, 1.0)
      }
      graphics::plot(df[, c(timestep)], df[, c(training_metric)], type = "l",
                     xlab = timestep, ylab = training_metric,
                     main = "Training Scoring History", col = "blue", ylim = ylim)
    }
  } else {
    # Algo is not glm, glrm, deeplearning, drf, or gbm.
    stop("Plotting not implemented for this type of model")
  }
}

#' Plot Variable Importances
#'
#' Plot a trained model's variable importances.
#'
#' @param model A trained model (accepts a trained random forest, GBM,
#'        or deep learning model, will use \code{\link{h2o.std_coef_plot}}
#'        for a trained GLM)
#' @param num_of_features The number of features shown in the plot (default is 10 or all if less than 10).
#' @seealso \code{\link{h2o.std_coef_plot}} for GLM.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' h2o.varimp_plot(model)
#'
#' # for deep learning set the variable_importance parameter to TRUE
#' iris_hf <- as.h2o(iris)
#' iris_dl <- h2o.deeplearning(x = 1:4, y = 5, training_frame = iris_hf,
#'                             variable_importances = TRUE)
#' h2o.varimp_plot(iris_dl)
#' }
#' @export
h2o.varimp_plot <- function(model, num_of_features = NULL) {
  # Pull the variable importance table from the model.
  vi <- h2o.varimp(model)
  # Default to 10 features (or all, when fewer than 10 exist); otherwise
  # num_of_features must be a positive whole number.
  if (is.null(num_of_features)) {
    feature_count <- length(vi$variable)
    num_of_features <- ifelse(feature_count <= 10, length(vi$variable), 10)
  } else if ((num_of_features != round(num_of_features)) || (num_of_features <= 0)) {
    stop("num_of_features must be an integer greater than 0")
  }
  # Title reflects the algorithm that produced the model.
  if (model@algorithm[1] == "deeplearning") {
    title <- "Variable Importance: Deep Learning"
  } else {
    title <- paste("Variable Importance: ", toupper(model@algorithm[1]), sep = "")
  }
  # Use the longest y label to widen the left margin so labels don't get cut off.
  ylabels <- vi$variable
  ymargin <- max(strwidth(ylabels, "inch") + 0.4, na.rm = TRUE)
  par(mai = c(1.02, ymargin, 0.82, 0.42))
  if (num_of_features == 1) {
    # Single bar: constrain width/limits so the lone bar looks reasonable.
    barplot(rev(head(vi$scaled_importance, n = num_of_features)),
            names.arg = rev(head(vi$variable, n = num_of_features)),
            width = 0.2, space = 1, horiz = TRUE, las = 2,
            ylim = c(0, 2), xlim = c(0, 1), axes = TRUE,
            col = '#1F77B4', main = title)
  } else if (num_of_features > 1) {
    barplot(rev(head(vi$scaled_importance, n = num_of_features)),
            names.arg = rev(head(vi$variable, n = num_of_features)),
            space = 1, las = 2, horiz = TRUE,
            col = '#1F77B4', # blue
            main = title)
  }
}

#' Plot Standardized Coefficient Magnitudes
#'
#' Plot a GLM model's standardized coefficient magnitudes.
#'
#' @param model A trained generalized linear model
#' @param num_of_features The number of features to be shown in the plot
#' @seealso \code{\link{h2o.varimp_plot}} for variable importances plot of
#'          random forest, GBM, deep learning.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial",
#'                         nfolds = 0, alpha = 0.5, lambda_search = FALSE)
#' h2o.std_coef_plot(prostate_glm)
#' }
#' @export
h2o.std_coef_plot <- function(model, num_of_features = NULL) {
  # Only GLM has standardized coefficients.
  if (model@algorithm[1] != "glm") stop("Warning: model must be a GLM")
  maxcoeff <- 1
  if (model@model$model_summary["family"] == "multinomial") {
    # Multinomial models expose precomputed magnitudes.
    coeff_table <- model@model$standardized_coefficient_magnitudes
    sorted_table <- coeff_table[order(abs(coeff_table$coefficients)), ]
    norm_coef <- sorted_table$coefficients
    sort_norm <- norm_coef
    maxcoeff <- max(norm_coef)
  } else {
    # Drop the intercept, then sort by |standardized coefficient|.
    coeff_table_complete <- model@model$coefficients_table
    coeff_table <- coeff_table_complete[coeff_table_complete$names != "Intercept", ]
    sorted_table <- coeff_table[order(abs(coeff_table$standardized_coefficients)), ]
    norm_coef <- sorted_table$standardized_coefficients
    sort_norm <- abs(sorted_table$standardized_coefficients)
  }
  labels <- sorted_table$names
  # num_of_features defaults to all features; otherwise a positive whole number.
  if (is.null(num_of_features)) {
    num_of_features <- length(norm_coef)
  } else if ((num_of_features != round(num_of_features)) || (num_of_features <= 0)) {
    stop("num_of_features must be an integer greater than 0")
  }
  # Vectorized color coding by coefficient sign (blue positive, orange negative);
  # previously built by growing vectors inside for-loops.
  color_code <- ifelse(norm_coef >= 0, "#1F77B4", "#FF7F0E")
  color_sign <- ifelse(norm_coef >= 0, "Positive", "Negative")
  # Use the longest y label to widen the left margin so labels don't get cut off.
  ylabels <- labels
  ymargin <- max(strwidth(ylabels, "inch") + 0.4, na.rm = TRUE)
  par(mai = c(1.02, ymargin, 0.82, 0.42))
  if (num_of_features == 1) {
    # Single bar: constrain width/limits so the lone bar looks reasonable.
    barplot(rev(sort_norm)[num_of_features],
            names.arg = rev(labels)[num_of_features],
            width = 0.2, space = 1, horiz = TRUE, las = 1,
            ylim = c(0, 2), xlim = c(0, maxcoeff),
            col = rev(color_code)[num_of_features],
            main = "Standardized Coef. Magnitudes")
  } else {
    barplot(tail(sort_norm, n = num_of_features),
            names.arg = tail(labels, n = num_of_features),
            legend.text = TRUE, space = 1, horiz = TRUE, las = 1,
            col = tail(color_code, n = num_of_features),
            xlim = c(0, maxcoeff),
            main = "Standardized Coef. Magnitudes")
  }
  # Legend adapts to whichever signs are present among the plotted bars.
  legend('bottomright', legend = unique(tail(color_sign, n = num_of_features)),
         col = unique(tail(color_code, n = num_of_features)), pch = 20)
}

#' @export
plot.H2OBinomialMetrics <- function(x, type = "roc", main, ...) {
  # TODO: add more types (i.e. cutoffs)
  if (!type %in% c("roc", "pr")) stop("type must be 'roc' or 'pr'")
  if (type == "roc") {
    # BUGFIX: abbreviations were swapped ("False Positive Rate (TPR)" /
    # "True Positive Rate (FPR)"); x is FPR, y is TPR.
    xaxis <- "False Positive Rate (FPR)"
    yaxis <- "True Positive Rate (TPR)"
    if (missing(main)) {
      main <- "Receiver Operating Characteristic curve"
      if (x@on_train) {
        main <- paste(main, "(on train)")
      } else if (x@on_valid) {
        main <- paste(main, "(on valid)")
      }
    }
    xdata <- x@metrics$thresholds_and_metric_scores$fpr
    ydata <- x@metrics$thresholds_and_metric_scores$tpr
    graphics::plot(xdata, ydata, main = main, xlab = xaxis, ylab = yaxis,
                   ylim = c(0, 1), xlim = c(0, 1), type = 'l', lty = 2,
                   col = 'blue', lwd = 2, panel.first = grid())
    graphics::abline(0, 1, lty = 2)
  } else if (type == "pr") {
    # BUGFIX: recall is TP/(TP+FN), not TP/(TP+FP); precision is not "TPR".
    xaxis <- "Recall (TP/(TP+FN))"
    yaxis <- "Precision (TP/(TP+FP))"
    if (missing(main)) {
      main <- "Precision Recall curve"
      if (x@on_train) {
        main <- paste(main, "(on train)")
      } else if (x@on_valid) {
        main <- paste(main, "(on valid)")
      }
    }
    xdata <- rev(x@metrics$thresholds_and_metric_scores$recall)
    ydata <- rev(x@metrics$thresholds_and_metric_scores$precision)
    graphics::plot(xdata, ydata, main = main, xlab = xaxis, ylab = yaxis,
                   ylim = c(0, 1), xlim = c(0, 1), type = 'l', lty = 2,
                   col = 'blue', lwd = 2, panel.first = grid())
  }
}

#' @export
screeplot.H2ODimReductionModel <- function(x, npcs, type = "barplot", main, ...)
{ if(x@algorithm != "pca") stop("x must be an H2O PCA model") if(missing(npcs)) npcs = min(10, x@model$parameters$k) else if(!is.numeric(npcs) || npcs < 1 || npcs > x@model$parameters$k) stop(paste("npcs must be a positive integer between 1 and", x@model$parameters$k, "inclusive")) sdevH2O <- h2o.sdev(x) if(missing(main)) main = paste("h2o.prcomp(", strtrim(x@parameters$training_frame, 20), ")", sep="") if(type == "barplot") barplot(sdevH2O[1:npcs]^2, main = main, ylab = "Variances", ...) else if(type == "lines") lines(sdevH2O[1:npcs]^2, main = main, ylab = "Variances", ...) else stop("type must be either 'barplot' or 'lines'") } #' #' Retrieve the standard deviations of principal components #' #' @param object An \linkS4class{H2ODimReductionModel} object. #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' #' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv" #' cars <- h2o.importFile(f) #' predictors <- c("displacement", "power", "weight", "acceleration", "year") #' cars_pca <- h2o.prcomp(cars, transform = "STANDARDIZE", #' k = 3, x = predictors, seed = 12345) #' h2o.sdev(cars_pca) #' } #' @export h2o.sdev <- function(object) { if(!is(object, "H2ODimReductionModel") || object@algorithm != "pca") stop("object must be an H2O PCA model") as.numeric(object@model$importance[1,]) } # extract "bite size" pieces from a model .model.parts <- function(object) { o <- object m <- object@model tm <- object@model$training_metrics vm <- object@model$validation_metrics xm <- object@model$cross_validation_metrics xms <- object@model$cross_validation_metrics_summary if( !is.null(vm@metrics) && !is.null(xm@metrics) ) return( list(o=o,m=m,tm=tm,vm= vm,xm= xm,xms=xms) ) if( is.null(vm@metrics) && !is.null(xm@metrics) ) return( list(o=o,m=m,tm=tm,vm=NULL,xm= xm,xms=xms) ) if( !is.null(vm@metrics) && is.null(xm@metrics) ) return( list(o=o,m=m,tm=tm,vm= vm,xm=NULL,xms=NULL) ) return( list(o=o,m=m,tm=tm,vm=NULL,xm=NULL,xms=NULL) ) } 
# Emit a standard "no validation metrics" warning and return NULL.
.warn.no.validation <- function() {
  warning("No validation metrics available.", call.=FALSE)
  NULL
}

# Emit a standard "no cross-validation metrics" warning and return NULL.
.warn.no.cross.validation <- function() {
  warning("No cross-validation metrics available.", call.=FALSE)
  NULL
}

# TRUE when the algorithm trains against a response column; clustering,
# matrix factorization, PCA and deep-learning autoencoders are unsupervised.
.isSupervised <- function(algo, params) {
  if (algo %in% c("kmeans", "glrm", "pca") ||
      (algo == "deeplearning" && !is.null(params$autoencoder) && params$autoencoder)) {
    FALSE
  } else {
    TRUE
  }
}

# Map user-facing algorithm aliases onto the canonical REST names
# (currently only "randomForest" -> "drf").
.h2o.unifyAlgoName <- function(algo) {
  if (algo == "randomForest") "drf" else algo
}

# Ask the backend which REST schema version the given algorithm's
# model builder exposes.
.h2o.getAlgoVersion <- function(algo, h2oRestApiVersion = .h2o.__REST_API_VERSION) {
  builders <- .h2o.__remoteSend(method = "GET",
                                h2oRestApiVersion = h2oRestApiVersion,
                                .h2o.__MODEL_BUILDERS(algo))$model_builders
  builders[[algo]][["__meta"]]$schema_version
}

#' Tabulation between Two Columns of an H2OFrame
#'
#' Simple Co-Occurrence based tabulation of X vs Y, where X and Y are two Vecs in a given dataset.
#' Uses histogram of given resolution in X and Y.
#' Handles numerical/categorical data and missing values. Supports observation weights.
#'
#' @param data An H2OFrame object.
#' @param x predictor column
#' @param y response column
#' @param weights_column (optional) observation weights column
#' @param nbins_x number of bins for predictor column
#' @param nbins_y number of bins for response column
#' @return Returns two TwoDimTables of 3 columns each
#'         count_table: X Y counts
#'         response_table: X meanY counts
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' df <- as.h2o(iris)
#' tab <- h2o.tabulate(data = df, x = "Sepal.Length", y = "Petal.Width",
#'                     weights_column = NULL, nbins_x = 10, nbins_y = 10)
#' plot(tab)
#' }
#' @export
h2o.tabulate <- function(data, x, y, weights_column = NULL, nbins_x = 50, nbins_y = 50 ) {
  # Validate that x and y name existing columns of `data`.
  args <- .verify_datacols(data, c(x, y))
  if (!is.numeric(nbins_x)) stop("`nbins_x` must be a positive number")
  if (!is.numeric(nbins_y)) stop("`nbins_y` must be a positive number")

  # Assemble the payload for the Tabulate REST endpoint.
  payload <- list()
  payload$dataset <- attr(data, "id")
  payload$predictor <- args$cols[1]
  payload$response <- args$cols[2]
  if (!missing(weights_column)) payload$weight <- weights_column
  payload$nbins_predictor <- nbins_x
  payload$nbins_response <- nbins_y

  res <- .h2o.__remoteSend(method = "POST", h2oRestApiVersion = 99,
                           page = "Tabulate", .params = payload)

  # Wrap the two returned tables in an S3 object so plot() dispatches to
  # plot.H2OTabulate.
  out <- list(count_table = res$count_table,
              response_table = res$response_table,
              cols = args$cols)
  oldClass(out) <- c("H2OTabulate", "list")
  out
}

#' Plot an H2O Tabulate Heatmap
#'
#' Plots the simple co-occurrence based tabulation of X vs Y as a heatmap, where X and Y are two Vecs in a given dataset. This function requires suggested ggplot2 package.
#'
#' @param x An H2OTabulate object for which the heatmap plot is desired.
#' @param xlab A title for the x-axis. Defaults to what is specified in the given H2OTabulate object.
#' @param ylab A title for the y-axis. Defaults to what is specified in the given H2OTabulate object.
#' @param base_size Base font size for plot.
#' @param ... additional arguments to pass on.
#' @return Returns a ggplot2-based heatmap of co-occurance.
#' @seealso \code{\link{h2o.tabulate}}
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' df <- as.h2o(iris)
#' tab <- h2o.tabulate(data = df, x = "Sepal.Length", y = "Petal.Width",
#'                     weights_column = NULL, nbins_x = 10, nbins_y = 10)
#' plot(tab)
#' }
#' @export
plot.H2OTabulate <- function(x, xlab = x$cols[1], ylab = x$cols[2], base_size = 12, ...) {
  if (!inherits(x, "H2OTabulate")) {
    stop("Must be an H2OTabulate object")
  }
  # ggplot2 is only a suggested dependency, so probe for it at call time.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("In order to plot.H2OTabulate you must have ggplot2 package installed")
  }

  # Pull small counts table into R memory to plot
  df <- as.data.frame(x$count_table)
  names(df) <- c("c1", "c2", "counts")

  # Reorder the levels for better plotting: if a column is numeric-like,
  # order its unique values numerically; otherwise order them lexically.
  # suppressWarnings() hides the NA-coercion warning used as the numeric probe.
  if (suppressWarnings(is.na(sum(as.numeric(df$c1))))) {
    c1_order <- order(unique(df$c1))
  } else {
    c1_order <- order(unique(as.numeric(df$c1)))
  }
  if (suppressWarnings(is.na(sum(as.numeric(df$c2))))) {
    c2_order <- order(unique(df$c2))
  } else {
    c2_order <- order(unique(as.numeric(df$c2)))
  }
  c1_labels <- unique(df$c1)
  c2_labels <- unique(df$c2)
  # Apply the computed ordering via explicit factor levels.
  df$c1 <- factor(df$c1, levels = c1_labels[c1_order])
  df$c2 <- factor(df$c2, levels = c2_labels[c2_order])

  # Plot heatmap
  c1 <- c2 <- counts <- NULL #set these to pass CRAN checks w/o warnings
  (p <- ggplot2::ggplot(df, ggplot2::aes(c1, c2)) +
     ggplot2::geom_tile(ggplot2::aes(fill = counts), colour = "white") +
     ggplot2::scale_fill_gradient(low = "white", high = "steelblue"))
  # Adjust the plot
  p <- p + ggplot2::theme_grey(base_size = base_size) +
    ggplot2::labs(x = xlab, y = ylab) +
    ggplot2::scale_x_discrete(expand = c(0, 0)) +
    ggplot2::scale_y_discrete(expand = c(0, 0)) +
    ggplot2::theme(legend.position = "none",
                   axis.ticks = ggplot2::element_blank(),
                   axis.text.x = ggplot2::element_text(size = base_size * 0.8, angle = 330,
                                                       hjust = 0, colour = "grey50"))
  # Return a ggplot object
  return(p)
}

#'
#' Retrieve the cross-validation models
#'
#' @param object An \linkS4class{H2OModel} object.
#' @return Returns a list of H2OModel objects
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_gbm <- h2o.gbm(x = predictors, y = response, training_frame = train,
#'                     nfolds = 5, keep_cross_validation_models = TRUE, seed = 1234)
#' h2o.cross_validation_models(cars_gbm)
#' }
#' @export
h2o.cross_validation_models <- function(object) {
  if(!is(object, "H2OModel")) stop("object must be an H2O model")
  # NULL when the model was built without keep_cross_validation_models = TRUE.
  if (is.null(object@model$cross_validation_models)) return(NULL)
  # Resolve each stored model key to a full H2OModel.
  lapply(object@model$cross_validation_models, function(x) h2o.getModel(x$name))
}

#'
#' Retrieve the cross-validation fold assignment
#'
#' @param object An \linkS4class{H2OModel} object.
#' @return Returns a H2OFrame
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_gbm <- h2o.gbm(x = predictors, y = response, training_frame = train,
#'                     nfolds = 5, keep_cross_validation_fold_assignment = TRUE, seed = 1234)
#' h2o.cross_validation_fold_assignment(cars_gbm)
#' }
#' @export
h2o.cross_validation_fold_assignment <- function(object) {
  if (!is(object, "H2OModel")) stop("object must be an H2O model")
  # Only populated when the model was built with
  # keep_cross_validation_fold_assignment = TRUE.
  fold_info <- object@model$cross_validation_fold_assignment
  if (is.null(fold_info)) return(NULL)
  h2o.getFrame(fold_info$name)
}

#'
#' Retrieve the cross-validation holdout predictions
#'
#' @param object An \linkS4class{H2OModel} object.
#' @return Returns a H2OFrame
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement","power","weight","acceleration","year")
#' response <- "economy_20mpg"
#' cars_split <- h2o.splitFrame(data = cars,ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_gbm <- h2o.gbm(x = predictors, y = response, training_frame = train,
#'                     nfolds = 5, keep_cross_validation_predictions = TRUE, seed = 1234)
#' h2o.cross_validation_holdout_predictions(cars_gbm)
#' }
#' @export
h2o.cross_validation_holdout_predictions <- function(object) {
  if (!is(object, "H2OModel")) stop("object must be an H2O model")
  # Only populated when the model was built with
  # keep_cross_validation_predictions = TRUE.
  holdout <- object@model$cross_validation_holdout_predictions
  if (is.null(holdout)) return(NULL)
  h2o.getFrame(holdout$name)
}

#'
#' Retrieve the cross-validation predictions
#'
#' @param object An \linkS4class{H2OModel} object.
#' @return Returns a list of H2OFrame objects
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_gbm <- h2o.gbm(x = predictors, y = response, training_frame = train,
#'                     nfolds = 5, keep_cross_validation_predictions = TRUE, seed = 1234)
#' h2o.cross_validation_predictions(cars_gbm)
#' }
#' @export
h2o.cross_validation_predictions <- function(object) {
  if (!is(object, "H2OModel")) stop("object must be an H2O model")
  # One prediction frame per fold; only populated when the model was built
  # with keep_cross_validation_predictions = TRUE.
  per_fold <- object@model$cross_validation_predictions
  if (is.null(per_fold)) return(NULL)
  lapply(per_fold, function(p) h2o.getFrame(p$name))
}

#' Partial Dependence Plots
#'
#' Partial dependence plot gives a graphical depiction of the marginal effect of a variable on the response. The effect
#' of a variable is measured in change in the mean response. Note: Unlike randomForest's partialPlot when plotting
#' partial dependence the mean response (probabilities) is returned rather than the mean of the log class probability.
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param data An H2OFrame object used for scoring and constructing the plot.
#' @param cols Feature(s) for which partial dependence will be calculated.
#' @param destination_key An key reference to the created partial dependence tables in H2O.
#' @param nbins Number of bins used. For categorical columns make sure the number of bins exceeds the level count.
#'        If you enable add_missing_NA, the returned length will be nbin+1.
#' @param plot A logical specifying whether to plot partial dependence table.
#' @param plot_stddev A logical specifying whether to add std err to partial dependence plot.
#' @param weight_column A string denoting which column of data should be used as the weight column.
#' @param include_na A logical specifying whether missing value should be included in the Feature values.
#' @param user_splits A two-level nested list containing user defined split points for pdp plots for each column.
#'        If there are two columns using user defined split points, there should be two lists in the nested list.
#'        Inside each list, the first element is the column name followed by values defined by the user.
#' @param col_pairs_2dpdp A two-level nested list like this: col_pairs_2dpdp = list(c("col1_name", "col2_name"),
#'        c("col1_name","col3_name"), ...,) where a 2D partial plots will be generated for col1_name, col2_name pair, for
#'        col1_name, col3_name pair and whatever other pairs that are specified in the nested list.
#' @param save_to Fully qualified prefix of the image files the resulting plots should be saved to, e.g. '/home/user/pdp'.
#'        Plots for each feature are saved separately in PNG format, each file receives a suffix equal to the corresponding feature name, e.g. `/home/user/pdp_AGE.png`.
#'        If the files already exists, they will be overridden. Files are only saved if plot = TRUE (default).
#' @return Plot and list of calculated mean response tables for each feature requested.
#' @param row_index Row for which partial dependence will be calculated instead of the whole input frame.
#' @param targets Target classes for multinomial model.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate[, "CAPSULE"] <- as.factor(prostate[, "CAPSULE"] )
#' prostate[, "RACE"] <- as.factor(prostate[, "RACE"] )
#' prostate_gbm <- h2o.gbm(x = c("AGE", "RACE"),
#'                         y = "CAPSULE",
#'                         training_frame = prostate,
#'                         ntrees = 10,
#'                         max_depth = 5,
#'                         learn_rate = 0.1)
#' h2o.partialPlot(object = prostate_gbm, data = prostate, cols = c("AGE", "RACE"))
#'
#' iris_hex <- as.h2o(iris)
#' iris_gbm <- h2o.gbm(x = c(1:4), y = 5, training_frame = iris_hex)
#'
#' # one target class
#' h2o.partialPlot(object = iris_gbm, data = iris_hex, cols="Petal.Length", targets=c("setosa"))
#' # three target classes
#' h2o.partialPlot(object = iris_gbm, data = iris_hex, cols="Petal.Length",
#'                 targets=c("setosa", "virginica", "versicolor"))
#' }
#' @export
h2o.partialPlot <- function(object, data, cols, destination_key, nbins=20, plot = TRUE, plot_stddev = TRUE,
                            weight_column=-1, include_na=FALSE, user_splits=NULL, col_pairs_2dpdp=NULL,
                            save_to=NULL, row_index=-1, targets=NULL) {
  # ---- argument validation -------------------------------------------------
  if(!is(object, "H2OModel")) stop("object must be an H2Omodel")
  if( is(object, "H2OOrdinalModel")) stop("object must be a regression model or binary and multinomial classfier")
  if(!is(data, "H2OFrame")) stop("data must be H2OFrame")
  if(!is.numeric(nbins) | !(nbins > 0) ) stop("nbins must be a positive numeric")
  if(!is.logical(plot)) stop("plot must be a logical value")
  if(!is.logical(plot_stddev)) stop("plot must be a logical value")
  if(!is.logical(include_na)) stop("add_missing_NA must be a logical value")

  # Multinomial models require explicit target classes.
  if((is(object, "H2OMultinomialModel"))){
    if(is.null(targets)) stop("targets parameter has to be set for multinomial classification")
    for(i in seq_along(targets)){
      if(!is.character(targets[i])) stop("targets parameter must be a list of string values")
    }
  }

  noPairs <- missing(col_pairs_2dpdp)
  noCols <- missing(cols)
  if(noCols && noPairs) cols <- object@parameters$x # set to default only if both are missing
  y <- object@parameters$y
  numCols <- 0
  numColPairs <- 0
  if (!missing(cols)) { # check valid cols in cols for 1d pdp
    x <- cols
    args <- .verify_dataxy(data, x, y)
  }
  cpairs <- NULL
  if (!missing(col_pairs_2dpdp)) { # verify valid cols for 2d pdp
    for (onePair in col_pairs_2dpdp) {
      pargs <- .verify_dataxy(data, onePair, y)
      cpairs <- c(cpairs, paste0("[", paste(pargs$x, collapse = ','), "]"))
    }
    numColPairs <- length(cpairs)
  }
  # weight_column must be a column name; -1 is the "no weights" sentinel.
  if (is.numeric(weight_column) && (weight_column != -1)) {
    stop("weight_column should be a column name of your data frame.")
  } else if (is.character(weight_column)) { # weight_column_index is column name
    if (!weight_column %in% h2o.names(data))
      stop("weight_column_index should be one of your columns in your data frame.")
    else
      weight_column <- match(weight_column, h2o.names(data)) - 1  # 0-based index for REST
  }
  if (!is.numeric(row_index)) {
    stop("row_index should be numeric.")
  }

  # ---- build the REST payload ----------------------------------------------
  parms <- list()
  if (!missing(col_pairs_2dpdp)) {
    parms$col_pairs_2dpdp <- paste0("[", paste(cpairs, collapse = ','), "]")
  }
  if (!missing(cols)) {
    parms$cols <- paste0("[", paste(args$x, collapse = ','), "]")
    numCols <- length(cols)
  }
  # Number of 1D tables returned: one per column, times one per target class
  # for multinomial models. 2D tables follow after these in the response.
  if(is.null(targets)){
    num_1d_pp_data <- numCols
  } else {
    num_1d_pp_data <- numCols * length(targets)
  }
  noCols <- missing(cols)  # refresh: cols may have been defaulted above
  parms$model_id <- attr(object, "model_id")
  parms$frame_id <- attr(data, "id")
  parms$nbins <- nbins
  parms$weight_column_index <- weight_column
  parms$add_missing_na <- include_na
  parms$row_index <- row_index
  if (is.null(user_splits) || length(user_splits) == 0) {
    parms$user_cols <- NULL
    parms$user_splits <- NULL
    parms$num_user_splits <- NULL
  } else {
    # Flatten the nested user_splits list into three parallel vectors:
    # column names, split values (enum levels become 0-based indices), counts.
    user_cols <- c()
    user_values <- c()
    user_num_splits <- c()
    column_names <- h2o.names(data)
    for (ind in seq_along(user_splits)) {
      aList <- user_splits[[ind]]
      csname <- aList[1]
      if (csname %in% column_names) {
        if (h2o.isnumeric(data[csname]) || h2o.isfactor(data[csname])) {
          nVal <- length(aList) - 1
          if (h2o.isfactor(data[csname])) {
            domains <- h2o.levels(data[csname]) # enum values
            tempVal <- aList[2:length(aList)]
            intVals <- c(1:length(tempVal))
            for (eleind in c(1:nVal)) {
              eleIndex <- which(domains == tempVal[eleind])
              if (eleIndex > 0) {
                intVals[eleind] <- which(domains == tempVal[eleind]) - 1
              } else {
                stop("Illegal enum value encountered. To include missing values in your feature values, set include_na to TRUE")
              }
            }
            user_values <- c(user_values, intVals)
          } else {
            vals <- as.numeric(unlist(strsplit(aList[2:length(aList)], ",")))
            user_values <- c(user_values, vals)
          }
          user_num_splits <- c(user_num_splits, nVal)
          user_cols <- c(user_cols, csname)
        } else {
          stop ("Partial dependency plots are generated for numerical and categorical columns only.")
        }
      } else {
        stop( "column names used in user_splits are not valid.  They should be chosen from the columns of your data set" )
      }
    }
    parms$user_cols <- paste0("[", paste(user_cols, collapse=','), "]")
    parms$user_splits <- paste0("[", paste(user_values, collapse=','), "]")
    parms$num_user_splits <- paste0("[", paste(user_num_splits, collapse=','), "]")
  }
  if(!is.null(targets)) {
    parms$targets <- paste0("[", paste(targets, collapse = ','), "]")
  }
  if(!missing(destination_key)) parms$destination_key <- destination_key

  # ---- submit job and fetch results ----------------------------------------
  res <- .h2o.__remoteSend(method = "POST", h2oRestApiVersion = 3, page = "PartialDependence/", .params = parms)
  .h2o.__waitOnJob(res$key$name)
  url <- gsub("/3/", "", res$dest$URL)
  res <- .h2o.__remoteSend(url, method = "GET", h2oRestApiVersion = 3)

  ## Change feature names to the original supplied, the following is okay because order is preserved
  pps <- res$partial_dependence_data
  min_y <- min(pps[[1]][,2])
  max_y <- max(pps[[1]][,2])
  min_lower <- min_y
  max_upper <- max_y
  col_name_index <- 1
  for (i in 1:length(pps)) {
    pp <- pps[[i]]
    if (!all(is.na(pp))) {
      # Track global y ranges (and +/- stddev bounds) across all tables so
      # multinomial overlays share one scale.
      min_y <- min(min_y, min(pp[,2]))
      max_y <- max(max_y, max(pp[,2]))
      min_lower <- min(min_lower, pp[,2] - pp[,3])
      max_upper <- max(max_upper, pp[,2] + pp[,3])
      if (i <= num_1d_pp_data) {
        if(is.null(targets)){
          col_name_index <- i
          title <- paste("Partial dependency plot for", cols[col_name_index])
        } else if(!is.null(targets)){
          # tables are grouped per column: advance after each group of targets
          if(length(cols) > 1 && i %% length(cols) == 0) {
            col_name_index <- col_name_index + 1
          }
          if(length(targets) > 1) {
            title <- paste("Partial dependency plot for", cols[col_name_index], "and classes\n",
                           paste(targets, collapse=", "))
          } else {
            title <- paste("Partial dependency plot for", cols[col_name_index], "and class", targets)
          }
        }
        names(pps[[i]]) <- c(cols[col_name_index], "mean_response", "stddev_response", "std_error_mean_response")
        attr(pps[[i]],"description") <- title
      } else {
        names(pps[[i]]) <- c(col_pairs_2dpdp[[i-num_1d_pp_data]][1], col_pairs_2dpdp[[i-num_1d_pp_data]][2],
                             "mean_response", "stddev_response", "std_error_mean_response")
        # FIX: the description previously pasted the first column name twice
        # ("X and X"); it must name both columns of the pair.
        attr(pps[[i]],"description") <- paste('2D partial dependence plot for',
                                              col_pairs_2dpdp[[i-num_1d_pp_data]][1], "and",
                                              col_pairs_2dpdp[[i-num_1d_pp_data]][2])
      }
    }
  }

  col_types <- unlist(h2o.getTypes(data))
  col_names <- names(data)

  # Render a single 1D partial-dependence table: line plot for numeric
  # columns, points with error bars for enum columns.
  pp.plot.1d <- function(pp) {
    if(!all(is.na(pp))) {
      x <- pp[,1]
      y <- pp[,2]
      stddev <- pp[,3]
      type <- col_types[which(col_names == names(pp)[1])]
      if(type == "enum") {
        line_type <- "p"
        lty <- NULL
        pch <- 19
        pp[, 1] <- factor(pp[,1], levels=pp[,1])
      } else {
        line_type <- "l"
        lty <- 1
        pch <- NULL
      }
      ## Plot one standard deviation above and below the mean
      if(plot_stddev) {
        ## Added upper and lower std dev confidence bound
        upper <- y + stddev
        lower <- y - stddev
        plot(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol="red", medlty=0, staplelty=0, boxlty=0,
             col="red", main = attr(pp,"description"), ylim = c(min(lower), max(upper)))
        polygon(c(x, rev(x)), c(lower, rev(upper)), col = adjustcolor("red", alpha.f = 0.1), border = FALSE)
        if(type == "enum"){
          x <- c(1:length(x))
          arrows(x, lower, x, upper, code=3, angle=90, length=0.1, col="red")
        }
      } else {
        plot(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol="red", medlty=0, staplelty=0, boxlty=0,
             col="red", main = attr(pp,"description"))
      }
    } else {
      print("Partial Dependence not calculated--make sure nbins is as high as the level count")
    }
  }

  # Overlay one curve per target class (one color each) for a single column
  # of a multinomial model; `pps` here is the slice of tables for one column.
  pp.plot.1d.multinomial <- function(pps) {
    colors <- rainbow(length(pps))
    for(i in 1:length(pps)) {
      pp <- pps[[i]]
      if(!all(is.na(pp))) {
        x <- pp[,1]
        y <- pp[,2]
        stddev <- pp[,3]
        color <- colors[i]
        title <- attr(pp,"description")
        type <- col_types[which(col_names == names(pp)[1])]
        if(type == "enum"){
          line_type <- "p"
          lty <- NULL
          pch <- 19
          pp[, 1] <- factor(x, labels=x)
        } else {
          line_type <- "l"
          lty <- 1
          pch <- NULL
        }
        if(plot_stddev) {
          upper <- y + stddev
          lower <- y - stddev
          if(i == 1){
            plot(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol=color, medlty=0, staplelty=0, boxlty=0,
                 main = title, col = color, ylim = c(min_lower, max_upper + 0.1 * abs(max_upper)))
          } else {
            points(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol=color, medlty=0, staplelty=0, boxlty=0,
                   col = color)
          }
          polygon(c(x, rev(x)), c(lower, rev(upper)), col = adjustcolor(color, alpha.f = 0.1), border = FALSE)
          if(type == "enum"){
            x <- c(1:length(x))
            arrows(x, lower, x, upper, code=3, angle=90, length=0.1, col=color)
          }
        } else {
          if(i == 1) {
            plot(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol=color, medlty=0, staplelty=0, boxlty=0,
                 main = title, col = color, ylim = c(min_y, max_y + 0.05 * abs(max_y)))
          } else {
            points(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol=color, medlty=0, staplelty=0, boxlty=0,
                   col = color)
          }
        }
        legend("topright", legend=targets, col=colors, lty=lty, pch=pch, bty="n", ncol=length(pps))
      } else {
        print("Partial Dependence not calculated--make sure nbins is as high as the level count")
      }
    }
  }

  # Render a 2D partial-dependence table as a 3D surface (requires the
  # suggested rgl/plot3Drgl packages); enum axes are mapped to integer ticks.
  pp.plot.2d <- function(pp, nBins=nbins, user_cols=NULL, user_num_splits=NULL) {
    xtickMarks <- NULL
    ytickMarks <- NULL
    if (!all(is.na(pp))) {
      if (col_types[which(col_names == names(pp)[1])] == "enum") {
        x <- replaceEnumLevel(pp[,1], unique(pp[,1]))
        xtickMarks <- unique(pp[,1])
      } else {
        x <- pp[,1]
      }
      if (col_types[which(col_names == names(pp)[2])] == "enum") {
        y <- replaceEnumLevel(pp[,2], unique(pp[,2]))
        ytickMarks <- unique(pp[,2])
      } else {
        y <- pp[,2]
      }
      allMetric <- reShape(x, y, pp[, 3], names(pp)[1], names(pp)[2], nBins, user_cols, user_num_splits)
      XX <- allMetric[[1]]
      YY <- allMetric[[2]]
      ZZ <- allMetric[[3]]
      tTitle <- ""
      if (!is.null(xtickMarks)) {
        xc <- c(1:length(xtickMarks))
        tTitle <- paste0("X axis tick marks: ", paste(xc, xtickMarks, sep=":", collapse=", "))
      }
      if (!is.null(ytickMarks)) {
        yc <- c(1:length(ytickMarks))
        temp <- paste0("Y axis tick marks: ", paste(yc, ytickMarks, sep=":", collapse=", "))
        tTitle <- paste0(tTitle, temp)
      }
      ## Plot one standard deviation above and below the mean
      if (plot_stddev) {
        ## Added upper and lower std dev confidence bound
        upper <- pp[, 3] + pp[, 4]
        lower <- pp[, 3] - pp[, 4]
        Zupper <- matrix(upper, ncol=dim(XX)[2], byrow=FALSE)
        Zlower <- matrix(lower, ncol=dim(XX)[2], byrow=FALSE)
        rgl::open3d()
        plot3Drgl::persp3Drgl(XX, YY, ZZ, theta=30, phi=15, axes=TRUE, scale=2, box=TRUE, nticks=5,
                              ticktype="detailed", xlab=names(pp)[1], ylab=names(pp)[2],
                              zlab="2D partial plots", main=tTitle, border='black', alpha=0.5)
        plot3Drgl::persp3Drgl(XX, YY, Zupper, alpha=0.2, lwd=2, add=TRUE, border='yellow')
        plot3Drgl::persp3Drgl(XX, YY, Zlower, alpha=0.2, lwd=2, add=TRUE, border='green')
        rgl::grid3d(c("x", "y", "z"))
      } else {
        rgl::persp3d(XX, YY, ZZ, theta=30, phi=50, axes=TRUE, scale=2, box=TRUE, nticks=5,
                     ticktype="detailed", xlab=names(pp)[1], ylab=names(pp)[2],
                     zlab="2D partial plots", main=tTitle, border='black', alpha=0.5)
        rgl::grid3d(c("x", "y", "z"))
      }
    } else {
      print("2D Partial Dependence not calculated--make sure nbins is as high as the level count")
    }
  }

  pp.plot.save.1d <- function(pp) {
    # If user accidentally provides one of the most common suffixes in R, it is removed.
    save_to <- gsub(replacement = "", pattern = "(\\.png)|(\\.jpg)|(\\.pdf)", x = save_to)
    destination_file <- paste0(save_to, "_", names(pp)[1], '.png')
    png(destination_file)
    pp.plot.1d(pp)
    dev.off()
  }

  pp.plot.save.1d.multinomial <- function(pps) {
    # If user accidentally provides one of the most common suffixes in R, it is removed.
    save_to <- gsub(replacement = "", pattern = "(\\.png)|(\\.jpg)|(\\.pdf)", x = save_to)
    destination_file <- paste0(save_to, "_", names(pps[[1]])[1], '.png')
    png(destination_file)
    pp.plot.1d.multinomial(pps)
    dev.off()
  }

  pp.plot.save.2d <- function(pp, nBins=nbins, user_cols=NULL, user_num_splits=NULL) {
    # If user accidentally provides one of the most common suffixes in R, it is removed.
    save_to <- gsub(replacement = "", pattern = "(\\.png)|(\\.jpg)|(\\.pdf)", x = save_to)
    colnames <- paste0(names(pp)[1], "_", names(pp)[2])
    destination_file <- paste0(save_to, "_", colnames, '.png')
    # FIX: forward the nBins parameter (previously the outer `nbins` was
    # passed, silently ignoring the value supplied to this function).
    pp.plot.2d(pp, nBins, user_cols, user_num_splits)
    rgl::snapshot3d(destination_file)
    dev.off()
  }

  # ---- 1D PDP plot and save ------------------------------------------------
  if(plot && !noCols) {
    if(is.null(targets)){ # single-tree-class (non-multinomial) PDP
      lapply(pps[1:num_1d_pp_data], pp.plot.1d)
      if(!is.null(save_to)){
        lapply(pps[1:num_1d_pp_data], pp.plot.save.1d)
      }
    } else {
      # multinomial: tables are grouped as length(targets) consecutive entries
      # per column; overlay each group in one plot.
      from <- 1
      to <- length(targets)
      for(i in 1:numCols) {
        pp <- pps[from:to]
        pp.plot.1d.multinomial(pp)
        if(!is.null(save_to)){
          pp.plot.save.1d.multinomial(pp)
        }
        # FIX: both bounds must advance by length(targets). The previous code
        # did `from <- from + to`, which skips ahead of the correct group from
        # the third column onward (e.g. 3 cols x 3 targets selected pps[10:9]).
        from <- from + length(targets)
        to <- to + length(targets)
      }
    }
  }
  # ---- 2D PDP plot and save ------------------------------------------------
  if (!noPairs && requireNamespace("plot3Drgl", quietly = TRUE) && requireNamespace("rgl", quietly = TRUE)) {
    if (plot && !is.null(save_to)) { # plot and save to file
      if (is.null(user_splits)) {
        sapply(
          pps[(num_1d_pp_data + 1):(num_1d_pp_data + numColPairs)],
          pp.plot.save.2d, nBins = nbins, user_cols = NULL, user_num_splits = NULL
        )
      } else {
        sapply(
          pps[(num_1d_pp_data + 1):(num_1d_pp_data + numColPairs)],
          pp.plot.save.2d, nBins = nbins, user_cols = user_cols, user_num_splits = user_num_splits
        )
      }
    } else { # only plot
      # FIX: offset by num_1d_pp_data (as the save branch does); the previous
      # numCols offset pointed at 1D tables for multinomial models.
      if (is.null(user_splits)) {
        sapply(
          pps[(num_1d_pp_data + 1):(num_1d_pp_data + numColPairs)],
          pp.plot.2d, nBins = nbins, user_cols = NULL, user_num_splits = NULL
        )
      } else {
        sapply(
          pps[(num_1d_pp_data + 1):(num_1d_pp_data + numColPairs)],
          pp.plot.2d, nBins = nbins, user_cols = user_cols, user_num_splits = user_num_splits
        )
      }
    }
  } else if (plot && !noPairs) {
    warning("Install packages plot3Drgl and rgl in order to generate 2D partial plots.")
  }
  if(length(pps) == 1) {
    return(pps[[1]])
  } else {
    return(pps)
  }
}

# Map each value of `originalV` to the 1-based position of its level in
# `vlevels` (used to place enum levels on a numeric plotting axis).
replaceEnumLevel <- function(originalV, vlevels) {
  x <- rep(1, length(originalV))
  for (ind in c(1:length(originalV))) {
    x[ind] <- which(originalV[ind] == vlevels)
  }
  x
}

# Fold flat x/y/z vectors into matrices for surface plotting. The number of
# rows is the y-axis bin count (taken from user_num_splits when the y column
# has user-defined splits, otherwise `nbin`).
reShape <- function(x, y, z, xname, yname, nbin, user_cols, user_num_splits) {
  ybin <- nbin
  if(!is.null(user_cols)) {
    if (yname %in% user_cols) {
      ybin <- user_num_splits[which(yname==user_cols)]
    }
  }
  xbin <- floor(length(x)/ybin)
  X <- matrix(x, nrow=ybin, ncol=xbin, byrow=FALSE)
  Y <- matrix(y, nrow=ybin, ncol=xbin, byrow=FALSE)
  Z <- matrix(z, nrow=ybin, ncol=xbin, byrow=FALSE)
  list(X, Y, Z)
}

#' Feature Generation via H2O Deep Learning
#'
#' Extract the non-linear feature from an H2O data set using an H2O deep learning
#' model.
#' @param object An \linkS4class{H2OModel} object that represents the deep
#'        learning model to be used for feature extraction.
#' @param data An H2OFrame object.
#' @param layer Index (integer) of the hidden layer to extract
#' @return Returns an H2OFrame object with as many features as the
#'         number of units in the hidden layer of the specified index.
#' @seealso \code{\link{h2o.deeplearning}} for making H2O Deep Learning models.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path = system.file("extdata", "prostate.csv", package = "h2o")
#' prostate = h2o.importFile(path = prostate_path)
#' prostate_dl = h2o.deeplearning(x = 3:9, y = 2, training_frame = prostate,
#'                                hidden = c(100, 200), epochs = 5)
#' prostate_deepfeatures_layer1 = h2o.deepfeatures(prostate_dl, prostate, layer = 1)
#' prostate_deepfeatures_layer2 = h2o.deepfeatures(prostate_dl, prostate, layer = 2)
#' head(prostate_deepfeatures_layer1)
#' head(prostate_deepfeatures_layer2)
#'
#' }
#' @export
# FIX: `layer` previously had no default, so calling h2o.deepfeatures(model,
# data) crashed inside `is.null(layer)` with "argument \"layer\" is missing".
# A default of 1 is backward-compatible and matches the existing NULL -> 1
# fallback.
h2o.deepfeatures <- function(object, data, layer = 1) {
  url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(data))
  if (is.null(layer))
    layer <- 1
  if (is.numeric(layer)) {
    # REST API expects a 0-based hidden-layer index.
    index <- layer - 1
    res <- .h2o.__remoteSend(url, method = "POST", deep_features_hidden_layer=index, h2oRestApiVersion = 4)
  } else {
    # Non-numeric layer is treated as a layer name.
    res <- .h2o.__remoteSend(url, method = "POST", deep_features_hidden_layer_name=layer, h2oRestApiVersion = 4)
  }
  job_key <- res$key$name
  dest_key <- res$dest$name
  .h2o.__waitOnJob(job_key)
  h2o.getFrame(dest_key)
}

#'
#' The H2ONode class.
#'
#' @slot id An \code{integer} representing node's unique identifier. Generated by H2O.
#' @slot levels A \code{character} representing categorical levels on split from parent's node belonging into this node. NULL for root node or non-categorical splits.
#'
#'
#' @aliases H2ONode
#'
setClass("H2ONode", representation(
  id = "integer"
))

#'
#' The H2OLeafNode class.
#'
#' This class represents a single leaf node in an \code{H2OTree}.
#'
#'
#' @aliases H2OLeafNode
#'
setClass("H2OLeafNode", representation(
  prediction = "numeric"
), contains = "H2ONode")

#'
#' The H2OSplitNode class.
#'
#' This class represents a single non-terminal node in an \code{H2OTree}.
#' @slot threshold A \code{numeric} split threshold, typically when the split column is numerical.
#' @slot left_child A \code{H2ONodeOrNULL} representing the left child node, if a node has one.
#' @slot right_child A \code{H2ONodeOrNULL} representing the right child node, if a node has one.
#' @slot split_feature A \code{character} representing the name of the column this node splits on.
#' @slot left_levels A \code{character} representing the levels of a categorical feature heading to the left child of this node. NA for non-categorical split.
#' @slot right_levels A \code{character} representing the levels of a categorical feature heading to the right child of this node. NA for non-categorical split.
#' @slot na_direction A \code{character} representing the direction of NA values. LEFT means NA values go to the left child node, RIGHT means NA values go to the right child node.
#' @aliases H2OSplitNode
#' @export
setClass(
  "H2OSplitNode",
  representation(
    threshold = "numeric",
    left_child = "H2ONode",
    right_child = "H2ONode",
    split_feature = "character",
    left_levels = "character",
    right_levels = "character",
    na_direction = "character"
  ),
  contains = "H2ONode"
)

#' @rdname H2ONode-class
#' @param object an \code{H2ONode} object.
#' @export
setMethod('show', 'H2ONode', function(object){
  print.H2ONode(object)
})

# Human-readable console rendering of a single tree node: its id, whether it
# is terminal, its children, and the split rule (numeric threshold or
# categorical level lists) including NA routing.
print.H2ONode <- function(node){
  cat("Node ID", node@id, "\n\n")
  # FIX: use is() instead of class(node) == "H2OLeafNode" so S4 subclasses of
  # H2OLeafNode are also recognized as terminal nodes.
  if(is(node, "H2OLeafNode")){
    cat("Terminal node. Prediction is", node@prediction)
    return()
  }
  if(!is.null(node@left_child)) cat("Left child node ID =", node@left_child@id, "\n") else cat("There is no left child \n")
  if(!is.null(node@right_child)) cat("Right child node ID =", node@right_child@id,"\n") else cat("There is no right child \n")
  cat("\n")
  cat("Splits on column", node@split_feature, "\n")
  # A missing threshold marks a categorical split; otherwise the split is on a
  # numeric threshold.
  if(is.na(node@threshold)){
    if(!is.null(node@left_child)) cat(" - Categorical levels going to the left node:", node@left_levels, "\n")
    if(!is.null(node@right_child)) cat(" - Categorical levels to the right node:", node@right_levels, "\n")
  } else {
    cat("Split threshold <", node@threshold,"to the left node, >=",node@threshold ,"to the right node\n")
  }
  cat("\n")
  if(!is.na(node@na_direction)) cat("NA values go to the", node@na_direction,"node")
}

#'
#' The H2OTree class.
#'
#' This class represents a model of a Tree built by one of H2O's algorithms (GBM, Random Forest).
#' @slot root_node A \code{H2ONode} representing the beginning of the tree behind the model. Allows further tree traversal.
#' @slot left_children An \code{integer} vector with left child nodes of tree's nodes
#' @slot right_children An \code{integer} vector with right child nodes of tree's nodes
#' @slot node_ids An \code{integer} representing identification number of a node. Node IDs are generated by H2O.
#' @slot descriptions A \code{character} vector with descriptions for each node to be found in the tree. Contains split threshold if the split is based on numerical column.
#'       For categorical splits, it contains list of categorical levels for transition from the parent node.
#' @slot model_id A \code{character} with the name of the model this tree is related to.
#' @slot tree_number An \code{integer} representing the order in which the tree has been built in the model.
#' @slot tree_class A \code{character} representing name of tree's class. Number of tree classes equals to the number of levels in categorical response column.
#' As there is exactly one class per categorical level, name of tree's class equals to the corresponding categorical level of response column. #' In case of regression and binomial, the name of the categorical level is ignored can be omitted, as there is exactly one tree built in both cases. #' @slot thresholds A \code{numeric} split thresholds. Split thresholds are not only related to numerical splits, but might be present in case of categorical split as well. #' @slot features A \code{character} with names of the feature/column used for the split. #' @slot levels A \code{character} representing categorical levels on split from parent's node belonging into this node. NULL for root node or non-categorical splits. #' @slot nas A \code{character} representing if NA values go to the left node or right node. May be NA if node is a leaf. #' @slot predictions A \code{numeric} representing predictions for each node in the graph. #' @slot tree_decision_path A \code{character}, plain language rules representation of a trained decision tree #' @slot decision_paths A \code{character} representing plain language rules that were used in a particular prediction. #' @slot left_cat_split A \code{character} list of categorical levels leading to the left child node. Only present when split is categorical, otherwise none. #' @slot right_cat_split A \code{character} list of categorical levels leading to the right child node. Only present when split is categorical, otherwise none. 
#' @aliases H2OTree #' @export setClass( "H2OTree", representation( root_node = "H2ONode", left_children = "integer", right_children = "integer", node_ids = "integer", descriptions = "character", model_id = "character", tree_number = "integer", tree_class = "character", thresholds = "numeric", features = "character", levels = "list", nas = "character", predictions = "numeric", tree_decision_path = "character", decision_paths = "character", left_cat_split = "list", right_cat_split = "list" ) ) #' @rdname H2OTree-class #' @param object an \code{H2OTree} object. #' @export setMethod('show', 'H2OTree', function(object){ print.H2OTree(object) }) print.H2OTree <- function(tree){ cat(paste0("Tree related to model '", tree@model_id,"'. Tree number is"), paste0(tree@tree_number,", tree class is '",tree@tree_class, "'\n")) cat("The tree has", length(tree), "nodes") } #' #' Overrides the behavior of length() function on H2OTree class. Returns number of nodes in an \code{H2OTree} #' @param x An \code{H2OTree} to count nodes for. 
#' setMethod("length", signature(x = "H2OTree"), function(x) { length(x@left_children) }) .h2o.walk_tree <- function(node, tree){ if(node == -1) {return(NULL)} child_node_index <- node + 1 left <- tree@left_children[child_node_index] right <- tree@right_children[child_node_index] node_levels <- if(is.null(tree@levels[[node + 1]])) NA_character_ else tree@levels[[node + 1]] left_child = .h2o.walk_tree(left, tree) right_child = .h2o.walk_tree(right, tree) node <- NULL if(is.null(left_child) && is.null(right_child)){ node <- new("H2OLeafNode", id = tree@node_ids[child_node_index], prediction = tree@predictions[child_node_index] ) } else { left_node_levels <- if(is.null(tree@levels[[left + 1]])) NA_character_ else tree@levels[[left + 1]] right_node_levels <- if(is.null(tree@levels[[right + 1]])) NA_character_ else tree@levels[[right + 1]] node <- new ("H2OSplitNode", id = tree@node_ids[child_node_index], left_child = left_child, right_child = right_child, threshold = tree@thresholds[child_node_index], split_feature = tree@features[child_node_index], na_direction = tree@nas[child_node_index], left_levels = left_node_levels, right_levels = right_node_levels) } node } #' Fetchces a single tree of a H2O model. This function is intended to be used on Gradient Boosting Machine models or Distributed Random Forest models. #' #' #' @param model Model with trees #' @param tree_number Number of the tree in the model to fetch, starting with 1 #' @param tree_class Name of the class of the tree (if applicable). This value is ignored for regression and binomial response column, as there is only one tree built. #' As there is exactly one class per categorical level, name of tree's class equals to the corresponding categorical level of response column. #' @return Returns an H2OTree object with detailed information about a tree. 
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv"
#' iris <- h2o.importFile(f)
#' gbm_model <- h2o.gbm(y = "species", training_frame = iris)
#' tree <- h2o.getModelTree(gbm_model, 1, "Iris-setosa")
#' }
#' @export
h2o.getModelTree <- function(model, tree_number, tree_class = NA) {
  url <- "Tree"
  # The REST endpoint expects an empty string (not NA) when no class is given.
  tree_class_request <- tree_class
  if(is.na(tree_class)){
    tree_class_request <- ""
  }
  res <- .h2o.__remoteSend(
    url,
    method = "GET",
    h2oRestApiVersion = 3,
    model = model@model_id,
    tree_number = tree_number - 1,  # R API is 1-based, backend is 0-based
    tree_class = tree_class_request
  )
  res$thresholds[is.nan(res$thresholds)] <- NA
  if(length(res$left_children) < 1) stop("Tree does not contain any nodes.")
  if(res$left_children[1] == -1){
    # If the root node has no children, fill in the single-node placeholders.
    res$nas <- c("NA")
    res$levels <- list(NULL)
    res$thresholds <- c(as.double(NA))
  }
  # Protection against NA-only arrays being deserialized as logical vectors:
  # coerce each field back to its intended type.
  if(is.logical(res$features)){
    res$features <- as.character(res$features)
  }
  if(is.logical(res$nas)){
    res$nas <- as.character(res$nas)
  }
  if(is.logical(res$thresholds)){
    res$thresholds <- as.numeric(res$thresholds)
  }
  if(is.logical(res$predictions)){
    res$predictions <- as.numeric(res$predictions)
  }
  # Start of the tree-building process
  tree <- new(
    "H2OTree",
    left_children = res$left_children,
    right_children = res$right_children,
    descriptions = res$descriptions,
    model_id = model@model_id,
    tree_number = as.integer(res$tree_number + 1),
    thresholds = res$thresholds,
    features = res$features,
    nas = res$nas,
    predictions = res$predictions,
    tree_decision_path = res$tree_decision_path,
    decision_paths = res$decision_paths
  )
  # Renumber children into breadth-of-discovery order and collect node ids.
  node_index <- 0
  left_ordered <- integer(length(tree@left_children))
  right_ordered <- integer(length(tree@right_children))
  node_ids <- c(res$root_node_id)
  for(i in seq_along(tree@left_children)){
    if(tree@left_children[i] != -1){
      node_index <- node_index + 1
      left_ordered[i] <- node_index
      node_ids[node_index + 1] <- tree@left_children[i]
    } else {
      left_ordered[i] <- -1
    }
    if(tree@right_children[i] != -1){
      node_index <- node_index + 1
      right_ordered[i] <- node_index
      node_ids[node_index + 1] <- tree@right_children[i]
    } else {
      right_ordered[i] <- -1
    }
  }
  tree@node_ids <- node_ids
  tree@left_children <- as.integer(left_ordered)
  tree@right_children <- as.integer(right_ordered)
  if(!is.null(res$tree_class)){
    tree@tree_class <- res$tree_class
  }
  if(is.logical(res$levels)){
    # Vector of NAs is recognized as logical type in R
    tree@levels <- rep(list(NULL), length(res$levels))
  } else {
    tree@levels <- res$levels
  }
  # Backend level indices are 0-based; shift to R's 1-based indexing.
  for (i in seq_along(tree@levels)){
    if(!is.null(tree@levels[[i]])){
      tree@levels[[i]] <- tree@levels[[i]] + 1
    }
  }
  # Convert numerical categorical levels to characters. `pointer` walks the
  # levels list in the same discovery order used when node_ids were assigned.
  pointer <- as.integer(1)
  for(i in seq_along(tree@left_children)){
    right <- tree@right_children[i]
    left <- tree@left_children[i]
    split_column_cat_index <- match(tree@features[i], model@model$names) # Index of split column on children's parent node
    if(is.na(split_column_cat_index)){
      # If the split is not categorical, just increment & continue
      if(right != -1) pointer <- pointer + 1
      if(left != -1) pointer <- pointer + 1
      next
    }
    split_column_domain <- model@model$domains[[split_column_cat_index]]
    # Left child node's levels converted to characters
    left_char_categoricals <- c()
    if(left != -1) {
      pointer <- pointer + 1
      if(!is.null(tree@levels[[pointer]])){
        for(level_index in seq_along(tree@levels[[pointer]])){
          left_char_categoricals[level_index] <- split_column_domain[tree@levels[[pointer]][level_index]]
        }
        tree@levels[[pointer]] <- left_char_categoricals
      }
    }
    # Right child node's levels converted to characters, if there is any
    right_char_categoricals <- c()
    if(right != -1) {
      pointer <- pointer + 1
      if(!is.null(tree@levels[[pointer]])){
        for(level_index in seq_along(tree@levels[[pointer]])){
          right_char_categoricals[level_index] <- split_column_domain[tree@levels[[pointer]][level_index]]
        }
        tree@levels[[pointer]] <- right_char_categoricals
      }
    }
  }
  # Distribute per-node levels to the per-parent cat_split lists.
  for (i in seq_along(tree@left_children)){
    left_idx <- tree@left_children[i]
    right_idx <- tree@right_children[i]
    if(left_idx != -1){
      tree@left_cat_split[i] <- tree@levels[left_idx + 1]
    } else {
      # NOTE(review): assigning NULL via `[` does not create a placeholder
      # element, so trailing leaf positions leave the list shorter than
      # left_children — kept as-is to preserve existing behavior.
      tree@left_cat_split[i] <- NULL
    }
    if(right_idx != -1){
      tree@right_cat_split[i] <- tree@levels[right_idx + 1]
    } else {
      tree@right_cat_split[i] <- NULL
    }
  }
  tree@root_node <- .h2o.walk_tree(0, tree)
  tree
}

#' @export
print.h2o.stackedEnsemble.summary <- function(x, ...) cat(x, sep = "\n")

#' Get the seed from H2OModel which was used during training.
#' If a user does not set the seed parameter before training, the seed is autogenerated.
#' It returns seed as the string if the value is bigger than the integer.
#' For example, an autogenerated seed is always long so that the seed in R is a string.
#'
#' @param object a fitted \linkS4class{H2OModel} object.
#' @return Returns seed to be used during training a model. Could be numeric or string.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate)
#' seed <- h2o.get_seed(prostate_gbm)
#' }
#' @export
get_seed.H2OModel <- function(object) {
  object@parameters$seed
}

#' @rdname get_seed.H2OModel
#' @export
h2o.get_seed <- get_seed.H2OModel

#' Imports a model under given path, creating a Generic model with it.
#' #' Usage example: #' generic_model <- h2o.genericModel(model_file_path = "/path/to/mojo.zip") #' predictions <- h2o.predict(generic_model, dataset) #' #' @param mojo_file_path Filesystem path to the model imported #' @return Returns H2O Generic Model based on given embedded model #' #' @examples #' \dontrun{ #' #' # Import default Iris dataset as H2O frame #' data <- as.h2o(iris) #' #' # Train a very simple GBM model #' features <- c("Sepal.Length", "Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width") #' original_model <- h2o.gbm(x = features, y = "Species", training_frame = data) #' #' # Download the trained GBM model as MOJO (temporary directory used in this example) #' mojo_original_name <- h2o.download_mojo(model = original_model, path = tempdir()) #' mojo_original_path <- paste0(tempdir(), "/", mojo_original_name) #' #' # Import the MOJO as Generic model #' generic_model <- h2o.genericModel(mojo_original_path) #' #' # Perform scoring with the generic model #' generic_model_predictions <- h2o.predict(generic_model, data) #' } #' @export h2o.genericModel <- function(mojo_file_path){ h2o.generic(path = mojo_file_path) } #' Imports a MOJO under given path, creating a Generic model with it. 
#' #' Usage example: #' mojo_model <- h2o.import_mojo(model_file_path = "/path/to/mojo.zip") #' predictions <- h2o.predict(mojo_model, dataset) #' #' @param mojo_file_path Filesystem path to the model imported #' @return Returns H2O Generic Model embedding given MOJO model #' #' @examples #' \dontrun{ #' #' # Import default Iris dataset as H2O frame #' data <- as.h2o(iris) #' #' # Train a very simple GBM model #' features <- c("Sepal.Length", "Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width") #' original_model <- h2o.gbm(x = features, y = "Species", training_frame = data) #' #' # Download the trained GBM model as MOJO (temporary directory used in this example) #' mojo_original_path <- h2o.save_mojo(original_model, path = tempdir()) #' #' # Import the MOJO and obtain a Generic model #' mojo_model <- h2o.import_mojo(mojo_original_path) #' #' # Perform scoring with the generic model #' predictions <- h2o.predict(mojo_model, data) #' } #' @export h2o.import_mojo <- function(mojo_file_path){ model <- h2o.generic(path = mojo_file_path) return(model) } #' Imports a MOJO from a local filesystem, creating a Generic model with it. 
#' #' Usage example: #' mojo_model <- h2o.upload_mojo(model_file_path = "/path/to/local/mojo.zip") #' predictions <- h2o.predict(mojo_model, dataset) #' #' @param mojo_local_file_path Filesystem path to the model imported #' @return Returns H2O Generic Model embedding given MOJO model #' #' @examples #' \dontrun{ #' #' # Import default Iris dataset as H2O frame #' data <- as.h2o(iris) #' #' # Train a very simple GBM model #' features <- c("Sepal.Length", "Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width") #' original_model <- h2o.gbm(x = features, y = "Species", training_frame = data) #' #' # Download the trained GBM model as MOJO (temporary directory used in this example) #' mojo_original_name <- h2o.download_mojo(model = original_model, path = tempdir()) #' mojo_original_path <- paste0(tempdir(), "/", mojo_original_name) #' #' # Upload the MOJO from local filesystem and obtain a Generic model #' mojo_model <- h2o.upload_mojo(mojo_original_path) #' #' # Perform scoring with the generic model #' predictions <- h2o.predict(mojo_model, data) #' } #' @export h2o.upload_mojo <- function(mojo_local_file_path){ model_file_key <- h2o.uploadFile(mojo_local_file_path, parse = FALSE) model <- h2o.generic(model_key = model_file_key) return(model) } #' #' Reset model threshold and return old threshold value. #' #' @param object An \linkS4class{H2OModel} object. #' @param threshold A threshold value from 0 to 1 included. #' @return Returns the previous threshold used in the model. 
#' #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' #' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o") #' prostate <- h2o.importFile(prostate_path) #' prostate[, 2] <- as.factor(prostate[, 2]) #' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"), #' training_frame = prostate, family = "binomial", #' nfolds = 0, alpha = 0.5, lambda_search = FALSE) #' old_threshold <- h2o.reset_threshold(prostate_glm, 0.9) #' } #' @export h2o.reset_threshold <- function(object, threshold) { o <- object if( is(o, "H2OModel") ) { .newExpr("model.reset.threshold", list(o@model_id, threshold))[1,1] } else { warning( paste0("Threshold cannot be reset for class ", class(o)) ) return(NULL) } }
/h2o-r/h2o-package/R/models.R
permissive
Winfredemalx54/h2o-3
R
false
false
194,760
r
#' #' H2O Model Related Functions #' #' @importFrom graphics strwidth par legend polygon arrows points grid #' @importFrom grDevices dev.copy dev.off png rainbow adjustcolor NULL #----------------------------------------------------------------------------------------------------------------------- # Helper Functions #----------------------------------------------------------------------------------------------------------------------- #' #' Used to verify data, x, y and turn into the appropriate things #' #' @param data H2OFrame #' @param x features #' @param y response #' @param autoencoder autoencoder flag .verify_dataxy <- function(data, x, y, autoencoder = FALSE) { if(!is.character(x) && !is.numeric(x)) stop('`x` must be column names or indices') if( !autoencoder ) if(!is.character(y) && !is.numeric(y)) stop('`y` must be a column name or index') cc <- colnames(chk.H2OFrame(data)) if(is.character(x)) { if(!all(x %in% cc)) stop("Invalid column names: ", paste(x[!(x %in% cc)], collapse=',')) x_i <- match(x, cc) } else { if(any( x < 1L | x > attr(x,'ncol'))) stop('out of range explanatory variable ', paste(x[x < 1L | x > length(cc)], collapse=',')) x_i <- x x <- cc[x_i] } x_ignore <- c() if( !autoencoder ) { if(is.character(y)){ if(!(y %in% cc)) stop(y, ' is not a column name') y_i <- which(y == cc) } else { if(y < 1L || y > length(cc)) stop('response variable index ', y, ' is out of range') y_i <- y y <- cc[y] } if(!autoencoder && (y %in% x)) { warning('removing response variable from the explanatory variables') x <- setdiff(x,y) } x_ignore <- setdiff(setdiff(cc, x), y) if( length(x_ignore) == 0L ) x_ignore <- '' return(list(x=x, y=y, x_i=x_i, x_ignore=x_ignore, y_i=y_i)) } else { x_ignore <- setdiff(cc, x) if( !missing(y) ) stop("`y` should not be specified for autoencoder=TRUE, remove `y` input") return(list(x=x,x_i=x_i,x_ignore=x_ignore)) } } .verify_datacols <- function(data, cols) { if(!is.character(cols) && !is.numeric(cols)) stop('`cols` must be column 
names or indices') cc <- colnames(chk.H2OFrame(data)) if(length(cols) == 1L && cols == '') cols <- cc if(is.character(cols)) { if(!all(cols %in% cc)) stop("Invalid column names: ", paste(cols[which(!cols %in% cc)], collapse=", ")) cols_ind <- match(cols, cc) } else { if(any(cols < 1L | cols > length(cc))) stop('out of range explanatory variable ', paste(cols[cols < 1L | cols > length(cc)], collapse=',')) cols_ind <- cols cols <- cc[cols_ind] } cols_ignore <- setdiff(cc, cols) if( length(cols_ignore) == 0L ) cols_ignore <- '' list(cols=cols, cols_ind=cols_ind, cols_ignore=cols_ignore) } .build_cm <- function(cm, actual_names = NULL, predict_names = actual_names, transpose = TRUE) { categories <- length(cm) cf_matrix <- matrix(unlist(cm), nrow=categories) if(transpose) cf_matrix <- t(cf_matrix) cf_total <- apply(cf_matrix, 2L, sum) cf_error <- c(1 - diag(cf_matrix)/apply(cf_matrix,1L,sum), 1 - sum(diag(cf_matrix))/sum(cf_matrix)) cf_matrix <- rbind(cf_matrix, cf_total) cf_matrix <- cbind(cf_matrix, round(cf_error, 3L)) if(!is.null(actual_names)) dimnames(cf_matrix) = list(Actual = c(actual_names, "Totals"), Predicted = c(predict_names, "Error")) cf_matrix } .h2o.modelJob <- function( algo, params, h2oRestApiVersion=.h2o.__REST_API_VERSION, verbose=FALSE) { if( !is.null(params$validation_frame) ) .eval.frame(params$training_frame) if( !is.null(params$validation_frame) ) .eval.frame(params$validation_frame) if (length(grep("stopping_metric", attributes(params)))>0) { if (params$stopping_metric=="r2") stop("r2 cannot be used as an early stopping_metric yet. 
Check this JIRA https://0xdata.atlassian.net/browse/PUBDEV-5381 for progress.") } if (algo=="pca" && is.null(params$k)) # make sure to set k=1 for default for pca params$k=1 job <- .h2o.startModelJob(algo, params, h2oRestApiVersion) .h2o.getFutureModel(job, verbose = verbose) } .h2o.startModelJob <- function(algo, params, h2oRestApiVersion) { .key.validate(params$key) #---------- Params ----------# param_values <- .h2o.makeModelParams(algo, params, h2oRestApiVersion) #---------- Build! ----------# res <- .h2o.__remoteSend(method = "POST", .h2o.__MODEL_BUILDERS(algo), .params = param_values, h2oRestApiVersion = h2oRestApiVersion) .h2o.processResponseWarnings(res) #---------- Output ----------# job_key <- res$job$key$name dest_key <- res$job$dest$name new("H2OModelFuture",job_key=job_key, model_id=dest_key) } .h2o.makeModelParams <- function(algo, params, h2oRestApiVersion) { #---------- Force evaluate temporary ASTs ----------# ALL_PARAMS <- .h2o.__remoteSend(method = "GET", h2oRestApiVersion = h2oRestApiVersion, .h2o.__MODEL_BUILDERS(algo))$model_builders[[algo]]$parameters #---------- Check user parameter types ----------# param_values <- .h2o.checkAndUnifyModelParameters(algo = algo, allParams = ALL_PARAMS, params = params) #---------- Validate parameters ----------# #.h2o.validateModelParameters(algo, param_values, h2oRestApiVersion) return(param_values) } .h2o.processResponseWarnings <- function(res) { if(length(res$messages) != 0L){ warn <- lapply(res$messages, function(y) { if(class(y) == "list" && y$message_type == "WARN" ) paste0(y$message, ".\n") else "" }) if(any(nzchar(warn))) warning(warn) } } .h2o.startSegmentModelsJob <- function(algo, segment_params, params, h2oRestApiVersion) { #---------- Params ----------# param_values <- .h2o.makeModelParams(algo, params, h2oRestApiVersion) param_values$segment_models_id <- segment_params$segment_models_id param_values$segment_columns <- .collapse.char(segment_params$segment_columns) param_values$parallelism <- 
segment_params$parallelism #---------- Build! ----------# job <- .h2o.__remoteSend(method = "POST", .h2o.__SEGMENT_MODELS_BUILDERS(algo), .params = param_values, h2oRestApiVersion = h2oRestApiVersion) job_key <- job$key$name dest_key <- job$dest$name new("H2OSegmentModelsFuture",job_key=job_key, segment_models_id=dest_key) } .h2o.segmentModelsJob <- function(algo, segment_params, params, h2oRestApiVersion) { .key.validate(segment_params$segment_models_id) sm <- .h2o.startSegmentModelsJob(algo, segment_params, params, h2oRestApiVersion) .h2o.getFutureSegmentModels(sm) } .h2o.getFutureSegmentModels <- function(object) { .h2o.__waitOnJob(object@job_key) h2o.get_segment_models(object@segment_models_id) } # # Validate given parameters against algorithm parameters validation # REST end-point. Stop execution in case of validation error. # .h2o.validateModelParameters <- function(algo, params, h2oRestApiVersion = .h2o.__REST_API_VERSION) { validation <- .h2o.__remoteSend(method = "POST", paste0(.h2o.__MODEL_BUILDERS(algo), "/parameters"), .params = params, h2oRestApiVersion = h2oRestApiVersion) if(length(validation$messages) != 0L) { error <- lapply(validation$messages, function(x) { if( x$message_type == "ERRR" ) paste0(x$message, ".\n") else "" }) if(any(nzchar(error))) stop(error) warn <- lapply(validation$messages, function(i) { if( i$message_type == "WARN" ) paste0(i$message, ".\n") else "" }) if(any(nzchar(warn))) warning(warn) } } .h2o.createModel <- function(algo, params, h2oRestApiVersion = .h2o.__REST_API_VERSION) { .h2o.getFutureModel(.h2o.startModelJob(algo, params, h2oRestApiVersion)) } .h2o.pollModelUpdates <- function(job) { cat(paste0("\nScoring History for Model ",job$dest$name, " at ", Sys.time(),"\n")) print(paste0("Model Build is ", job$progress*100, "% done...")) if(!is.null(job$progress_msg)){ # print(tail(h2o.getModel(job$dest$name)@model$scoring_history)) }else{ print("Scoring history is not available yet...") #Catch 404 with scoring history. 
Can occur when nfolds >=2 } } .h2o.getFutureModel <- function(object, verbose=FALSE) { .h2o.__waitOnJob(object@job_key, pollUpdates=ifelse(verbose, .h2o.pollModelUpdates, as.null)) h2o.getModel(object@model_id) } .h2o.prepareModelParameters <- function(algo, params, is_supervised) { if (!is.null(params$training_frame)) params$training_frame <- chk.H2OFrame(params$training_frame) if (!is.null(params$validation_frame)) params$validation_frame <- chk.H2OFrame(params$validation_frame) # Check if specified model request is for supervised algo isSupervised <- if (!is.null(is_supervised)) is_supervised else .isSupervised(algo, params) if (isSupervised) { if (!is.null(params$x)) { x <- params$x; params$x <- NULL } if (!is.null(params$y)) { y <- params$y; params$y <- NULL } args <- .verify_dataxy(params$training_frame, x, y) if( !is.null(params$offset_column) && !is.null(params$offset_column)) args$x_ignore <- args$x_ignore[!( params$offset_column == args$x_ignore )] if( !is.null(params$weights_column) && !is.null(params$weights_column)) args$x_ignore <- args$x_ignore[!( params$weights_column == args$x_ignore )] if( !is.null(params$fold_column) && !is.null(params$fold_column)) args$x_ignore <- args$x_ignore[!( params$fold_column == args$x_ignore )] params$ignored_columns <- args$x_ignore params$response_column <- args$y } else { if (!is.null(params$x)) { x <- params$x params$x <- NULL args <- .verify_datacols(params$training_frame, x) params$ignored_columns <- args$cols_ignore } } # Note: Magic copied from start .h2o.startModelJob params <- lapply(params, function(x) { if(is.integer(x)) x <- as.numeric(x); x }) params } .h2o.getModelParameters <- function(algo, h2oRestApiVersion = .h2o.__REST_API_VERSION) { .h2o.__remoteSend(method = "GET", .h2o.__MODEL_BUILDERS(algo), h2oRestApiVersion = h2oRestApiVersion)$model_builders[[algo]]$parameters } .h2o.checkAndUnifyModelParameters <- function(algo, allParams, params, hyper_params = list()) { # First verify all parameters error 
<- lapply(allParams, function(i) { e <- "" name <- i$name # R treats integer as not numeric if(is.integer(params[[name]])){ params[[name]] <- as.numeric(params[[name]]) } if (i$required && !((name %in% names(params)) || (name %in% names(hyper_params)))) { e <- paste0("argument \"", name, "\" is missing, with no default\n") } else if (name %in% names(params)) { e <- .h2o.checkParam(i, params[[name]]) if (!nzchar(e)) { params[[name]] <<- .h2o.transformParam(i, params[[name]]) } } e }) if(any(nzchar(error))) stop(error) #---------- Create parameter list to pass ----------# param_values <- lapply(params, function(i) { if(is.H2OFrame(i)) h2o.getId(i) else i }) param_values } # Long precision .is.int64 <- function(v) { number <- suppressWarnings(as.numeric(v)) if(is.na(number)) FALSE else number > -2^63 & number < 2^63 & (floor(number)==ceiling(number)) } # Precise int in double presision .is.int53 <- function(v) { number <- suppressWarnings(as.numeric(v)) if(is.na(number)) FALSE else number > -2^53 & number < 2^53 & (floor(number)==ceiling(number)) } # Check definition of given parameters in given list of parameters # Returns error message or empty string # Note: this function has no side-effects! 
# Checks a single user-supplied parameter value against its backend
# definition (`paramDef`, one entry of the /ModelBuilders parameter schema).
# Returns a human-readable error message, or "" when the value is valid.
# Note: this function has no side-effects.
.h2o.checkParam <- function(paramDef, paramValue) {
  e <- ""
  # Fetch mapping for given Java to R types
  mapping <- .type.map[paramDef$type,]
  type <- mapping[1L, 1L]
  scalar <- mapping[1L, 2L]
  name <- paramDef$name
  if (is.na(type)) stop("Cannot find type ", paramDef$type, " in .type.map")
  if (scalar) { # scalar == TRUE
    if (type == "H2OModel") type <- "character"
    # "seed" is special-cased: it is a 64-bit long on the backend, which R
    # cannot represent exactly beyond 53 bits, so strings are accepted too.
    if (name == "seed") {
      if(is.character(paramValue) && !.is.int64(paramValue))
        e <- paste0("\"seed\" must be of type long or string long, but got a string which cannot be converted to long.\n")
      else if(is.numeric(paramValue)){
        if(!.is.int64(paramValue)){
          e <- paste0("\"seed\" must be of type long or string long, but got a number which cannot be converted to long.\n")
        } else if(!.is.int53(paramValue)) {
          warning("R can handle only 53-bit integer without loss. If you need to use a less/larger number than the integer, pass seed parameter as the string number. Otherwise, the seed could be inconsistent. (For example, if you need to use autogenerated seed like -8664354335142703762 from H2O server.)")
        }
      }
    } else {
      if (!inherits(paramValue, type)) {
        e <- paste0(e, "\"", name , "\" must be of type ", type, ", but got ", class(paramValue), ".\n")
      } else if ((length(paramDef$values) > 1L) && (is.null(paramValue) || !(tolower(paramValue) %in% tolower(paramDef$values)))) {
        # Enumerated parameter: value must be one of the declared levels
        # (case-insensitive comparison).
        e <- paste0(e, "\"", name,"\" must be in")
        for (fact in paramDef$values)
          e <- paste0(e, " \"", fact, "\",")
        e <- paste(e, "but got", paramValue)
      }
    }
  } else { # scalar == FALSE
    if (!inherits(paramValue, type))
      e <- paste0("vector of ", name, " must be of type ", type, ", but got ", class(paramValue), ".\n")
  }
  e
}

# Converts a validated parameter value into the wire format expected by the
# REST API: Inf/-Inf become the strings "Infinity"/"-Infinity", H2OFrames are
# replaced by their ids, and (when collapseArrays is TRUE) vector-valued
# parameters are collapsed into the backend's bracketed string encodings.
.h2o.transformParam <- function(paramDef, paramValue, collapseArrays = TRUE) {
  # Fetch mapping for given Java to R types
  mapping <- .type.map[paramDef$type,]
  type <- mapping[1L, 1L]
  scalar <- mapping[1L, 2L]
  name <- paramDef$name
  if (scalar) { # scalar == TRUE
    if (inherits(paramValue, 'numeric') && paramValue == Inf) {
      paramValue <- "Infinity"
    } else if (inherits(paramValue, 'numeric') && paramValue == -Inf) {
      paramValue <- "-Infinity"
    }
  } else { # scalar == FALSE
    if (inherits(paramValue, 'numeric')) {
      # Replace each infinite element with its JSON-compatible string form.
      k = which(paramValue == Inf | paramValue == -Inf)
      if (length(k) > 0)
        for (n in k)
          if (paramValue[n] == Inf)
            paramValue[n] <- "Infinity"
          else
            paramValue[n] <- "-Infinity"
    }
    if (collapseArrays) {
      # Frames inside a list parameter are serialized as their quoted ids;
      # NULL and all-NA entries are preserved as NULL/NA markers.
      if(any(sapply(paramValue, function(x) !is.null(x) && is.H2OFrame(x))))
        paramValue <- lapply( paramValue, function(x) { if (is.null(x)) NULL else if (all(is.na(x))) NA else paste0('"',h2o.getId(x),'"') })
      if (type == "character")
        paramValue <- .collapse.char(paramValue)
      else if (paramDef$type == "StringPair[]")
        paramValue <- .collapse(sapply(paramValue, .collapse.tuple.string))
      else if (paramDef$type == "KeyValue[]") {
        # Each element is serialized as a {key: ..., value: ...} tuple; the
        # single-bracket paramValue[i] keeps the element's name attached.
        f <- function(i) { .collapse.tuple.key_value(paramValue[i]) }
        paramValue <- .collapse(sapply(seq(length(paramValue)), f))
      } else
        paramValue <- .collapse(paramValue)
    }
  }
  if( is.H2OFrame(paramValue) ) paramValue <- h2o.getId(paramValue)
  paramValue
}

# Wraps a single value in literal double quotes for wire serialization.
.escape.string <- function(xi) {
  paste0("\"", xi, "\"")
}

# Serializes a named tuple with all members quoted (StringPair[] encoding).
.collapse.tuple.string <- function(x) {
  .collapse.tuple(x, .escape.string)
}

# Serializes a one-element named list as a {key: "name", value: v} tuple
# (KeyValue[] encoding); the key is quoted, the value passed through as-is.
.collapse.tuple.key_value <- function(x) {
  .collapse.tuple(list(
    key = .escape.string(names(x)),
    value = x[[1]]
  ), identity)
}

# Renders a named list as "{name1: v1,name2: v2,...}". Unnamed elements fall
# back to letter names a, b, c, ...; `escape` formats each value.
.collapse.tuple <- function(x, escape) {
  names <- names(x)
  if (is.null(names)) names <- letters[1:length(x)]
  r <- c()
  for (i in 1:length(x)) {
    s <- paste0(names[i], ": ", escape(x[i]))
    r <- c(r, s)
  }
  paste0("{", paste0(r, collapse = ","), "}")
}

# Validate a given set of hyper parameters
# against algorithm definition.
# Transform all parameters in the same way as normal algorithm
# would do.
.h2o.checkAndUnifyHyperParameters <- function(algo, allParams, hyper_params, do_hyper_params_check) { errors <- lapply(allParams, function(paramDef) { e <- "" name <- paramDef$name hyper_names <- names(hyper_params) # First reject all non-gridable hyper parameters if (!paramDef$gridable && (name %in% hyper_names)) { e <- paste0("argument \"", name, "\" is not gridable\n") } else if (name %in% hyper_names) { # Check all specified hyper parameters # Hyper values for `name` parameter hyper_vals <- hyper_params[[name]] # Collect all possible verification errors if (do_hyper_params_check) { he <- lapply(hyper_vals, function(hv) { # Transform all integer values to numeric hv <- if (is.integer(hv)) as.numeric(hv) else hv .h2o.checkParam(paramDef, hv) }) e <- paste(he, collapse='') } # If there is no error then transform hyper values if (!nzchar(e)) { is_scalar <- .type.map[paramDef$type,][1L, 2L] transf_fce <- function(hv) { # R does not treat integers as numeric if (is.integer(hv)) { hv <- as.numeric(hv) } mapping <- .type.map[paramDef$type,] type <- mapping[1L, 1L] # Note: we apply this transformatio also for types # reported by the backend as scalar because of PUBDEV-1955 if (is.list(hv)) { hv <- as.vector(hv, mode=type) } # Force evaluation of frames and fetch frame_id as # a side effect if (is.H2OFrame(hv) ) hv <- h2o.getId(hv) .h2o.transformParam(paramDef, hv, collapseArrays = FALSE) } transf_hyper_vals <- if (is_scalar) sapply(hyper_vals,transf_fce) else lapply(hyper_vals, transf_fce) hyper_params[[name]] <<- transf_hyper_vals } } e }) if(any(nzchar(errors))) stop(errors) hyper_params } #' Predict on an H2O Model #' #' Obtains predictions from various fitted H2O model objects. #' #' This method dispatches on the type of H2O model to select the correct #' prediction/scoring algorithm. 
#' The order of the rows in the results is the same as the order in which the #' data was loaded, even if some rows fail (for example, due to missing #' values or unseen factor levels). #' #' @param object a fitted \linkS4class{H2OModel} object for which prediction is #' desired #' @param newdata An H2OFrame object in which to look for #' variables with which to predict. #' @param ... additional arguments to pass on. #' @return Returns an H2OFrame object with probabilites and #' default predictions. #' @seealso \code{\link{h2o.deeplearning}}, \code{\link{h2o.gbm}}, #' \code{\link{h2o.glm}}, \code{\link{h2o.randomForest}} for model #' generation in h2o. #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' #' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv" #' insurance <- h2o.importFile(f) #' predictors <- colnames(insurance)[1:4] #' response <- "Claims" #' insurance['Group'] <- as.factor(insurance['Group']) #' insurance['Age'] <- as.factor(insurance['Age']) #' splits <- h2o.splitFrame(data = insurance, ratios = 0.8, seed = 1234) #' train <- splits[[1]] #' valid <- splits[[2]] #' insurance_gbm <- h2o.gbm(x = predictors, y = response, #' training_frame = train, #' validation_frame = valid, #' distribution = "huber", #' huber_alpha = 0.9, seed = 1234) #' h2o.predict(insurance_gbm, newdata = insurance) #' } #' @export predict.H2OModel <- function(object, newdata, ...) { h2o.predict.H2OModel(object, newdata, ...) } #' Predict on an H2O Model #' #' @param object a fitted model object for which prediction is desired. #' @param newdata An H2OFrame object in which to look for #' variables with which to predict. #' @param ... additional arguments to pass on. #' @return Returns an H2OFrame object with probabilites and #' default predictions. 
#' @export h2o.predict <- function(object, newdata, ...){ UseMethod("h2o.predict", object) } #' Use H2O Transformation model and apply the underlying transformation #' #' @param model A trained model representing the transformation strategy #' @param ... Transformation model-specific parameters #' @return Returns an H2OFrame object with data transformed. #' @export setGeneric("h2o.transform", function(model, ...) { if(!is(model, "H2OModel")) { stop(paste("Argument 'model' must be an H2O Model. Received:", class(model))) } standardGeneric("h2o.transform") }) #' Applies target encoding to a given dataset #' #' @param model A trained model representing the transformation strategy #' @param data An H2OFrame with data to be transformed #' @param blending Use blending during the transformation. Respects model settings when not set. #' @param inflection_point Blending parameter. Only effective when blending is enabled. #' By default, model settings are respected, if not overridden by this setting. #' @param smoothing Blending parameter. Only effective when blending is enabled. #' By default, model settings are respected, if not overridden by this setting. #' @param noise An amount of random noise added to the encoding, this helps prevent overfitting. #' By default, model settings are respected, if not overridden by this setting. #' @param as_training Must be set to True when encoding the training frame. Defaults to False. #' @param ... Mainly used for backwards compatibility, to allow deprecated parameters. #' @return Returns an H2OFrame object with data transformed. #' @export setMethod("h2o.transform", signature("H2OTargetEncoderModel"), function(model, data, blending = NULL, inflection_point = -1, smoothing = -1, noise = NULL, as_training = FALSE, ...) { varargs <- list(...) 
for (arg in names(varargs)) { if (arg %in% c('data_leakage_handling', 'seed')) { warning(paste0("argument '", arg, "' is deprecated and will be ignored; please define it instead on model creation using `h2o.targetencoder`.")) argval <- varargs[[arg]] if (arg == 'data_leakage_handling' && argval != "None") { warning(paste0("Deprecated `data_leakage_handling=",argval,"` is replaced by `as_training=True`. ", "Please update your code.")) as_training <- TRUE } } else if (arg == 'use_blending') { warning("argument 'use_blending' is deprecated; please use 'blending' instead.") if (missing(blending)) blending <- varargs$use_blending else warning("ignoring 'use_blending' as 'blending' was also provided.") } else { stop(paste("unused argument", arg, "=", varargs[[arg]])) } } params <- list() params$model <- model@model_id params$frame <- h2o.getId(data) if (is.null(blending)){ params$blending <- model@allparameters$blending } else { params$blending <- blending } if (params$blending) { params$inflection_point <- inflection_point params$smoothing <- smoothing } if (!is.null(noise)){ params$noise <- noise } params$as_training <- as_training res <- .h2o.__remoteSend( "TargetEncoderTransform", method = "GET", h2oRestApiVersion = 3,.params = params ) h2o.getFrame(res$name) }) #' #' Transform words (or sequences of words) to vectors using a word2vec model. #' #' @param model A word2vec model. #' @param words An H2OFrame made of a single column containing source words. #' @param aggregate_method Specifies how to aggregate sequences of words. If method is `NONE` #' then no aggregation is performed and each input word is mapped to a single word-vector. #' If method is 'AVERAGE' then input is treated as sequences of words delimited by NA. #' Each word of a sequences is internally mapped to a vector and vectors belonging to #' the same sentence are averaged and returned in the result. 
#' @examples #' \dontrun{ #' h2o.init() #' #' # Build a simple word2vec model #' data <- as.character(as.h2o(c("a", "b", "a"))) #' w2v_model <- h2o.word2vec(data, sent_sample_rate = 0, min_word_freq = 0, epochs = 1, vec_size = 2) #' #' # Transform words to vectors without aggregation #' sentences <- as.character(as.h2o(c("b", "c", "a", NA, "b"))) #' h2o.transform(w2v_model, sentences) # -> 5 rows total, 2 rows NA ("c" is not in the vocabulary) #' #' # Transform words to vectors and return average vector for each sentence #' h2o.transform(w2v_model, sentences, aggregate_method = "AVERAGE") # -> 2 rows #' } #' @export setMethod("h2o.transform", signature("H2OWordEmbeddingModel"), function(model, words, aggregate_method = c("NONE", "AVERAGE")) { if (!is(model, "H2OModel")) stop(paste("The argument 'model' must be a word2vec model. Received:", class(model))) if (missing(words)) stop("`words` must be specified") if (!is.H2OFrame(words)) stop("`words` must be an H2OFrame") if (ncol(words) != 1) stop("`words` frame must contain a single string column") if (length(aggregate_method) > 1) aggregate_method <- aggregate_method[1] res <- .h2o.__remoteSend(method="GET", "Word2VecTransform", model = model@model_id, words_frame = h2o.getId(words), aggregate_method = aggregate_method) key <- res$vectors_frame$name h2o.getFrame(key) }) #' #' @rdname predict.H2OModel #' @export h2o.predict.H2OModel <- function(object, newdata, ...) { if (missing(newdata)) { stop("predictions with a missing `newdata` argument is not implemented yet") } # Send keys to create predictions url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata)) res <- .h2o.__remoteSend(url, method = "POST", h2oRestApiVersion = 4) job_key <- res$key$name dest_key <- res$dest$name .h2o.__waitOnJob(job_key) h2o.getFrame(dest_key) } #' Predict the Leaf Node Assignment on an H2O Model #' #' Obtains leaf node assignment from fitted H2O model objects. 
#' #' For every row in the test set, return the leaf placements of the row in all the trees in the model. #' Placements can be represented either by paths to the leaf nodes from the tree root or by H2O's internal identifiers. #' The order of the rows in the results is the same as the order in which the #' data was loaded #' #' @param object a fitted \linkS4class{H2OModel} object for which prediction is #' desired #' @param newdata An H2OFrame object in which to look for #' variables with which to predict. #' @param type choice of either "Path" when tree paths are to be returned (default); or "Node_ID" when the output # should be the leaf node IDs. #' @param ... additional arguments to pass on. #' @return Returns an H2OFrame object with categorical leaf assignment identifiers for #' each tree in the model. #' @seealso \code{\link{h2o.gbm}} and \code{\link{h2o.randomForest}} for model #' generation in h2o. #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o") #' prostate <- h2o.uploadFile(path = prostate_path) #' prostate$CAPSULE <- as.factor(prostate$CAPSULE) #' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate) #' h2o.predict(prostate_gbm, prostate) #' h2o.predict_leaf_node_assignment(prostate_gbm, prostate) #' } #' @export predict_leaf_node_assignment.H2OModel <- function(object, newdata, type = c("Path", "Node_ID"), ...) 
{
  # Scoring without data is not supported: fail fast.
  if (missing(newdata)) {
    stop("predictions with a missing `newdata` argument is not implemented yet")
  }
  params <- list(leaf_node_assignment = TRUE)
  if (!missing(type)) {
    # Only the two values advertised in the signature are accepted.
    if (!(type %in% c("Path", "Node_ID"))) {
      stop("type must be one of: Path, Node_ID")
    }
    params$leaf_node_assignment_type <- type
  }
  # POST to the Predictions endpoint with the leaf-assignment flag set,
  # then materialize the resulting frame.
  url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method = "POST", .params = params)
  res <- res$predictions_frame
  h2o.getFrame(res$name)
}

#' @rdname predict_leaf_node_assignment.H2OModel
#' @export
h2o.predict_leaf_node_assignment <- predict_leaf_node_assignment.H2OModel

# Internal n-fold cross-validation driver (not exported, not documented).
#
# Either takes an already-built `model`, or builds one by short name
# (`model.type` one of "gbm"/"glm"/"deeplearning") from `params`; then fits
# one model per fold using a random row-to-fold assignment.
#
# NOTE(review): the per-fold models collected in `xval` and the bookkeeping
# data.frame `output` (filled via `<<-` inside the lapply) are both discarded
# — only the full-data `model` is returned, and `strategy` is never read.
# Confirm this is intentional before relying on the fold results.
h2o.crossValidate <- function(model, nfolds, model.type = c("gbm", "glm", "deeplearning"), params, strategy = c("mod1", "random")) {
  output <- data.frame()

  if( nfolds < 2 ) stop("`nfolds` must be greater than or equal to 2")
  # NOTE(review): scalar condition; `&&` would be the conventional operator here.
  if( missing(model) & missing(model.type) ) stop("must declare `model` or `model.type`")
  else if( missing(model) ) {
    # Map the short name onto the corresponding h2o builder function.
    if(model.type == "gbm") model.type = "h2o.gbm"
    else if(model.type == "glm") model.type = "h2o.glm"
    else if(model.type == "deeplearning") model.type = "h2o.deeplearning"

    model <- do.call(model.type, c(params))
  }
  output[1, "fold_num"] <- -1
  output[1, "model_key"] <- model@model_id
  # output[1, "model"] <- model@model$mse_valid

  data <- params$training_frame
  data <- eval(data)
  data.len <- nrow(data)

  # Random fold assignment, one fold id per row (balanced via rep + sample).
  # nfold_vec <- h2o.sample(fr, 1:nfolds)
  nfold_vec <- sample(rep(1:nfolds, length.out = data.len), data.len)

  fnum_id <- as.h2o(nfold_vec)
  fnum_id <- h2o.cbind(fnum_id, data)

  xval <- lapply(1:nfolds, function(i) {
    # Fold i is held out for validation; all other rows train the fold model.
    params$training_frame <- data[fnum_id[,1] != i, ]
    params$validation_frame <- data[fnum_id[,1] == i, ]
    fold <- do.call(model.type, c(params))
    output[(i+1), "fold_num"] <<- i - 1
    output[(i+1), "model_key"] <<- fold@model_id
    # output[(i+1), "cv_err"] <<- mean(as.vector(fold@model$mse_valid))
    fold
  })
  model
}

#' Predict class probabilities at each stage of an H2O Model
#'
#' The output structure is analogous to the output of \link{h2o.predict_leaf_node_assignment}. For each tree t and
#' class c there will be a column Tt.Cc (eg. T3.C1 for tree 3 and class 1). The value will be the corresponding
#' predicted probability of this class by combining the raw contributions of trees T1.Cc,..,TtCc. Binomial models build
#' the trees just for the first class and values in columns Tx.C1 thus correspond to the probability p0.
#'
#' @param object a fitted \linkS4class{H2OModel} object for which prediction is
#'        desired
#' @param newdata An H2OFrame object in which to look for
#'        variables with which to predict.
#' @param ... additional arguments to pass on.
#' @return Returns an H2OFrame object with predicted probability for each tree in the model.
#' @seealso \code{\link{h2o.gbm}} and \code{\link{h2o.randomForest}} for model
#'          generation in h2o.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate)
#' h2o.predict(prostate_gbm, prostate)
#' h2o.staged_predict_proba(prostate_gbm, prostate)
#' }
#' @export
staged_predict_proba.H2OModel <- function(object, newdata, ...) {
  if (missing(newdata)) {
    stop("predictions with a missing `newdata` argument is not implemented yet")
  }
  # POST to the Predictions endpoint with the staged-probabilities flag set.
  url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method = "POST", predict_staged_proba=TRUE)
  res <- res$predictions_frame
  h2o.getFrame(res$name)
}

#' @rdname staged_predict_proba.H2OModel
#' @export
h2o.staged_predict_proba <- staged_predict_proba.H2OModel

#' Predict feature contributions - SHAP values on an H2O Model (only DRF, GBM and XGBoost models).
#' #' Returned H2OFrame has shape (#rows, #features + 1) - there is a feature contribution column for each input #' feature, the last column is the model bias (same value for each row). The sum of the feature contributions #' and the bias term is equal to the raw prediction of the model. Raw prediction of tree-based model is the sum #' of the predictions of the individual trees before the inverse link function is applied to get the actual #' prediction. For Gaussian distribution the sum of the contributions is equal to the model prediction. #' #' Note: Multinomial classification models are currently not supported. #' #' @param object a fitted \linkS4class{H2OModel} object for which prediction is #' desired #' @param newdata An H2OFrame object in which to look for #' variables with which to predict. #' @param ... additional arguments to pass on. #' @return Returns an H2OFrame contain feature contributions for each input row. #' @seealso \code{\link{h2o.gbm}} and \code{\link{h2o.randomForest}} for model #' generation in h2o. #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o") #' prostate <- h2o.uploadFile(path = prostate_path) #' prostate_gbm <- h2o.gbm(3:9, "AGE", prostate) #' h2o.predict(prostate_gbm, prostate) #' h2o.predict_contributions(prostate_gbm, prostate) #' } #' @export predict_contributions.H2OModel <- function(object, newdata, ...) 
{ if (missing(newdata)) { stop("predictions with a missing `newdata` argument is not implemented yet") } url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata)) res <- .h2o.__remoteSend(url, method = "POST", predict_contributions=TRUE, h2oRestApiVersion = 4) job_key <- res$key$name dest_key <- res$dest$name .h2o.__waitOnJob(job_key) h2o.getFrame(dest_key) } #' @rdname predict_contributions.H2OModel #' @export h2o.predict_contributions <- predict_contributions.H2OModel #' Retrieve the number of occurrences of each feature for given observations # on their respective paths in a tree ensemble model. #' Available for GBM, Random Forest and Isolation Forest models. #' #' @param object a fitted \linkS4class{H2OModel} object for which prediction is #' desired #' @param newdata An H2OFrame object in which to look for #' variables with which to predict. #' @param ... additional arguments to pass on. #' @return Returns an H2OFrame contain per-feature frequencies on the predict path for each input row. #' @seealso \code{\link{h2o.gbm}} and \code{\link{h2o.randomForest}} for model #' generation in h2o. feature_frequencies.H2OModel <- function(object, newdata, ...) { if (missing(newdata)) { stop("predictions with a missing `newdata` argument is not implemented yet") } url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(newdata)) res <- .h2o.__remoteSend(url, method = "POST", feature_frequencies=TRUE) res <- res$predictions_frame h2o.getFrame(res$name) } #' @rdname feature_frequencies.H2OModel #' @export h2o.feature_frequencies <- feature_frequencies.H2OModel #' Model Performance Metrics in H2O #' #' Given a trained h2o model, compute its performance on the given #' dataset. However, if the dataset does not contain the response/target column, no performance will be returned. #' Instead, a warning message will be printed. #' #' #' @param model An \linkS4class{H2OModel} object #' @param newdata An H2OFrame. 
The model will make predictions #' on this dataset, and subsequently score them. The dataset should #' match the dataset that was used to train the model, in terms of #' column names, types, and dimensions. If newdata is passed in, then train, valid, and xval are ignored. #' @param train A logical value indicating whether to return the training metrics (constructed during training). #' #' Note: when the trained h2o model uses balance_classes, the training metrics constructed during training will be from the balanced training dataset. #' For more information visit: \url{https://0xdata.atlassian.net/browse/TN-9} #' @param valid A logical value indicating whether to return the validation metrics (constructed during training). #' @param xval A logical value indicating whether to return the cross-validation metrics (constructed during training). #' @param data (DEPRECATED) An H2OFrame. This argument is now called `newdata`. #' @return Returns an object of the \linkS4class{H2OModelMetrics} subclass. 
#' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o") #' prostate <- h2o.uploadFile(path = prostate_path) #' prostate$CAPSULE <- as.factor(prostate$CAPSULE) #' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate) #' h2o.performance(model = prostate_gbm, newdata=prostate) #' #' ## If model uses balance_classes #' ## the results from train = TRUE will not match the results from newdata = prostate #' prostate_gbm_balanced <- h2o.gbm(3:9, "CAPSULE", prostate, balance_classes = TRUE) #' h2o.performance(model = prostate_gbm_balanced, newdata = prostate) #' h2o.performance(model = prostate_gbm_balanced, train = TRUE) #' } #' @export h2o.performance <- function(model, newdata=NULL, train=FALSE, valid=FALSE, xval=FALSE, data=NULL) { # data is now deprecated and the new arg name is newdata if (!is.null(data)) { warning("The `data` argument is DEPRECATED; use `newdata` instead as `data` will eventually be removed") if (is.null(newdata)) newdata <- data else stop("Do not use both `data` and `newdata`; just use `newdata`") } # Some parameter checking if(!is(model, "H2OModel")) stop("`model` must an H2OModel object") if(!is.null(newdata) && !is.H2OFrame(newdata)) stop("`newdata` must be an H2OFrame object") if(!is.logical(train) || length(train) != 1L || is.na(train)) stop("`train` must be TRUE or FALSE") if(!is.logical(valid) || length(valid) != 1L || is.na(valid)) stop("`valid` must be TRUE or FALSE") if(!is.logical(xval) || length(xval) != 1L || is.na(xval)) stop("`xval` must be TRUE or FALSE") if(sum(valid, xval, train) > 1) stop("only one of `train`, `valid`, and `xval` can be TRUE") missingNewdata <- missing(newdata) || is.null(newdata) if( !missingNewdata ) { if (!is.null(model@parameters$y) && !(model@parameters$y %in% names(newdata))) { print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.") return(NULL) } newdata.id 
<- h2o.getId(newdata) parms <- list() parms[["model"]] <- model@model_id parms[["frame"]] <- newdata.id res <- .h2o.__remoteSend(method = "POST", .h2o.__MODEL_METRICS(model@model_id,newdata.id), .params = parms) #### # FIXME need to do the client-side filtering... PUBDEV-874: https://0xdata.atlassian.net/browse/PUBDEV-874 model_metrics <- Filter(function(mm) { mm$frame$name==newdata.id}, res$model_metrics)[[1]] # filter on newdata.id, R's builtin Filter function # #### metrics <- model_metrics[!(names(model_metrics) %in% c("__meta", "names", "domains", "model_category"))] model_category <- model_metrics$model_category Class <- paste0("H2O", model_category, "Metrics") metrics$frame <- list() metrics$frame$name <- newdata.id new(Class = Class, algorithm = model@algorithm, on_train = missingNewdata, metrics = metrics) } else if( train || (!train && !valid && !xval) ) return(model@model$training_metrics) # no newdata, train, valid, and xval are false (all defaults), return the training metrics else if( valid ) { if( is.null(model@model$validation_metrics@metrics) ) return(NULL) # no newdata, but valid is true, return the validation metrics else return(model@model$validation_metrics) } else { #if xval if( is.null(model@model$cross_validation_metrics@metrics) ) return(NULL) # no newdata, but xval is true, return the crosss_validation metrics else return(model@model$cross_validation_metrics) } } #' Create Model Metrics from predicted and actual values in H2O #' #' Given predicted values (target for regression, class-1 probabilities or binomial #' or per-class probabilities for multinomial), compute a model metrics object #' #' @param predicted An H2OFrame containing predictions #' @param actuals An H2OFrame containing actual values #' @param domain Vector with response factors for classification. #' @param distribution Distribution for regression. #' @param weights (optional) An H2OFrame containing observation weights. 
#' @return Returns an object of the \linkS4class{H2OModelMetrics} subclass.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate)
#' pred <- h2o.predict(prostate_gbm, prostate)[, 3] ## class-1 probability
#' h2o.make_metrics(pred, prostate$CAPSULE)
#' }
#' @export
h2o.make_metrics <- function(predicted, actuals, domain=NULL, distribution=NULL, weights=NULL) {
  predicted <- .validate.H2OFrame(predicted, required=TRUE)
  actuals <- .validate.H2OFrame(actuals, required=TRUE)
  weights <- .validate.H2OFrame(weights, required=FALSE)
  params <- list()
  params$predictions_frame <- h2o.getId(predicted)
  params$actuals_frame <- h2o.getId(actuals)
  if (!is.null(weights)) {
    params$weights_frame <- h2o.getId(weights)
  }
  params$domain <- domain
  params$distribution <- distribution
  # Infer the domain from the actuals' factor levels when not given explicitly.
  if (is.null(domain) && !is.null(h2o.levels(actuals)))
    domain <- h2o.levels(actuals)
  ## pythonify the domain: serialize as a JSON-style array, e.g. ["a","b"].
  ## (The previous loop `for (d in 2:length(domain))` was broken for a
  ## single-level domain: `2:1` counts backwards, appending an NA level and
  ## duplicating the first one.)
  if (!is.null(domain)) {
    params[["domain"]] <- paste0('["', paste(domain, collapse = '","'), '"]')
  }
  url <- paste0("ModelMetrics/predictions_frame/",params$predictions_frame,"/actuals_frame/",params$actuals_frame)
  res <- .h2o.__remoteSend(method = "POST", url, .params = params)
  model_metrics <- res$model_metrics
  # Strip REST metadata; keep only the actual metric fields.
  metrics <- model_metrics[!(names(model_metrics) %in% c("__meta", "names", "domains", "model_category"))]
  # Pick the metrics subclass from the fields present in the response.
  name <- "H2ORegressionMetrics"
  if (!is.null(metrics$AUC)) name <- "H2OBinomialMetrics"
  else if (!is.null(distribution) && distribution == "ordinal") name <- "H2OOrdinalMetrics"
  else if (!is.null(metrics$hit_ratio_table)) name <- "H2OMultinomialMetrics"
  new(Class = name, metrics = metrics)
}

#' Retrieve the AUC
#'
#' Retrieves the AUC value from an \linkS4class{H2OBinomialMetrics}.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training AUC value is returned. If more
#' than one parameter is set to TRUE, then a named vector of AUCs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OBinomialMetrics} object.
#' @param train Retrieve the training AUC
#' @param valid Retrieve the validation AUC
#' @param xval Retrieve the cross-validation AUC
#' @seealso \code{\link{h2o.giniCoef}} for the Gini coefficient,
#'          \code{\link{h2o.mse}} for MSE, and \code{\link{h2o.metric}} for the
#'          various threshold metrics. See \code{\link{h2o.performance}} for
#'          creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.auc(perf)
#' }
#' @export
h2o.auc <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # A metrics object carries the AUC directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$AUC )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # Default (no flag set): training AUC, when available.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$AUC
      if ( !is.null(metric) ) return(metric)
    }
    # Collect the requested metrics into a named vector. Note that
    # c(v, NULL) is a no-op, so a missing metric simply is not appended.
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$AUC)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics on the model -> warn and return invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$AUC)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$AUC)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value -> bare scalar; multiple -> named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No AUC for ", class(object)))
  invisible(NULL)
}

#' Internal function that calculates a precise AUC from given
#' probabilities and actual responses.
#'
#' Note: The underlying implementation is not distributed and can
#' only handle limited size of data. For internal use only.
#'
#' @param probs An \linkS4class{H2OFrame} holding vector of probabilities.
#' @param acts An \linkS4class{H2OFrame} holding vector of actuals.
.h2o.perfect_auc <- function(probs, acts) {
  # Delegates to the backend "perfectAUC" expression; [1, 1] pulls the
  # scalar result out of the returned 1x1 frame.
  .newExpr("perfectAUC", probs, acts)[1, 1]
}

#' Retrieve the AUCPR (Area Under Precision Recall Curve)
#'
#' Retrieves the AUCPR value from an \linkS4class{H2OBinomialMetrics}.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training AUCPR value is returned. If more
#' than one parameter is set to TRUE, then a named vector of AUCPRs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OBinomialMetrics} object.
#' @param train Retrieve the training aucpr
#' @param valid Retrieve the validation aucpr
#' @param xval Retrieve the cross-validation aucpr
#' @seealso \code{\link{h2o.giniCoef}} for the Gini coefficient,
#'          \code{\link{h2o.mse}} for MSE, and \code{\link{h2o.metric}} for the
#'          various threshold metrics. See \code{\link{h2o.performance}} for
#'          creating H2OModelMetrics objects.
#' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' #' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o") #' prostate <- h2o.uploadFile(prostate_path) #' #' prostate[, 2] <- as.factor(prostate[, 2]) #' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli") #' perf <- h2o.performance(model, prostate) #' h2o.aucpr(perf) #' } #' @export h2o.aucpr <- function(object, train=FALSE, valid=FALSE, xval=FALSE) { if( is(object, "H2OModelMetrics") ) return( object@metrics$pr_auc ) if( is(object, "H2OModel") ) { model.parts <- .model.parts(object) if ( !train && !valid && !xval ) { metric <- model.parts$tm@metrics$pr_auc if ( !is.null(metric) ) return(metric) } v <- c() v_names <- c() if ( train ) { v <- c(v,model.parts$tm@metrics$pr_auc) v_names <- c(v_names,"train") } if ( valid ) { if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation())) else { v <- c(v,model.parts$vm@metrics$pr_auc) v_names <- c(v_names,"valid") } } if ( xval ) { if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation())) else { v <- c(v,model.parts$xm@metrics$pr_auc) v_names <- c(v_names,"xval") } } if ( !is.null(v) ) { names(v) <- v_names if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) } } } warning(paste0("No aucpr for ", class(object))) invisible(NULL) } #' @rdname h2o.aucpr #' @export h2o.pr_auc <- function(object, train=FALSE, valid=FALSE, xval=FALSE) { .Deprecated("h2o.aucpr") h2o.aucpr(object, train, valid, xval) } #' Retrieve the mean per class error #' #' Retrieves the mean per class error from an \linkS4class{H2OBinomialMetrics}. #' If "train", "valid", and "xval" parameters are FALSE (default), then the training mean per class error value is returned. If more #' than one parameter is set to TRUE, then a named vector of mean per class errors are returned, where the names are "train", "valid" #' or "xval". #' #' @param object An \linkS4class{H2OBinomialMetrics} object. 
#' @param train Retrieve the training mean per class error
#' @param valid Retrieve the validation mean per class error
#' @param xval Retrieve the cross-validation mean per class error
#' @seealso \code{\link{h2o.mse}} for MSE, and \code{\link{h2o.metric}} for the
#'          various threshold metrics. See \code{\link{h2o.performance}} for
#'          creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.mean_per_class_error(perf)
#' h2o.mean_per_class_error(model, train=TRUE)
#' }
#' @export
h2o.mean_per_class_error <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects carry the value directly.
  if (is(object, "H2OModelMetrics")) {
    return(object@metrics$mean_per_class_error)
  }
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # With no flag set, fall back to the training metric when present.
    if (!train && !valid && !xval) {
      default_metric <- parts$tm@metrics$mean_per_class_error
      if (!is.null(default_metric)) {
        return(default_metric)
      }
    }
    # Append each requested metric; c(x, NULL) is a no-op, so a missing
    # metric simply is not collected.
    values <- c()
    labels <- c()
    if (train) {
      values <- c(values, parts$tm@metrics$mean_per_class_error)
      labels <- c(labels, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) {
        return(invisible(.warn.no.validation()))
      }
      values <- c(values, parts$vm@metrics$mean_per_class_error)
      labels <- c(labels, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) {
        return(invisible(.warn.no.cross.validation()))
      }
      values <- c(values, parts$xm@metrics$mean_per_class_error)
      labels <- c(labels, "xval")
    }
    if (!is.null(values)) {
      names(values) <- labels
      # A single value is unwrapped to a bare scalar.
      if (length(values) == 1) {
        return(values[[1]])
      }
      return(values)
    }
  }
  warning(paste0("No mean per class error for ", class(object)))
  invisible(NULL)
}

#'
#' Retrieve the Akaike information criterion (AIC) value
#'
#' Retrieves the AIC value.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training AIC value is returned. If more
#' than one parameter is set to TRUE, then a named vector of AICs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}.
#' @param train Retrieve the training AIC
#' @param valid Retrieve the validation AIC
#' @param xval Retrieve the cross-validation AIC
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' p_sid <- h2o.runif(prostate)
#' prostate_train <- prostate[p_sid > .2,]
#' prostate_glm <- h2o.glm(x = 3:7, y = 2, training_frame = prostate_train)
#' aic_basic <- h2o.aic(prostate_glm)
#' print(aic_basic)
#' }
#' @export
h2o.aic <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects carry the AIC directly.
  if (is(object, "H2OModelMetrics")) {
    return(object@metrics$AIC)
  }
  if (is(object, "H2OModel")) {
    parts <- .model.parts(object)
    # With no flag set, fall back to the training AIC when present.
    if (!train && !valid && !xval) {
      training_aic <- parts$tm@metrics$AIC
      if (!is.null(training_aic)) {
        return(training_aic)
      }
    }
    # Append each requested metric; c(x, NULL) is a no-op, so a missing
    # metric simply is not collected.
    values <- c()
    labels <- c()
    if (train) {
      values <- c(values, parts$tm@metrics$AIC)
      labels <- c(labels, "train")
    }
    if (valid) {
      if (is.null(parts$vm)) {
        return(invisible(.warn.no.validation()))
      }
      values <- c(values, parts$vm@metrics$AIC)
      labels <- c(labels, "valid")
    }
    if (xval) {
      if (is.null(parts$xm)) {
        return(invisible(.warn.no.cross.validation()))
      }
      values <- c(values, parts$xm@metrics$AIC)
      labels <- c(labels, "xval")
    }
    if (!is.null(values)) {
      names(values) <- labels
      # A single value is unwrapped to a bare scalar.
      if (length(values) == 1) {
        return(values[[1]])
      }
      return(values)
    }
  }
  warning(paste0("No AIC for ", class(object)))
  invisible(NULL)
}

#'
#' Retrieve the R2 value
#'
#' Retrieves the R2 value from an H2O model.
#' Will return R^2 for GLM Models and will return NaN otherwise.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training R2 value is returned. If more #' than one parameter is set to TRUE, then a named vector of R2s are returned, where the names are "train", "valid" #' or "xval". #' #' @param object An \linkS4class{H2OModel} object. #' @param train Retrieve the training R2 #' @param valid Retrieve the validation set R2 if a validation set was passed in during model build time. #' @param xval Retrieve the cross-validation R2 #' @examples #' \dontrun{ #' library(h2o) #' #' h <- h2o.init() #' fr <- as.h2o(iris) #' #' m <- h2o.glm(x = 2:5, y = 1, training_frame = fr) #' #' h2o.r2(m) #' } #' @export h2o.r2 <- function(object, train=FALSE, valid=FALSE, xval=FALSE) { if( is(object, "H2OModelMetrics") ) return( object@metrics$r2 ) if( is(object, "H2OModel") ) { model.parts <- .model.parts(object) if ( !train && !valid && !xval ) { metric <- model.parts$tm@metrics$r2 if ( !is.null(metric) ) return(metric) } v <- c() v_names <- c() if ( train ) { v <- c(v,model.parts$tm@metrics$r2) v_names <- c(v_names,"train") } if ( valid ) { if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation())) else { v <- c(v,model.parts$vm@metrics$r2) v_names <- c(v_names,"valid") } } if ( xval ) { if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation())) else { v <- c(v,model.parts$xm@metrics$r2) v_names <- c(v_names,"xval") } } if ( !is.null(v) ) { names(v) <- v_names if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) } } } warning(paste0("No R2 for ", class(object))) invisible(NULL) } #' #' Retrieve the Mean Residual Deviance value #' #' Retrieves the Mean Residual Deviance value from an H2O model. #' If "train", "valid", and "xval" parameters are FALSE (default), then the training Mean Residual Deviance value is returned. If more #' than one parameter is set to TRUE, then a named vector of Mean Residual Deviances are returned, where the names are "train", "valid" #' or "xval". 
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param train Retrieve the training Mean Residual Deviance
#' @param valid Retrieve the validation Mean Residual Deviance
#' @param xval Retrieve the cross-validation Mean Residual Deviance
#' @examples
#' \dontrun{
#' library(h2o)
#'
#' h <- h2o.init()
#' fr <- as.h2o(iris)
#'
#' m <- h2o.deeplearning(x = 2:5, y = 1, training_frame = fr)
#'
#' h2o.mean_residual_deviance(m)
#' }
#' @export
h2o.mean_residual_deviance <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics object: mean residual deviance is stored directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$mean_residual_deviance )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # Default (no flags): fall back to the training metric when present.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$mean_residual_deviance
      if ( !is.null(metric) ) return(metric)
    }
    # c() is NULL, so a fully-missing metric falls through to the warning.
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$mean_residual_deviance)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # Validation metrics were never computed: warn, return NULL invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$mean_residual_deviance)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      # Cross-validation metrics were never computed.
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$mean_residual_deviance)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value unwrapped; multiple values as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No mean residual deviance for ", class(object)))
  invisible(NULL)
}

#' Retrieve HGLM ModelMetrics
#'
#' @param object an H2OModel object or H2OModelMetrics.
#' @export
h2o.HGLMMetrics <- function(object) {
  # HGLM metrics live on the training-metrics object only.
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    return(model.parts$tm@metrics)
  }
  warning(paste0("No HGLM Metric for ",class(object)))
  invisible(NULL)
}

#' Retrieve the GINI Coefficient
#'
#' Retrieves the GINI coefficient from an \linkS4class{H2OBinomialMetrics}.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training GINI value is returned. If more
#' than one parameter is set to TRUE, then a named vector of GINIs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object an \linkS4class{H2OBinomialMetrics} object.
#' @param train Retrieve the training GINI Coefficient
#' @param valid Retrieve the validation GINI Coefficient
#' @param xval Retrieve the cross-validation GINI Coefficient
#' @seealso \code{\link{h2o.auc}} for AUC, \code{\link{h2o.giniCoef}} for the
#'          GINI coefficient, and \code{\link{h2o.metric}} for the various
#'          threshold metrics. See \code{\link{h2o.performance}} for creating
#'          H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.giniCoef(perf)
#' }
#' @export
h2o.giniCoef <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics object: the Gini coefficient is stored directly.
  if(is(object, "H2OModelMetrics")) return( object@metrics$Gini )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # Default (no flags): return the training Gini when available.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$Gini
      if ( !is.null(metric) ) return(metric)
    }
    # c() is NULL; a fully-missing metric falls through to the warning below.
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$Gini)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics were computed: warn, return NULL invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$Gini)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      # No cross-validation metrics were computed.
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$Gini)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value unwrapped; multiple values as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No Gini for ",class(object)))
  invisible(NULL)
}

#'
#' Return the coefficients that can be applied to the non-standardized data.
#'
#' Note: standardize = True by default. If set to False, then coef() returns the coefficients that are fit directly.
#'
#' @param object an \linkS4class{H2OModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "cylinders"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_glm <- h2o.glm(balance_classes = TRUE,
#'                     seed = 1234,
#'                     x = predictors,
#'                     y = response,
#'                     training_frame = train,
#'                     validation_frame = valid)
#' h2o.coef(cars_glm)
#' }
#' @export
h2o.coef <- function(object) {
  # Only GLM, GAM and CoxPH expose a coefficients table.
  if (is(object, "H2OModel") && object@algorithm %in% c("glm", "gam", "coxph")) {
    # Multinomial/ordinal families have one coefficient column per class:
    # delegate to grabCoeff to reshape the table.
    if ((object@algorithm == "glm" || object@algorithm == "gam") && (object@allparameters$family %in% c("multinomial", "ordinal"))) {
      grabCoeff(object@model$coefficients_table, "coefs_class", FALSE)
    } else {
      # Binomial/regression: a single named numeric vector of coefficients.
      structure(object@model$coefficients_table$coefficients, names = object@model$coefficients_table$names)
    }
  } else {
    stop("Can only extract coefficients from GAM, GLM and CoxPH models")
  }
}

#'
#' Return coefficients fitted on the standardized data (requires standardize = True, which is on by default). These coefficients can be used to evaluate variable importance.
#'
#' @param object an \linkS4class{H2OModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "cylinders"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_glm <- h2o.glm(balance_classes = TRUE,
#'                     seed = 1234,
#'                     x = predictors,
#'                     y = response,
#'                     training_frame = train,
#'                     validation_frame = valid)
#' h2o.coef(cars_glm)
#' }
#' @export
h2o.coef_norm <- function(object) {
  # Standardized coefficients are only available for GLM and GAM models.
  if (!(is(object, "H2OModel") && ((object@algorithm == "glm") || (object@algorithm == "gam")))) {
    stop("Can only extract coefficients from GAMs/GLMs")
  }
  coef_table <- object@model$coefficients_table
  if (object@allparameters$family %in% c("multinomial", "ordinal")) {
    # One standardized-coefficient column per class: reshape via grabCoeff.
    grabCoeff(coef_table, "std_coefs_class", TRUE)
  } else {
    # Single-response families: a named numeric vector.
    structure(coef_table$standardized_coefficients, names = coef_table$names)
  }
}

# Reshape a multinomial/ordinal coefficients table into a list.
#
# The table layout is: column 1 = coefficient names, then one raw-coefficient
# column per class, then one standardized-coefficient column per class.
#
# tempTable    coefficients table (column-indexable, with a $names column)
# nameStart    prefix for the per-class output names, e.g. "coefs_class"
# standardize  if TRUE, pull the standardized columns instead of the raw ones
#
# Returns a list whose first element ("coefficient_names") is the shared
# name column, followed by one element per class named `<nameStart>_<k>`.
grabCoeff <- function(tempTable, nameStart, standardize=FALSE) {
  shared_names <- tempTable$names        # coefficient names, shared across classes
  n_cols <- length(tempTable)
  n_classes <- (n_cols - 1) / 2
  # Raw coefficients occupy columns 2..(n_classes+1); standardized ones
  # occupy the remaining columns.
  if (standardize) {
    first_col <- n_classes + 2
    last_col  <- n_cols
  } else {
    first_col <- 2
    last_col  <- n_classes + 1
  }
  out <- list(coefficients_names = shared_names)
  out_names <- c("coefficient_names")
  for (col in first_col:last_col) {
    out[[length(out) + 1]] <- tempTable[, col]
    # Class indices are zero-based: first class column becomes `<nameStart>_0`.
    out_names <- c(out_names, paste(nameStart, length(out) - 2, sep = "_"))
  }
  structure(out, names = out_names)
}

#' Retrieves Mean Squared Error Value
#'
#' Retrieves the mean squared error value from an \linkS4class{H2OModelMetrics}
#' object.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training MSE value is returned. If more
#' than one parameter is set to TRUE, then a named vector of MSEs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' This function only supports \linkS4class{H2OBinomialMetrics},
#' \linkS4class{H2OMultinomialMetrics}, and \linkS4class{H2ORegressionMetrics} objects.
#'
#' @param object An \linkS4class{H2OModelMetrics} object of the correct type.
#' @param train Retrieve the training MSE
#' @param valid Retrieve the validation MSE
#' @param xval Retrieve the cross-validation MSE
#' @seealso \code{\link{h2o.auc}} for AUC, \code{\link{h2o.mse}} for MSE, and
#'          \code{\link{h2o.metric}} for the various threshold metrics. See
#'          \code{\link{h2o.performance}} for creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.mse(perf)
#' }
#' @export
h2o.mse <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics object: the MSE is stored directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$MSE )
  if( is(object, "H2OModel") ) {
    metrics <- NULL  # break out special for clustering vs the rest
    model.parts <- .model.parts(object)
    # Default (no flags): return the training MSE when available.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$MSE
      if ( !is.null(metric) ) return(metric)
    }
    # c() is NULL; a fully-missing metric falls through to the warning below.
    v <- c()
    v_names <- c()
    if ( train ) {
      # Clustering models report per-cluster within-SS instead of a scalar MSE.
      # NOTE(review): the clustering branch assigns (not appends), discarding
      # anything already in v — confirm intended when combined with other flags.
      if( is(object, "H2OClusteringModel") ) v <- model.parts$tm@metrics$centroid_stats$within_cluster_sum_of_squares
      else v <- c(v,model.parts$tm@metrics$MSE)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics were computed: warn, return NULL invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        # NOTE(review): same overwrite-vs-append asymmetry as the train branch.
        if( is(object, "H2OClusteringModel") ) v <- model.parts$vm@metrics$centroid_stats$within_cluster_sum_of_squares
        else v <- c(v,model.parts$vm@metrics$MSE)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      # No cross-validation metrics were computed.
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        if( is(object, "H2OClusteringModel") ) v <- model.parts$xm@metrics$centroid_stats$within_cluster_sum_of_squares
        else v <- c(v,model.parts$xm@metrics$MSE)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value unwrapped; multiple values as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No MSE for ",class(object)))
  invisible(NULL)
}

#' Retrieves Root Mean Squared Error Value
#'
#' Retrieves the root mean squared error value from an \linkS4class{H2OModelMetrics}
#' object.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training RMSE value is returned. If more
#' than one parameter is set to TRUE, then a named vector of RMSEs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' This function only supports \linkS4class{H2OBinomialMetrics},
#' \linkS4class{H2OMultinomialMetrics}, and \linkS4class{H2ORegressionMetrics} objects.
#'
#' @param object An \linkS4class{H2OModelMetrics} object of the correct type.
#' @param train Retrieve the training RMSE
#' @param valid Retrieve the validation RMSE
#' @param xval Retrieve the cross-validation RMSE
#' @seealso \code{\link{h2o.auc}} for AUC, \code{\link{h2o.mse}} for RMSE, and
#'          \code{\link{h2o.metric}} for the various threshold metrics. See
#'          \code{\link{h2o.performance}} for creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.rmse(perf)
#' }
#' @export
h2o.rmse <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics object: the RMSE is stored directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$RMSE )
  if( is(object, "H2OModel") ) {
    metrics <- NULL  # break out special for clustering vs the rest
    model.parts <- .model.parts(object)
    # Default (no flags): return the training RMSE when available.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$RMSE
      if ( !is.null(metric) ) return(metric)
    }
    # c() is NULL; a fully-missing metric falls through to the warning below.
    v <- c()
    v_names <- c()
    if ( train ) {
      # Clustering models report per-cluster within-SS instead of a scalar RMSE.
      # NOTE(review): the clustering branch assigns (not appends) to v.
      if( is(object, "H2OClusteringModel") ) v <- model.parts$tm@metrics$centroid_stats$within_cluster_sum_of_squares
      else v <- c(v,model.parts$tm@metrics$RMSE)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics were computed: warn, return NULL invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        if( is(object, "H2OClusteringModel") ) v <- model.parts$vm@metrics$centroid_stats$within_cluster_sum_of_squares
        else v <- c(v,model.parts$vm@metrics$RMSE)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      # No cross-validation metrics were computed.
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        if( is(object, "H2OClusteringModel") ) v <- model.parts$xm@metrics$centroid_stats$within_cluster_sum_of_squares
        else v <- c(v,model.parts$xm@metrics$RMSE)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value unwrapped; multiple values as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No RMSE for ",class(object)))
  invisible(NULL)
}

#'
#' Retrieve the Mean Absolute Error Value
#'
#' Retrieves the mean absolute error (MAE) value from an H2O model.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training MAE value is returned. If more
#' than one parameter is set to TRUE, then a named vector of MAEs are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param train Retrieve the training MAE
#' @param valid Retrieve the validation set MAE if a validation set was passed in during model build time.
#' @param xval Retrieve the cross-validation MAE
#' @examples
#' \dontrun{
#' library(h2o)
#'
#' h <- h2o.init()
#' fr <- as.h2o(iris)
#'
#' m <- h2o.deeplearning(x = 2:5, y = 1, training_frame = fr)
#'
#' h2o.mae(m)
#' }
#' @export
h2o.mae <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics object: the MAE is stored directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$mae )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # Default (no flags): return the training MAE when available.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$mae
      if ( !is.null(metric) ) return(metric)
    }
    # c() is NULL; a fully-missing metric falls through to the warning below.
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$mae)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics were computed: warn, return NULL invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$mae)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      # No cross-validation metrics were computed.
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$mae)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value unwrapped; multiple values as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No MAE for ", class(object)))
  invisible(NULL)
}

#'
#' Retrieve the Root Mean Squared Log Error
#'
#' Retrieves the root mean squared log error (RMSLE) value from an H2O model.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training rmsle value is returned. If more
#' than one parameter is set to TRUE, then a named vector of rmsles are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param train Retrieve the training rmsle
#' @param valid Retrieve the validation set rmsle if a validation set was passed in during model build time.
#' @param xval Retrieve the cross-validation rmsle
#' @examples
#' \dontrun{
#' library(h2o)
#'
#' h <- h2o.init()
#' fr <- as.h2o(iris)
#'
#' m <- h2o.deeplearning(x = 2:5, y = 1, training_frame = fr)
#'
#' h2o.rmsle(m)
#' }
#' @export
h2o.rmsle <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics object: the RMSLE is stored directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$rmsle )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # Default (no flags): return the training RMSLE when available.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$rmsle
      if ( !is.null(metric) ) return(metric)
    }
    # c() is NULL; a fully-missing metric falls through to the warning below.
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$rmsle)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics were computed: warn, return NULL invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$rmsle)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      # No cross-validation metrics were computed.
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$rmsle)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value unwrapped; multiple values as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No rmsle for ", class(object)))
  invisible(NULL)
}

#' Retrieve the Log Loss Value
#'
#' Retrieves the log loss output for a \linkS4class{H2OBinomialMetrics} or
#' \linkS4class{H2OMultinomialMetrics} object
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training Log Loss value is returned. If more
#' than one parameter is set to TRUE, then a named vector of Log Losses are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object a \linkS4class{H2OModelMetrics} object of the correct type.
#' @param train Retrieve the training Log Loss
#' @param valid Retrieve the validation Log Loss
#' @param xval Retrieve the cross-validation Log Loss
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_splits <- h2o.splitFrame(data = cars, ratios = .8, seed = 1234)
#' train <- cars_splits[[1]]
#' valid <- cars_splits[[2]]
#' car_drf <- h2o.randomForest(x = predictors,
#'                             y = response,
#'                             training_frame = train,
#'                             validation_frame = valid)
#' h2o.logloss(car_drf, train = TRUE, valid = TRUE)
#' }
#' @export
h2o.logloss <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics object: the log loss is stored directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$logloss )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # Default (no flags): return the training log loss when available.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$logloss
      if ( !is.null(metric) ) return(metric)
    }
    # c() is NULL; a fully-missing metric falls through to the warning below.
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$logloss)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics were computed: warn, return NULL invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$logloss)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      # No cross-validation metrics were computed.
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$logloss)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value unwrapped; multiple values as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste("No log loss for",class(object)))
  invisible(NULL)
}

#'
#' Retrieve the variable importance.
#'
#' @param object An \linkS4class{H2OModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate_complete.csv.zip"
#' pros <- h2o.importFile(f)
#' response <- "GLEASON"
#' predictors <- c("ID", "AGE", "CAPSULE", "DCAPS", "PSA", "VOL", "DPROS")
#' model <- h2o.glm(x = predictors, y = response, training_frame = pros)
#' h2o.varimp(model)
#' }
#' @export
h2o.varimp <- function(object) {
  if (!is(object, "H2OModel")) {
    warning( paste0("No variable importances for ", class(object)) )
    return(NULL)
  }
  vi <- object@model$variable_importances
  coef_mag <- object@model$standardized_coefficient_magnitudes
  if (is.null(vi) && !is.null(coef_mag)) {
    # GLM-like models expose standardized coefficient magnitudes rather than
    # a variable-importance table; reshape them into the standard layout.
    raw <- coef_mag$coefficients
    vi <- data.frame(variable = coef_mag$names,
                     relative_importance = raw,
                     scaled_importance = raw / max(raw),
                     percentage = raw / sum(raw))
  }
  if (is.null(vi)) {
    # Neither a variable-importance table nor GLM coefficients are present.
    warning("This model doesn't have variable importances", call. = FALSE)
    return(invisible(NULL))
  }
  vi
}

#'
#' Retrieve per-variable split information for a given Isolation Forest model.
#' Output will include:
#' - count - The number of times a variable was used to make a split.
#' - aggregated_split_ratios - The split ratio is defined as "abs(#left_observations - #right_observations) / #before_split".
#'   Even splits (#left_observations approx the same as #right_observations) contribute
#'   less to the total aggregated split ratio value for the given feature;
#'   highly imbalanced splits (eg. #left_observations >> #right_observations) contribute more.
#' - aggregated_split_depths - The sum of all depths of a variable used to make a split.
#'   (If a variable is used on level N of a tree, then it contributes with N to the total aggregate.)
#' @param object An Isolation Forest model represented by \linkS4class{H2OModel} object.
#' @export
h2o.varsplits <- function(object) {
  if (!is(object, "H2OModel")) {
    warning( paste0("No variable importances for ", class(object)) )
    return(NULL)
  }
  splits <- object@model$variable_splits
  if (is.null(splits)) {
    # Only Isolation Forest models populate this slot.
    warning("This model doesn't have variable splits information, only Isolation Forest can be used with h2o.varsplits().", call. = FALSE)
    return(invisible(NULL))
  }
  splits
}

#'
#' Retrieve Model Score History
#'
#' @param object An \linkS4class{H2OModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_gbm <- h2o.gbm(x = predictors, y = response,
#'                     training_frame = train,
#'                     validation_frame = valid,
#'                     seed = 1234)
#' h2o.scoreHistory(cars_gbm)
#' }
#' @export
h2o.scoreHistory <- function(object) {
  if (!is(object, "H2OModel")) {
    warning( paste0("No score history for ", class(object)) )
    return(NULL)
  }
  # NULL when the model carries no scoring history.
  object@model$scoring_history
}

#'
#' Retrieve GLM Model Score History buried in GAM model
#' @param object An \linkS4class{H2OModel} object.
#' @export
h2o.scoreHistoryGAM <- function(object) {
  # GAM stores the scoring history of its underlying GLM under a separate key.
  object@model$glm_scoring_history
}

#'
#' Retrieve actual number of trees for tree algorithms
#'
#' @param object An \linkS4class{H2OModel} object.
#' @export
h2o.get_ntrees_actual <- function(object) {
  # Return the number of trees actually built by a tree-based model, as
  # reported by the model summary (may differ from the requested ntrees,
  # e.g. when early stopping kicks in). NULL with a warning otherwise.
  if( is(object, "H2OModel") ) {
    # Fix: these are scalar comparisons, so use short-circuit `||`
    # rather than the vectorized `|` (same result here, correct idiom).
    if(object@algorithm == "gbm" || object@algorithm == "drf" ||
       object@algorithm == "isolationforest" || object@algorithm == "xgboost"){
      sh <- object@model$model_summary['number_of_trees'][,1]
      if( is.null(sh) ) return(NULL)
      sh
    } else {
      warning("No actual number of trees for this model")
      return(NULL)
    }
  } else {
    warning( paste0("No actual number of trees for ", class(object)) )
    return(NULL)
  }
}

#'
#' Retrieve the respective weight matrix
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param matrix_id An integer, ranging from 1 to number of layers + 1, that specifies the weight matrix to return.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/chicago/chicagoCensus.csv"
#' census <- h2o.importFile(f)
#' census[, 1] <- as.factor(census[, 1])
#' dl_model <- h2o.deeplearning(x = c(1:3), y = 4, training_frame = census,
#'                             hidden = c(17, 191),
#'                             epochs = 1,
#'                             balance_classes = FALSE,
#'                             export_weights_and_biases = TRUE)
#' h2o.weights(dl_model, matrix_id = 1)
#' }
#' @export
h2o.weights <- function(object, matrix_id=1){
  o <- object
  if( is(o, "H2OModel") ) {
    sh <- o@model$weights[[matrix_id]]
    if( is.null(sh) ) return(NULL)
    # NOTE(review): this bare `sh` is NOT the return value — execution falls
    # through to the h2o.getFrame() call below, which re-fetches the frame by
    # name. Presumably sh carries a $name key pointing at the stored frame;
    # confirm before restructuring this function.
    sh
  }
  else {
    warning( paste0("No weights for ", class(o)) )
    return(NULL)
  }
  h2o.getFrame(sh$name)
}

#'
#' Return the respective bias vector
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param vector_id An integer, ranging from 1 to number of layers + 1, that specifies the bias vector to return.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "http://h2o-public-test-data.s3.amazonaws.com/smalldata/chicago/chicagoCensus.csv"
#' census <- h2o.importFile(f)
#' census[, 1] <- as.factor(census[, 1])
#'
#' dl_model <- h2o.deeplearning(x = c(1:3), y = 4, training_frame = census,
#'                             hidden = c(17, 191),
#'                             epochs = 1,
#'                             balance_classes = FALSE,
#'                             export_weights_and_biases = TRUE)
#' h2o.biases(dl_model, vector_id = 1)
#' }
#' @export
h2o.biases <- function(object, vector_id=1){
  o <- object
  if( is(o, "H2OModel") ) {
    sh <- o@model$biases[[vector_id]]
    if( is.null(sh) ) return(NULL)
    # NOTE(review): this bare `sh` is NOT the return value — execution falls
    # through to the h2o.getFrame() call below, which re-fetches the frame by
    # name. Presumably sh carries a $name key pointing at the stored frame;
    # confirm before restructuring this function.
    sh
  }
  else {
    warning( paste0("No biases for ", class(o)) )
    return(NULL)
  }
  h2o.getFrame(sh$name)
}

#'
#' Retrieve the Hit Ratios
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training Hit Ratios value is returned. If more
#' than one parameter is set to TRUE, then a named list of Hit Ratio tables are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param train Retrieve the training Hit Ratio
#' @param valid Retrieve the validation Hit Ratio
#' @param xval Retrieve the cross-validation Hit Ratio
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/iris/iris_wheader.csv"
#' iris <- h2o.importFile(f)
#' iris_split <- h2o.splitFrame(data = iris, ratios = 0.8, seed = 1234)
#' train <- iris_split[[1]]
#' valid <- iris_split[[2]]
#'
#' iris_xgb <- h2o.xgboost(x = 1:4, y = 5, training_frame = train, validation_frame = valid)
#' hrt_iris <- h2o.hit_ratio_table(iris_xgb, valid = TRUE)
#' hrt_iris
#' }
#' @export
h2o.hit_ratio_table <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics object: the hit ratio table is stored directly.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$hit_ratio_table )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    # Default (no flags): return the training hit ratio table when available.
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$hit_ratio_table
      if ( !is.null(metric) ) return(metric)
    }
    # Tables are collected in a list (unlike the scalar accessors).
    v <- list()
    v_names <- c()
    if ( train ) {
      v[[length(v)+1]] <- model.parts$tm@metrics$hit_ratio_table
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # No validation metrics were computed: warn, return NULL invisibly.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v[[length(v)+1]] <- model.parts$vm@metrics$hit_ratio_table
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      # No cross-validation metrics were computed.
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v[[length(v)+1]] <- model.parts$xm@metrics$hit_ratio_table
        v_names <- c(v_names,"xval")
      }
    }
    # Fix: v is a list(), which is never NULL, so the previous `!is.null(v)`
    # guard returned an empty unnamed list when no table was found. Check the
    # length instead so the no-table case falls through to the warning below,
    # consistent with the other metric accessors.
    if ( length(v) > 0 ) {
      names(v) <- v_names
      # Single table unwrapped; several come back as a named list.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  # if o is a data.frame, then the hrt was passed in -- just for pretty printing
  if( is(object, "data.frame") ) return(object)

  # warn if we got something unexpected...
  warning( paste0("No hit ratio table for ", class(object)) )
  invisible(NULL)
}

#' H2O Model Metric Accessor Functions
#'
#' A series of functions that retrieve model metric details.
#'
#' Many of these functions have an optional thresholds parameter. Currently
#' only increments of 0.1 are allowed. If not specified, the functions will
#' return all possible values. Otherwise, the function will return the value for
#' the indicated threshold.
#'
#' Currently, these functions are only supported by
#' \linkS4class{H2OBinomialMetrics} objects.
#'
#' @param object An \linkS4class{H2OModelMetrics} object of the correct type.
#' @param thresholds (Optional) A value or a list of values between 0.0 and 1.0.
#'        If not set, then all thresholds will be returned.
#'        If "max", then the threshold maximizing the metric will be used.
#' @param metric (Optional) the metric to retrieve.
#'        If not set, then all metrics will be returned.
#' @param transform (Optional) a list describing a transformer for the given metric, if any.
#'        e.g. transform=list(op=foo_fn, name="foo") will rename the given metric to "foo"
#'        and apply function foo_fn to the metric values.
#' @return Returns either a single value, or a list of values.
#' @seealso \code{\link{h2o.auc}} for AUC, \code{\link{h2o.giniCoef}} for the
#'          GINI coefficient, and \code{\link{h2o.mse}} for MSE. See
#'          \code{\link{h2o.performance}} for creating H2OModelMetrics objects.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#'
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' perf <- h2o.performance(model, prostate)
#' h2o.F1(perf)
#' }
#' @export
h2o.metric <- function(object, thresholds, metric, transform=NULL) {
  if (!is(object, "H2OModelMetrics")) stop(paste0("No ", metric, " for ",class(object)," .Should be a H2OModelMetrics object!"))
  if (is(object, "H2OBinomialMetrics")){
    # Metric columns available in the per-threshold table ("threshold" and
    # "idx" are bookkeeping columns, not metrics).
    avail_metrics <- names(object@metrics$thresholds_and_metric_scores)
    avail_metrics <- avail_metrics[!(avail_metrics %in% c('threshold', 'idx'))]
    if (missing(thresholds)) {
      if (missing(metric)) {
        # No threshold, no metric: return the full per-threshold table.
        metrics <- object@metrics$thresholds_and_metric_scores
      } else {
        # Resolve user-facing metric names to internal column names via
        # the .h2o.metrics_aliases lookup (unknown names pass through).
        h2o_metric <- sapply(metric, function(m) ifelse(m %in% avail_metrics, m, ifelse(m %in% names(.h2o.metrics_aliases), .h2o.metrics_aliases[m], m)))
        metrics <- object@metrics$thresholds_and_metric_scores[, c("threshold", h2o_metric)]
        # Optionally post-process: `op` rewrites the values, `name` renames
        # the metric column (used e.g. by h2o.error on top of accuracy).
        if (!missing(transform)) {
          if ('op' %in% names(transform)) {
            metrics[h2o_metric] <- transform$op(metrics[h2o_metric])
          }
          if ('name' %in% names(transform)) {
            names(metrics) <- c("threshold", transform$name)
          }
        }
      }
    } else if (thresholds == 'max' && missing(metric)) {
      # "max" with no metric: return the table of maximizing criteria.
      metrics <- object@metrics$max_criteria_and_metric_scores
    } else {
      if (missing(metric)) {
        h2o_metric <- avail_metrics
      } else {
        h2o_metric <- unlist(lapply(metric, function(m) ifelse(m %in% avail_metrics, m, ifelse(m %in% names(.h2o.metrics_aliases), .h2o.metrics_aliases[m], m))))
      }
      # "max" with a metric: substitute the metric-maximizing threshold(s).
      if (thresholds == 'max') thresholds <- h2o.find_threshold_by_max_metric(object, h2o_metric)
      # Pull the requested metric column(s) from the row closest to each threshold.
      metrics <- lapply(thresholds, function(t,o,m) h2o.find_row_by_threshold(o, t)[, m], object, h2o_metric)
      if (!missing(transform) && 'op' %in% names(transform)) {
        metrics <- lapply(metrics, transform$op)
      }
    }
    return(metrics)
  } else {
    stop(paste0("No ", metric, " for ",class(object)))
  }
}

# The wrappers below each fix the `metric` argument of h2o.metric();
# h2o.error and h2o.maxPerClassError derive their values by transforming
# accuracy metrics (1 - x).

#' @rdname h2o.metric
#' @export
h2o.F0point5 <- function(object, thresholds){
  h2o.metric(object, thresholds, "f0point5")
}

#' @rdname h2o.metric
#' @export
h2o.F1 <- function(object, thresholds){
  h2o.metric(object, thresholds, "f1")
}

#' @rdname h2o.metric
#' @export
h2o.F2 <- function(object, thresholds){
  h2o.metric(object, thresholds, "f2")
}

#' @rdname h2o.metric
#' @export
h2o.accuracy <- function(object, thresholds){
  h2o.metric(object, thresholds, "accuracy")
}

#' @rdname h2o.metric
#' @export
h2o.error <- function(object, thresholds){
  h2o.metric(object, thresholds, "accuracy", transform=list(name="error", op=function(acc) 1 - acc))
}

#' @rdname h2o.metric
#' @export
h2o.maxPerClassError <- function(object, thresholds){
  h2o.metric(object, thresholds, "min_per_class_accuracy", transform=list(name="max_per_class_error", op=function(mpc_acc) 1 - mpc_acc))
}

#' @rdname h2o.metric
#' @export
h2o.mean_per_class_accuracy <- function(object, thresholds){
  h2o.metric(object, thresholds, "mean_per_class_accuracy")
}

#' @rdname h2o.metric
#' @export
h2o.mcc <- function(object, thresholds){
  h2o.metric(object, thresholds, "absolute_mcc")
}

#' @rdname h2o.metric
#' @export
h2o.precision <- function(object, thresholds){
  h2o.metric(object, thresholds, "precision")
}

#' @rdname h2o.metric
#' @export
h2o.tpr <- function(object, thresholds){
  h2o.metric(object, thresholds, "tpr")
}

#' @rdname h2o.metric
#' @export
h2o.fpr <- function(object, thresholds){
  h2o.metric(object, thresholds, "fpr")
}

#' @rdname h2o.metric
#' @export
h2o.fnr <- function(object, thresholds){
  h2o.metric(object, thresholds, "fnr")
}

#' @rdname h2o.metric
#' @export
h2o.tnr <- function(object, thresholds){
  h2o.metric(object, thresholds, "tnr")
}

#' @rdname h2o.metric
#' @export
h2o.recall <- function(object, thresholds){
  h2o.metric(object, thresholds, "tpr")
}

#' @rdname h2o.metric
#' @export
h2o.sensitivity <- function(object, thresholds){
h2o.metric(object, thresholds, "tpr") } #' @rdname h2o.metric #' @export h2o.fallout <- function(object, thresholds){ h2o.metric(object, thresholds, "fpr") } #' @rdname h2o.metric #' @export h2o.missrate <- function(object, thresholds){ h2o.metric(object, thresholds, "fnr") } #' @rdname h2o.metric #' @export h2o.specificity <- function(object, thresholds){ h2o.metric(object, thresholds, "tnr") } #' Find the threshold, give the max metric #' #' @rdname h2o.find_threshold_by_max_metric #' @param object H2OBinomialMetrics #' @param metric "F1," for example #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' #' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv" #' cars <- h2o.importFile(f) #' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"]) #' predictors <- c("displacement", "power", "weight", "acceleration", "year") #' response <- "economy_20mpg" #' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234) #' train <- cars_split[[1]] #' valid <- cars_split[[2]] #' cars_gbm <- h2o.gbm(x = predictors, y = response, #' training_frame = train, validation_frame = valid, #' build_tree_one_node = TRUE , seed = 1234) #' perf <- h2o.performance(cars_gbm, cars) #' h2o.find_threshold_by_max_metric(perf, "fnr") #' } #' @export h2o.find_threshold_by_max_metric <- function(object, metric) { if(!is(object, "H2OBinomialMetrics")) stop(paste0("No ", metric, " for ",class(object))) max_metrics <- object@metrics$max_criteria_and_metric_scores h2o_metric <- sapply(metric, function(m) ifelse(m %in% names(.h2o.metrics_aliases), .h2o.metrics_aliases[m], m)) max_metrics[match(paste0("max ", h2o_metric), max_metrics$metric), "threshold"] } #' Find the threshold, give the max metric. 
No duplicate thresholds allowed #' #' @rdname h2o.find_row_by_threshold #' @param object H2OBinomialMetrics #' @param threshold number between 0 and 1 #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' #' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv" #' cars <- h2o.importFile(f) #' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"]) #' predictors <- c("displacement", "power", "weight", "acceleration", "year") #' response <- "economy_20mpg" #' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234) #' train <- cars_split[[1]] #' valid <- cars_split[[2]] #' cars_gbm <- h2o.gbm(x = predictors, y = response, #' training_frame = train, validation_frame = valid, #' build_tree_one_node = TRUE , seed = 1234) #' perf <- h2o.performance(cars_gbm, cars) #' h2o.find_row_by_threshold(perf, 0.5) #' } #' @export h2o.find_row_by_threshold <- function(object, threshold) { if(!is(object, "H2OBinomialMetrics")) stop(paste0("No ", threshold, " for ",class(object))) tmp <- object@metrics$thresholds_and_metric_scores if( is.null(tmp) ) return(NULL) res <- tmp[abs(as.numeric(tmp$threshold) - threshold) < 1e-8,] # relax the tolerance if( nrow(res) == 0L ) { # couldn't find any threshold within 1e-8 of the requested value, warn and return closest threshold row_num <- which.min(abs(tmp$threshold - threshold)) closest_threshold <- tmp$threshold[row_num] warning( paste0("Could not find exact threshold: ", threshold, " for this set of metrics; using closest threshold found: ", closest_threshold, ". Run `h2o.predict` and apply your desired threshold on a probability column.") ) return( tmp[row_num,] ) } else if( nrow(res) > 1L ) res <- res[1L,] res } #' #' Retrieve the Model Centers #' #' @param object An \linkS4class{H2OClusteringModel} object. 
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.centers(km)
#' }
#' @export
h2o.centers <- function(object) {
  # Drop the first column (the centroid id) and return the coordinates as a data frame.
  as.data.frame(object@model$centers[,-1])
}
#'
#' Retrieve the Model Centers STD
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.centersSTD(km)
#' }
#' @export
h2o.centersSTD <- function(object) {
  # Standardized centers; the first column (centroid id) is dropped after conversion.
  as.data.frame(object@model$centers_std)[,-1]
}
#'
#' Get the Within SS
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @export
h2o.withinss <- function(object) {
  # For clustering models the within-cluster sum of squares is reported as MSE.
  h2o.mse(object)
}
#'
#' Get the total within cluster sum of squares.
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training tot_withinss value is returned. If more
#' than one parameter is set to TRUE, then a named vector of tot_withinss' are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training total within cluster sum of squares
#' @param valid Retrieve the validation total within cluster sum of squares
#' @param xval Retrieve the cross-validation total within cluster sum of squares
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.tot_withinss(km, train = TRUE)
#' }
#' @export
h2o.tot_withinss <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # Default: training metric only.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$tot_withinss )
  v <- c()
  v_names <- c()
  if ( train ) {
    v <- c(v,model.parts$tm@metrics$tot_withinss)
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    # Missing validation metrics produce a warning, not an error.
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v <- c(v,model.parts$vm@metrics$tot_withinss)
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v <- c(v,model.parts$xm@metrics$tot_withinss)
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  # Single requested value is unwrapped; multiple values come back as a named vector.
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#' Get the between cluster sum of squares
#'
#' Get the between cluster sum of squares.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training betweenss value is returned. If more
#' than one parameter is set to TRUE, then a named vector of betweenss' are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training between cluster sum of squares
#' @param valid Retrieve the validation between cluster sum of squares
#' @param xval Retrieve the cross-validation between cluster sum of squares
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.betweenss(km, train = TRUE)
#' }
#' @export
h2o.betweenss <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # Default: training metric only.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$betweenss )
  v <- c()
  v_names <- c()
  if ( train ) {
    v <- c(v,model.parts$tm@metrics$betweenss)
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    # Missing validation metrics produce a warning, not an error.
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v <- c(v,model.parts$vm@metrics$betweenss)
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v <- c(v,model.parts$xm@metrics$betweenss)
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  # Single value is unwrapped; multiple values come back as a named vector.
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#'
#' Get the total sum of squares.
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training totss value is returned. If more
#' than one parameter is set to TRUE, then a named vector of totss' are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training total sum of squares
#' @param valid Retrieve the validation total sum of squares
#' @param xval Retrieve the cross-validation total sum of squares
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.totss(km, train = TRUE)
#' }
#' @export
h2o.totss <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # Default: training metric only.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$totss )
  v <- c()
  v_names <- c()
  if ( train ) {
    v <- c(v,model.parts$tm@metrics$totss)
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v <- c(v,model.parts$vm@metrics$totss)
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v <- c(v,model.parts$xm@metrics$totss)
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#'
#' Retrieve the number of iterations.
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial",
#'                         nfolds = 0, alpha = 0.5, lambda_search = FALSE)
#' h2o.num_iterations(prostate_glm)
#' }
#' @export
h2o.num_iterations <- function(object) {
  # Read straight off the model summary table.
  object@model$model_summary$number_of_iterations
}
#'
#' Retrieve centroid statistics
#'
#' Retrieve the centroid statistics.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training centroid stats value is returned. If more
#' than one parameter is set to TRUE, then a named list of centroid stats data frames are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training centroid statistics
#' @param valid Retrieve the validation centroid statistics
#' @param xval Retrieve the cross-validation centroid statistics
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.centroid_stats(km, train = TRUE)
#' }
#' @export
h2o.centroid_stats <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # Default: training stats only.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$centroid_stats )
  # Results are data frames, so accumulate into a list (not a vector).
  v <- list()
  v_names <- c()
  if ( train ) {
    v[[length(v)+1]] <- model.parts$tm@metrics$centroid_stats
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v[[length(v)+1]] <- model.parts$vm@metrics$centroid_stats
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v[[length(v)+1]] <- model.parts$xm@metrics$centroid_stats
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#'
#' Retrieve the cluster sizes
#'
#' Retrieve the cluster sizes.
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training cluster sizes value is returned. If more
#' than one parameter is set to TRUE, then a named list of cluster size vectors are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OClusteringModel} object.
#' @param train Retrieve the training cluster sizes
#' @param valid Retrieve the validation cluster sizes
#' @param xval Retrieve the cross-validation cluster sizes
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' fr <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv")
#' predictors <- c("sepal_len", "sepal_wid", "petal_len", "petal_wid")
#' km <- h2o.kmeans(x = predictors, training_frame = fr, k = 3, nfolds = 3)
#' h2o.cluster_sizes(km, train = TRUE)
#' }
#' @export
h2o.cluster_sizes <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  model.parts <- .model.parts(object)
  # Cluster sizes are the 'size' column of the centroid stats table.
  if ( !train && !valid && !xval ) return( model.parts$tm@metrics$centroid_stats$size )
  v <- list()
  v_names <- c()
  if ( train ) {
    v[[length(v)+1]] <- model.parts$tm@metrics$centroid_stats$size
    v_names <- c(v_names,"train")
  }
  if ( valid ) {
    if( is.null(model.parts$vm) ) invisible(.warn.no.validation())
    else {
      v[[length(v)+1]] <- model.parts$vm@metrics$centroid_stats$size
      v_names <- c(v_names,"valid")
    }
  }
  if ( xval ) {
    if( is.null(model.parts$xm) ) invisible(.warn.no.cross.validation())
    else {
      v[[length(v)+1]] <- model.parts$xm@metrics$centroid_stats$size
      v_names <- c(v_names,"xval")
    }
  }
  names(v) <- v_names
  if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
}
#'
#' Retrieve the null deviance
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training null deviance value is returned. If more
#' than one parameter is set to TRUE, then a named vector of null deviances are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param train Retrieve the training null deviance
#' @param valid Retrieve the validation null deviance
#' @param xval Retrieve the cross-validation null deviance
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial", nfolds = 0,
#'                         alpha = 0.5, lambda_search = FALSE)
#' h2o.null_deviance(prostate_glm, train = TRUE)
#' }
#' @export
h2o.null_deviance <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects expose the value directly; models are unpacked below.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$null_deviance )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$null_deviance
      if ( !is.null(metric) ) return(metric)
    }
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$null_deviance)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      # Missing validation metrics end the lookup with a warning.
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$null_deviance)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$null_deviance)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      # Single value is unwrapped; multiple values come back as a named vector.
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  # Fall through: unsupported object class or no metric present.
  warning(paste0("No null deviance for ", class(object)))
  invisible(NULL)
}
#' Retrieve the residual deviance
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training residual deviance value is returned. If more
#' than one parameter is set to TRUE, then a named vector of residual deviances are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param train Retrieve the training residual deviance
#' @param valid Retrieve the validation residual deviance
#' @param xval Retrieve the cross-validation residual deviance
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial",
#'                         nfolds = 0, alpha = 0.5, lambda_search = FALSE)
#' h2o.residual_deviance(prostate_glm, train = TRUE)
#' }
#' @export
h2o.residual_deviance <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  # Metrics objects expose the value directly; models are unpacked below.
  if( is(object, "H2OModelMetrics") ) return( object@metrics$residual_deviance )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$residual_deviance
      if ( !is.null(metric) ) return(metric)
    }
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$residual_deviance)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$residual_deviance)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$residual_deviance)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No residual deviance for ", class(object)))
  invisible(NULL)
}
#' Retrieve the residual degrees of freedom
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training residual degrees of freedom value is returned. If more
#' than one parameter is set to TRUE, then a named vector of residual degrees of freedom are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param train Retrieve the training residual degrees of freedom
#' @param valid Retrieve the validation residual degrees of freedom
#' @param xval Retrieve the cross-validation residual degrees of freedom
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial",
#'                         nfolds = 0, alpha = 0.5, lambda_search = FALSE)
#' h2o.residual_dof(prostate_glm, train = TRUE)
#' }
#' @export
h2o.residual_dof <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  if( is(object, "H2OModelMetrics") ) return( object@metrics$residual_degrees_of_freedom )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$residual_degrees_of_freedom
      if ( !is.null(metric) ) return(metric)
    }
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$residual_degrees_of_freedom)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$residual_degrees_of_freedom)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$residual_degrees_of_freedom)
        v_names <- c(v_names,"xval")
      }
    }
    if ( !is.null(v) ) {
      names(v) <- v_names
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No residual dof for ", class(object)))
  invisible(NULL)
}
#' Retrieve the null degrees of freedom
#'
#' If "train", "valid", and "xval" parameters are FALSE (default), then the training null degrees of freedom value is returned. If more
#' than one parameter is set to TRUE, then a named vector of null degrees of freedom are returned, where the names are "train", "valid"
#' or "xval".
#'
#' @param object An \linkS4class{H2OModel} or \linkS4class{H2OModelMetrics}
#' @param train Retrieve the training null degrees of freedom
#' @param valid Retrieve the validation null degrees of freedom
#' @param xval Retrieve the cross-validation null degrees of freedom
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial", nfolds = 0,
#'                         alpha = 0.5, lambda_search = FALSE)
#' h2o.null_dof(prostate_glm, train = TRUE)
#' }
#' @export
h2o.null_dof <- function(object, train=FALSE, valid=FALSE, xval=FALSE) {
  if( is(object, "H2OModelMetrics") ) return( object@metrics$null_degrees_of_freedom )
  if( is(object, "H2OModel") ) {
    model.parts <- .model.parts(object)
    if ( !train && !valid && !xval ) {
      metric <- model.parts$tm@metrics$null_degrees_of_freedom
      if ( !is.null(metric) ) return(metric)
    }
    v <- c()
    v_names <- c()
    if ( train ) {
      v <- c(v,model.parts$tm@metrics$null_degrees_of_freedom)
      v_names <- c(v_names,"train")
    }
    if ( valid ) {
      if( is.null(model.parts$vm) ) return(invisible(.warn.no.validation()))
      else {
        v <- c(v,model.parts$vm@metrics$null_degrees_of_freedom)
        v_names <- c(v_names,"valid")
      }
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return(invisible(.warn.no.cross.validation()))
      else {
        v <- c(v,model.parts$xm@metrics$null_degrees_of_freedom)
        v_names <- c(v_names,"xval")
      }
    }
    # (continuation of h2o.null_dof: collapse collected values and return)
    if ( !is.null(v) ) {
      names(v) <- v_names
      if ( length(v)==1 ) { return( v[[1]] ) } else { return( v ) }
    }
  }
  warning(paste0("No null dof for ", class(object)))
  invisible(NULL)
}
#' Access H2O Gains/Lift Tables
#'
#' Retrieve either a single or many Gains/Lift tables from H2O objects.
#'
#' The \linkS4class{H2OModelMetrics} version of this function will only take
#' \linkS4class{H2OBinomialMetrics} objects.
#'
#' @param object Either an \linkS4class{H2OModel} object or an
#'        \linkS4class{H2OModelMetrics} object.
#' @param newdata An H2OFrame object that can be scored on.
#'        Requires a valid response column.
#' @param valid Retrieve the validation metric.
#' @param xval Retrieve the cross-validation metric.
#' @param \dots further arguments to be passed to/from this method.
#' @return Calling this function on \linkS4class{H2OModel} objects returns a
#'         Gains/Lift table corresponding to the \code{\link{predict}} function.
#' @seealso \code{\link{predict}} for generating prediction frames,
#'          \code{\link{h2o.performance}} for creating
#'          \linkS4class{H2OModelMetrics}.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, distribution = "bernoulli",
#'                  training_frame = prostate, validation_frame = prostate, nfolds = 3)
#' h2o.gainsLift(model) ## extract training metrics
#' h2o.gainsLift(model, valid = TRUE) ## extract validation metrics (here: the same)
#' h2o.gainsLift(model, xval = TRUE) ## extract cross-validation metrics
#' h2o.gainsLift(model, newdata = prostate) ## score on new data (here: the same)
#' # Generating a ModelMetrics object
#' perf <- h2o.performance(model, prostate)
#' h2o.gainsLift(perf) ## extract from existing metrics object
#' }
#' @export
setGeneric("h2o.gainsLift", function(object, ...)
{})
#' @rdname h2o.gainsLift
#' @export
setMethod("h2o.gainsLift", "H2OModel", function(object, newdata, valid=FALSE, xval=FALSE,...) {
  model.parts <- .model.parts(object)
  if( missing(newdata) ) {
    # No new data: delegate to the stored validation / cross-validation / training metrics.
    if( valid ) {
      if( is.null(model.parts$vm) ) return( invisible(.warn.no.validation()) )
      else return( h2o.gainsLift(model.parts$vm) )
    }
    if ( xval ) {
      if( is.null(model.parts$xm) ) return( invisible(.warn.no.cross.validation()))
      else return( h2o.gainsLift(model.parts$xm) )
    }
    return( h2o.gainsLift(model.parts$tm) )
  } else {
    if( valid ) stop("Cannot have both `newdata` and `valid=TRUE`", call.=FALSE)
    if( xval ) stop("Cannot have both `newdata` and `xval=TRUE`", call.=FALSE)
  }
  # ok need to score on the newdata
  url <- paste0("Predictions/models/",object@model_id, "/frames/", h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method="POST")
  # Make the correct class of metrics object
  metrics <- new(sub("Model", "Metrics", class(object)), algorithm=object@algorithm, metrics= res$model_metrics[[1L]])
  h2o.gainsLift(metrics, ...)
})
#' @rdname h2o.gainsLift
#' @export
setMethod("h2o.gainsLift", "H2OModelMetrics", function(object) {
  # Only binomial metrics carry a gains/lift table.
  if( is(object, "H2OBinomialMetrics") ) {
    return(object@metrics$gains_lift_table)
  } else {
    warning(paste0("No Gains/Lift table for ",class(object)))
    return(NULL)
  }
})
#' Kolmogorov-Smirnov metric for binomial models
#'
#' Retrieves a Kolmogorov-Smirnov metric for given binomial model. The number returned is in range between 0 and 1.
#' K-S metric represents the degree of separation between the positive (1) and negative (0) cumulative distribution
#' functions. Detailed metrics per each group are to be found in the gains-lift table.
#'
#' The \linkS4class{H2OModelMetrics} version of this function will only take
#' \linkS4class{H2OBinomialMetrics} objects.
#'
#' @param object Either an \linkS4class{H2OModel} object or an
#'        \linkS4class{H2OModelMetrics} object.
#' @return Kolmogorov-Smirnov metric, a number between 0 and 1.
#' @seealso \code{\link{h2o.gainsLift}} to see detailed K-S metrics per group
#'
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' data <- h2o.importFile(
#'   path = "https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
#' model <- h2o.gbm(x = c("Origin", "Distance"), y = "IsDepDelayed",
#'                  training_frame = data, ntrees = 1)
#' h2o.kolmogorov_smirnov(model)
#' }
#' @export
setGeneric("h2o.kolmogorov_smirnov", function(object) {})
#' @rdname h2o.kolmogorov_smirnov
#' @export
setMethod("h2o.kolmogorov_smirnov", "H2OModelMetrics", function(object) {
  # K-S is the maximum of the per-group kolmogorov_smirnov column of the gains/lift table.
  gains_lift <- h2o.gainsLift(object = object)
  if(is.null(gains_lift)){
    warning(paste0("No Gains/Lift table for ",class(object)))
    return(NULL)
  } else {
    return(max(gains_lift$kolmogorov_smirnov))
  }
})
#' @rdname h2o.kolmogorov_smirnov
#' @export
setMethod("h2o.kolmogorov_smirnov", "H2OModel", function(object) {
  # Same computation as the metrics method; h2o.gainsLift dispatches on the model.
  gains_lift <- h2o.gainsLift(object = object)
  if(is.null(gains_lift)){
    warning(paste0("No Gains/Lift table for ",class(object)))
    return(NULL)
  } else {
    return(max(gains_lift$kolmogorov_smirnov))
  }
})
#' Access H2O Confusion Matrices
#'
#' Retrieve either a single or many confusion matrices from H2O objects.
#'
#' The \linkS4class{H2OModelMetrics} version of this function will only take
#' \linkS4class{H2OBinomialMetrics} or \linkS4class{H2OMultinomialMetrics}
#' objects. If no threshold is specified, all possible thresholds are selected.
#'
#' @param object Either an \linkS4class{H2OModel} object or an
#'        \linkS4class{H2OModelMetrics} object.
#' @param newdata An H2OFrame object that can be scored on.
#'        Requires a valid response column.
#' @param thresholds (Optional) A value or a list of valid values between 0.0 and 1.0.
#'        This value is only used in the case of
#'        \linkS4class{H2OBinomialMetrics} objects.
#' @param metrics (Optional) A metric or a list of valid metrics ("min_per_class_accuracy", "absolute_mcc", "tnr", "fnr", "fpr", "tpr", "precision", "accuracy", "f0point5", "f2", "f1").
#'        This value is only used in the case of
#'        \linkS4class{H2OBinomialMetrics} objects.
#' @param valid Retrieve the validation metric.
#' @param ... Extra arguments for extracting train or valid confusion matrices.
#' @return Calling this function on \linkS4class{H2OModel} objects returns a
#'         confusion matrix corresponding to the \code{\link{predict}} function.
#'         If used on an \linkS4class{H2OBinomialMetrics} object, returns a list
#'         of matrices corresponding to the number of thresholds specified.
#' @seealso \code{\link{predict}} for generating prediction frames,
#'          \code{\link{h2o.performance}} for creating
#'          \linkS4class{H2OModelMetrics}.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' h2o.confusionMatrix(model, prostate)
#' # Generating a ModelMetrics object
#' perf <- h2o.performance(model, prostate)
#' h2o.confusionMatrix(perf)
#' }
#' @export
setGeneric("h2o.confusionMatrix", function(object, ...) {})
#' @rdname h2o.confusionMatrix
#' @export
setMethod("h2o.confusionMatrix", "H2OModel", function(object, newdata, valid=FALSE, ...) {
  model.parts <- .model.parts(object)
  if( missing(newdata) ) {
    # No new data: delegate to stored validation or training metrics.
    if( valid ) {
      if( is.null(model.parts$vm) ) return( invisible(.warn.no.validation()) )
      else return( h2o.confusionMatrix(model.parts$vm, ...) )
    }
    else return( h2o.confusionMatrix(model.parts$tm, ...) )
  } else if( valid ) stop("Cannot have both `newdata` and `valid=TRUE`", call.=FALSE)
  # ok need to score on the newdata
  url <- paste0("Predictions/models/",object@model_id, "/frames/", h2o.getId(newdata))
  res <- .h2o.__remoteSend(url, method="POST")
  # Make the correct class of metrics object
  metrics <- new(sub("Model", "Metrics", class(object)), algorithm=object@algorithm, metrics= res$model_metrics[[1L]])
  # FIXME: don't think model metrics come out of Predictions anymore!!!
  h2o.confusionMatrix(metrics, ...)
})

# User-facing metric names mapped to the column names used in the thresholds table.
.h2o.metrics_aliases <- list(
  fallout='fpr',
  missrate='fnr',
  recall='tpr',
  sensitivity='tpr',
  specificity='tnr'
)

# Metrics for which a "max <metric>" row exists in the max-criteria table
# (plus their user-facing aliases); used to validate confusionMatrix requests.
.h2o.maximizing_metrics <- c('absolute_mcc', 'accuracy', 'precision', 'f0point5', 'f1', 'f2',
                             'mean_per_class_accuracy', 'min_per_class_accuracy',
                             'fpr', 'fnr', 'tpr', 'tnr', names(.h2o.metrics_aliases))

#' @rdname h2o.confusionMatrix
#' @export
setMethod("h2o.confusionMatrix", "H2OModelMetrics", function(object, thresholds=NULL, metrics=NULL) {
  if( !is(object, "H2OBinomialMetrics") ) {
    # Multinomial/ordinal metrics carry a single precomputed confusion matrix.
    if( is(object, "H2OMultinomialMetrics") || is(object, "H2OOrdinalMetrics"))
      return(object@metrics$cm$table)
    warning(paste0("No Confusion Matrices for ",class(object)))
    return(NULL)
  }
  # H2OBinomial case
  if( is.null(metrics) && is.null(thresholds) ) {
    metrics = c("f1")
  }
  # Normalize both arguments to lists (scalars become one-element lists).
  if( is(metrics, "list") ) metrics_list = metrics
  else {
    if( is.null(metrics) ) metrics_list = list()
    else metrics_list = list(metrics)
  }
  if( is(thresholds, "list") ) thresholds_list = thresholds
  else {
    if( is.null(thresholds) ) thresholds_list = list()
    else thresholds_list = list(thresholds)
  }
  # error check the metrics_list and thresholds_list
  if( !all(sapply(thresholds_list, f <- function(x) is.numeric(x) && x >= 0 && x <= 1)) )
    stop("All thresholds must be numbers between 0 and 1 (inclusive).")
  if( !all(sapply(metrics_list, f <- function(x) x %in% .h2o.maximizing_metrics)) )
    stop(paste("The only allowable metrics are ", paste(.h2o.maximizing_metrics, collapse=', ')))
  # make one big list that combines the thresholds and metric-thresholds
  metrics_thresholds = lapply(metrics_list, f <- function(x) h2o.find_threshold_by_max_metric(object, x))
  thresholds_list <- append(thresholds_list, metrics_thresholds)
  # Offset where the metric-derived thresholds begin, used to recover the metric name for headers.
  first_metrics_thresholds_offset <- length(thresholds_list) - length(metrics_thresholds)
  thresh2d <- object@metrics$thresholds_and_metric_scores
  actual_thresholds <- thresh2d$threshold
  d <- object@metrics$domain
  m <- lapply(seq_along(thresholds_list), function(i) {
    t <- thresholds_list[[i]]
    row <- h2o.find_row_by_threshold(object,t)
    if( is.null(row) ) NULL
    else {
      # Assemble a 3x4 display table: counts per actual class, error rate, and a "=x/y" rate string.
      tns <- row$tns; fps <- row$fps; fns <- row$fns; tps <- row$tps;
      rnames <- c(d, "Totals")
      cnames <- c(d, "Error", "Rate")
      col1 <- c(tns, fns, tns+fns)
      col2 <- c(fps, tps, fps+tps)
      col3 <- c(fps/(fps+tns), fns/(fns+tps), (fps+fns)/(fps+tns+fns+tps))
      col4 <- c( paste0(" =", fps, "/", fps+tns), paste0(" =", fns, "/", fns+tps), paste0(" =", fns+fps, "/", fps+tns+fns+tps) )
      fmts <- c("%i", "%i", "%f", "%s")
      tbl <- data.frame(col1,col2,col3,col4)
      colnames(tbl) <- cnames
      rownames(tbl) <- rnames
      header <- "Confusion Matrix (vertical: actual; across: predicted) "
      if(t %in% metrics_thresholds) {
        # Threshold came from a metric maximization: name the metric in the header.
        m <- metrics_list[i - first_metrics_thresholds_offset]
        if( length(m) > 1) m <- m[[1]]
        header <- paste(header, "for max", m, "@ threshold =", t)
      } else {
        header <- paste(header, "@ threshold =", row$threshold)
      }
      attr(tbl, "header") <- header
      attr(tbl, "formats") <- fmts
      oldClass(tbl) <- c("H2OTable", "data.frame")
      tbl
    }
  })
  # Single requested matrix is unwrapped from the list.
  if( length(m) == 1L ) return( m[[1L]] )
  m
})
#' Plot an H2O Model
#'
#' Plots training set (and validation set if available) scoring history for an H2O Model
#'
#' This method dispatches on the type of H2O model to select the correct
#' scoring history. The \code{timestep} and \code{metric} arguments are restricted to what is
#' available in the scoring history for a particular type of model.
#'
#' @param x A fitted \linkS4class{H2OModel} object for which the scoring history plot is desired.
#' @param timestep A unit of measurement for the x-axis.
#' @param metric A unit of measurement for the y-axis.
#' @param ... additional arguments to pass on.
#' @return Returns a scoring history plot.
#' @seealso \code{\link{h2o.deeplearning}}, \code{\link{h2o.gbm}},
#' \code{\link{h2o.glm}}, \code{\link{h2o.randomForest}} for model
#' generation in h2o.
#' @examples
#' \dontrun{
#' if (requireNamespace("mlbench", quietly=TRUE)) {
#' library(h2o)
#' h2o.init()
#'
#' df <- as.h2o(mlbench::mlbench.friedman1(10000, 1))
#' rng <- h2o.runif(df, seed = 1234)
#' train <- df[rng < 0.8,]
#' valid <- df[rng >= 0.8,]
#'
#' gbm <- h2o.gbm(x = 1:10, y = "y", training_frame = train, validation_frame = valid,
#' ntrees = 500, learn_rate = 0.01, score_each_iteration = TRUE)
#' plot(gbm)
#' plot(gbm, timestep = "duration", metric = "deviance")
#' plot(gbm, timestep = "number_of_trees", metric = "deviance")
#' plot(gbm, timestep = "number_of_trees", metric = "rmse")
#' plot(gbm, timestep = "number_of_trees", metric = "mae")
#' }
#' }
#' @export
plot.H2OModel <- function(x, timestep = "AUTO", metric = "AUTO", ...) {
  # Scoring history is stored with the model; pull it into a local data.frame.
  df <- as.data.frame(x@model$scoring_history)
  #Ensure metric and timestep can be passed in as upper case (by converting to lower case) if not "AUTO"
  if(metric != "AUTO"){
    metric = tolower(metric)
  }
  if(timestep != "AUTO"){
    timestep = tolower(timestep)
  }
  # Separate functionality for GLM since output is different from other algos
  if (x@algorithm == "glm") {
    # H2OBinomialModel and H2ORegressionModel have the same output
    # Also GLM has only one timestep option, which is `iteration`
    timestep <- "iteration"
    if (metric == "AUTO") {
      metric <- "log_likelihood"
    } else if (!(metric %in% c("log_likelihood", "objective"))) {
      stop("for GLM, metric must be one of: log_likelihood, objective")
    }
    graphics::plot(df$iteration, df[,c(metric)], type="l", xlab = timestep, ylab = metric, main = "Validation Scoring History", ...)
  } else if (x@algorithm == "glrm") {
    # GLRM also scores per iteration.
    timestep <- "iteration"
    if (metric == "AUTO") {
      metric <- "objective"
    } else if (!(metric %in% c("step_size", "objective"))) {
      stop("for GLRM, metric must be one of: step_size, objective")
    }
    graphics::plot(df$iteration, df[,c(metric)], type="l", xlab = timestep, ylab = metric, main = "Objective Function Value per Iteration", ...)
  } else if (x@algorithm %in% c("deeplearning", "drf", "gbm")) {
    # Validate/choose the metric based on the model's problem type.
    if (is(x, "H2OBinomialModel")) {
      if (metric == "AUTO") {
        metric <- "logloss"
      } else if (!(metric %in% c("logloss","auc","classification_error","rmse"))) {
        stop("metric for H2OBinomialModel must be one of: logloss, auc, classification_error, rmse")
      }
    } else if (is(x, "H2OMultinomialModel") || is(x, "H2OOrdinalModel")) {
      if (metric == "AUTO") {
        metric <- "classification_error"
      } else if (!(metric %in% c("logloss","classification_error","rmse"))) {
        stop("metric for H2OMultinomialModel/H2OOrdinalModel must be one of: logloss, classification_error, rmse")
      }
    } else if (is(x, "H2ORegressionModel")) {
      if (metric == "AUTO") {
        metric <- "rmse"
      } else if (!(metric %in% c("rmse","deviance","mae"))) {
        stop("metric for H2ORegressionModel must be one of: rmse, mae, or deviance")
      }
    } else {
      stop("Must be one of: H2OBinomialModel, H2OMultinomialModel, H2OOrdinalModel or H2ORegressionModel")
    }
    # Set timestep
    if (x@algorithm %in% c("gbm", "drf")) {
      if (timestep == "AUTO") {
        timestep <- "number_of_trees"
      } else if (!(timestep %in% c("duration","number_of_trees"))) {
        stop("timestep for gbm or drf must be one of: duration, number_of_trees")
      }
    } else { # x@algorithm == "deeplearning"
      # Delete first row of DL scoring history since it contains NAs & NaNs
      if (df$samples[1] == 0) {
        df <- df[-1,]
      }
      if (timestep == "AUTO") {
        timestep <- "epochs"
      } else if (!(timestep %in% c("epochs","samples","duration"))) {
        stop("timestep for deeplearning must be one of: epochs, samples, duration")
      }
    }
    # Scoring-history columns are prefixed per data split.
    training_metric <- sprintf("training_%s", metric)
    validation_metric <- sprintf("validation_%s", metric)
    if (timestep == "duration") {
      # Duration column holds strings like "1.234 sec"; split into a numeric
      # column named after the unit of measurement.
      trim <- function (ss) gsub("^\\s+|\\s+$", "", ss) #base::trimws not implemented for earlier versions of R, so we make our own trim function
      tt <- trim(df[2, c("duration")])
      dur_colname <- sprintf("duration_%s", strsplit(tt, " ")[[1]][2]) #parse units of measurement
      df[,c(dur_colname)] <- apply(as.matrix(df[,c("duration")]), 1, function(v) as.numeric(strsplit(trim(v), " ")[[1]][1]))
      timestep <- dur_colname
    }
    if (validation_metric %in% names(df)) { #Training and Validation scoring history
      ylim <- range(c(df[,c(training_metric)], df[,c(validation_metric)])) #sync up y axes
      if (sum(is.na(ylim))>1) {
        ylim <- c(0.0, 1.0)
      }
      # Overlay the two curves on a shared axis via par(new = TRUE).
      graphics::plot(df[,c(timestep)], df[,c(training_metric)], type="l", xlab = "", ylab = "", axes = FALSE, main = "Scoring History", col = "blue", ylim = ylim, ...)
      graphics::par(new = TRUE)
      graphics::plot(df[,c(timestep)], df[,c(validation_metric)], type="l", xlab = timestep, ylab = metric, col = "orange", ylim = ylim, ...)
      graphics::legend("topright", legend = c("Training", "Validation"), col = c("blue", "orange"), lty = c(1,1))
    } else { #Training scoring history only
      ylim <- range(c(df[,c(training_metric)]))
      if (sum(is.na(ylim))>1) {
        ylim <- c(0.0, 1.0)
      }
      graphics::plot(df[,c(timestep)], df[,c(training_metric)], type="l", xlab = timestep, ylab = training_metric, main = "Training Scoring History", col = "blue", ylim = ylim)
    }
  } else { # algo is not glm, deeplearning, drf, gbm
    stop("Plotting not implemented for this type of model")
  }
}

#' Plot Variable Importances
#'
#' Plot a trained model's variable importances.
#'
#' @param model A trained model (accepts a trained random forest, GBM,
#' or deep learning model, will use \code{\link{h2o.std_coef_plot}}
#' for a trained GLM)
#' @param num_of_features The number of features shown in the plot (default is 10 or all if less than 10).
#' @seealso \code{\link{h2o.std_coef_plot}} for GLM.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' model <- h2o.gbm(x = 3:9, y = 2, training_frame = prostate, distribution = "bernoulli")
#' h2o.varimp_plot(model)
#'
#' # for deep learning set the variable_importance parameter to TRUE
#' iris_hf <- as.h2o(iris)
#' iris_dl <- h2o.deeplearning(x = 1:4, y = 5, training_frame = iris_hf,
#'                             variable_importances = TRUE)
#' h2o.varimp_plot(iris_dl)
#' }
#' @export
h2o.varimp_plot <- function(model, num_of_features = NULL){
  # Variable-importance table for the trained model (one row per feature,
  # with a scaled_importance column used for the bar lengths).
  vi <- h2o.varimp(model)
  # Default: show all features when there are 10 or fewer, otherwise the top 10.
  if(is.null(num_of_features)) {
    num_of_features = min(length(vi$variable), 10)
  } else if ((num_of_features != round(num_of_features)) || (num_of_features <= 0)) {
    stop("num_of_features must be an integer greater than 0")
  }
  # Title names the algorithm that produced the importances.
  if(model@algorithm[1] == "deeplearning") {
    title = "Variable Importance: Deep Learning"
  } else {
    title = paste("Variable Importance: ", toupper(model@algorithm[1]), sep="")
  }
  # Use the longest y label to widen the left margin so long feature names
  # are not cut off.
  ylabels = vi$variable
  ymargin <- max(strwidth(ylabels, "inch")+0.4, na.rm = TRUE)
  par(mai=c(1.02,ymargin,0.82,0.42))
  # if num_of_features = 1, create only one bar (adjust size to look nice)
  if(num_of_features == 1) {
    barplot(rev(head(vi$scaled_importance, n = num_of_features)),
            names.arg = rev(head(vi$variable, n = num_of_features)),
            width = 0.2, space = 1, horiz = TRUE, las = 2,
            ylim = c(0, 2), xlim = c(0, 1), axes = TRUE,
            col = '#1F77B4', main = title)
  } else if (num_of_features > 1) {
    # Horizontal bars, most important feature drawn on top.
    barplot(rev(head(vi$scaled_importance, n = num_of_features)),
            names.arg = rev(head(vi$variable, n = num_of_features)),
            space = 1, las = 2, horiz = TRUE,
            col = '#1F77B4', # blue
            main = title)
  }
}

#' Plot Standardized Coefficient Magnitudes
#'
#' Plot a GLM model's standardized coefficient magnitudes.
#'
#' @param model A trained generalized linear model
#' @param num_of_features The number of features to be shown in the plot
#' @seealso \code{\link{h2o.varimp_plot}} for variable importances plot of
#' random forest, GBM, deep learning.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial",
#'                         nfolds = 0, alpha = 0.5, lambda_search = FALSE)
#' h2o.std_coef_plot(prostate_glm)
#' }
#' @export
h2o.std_coef_plot <- function(model, num_of_features = NULL){
  # Only GLMs carry (standardized) coefficient tables.
  if(model@algorithm[1] != "glm") stop("Warning: model must be a GLM")
  maxcoeff = 1
  if (model@model$model_summary["family"]=="multinomial") {
    # Multinomial GLMs expose pre-computed coefficient magnitudes.
    coeff_table <- model@model$standardized_coefficient_magnitudes
    sorted_table <- coeff_table[order(abs(coeff_table$coefficients)),]
    norm_coef <- sorted_table$coefficients
    sort_norm <- norm_coef
    maxcoeff = max(norm_coef)
  } else {
    # Other GLM families: take the standardized coefficients, drop the
    # intercept row, and sort by absolute magnitude.
    coeff_table_complete <- model@model$coefficients_table
    coeff_table <- coeff_table_complete[coeff_table_complete$names != "Intercept",]
    sorted_table <- coeff_table[order(abs(coeff_table$standardized_coefficients)),]
    norm_coef <- sorted_table$standardized_coefficients
    sort_norm <- abs(sorted_table$standardized_coefficients)
  }
  labels <- sorted_table$names
  # Default to plotting every coefficient.
  if(is.null(num_of_features)) {
    num_of_features = length(norm_coef)
  } else if ((num_of_features != round(num_of_features)) || (num_of_features <= 0)) {
    stop("num_of_features must be an integer greater than 0")
  }
  # Bar colors and legend labels keyed on coefficient sign (vectorized
  # replacement for the original grow-with-append loops).
  color_code <- ifelse(norm_coef >= 0, "#1F77B4", '#FF7F0E')   # blue / orange
  color_sign <- ifelse(norm_coef >= 0, "Positive", 'Negative')
  # Widen the left margin so long coefficient names are not cut off.
  ylabels = labels
  ymargin <- max(strwidth(ylabels, "inch")+0.4, na.rm = TRUE)
  par(mai=c(1.02,ymargin,0.82,0.42))
  # check if num_of_features = 1 and plot only one bar
  if(num_of_features == 1) {
    barplot(rev(sort_norm)[num_of_features],
            names.arg = rev(labels)[num_of_features],
            width = 0.2, space = 1, horiz = TRUE, las = 1,
            ylim = c(0, 2), xlim = c(0, maxcoeff),
            col = rev(color_code)[num_of_features],
            main = "Standardized Coef. Magnitudes")
  } else {
    # create horizontal barplot for the top num_of_features coefficients
    barplot(tail(sort_norm, n = num_of_features),
            names.arg = tail(labels, n = num_of_features),
            legend.text = TRUE, space = 1, horiz = TRUE, las = 1,
            col = tail(color_code, n = num_of_features),
            xlim = c(0, maxcoeff),
            main = "Standardized Coef. Magnitudes")
  }
  # add legend, that adapts if one to all bars are plotted
  legend('bottomright',
         legend = unique(tail(color_sign, n = num_of_features)),
         col = unique(tail(color_code, n = num_of_features)),
         pch = 20)
}

#' @export
plot.H2OBinomialMetrics <- function(x, type = "roc", main, ...) {
  # TODO: add more types (i.e. cutoffs)
  if(!type %in% c("roc", "pr")) stop("type must be 'roc' or 'pr'")
  if(type == "roc") {
    # FIX: the FPR/TPR abbreviations were swapped in the axis labels.
    xaxis <- "False Positive Rate (FPR)"; yaxis = "True Positive Rate (TPR)"
    if(missing(main)) {
      main <- "Receiver Operating Characteristic curve"
      if(x@on_train) {
        main <- paste(main, "(on train)")
      } else if (x@on_valid) {
        main <- paste(main, "(on valid)")
      }
    }
    xdata <- x@metrics$thresholds_and_metric_scores$fpr
    ydata <- x@metrics$thresholds_and_metric_scores$tpr
    graphics::plot(xdata, ydata, main = main, xlab = xaxis, ylab = yaxis,
                   ylim=c(0,1), xlim=c(0,1),
                   type='l', lty=2, col='blue', lwd=2, panel.first = grid())
    # Diagonal reference line = performance of a random classifier.
    graphics::abline(0, 1, lty = 2)
  } else if(type=="pr"){
    # FIX: recall is TP/(TP+FN) and precision is TP/(TP+FP); the labels
    # previously carried the wrong formula/abbreviation.
    xaxis <- "Recall (TP/(TP+FN))"; yaxis = "Precision (TP/(TP+FP))"
    if(missing(main)) {
      main <- "Precision Recall curve"
      if(x@on_train) {
        main <- paste(main, "(on train)")
      } else if (x@on_valid) {
        main <- paste(main, "(on valid)")
      }
    }
    # Reverse so the curve is drawn left-to-right in increasing recall order.
    xdata <- rev(x@metrics$thresholds_and_metric_scores$recall)
    ydata <- rev(x@metrics$thresholds_and_metric_scores$precision)
    graphics::plot(xdata, ydata, main = main, xlab = xaxis, ylab = yaxis,
                   ylim=c(0,1), xlim=c(0,1),
                   type='l', lty=2, col='blue', lwd=2, panel.first = grid())
  }
}

#' @export
screeplot.H2ODimReductionModel <- function(x, npcs, type = "barplot", main, ...)
{ # body of screeplot.H2ODimReductionModel (signature closes the previous line)
  if(x@algorithm != "pca") stop("x must be an H2O PCA model")
  # Default to at most the first 10 components; otherwise validate the request.
  if(missing(npcs)) npcs = min(10, x@model$parameters$k) else if(!is.numeric(npcs) || npcs < 1 || npcs > x@model$parameters$k)
    stop(paste("npcs must be a positive integer between 1 and", x@model$parameters$k, "inclusive"))
  sdevH2O <- h2o.sdev(x)
  # Default title shows (a truncated form of) the training frame id.
  if(missing(main)) main = paste("h2o.prcomp(", strtrim(x@parameters$training_frame, 20), ")", sep="")
  # Component variance = squared standard deviation.
  if(type == "barplot") barplot(sdevH2O[1:npcs]^2, main = main, ylab = "Variances", ...) else if(type == "lines") lines(sdevH2O[1:npcs]^2, main = main, ylab = "Variances", ...) else stop("type must be either 'barplot' or 'lines'")
}

#'
#' Retrieve the standard deviations of principal components
#'
#' @param object An \linkS4class{H2ODimReductionModel} object.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' cars_pca <- h2o.prcomp(cars, transform = "STANDARDIZE",
#'                        k = 3, x = predictors, seed = 12345)
#' h2o.sdev(cars_pca)
#' }
#' @export
h2o.sdev <- function(object) {
  if(!is(object, "H2ODimReductionModel") || object@algorithm != "pca") stop("object must be an H2O PCA model")
  # First row of the importance table holds the component standard deviations.
  as.numeric(object@model$importance[1,])
}

# extract "bite size" pieces from a model
# Returns a list with the model object (o), its model slot (m), training
# metrics (tm), and — when present — validation metrics (vm), cross-validation
# metrics (xm) and their summary (xms); absent pieces come back as NULL.
.model.parts <- function(object) {
  o <- object
  m <- object@model
  tm <- object@model$training_metrics
  vm <- object@model$validation_metrics
  xm <- object@model$cross_validation_metrics
  xms <- object@model$cross_validation_metrics_summary
  if( !is.null(vm@metrics) && !is.null(xm@metrics) ) return( list(o=o,m=m,tm=tm,vm= vm,xm= xm,xms=xms) )
  if( is.null(vm@metrics) && !is.null(xm@metrics) ) return( list(o=o,m=m,tm=tm,vm=NULL,xm= xm,xms=xms) )
  if( !is.null(vm@metrics) && is.null(xm@metrics) ) return( list(o=o,m=m,tm=tm,vm= vm,xm=NULL,xms=NULL) )
  return( list(o=o,m=m,tm=tm,vm=NULL,xm=NULL,xms=NULL) )
}

# Warn-and-return-NULL helpers used by accessors when the requested metrics
# were not computed for the model.
.warn.no.validation <- function() {
  warning("No validation metrics available.", call.=FALSE)
  NULL
}

.warn.no.cross.validation <- function() {
  warning("No cross-validation metrics available.", call.=FALSE)
  NULL
}

# TRUE unless the algorithm is unsupervised (kmeans, glrm, pca, or a
# deeplearning autoencoder).
.isSupervised <- function(algo, params) {
  if (algo == "kmeans" || algo == "glrm" || algo == "pca" || (algo == "deeplearning" && !is.null(params$autoencoder) && params$autoencoder)) {
    FALSE
  } else {
    TRUE
  }
}

# Transform given name to expected values ("gbm", "drf").
# It allows for having algorithm name aliases.
.h2o.unifyAlgoName <- function(algo) {
  result <- if (algo == "randomForest") "drf" else algo
  result
}

#
# Returns REST API version for given algo.
#
.h2o.getAlgoVersion <- function(algo, h2oRestApiVersion = .h2o.__REST_API_VERSION) {
  result <- .h2o.__remoteSend(method = "GET", h2oRestApiVersion = h2oRestApiVersion, .h2o.__MODEL_BUILDERS(algo))$model_builders[[algo]][["__meta"]]$schema_version
  result
}

#' Tabulation between Two Columns of an H2OFrame
#'
#' Simple Co-Occurrence based tabulation of X vs Y, where X and Y are two Vecs in a given dataset.
#' Uses histogram of given resolution in X and Y.
#' Handles numerical/categorical data and missing values. Supports observation weights.
#'
#' @param data An H2OFrame object.
#' @param x predictor column #' @param y response column #' @param weights_column (optional) observation weights column #' @param nbins_x number of bins for predictor column #' @param nbins_y number of bins for response column #' @return Returns two TwoDimTables of 3 columns each #' count_table: X Y counts #' response_table: X meanY counts #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' df <- as.h2o(iris) #' tab <- h2o.tabulate(data = df, x = "Sepal.Length", y = "Petal.Width", #' weights_column = NULL, nbins_x = 10, nbins_y = 10) #' plot(tab) #' } #' @export h2o.tabulate <- function(data, x, y, weights_column = NULL, nbins_x = 50, nbins_y = 50 ) { args <- .verify_datacols(data, c(x,y)) if(!is.numeric(nbins_x)) stop("`nbins_x` must be a positive number") if(!is.numeric(nbins_y)) stop("`nbins_y` must be a positive number") parms = list() parms$dataset <- attr(data, "id") parms$predictor <- args$cols[1] parms$response <- args$cols[2] if( !missing(weights_column) ) parms$weight <- weights_column parms$nbins_predictor <- nbins_x parms$nbins_response <- nbins_y res <- .h2o.__remoteSend(method = "POST", h2oRestApiVersion = 99, page = "Tabulate", .params = parms) count_table <- res$count_table response_table <- res$response_table out <- list(count_table = count_table, response_table = response_table, cols = args$cols) oldClass(out) <- c("H2OTabulate", "list") out } #' Plot an H2O Tabulate Heatmap #' #' Plots the simple co-occurrence based tabulation of X vs Y as a heatmap, where X and Y are two Vecs in a given dataset. This function requires suggested ggplot2 package. #' #' @param x An H2OTabulate object for which the heatmap plot is desired. #' @param xlab A title for the x-axis. Defaults to what is specified in the given H2OTabulate object. #' @param ylab A title for the y-axis. Defaults to what is specified in the given H2OTabulate object. #' @param base_size Base font size for plot. #' @param ... additional arguments to pass on. 
#' @return Returns a ggplot2-based heatmap of co-occurrence.
#' @seealso \code{\link{h2o.tabulate}}
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' df <- as.h2o(iris)
#' tab <- h2o.tabulate(data = df, x = "Sepal.Length", y = "Petal.Width",
#'                     weights_column = NULL, nbins_x = 10, nbins_y = 10)
#' plot(tab)
#' }
#' @export
plot.H2OTabulate <- function(x, xlab = x$cols[1], ylab = x$cols[2], base_size = 12, ...) {
  if (!inherits(x, "H2OTabulate")) {
    stop("Must be an H2OTabulate object")
  }
  # ggplot2 is only suggested (not imported) by the h2o package.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("In order to plot.H2OTabulate you must have ggplot2 package installed")
  }
  # Pull small counts table into R memory to plot
  df <- as.data.frame(x$count_table)
  names(df) <- c("c1", "c2", "counts")
  # Reorder the levels for better plotting: numeric order when every label is
  # numeric, lexicographic otherwise (a non-numeric label makes as.numeric
  # produce NA, which the is.na(sum(...)) test detects).
  if (suppressWarnings(is.na(sum(as.numeric(df$c1))))) {
    c1_order <- order(unique(df$c1))
  } else {
    c1_order <- order(unique(as.numeric(df$c1)))
  }
  if (suppressWarnings(is.na(sum(as.numeric(df$c2))))) {
    c2_order <- order(unique(df$c2))
  } else {
    c2_order <- order(unique(as.numeric(df$c2)))
  }
  c1_labels <- unique(df$c1)
  c2_labels <- unique(df$c2)
  df$c1 <- factor(df$c1, levels = c1_labels[c1_order])
  df$c2 <- factor(df$c2, levels = c2_labels[c2_order])
  # Plot heatmap
  c1 <- c2 <- counts <- NULL #set these to pass CRAN checks w/o warnings
  (p <- ggplot2::ggplot(df, ggplot2::aes(c1, c2)) +
     ggplot2::geom_tile(ggplot2::aes(fill = counts), colour = "white") +
     ggplot2::scale_fill_gradient(low = "white", high = "steelblue"))
  # Adjust the plot
  p <- p + ggplot2::theme_grey(base_size = base_size) +
    ggplot2::labs(x = xlab, y = ylab) +
    ggplot2::scale_x_discrete(expand = c(0, 0)) +
    ggplot2::scale_y_discrete(expand = c(0, 0)) +
    ggplot2::theme(legend.position = "none",
                   axis.ticks = ggplot2::element_blank(),
                   axis.text.x = ggplot2::element_text(size = base_size * 0.8, angle = 330, hjust = 0, colour = "grey50"))
  # Return a ggplot object
  return(p)
}

#'
#' Retrieve the cross-validation models
#'
#' @param object An \linkS4class{H2OModel} object.
#' @return Returns a list of H2OModel objects
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv"
#' cars <- h2o.importFile(f)
#' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"])
#' predictors <- c("displacement", "power", "weight", "acceleration", "year")
#' response <- "economy_20mpg"
#' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234)
#' train <- cars_split[[1]]
#' valid <- cars_split[[2]]
#' cars_gbm <- h2o.gbm(x = predictors, y = response, training_frame = train,
#'                     nfolds = 5, keep_cross_validation_models = TRUE, seed = 1234)
#' h2o.cross_validation_models(cars_gbm)
#' }
#' @export
h2o.cross_validation_models <- function(object) {
  if(!is(object, "H2OModel")) stop("object must be an H2O model")
  # NULL unless the model was built with keep_cross_validation_models = TRUE.
  if (is.null(object@model$cross_validation_models)) return(NULL)
  # Each entry holds only a model key; materialize each as an H2OModel.
  lapply(object@model$cross_validation_models, function(x) h2o.getModel(x$name))
}

#'
#' Retrieve the cross-validation fold assignment
#'
#' @param object An \linkS4class{H2OModel} object.
#' @return Returns a H2OFrame #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' #' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv" #' cars <- h2o.importFile(f) #' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"]) #' predictors <- c("displacement", "power", "weight", "acceleration", "year") #' response <- "economy_20mpg" #' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234) #' train <- cars_split[[1]] #' valid <- cars_split[[2]] #' cars_gbm <- h2o.gbm(x = predictors, y = response, training_frame = train, #' nfolds = 5, keep_cross_validation_fold_assignment = TRUE, seed = 1234) #' h2o.cross_validation_fold_assignment(cars_gbm) #' } #' @export h2o.cross_validation_fold_assignment <- function(object) { if(!is(object, "H2OModel")) stop("object must be an H2O model") if (is.null(object@model$cross_validation_fold_assignment)) return(NULL) h2o.getFrame(object@model$cross_validation_fold_assignment$name) } #' #' Retrieve the cross-validation holdout predictions #' #' @param object An \linkS4class{H2OModel} object. 
#' @return Returns a H2OFrame #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' #' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv" #' cars <- h2o.importFile(f) #' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"]) #' predictors <- c("displacement","power","weight","acceleration","year") #' response <- "economy_20mpg" #' cars_split <- h2o.splitFrame(data = cars,ratios = 0.8, seed = 1234) #' train <- cars_split[[1]] #' valid <- cars_split[[2]] #' cars_gbm <- h2o.gbm(x = predictors, y = response, training_frame = train, #' nfolds = 5, keep_cross_validation_predictions = TRUE, seed = 1234) #' h2o.cross_validation_holdout_predictions(cars_gbm) #' } #' @export h2o.cross_validation_holdout_predictions <- function(object) { if(!is(object, "H2OModel")) stop("object must be an H2O model") if (is.null(object@model$cross_validation_holdout_predictions)) return(NULL) h2o.getFrame(object@model$cross_validation_holdout_predictions$name) } #' #' Retrieve the cross-validation predictions #' #' @param object An \linkS4class{H2OModel} object. 
#' @return Returns a list of H2OFrame objects #' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' #' f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv" #' cars <- h2o.importFile(f) #' cars["economy_20mpg"] <- as.factor(cars["economy_20mpg"]) #' predictors <- c("displacement", "power", "weight", "acceleration", "year") #' response <- "economy_20mpg" #' cars_split <- h2o.splitFrame(data = cars, ratios = 0.8, seed = 1234) #' train <- cars_split[[1]] #' valid <- cars_split[[2]] #' cars_gbm <- h2o.gbm(x = predictors, y = response, training_frame = train, #' nfolds = 5, keep_cross_validation_predictions = TRUE, seed = 1234) #' h2o.cross_validation_predictions(cars_gbm) #' } #' @export h2o.cross_validation_predictions <- function(object) { if(!is(object, "H2OModel")) stop("object must be an H2O model") if (is.null(object@model$cross_validation_predictions)) return(NULL) lapply(object@model$cross_validation_predictions, function(x) h2o.getFrame(x$name)) } #' Partial Dependence Plots #' #' Partial dependence plot gives a graphical depiction of the marginal effect of a variable on the response. The effect #' of a variable is measured in change in the mean response. Note: Unlike randomForest's partialPlot when plotting #' partial dependence the mean response (probabilities) is returned rather than the mean of the log class probability. #' #' @param object An \linkS4class{H2OModel} object. #' @param data An H2OFrame object used for scoring and constructing the plot. #' @param cols Feature(s) for which partial dependence will be calculated. #' @param destination_key An key reference to the created partial dependence tables in H2O. #' @param nbins Number of bins used. For categorical columns make sure the number of bins exceeds the level count. #' If you enable add_missing_NA, the returned length will be nbin+1. #' @param plot A logical specifying whether to plot partial dependence table. 
#' @param plot_stddev A logical specifying whether to add std err to partial dependence plot. #' @param weight_column A string denoting which column of data should be used as the weight column. #' @param include_na A logical specifying whether missing value should be included in the Feature values. #' @param user_splits A two-level nested list containing user defined split points for pdp plots for each column. #' If there are two columns using user defined split points, there should be two lists in the nested list. #' Inside each list, the first element is the column name followed by values defined by the user. #' @param col_pairs_2dpdp A two-level nested list like this: col_pairs_2dpdp = list(c("col1_name", "col2_name"), #' c("col1_name","col3_name"), ...,) where a 2D partial plots will be generated for col1_name, col2_name pair, for #' col1_name, col3_name pair and whatever other pairs that are specified in the nested list. #' @param save_to Fully qualified prefix of the image files the resulting plots should be saved to, e.g. '/home/user/pdp'. #' Plots for each feature are saved separately in PNG format, each file receives a suffix equal to the corresponding feature name, e.g. `/home/user/pdp_AGE.png`. #' If the files already exists, they will be overridden. Files are only saves if plot = TRUE (default). #' @return Plot and list of calculated mean response tables for each feature requested. #' @param row_index Row for which partial dependence will be calculated instead of the whole input frame. #' @param targets Target classes for multinomial model. 
#' @examples #' \dontrun{ #' library(h2o) #' h2o.init() #' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o") #' prostate <- h2o.uploadFile(path = prostate_path) #' prostate[, "CAPSULE"] <- as.factor(prostate[, "CAPSULE"] ) #' prostate[, "RACE"] <- as.factor(prostate[, "RACE"] ) #' prostate_gbm <- h2o.gbm(x = c("AGE", "RACE"), #' y = "CAPSULE", #' training_frame = prostate, #' ntrees = 10, #' max_depth = 5, #' learn_rate = 0.1) #' h2o.partialPlot(object = prostate_gbm, data = prostate, cols = c("AGE", "RACE")) #' #' iris_hex <- as.h2o(iris) #' iris_gbm <- h2o.gbm(x = c(1:4), y = 5, training_frame = iris_hex) #' #' # one target class #' h2o.partialPlot(object = iris_gbm, data = iris_hex, cols="Petal.Length", targets=c("setosa")) #' # three target classes #' h2o.partialPlot(object = iris_gbm, data = iris_hex, cols="Petal.Length", #' targets=c("setosa", "virginica", "versicolor")) #' } #' @export h2o.partialPlot <- function(object, data, cols, destination_key, nbins=20, plot = TRUE, plot_stddev = TRUE, weight_column=-1, include_na=FALSE, user_splits=NULL, col_pairs_2dpdp=NULL, save_to=NULL, row_index=-1, targets=NULL) { if(!is(object, "H2OModel")) stop("object must be an H2Omodel") if( is(object, "H2OOrdinalModel")) stop("object must be a regression model or binary and multinomial classfier") if(!is(data, "H2OFrame")) stop("data must be H2OFrame") if(!is.numeric(nbins) | !(nbins > 0) ) stop("nbins must be a positive numeric") if(!is.logical(plot)) stop("plot must be a logical value") if(!is.logical(plot_stddev)) stop("plot must be a logical value") if(!is.logical(include_na)) stop("add_missing_NA must be a logical value") if((is(object, "H2OMultinomialModel"))){ if(is.null(targets)) stop("targets parameter has to be set for multinomial classification") for(i in 1:length(targets)){ if(!is.character(targets[i])) stop("targets parameter must be a list of string values") } } noPairs = missing(col_pairs_2dpdp) noCols = missing(cols) if(noCols && 
noPairs) cols = object@parameters$x # set to default only if both are missing y = object@parameters$y numCols = 0 numColPairs = 0 if (!missing(cols)) { # check valid cols in cols for 1d pdp x <- cols args <- .verify_dataxy(data, x, y) } cpairs <- NULL if (!missing(col_pairs_2dpdp)) { # verify valid cols for 2d pdp for (onePair in col_pairs_2dpdp) { pargs <- .verify_dataxy(data, onePair, y) cpairs <- c(cpairs, paste0("[", paste (pargs$x, collapse = ','), "]")) } numColPairs = length(cpairs) } if (is.numeric(weight_column) && (weight_column != -1)) { stop("weight_column should be a column name of your data frame.") } else if (is.character(weight_column)) { # weight_column_index is column name if (!weight_column %in% h2o.names(data)) stop("weight_column_index should be one of your columns in your data frame.") else weight_column <- match(weight_column, h2o.names(data))-1 } if (!is.numeric(row_index)) { stop("row_index should be numeric.") } parms = list() if (!missing(col_pairs_2dpdp)) { parms$col_pairs_2dpdp <- paste0("[", paste (cpairs, collapse = ','), "]") } if (!missing(cols)) { parms$cols <- paste0("[", paste (args$x, collapse = ','), "]") numCols = length(cols) } if(is.null(targets)){ num_1d_pp_data <- numCols } else { num_1d_pp_data <- numCols * length(targets) } noCols = missing(cols) parms$model_id <- attr(object, "model_id") parms$frame_id <- attr(data, "id") parms$nbins <- nbins parms$weight_column_index <- weight_column parms$add_missing_na <- include_na parms$row_index = row_index if (is.null(user_splits) || length(user_splits) == 0) { parms$user_cols <- NULL parms$user_splits <- NULL parms$num_user_splits <- NULL } else { user_cols <- c() user_values <- c() user_num_splits <- c() column_names <- h2o.names(data) for (ind in c(1:length(user_splits))) { aList <- user_splits[[ind]] csname = aList[1] if (csname %in% column_names) { if (h2o.isnumeric(data[csname]) || h2o.isfactor(data[csname])) { nVal <- length(aList)-1 if (h2o.isfactor(data[csname])) { 
domains <- h2o.levels(data[csname]) # enum values tempVal <- aList[2:length(aList)] intVals <- c(1:length(tempVal)) for (eleind in c(1:nVal)) { eleIndex <- which(domains == tempVal[eleind]) if (eleIndex>0) { intVals[eleind] <- which(domains == tempVal[eleind]) - 1 } else { stop("Illegal enum value encountered. To include missing values in your feature values, set include_na to TRUE") } } user_values <- c(user_values, intVals) } else { vals <- as.numeric(unlist(strsplit(aList[2:length(aList)], ","))) user_values <- c(user_values, vals) } user_num_splits <- c(user_num_splits, nVal) user_cols <- c(user_cols, csname) } else { stop ("Partial dependency plots are generated for numerical and categorical columns only.") } } else { stop( "column names used in user_splits are not valid. They should be chosen from the columns of your data set" ) } } parms$user_cols <- paste0("[", paste(user_cols, collapse=','), "]") parms$user_splits <- paste0("[", paste(user_values, collapse=','), "]") parms$num_user_splits <- paste0("[", paste(user_num_splits, collapse=','), "]") } if(!is.null(targets)) { parms$targets <- paste0("[", paste (targets, collapse = ','), "]") } if(!missing(destination_key)) parms$destination_key = destination_key res <- .h2o.__remoteSend(method = "POST", h2oRestApiVersion = 3, page = "PartialDependence/", .params = parms) .h2o.__waitOnJob(res$key$name) url <- gsub("/3/", "", res$dest$URL) res <- .h2o.__remoteSend(url, method = "GET", h2oRestApiVersion = 3) ## Change feature names to the original supplied, the following is okay because order is preserved pps <- res$partial_dependence_data min_y <- min(pps[[1]][,2]) max_y <- max(pps[[1]][,2]) min_lower <- min_y max_upper <- max_y col_name_index <- 1 for (i in 1:length(pps)) { pp <- pps[[i]] if (!all(is.na(pp))) { min_y <- min(min_y, min(pp[,2])) max_y <- max(max_y, max(pp[,2])) min_lower <- min(min_lower, pp[,2] - pp[,3]) max_upper <- max(max_upper, pp[,2] + pp[,3]) if (i <= num_1d_pp_data) { if(is.null(targets)){ 
col_name_index = i title <- paste("Partial dependency plot for", cols[col_name_index]) } else if(!is.null(targets)){ if(length(cols) > 1 && i %% length(cols) == 0) { col_name_index = col_name_index + 1 } if(length(targets) > 1) { title <- paste("Partial dependency plot for", cols[col_name_index], "and classes\n", paste(targets, collapse=", ")) } else { title <- paste("Partial dependency plot for", cols[col_name_index], "and class", targets) } } names(pps[[i]]) <- c(cols[col_name_index], "mean_response", "stddev_response", "std_error_mean_response") attr(pps[[i]],"description") <- title } else { names(pps[[i]]) <- c(col_pairs_2dpdp[[i-num_1d_pp_data]][1], col_pairs_2dpdp[[i-num_1d_pp_data]][2], "mean_response", "stddev_response", "std_error_mean_response") attr(pps[[i]],"description") <- paste('2D partial dependence plot for', col_pairs_2dpdp[[i-num_1d_pp_data]][1], "and", col_pairs_2dpdp[[i-num_1d_pp_data]][1]) } } } col_types = unlist(h2o.getTypes(data)) col_names = names(data) pp.plot.1d <- function(pp) { if(!all(is.na(pp))) { x <- pp[,1] y <- pp[,2] stddev <- pp[,3] type <- col_types[which(col_names == names(pp)[1])] if(type == "enum") { line_type <- "p" lty <- NULL pch <- 19 pp[, 1] <- factor(pp[,1], levels=pp[,1]) } else { line_type <- "l" lty <- 1 pch <- NULL } ## Plot one standard deviation above and below the mean if(plot_stddev) { ## Added upper and lower std dev confidence bound upper = y + stddev lower = y - stddev plot(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol="red", medlty=0, staplelty=0, boxlty=0, col="red", main = attr(pp,"description"), ylim = c(min(lower), max(upper))) polygon(c(x, rev(x)), c(lower, rev(upper)), col = adjustcolor("red", alpha.f = 0.1), border = F) if(type == "enum"){ x <- c(1:length(x)) arrows(x, lower, x, upper, code=3, angle=90, length=0.1, col="red") } } else { plot(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol="red", medlty=0, staplelty=0, boxlty=0, col="red", main = attr(pp,"description")) } } else { 
print("Partial Dependence not calculated--make sure nbins is as high as the level count") } } pp.plot.1d.multinomial <- function(pps) { colors <- rainbow(length(pps)) for(i in 1:length(pps)) { pp <- pps[[i]] if(!all(is.na(pp))) { x <- pp[,1] y <- pp[,2] stddev <- pp[,3] color <- colors[i] title <- attr(pp,"description") type <- col_types[which(col_names == names(pp)[1])] if(type == "enum"){ line_type <- "p" lty <- NULL pch <- 19 pp[, 1] <- factor(x, labels=x) } else { line_type <- "l" lty <- 1 pch <- NULL } if(plot_stddev) { upper <- y + stddev lower <- y - stddev if(i == 1){ plot(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol=color, medlty=0, staplelty=0, boxlty=0, main = title, col = color, ylim = c(min_lower, max_upper + 0.1 * abs(max_upper))) } else { points(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol=color, medlty=0, staplelty=0, boxlty=0, col = color) } polygon(c(x, rev(x)), c(lower, rev(upper)), col = adjustcolor(color, alpha.f = 0.1), border = F) if(type == "enum"){ x <- c(1:length(x)) arrows(x, lower, x, upper, code=3, angle=90, length=0.1, col=color) } } else { if(i == 1) { plot(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol=color, medlty=0, staplelty=0, boxlty=0, main = title, col = color, ylim = c(min_y, max_y + 0.05 * abs(max_y))) } else { points(pp[,1:2], type = line_type, pch=pch, medpch=pch, medcol=color, medlty=0, staplelty=0, boxlty=0, col = color) } } legend("topright",legend=targets, col=colors, lty=lty, pch=pch, bty="n", ncol=length(pps)) } else { print("Partial Dependence not calculated--make sure nbins is as high as the level count") } } } pp.plot.2d <- function(pp, nBins=nbins, user_cols=NULL, user_num_splits=NULL) { xtickMarks <- NULL ytickMarks <- NULL if (!all(is.na(pp))) { if (col_types[which(col_names == names(pp)[1])] == "enum") { x <- replaceEnumLevel(pp[,1], unique(pp[,1])) xtickMarks <- unique(pp[,1]) } else { x <- pp[,1] } if (col_types[which(col_names == names(pp)[2])] == "enum") { y <- 
replaceEnumLevel(pp[,2], unique(pp[,2])) ytickMarks <- unique(pp[,2]) } else { y <- pp[,2] } allMetric <- reShape(x, y, pp[, 3], names(pp)[1], names(pp)[2], nBins, user_cols, user_num_splits) XX <- allMetric[[1]] YY <- allMetric[[2]] ZZ <- allMetric[[3]] tTitle <- "" if (!is.null(xtickMarks)) { xc <- c(1:length(xtickMarks)) tTitle <- paste0("X axis tick marks: ", paste(xc, xtickMarks, sep=":", collapse=", ")) } if (!is.null(ytickMarks)) { yc <- c(1:length(ytickMarks)) temp <- paste0("Y axis tick marks: ", paste(yc, ytickMarks, sep=":", collapse=", ")) tTitle <- paste0(tTitle, temp) } ## Plot one standard deviation above and below the mean if (plot_stddev) { ## Added upper and lower std dev confidence bound upper = pp[, 3] + pp[, 4] lower = pp[, 3] - pp[, 4] Zupper = matrix(upper, ncol=dim(XX)[2], byrow=F) Zlower = matrix(lower, ncol=dim(XX)[2], byrow=F) rgl::open3d() plot3Drgl::persp3Drgl(XX, YY, ZZ, theta=30, phi=15, axes=TRUE,scale=2, box=TRUE, nticks=5, ticktype="detailed", xlab=names(pp)[1], ylab=names(pp)[2], zlab="2D partial plots", main=tTitle, border='black', alpha=0.5) plot3Drgl::persp3Drgl(XX, YY, Zupper, alpha=0.2, lwd=2, add=TRUE, border='yellow') plot3Drgl::persp3Drgl(XX, YY, Zlower, alpha=0.2, lwd=2, add=TRUE, border='green') rgl::grid3d(c("x", "y", "z")) } else { rgl::persp3d(XX, YY, ZZ, theta=30, phi=50, axes=TRUE,scale=2, box=TRUE, nticks=5, ticktype="detailed", xlab=names(pp)[1], ylab=names(pp)[2], zlab="2D partial plots", main=tTitle, border='black', alpha=0.5) rgl::grid3d(c("x", "y", "z")) } } else { print("2D Partial Dependence not calculated--make sure nbins is as high as the level count") } } pp.plot.save.1d <- function(pp) { # If user accidentally provides one of the most common suffixes in R, it is removed. 
save_to <- gsub(replacement = "",pattern = "(\\.png)|(\\.jpg)|(\\.pdf)", x = save_to) destination_file <- paste0(save_to,"_",names(pp)[1],'.png') png(destination_file) pp.plot.1d(pp) dev.off() } pp.plot.save.1d.multinomial <- function(pps) { # If user accidentally provides one of the most common suffixes in R, it is removed. save_to <- gsub(replacement = "",pattern = "(\\.png)|(\\.jpg)|(\\.pdf)", x = save_to) destination_file <- paste0(save_to,"_",names(pps[[1]])[1],'.png') png(destination_file) pp.plot.1d.multinomial(pps) dev.off() } pp.plot.save.2d <- function(pp, nBins=nbins, user_cols=NULL, user_num_splits=NULL) { # If user accidentally provides one of the most common suffixes in R, it is removed. save_to <- gsub(replacement = "", pattern = "(\\.png)|(\\.jpg)|(\\.pdf)", x = save_to) colnames = paste0(names(pp)[1], "_", names(pp)[2]) destination_file <- paste0(save_to,"_",colnames,'.png') pp.plot.2d(pp, nbins, user_cols, user_num_splits) rgl::snapshot3d(destination_file) dev.off() } # 1D PDP plot and save if(plot && !noCols) { if(is.null(targets)){ # multonomial PDP lapply(pps[1:num_1d_pp_data], pp.plot.1d) if(!is.null(save_to)){ lapply(pps[1:num_1d_pp_data], pp.plot.save.1d) } } else { from <- 1 to <- length(targets) for(i in 1:numCols) { pp = pps[from:to] pp.plot.1d.multinomial(pp) if(!is.null(save_to)){ pp.plot.save.1d.multinomial(pp) } from <- from + to to <- to + length(targets) } } } # 2D PDP plot and save if (!noPairs && requireNamespace("plot3Drgl", quietly = TRUE) && requireNamespace("rgl", quietly = TRUE)) { if (plot && !is.null(save_to)) { # plot and save to file if (is.null(user_splits)) { sapply( pps[(num_1d_pp_data + 1):(num_1d_pp_data + numColPairs)], pp.plot.save.2d, nBins = nbins, user_cols = NULL, user_num_splits = NULL ) } else { sapply( pps[(num_1d_pp_data + 1):(num_1d_pp_data + numColPairs)], pp.plot.save.2d, nBins = nbins, user_cols = user_cols, user_num_splits = user_num_splits ) } } else { # only plot if (is.null(user_splits)) { sapply( 
        # NOTE(review): this "plot only" branch offsets the 2D PDP entries with
        # (numCols + 1), but the "plot and save" branch above uses
        # (num_1d_pp_data + 1).  For multinomial models with targets,
        # num_1d_pp_data != numCols, so the two offsets disagree -- confirm
        # which offset is intended before relying on multinomial 2D plots.
        pps[(numCols + 1):(numCols + numColPairs)],
        pp.plot.2d,
        nBins = nbins,
        user_cols = NULL,
        user_num_splits = NULL
      )
    } else {
      sapply(
        pps[(numCols + 1):(numCols + numColPairs)],
        pp.plot.2d,
        nBins = nbins,
        user_cols = user_cols,
        user_num_splits = user_num_splits
      )
    }
  }
} else if (plot && !noPairs) {
  # 2D plotting silently requires the optional rgl stack; warn instead of failing.
  warning("Install packages plot3Drgl and rgl in order to generate 2D partial plots.")
}
# Unwrap a single result for convenience; otherwise return the full list.
if(length(pps) == 1) {
  return(pps[[1]])
} else {
  return(pps)
}
}

# Map each element of a categorical vector to its 1-based position within the
# supplied set of levels.
#
# originalV: vector of observed categorical values.
# vlevels:   vector of the distinct levels to index against.
# Returns an integer-valued vector the same length as originalV.
replaceEnumLevel <- function(originalV, vlevels) {
  x <- rep(1, length(originalV))
  for (ind in c(1:length(originalV))) {
    # which() gives the position of the matching level; assumes every value
    # of originalV appears in vlevels -- TODO confirm for callers.
    x[ind] <- which(originalV[ind] == vlevels)
  }
  x
}

# Reshape three parallel vectors (x, y, z) into matrices suitable for a 3D
# surface plot.
#
# The number of rows (ybin) defaults to nbin, but is overridden with the
# user-specified split count when yname appears in user_cols.  Columns are
# derived as floor(length(x) / ybin).  Returns list(X, Y, Z).
reShape<- function(x, y, z, xname, yname, nbin, user_cols, user_num_splits) {
  ybin <- nbin
  if(!is.null(user_cols)) {
    if (yname %in% user_cols) {
      ybin <- user_num_splits[which(yname==user_cols)]
    }
  }
  xbin <- floor(length(x)/ybin)
  X<-matrix(x, nrow=ybin, ncol=xbin,byrow=F)
  Y <- matrix(y, nrow=ybin, ncol=xbin, byrow=F)
  Z <- matrix(z, nrow=ybin, ncol=xbin, byrow=F)
  list(X,Y,Z)
}

#' Feature Generation via H2O Deep Learning
#'
#' Extract the non-linear feature from an H2O data set using an H2O deep learning
#' model.
#' @param object An \linkS4class{H2OModel} object that represents the deep
#' learning model to be used for feature extraction.
#' @param data An H2OFrame object.
#' @param layer Index (integer) of the hidden layer to extract
#' @return Returns an H2OFrame object with as many features as the
#' number of units in the hidden layer of the specified index.
#' @seealso \code{\link{h2o.deeplearning}} for making H2O Deep Learning models.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path = system.file("extdata", "prostate.csv", package = "h2o")
#' prostate = h2o.importFile(path = prostate_path)
#' prostate_dl = h2o.deeplearning(x = 3:9, y = 2, training_frame = prostate,
#'                                hidden = c(100, 200), epochs = 5)
#' prostate_deepfeatures_layer1 = h2o.deepfeatures(prostate_dl, prostate, layer = 1)
#' prostate_deepfeatures_layer2 = h2o.deepfeatures(prostate_dl, prostate, layer = 2)
#' head(prostate_deepfeatures_layer1)
#' head(prostate_deepfeatures_layer2)
#'
#' }
#' @export
h2o.deepfeatures <- function(object, data, layer = NULL) {
  url <- paste0('Predictions/models/', object@model_id, '/frames/', h2o.getId(data))
  # Default to the first hidden layer when none is given.  (Fix: `layer`
  # previously had no default, so calling without it raised a missing-argument
  # error and the is.null() guard below was unreachable.)
  if (is.null(layer)) layer <- 1
  if (is.numeric(layer)) {
    # Numeric layers are 1-based in R; the REST API expects a 0-based index.
    index <- layer - 1
    res <- .h2o.__remoteSend(url, method = "POST", deep_features_hidden_layer=index, h2oRestApiVersion = 4)
  } else {
    # Any non-numeric value is treated as a hidden-layer name.
    res <- .h2o.__remoteSend(url, method = "POST", deep_features_hidden_layer_name=layer, h2oRestApiVersion = 4)
  }
  job_key <- res$key$name
  dest_key <- res$dest$name
  # Block until the server-side extraction job finishes, then fetch the frame.
  .h2o.__waitOnJob(job_key)
  h2o.getFrame(dest_key)
}

#'
#' The H2ONode class.
#'
#' @slot id An \code{integer} representing node's unique identifier. Generated by H2O.
#' @slot levels A \code{character} representing categorical levels on split from parent's node belonging into this node. NULL for root node or non-categorical splits.
#'
#'
#' @aliases H2ONode
#'
setClass("H2ONode", representation(
  id = "integer"
))

#'
#' The H2OLeafNode class.
#'
#' This class represents a single leaf node in an \code{H2OTree}.
#'
#'
#' @aliases H2OLeafNode
#'
setClass("H2OLeafNode", representation(
  prediction = "numeric"
), contains = "H2ONode")

#'
#' The H2OSplitNode class.
#'
#' This class represents a single non-terminal node in an \code{H2OTree}.
#' @slot threshold A \code{numeric} split threshold, typically when the split column is numerical.
#' @slot left_child A \code{H2ONodeOrNULL} representing the left child node, if a node has one.
#' @slot right_child A \code{H2ONodeOrNULL} representing the right child node, if a node has one.
#' @slot split_feature A \code{character} representing the name of the column this node splits on.
#' @slot left_levels A \code{character} representing the levels of a categorical feature heading to the left child of this node. NA for non-categorical split.
#' @slot right_levels A \code{character} representing the levels of a categorical feature heading to the right child of this node. NA for non-categorical split.
#' @slot na_direction A \code{character} representing the direction of NA values. LEFT means NA values go to the left child node, RIGHT means NA values go to the right child node.
#' @aliases H2OSplitNode
#' @export
setClass(
  "H2OSplitNode",
  representation(
    threshold = "numeric",
    left_child = "H2ONode",
    right_child = "H2ONode",
    split_feature = "character",
    left_levels = "character",
    right_levels = "character",
    na_direction = "character"
  ),
  contains = "H2ONode"
)

#' @rdname H2ONode-class
#' @param object an \code{H2ONode} object.
#' @export
setMethod('show', 'H2ONode', function(object){
  print.H2ONode(object)
})

# Pretty-print a single tree node: leaf nodes show only their prediction;
# split nodes show child IDs, the split column, and the split condition.
print.H2ONode <- function(node){
  cat("Node ID", node@id, "\n\n")
  # NOTE(review): `class(node) == "H2OLeafNode"` would miss subclasses of
  # H2OLeafNode; `inherits(node, "H2OLeafNode")` is the robust form -- confirm
  # no subclass exists before changing.
  if(class(node) == "H2OLeafNode"){
    cat("Terminal node. 
Prediction is", node@prediction)
    return()
  }
  if(!is.null(node@left_child)) cat("Left child node ID =", node@left_child@id, "\n") else cat("There is no left child \n")
  if(!is.null(node@right_child)) cat("Right child node ID =", node@right_child@id,"\n") else cat("There is no right child \n")
  cat("\n")
  cat("Splits on column", node@split_feature, "\n")
  # An NA threshold marks a categorical split; otherwise print the numeric cut.
  if(is.na(node@threshold)){
    if(!is.null(node@left_child)) cat(" - Categorical levels going to the left node:", node@left_levels, "\n")
    if(!is.null(node@right_child)) cat(" - Categorical levels to the right node:", node@right_levels, "\n")
  } else {
    cat("Split threshold <", node@threshold,"to the left node, >=",node@threshold ,"to the right node\n")
  }
  cat("\n")
  if(!is.na(node@na_direction)) cat("NA values go to the", node@na_direction,"node")
}

#'
#' The H2OTree class.
#'
#' This class represents a model of a Tree built by one of H2O's algorithms (GBM, Random Forest).
#' @slot root_node A \code{H2ONode} representing the beginning of the tree behind the model. Allows further tree traversal.
#' @slot left_children An \code{integer} vector with left child nodes of tree's nodes
#' @slot right_children An \code{integer} vector with right child nodes of tree's nodes
#' @slot node_ids An \code{integer} representing identification number of a node. Node IDs are generated by H2O.
#' @slot descriptions A \code{character} vector with descriptions for each node to be found in the tree. Contains split threshold if the split is based on numerical column.
#' For categorical splits, it contains list of categorical levels for transition from the parent node.
#' @slot model_id A \code{character} with the name of the model this tree is related to.
#' @slot tree_number An \code{integer} representing the order in which the tree has been built in the model.
#' @slot tree_class A \code{character} representing name of tree's class. Number of tree classes equals to the number of levels in categorical response column.
#' As there is exactly one class per categorical level, name of tree's class equals to the corresponding categorical level of response column.
#' In case of regression and binomial, the name of the categorical level is ignored and can be omitted, as there is exactly one tree built in both cases.
#' @slot thresholds A \code{numeric} split thresholds. Split thresholds are not only related to numerical splits, but might be present in case of categorical split as well.
#' @slot features A \code{character} with names of the feature/column used for the split.
#' @slot levels A \code{character} representing categorical levels on split from parent's node belonging into this node. NULL for root node or non-categorical splits.
#' @slot nas A \code{character} representing if NA values go to the left node or right node. May be NA if node is a leaf.
#' @slot predictions A \code{numeric} representing predictions for each node in the graph.
#' @slot tree_decision_path A \code{character}, plain language rules representation of a trained decision tree
#' @slot decision_paths A \code{character} representing plain language rules that were used in a particular prediction.
#' @slot left_cat_split A \code{character} list of categorical levels leading to the left child node. Only present when split is categorical, otherwise none.
#' @slot right_cat_split A \code{character} list of categorical levels leading to the right child node. Only present when split is categorical, otherwise none.
#' @aliases H2OTree
#' @export
setClass(
  "H2OTree",
  representation(
    root_node = "H2ONode",
    left_children = "integer",
    right_children = "integer",
    node_ids = "integer",
    descriptions = "character",
    model_id = "character",
    tree_number = "integer",
    tree_class = "character",
    thresholds = "numeric",
    features = "character",
    levels = "list",
    nas = "character",
    predictions = "numeric",
    tree_decision_path = "character",
    decision_paths = "character",
    left_cat_split = "list",
    right_cat_split = "list"
  )
)

#' @rdname H2OTree-class
#' @param object an \code{H2OTree} object.
#' @export
setMethod('show', 'H2OTree', function(object){
  print.H2OTree(object)
})

# One-line summary of an H2OTree: owning model, tree number/class, node count.
# Relies on the length() method for H2OTree defined below.
print.H2OTree <- function(tree){
  cat(paste0("Tree related to model '", tree@model_id,"'. Tree number is"), paste0(tree@tree_number,", tree class is '",tree@tree_class, "'\n"))
  cat("The tree has", length(tree), "nodes")
}

#'
#' Overrides the behavior of length() function on H2OTree class. Returns number of nodes in an \code{H2OTree}
#' @param x An \code{H2OTree} to count nodes for.
#'
setMethod("length", signature(x = "H2OTree"), function(x) {
  # Every node has an entry in left_children (leaves hold -1).
  length(x@left_children)
})

# Recursively rebuild the object graph of an H2OTree from its flat arrays.
#
# node: 0-based index of the node to materialize; -1 marks "no child" and
#       terminates the recursion with NULL.
# tree: the H2OTree whose parallel slot vectors (left_children, right_children,
#       node_ids, thresholds, features, nas, levels, predictions) are indexed.
# Returns an H2OLeafNode when both children are absent, else an H2OSplitNode.
.h2o.walk_tree <- function(node, tree){
  if(node == -1) {return(NULL)}
  # Slot vectors are 1-based while node indices are 0-based.
  child_node_index <- node + 1
  left <- tree@left_children[child_node_index]
  right <- tree@right_children[child_node_index]
  # NOTE(review): node_levels is computed but never used below -- confirm it
  # can be removed.
  node_levels <- if(is.null(tree@levels[[node + 1]])) NA_character_ else tree@levels[[node + 1]]
  left_child = .h2o.walk_tree(left, tree)
  right_child = .h2o.walk_tree(right, tree)
  node <- NULL
  if(is.null(left_child) && is.null(right_child)){
    # No children on either side: this is a terminal (leaf) node.
    node <- new("H2OLeafNode",
                id = tree@node_ids[child_node_index],
                prediction = tree@predictions[child_node_index]
    )
  } else {
    # Categorical level sets for each child; NA when the split is numerical.
    left_node_levels <- if(is.null(tree@levels[[left + 1]])) NA_character_ else tree@levels[[left + 1]]
    right_node_levels <- if(is.null(tree@levels[[right + 1]])) NA_character_ else tree@levels[[right + 1]]
    node <- new ("H2OSplitNode",
                 id = tree@node_ids[child_node_index],
                 left_child = left_child,
                 right_child = right_child,
                 threshold = tree@thresholds[child_node_index],
                 split_feature = tree@features[child_node_index],
                 na_direction = tree@nas[child_node_index],
                 left_levels = left_node_levels,
                 right_levels = right_node_levels)
  }
  node
}

#' Fetches a single tree of a H2O model. This function is intended to be used on Gradient Boosting Machine models or Distributed Random Forest models.
#'
#'
#' @param model Model with trees
#' @param tree_number Number of the tree in the model to fetch, starting with 1
#' @param tree_class Name of the class of the tree (if applicable). This value is ignored for regression and binomial response column, as there is only one tree built.
#' As there is exactly one class per categorical level, name of tree's class equals to the corresponding categorical level of response column.
#' @return Returns an H2OTree object with detailed information about a tree.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' f <- "http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_train.csv"
#' iris <- h2o.importFile(f)
#' gbm_model <- h2o.gbm(y = "species", training_frame = iris)
#' tree <- h2o.getModelTree(gbm_model, 1, "Iris-setosa")
#' }
#' @export
h2o.getModelTree <- function(model, tree_number, tree_class = NA) {
  url <- "Tree"
  # The REST endpoint expects an empty string, not NA, when no class is given.
  tree_class_request = tree_class;
  if(is.na(tree_class)){
    tree_class_request <- "";
  }
  # Trees are 0-indexed server-side, 1-indexed in this R API.
  res <- .h2o.__remoteSend(
    url,
    method = "GET",
    h2oRestApiVersion = 3,
    model = model@model_id,
    tree_number = tree_number - 1,
    tree_class = tree_class_request
  )
  res$thresholds[is.nan(res$thresholds)] <- NA
  if(length(res$left_children) < 1) stop("Tree does not contain any nodes.")
  if(res$left_children[1] == -1){ # If the root node has no children
    res$nas <- c("NA")
    res$levels <- list(NULL)
    res$thresholds <- c(as.double(NA))
  }
  # Protection against NA only arrays being evaluated as logical
  if(is.logical(res$features)){
    res$features <- as.character(res$features)
  }
  if(is.logical(res$nas)){
    res$nas <- as.character(res$nas)
  }
  if(is.logical(res$thresholds)){
    res$thresholds <- as.numeric(res$thresholds)
  }
  if(is.logical(res$predictions)){
    res$predictions <- as.numeric(res$predictions)
  }
  # NOTE(review): the predictions coercion above is duplicated here verbatim;
  # the second copy is redundant and looks like a copy/paste slip.
  if(is.logical(res$predictions)){
    res$predictions <- as.numeric(res$predictions)
  }
  # Start of the tree-building process
  tree <- new(
    "H2OTree",
    left_children = res$left_children,
    right_children = res$right_children,
    descriptions = res$descriptions,
    model_id = model@model_id,
    tree_number = as.integer(res$tree_number + 1),
    thresholds = res$thresholds,
    features = res$features,
    nas = res$nas,
    predictions = res$predictions,
    tree_decision_path = res$tree_decision_path,
    decision_paths = res$decision_paths
  )
  # Renumber children breadth-wise: walk the nodes in order, assigning each
  # existing child the next sequential index and recording its original ID.
  node_index <- 0
  left_ordered <- c()
  right_ordered <- c()
  node_ids <- c(res$root_node_id)
  for(i in 1:length(tree@left_children)){
    if(tree@left_children[i] != -1){
      node_index <- node_index + 1
      left_ordered[i] <- node_index
      node_ids[node_index + 1] <- tree@left_children[i]
    } else {
      left_ordered[i] <- -1
    }
    if(tree@right_children[i] != -1){
      node_index <- node_index + 1
      right_ordered[i] <- node_index
      node_ids[node_index + 1] <- tree@right_children[i]
    } else {
      right_ordered[i] <- -1
    }
  }
  tree@node_ids <- node_ids
  tree@left_children <- as.integer(left_ordered)
  tree@right_children <- as.integer(right_ordered)
  if(!is.null(res$tree_class)){
    tree@tree_class <- res$tree_class
  }
  if(is.logical(res$levels)){ # Vector of NAs is recognized as logical type in R
    tree@levels <- rep(list(NULL), length(res$levels))
  } else {
    tree@levels <- res$levels
  }
  # Shift the server's 0-based categorical level indices to R's 1-based ones.
  for (i in 1:length(tree@levels)){
    if(!is.null(tree@levels[[i]])){
      tree@levels[[i]] <- tree@levels[[i]] + 1
    }
  }
  # Convert numerical categorical levels to characters.  `pointer` tracks the
  # position in tree@levels that corresponds to the next visited child node,
  # so it must be advanced even for non-categorical splits.
  pointer <-as.integer(1);
  for(i in 1:length(tree@left_children)){
    right <- tree@right_children[i];
    left <- tree@left_children[i]
    split_column_cat_index <- match(tree@features[i], model@model$names) # Index of split column on children's parent node
    if(is.na(split_column_cat_index)){ # If the split is not categorical, just increment & continue
      if(right != -1) pointer <- pointer + 1;
      if(left != -1) pointer <- pointer + 1;
      next
    }
    split_column_domain <- model@model$domains[[split_column_cat_index]]
    # Left child node's levels converted to characters
    left_char_categoricals <- c()
    if(left != -1) {
      pointer <- pointer + 1;
      if(!is.null(tree@levels[[pointer]])){
        for(level_index in 1:length(tree@levels[[pointer]])){
          left_char_categoricals[level_index] <- split_column_domain[tree@levels[[pointer]][level_index]]
        }
        tree@levels[[pointer]] <- left_char_categoricals;
      }
    }
    # Right child node's levels converted to characters, if there is any
    right_char_categoricals <- c()
    if(right != -1) {
      pointer <- pointer + 1;
      if(!is.null(tree@levels[[pointer]])){
        for(level_index in 1:length(tree@levels[[pointer]])){
          right_char_categoricals[level_index] <- split_column_domain[tree@levels[[pointer]][level_index]]
        }
        tree@levels[[pointer]] <- right_char_categoricals
      }
    }
  }
  # Populate per-node categorical split lists from the converted levels.
  # NOTE(review): `x[i] <- NULL` on a list is a deletion when element i exists
  # and a no-op otherwise; here the slots are filled sequentially so missing
  # children simply end up as NULL entries -- confirm this is the intent.
  for (i in 1: length(tree@left_children)){
    left_idx = tree@left_children[i]
    right_idx = tree@right_children[i]
    if(left_idx != -1){
      tree@left_cat_split[i] <- tree@levels[left_idx + 1]
    } else {
      tree@left_cat_split[i] <- NULL
    }
    if(right_idx != -1){
      tree@right_cat_split[i] <- tree@levels[right_idx + 1]
    } else {
      tree@right_cat_split[i] <- NULL
    }
  }
  # Materialize the recursive node graph starting from the root (index 0).
  tree@root_node <- .h2o.walk_tree(0, tree)
  tree
}

#' @export
print.h2o.stackedEnsemble.summary <- function(x, ...) cat(x, sep = "\n")

#' Get the seed from H2OModel which was used during training.
#' If a user does not set the seed parameter before training, the seed is autogenerated.
#' It returns the seed as a string if the value is bigger than the maximum integer.
#' For example, an autogenerated seed is always long so that the seed in R is a string.
#'
#' @param object a fitted \linkS4class{H2OModel} object.
#' @return Returns seed to be used during training a model. Could be numeric or string.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.uploadFile(path = prostate_path)
#' prostate$CAPSULE <- as.factor(prostate$CAPSULE)
#' prostate_gbm <- h2o.gbm(3:9, "CAPSULE", prostate)
#' seed <- h2o.get_seed(prostate_gbm)
#' }
#' @export
get_seed.H2OModel <- function(object) {
  object@parameters$seed
}

#' @rdname get_seed.H2OModel
#' @export
h2o.get_seed <- get_seed.H2OModel

#' Imports a model under given path, creating a Generic model with it.
#'
#' Usage example:
#' generic_model <- h2o.genericModel(mojo_file_path = "/path/to/mojo.zip")
#' predictions <- h2o.predict(generic_model, dataset)
#'
#' @param mojo_file_path Filesystem path to the model imported
#' @return Returns H2O Generic Model based on given embedded model
#'
#' @examples
#' \dontrun{
#'
#' # Import default Iris dataset as H2O frame
#' data <- as.h2o(iris)
#'
#' # Train a very simple GBM model
#' features <- c("Sepal.Length", "Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")
#' original_model <- h2o.gbm(x = features, y = "Species", training_frame = data)
#'
#' # Download the trained GBM model as MOJO (temporary directory used in this example)
#' mojo_original_name <- h2o.download_mojo(model = original_model, path = tempdir())
#' mojo_original_path <- paste0(tempdir(), "/", mojo_original_name)
#'
#' # Import the MOJO as Generic model
#' generic_model <- h2o.genericModel(mojo_original_path)
#'
#' # Perform scoring with the generic model
#' generic_model_predictions <- h2o.predict(generic_model, data)
#' }
#' @export
h2o.genericModel <- function(mojo_file_path){
  # Thin convenience wrapper around h2o.generic().
  h2o.generic(path = mojo_file_path)
}

#' Imports a MOJO under given path, creating a Generic model with it.
#'
#' Usage example:
#' mojo_model <- h2o.import_mojo(mojo_file_path = "/path/to/mojo.zip")
#' predictions <- h2o.predict(mojo_model, dataset)
#'
#' @param mojo_file_path Filesystem path to the model imported
#' @return Returns H2O Generic Model embedding given MOJO model
#'
#' @examples
#' \dontrun{
#'
#' # Import default Iris dataset as H2O frame
#' data <- as.h2o(iris)
#'
#' # Train a very simple GBM model
#' features <- c("Sepal.Length", "Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")
#' original_model <- h2o.gbm(x = features, y = "Species", training_frame = data)
#'
#' # Download the trained GBM model as MOJO (temporary directory used in this example)
#' mojo_original_path <- h2o.save_mojo(original_model, path = tempdir())
#'
#' # Import the MOJO and obtain a Generic model
#' mojo_model <- h2o.import_mojo(mojo_original_path)
#'
#' # Perform scoring with the generic model
#' predictions <- h2o.predict(mojo_model, data)
#' }
#' @export
h2o.import_mojo <- function(mojo_file_path){
  # The MOJO path must be visible to the H2O server (server-side import).
  model <- h2o.generic(path = mojo_file_path)
  return(model)
}

#' Imports a MOJO from a local filesystem, creating a Generic model with it.
#'
#' Usage example:
#' mojo_model <- h2o.upload_mojo(mojo_local_file_path = "/path/to/local/mojo.zip")
#' predictions <- h2o.predict(mojo_model, dataset)
#'
#' @param mojo_local_file_path Filesystem path to the model imported
#' @return Returns H2O Generic Model embedding given MOJO model
#'
#' @examples
#' \dontrun{
#'
#' # Import default Iris dataset as H2O frame
#' data <- as.h2o(iris)
#'
#' # Train a very simple GBM model
#' features <- c("Sepal.Length", "Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")
#' original_model <- h2o.gbm(x = features, y = "Species", training_frame = data)
#'
#' # Download the trained GBM model as MOJO (temporary directory used in this example)
#' mojo_original_name <- h2o.download_mojo(model = original_model, path = tempdir())
#' mojo_original_path <- paste0(tempdir(), "/", mojo_original_name)
#'
#' # Upload the MOJO from local filesystem and obtain a Generic model
#' mojo_model <- h2o.upload_mojo(mojo_original_path)
#'
#' # Perform scoring with the generic model
#' predictions <- h2o.predict(mojo_model, data)
#' }
#' @export
h2o.upload_mojo <- function(mojo_local_file_path){
  # Unlike h2o.import_mojo(), the file lives on the client machine: push its
  # raw bytes to the server first (parse = FALSE keeps it unparsed), then
  # build a Generic model from the uploaded key.
  model_file_key <- h2o.uploadFile(mojo_local_file_path, parse = FALSE)
  model <- h2o.generic(model_key = model_file_key)
  return(model)
}

#'
#' Reset model threshold and return old threshold value.
#'
#' @param object An \linkS4class{H2OModel} object.
#' @param threshold A threshold value from 0 to 1 included.
#' @return Returns the previous threshold used in the model.
#'
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#'
#' prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
#' prostate <- h2o.importFile(prostate_path)
#' prostate[, 2] <- as.factor(prostate[, 2])
#' prostate_glm <- h2o.glm(y = "CAPSULE", x = c("AGE", "RACE", "PSA", "DCAPS"),
#'                         training_frame = prostate, family = "binomial",
#'                         nfolds = 0, alpha = 0.5, lambda_search = FALSE)
#' old_threshold <- h2o.reset_threshold(prostate_glm, 0.9)
#' }
#' @export
h2o.reset_threshold <- function(object, threshold) {
  # Guard clause: only H2OModel objects carry a resettable threshold.
  if (!is(object, "H2OModel")) {
    warning(paste0("Threshold cannot be reset for class ", class(object)))
    return(NULL)
  }
  # Server-side reset; the expression evaluates to the previous threshold.
  .newExpr("model.reset.threshold", list(object@model_id, threshold))[1, 1]
}
#' Transfer data between source and destination data frames.
#'
#' Transfer data between source and destination data frames using matching of identifiers.
#'
#' @author Aaron Hayman
#' @param a1 name of identifier column in source data frame 'a'.
#' @param a2 name of data column in the source data frame 'a' (to be transferred to the destination data frame 'b').
#' @param b1 name of identifier column in destination data frame 'b' to be matched with a1 in data frame 'a'.
#' @param b2 (optional) name of data column in destination data frame 'b' (where data from a2 in source data frame 'a' will be transferred to).
#' @details Moves data from data frame 'a' to data frame 'b'.
#' Finds matches between column a1 and b1 and then transfers data from a2 to b2 where a1 and b1 match.
#'
#' If b2 is omitted from the function argument then data from a2 will be added to a new column.
#' If the b2 argument is used then data from a2 will be merged into existing column.
#'
#' If there is ever a situation where more than one row in the source data frame has matched a row in the destination data frame and the data
#' for transfer within those multiple rows is not all identical, then the data from the first of the matching rows will have been transferred
#' and a warning will be issued.
#' @return Returns column b2.
# @seealso \code{\link{nchar}} which this function wraps
#' @export
#' @examples
#' \donttest{
#' friends
#' >   name occupation birthday
#' > 1  bob     banker   1/2/89
#' > 2  tom    builder   4/6/75
#' > 3  ted     driver  26/7/81
#' family
#' >    name relation  birthday
#' > 1 mavis      mum    5/3/56
#' > 2  pete      dad   12/9/58
#' > 3   bob   cousin forgotten
#' family$birthday = transfer(friends$name,friends$birthday,family$name,family$birthday)
#' family
#' >    name relation birthday
#' > 1 mavis      mum   5/3/56
#' > 2  pete      dad  12/9/58
#' > 3   bob   cousin   1/2/89
#' }
#################################################################################################
####                                      transfer_data                                      ####
#################################################################################################
# Moves data from data frame 'a' to data frame 'b'.
# Finds matches between column a1 and b1 and then transfers data from a2 to b2
# where a1 and b1 match.
#
# i.e. if an animal exists in two data sets, information can be drawn from one
# dataset to the other without worrying about format.
transfer_data <- function(a1, a2, b1, b2)
{
  # a1 is the "from" identifier, a2 the "from" data, b1 the "to" identifier.
  # When b2 is omitted, start from an all-NA destination column.
  if (missing(b2)) { b2 <- NA }
  # Recycle a scalar b2 across the full destination length.
  if (length(b2) == 1) { b2 <- rep(b2, length(b1)) }

  # Ambiguity check: the same source identifier paired with more than one
  # distinct data value means match() will silently take the first occurrence.
  if (any(duplicated(unique(data.frame(a1, a2))$a1)))
  {
    warning(paste0(deparse(substitute(a1)),
                   ' contains non-unique values with ambiguous terms in ',
                   deparse(substitute(a2)),
                   ',\n first value(s) used. \nCheck this is appropriate'))
    # get_info() summarizes consecutive runs of identical values; duplicated
    # run values mean an identifier re-appears non-consecutively, which makes
    # "first value wins" genuinely unsafe -- stop with a located report.
    ain <- get_info(a1)
    if (any(duplicated(ain$value)))
    {
      # Keep only the offending identifiers, ordered by value.
      ain <- ain[ain$value %in% ain$value[duplicated(ain$value)], ]
      ain <- ain[order(ain$value), ]
      # Render each run as "start" or "start-end" index ranges.
      ind <- ain$start_index
      ind[ain$num_rep > 1] <- paste0(ind[ain$num_rep > 1], '-', ain$end_index[ain$num_rep > 1])
      seps <- rep(', ', length(ind))
      inin <- get_info(ain$value)
      seps[inin$end_index] <- '\n'
      ind <- unlist(strsplit(paste(paste0(ind, seps), collapse = ''), '\n'))
      # Report at most four identifiers (same effect as the original
      # `length(ind) = sum(1:length(ind) < 5)`).
      length(ind) <- min(length(ind), 4L)
      ind <- paste0(' identifier "', inin$value[1:length(ind)], '" occurs at indices: ', ind)
      if (length(ind) == 4) ind[4] <- ' ...'
      ind <- paste(ind, collapse = '\n')
      mess <- paste0('Identifiers from ', deparse(substitute(a1)),
                     ' are reused non-consecutively:\n', ind,
                     '\n check the identifier used is appropriate')
      stop(mess)
    }
  }

  # Single lookup of destination identifiers in the source (the original
  # evaluated match(b1, a1) three times); copy matched values across.
  idx <- match(b1, a1)
  hit <- !is.na(idx)
  b2[hit] <- a2[idx[hit]]
  return(b2)
}
##############################################################
################## END OF transfer_data FUNCTION #############
##############################################################
/R/transfer_data.R
no_license
jgrevel/BAST1-R-Library
R
false
false
4,555
r
#' Transfer data between source and destination data frames.
#'
#' Copies values from a source data column into a destination data column
#' wherever the source and destination identifier columns match.
#'
#' @author Aaron Hayman
#' @param a1 identifier column of the source data frame 'a'.
#' @param a2 data column of the source data frame 'a' (values to transfer).
#' @param b1 identifier column of the destination data frame 'b', matched against a1.
#' @param b2 (optional) existing destination data column to merge into; when
#'   omitted, a fresh NA column is created and filled.
#' @details For each element of b1 that also occurs in a1, the corresponding
#' element of a2 replaces the element of b2.  Where an identifier in a1 maps
#' to several distinct values in a2, only the first is used and a warning is
#' raised; identifiers reused in non-consecutive runs abort with an error.
#' @return The updated column b2.
#' @export
transfer_data <- function(a1, a2, b1, b2)
{
  # Default destination column: all NA; scalars are recycled to length(b1).
  if (missing(b2)) {
    b2 <- NA
  }
  if (length(b2) == 1) {
    b2 <- rep(b2, length(b1))
  }

  ambiguous <- any(duplicated(unique(data.frame(a1, a2))$a1))
  if (ambiguous)
  {
    warning(paste0(deparse(substitute(a1)),
                   ' contains non-unique values with ambiguous terms in ',
                   deparse(substitute(a2)),
                   ',\n first value(s) used. \nCheck this is appropriate'))
    # get_info() (project helper) describes runs of repeated values.
    runs <- get_info(a1)
    if (any(duplicated(runs$value)))
    {
      reused <- runs$value[duplicated(runs$value)]
      runs <- runs[runs$value %in% reused, ]
      runs <- runs[order(runs$value), ]
      # Render each run as "start" or "start-end".
      spans <- runs$start_index
      multi <- runs$num_rep > 1
      spans[multi] <- paste0(spans[multi], '-', runs$end_index[multi])
      seps <- rep(', ', length(spans))
      grouped <- get_info(runs$value)
      seps[grouped$end_index] <- '\n'
      spans <- unlist(strsplit(paste(paste0(spans, seps), collapse = ''), '\n'))
      # Keep at most the first four identifiers for the message.
      length(spans) <- sum(1:length(spans) < 5)
      spans <- paste0(' identifier "', grouped$value[1:length(spans)],
                      '" occurs at indices: ', spans)
      if (length(spans) == 4) {
        spans[4] <- ' ...'
      }
      stop(paste0('Identifiers from ', deparse(substitute(a1)),
                  ' are reused non-consecutively:\n',
                  paste(spans, collapse = '\n'),
                  '\n check the identifier used is appropriate'))
    }
  }

  keep <- !is.na(match(b1, a1))
  b2[keep] <- a2[match(b1, a1)[keep]]
  b2
}
# -----------------------------------------------------------------------------
# MEPS summary-table template: total expenditure by marital status, one
# estimate per source of payment (SOP).
# NOTE(review): tokens such as .yy., .year. and .FYC. look like placeholders
# that an external generator substitutes with a concrete two-digit year /
# file name before the script runs -- this file is not runnable as-is; verify
# against the generator before editing token shapes.
# -----------------------------------------------------------------------------

# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)

# Strata with a single PSU: adjust (centre at grand mean) instead of erroring.
options(survey.lonely.psu="adjust")

# Load FYC file (full-year consolidated, person-level)
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat'); year <- .year.

# Early panels carry year-suffixed design variables; normalise the names.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)

# Negative ages are MEPS missing codes; AGELAST = last non-missing age.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))

FYC$ind = 1

# Add aggregate sources of payment
if(year <= 1999) FYC <- FYC %>% mutate(TOTTRI.yy. = TOTCHM.yy.)

FYC <- FYC %>% mutate(
  TOTOTH.yy. = TOTOFD.yy. + TOTSTL.yy. + TOTOPR.yy. + TOTOPU.yy. + TOTOSR.yy.,
  TOTOTZ.yy. = TOTOTH.yy. + TOTWCP.yy. + TOTVA.yy.,
  TOTPTR.yy. = TOTPRV.yy. + TOTTRI.yy.)

# Marital status
# 1996 codes marital status with a +6 offset for one interview round.
if(year == 1996){
  FYC <- FYC %>%
    mutate(MARRY42X = ifelse(MARRY2X <= 6, MARRY2X, MARRY2X-6),
           MARRY31X = ifelse(MARRY1X <= 6, MARRY1X, MARRY1X-6))
}

FYC <- FYC %>%
  mutate_at(vars(starts_with("MARRY")), funs(replace(., .< 0, NA))) %>%
  mutate(married = coalesce(MARRY.yy.X, MARRY42X, MARRY31X)) %>%
  mutate(married = recode_factor(married, .default = "Missing", .missing = "Missing",
    "1" = "Married",
    "2" = "Widowed",
    "3" = "Divorced",
    "4" = "Separated",
    "5" = "Never married",
    "6" = "Inapplicable (age < 16)"))

# Complex-survey design object; nest = TRUE because PSU ids repeat across strata.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)

# Loop over sources of payment
sops <- c("EXP", "SLF", "PTR", "MCR", "MCD", "OTZ")
results <- list()
for(sp in sops) {
  # e.g. "TOTEXP" -> survey total of TOTEXP.yy. by marital-status group.
  key <- paste0("TOT", sp)
  formula <- as.formula(sprintf("~%s.yy.", key))
  results[[key]] <- svyby(formula, FUN = svytotal, by = ~married, design = FYCdsgn)
}
print(results)
/mepstrends/hc_use/json/code/r/totEXP__married__sop__.r
permissive
HHS-AHRQ/MEPS-summary-tables
R
false
false
2,135
r
# -----------------------------------------------------------------------------
# MEPS summary-table template: total expenditure by marital status, one
# estimate per source of payment (SOP).
# NOTE(review): tokens such as .yy., .year. and .FYC. look like placeholders
# that an external generator substitutes with a concrete two-digit year /
# file name before the script runs -- this file is not runnable as-is; verify
# against the generator before editing token shapes.
# -----------------------------------------------------------------------------

# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)

# Strata with a single PSU: adjust (centre at grand mean) instead of erroring.
options(survey.lonely.psu="adjust")

# Load FYC file (full-year consolidated, person-level)
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat'); year <- .year.

# Early panels carry year-suffixed design variables; normalise the names.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)

# Negative ages are MEPS missing codes; AGELAST = last non-missing age.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))

FYC$ind = 1

# Add aggregate sources of payment
if(year <= 1999) FYC <- FYC %>% mutate(TOTTRI.yy. = TOTCHM.yy.)

FYC <- FYC %>% mutate(
  TOTOTH.yy. = TOTOFD.yy. + TOTSTL.yy. + TOTOPR.yy. + TOTOPU.yy. + TOTOSR.yy.,
  TOTOTZ.yy. = TOTOTH.yy. + TOTWCP.yy. + TOTVA.yy.,
  TOTPTR.yy. = TOTPRV.yy. + TOTTRI.yy.)

# Marital status
# 1996 codes marital status with a +6 offset for one interview round.
if(year == 1996){
  FYC <- FYC %>%
    mutate(MARRY42X = ifelse(MARRY2X <= 6, MARRY2X, MARRY2X-6),
           MARRY31X = ifelse(MARRY1X <= 6, MARRY1X, MARRY1X-6))
}

FYC <- FYC %>%
  mutate_at(vars(starts_with("MARRY")), funs(replace(., .< 0, NA))) %>%
  mutate(married = coalesce(MARRY.yy.X, MARRY42X, MARRY31X)) %>%
  mutate(married = recode_factor(married, .default = "Missing", .missing = "Missing",
    "1" = "Married",
    "2" = "Widowed",
    "3" = "Divorced",
    "4" = "Separated",
    "5" = "Never married",
    "6" = "Inapplicable (age < 16)"))

# Complex-survey design object; nest = TRUE because PSU ids repeat across strata.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)

# Loop over sources of payment
sops <- c("EXP", "SLF", "PTR", "MCR", "MCD", "OTZ")
results <- list()
for(sp in sops) {
  # e.g. "TOTEXP" -> survey total of TOTEXP.yy. by marital-status group.
  key <- paste0("TOT", sp)
  formula <- as.formula(sprintf("~%s.yy.", key))
  results[[key]] <- svyby(formula, FUN = svytotal, by = ~married, design = FYCdsgn)
}
print(results)
# Using genoplotr from Bowtie2 data
# Visualises E. coli ter (replication terminator) sites mapped with Bowtie2,
# one track per strain; strains are grouped by phylogroup (A, B1, B2, D, E).
library(Biostrings)
library(seqinr)
library(genoPlotR)

# read in the CSV from find_ter_BT2_Samtools.R
df <- read.csv('Ecoli_geneome_ter_bam.csv', header = TRUE)
#sanity
df

# read in the CSV files from SAM_ter_tocsv.R program:
# (one CSV of ter alignments per strain)
#A
MG1655 <- read.csv('MG1655.csv')
BW2952 <- read.csv('BW2952.csv')
REL606 <- read.csv('REL606.csv')
#B1
APEC078 <- read.csv('APEC078.csv')
IAI1 <- read.csv('IAI1.csv')
E11368 <- read.csv('E11368.csv')
#B2
S88 <- read.csv('S88.csv')
UTI89 <- read.csv('UTI89.csv')
E2348 <- read.csv('E2348.csv')
#D
IAI39 <- read.csv('IAI39.csv')
SMS35 <- read.csv('SMS35.csv')
UMN026 <- read.csv('UMN026.csv')
CE10 <- read.csv('CE10.csv')
D042 <- read.csv('D042.csv')
#E
TW14359 <- read.csv('TW14359.csv')
Sakai <- read.csv('Sakai.csv')
EDL933 <- read.csv('EDL933.csv')

# We need 4 columns for genoplotr to work
# name, start, end, strand
# drop the rname column and seq
#A
MG1655 <- subset(MG1655, select = -c(rname, seq))
BW2952 <- subset(BW2952, select = -c(rname, seq))
REL606 <- subset(REL606, select = -c(rname, seq))
#B1
APEC078 <- subset(APEC078, select = -c(rname, seq))
IAI1 <- subset(IAI1, select = -c(rname, seq))
E11368 <- subset(E11368, select = -c(rname, seq))
#B2
S88 <- subset(S88, select = -c(rname, seq))
UTI89 <- subset(UTI89, select = -c(rname, seq))
E2348 <- subset(E2348, select = -c(rname, seq))
#D
IAI39 <- subset(IAI39, select = -c(rname, seq))
SMS35 <- subset(SMS35, select = -c(rname, seq))
UMN026 <- subset(UMN026, select = -c(rname, seq))
CE10 <- subset(CE10, select = -c(rname, seq))
D042 <- subset(D042, select = -c(rname, seq))
#E
TW14359 <- subset(TW14359, select = -c(rname, seq))
Sakai <- subset(Sakai, select = -c(rname, seq))
EDL933 <- subset(EDL933, select = -c(rname, seq))

# turn qwidth into end by adding qwidth to pos
# (SAM pos = alignment start, qwidth = query width, so pos + qwidth = end)
#A
MG1655$qwidth <- MG1655$pos + MG1655$qwidth
BW2952$qwidth <- BW2952$pos + BW2952$qwidth
REL606$qwidth <- REL606$pos + REL606$qwidth
#B1
APEC078$qwidth <- APEC078$pos + APEC078$qwidth
# Continue pos + qwidth -> end conversion for the remaining strains.
IAI1$qwidth <- IAI1$pos + IAI1$qwidth
E11368$qwidth <- E11368$pos + E11368$qwidth
#B2
S88$qwidth <- S88$pos + S88$qwidth
UTI89$qwidth <- UTI89$pos + UTI89$qwidth
E2348$qwidth <- E2348$pos + E2348$qwidth
#D
IAI39$qwidth <- IAI39$pos + IAI39$qwidth
SMS35$qwidth <- SMS35$pos + SMS35$qwidth
UMN026$qwidth <- UMN026$pos + UMN026$qwidth
CE10$qwidth <- CE10$pos + CE10$qwidth
D042$qwidth <- D042$pos + D042$qwidth
#E
TW14359$qwidth <- TW14359$pos + TW14359$qwidth
Sakai$qwidth <- Sakai$pos + Sakai$qwidth
EDL933$qwidth <- EDL933$pos + EDL933$qwidth

# reset the column names (to the four columns genoPlotR expects)
#A
colnames(MG1655) <- c('name', 'start', 'end', 'strand')
colnames(BW2952) <- c('name', 'start', 'end', 'strand')
colnames(REL606) <- c('name', 'start', 'end', 'strand')
#B1
colnames(APEC078) <- c('name', 'start', 'end', 'strand')
colnames(IAI1) <- c('name', 'start', 'end', 'strand')
colnames(E11368) <- c('name', 'start', 'end', 'strand')
#B2
colnames(S88) <- c('name', 'start', 'end', 'strand')
colnames(UTI89) <- c('name', 'start', 'end', 'strand')
colnames(E2348) <- c('name', 'start', 'end', 'strand')
#D
colnames(IAI39) <- c('name', 'start', 'end', 'strand')
colnames(SMS35) <- c('name', 'start', 'end', 'strand')
colnames(UMN026) <- c('name', 'start', 'end', 'strand')
colnames(CE10) <- c('name', 'start', 'end', 'strand')
colnames(D042) <- c('name', 'start', 'end', 'strand')
#E
colnames(TW14359) <- c('name', 'start', 'end', 'strand')
colnames(Sakai) <- c('name', 'start', 'end', 'strand')
colnames(EDL933) <- c('name', 'start', 'end', 'strand')

# SANITY (interactive prints)
#A
MG1655
BW2952
REL606
#B1
APEC078
IAI1
E11368
#B2
S88
UTI89
E2348
#D
IAI39
SMS35
UMN026
CE10
D042
#E
TW14359
Sakai
EDL933

####################################
# ADD DIF site from BLAST
####################################
# The dif site (chromosome dimer resolution locus) comes from a blastn hit
# table; drop the unused columns and convert strand to genoPlotR's +/- form.
dif <- read.csv('Ecoli_dif_blastn.csv', header = FALSE)
dif <- dif[-c(4,6,7)]
colnames(dif) <- c('name', 'start', 'end', 'strand')
dif$strand[dif$strand == 'minus'] <- '-'
dif$strand[dif$strand == 'plus'] <- '+'
dif

# add dif to the pre-made ter dfs
# NOTE(review): the BLAST table uses slightly different strain labels
# ('APECO78', '11368', 'E2348/69', 'SMS-3-5', '42') -- a typo in either
# file would silently append zero rows; verify row 11 exists below.
MG1655 <- rbind(MG1655, dif[dif$name=='MG1655',])
BW2952 <- rbind(BW2952, dif[dif$name=='BW2952',])
REL606 <- rbind(REL606, dif[dif$name=='REL606',])
APEC078 <- rbind(APEC078, dif[dif$name=='APECO78',])
IAI1 <- rbind(IAI1, dif[dif$name=='IAI1',])
E11368 <- rbind(E11368, dif[dif$name=='11368',])
S88 <- rbind(S88, dif[dif$name=='S88',])
UTI89 <- rbind(UTI89, dif[dif$name=='UTI89',])
E2348 <- rbind(E2348, dif[dif$name=='E2348/69',])
IAI39 <- rbind(IAI39, dif[dif$name=='IAI39',])
SMS35 <- rbind(SMS35, dif[dif$name=='SMS-3-5',])
UMN026 <- rbind(UMN026, dif[dif$name=='UMN026',])
CE10 <- rbind(CE10, dif[dif$name=='CE10',])
D042 <- rbind(D042, dif[dif$name=='42',])
TW14359 <- rbind(TW14359, dif[dif$name=='TW14359',])
Sakai <- rbind(Sakai, dif[dif$name=='Sakai',])
EDL933 <- rbind(EDL933, dif[dif$name=='EDL933',])

# RENAME [11] AS dif
# (each strain has 10 ter rows, so the appended dif row is row 11)
MG1655$name[11] <- 'dif'
BW2952$name[11] <- 'dif'
REL606$name[11] <- 'dif'
APEC078$name[11] <- 'dif'
IAI1$name[11] <- 'dif'
E11368$name[11] <- 'dif'
S88$name[11] <- 'dif'
UTI89$name[11] <- 'dif'
E2348$name[11] <- 'dif'
IAI39$name[11] <- 'dif'
SMS35$name[11] <- 'dif'
UMN026$name[11] <- 'dif'
CE10$name[11] <- 'dif'
D042$name[11] <- 'dif'
TW14359$name[11] <- 'dif'
Sakai$name[11] <- 'dif'
EDL933$name[11] <- 'dif'

# reset indexes (drop row names left over from subsetting/rbind)
#A
rownames(MG1655) <- NULL
rownames(BW2952) <- NULL
rownames(REL606) <- NULL
#B1
rownames(APEC078) <- NULL
rownames(IAI1) <- NULL
rownames(E11368) <- NULL
#B2
rownames(S88) <- NULL
rownames(UTI89) <- NULL
rownames(E2348) <- NULL
#D
rownames(IAI39) <- NULL
rownames(SMS35) <- NULL
rownames(UMN026) <- NULL
rownames(CE10) <- NULL
rownames(D042) <- NULL
#E
rownames(TW14359) <- NULL
rownames(Sakai) <- NULL
rownames(EDL933) <- NULL

###########################################################
# now use Genoplotr package to get dna_seg objects
###########################################################
# Turn into dna_seg
dna1 <- dna_seg(MG1655)
dna2 <- dna_seg(BW2952)
dna3 <- dna_seg(REL606)
dna4 <- dna_seg(APEC078)
dna5 <- dna_seg(IAI1)
dna6 <- dna_seg(E11368)
dna7 <- dna_seg(S88)
dna8 <- dna_seg(UTI89)
dna9 <- dna_seg(E2348)
dna10 <- dna_seg(IAI39)
dna11 <- dna_seg(SMS35)
dna12 <- dna_seg(UMN026)
dna13 <- dna_seg(CE10)
dna14 <- dna_seg(D042)
dna15 <- dna_seg(TW14359)
dna16 <- dna_seg(Sakai)
dna17 <- dna_seg(EDL933)

# One fill colour per feature row: 10 ter sites then dif (black).
cols <- c('red', 'blue', 'orange', 'darkgreen', 'purple', 'grey',
          'grey', 'grey', 'grey', 'grey', 'black')

# SPECIFY THE COL COLOUR (outline colour for every feature)
dna1$col <- 'black'
dna2$col <- 'black'
dna3$col <- 'black'
dna4$col <- 'black'
dna5$col <- 'black'
dna6$col <- 'black'
dna7$col <- 'black'
dna8$col <- 'black'
dna9$col <- 'black'
dna10$col <- 'black'
dna11$col <- 'black'
dna12$col <- 'black'
dna13$col <- 'black'
dna14$col <- 'black'
dna15$col <- 'black'
dna16$col <- 'black'
dna17$col <- 'black'

# SPECIFY THE FILL COLOUR
dna1$fill <- cols
dna2$fill <- cols
dna3$fill <- cols
dna4$fill <- cols
dna5$fill <- cols
dna6$fill <- cols
dna7$fill <- cols
dna8$fill <- cols
dna9$fill <- cols
dna10$fill <- cols
dna11$fill <- cols
dna12$fill <- cols
dna13$fill <- cols
dna14$fill <- cols
dna15$fill <- cols
dna16$fill <- cols
dna17$fill <- cols

# INCREASING END POS BY 50000 NT TO ENLARGE ARROWS
# (purely cosmetic: ter rows 1-10 are drawn 50 kb wide so arrows are visible
# at chromosome scale; row 11, dif, keeps its true coordinates)
dna1$end[1:10] <- c(dna1$start[1:10] + 50000)
dna2$end[1:10] <- c(dna2$start[1:10] + 50000)
dna3$end[1:10] <- c(dna3$start[1:10] + 50000)
dna4$end[1:10] <- c(dna4$start[1:10] + 50000)
dna5$end[1:10] <- c(dna5$start[1:10] + 50000)
dna6$end[1:10] <- c(dna6$start[1:10] + 50000)
dna7$end[1:10] <- c(dna7$start[1:10] + 50000)
dna8$end[1:10] <- c(dna8$start[1:10] + 50000)
dna9$end[1:10] <- c(dna9$start[1:10] + 50000)
dna10$end[1:10] <- c(dna10$start[1:10] + 50000)
dna11$end[1:10] <- c(dna11$start[1:10] + 50000)
dna12$end[1:10] <- c(dna12$start[1:10] + 50000)
dna13$end[1:10] <- c(dna13$start[1:10] + 50000)
dna14$end[1:10] <- c(dna14$start[1:10] + 50000)
dna15$end[1:10] <- c(dna15$start[1:10] + 50000)
dna16$end[1:10] <- c(dna16$start[1:10] + 50000)
dna17$end[1:10] <- c(dna17$start[1:10] + 50000)

# change dif to be side blocks
# Draw the dif row (row 11) as side blocks rather than arrows.
dna1$gene_type[11] <- 'side_blocks'
dna2$gene_type[11] <- 'side_blocks'
dna3$gene_type[11] <- 'side_blocks'
dna4$gene_type[11] <- 'side_blocks'
dna5$gene_type[11] <- 'side_blocks'
dna6$gene_type[11] <- 'side_blocks'
dna7$gene_type[11] <- 'side_blocks'
dna8$gene_type[11] <- 'side_blocks'
dna9$gene_type[11] <- 'side_blocks'
dna10$gene_type[11] <- 'side_blocks'
dna11$gene_type[11] <- 'side_blocks'
dna12$gene_type[11] <- 'side_blocks'
dna13$gene_type[11] <- 'side_blocks'
dna14$gene_type[11] <- 'side_blocks'
dna15$gene_type[11] <- 'side_blocks'
dna16$gene_type[11] <- 'side_blocks'
dna17$gene_type[11] <- 'side_blocks'

# specify dif parameters to stop terC overlap EDL933 and SMS35 (this is purely aesthetic)
# Do not run the next six assignments if you want to keep the positions true:
# dif is nudged 50.7 kb past ter row 3 so the labels do not collide.
dna17$start[11] <- c(dna17$start[3] + 50700)
dna17$end[11] <- c(dna17$start[3] + 50750)
dna17
dna11$start[11] <- c(dna11$start[3] + 50700)
dna11$end[11] <- c(dna11$start[3] + 50750)
dna11

# turn into a massive list
dna_segs <-list(dna1, dna2, dna3, dna4, dna5, dna6,
                dna7, dna8, dna9, dna10, dna11, dna12,
                dna13, dna14, dna15, dna16, dna17)
# sanity
dna1
dna2
dna3
dna4
dna5
dna6
dna7
dna8
dna9
dna10
dna11
dna12
dna13
dna14
dna15
dna16
dna17

# give names which will be used as the plot label
names <- c('MG1655','BW2952','REL606','APEC078','IAI1','11368',
           'S88','UTI89','E2348/69','IAI39','SMS35','UMN026','CE10',
           '042','TW14359','Sakai','EDL933')
names(dna_segs) <- names
# SANITY
dna_segs

## Calculating middle positions (feature midpoints, used to place labels)
mid_pos1 <- middle(dna_segs[[1]])
mid_pos2 <- middle(dna_segs[[2]])
mid_pos3 <- middle(dna_segs[[3]])
mid_pos4 <- middle(dna_segs[[4]])
mid_pos5 <- middle(dna_segs[[5]])
mid_pos6 <- middle(dna_segs[[6]])
mid_pos7 <- middle(dna_segs[[7]])
mid_pos8 <- middle(dna_segs[[8]])
mid_pos9 <- middle(dna_segs[[9]])
mid_pos10 <- middle(dna_segs[[10]])
mid_pos11 <- middle(dna_segs[[11]])
mid_pos12 <- middle(dna_segs[[12]])
mid_pos13 <- middle(dna_segs[[13]])
mid_pos14 <- middle(dna_segs[[14]])
mid_pos15 <- middle(dna_segs[[15]])
mid_pos16 <- middle(dna_segs[[16]])
mid_pos17 <- middle(dna_segs[[17]])

# Annotations (rotated ter-site labels, coloured to match the fills)
annot1 <- annotation(x1 = mid_pos1, text = dna_segs[[1]]$name, rot = 90, col = cols)
annot2 <- annotation(x1 = mid_pos2, text = dna_segs[[2]]$name, rot = 90, col = cols)
annot3 <- annotation(x1 = mid_pos3, text = dna_segs[[3]]$name, rot = 90, col = cols)
annot4 <- annotation(x1 = mid_pos4, text = dna_segs[[4]]$name, rot = 90, col = cols)
annot5 <- annotation(x1 = mid_pos5, text = dna_segs[[5]]$name, rot = 90, col = cols)
annot6 <- annotation(x1 = mid_pos6, text = dna_segs[[6]]$name, rot = 90, col = cols)
annot7 <- annotation(x1 = mid_pos7, text = dna_segs[[7]]$name, rot = 90, col = cols)
annot8 <- annotation(x1 = mid_pos8, text = dna_segs[[8]]$name, rot = 90, col = cols)
annot9 <- annotation(x1 = mid_pos9, text = dna_segs[[9]]$name, rot = 90, col = cols)
annot10 <- annotation(x1 = mid_pos10, text = dna_segs[[10]]$name, rot = 90, col = cols)
annot11 <- annotation(x1 = mid_pos11, text = dna_segs[[11]]$name, rot = 90, col = cols)
annot12 <- annotation(x1 = mid_pos12, text = dna_segs[[12]]$name, rot = 90, col = cols)
annot13 <- annotation(x1 = mid_pos13, text = dna_segs[[13]]$name, rot = 90, col = cols)
annot14 <- annotation(x1 = mid_pos14, text = dna_segs[[14]]$name, rot = 90, col = cols)
annot15 <- annotation(x1 = mid_pos15, text = dna_segs[[15]]$name, rot = 90, col = cols)
annot16 <- annotation(x1 = mid_pos16, text = dna_segs[[16]]$name, rot = 90, col = cols)
annot17 <- annotation(x1 = mid_pos17, text = dna_segs[[17]]$name, rot = 90, col = cols)

#list of annotation objects
annots <- list(annot1, annot2, annot3, annot4, annot5, annot6,
               annot7, annot8, annot9, annot10, annot11, annot12,
               annot13, annot14, annot15, annot16, annot17)

# plot (forward-orientation figure, all 17 strains)
plot_gene_map(dna_segs=dna_segs,
              comparisons=NULL,
              annotations = annots,
              annotation_height = 3,
              annotation_cex = 0.6,
              main = 'E.coli Ter Locations Determined by BOWTIE2',
              dna_seg_scale=FALSE,
              dna_seg_label_cex=0.8,
              scale = FALSE,
              arrow_head_len = 30000,
              gene_type = 'side_blocks')

###################################################################################################################
#
# REVERSE PLOT
#
# This puts the comparison in context of the RFT
# and shows blocking orientations when compared with circle diagrams
###################################################################################################################

# reverse each dna_seg object
dna1_r <- reverse(dna1)
dna2_r <- reverse(dna2)
dna3_r <- reverse(dna3)
dna4_r <- reverse(dna4)
dna5_r <- reverse(dna5)
dna6_r <- reverse(dna6)
dna7_r <- reverse(dna7)
dna8_r <- reverse(dna8)
dna9_r <- reverse(dna9)
dna10_r <- reverse(dna10)
dna11_r <- reverse(dna11)
dna12_r <- reverse(dna12)
dna13_r <- reverse(dna13)
dna14_r <- reverse(dna14)
dna15_r <- reverse(dna15)
dna16_r <- reverse(dna16)
dna17_r <- reverse(dna17)

# sanity
dna1_r
dna2_r
dna3_r
dna4_r
dna5_r
dna6_r
dna7_r
dna8_r
dna9_r
dna10_r
dna11_r
dna12_r
dna13_r
dna14_r
dna15_r
dna16_r
dna17_r

# reset the strand column back to the original dna_seg
## this will show the ter site in correct polarity
dna1_r$strand <- dna1$strand
dna2_r$strand <- dna2$strand
dna3_r$strand <- dna3$strand
dna4_r$strand <- dna4$strand
dna5_r$strand <- dna5$strand
dna6_r$strand <- dna6$strand
dna7_r$strand <- dna7$strand
dna8_r$strand <- dna8$strand
dna9_r$strand <- dna9$strand
dna10_r$strand <- dna10$strand
dna11_r$strand <- dna11$strand
dna12_r$strand <- dna12$strand
dna13_r$strand <- dna13$strand
dna14_r$strand <- dna14$strand
dna15_r$strand <- dna15$strand
dna16_r$strand <- dna16$strand
dna17_r$strand <- dna17$strand

#################################################
# TURN ALL GENOMES INTO plusGrob or MinGrob
#################################################
###########################################
# Using triangle shape (grob)
###########################################
## TEMPLATE
## Functions returning grobs.
## Creates a triangle for ter sites
# Custom genoPlotR gene_type function: one triangle per feature whose apex
# direction follows gene$strand (y1 is 1 on + strand, 0 on - strand).
triangleGrob <- function(gene, ...)
{
  x <- c(gene$start, (gene$start+gene$end)/2, gene$end)
  y1 <- 0.5 + 0.5*gene$strand
  y <- c(y1, 0.5, y1)
  polygonGrob(x, y,gp=gpar(fill=gene$fill, col=gene$col,
                           lty=gene$lty, lwd=gene$lwd),
              default.units="native")
}
# template set...
# set the genetype in each df to triangleGrob
# (only the five phylogroup-representative strains use the plain triangle)
dna1_r$gene_type[1:10] <- 'triangleGrob'
dna4_r$gene_type[1:10] <- 'triangleGrob'
dna7_r$gene_type[1:10] <- 'triangleGrob'
dna12_r$gene_type[1:10] <- 'triangleGrob'
dna15_r$gene_type[1:10] <- 'triangleGrob'

##################################################################################
## Creates a permissible triangle shape grob for ter sites
# plus grobs
# Right-pointing wedge: the flat edge sits on the feature midpoint and the
# apex extends 5 kb past the feature end (permissive direction on + strand).
plusGrob <- function(gene, ...)
{
  plus_x <- c((gene$start+gene$end)/2, (gene$start+gene$end)/2, gene$end+5000)
  plus_y <- c(1.1, -0.1, 0.5)
  polygonGrob(plus_x, plus_y,gp=gpar(fill=gene$fill, col=gene$col,
                                     lty=gene$lty, lwd=gene$lwd),
              default.units="native")
}

# min grob
# Mirror image of plusGrob: apex extends 5 kb before the feature start.
minGrob <- function(gene, ...)
{
  min_x <- c(gene$start-5000, (gene$start+gene$end)/2, (gene$start+gene$end)/2)
  min_y <- c(0.5, -0.1, 1.1)
  polygonGrob(min_x, min_y,gp=gpar(fill=gene$fill, col=gene$col,
                                   lty=gene$lty, lwd=gene$lwd),
              default.units="native")
}

###################################################################################
# Turn gene_type into plusGrob if + strand
dna1_r$gene_type[1:10][dna1_r$strand[1:10]==1] <- 'plusGrob'
dna2_r$gene_type[1:10][dna2_r$strand[1:10]==1] <- 'plusGrob'
dna3_r$gene_type[1:10][dna3_r$strand[1:10]==1] <- 'plusGrob'
dna4_r$gene_type[1:10][dna4_r$strand[1:10]==1] <- 'plusGrob'
dna5_r$gene_type[1:10][dna5_r$strand[1:10]==1] <- 'plusGrob'
dna6_r$gene_type[1:10][dna6_r$strand[1:10]==1] <- 'plusGrob'
dna7_r$gene_type[1:10][dna7_r$strand[1:10]==1] <- 'plusGrob'
dna8_r$gene_type[1:10][dna8_r$strand[1:10]==1] <- 'plusGrob'
dna9_r$gene_type[1:10][dna9_r$strand[1:10]==1] <- 'plusGrob'
dna10_r$gene_type[1:10][dna10_r$strand[1:10]==1] <- 'plusGrob'
dna11_r$gene_type[1:10][dna11_r$strand[1:10]==1] <- 'plusGrob'
dna12_r$gene_type[1:10][dna12_r$strand[1:10]==1] <- 'plusGrob'
dna13_r$gene_type[1:10][dna13_r$strand[1:10]==1] <- 'plusGrob'
dna14_r$gene_type[1:10][dna14_r$strand[1:10]==1] <- 'plusGrob'
dna15_r$gene_type[1:10][dna15_r$strand[1:10]==1] <- 'plusGrob'
dna16_r$gene_type[1:10][dna16_r$strand[1:10]==1] <- 'plusGrob'
dna17_r$gene_type[1:10][dna17_r$strand[1:10]==1] <- 'plusGrob'

# Turn gene_type into minGrob if - strand
dna1_r$gene_type[1:10][dna1_r$strand[1:10]==-1] <- 'minGrob'
dna2_r$gene_type[1:10][dna2_r$strand[1:10]==-1] <- 'minGrob'
dna3_r$gene_type[1:10][dna3_r$strand[1:10]==-1] <- 'minGrob'
dna4_r$gene_type[1:10][dna4_r$strand[1:10]==-1] <- 'minGrob'
dna5_r$gene_type[1:10][dna5_r$strand[1:10]==-1] <- 'minGrob'
dna6_r$gene_type[1:10][dna6_r$strand[1:10]==-1] <- 'minGrob'
dna7_r$gene_type[1:10][dna7_r$strand[1:10]==-1] <- 'minGrob'
dna8_r$gene_type[1:10][dna8_r$strand[1:10]==-1] <- 'minGrob'
dna9_r$gene_type[1:10][dna9_r$strand[1:10]==-1] <- 'minGrob'
# Continue minGrob assignment for the remaining - strand features.
dna10_r$gene_type[1:10][dna10_r$strand[1:10]==-1] <- 'minGrob'
dna11_r$gene_type[1:10][dna11_r$strand[1:10]==-1] <- 'minGrob'
dna12_r$gene_type[1:10][dna12_r$strand[1:10]==-1] <- 'minGrob'
dna13_r$gene_type[1:10][dna13_r$strand[1:10]==-1] <- 'minGrob'
dna14_r$gene_type[1:10][dna14_r$strand[1:10]==-1] <- 'minGrob'
dna15_r$gene_type[1:10][dna15_r$strand[1:10]==-1] <- 'minGrob'
dna16_r$gene_type[1:10][dna16_r$strand[1:10]==-1] <- 'minGrob'
dna17_r$gene_type[1:10][dna17_r$strand[1:10]==-1] <- 'minGrob'

# turn into list
dna_segs_r <- list(dna1_r, dna2_r, dna3_r, dna4_r, dna5_r, dna6_r,
                   dna7_r, dna8_r, dna9_r, dna10_r, dna11_r, dna12_r,
                   dna13_r, dna14_r, dna15_r, dna16_r, dna17_r)
names(dna_segs_r) <- names
dna_segs_r

## Calculating middle positions (for label placement on the reversed tracks)
mid_pos1_r <- middle(dna_segs_r[[1]])
mid_pos2_r <- middle(dna_segs_r[[2]])
mid_pos3_r <- middle(dna_segs_r[[3]])
mid_pos4_r <- middle(dna_segs_r[[4]])
mid_pos5_r <- middle(dna_segs_r[[5]])
mid_pos6_r <- middle(dna_segs_r[[6]])
mid_pos7_r <- middle(dna_segs_r[[7]])
mid_pos8_r <- middle(dna_segs_r[[8]])
mid_pos9_r <- middle(dna_segs_r[[9]])
mid_pos10_r <- middle(dna_segs_r[[10]])
mid_pos11_r <- middle(dna_segs_r[[11]])
mid_pos12_r <- middle(dna_segs_r[[12]])
mid_pos13_r <- middle(dna_segs_r[[13]])
mid_pos14_r <- middle(dna_segs_r[[14]])
mid_pos15_r <- middle(dna_segs_r[[15]])
mid_pos16_r <- middle(dna_segs_r[[16]])
mid_pos17_r <- middle(dna_segs_r[[17]])

# Annotations
annot1_r <- annotation(x1 = mid_pos1_r, text = dna_segs_r[[1]]$name, rot = 90, col = cols)
annot2_r <- annotation(x1 = mid_pos2_r, text = dna_segs_r[[2]]$name, rot = 90, col = cols)
annot3_r <- annotation(x1 = mid_pos3_r, text = dna_segs_r[[3]]$name, rot = 90, col = cols)
annot4_r <- annotation(x1 = mid_pos4_r, text = dna_segs_r[[4]]$name, rot = 90, col = cols)
annot5_r <- annotation(x1 = mid_pos5_r, text = dna_segs_r[[5]]$name, rot = 90, col = cols)
annot6_r <- annotation(x1 = mid_pos6_r, text = dna_segs_r[[6]]$name, rot = 90, col = cols)
annot7_r <- annotation(x1 = mid_pos7_r, text = dna_segs_r[[7]]$name, rot = 90, col = cols)
annot8_r <- annotation(x1 = mid_pos8_r, text = dna_segs_r[[8]]$name, rot = 90, col = cols)
annot9_r <- annotation(x1 = mid_pos9_r, text = dna_segs_r[[9]]$name, rot = 90, col = cols)
annot10_r <- annotation(x1 = mid_pos10_r, text = dna_segs_r[[10]]$name, rot = 90, col = cols)
annot11_r <- annotation(x1 = mid_pos11_r, text = dna_segs_r[[11]]$name, rot = 90, col = cols)
annot12_r <- annotation(x1 = mid_pos12_r, text = dna_segs_r[[12]]$name, rot = 90, col = cols)
annot13_r <- annotation(x1 = mid_pos13_r, text = dna_segs_r[[13]]$name, rot = 90, col = cols)
annot14_r <- annotation(x1 = mid_pos14_r, text = dna_segs_r[[14]]$name, rot = 90, col = cols)
annot15_r <- annotation(x1 = mid_pos15_r, text = dna_segs_r[[15]]$name, rot = 90, col = cols)
annot16_r <- annotation(x1 = mid_pos16_r, text = dna_segs_r[[16]]$name, rot = 90, col = cols)
annot17_r <- annotation(x1 = mid_pos17_r, text = dna_segs_r[[17]]$name, rot = 90, col = cols)

#list of annotation objects
annots_r <- list(annot1_r, annot2_r, annot3_r, annot4_r, annot5_r, annot6_r,
                 annot7_r, annot8_r, annot9_r, annot10_r, annot11_r, annot12_r,
                 annot13_r, annot14_r, annot15_r, annot16_r, annot17_r)

# plot all E.coli genomes figure with plus/minGrob
plot_gene_map(dna_segs=dna_segs_r,
              comparisons=NULL,
              annotations = annots_r,
              annotation_height = 3,
              annotation_cex = 0.6,
              main = 'E.coli Ter Locations Showing Permissive Directionality',
              dna_seg_scale=FALSE,
              dna_seg_label_cex=0.8,
              scale = FALSE)

#################################################
# 5 PHYLOGROUP FIGURE
#################################################
# set the genetype in each df to plusGrob if + strand
# (re-applied to the five representative strains before the reduced figure)
dna1_r$gene_type[1:10][dna1_r$strand[1:10]==1] <- 'plusGrob'
dna4_r$gene_type[1:10][dna4_r$strand[1:10]==1] <- 'plusGrob'
dna7_r$gene_type[1:10][dna7_r$strand[1:10]==1] <- 'plusGrob'
dna12_r$gene_type[1:10][dna12_r$strand[1:10]==1] <- 'plusGrob'
dna15_r$gene_type[1:10][dna15_r$strand[1:10]==1] <- 'plusGrob'
# set the genetype in each df to minGrob if - strand
dna1_r$gene_type[1:10][dna1_r$strand[1:10]==-1] <- 'minGrob'
dna4_r$gene_type[1:10][dna4_r$strand[1:10]==-1] <- 'minGrob'
dna7_r$gene_type[1:10][dna7_r$strand[1:10]==-1] <- 'minGrob'
dna12_r$gene_type[1:10][dna12_r$strand[1:10]==-1] <- 'minGrob'
dna15_r$gene_type[1:10][dna15_r$strand[1:10]==-1] <- 'minGrob'

# plot 5 Phylogroup figure
# NOTE(review): label 'UNM026' looks like a typo for 'UMN026' -- cosmetic,
# display-only string, left unchanged here.
plot_gene_map(dna_segs=list(dna1_r, dna4_r,dna7_r,dna12_r,dna15_r),
              comparisons=NULL,
              annotations = list(annot1_r, annot4_r, annot7_r,annot12_r,annot15_r),
              annotation_height = 3,
              annotation_cex = 0.9,
              main = 'Phylogroup Analysis of E.coli ter sites',
              dna_seg_scale=FALSE,
              dna_seg_label_cex=1,
              scale = FALSE,
              gene_type = NULL,
              dna_seg_labels = c('MG1655 \nGroup A','APEC078 \nGroup B1','S88 \nGroup B2','UNM026 \n Group D','TW14359 \nGroup E'))
/Chromosome Comparisons/Ecoli_Genoplotr_Comparison.R
no_license
DanielGoodall/Goodall_etal2021
R
false
false
23,197
r
# Plot E. coli ter sites (Bowtie2 hits) with genoPlotR.
# Reads per-strain ter CSVs (from SAM_ter_tocsv.R) and a BLAST dif-site CSV,
# builds genoPlotR dna_seg objects, and draws three figures:
#   1. all 17 genomes, sites as blocks;
#   2. reversed genomes with permissive-direction triangles;
#   3. one representative genome per phylogroup.
library(Biostrings)
library(seqinr)
library(genoPlotR)

# Genome-wide ter summary from find_ter_BT2_Samtools.R (printed as a sanity check).
df <- read.csv('Ecoli_geneome_ter_bam.csv', header = TRUE)
df

# Strains in plotting order; names double as the CSV basenames.
strains <- c('MG1655', 'BW2952', 'REL606',                # phylogroup A
             'APEC078', 'IAI1', 'E11368',                 # B1
             'S88', 'UTI89', 'E2348',                     # B2
             'IAI39', 'SMS35', 'UMN026', 'CE10', 'D042',  # D
             'TW14359', 'Sakai', 'EDL933')                # E

# How each strain is named in the BLAST dif table (several differ from the
# CSV basenames, e.g. 'APECO78', '11368', 'E2348/69', 'SMS-3-5', '42').
dif_names <- c(MG1655 = 'MG1655', BW2952 = 'BW2952', REL606 = 'REL606',
               APEC078 = 'APECO78', IAI1 = 'IAI1', E11368 = '11368',
               S88 = 'S88', UTI89 = 'UTI89', E2348 = 'E2348/69',
               IAI39 = 'IAI39', SMS35 = 'SMS-3-5', UMN026 = 'UMN026',
               CE10 = 'CE10', D042 = '42',
               TW14359 = 'TW14359', Sakai = 'Sakai', EDL933 = 'EDL933')

# Display labels used on the plots (match the original figure labels).
genome_labels <- c('MG1655', 'BW2952', 'REL606', 'APEC078', 'IAI1', '11368',
                   'S88', 'UTI89', 'E2348/69', 'IAI39', 'SMS35', 'UMN026',
                   'CE10', '042', 'TW14359', 'Sakai', 'EDL933')

# Read one strain's ter CSV and shape it into the 4 columns genoPlotR needs:
# name, start, end, strand (end is derived as pos + qwidth).
read_ter <- function(strain) {
  tab <- read.csv(paste0(strain, '.csv'))
  tab <- subset(tab, select = -c(rname, seq))
  tab$qwidth <- tab$pos + tab$qwidth
  colnames(tab) <- c('name', 'start', 'end', 'strand')
  tab
}
ter_tables <- lapply(strains, read_ter)
names(ter_tables) <- strains

####################################
# ADD DIF site from BLAST
####################################
# dif site coordinates from BLAST; recode strand to genoPlotR's '+'/'-'.
dif <- read.csv('Ecoli_dif_blastn.csv', header = FALSE)
dif <- dif[-c(4, 6, 7)]
colnames(dif) <- c('name', 'start', 'end', 'strand')
dif$strand[dif$strand == 'minus'] <- '-'
dif$strand[dif$strand == 'plus'] <- '+'
dif

# Append each strain's dif row (row 11) after its 10 ter sites, relabel it
# 'dif' and reset the row indexes.
ter_tables <- lapply(strains, function(strain) {
  tab <- rbind(ter_tables[[strain]], dif[dif$name == dif_names[[strain]], ])
  tab$name[11] <- 'dif'
  rownames(tab) <- NULL
  tab
})
names(ter_tables) <- strains

###########################################################
# now use Genoplotr package to get dna_seg objects
###########################################################
# Fill colours: terA-E coloured, pseudo-ter sites grey, dif black.
cols <- c('red', 'blue', 'orange', 'darkgreen', 'purple',
          'grey', 'grey', 'grey', 'grey', 'grey', 'black')

# Build a dna_seg per strain; widen ter features to 50 kb so the arrows are
# visible at whole-genome scale; draw dif as a block rather than an arrow.
dna_segs <- lapply(ter_tables, function(tab) {
  seg <- dna_seg(tab)
  seg$col <- 'black'
  seg$fill <- cols
  seg$end[1:10] <- seg$start[1:10] + 50000
  seg$gene_type[11] <- 'side_blocks'
  seg
})
names(dna_segs) <- genome_labels

# Purely aesthetic: nudge dif in SMS35 (index 11) and EDL933 (index 17) so it
# does not overlap terC. Skip this loop to keep the true positions.
for (i in c(11, 17)) {
  dna_segs[[i]]$start[11] <- dna_segs[[i]]$start[3] + 50700
  dna_segs[[i]]$end[11]   <- dna_segs[[i]]$start[3] + 50750
}
dna_segs

# One annotation track per genome: rotated site labels at feature midpoints.
make_annots <- function(segs) {
  lapply(segs, function(seg)
    annotation(x1 = middle(seg), text = seg$name, rot = 90, col = cols))
}
annots <- make_annots(dna_segs)

# Figure 1: all 17 genomes, sites drawn as plain blocks.
plot_gene_map(dna_segs = dna_segs, comparisons = NULL,
              annotations = annots, annotation_height = 3,
              annotation_cex = 0.6,
              main = 'E.coli Ter Locations Determined by BOWTIE2',
              dna_seg_scale = FALSE, dna_seg_label_cex = 0.8, scale = FALSE,
              arrow_head_len = 30000, gene_type = 'side_blocks')

###################################################################################################################
#
# REVERSE PLOT
#
# This puts the comparison in context of the RFT
# and shows blocking orientations when compared with circle diagrams
###################################################################################################################
# Reverse each segment, then restore the original strand column so each ter
# site keeps its true polarity.
dna_segs_r <- lapply(seq_along(dna_segs), function(i) {
  seg_r <- reverse(dna_segs[[i]])
  seg_r$strand <- dna_segs[[i]]$strand
  seg_r
})
names(dna_segs_r) <- genome_labels
dna_segs_r

###########################################
# Custom grobs for ter sites
###########################################
## Creates a triangle for ter sites
triangleGrob <- function(gene, ...) {
  x <- c(gene$start, (gene$start + gene$end) / 2, gene$end)
  y1 <- 0.5 + 0.5 * gene$strand
  y <- c(y1, 0.5, y1)
  polygonGrob(x, y,
              gp = gpar(fill = gene$fill, col = gene$col,
                        lty = gene$lty, lwd = gene$lwd),
              default.units = "native")
}

# Pre-assign triangles for the 5 phylogroup representatives (these entries are
# overwritten by plus/minGrob below; kept for parity with the original workflow).
for (i in c(1, 4, 7, 12, 15)) {
  dna_segs_r[[i]]$gene_type[1:10] <- 'triangleGrob'
}

## Creates a permissible triangle shape grob for ter sites
# plus grob: ter permissive in the + direction.
plusGrob <- function(gene, ...) {
  plus_x <- c((gene$start + gene$end) / 2, (gene$start + gene$end) / 2,
              gene$end + 5000)
  plus_y <- c(1.1, -0.1, 0.5)
  polygonGrob(plus_x, plus_y,
              gp = gpar(fill = gene$fill, col = gene$col,
                        lty = gene$lty, lwd = gene$lwd),
              default.units = "native")
}

# min grob: ter permissive in the - direction.
minGrob <- function(gene, ...) {
  min_x <- c(gene$start - 5000, (gene$start + gene$end) / 2,
             (gene$start + gene$end) / 2)
  min_y <- c(0.5, -0.1, 1.1)
  polygonGrob(min_x, min_y,
              gp = gpar(fill = gene$fill, col = gene$col,
                        lty = gene$lty, lwd = gene$lwd),
              default.units = "native")
}

# Render every ter site with the grob matching its strand.
for (i in seq_along(dna_segs_r)) {
  strands <- dna_segs_r[[i]]$strand[1:10]
  dna_segs_r[[i]]$gene_type[1:10][strands == 1]  <- 'plusGrob'
  dna_segs_r[[i]]$gene_type[1:10][strands == -1] <- 'minGrob'
}

annots_r <- make_annots(dna_segs_r)

# Figure 2: all 17 genomes with permissive-direction triangles.
plot_gene_map(dna_segs = dna_segs_r, comparisons = NULL,
              annotations = annots_r, annotation_height = 3,
              annotation_cex = 0.6,
              main = 'E.coli Ter Locations Showing Permissive Directionality',
              dna_seg_scale = FALSE, dna_seg_label_cex = 0.8, scale = FALSE)

#################################################
# 5 PHYLOGROUP FIGURE
#################################################
# One representative per phylogroup: MG1655 (A), APEC078 (B1), S88 (B2),
# UMN026 (D), TW14359 (E).
phylo <- c(1, 4, 7, 12, 15)
plot_gene_map(dna_segs = dna_segs_r[phylo], comparisons = NULL,
              annotations = annots_r[phylo], annotation_height = 3,
              annotation_cex = 0.9,
              main = 'Phylogroup Analysis of E.coli ter sites',
              dna_seg_scale = FALSE, dna_seg_label_cex = 1, scale = FALSE,
              gene_type = NULL,
              dna_seg_labels = c('MG1655 \nGroup A', 'APEC078 \nGroup B1',
                                 'S88 \nGroup B2', 'UNM026 \n Group D',
                                 'TW14359 \nGroup E'))
# Circular Ward dendrogram of the biotic/biocenosis CCA table.
library(factoextra)
library(ggplot2)

bio_cca <- read.csv("biotic_biocen_cca.csv", row.names = 1)
View(bio_cca)

# Drop all-zero rows and columns before scaling (scale() would divide by 0).
rowsum <- apply(bio_cca, 1, sum)
colsum <- apply(bio_cca, 2, sum)
resum <- bio_cca[rowsum > 0, colsum > 0]

# Ward clustering on Euclidean distances of the scaled table.
d <- dist(scale(resum), method = "euclidean")
# BUG FIX: the original called hclust(dd, ...) but the distance object is `d`,
# so the script stopped with "object 'dd' not found".
hc <- hclust(d, method = "ward.D2")

# fviz_nbclust(resum, kmeans, method = "gap_stat")
fviz_dend(hc, cex = 0.9, k = 7, k_colors = "jco", type = "circular")
/graphic16_and_17.r
no_license
leopregnolato/data_analysis_master
R
false
false
412
r
# Circular Ward dendrogram of the biotic/biocenosis CCA table.
library(factoextra)
library(ggplot2)

bio_cca <- read.csv("biotic_biocen_cca.csv", row.names = 1)
View(bio_cca)

# Drop all-zero rows and columns before scaling (scale() would divide by 0).
rowsum <- apply(bio_cca, 1, sum)
colsum <- apply(bio_cca, 2, sum)
resum <- bio_cca[rowsum > 0, colsum > 0]

# Ward clustering on Euclidean distances of the scaled table.
d <- dist(scale(resum), method = "euclidean")
# BUG FIX: the original called hclust(dd, ...) but the distance object is `d`,
# so the script stopped with "object 'dd' not found".
hc <- hclust(d, method = "ward.D2")

# fviz_nbclust(resum, kmeans, method = "gap_stat")
fviz_dend(hc, cex = 0.9, k = 7, k_colors = "jco", type = "circular")
#' Determine the optimal allocation, as described in A. Meucci
#' "Risk and Asset Allocation", Springer, 2005
#'
#' @param Market          : [struct] market parameters
#' @param InvestorProfile : [struct] investor's parameters
#'
#' @return Allocation : [vector] (N x 1)
#'
#' @note
#' Compute optimal allocation, only possible if hidden parameters were known:
#' thus it is not a "decision", we call it a "choice"
#'
#' @references
#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
#' \url{http://symmys.com/node/170}, "E 285 - Estimation risk and opportunity cost".
#'
#' See Meucci's script for " EvaluationChoiceOptimal.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}
EvaluationChoiceOptimal = function( Market, InvestorProfile )
{
    # First two moments of terminal prices implied by the linear-return moments.
    P = diag( Market$CurrentPrices, length( Market$CurrentPrices ) );
    Exp_Prices = P %*% ( 1 + Market$LinRets_EV );
    Cov_Prices = P %*% Market$LinRets_Cov %*% P;

    # Inverse of the price covariance. (The original multiplied the inverse by
    # an identity built with diag(1, dim(Cov_Prices)), which is redundant and
    # passes a length-2 vector where diag() expects a scalar nrow.)
    S = solve( Cov_Prices );

    A = ( t( Market$CurrentPrices ) %*% S %*% Market$CurrentPrices )[ 1 ];
    B = ( t( Market$CurrentPrices ) %*% S %*% Exp_Prices )[ 1 ];

    # Lagrange multiplier enforcing the budget constraint.
    Gamma = ( ( InvestorProfile$Budget - InvestorProfile$RiskPropensity * B ) / A )[ 1 ];

    Allocation = InvestorProfile$RiskPropensity * S %*% Exp_Prices +
        Gamma[ 1 ] * S %*% Market$CurrentPrices;

    return( Allocation );
}

#' Compute the certainty-equivalent satisfaction index, as described in
#' A. Meucci "Risk and Asset Allocation", Springer, 2005.
#'
#' @param Allocation      : [vector] (N x 1)
#' @param Market          : [struct] market parameters
#' @param InvestorProfile : [struct] investor's parameters
#'
#' @return CertaintyEquivalent : [scalar]
#'
#' @references
#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
#' \url{http://symmys.com/node/170}, "E 285 - Estimation risk and opportunity cost".
#'
#' See Meucci's script for " EvaluationSatisfaction.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}
EvaluationSatisfaction = function( Allocation, Market, InvestorProfile )
{
    P = diag( Market$CurrentPrices, length( Market$CurrentPrices ) );
    # CE = expected final wealth - variance penalty scaled by risk propensity.
    CertaintyEquivalent = t( Allocation ) %*% P %*% ( 1 + Market$LinRets_EV ) -
        1 / ( 2 * InvestorProfile$RiskPropensity ) *
        t( Allocation ) %*% P %*% Market$LinRets_Cov %*% P %*% Allocation;

    return( CertaintyEquivalent[ 1 ] )
}

#' Determine the allocation of the best performer, as described in A. Meucci
#' "Risk and Asset Allocation", Springer, 2005.
#'
#' @param Market          : [struct] market parameters
#' @param InvestorProfile : [struct] investor's parameters
#'
#' @return Allocation : [vector] (N x 1)
#'
#' @note
#' scenario-dependent decision that tries to pick the optimal allocation
#'
#' @references
#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
#' \url{http://symmys.com/node/170}, "E 285 - Estimation risk and opportunity cost".
#'
#' See Meucci's script for "EvaluationDecisionBestPerformer.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}
EvaluationDecisionBestPerformer = function( Market, InvestorProfile )
{
    # find index of best performer in the last observed period
    B = which.max( Market$LinRetsSeries[ nrow( Market$LinRetsSeries ), ] );

    # invest the whole budget in that asset
    I = diag( 1, length( Market$CurrentPrices ) );
    Allocation = InvestorProfile$Budget * I[ , B ] / Market$CurrentPrices[ B ];

    return( Allocation );
}

#' Determine the cost of allocation, as described in A. Meucci
#' "Risk and Asset Allocation", Springer, 2005.
#'
#' @param Allocation      : [vector] (N x 1)
#' @param Market          : [struct] market parameters
#' @param InvestorProfile : [struct] investor's parameters
#'
#' @return C_Plus : [scalar] cost
#'
#' @references
#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
#' \url{http://symmys.com/node/170}, "E 285 - Estimation risk and opportunity cost".
#'
#' See Meucci's script for "EvaluationDecisionBestPerformer.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}
EvaluationCost = function( Allocation, Market, InvestorProfile )
{
    P = diag( Market$CurrentPrices, length( Market$CurrentPrices ) );
    aXi   = t( Allocation ) %*% P %*% ( 1 + Market$LinRets_EV );        # expected final wealth
    aPhia = t( Allocation ) %*% P %*% Market$LinRets_Cov %*% P %*% Allocation;  # wealth variance

    # Budget-at-risk (VaR-type) constraint violation. The original wrote
    # sqrt(2 %*% aPhia) -- matrix product with a scalar; plain '*' is the
    # intended (and numerically identical) operation.
    # NOTE(review): erfinv is not base R; presumably supplied elsewhere in the
    # package (e.g. pracma) -- confirm it is in scope.
    C = ( 1 - InvestorProfile$BaR ) * InvestorProfile$Budget - aXi +
        sqrt( 2 * aPhia ) * erfinv( 2 * InvestorProfile$Confidence - 1 );
    C_Plus = max( C, 0 );

    return( C_Plus );
}

#' This script evaluates a generic allocation decision (in this case the
#' "best performer" strategy, which fully invests the budget in the last
#' period's best performer).
#' It displays the distribution of satisfaction, cost of constraint violation
#' and opportunity cost for each value of the market stress-test parameters
#' (in this case the correlation).
#' Described in A. Meucci "Risk and Asset Allocation", Springer, 2005, Chapter 8.
#'
#' @references
#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
#' \url{http://symmys.com/node/170}, "E 285 - Estimation risk and opportunity cost".
#'
#' See Meucci's script for "S_EvaluationGeneric.m"
#
#' @author Xavier Valls \email{flamejat@@gmail.com}

##################################################################################################################
### Inputs
NumScenarios = 1000;
NumCorrelations = 5;
Bottom_Correlation = 0;
Top_Correlation = 0.99;

##################################################################################################################
### Input investor's parameters
# Assigning $-fields on NULL promotes it to a list.
InvestorProfile = NULL;
InvestorProfile$Budget = 10000;
InvestorProfile$RiskPropensity = 30;
InvestorProfile$Confidence = 0.9;
InvestorProfile$BaR = 0.2;

##################################################################################################################
### Input market parameters
NumAssets = 10;
a = 0.5; # effect of correlation on expected values and volatility (hidden)
Bottom = 0.06;
Top = 0.36;
Step = (Top - Bottom) / (NumAssets - 1);
v = seq( Bottom, Top, Step ) ; # volatility vector

Market = list();
Market$T = 20; # not hidden
Market$CurrentPrices = 10 * array( 1, NumAssets); # not hidden

##################################################################################################################
# Grid of overall correlation levels used as the stress-test parameter.
Step = (Top_Correlation - Bottom_Correlation) / (NumCorrelations - 1);
Overall_Correlations = seq( Bottom_Correlation, Top_Correlation, Step );

# Accumulators: one row per correlation level, one column per scenario.
Suboptimal = NULL;
Suboptimal$StrsTst_Satisfaction = NULL;
Suboptimal$StrsTst_CostConstraints = NULL;
Suboptimal$StrsTst_OppCost = NULL;
Optimal = NULL;
Optimal$StrsTst_Satisfaction = NULL;

for( t in 1 : length(Overall_Correlations) )
{
    # input the (hidden) market parameters (only correlations, we assume standard deviations and expected values fixed and known)
    Market$St_Devations = ( 1 + a * Overall_Correlations[ t ]) * v; # hidden
    Market$LinRets_EV = 0.5 * Market$St_Devations; # hidden
    # Equicorrelation matrix: 1 on the diagonal, Overall_Correlations[t] off-diagonal.
    Correlation = ( 1 - Overall_Correlations[ t ] ) * diag( 1, NumAssets) +
        Overall_Correlations[ t ] * matrix( 1, NumAssets, NumAssets);
    Market$LinRets_Cov = diag( Market$St_Devations, length(Market$St_Devations) ) %*% Correlation %*%
        diag( Market$St_Devations, length(Market$St_Devations) )

    ##################################################################################################################
    # compute optimal allocation, only possible if hidden parameters were known: thus it is not a "decision", we call it a "choice"
    Allocation = EvaluationChoiceOptimal( Market, InvestorProfile );
    Satisfaction_Optimal = EvaluationSatisfaction( Allocation, Market, InvestorProfile );

    ##################################################################################################################
    # choose allocation based on available information
    StrsTst_TrueSatisfaction = NULL;
    StrsTst_CostConstraints = NULL;
    for( s in 1 : NumScenarios )
    {
        # generate scenarios i_T of information I_T
        # NOTE(review): rmvnorm is not base R -- presumably mvtnorm's; confirm it is loaded by the package.
        Market$LinRetsSeries = rmvnorm( Market$T, Market$LinRets_EV, Market$LinRets_Cov );

        # scenario-dependent decision that tries to pick the optimal allocation
        Allocation = EvaluationDecisionBestPerformer( Market, InvestorProfile );

        TrueSatisfaction = EvaluationSatisfaction( Allocation, Market, InvestorProfile );
        CostConstraints = EvaluationCost( Allocation, Market, InvestorProfile );

        # Grown with cbind per scenario (matches the original MATLAB translation).
        StrsTst_TrueSatisfaction = cbind( StrsTst_TrueSatisfaction, TrueSatisfaction ); ##ok<*AGROW>
        StrsTst_CostConstraints = cbind( StrsTst_CostConstraints, CostConstraints );
    }

    # Stack this correlation level's scenario results as a new row.
    Suboptimal$StrsTst_CostConstraints = rbind( Suboptimal$StrsTst_CostConstraints, StrsTst_CostConstraints );
    Suboptimal$StrsTst_Satisfaction = rbind( Suboptimal$StrsTst_Satisfaction, StrsTst_TrueSatisfaction );
    # Opportunity cost = foregone satisfaction plus any constraint-violation cost.
    Suboptimal$StrsTst_OppCost = rbind( Suboptimal$StrsTst_OppCost,
        Satisfaction_Optimal - StrsTst_TrueSatisfaction + StrsTst_CostConstraints );

    Optimal$StrsTst_Satisfaction = rbind( Optimal$StrsTst_Satisfaction, Satisfaction_Optimal );
}

##################################################################################################################
### Display
NumVBins = round(10 * log(NumScenarios));

# optimal allocation vs. allocation decision: one window of three histograms
# per correlation level.
for( t in 1 : length(Overall_Correlations) )
{
    dev.new();
    par( mfrow = c( 3, 1) )
    hist(Suboptimal$StrsTst_Satisfaction[ t, ], NumVBins, main = "satisfaction", xlab ="", ylab = "" );
    hist(Suboptimal$StrsTst_CostConstraints[ t, ], NumVBins, main = "constraint violation cost", xlab ="", ylab = "");
    hist(Suboptimal$StrsTst_OppCost[ t, ], NumVBins, main = "opportunity cost", xlab ="", ylab = "");
}
/demo/S_EvaluationGeneric.R
no_license
runiaruni/Meucci
R
false
false
10,175
r
#' Determine the optimal allocation, as described in A. Meucci "Risk and Asset Allocation", Springer, 2005 #' #' @param Market : [struct] market parameters #' @param InvestorProfile : [struct] investor's parameters #' #' @return Allocation : [vector] (N x 1) #' #' @note #' Compute optimal allocation, only possible if hidden parameters were known: thus it is not a "decision", we call it a "choice" #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, #' "E 285 - Estimation risk and opportunity cost". #' #' See Meucci's script for " EvaluationChoiceOptimal.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} EvaluationChoiceOptimal = function( Market, InvestorProfile ) { Exp_Prices = diag( Market$CurrentPrices, length(Market$CurrentPrices) ) %*% ( 1 + Market$LinRets_EV ); Cov_Prices = diag( Market$CurrentPrices, length(Market$CurrentPrices) ) %*% Market$LinRets_Cov %*% diag( Market$CurrentPrices, length(Market$CurrentPrices) ); S = solve( Cov_Prices ) %*% diag( 1, dim(Cov_Prices) ); A = (t( Market$CurrentPrices ) %*% S %*% Market$CurrentPrices)[ 1 ]; B = (t( Market$CurrentPrices ) %*% S %*% Exp_Prices)[1]; Gamma = (( InvestorProfile$Budget - InvestorProfile$RiskPropensity * B) / A )[1]; Allocation = InvestorProfile$RiskPropensity * S %*% Exp_Prices + Gamma[ 1 ] * S %*% Market$CurrentPrices; return( Allocation ); } #' Compute the certainty-equivalent statisfaction index , as described in A. Meucci "Risk and Asset Allocation", #' Springer, 2005. #' #' @param Allocation : [vector] (N x 1) #' @param Market : [struct] market parameters #' @param InvestorProfile : [struct] investor s parameters #' #' @return CertaintyEquivalent : [scalar] #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, #' "E 285 - Estimation risk and opportunity cost". 
#'
#' See Meucci's script for " EvaluationSatisfaction.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}

EvaluationSatisfaction = function( Allocation, Market, InvestorProfile )
{
    # Price-scaling matrix diag(p)
    P = diag( Market$CurrentPrices, length( Market$CurrentPrices ) );

    # expected terminal value of the portfolio (1 x 1)
    ExpectedValue = t( Allocation ) %*% P %*% ( 1 + Market$LinRets_EV );

    # variance of the terminal value (1 x 1)
    Variance = t( Allocation ) %*% P %*% Market$LinRets_Cov %*% P %*% Allocation;

    # mean-variance certainty equivalent: E - Var / (2 * risk propensity)
    CertaintyEquivalent = ExpectedValue - Variance / ( 2 * InvestorProfile$RiskPropensity );

    return( CertaintyEquivalent[ 1 ] );
}

#' Determine the allocation of the best performer, as described in A. Meucci
#' "Risk and Asset Allocation", Springer, 2005.
#'
#' @param Market          : [struct] market parameters
#' @param InvestorProfile : [struct] investors parameters
#'
#' @return Allocation : [vector] (N x 1)
#'
#' @note
#' scenario-dependent decision that tries to pick the optimal allocation
#'
#' @references
#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
#' \url{http://symmys.com/node/170},
#' "E 285 - Estimation risk and opportunity cost".
#'
#' See Meucci's script for "EvaluationDecisionBestPerformer.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}

EvaluationDecisionBestPerformer = function( Market, InvestorProfile )
{
    # index of the asset with the highest return in the last observed period
    Best = which.max( Market$LinRetsSeries[ nrow( Market$LinRetsSeries ), ] );

    # put the whole budget into that single asset (same vector the original
    # built via a column of the identity matrix)
    Allocation = rep( 0, length( Market$CurrentPrices ) );
    Allocation[ Best ] = InvestorProfile$Budget / Market$CurrentPrices[ Best ];

    return( Allocation );
}

#' Determine the cost of allocation, as described in A. Meucci "Risk and Asset
#' Allocation", Springer, 2005.
#'
#' @param Allocation      : [vector] (N x 1)
#' @param Market          : [struct] market parameters
#' @param InvestorProfile : [struct] investor's parameters
#'
#' @return C_Plus : [scalar] cost
#'
#' @references
#' A.
#' Meucci - "Exercises in Advanced Risk and Portfolio Management"
#' \url{http://symmys.com/node/170},
#' "E 285 - Estimation risk and opportunity cost".
#'
#' See Meucci's script for "EvaluationDecisionBestPerformer.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}

EvaluationCost = function( Allocation, Market, InvestorProfile )
{
    # Price-scaling matrix diag(p)
    P = diag( Market$CurrentPrices, length( Market$CurrentPrices ) );

    # expected terminal value of the portfolio (1 x 1 matrix)
    aXi = t( Allocation ) %*% P %*% ( 1 + Market$LinRets_EV );

    # variance of the terminal value (1 x 1 matrix)
    aPhia = t( Allocation ) %*% P %*% Market$LinRets_Cov %*% P %*% Allocation;

    # Budget-at-Risk style constraint: (1 - BaR) * Budget minus a Gaussian
    # quantile of the terminal value.
    # NOTE(review): erfinv() is not base R — it must be supplied elsewhere
    # (e.g. pracma); erfinv(2 * c - 1) equals qnorm(c) / sqrt(2).
    # FIX: the original wrote sqrt(2 %*% aPhia), matrix-multiplying the scalar
    # literal 2 — it only worked because aPhia is 1 x 1; plain scalar
    # multiplication is the intended operation.
    C = ( 1 - InvestorProfile$BaR ) * InvestorProfile$Budget - aXi +
        sqrt( 2 * aPhia ) * erfinv( 2 * InvestorProfile$Confidence - 1 );

    # only constraint violations count as a cost
    C_Plus = max( C, 0 );

    return( C_Plus );
}

#' This script evaluates a generic allocation decision (in this case the "best
#' performer" strategy, which fully invests the budget in the last period's
#' best performer).
#' It displays the distribution of satisfaction, cost of constraint violation
#' and opportunity cost for each value of the market stress-test parameters
#' (in this case the correlation).
#' Described in A. Meucci "Risk and Asset Allocation", Springer, 2005, Chapter 8.
#'
#' @references
#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
#' \url{http://symmys.com/node/170},
#' "E 285 - Estimation risk and opportunity cost".
#'
#' See Meucci's script for "S_EvaluationGeneric.m"
#
#' @author Xavier Valls \email{flamejat@@gmail.com}

##################################################################################################################
### Inputs
NumScenarios       = 1000;
NumCorrelations    = 5;
Bottom_Correlation = 0;
Top_Correlation    = 0.99;

##################################################################################################################
### Input investor's parameters
InvestorProfile                = NULL;
InvestorProfile$Budget         = 10000;
InvestorProfile$RiskPropensity = 30;
InvestorProfile$Confidence     = 0.9;
InvestorProfile$BaR            = 0.2;

##################################################################################################################
### Input market parameters
NumAssets = 10;
a = 0.5;   # effect of correlation on expected values and volatility (hidden)

# evenly spaced volatility vector
Bottom = 0.06;
Top    = 0.36;
Step   = ( Top - Bottom ) / ( NumAssets - 1 );
v      = seq( Bottom, Top, Step );

Market = list();
Market$T             = 20;                          # not hidden
Market$CurrentPrices = 10 * array( 1, NumAssets );  # not hidden

##################################################################################################################
# stress-test grid of overall correlation levels
Step = ( Top_Correlation - Bottom_Correlation ) / ( NumCorrelations - 1 );
Overall_Correlations = seq( Bottom_Correlation, Top_Correlation, Step );

# accumulators, one row per correlation level
Suboptimal = NULL;
Suboptimal$StrsTst_Satisfaction    = NULL;
Suboptimal$StrsTst_CostConstraints = NULL;
Suboptimal$StrsTst_OppCost         = NULL;

Optimal = NULL;
Optimal$StrsTst_Satisfaction = NULL;

for( t in 1 : length( Overall_Correlations ) )
{
    # input the (hidden) market parameters (only correlations; we assume
    # standard deviations and expected values fixed and known)
    Market$St_Devations = ( 1 + a * Overall_Correlations[ t ] ) * v;   # hidden
    Market$LinRets_EV   = 0.5 * Market$St_Devations;                   # hidden
    Correlation = ( 1 - Overall_Correlations[ t ] ) * diag( 1, NumAssets ) +
                  Overall_Correlations[ t ] * matrix( 1, NumAssets, NumAssets );
    Market$LinRets_Cov = diag( Market$St_Devations, length( Market$St_Devations ) ) %*%
                         Correlation %*%
                         diag( Market$St_Devations, length( Market$St_Devations ) )

    ##############################################################################################################
    # compute optimal allocation, only possible if hidden parameters were known:
    # thus it is not a "decision", we call it a "choice"
    Allocation           = EvaluationChoiceOptimal( Market, InvestorProfile );
    Satisfaction_Optimal = EvaluationSatisfaction( Allocation, Market, InvestorProfile );

    ##############################################################################################################
    # choose allocation based on available information
    StrsTst_TrueSatisfaction = NULL;
    StrsTst_CostConstraints  = NULL;

    for( s in 1 : NumScenarios )
    {
        # generate scenarios i_T of information I_T
        Market$LinRetsSeries = rmvnorm( Market$T, Market$LinRets_EV, Market$LinRets_Cov );

        # scenario-dependent decision that tries to pick the optimal allocation
        Allocation       = EvaluationDecisionBestPerformer( Market, InvestorProfile );
        TrueSatisfaction = EvaluationSatisfaction( Allocation, Market, InvestorProfile );
        CostConstraints  = EvaluationCost( Allocation, Market, InvestorProfile );

        StrsTst_TrueSatisfaction = cbind( StrsTst_TrueSatisfaction, TrueSatisfaction );
        StrsTst_CostConstraints  = cbind( StrsTst_CostConstraints, CostConstraints );
    }

    Suboptimal$StrsTst_CostConstraints = rbind( Suboptimal$StrsTst_CostConstraints, StrsTst_CostConstraints );
    Suboptimal$StrsTst_Satisfaction    = rbind( Suboptimal$StrsTst_Satisfaction, StrsTst_TrueSatisfaction );
    # opportunity cost = optimal satisfaction - realized satisfaction + constraint cost
    Suboptimal$StrsTst_OppCost = rbind( Suboptimal$StrsTst_OppCost,
                                        Satisfaction_Optimal - StrsTst_TrueSatisfaction + StrsTst_CostConstraints );

    Optimal$StrsTst_Satisfaction = rbind( Optimal$StrsTst_Satisfaction, Satisfaction_Optimal );
}

##################################################################################################################
### Display
NumVBins = round( 10 * log( NumScenarios ) );
# optimal allocation vs. allocation decision
for( t in seq_along( Overall_Correlations ) )
{
    dev.new();
    par( mfrow = c( 3, 1 ) )

    # distribution of satisfaction across scenarios at this correlation level
    hist( Suboptimal$StrsTst_Satisfaction[ t, ], NumVBins,
          main = "satisfaction", xlab = "", ylab = "" );
    # distribution of the budget-constraint violation cost
    hist( Suboptimal$StrsTst_CostConstraints[ t, ], NumVBins,
          main = "constraint violation cost", xlab = "", ylab = "" );
    # distribution of the opportunity cost of the decision
    hist( Suboptimal$StrsTst_OppCost[ t, ], NumVBins,
          main = "opportunity cost", xlab = "", ylab = "" );
}
### Decision tree classification model
### (the original header said "Logistic regression model" — a leftover from a
### sibling script; corrected)
# data = read.csv(file.choose())

library(svDialogs)
library(tcltk)
library(rpart)

#' Interactive decision-tree workflow on a single dataset.
#'
#' The user picks the dependent variable, the independent variables and a
#' unique-id column through dialog boxes; the data is split 75/25 into
#' train/test, an rpart classification tree is fit, and the test set with
#' predictions is written to "Predicted_dataset_decision.csv".
#'
#' @param data data.frame holding all candidate columns
decision_one = function(data){

  # --- dependent variable (one retry before redirecting to the home page) ---
  de = dlgList(c(colnames(data)), multiple = FALSE, title = "Select the Dependent Variable")$res
  if (!length(de)) {
    msgBox <- tkmessageBox(title = "Select Dependent Variable",
                           message = "Can You please Select the Dependent Variable To Continue...",
                           icon = "info", type = "ok")
    de = dlgList(c(colnames(data)), multiple = FALSE, title = "Select the Dependent Variable")$res
    if (!length(de)) {
      msgBox <- tkmessageBox(title = "Select Dependent Variable",
                             message = "Sorry You didn't select any dependent variable...Now You are redirect to Home_Page",
                             icon = "info", type = "ok")
      source("Home_Page.R")
    }
  } else {
    # pass the value directly: the original wrapped it in print(), which only
    # added a console echo side effect
    msgBox <- tkmessageBox(title = "You Selected Dependent Variable is",
                           message = de, icon = "info", type = "ok")
  }

  # --- independent variables (same retry pattern) ---
  ind = dlgList(c(colnames(data)), multiple = TRUE, title = "Select the InDependent Variable")$res
  if (!length(ind)) {
    msgBox <- tkmessageBox(title = "Select InDependent Variable",
                           message = "Can You please Select the InDependent Variable",
                           icon = "info", type = "ok")
    ind = dlgList(c(colnames(data)), multiple = TRUE, title = "Select the InDependent Variable")$res
    if (!length(ind)) {
      msgBox <- tkmessageBox(title = "Select Dependent Variable",
                             message = "Sorry You didn't select any dependent variable...Now You are redirect to Home_Page",
                             icon = "info", type = "ok")
      source("Home_Page.R")
    }
  } else {
    msgBox <- tkmessageBox(title = "Selected InDependent Variable",
                           message = ind, icon = "info", type = "ok")
  }

  # --- unique id column ---
  id = dlgList(c(colnames(data)), multiple = FALSE, title = "Select Unique ID column or Variable")$res

  # assemble id / dependent / independents; column 2 is the dependent
  data1 = cbind(data[,c(id)], data[,c(de)], data[,c(ind)])
  names(data1)[2] = de

  # reproducible 75/25 train/test split
  smp_size <- floor(0.75 * nrow(data1))
  ## set the seed to make your partition reproducible
  set.seed(123)
  train_ind <- sample(seq_len(nrow(data1)), size = smp_size)
  train_data <- data1[train_ind, ]
  train_data = train_data[,-1]   # drop the id column from the training set
  test_data <- data1[-train_ind, ]
  # predictors only; drop = FALSE keeps a data.frame even with one predictor
  test_data1 = test_data[, -c(1:2), drop = FALSE]

  if(sum(is.na(data1)) > 0 ){
    # FIX: the original message said "Logistic Model" in this decision-tree script
    msgBox <- tkmessageBox(title = "Unsuccessful to Run the Decision Tree Model",
                           message = "Sorry Please try again..", icon = "info", type = "ok")
  } else {
    names(train_data)[1] = "dependent"
    train.fit = rpart(dependent ~ ., method="class", data = train_data)
    data_predict = predict(train.fit, test_data1, type = "class")
    data_predict = as.data.frame(data_predict)
    names(data_predict)[1] = "Predicted_data"
    test_data = cbind(test_data, data_predict)
    write.csv(test_data,"Predicted_dataset_decision.csv")
    msgBox <- tkmessageBox(title = "Model Run",
                           message = "Successfully Run the Decision Tree Model, Please Check your directory to view prediction",
                           icon = "info", type = "ok")
  }
}

#' Interactive decision-tree workflow with pre-split train and test sets.
#'
#' @param train training data.frame (must contain the chosen variables)
#' @param test  test data.frame; predictions are appended and written to
#'              "Predicted_dataset_decision_tree.csv"
decision_two = function(train, test){

  de = dlgList(c(colnames(train)), multiple = FALSE, title = "Select the Dependent Variable")$res
  if (!length(de)) {
    msgBox <- tkmessageBox(title = "Select Dependent Variable",
                           message = "Can You please Select the Dependent Variable To Continue...",
                           icon = "info", type = "ok")
  } else {
    msgBox <- tkmessageBox(title = "You Selected Dependent Variable is",
                           message = de, icon = "info", type = "ok")
  }

  library(tcltk)
  ind = dlgList(c(colnames(train)), multiple = TRUE, title = "Select the InDependent Variable")$res
  if (!length(ind)) {
    msgBox <- tkmessageBox(title = "Select InDependent Variable",
                           message = "Can You please Select the InDependent Variable",
                           icon = "info", type = "ok")
  } else {
    msgBox <- tkmessageBox(title = "Selected InDependent Variable",
                           message = ind, icon = "info", type = "ok")
  }

  train_dep = cbind(train[,c(de)],train[,c(ind)])

  if(sum(is.na(train)) > 0 ){
    # FIX: the original message said "Logistic Model" in this decision-tree script
    msgBox <- tkmessageBox(title = "Unsuccessful to Run the Decision Tree Model",
                           message = "Sorry Please try again..", icon = "info", type = "ok")
  } else {
    names(train_dep)[1] = "dependent"
    train.fit = rpart(dependent ~ .,method="class", data = train_dep)
    # drop = FALSE keeps a data.frame even when a single predictor is selected,
    # which predict.rpart requires
    test_data = test[, ind, drop = FALSE]
    data_predict = predict(train.fit, test_data, type = "class")
    data_predict = as.data.frame(data_predict)
    names(data_predict)[1] = "Predicted_data"
    test = cbind(test, data_predict)
    write.csv(test,"Predicted_dataset_decision_tree.csv")
    msgBox <- tkmessageBox(title = "Model Run",
                           message = "Successfully Run the Decision Tree Model, Please Check your directory to view prediction",
                           icon = "info", type = "ok")
  }
}

# data = mtcars

# Entry point: let the user choose between splitting one dataset or using
# pre-split train/test sets (assumes `data` / `train`+`test` exist in the
# calling environment — TODO confirm against Home_Page.R)
ll = menu(c("Split Dataset and Use","Already you have Train and Test Dataset"),
          graphics = TRUE, title = "Please Select")
if(ll == 1){
  decision_one(data)
} else{
  decision_two(train, test)
}

#?tkmessage
#library(gWidgets)
#library(gWidgetstcltk)
#win <- gwindow("Tab delimited file upload example")
#grp_name <- ggroup(container = win)
#lbl_data_frame_name <- glabel(
#"Variable to save data to: ",
#container = grp_name
/Decision_tree_classification.R
no_license
Sudhakar12345/Predictive-Automation
R
false
false
5,143
r
### Logistic regression model
# data = read.csv(file.choose())

library(svDialogs)
library(tcltk)
library(rpart)

# Interactive decision-tree run on a single dataset: pick the variables via
# dialogs, split 75/25, fit an rpart classification tree, and write the test
# set plus predictions to disk.
decision_one = function(data){

  # dependent variable, with a single retry before going back to the home page
  dep_var = dlgList(c(colnames(data)), multiple = FALSE, title = "Select the Dependent Variable")$res
  if (!length(dep_var)) {
    dlg <- tkmessageBox(title = "Select Dependent Variable",
                        message = "Can You please Select the Dependent Variable To Continue...",
                        icon = "info", type = "ok")
    dep_var = dlgList(c(colnames(data)), multiple = FALSE, title = "Select the Dependent Variable")$res
    if (!length(dep_var)) {
      dlg <- tkmessageBox(title = "Select Dependent Variable",
                          message = "Sorry You didn't select any dependent variable...Now You are redirect to Home_Page",
                          icon = "info", type = "ok")
      source("Home_Page.R")
    }
  } else {
    dlg <- tkmessageBox(title = "You Selected Dependent Variable is",
                        message = print(dep_var), icon = "info", type = "ok")
  }

  # independent variables, same retry pattern
  indep_vars = dlgList(c(colnames(data)), multiple = TRUE, title = "Select the InDependent Variable")$res
  if (!length(indep_vars)) {
    dlg <- tkmessageBox(title = "Select InDependent Variable",
                        message = "Can You please Select the InDependent Variable",
                        icon = "info", type = "ok")
    indep_vars = dlgList(c(colnames(data)), multiple = TRUE, title = "Select the InDependent Variable")$res
    if (!length(indep_vars)) {
      dlg <- tkmessageBox(title = "Select Dependent Variable",
                          message = "Sorry You didn't select any dependent variable...Now You are redirect to Home_Page",
                          icon = "info", type = "ok")
      source("Home_Page.R")
    }
  } else {
    dlg <- tkmessageBox(title = "Selected InDependent Variable",
                        message = print(indep_vars), icon = "info", type = "ok")
  }

  # unique id column
  id_col = dlgList(c(colnames(data)), multiple = FALSE, title = "Select Unique ID column or Variable")$res

  # id / dependent / independents, dependent renamed to its original label
  model_df = cbind(data[,c(id_col)], data[,c(dep_var)], data[,c(indep_vars)])
  names(model_df)[2] = print(dep_var)

  # reproducible 75/25 split
  n_train <- floor(0.75 * nrow(model_df))
  ## set the seed to make your partition reproductible
  set.seed(123)
  train_rows <- sample(seq_len(nrow(model_df)), size = n_train)
  trn <- model_df[train_rows, ]
  trn = trn[,-1]
  tst <- model_df[-train_rows, ]
  tst_predictors = tst[,-c(1:2)]

  if(sum(is.na(model_df)) > 0 ){
    dlg <- tkmessageBox(title = "UnSuccessesful to Run the Logistic Model",
                        message = "Sorry Please try again..", icon = "info", type = "ok")
  } else {
    names(trn)[1] = "dependent"
    tree_fit = rpart(dependent ~ ., method="class", data = trn)
    #test_data = test_data[,-c(de)]
    preds = predict(tree_fit, tst_predictors, type = "class")
    #preds = round(preds)
    preds = as.data.frame(preds)
    names(preds)[1] = "Predicted_data"
    tst = cbind(tst, preds)
    write.csv(tst,"Predicted_dataset_decision.csv")
    dlg <- tkmessageBox(title = "Model Run",
                        message = "Successesfully Run the Decision Tree Model, Please Check your directory to view prediction",
                        icon = "info", type = "ok")
  }
}

# Interactive decision-tree run when the caller already has separate train
# and test data frames.
decision_two = function(train, test){

  dep_var = dlgList(c(colnames(train)), multiple = FALSE, title = "Select the Dependent Variable")$res
  if (!length(dep_var)) {
    dlg <- tkmessageBox(title = "Select Dependent Variable",
                        message = "Can You please Select the Dependent Variable To Continue...",
                        icon = "info", type = "ok")
  } else {
    dlg <- tkmessageBox(title = "You Selected Dependent Variable is",
                        message = print(dep_var), icon = "info", type = "ok")
  }

  library(tcltk)
  indep_vars = dlgList(c(colnames(train)), multiple = TRUE, title = "Select the InDependent Variable")$res
  if (!length(indep_vars)) {
    dlg <- tkmessageBox(title = "Select InDependent Variable",
                        message = "Can You please Select the InDependent Variable",
                        icon = "info", type = "ok")
  } else {
    dlg <- tkmessageBox(title = "Selected InDependent Variable",
                        message = print(indep_vars), icon = "info", type = "ok")
  }

  train_subset = cbind(train[,c(dep_var)],train[,c(indep_vars)])

  if(sum(is.na(train)) > 0 ){
    dlg <- tkmessageBox(title = "UnSuccessesful to Run the Logistic Model",
                        message = "Sorry Please try again..", icon = "info", type = "ok")
  } else {
    names(train_subset)[1] = "dependent"
    tree_fit = rpart(dependent ~ .,method="class", data = train_subset)
    test_predictors = test[,c(indep_vars)]
    preds = predict(tree_fit, test_predictors, type = "class")
    preds = as.data.frame(preds)
    names(preds)[1] = "Predicted_data"
    test = cbind(test, preds)
    write.csv(test,"Predicted_dataset_decision_tree.csv")
    dlg <- tkmessageBox(title = "Model Run",
                        message = "Successesfully Run the Decision Tree Model, Please Check your directory to view prediction",
                        icon = "info", type = "ok")
  }
}

# data = mtcars

# entry point: choose between splitting one dataset or using pre-split sets
ll = menu(c("Split Dataset and Use","Already you have Train and Test Dataset"),
          graphics = TRUE, title = "Please Select")
if(ll == 1){
  decision_one(data)
} else{
  decision_two(train, test)
}

#?tkmessage
#library(gWidgets)
#library(gWidgetstcltk)
#win <- gwindow("Tab delimited file upload example")
#grp_name <- ggroup(container = win)
#lbl_data_frame_name <- glabel(
#"Variable to save data to: ",
#container = grp_name
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/McCOILR.R \docType{package} \name{McCOILR} \alias{McCOILR} \alias{package-McCOILR} \alias{McCOILR-package} \title{Wrapper for THE REAL McCOIL in Rcpp} \description{ Wrapper for THE REAL McCOIL in Rcpp, so that package can be more easily run on distributed computing services and cluster infrastructure. } \details{ Rcpp implementation of THE REAL McCOIL } \references{ 1 Chang H-H, Worby CJ, Yeka A, Nankabirwa J, Kamya MR, Staedke SG, Dorsey G, Murphy M, Neafsey DE, Jeffreys AE, Hubbart C, Rockett KA, Amato R, Kwiatkowski DP, Buckee C, Greenhouse B. 2017. THE REAL McCOIL: A method for the concurrent estimation of the complexity of infection and SNP allele frequency for malaria parasites. PLOS Comput Biol 13: e1005348. doi:10.1371/journal.pcbi.1005348 }
/man/McCOILR.Rd
no_license
arisp99/McCOILR
R
false
true
838
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/McCOILR.R \docType{package} \name{McCOILR} \alias{McCOILR} \alias{package-McCOILR} \alias{McCOILR-package} \title{Wrapper for THE REAL McCOIL in Rcpp} \description{ Wrapper for THE REAL McCOIL in Rcpp, so that package can be more easily run on distributed computing services and cluster infrastructure. } \details{ Rcpp implementation of THE REAL McCOIL } \references{ 1 Chang H-H, Worby CJ, Yeka A, Nankabirwa J, Kamya MR, Staedke SG, Dorsey G, Murphy M, Neafsey DE, Jeffreys AE, Hubbart C, Rockett KA, Amato R, Kwiatkowski DP, Buckee C, Greenhouse B. 2017. THE REAL McCOIL: A method for the concurrent estimation of the complexity of infection and SNP allele frequency for malaria parasites. PLOS Comput Biol 13: e1005348. doi:10.1371/journal.pcbi.1005348 }
# ---- libraries ----
library(dplyr)
library(ggplot2)
library(shiny)
library(DT)
library(ggrepel)
library(tidyr)
library(shinycssloaders)
library(shinythemes)
library(readr)
library(data.table)
library(tidyverse)
library(e1071)
library(caret)
library(lattice)
library(stringr)
library(hablar)
library(scales)

# Import Data
######################################################################################################### xulu data
offline <- read.csv("ccf_offline_stage1_train.csv")
online  <- read.csv("ccf_online_stage1_train.csv")

# Convert the yyyymmdd columns to Date
offline$Date_received <- as.Date(offline[["Date_received"]], "%Y%m%d")
offline$Date          <- as.Date(offline[["Date"]], "%Y%m%d")
online$Date_received  <- as.Date(online[["Date_received"]], "%Y%m%d")
online$Date           <- as.Date(online[["Date"]], "%Y%m%d")
offline$Distance      <- as.integer(offline[["Distance"]])

# Reproducible 0.1% samples of each table
set.seed(1)
online.s  <- sample(row.names(online), 0.001 * dim(online)[1])
offline.s <- sample(row.names(offline), 0.001 * dim(offline)[1])
online.s  <- online[online.s, ]
offline.s <- offline[offline.s, ]
######################################################################################################### xulu data end

########################################################################### TYT Data Import and processing
### offline data processing
# Keep only rows where a coupon exists and was actually redeemed (Date present)
redeemed_offline <- offline
redeemed_offline <- redeemed_offline %>%
  filter(!is.na(redeemed_offline$Date) & offline$Coupon_id != "null")
# days elapsed between receiving the coupon and redeeming it
redeemed_offline$RedeemDays <- redeemed_offline$Date - redeemed_offline$Date_received

### online data processing
# online <- read.csv('ccf_online_stage1_train.csv', header = T, na.strings = NA)
#online<-data.frame(online) redeemed_online<-online redeemed_online<-redeemed_online %>% filter(!is.na(redeemed_online$Date) & online$Coupon_id !="null") #online %>% #filter(online$Date != 'null' & online$Coupon_id != 'null') -> redeemed_online #redeemed_online$Date <- as.Date(redeemed_online[["Date"]], "%Y%m%d") #redeemed_online$Date_received <- as.Date(redeemed_online[["Date_received"]], "%Y%m%d") redeemed_online$RedeemDays <- redeemed_online$Date - redeemed_online$Date_received ######################################################################### #Continue TYT part for Use Rate and Coupon Rate relationship ###offline data processing #offline %>% # filter(offline$Date_received != 'null' & offline$Coupon_id != 'null') -> getcpOffline getcpOffline<- offline%>% filter(!is.na(offline$Date_received)& offline$Coupon_id != "null") getcpOffline %>% count(Discount_rate) -> a redeemed_offline %>% count(Discount_rate) -> b merge(a, b, by = 'Discount_rate') -> result_Offline result_Offline$use_rate <- result_Offline$n.y/result_Offline$n.x names(result_Offline)[3] <- 'redNum' names(result_Offline)[2] <- 'getcpNum' ratio1 <- lapply(result_Offline$Discount_rate,function(x){ re <- unlist(str_split(x,":")) if(re > 1){ r <- as.numeric(re[2])/as.numeric(re[1]) } else{ r <- 1-as.numeric(re) } }) result_Offline$discount_percent_off <- unlist(ratio1) sorted_discountOff <- result_Offline[order(result_Offline$discount_percent_off),] sorted_discountOff$discount_percent_off <- percent(sorted_discountOff$discount_percent_off) sorted_discountOff$discount_percent_off <- as.factor(sorted_discountOff[["discount_percent_off"]]) ###online data processing #online %>% # filter(online$Date_received != 'null' & online$Coupon_id != 'null') -> getcpOnline getcpOnline<- online%>% filter(!is.na(online$Date_received)& online$Coupon_id != "null") getcpOnline %>% count(Discount_rate) -> a redeemed_online %>% count(Discount_rate) -> b merge(a, b, by = 'Discount_rate') -> result_Online 
# --- Online use-rate table (mirrors the offline block above) ----------------
result_Online$use_rate <- result_Online$n.y/result_Online$n.x
names(result_Online)[3] <- 'redNum'
names(result_Online)[2] <- 'getcpNum'
# NOTE(review): unlike the offline version, no fallback for plain-rate labels —
# assumes every online Discount_rate is "min:off"; anything else yields NA.
ratio <- lapply(result_Online$Discount_rate,function(x){
  re <- unlist(str_split(x,":"))
  r <- as.numeric(re[2])/as.numeric(re[1])
})
result_Online$discount_percent_off <- unlist(ratio)
sorted_discount <- result_Online[order(result_Online$discount_percent_off),]
sorted_discount$discount_percent_off <- percent(sorted_discount$discount_percent_off)
sorted_discount$discount_percent_off <- as.factor(sorted_discount[["discount_percent_off"]])
##############################################################################################################tyt data end
############################################################################################function yating
# redeemtime(): histogram plus smoothed trend of receive-to-redeem day counts,
# restricted to `range` days, from the offline or online redeemed subset.
redeemtime <- function(range = c(0, 50), type = "offline"){
  resptyt <- ggplot()
  #judge type first
  #offline part
  if (type=="offline"){
    red<-filter(redeemed_offline,redeemed_offline$RedeemDays>=range[1] & redeemed_offline$RedeemDays<=range[2])##redeem days range filter
    resx<-red$RedeemDays
  }
  else{
    red <- filter(redeemed_online,redeemed_online$RedeemDays>=range[1] & redeemed_online$RedeemDays<=range[2])
    resx <- red$RedeemDays
  }
  ###Calculating part end
  ##Output part
  #res<-data.frame(resx)
  #names(res) <- c("resx")
  # Bin the counts, then reuse the computed bin layer (x, y) as input data for
  # a smoother laid over the histogram.
  resptyt<-resptyt+stat_bin(data = red,aes(x=resx))
  m <- layer_data(resptyt)
  resptyt<-resptyt + stat_smooth(data = m, aes(x, y))+ xlab("Redeem Days")
  return(resptyt)
}
###function
# UseRateCouponRatecol(): faceted column chart of use_rate per Discount_rate,
# one facet per selected discount_percent_off category.
UseRateCouponRatecol <- function(discount_col = c('20%','10%'), type = "offline"){
  resptyt <- ggplot()
  ##offline part
  if (type=="offline"){
    percent_offline <- data.frame()
    # Collect the rows belonging to each requested percent-off category.
    for (i in discount_col){
      percent_offline <- rbind(percent_offline, sorted_discountOff[sorted_discountOff$discount_percent_off == i,])
    }
    percent_offline <- na.omit(percent_offline)
    resptyt <- resptyt+
      geom_col(data = percent_offline, mapping = aes(x = Discount_rate, y=use_rate))+
      facet_wrap( ~ discount_percent_off, nrow = 2)+
      theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5))+
      xlab("Discount rate")+
      ylab("Coupon Use Rate")
  }
  ###online part
  else{
    percent_online <- data.frame()
    for (i in discount_col){
      percent_online <- rbind(percent_online, sorted_discount[sorted_discount$discount_percent_off == i,])
    }
    percent_online <- na.omit(percent_online)
    resptyt<- resptyt +
      geom_col(data = percent_online, mapping = aes(x = Discount_rate, y=use_rate))+
      facet_wrap( ~ discount_percent_off, nrow = 2)+
      theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5))+
      xlab("Discount rate")+
      ylab("Coupon Use Rate")
  }
  return(resptyt)
}
# UseRateCouponRatebox(): box plot of use_rate by percent-off category; with no
# selection it is meant to plot every category.
UseRateCouponRatebox <- function(discount_box = c('1%','17%','5%', '67%'), type = 'offline'){
  resptyt <- ggplot()
  if(type == 'offline'){
    # NOTE(review): `is.na(discount_box)` on a length>1 vector produces a vector
    # condition (an error in R >= 4.2); likely meant is.null() or a length check.
    if(is.na(discount_box)){
      resptyt <- resptyt +
        geom_boxplot(data = sorted_discountOff, mapping = aes(x = discount_percent_off, y = use_rate))+
        theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5))+
        xlab("Discount Percent Off of Coupon")+
        ylab("Coupon Use Rate")
    }
    else{
      discount_offline <- data.frame()
      for (i in discount_box){
        discount_offline <- rbind(discount_offline, sorted_discountOff[sorted_discountOff$discount_percent_off == i,])
      }
      discount_offline <- na.omit(discount_offline)
      resptyt <- resptyt+
        geom_boxplot(data = discount_offline, mapping = aes(x = discount_percent_off, y = use_rate))+
        theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5))+
        xlab("Discount Percent Off of Coupon")+
        ylab("Coupon Use Rate")
    }
  }
  else{
    if(is.na(discount_box)){
      resptyt <- resptyt +
        geom_boxplot(data = sorted_discount, mapping = aes(x = discount_percent_off, y = use_rate))+
        theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5))+
        xlab("Discount Percent Off of Coupon")+
        ylab("Coupon Use Rate")
    }
    else{
      discount_online <- data.frame()
      for (i in discount_box){
        discount_online <- rbind(discount_online, sorted_discount[sorted_discount$discount_percent_off == i,])
      }
      discount_online <-
      na.omit(discount_online)
      resptyt <- resptyt +
        geom_boxplot(data = discount_online, mapping = aes(x = discount_percent_off, y = use_rate))+
        theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5))+
        xlab("Discount Percent Off of Coupon")+
        ylab("Coupon Use Rate")
    }
  }
  return(resptyt)
}
############################################################################################function yating end
############################################################################################function xulu
#####function 1 xulu
#top n (offline filter distance)
#input n: top n
# linetype:"offline"/"online"
# rof :"freq" / "ratioiss"(usage/issue) / "ratioconsum" (usage/consumption)
# distance: two side list (offline only)
# topn(): bar chart of the top-n merchants, ranked by coupon-usage frequency or
# by one of two usage ratios; offline data is additionally distance-filtered.
topn<- function(n=5,linetype="offline",rof="freq",dist=c(0,10)){
  #########calculating part
  #basic para
  resn<-list(1:n)
  resp<-ggplot()
  #judge linetype first
  #offline part
  if (linetype=="offline") {
    #need to consider distance
    # offwc = offline orders where a coupon was received AND used.
    offwc<-filter(offline,!is.na(offline$Date) & offline$Coupon_id!="null" & offline$Distance>=dist[1] & offline$Distance<=dist[2])##distance filter
    #Aggregate merchant_id and order data
    offwc<-offwc %>% group_by(offwc$Merchant_id)%>% summarise(times=n()) %>% arrange(desc(times))
    #rof="freq"
    if (rof=="freq") {
      topndata <- offwc[1:n,]
      resx<-as.list(topndata[,"offwc$Merchant_id"])
      resy<-as.list(topndata[,"times"])
    }
    #rof="ratioiss"
    else if(rof=="ratioiss"){
      #not use
      # offnu = coupons issued but never used (no purchase date).
      offnu<-filter(offline,is.na(offline$Date) & offline$Coupon_id!="null" & offline$Distance>=dist[1] & offline$Distance<=dist[2])##distance filter
      #Aggregate merchant_id and order data
      offnu<-offnu %>% group_by(offnu$Merchant_id)%>% summarise(nutimes=n()) %>% arrange(desc(nutimes))
      #join two tables and calculate ratio then order data
      offnu<-left_join(offwc,offnu,by=c("offwc$Merchant_id"="offnu$Merchant_id"))
      names(offnu)<-c("Merchant_id","times","nutimes")
      offnu$ratioiss<-offnu$times/offnu$nutimes
      offnu<-offnu %>% arrange(desc(ratioiss))
      topndata <- offnu[1:n,]
      resx<-as.list(topndata[,"Merchant_id"])
      resy<-as.list(topndata[,"ratioiss"])
    }
    #rof="ratioconsum"
    else if (rof=="ratioconsum") {
      #consumed but not use
      # offcbnu = purchases made without any coupon.
      offcbnu<-filter(offline,!is.na(offline$Date) & offline$Coupon_id=="null" & offline$Distance>=dist[1] & offline$Distance<=dist[2])##distance filter
      #Aggregate merchant_id and order data
      offcbnu<-offcbnu %>% group_by(offcbnu$Merchant_id)%>% summarise(cbnutimes=n()) %>% arrange(desc(cbnutimes))
      #join two tables and calculate ratio then order data
      offcbnu<-left_join(offwc,offcbnu,by=c("offwc$Merchant_id"="offcbnu$Merchant_id"))
      names(offcbnu)<-c("Merchant_id","times","cbnutimes")
      offcbnu$ratioconsum<-offcbnu$times/offcbnu$cbnutimes
      offcbnu<-offcbnu %>% arrange(desc(ratioconsum))
      topndata <- offcbnu[1:n,]
      resx<-as.list(topndata[,"Merchant_id"])
      resy<-as.list(topndata[,"ratioconsum"])
    }
    else{
      return(resp)
    }
  }
  #online part
  else{
    onwc<-filter(online,!is.na(online$Date) & online$Coupon_id!="null")
    #Aggregate merchant_id and order data
    onwc<-onwc %>% group_by(onwc$Merchant_id)%>% summarise(times=n()) %>% arrange(desc(times))
    #rof="freq"
    if (rof=="freq") {
      topndata <- onwc[1:n,]
      resx<-as.list(topndata[,"onwc$Merchant_id"])
      resy<-as.list(topndata[,"times"])
    }
    #rof="ratioiss"
    else if(rof=="ratioiss"){
      #not use
      onnu<-filter(online,is.na(online$Date) & online$Coupon_id!="null")
      #Aggregate merchant_id and order data
      onnu<-onnu %>% group_by(onnu$Merchant_id)%>% summarise(nutimes=n()) %>% arrange(desc(nutimes))
      #join two tables and calculate ratio then order data
      onnu<-left_join(onwc,onnu,by=c("onwc$Merchant_id"="onnu$Merchant_id"))
      names(onnu)<-c("Merchant_id","times","nutimes")
      onnu$ratioiss<-onnu$times/onnu$nutimes
      onnu<-onnu %>% arrange(desc(ratioiss))
      topndata <- onnu[1:n,]
      resx<-as.list(topndata[,"Merchant_id"])
      resy<-as.list(topndata[,"ratioiss"])
    }
    #rof="ratioconsum"
    else if (rof=="ratioconsum") {
      #consumed but not use
      oncbnu<-filter(online,!is.na(online$Date) & online$Coupon_id=="null" )
      #Aggregate merchant_id and order data
      oncbnu<-oncbnu %>% group_by(oncbnu$Merchant_id)%>% summarise(cbnutimes=n()) %>% arrange(desc(cbnutimes))
      #join two tables and calculate ratio then order data
      oncbnu<-left_join(onwc,oncbnu,by=c("onwc$Merchant_id"="oncbnu$Merchant_id"))
      names(oncbnu)<-c("Merchant_id","times","cbnutimes")
      oncbnu$ratioconsum<-oncbnu$times/oncbnu$cbnutimes
      oncbnu<-oncbnu %>% arrange(desc(ratioconsum))
      topndata <- oncbnu[1:n,]
      resx<-as.list(topndata[,"Merchant_id"])
      resy<-as.list(topndata[,"ratioconsum"])
    }
    else{
      return(resp)
    }
  }
  #######calculating part end
  #######output part
  #out put a dataframe
  res<-data.frame(resn,resx,resy)
  names(res) <- c("resn","resx","resy")
  #out put a plot
  # x axis is the rank (1..n); merchant ids are shown as tick labels.
  resp<-resp+
    geom_bar(data = res,aes(x=resn,y=resy,fill=factor(resx)),stat="identity")+
    xlab("Merchant_id")+
    theme_bw()+
    theme(legend.position = "none")+
    scale_x_continuous(breaks=res$resn,labels = res$resx)
  #rof="ratio" part 2
  if (rof=="ratioiss") {
    resp<- resp + ylab("Coupon useage/Coupon issuance ratio")
  }
  else if (rof=="ratioconsum") {
    resp<- resp + ylab("Coupon useage/Consumption ratio")
  }
  else if (rof=="freq") {
    resp<- resp + ylab("Coupon useage times")
  }
  else{
    return(resp)
  }
  return(resp)
}
###function 2 xulu
# chartCol_xulu(): histogram (numeric column) or bar chart (otherwise) of
# column `colN` of data frame `df`; `b` = number of histogram bins.
chartCol_xulu<-function(df,colN,b=30){
  res<-ggplot(data=df)
  l<-df[,eval(colN)]
  if (is.numeric(l[[1]])) {
    res<-res+ geom_histogram(data=df,mapping = aes_string(x=colN),bins=b)
  }
  else{
    res<-res+ geom_bar(data=df,mapping = aes_string(x=colN))
  }
  res<-res+ theme(axis.text.x=element_text(angle=40,vjust=NULL,hjust = 1))
  return(res)
}
######################################################################################################xulufunction end
##########################################################################xiaofei part1
## XFQ DATA IMPORT
# Distance & Coupon Usage
##offline <- fread("ccf_offline_stage1_train.csv")
##offline$Date_received <- as.Date(offline[["Date_received"]], "%Y%m%d")
##offline$Date <- as.Date(offline[["Date"]], "%Y%m%d")
# l = count of offline coupon redemptions per Distance value.
l <- offline %>%
  filter(Distance!="null") %>%
  filter(!is.na(Date) & !is.na(Coupon_id)) %>%
  mutate(Distance=as.numeric(Distance)) %>%
  group_by(Distance) %>%
  summarise(Frequency = n())
# m = count of all offline coupon-received orders per Distance value.
# NOTE(review): elsewhere in this file "no coupon" is the literal string "null";
# `!is.na(Coupon_id)` may therefore not exclude those rows — TODO confirm.
m <- offline %>%
  filter(Distance!="null") %>%
  filter(!is.na(Coupon_id)) %>%
  mutate(Distance=as.numeric(Distance)) %>%
  group_by(Distance) %>%
  summarise(total = n())
# lm = redemption ratio per distance (joined on the shared Distance column).
lm <- l %>%
  inner_join(m) %>%
  mutate(ratio=Frequency/total)
# Distance_plot(): generic point+line plot of column v2 against column v1.
Distance_plot <- function(data,v1="Distance",v2="Frequency"){
  data <- data[,c(v1,v2)]
  colnames(data) <- c("v1","v2")
  pic <- data %>%
    ggplot(aes(v1,v2))+
    geom_point()+
    geom_line()+
    theme_bw()+
    theme(axis.text = element_text(color='black',size=10))+
    xlab(v1)+ylab(v2)
  return(pic)
}
##########################################################################xiaofei part1 end
#####################################################################################tt part1
# HYT Data Import
# HYT Data Import
offline_Usage_Sent_Ratio_hyt <- read_csv("offline_Usage_Sent_Ratio_hyt.csv")
online_Usage_Sent_Ratio_hyt <- read_csv("online_Usage_Sent_Ratio_hyt.csv")
online_Usage_Total_ratio_hyt <- read_csv("online_Usage_Total_ratio_hyt.csv")
offline_Usage_Total_ratio_hyt <- read_csv("offline_Usage_Total_ratio_hyt.csv")
# HYT function
# Variable 1 "Ratio" "Numeric"
# Variable 2 "Coupon Usage" "Sales-Coupon Usage"
# Variable 3,4,5 "a" "b" "c"
# tsfunc_hyt(): dispatches on (type, df, checkbox selection l) and returns either
# a single ggplot (Ratio views) or list(onlineplot, offlineplot) (Numeric views),
# or an error/"nothing there" string for unsupported selections. The a/b/c flags
# encode which series were picked; their meaning depends on the view
# (Online/Offline for Ratio views, Total/Without/With coupon for Numeric views).
tsfunc_hyt <- function(type,df, l) {
  a <- NULL
  b <- NULL
  c <- NULL
  # Decode the selection vector `l` into the a/b/c flags.
  if (length(l)==3){
    a <- "a"
    b <- "b"
    c <- "c"
    # cat(a)
    # cat(b)
    # cat(c)
  }
  else if (length(l)==2){
    if (sum(c("Online","Offline") %in% l) ==2 | sum(c("Total","Without Coupon") %in% l) == 2){
      a <- "a"
      b <- "b"
    }
    else if (sum(c("Total","With Coupon or Coupon Used") %in% l) == 2){
      a <- "a"
      c <- "c"
    }
    else if(sum(c("Without Coupon","With Coupon or Coupon Used") %in% l) == 2){
      b <- "b"
      c <- "c"
    }
    else {
      return("nothing there")
    }
    # cat(a)
    # cat(b)
    # cat(c)
  }
  else if (length(l)==1){
    if("Online" %in% l | "Total" %in% l){
      a <- "a"
    }
    else if("Offline" %in% l | "Without Coupon" %in% l){
      b <- "b"
    }
    else if("With Coupon or Coupon Used" %in% l){
      c <- "c"
    }
    else {
      return("nothing there")
    }
    # cat(a)
    # cat(b)
    # cat(c)
  }
  else {
    return("error")
  }
  if (type == "Ratio" & df == "Coupon Usage") {
    # a is online
    # b is offline
    # Ration Coupon Usage Graph
    if(!is.null(a) & !is.null(b)){
      # plot online and offline
      ggplot() +
        geom_line(offline_Usage_Sent_Ratio_hyt, mapping = aes(x = Date_received, y = ratio), color = "olivedrab3") +
        geom_line(online_Usage_Sent_Ratio_hyt, mapping = aes(x = Date_received, y = ratio) , color = "skyblue2") +
        xlab("") +
        ylab("Probablity of Coupon Sent and Used") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Coupon Usage Probablity Online vs. Offline") +
        theme_light()
    }else if(!is.null(a) & is.null(b)){
      # plot online
      ggplot() +
        geom_line(online_Usage_Sent_Ratio_hyt, mapping = aes(x = Date_received, y = ratio) , color = "skyblue2") +
        xlab("") +
        ylab("Probablity of Coupon Sent and Used") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Coupon Usage Probablity Online") +
        theme_light()
    } else if(!is.null(b) & is.null(a)){
      # plot offline
      ggplot() +
        geom_line(offline_Usage_Sent_Ratio_hyt, mapping = aes(x = Date_received, y = ratio), color = "olivedrab3") +
        xlab("") +
        ylab("Probablity of Coupon Sent and Used") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Coupon Usage Probablity Offline") +
        theme_light()
    } else {
      # return error
      return("error")
    }
  }
  else if(type == "Ratio" & df == "Sales-Coupon Usage"){
    # a is online
    # b is offline
    # Ration Sales-Coupon Usage Graph
    if(!is.null(a) & !is.null(b)){
      # plot online and offline
      ggplot() +
        geom_line(online_Usage_Total_ratio_hyt,mapping = aes(x = Date, y = ratio), color = "skyblue2") +
        geom_line(offline_Usage_Total_ratio_hyt,mapping = aes(x = Date, y = ratio), color = "olivedrab3") +
        xlab("") +
        ylab("Probablity of Purchase with Coupon") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Purchase with Coupon Probablity Online vs. Offline") +
        theme_light()
    }else if(!is.null(a) & is.null(b)){
      # plot online
      ggplot() +
        geom_line(online_Usage_Total_ratio_hyt,mapping = aes(x = Date, y = ratio), color = "skyblue2") +
        xlab("") +
        ylab("Probablity of Purchase with Coupon") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Purchase with Coupon Probablity Online") +
        theme_light()
    } else if(!is.null(b) & is.null(a)){
      # plot offline
      ggplot() +
        geom_line(offline_Usage_Total_ratio_hyt,mapping = aes(x = Date, y = ratio), color = "olivedrab3") +
        xlab("") +
        ylab("Probablity of Purchase with Coupon") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Purchase with Coupon Probablity Offline") +
        theme_light()
    } else {
      # return error
      return("error")
    }
  }
  else if(type == "Numeric" & df == "Coupon Usage"){
    # a is total
    # b is without coupon
    # c is with coupon
    if(!is.null(a) & !is.null(b) & !is.null(c)){
      # plot all
      # online part
      onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") +
        geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") +
        geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") +
        geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") +
        geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Coupon Usage") +
        theme_light()
      # list the 2 plot out
      list(onlineplot, offlineplot)
    }else if(!is.null(a) & is.null(b) & is.null(c)){
      # plot total
      # online part
      onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Coupon Usage") +
        theme_light()
      # list the 2 plot out
      list(onlineplot, offlineplot)
    } else if(!is.null(b) & is.null(a) & is.null(c)){
      # plot without coupon
      # online part
      onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Coupon Usage") +
        theme_light()
      # list the 2 plot out
      list(onlineplot, offlineplot)
    } else if(!is.null(c) & is.null(a) & is.null(b)){
      # plot with coupon
      # online part
      onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Coupon Usage") +
        theme_light()
      # list the 2 plot out
      list(onlineplot, offlineplot)
    } else if(!is.null(a) & !is.null(b) & is.null(c)){
      # plot total & without coupon
      # online part
      onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") +
        geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") +
        geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Coupon Usage") +
        theme_light()
      # list the 2 plot out
      list(onlineplot, offlineplot)
    } else if(!is.null(a) & !is.null(c) & is.null(b)){
      # plot total & with coupon
      # online part
      onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") +
        geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") +
        geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Coupon Usage") +
        theme_light()
      # list the 2 plot out
      list(onlineplot, offlineplot)
    } else if(!is.null(b) & !is.null(c) & is.null(a)){
      # plot with coupon & without coupon
      # online part
      onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") +
        geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) +
        geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") +
        geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Coupon Usage") +
        theme_light()
      # list the 2 plot out
      list(onlineplot, offlineplot)
    } else {
      # return error
      return("error")
    }
  }
  else if(type == "Numeric" & df == "Sales-Coupon Usage"){
    # a is total
    # b is without coupon
    # c is with coupon
    if(!is.null(a) & !is.null(b) & !is.null(c)){
      # plot all
      # online part
      onlineplot <- ggplot(online_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = total), color = "skyblue2") +
        geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") +
        geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Sales and Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = total), color = "skyblue2") +
        geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") +
        geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Sales and Coupon Usage") +
        theme_light()
      # list the 2 plot
      list(onlineplot, offlineplot)
    }else if(!is.null(a) & is.null(b) & is.null(c)){
      # plot total
      # online part
      onlineplot <- ggplot(online_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = total), color = "skyblue2") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Sales and Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = total), color = "skyblue2") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Sales and Coupon Usage") +
        theme_light()
      # list the 2 plot
      list(onlineplot, offlineplot)
    } else if(!is.null(b) & is.null(a) & is.null(c)){
      # plot without coupon
      # online part
      onlineplot <- ggplot(online_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Sales and Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Sales and Coupon Usage") +
        theme_light()
      # list the 2 plot
      list(onlineplot, offlineplot)
    } else if(!is.null(c) & is.null(a) & is.null(b)){
      # plot with coupon
      # online part
      onlineplot <- ggplot(online_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Sales and Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Sales and Coupon Usage") +
        theme_light()
      # list the 2 plot
      list(onlineplot, offlineplot)
    } else if(!is.null(a) & !is.null(b) & is.null(c)){
      # plot total & without coupon
      # online part
      onlineplot <- ggplot(online_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = total), color = "skyblue2") +
        geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Sales and Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = total), color = "skyblue2") +
        geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Sales and Coupon Usage") +
        theme_light()
      # list the 2 plot
      list(onlineplot, offlineplot)
    } else if(!is.null(a) & !is.null(c) & is.null(b)){
      # plot total & with coupon
      # online part
      onlineplot <- ggplot(online_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = total), color = "skyblue2") +
        geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Sales and Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = total), color = "skyblue2") +
        geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Sales and Coupon Usage") +
        theme_light()
      # list the 2 plot
      list(onlineplot, offlineplot)
    } else if(!is.null(b) & !is.null(c) & is.null(a)){
      # plot with coupon & without coupon
      # online part
      onlineplot <- ggplot(online_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") +
        geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Online Sales and Coupon Usage") +
        theme_light()
      # offline part
      offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) +
        geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") +
        geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") +
        xlab("") +
        ylab("Number of Orders") +
        scale_x_date(date_labels = "%m-%Y") +
        ggtitle("Offline Sales and Coupon Usage") +
        theme_light()
      # list the 2 plot
      list(onlineplot, offlineplot)
    } else {
      # return error
      return("error")
    }
  }
  else {
    # return error
    return("error")
  }
}
# HYT Naive Bayes Data Import & manipulation
offlinevalid.df_hyt <- read_csv("Naive_Bayes_Validation.csv")
offline_test_hyt <- read_csv("Naive_Bayes_Test.csv")
# Control function of the NavieBayes
# input Dataset, choose the validating DF or the test DF
#####################################################################################tt part1 end
# mmss_format(): format seconds as M:SS.ss (body continues below).
mmss_format <- function(x, ...)
{ sec <- x%%60 min <- x%/%60 sec <- base::sprintf("%05.2f", sec) ifelse(min == 0, paste(sec), paste(min, sec, sep = ":")) } button_color_css <- " #DivCompClear, #FinderClear, #EnterTimes{ /* Change the background color of the update button to blue. */ background: DodgerBlue; /* Change the text size to 15 pixels. */ font-size: 15px; }" # Define UI ui <- fluidPage( #Navbar structure for UI navbarPage("O2O Coupon Redemption", theme = shinytheme("lumen"), ##################################################################################################xulu part2 tabPanel("Data Overview", fluid = TRUE, icon = icon("globe-americas"), tags$style(button_color_css), # Sidebar layout with a input and output definitions sidebarLayout( sidebarPanel( titlePanel("Coupon usage condition"), # Select linetype selectInput(inputId = "linetypeFinder_xulu", label = "Choose data sourse (One-thousandth sample)", choices = c("offline"="offline.s","online"="online.s"), selected = "50 Free", width = "220px" ), uiOutput("obs1"), uiOutput("bins"), actionButton( inputId = "reset_xulu", label = "Reset Data", icon = icon("refresh"), width = "100%" ), verbatimTextOutput("aaa") ), mainPanel( fluidRow( column(6, DT::dataTableOutput("dataSet")), column(6, plotOutput( #"plotChart", "dataplot_xulu", width = "100%", height = "300px" )) ), ) ) ), ##########################################################################################xulu overview end ############################################################################################tt prediction # Hyt part Prediction tabPanel("Prediction", fluid = TRUE, icon = icon("connectdevelop",lib = "font-awesome"), titlePanel("Prediction"), sidebarLayout( sidebarPanel( width = 2, selectInput( inputId = "data_set_predic_hyt", label = "Choose the data", choices = c("Validation","Test"), selected = "Validation", width = "100%" ) ), mainPanel( DT::dataTableOutput("nb_predict_table_hyt") ) ) ), 
############################################################################################tt prediction end navbarMenu("Views", icon = icon("chart-bar"), ##############################################################################################tt part2 tabPanel("Time Series Coupon & Sales", fluid = TRUE, tags$style(button_color_css), titlePanel("Time Series Coupon & Sales"), # 默认出现 Numeric Comparison,Coupon Usage的图 # sidebar 包含两个固定位置的下拉单??? TS_Comparison_type_hyt, TS_View_type_hyt # 第三个位置的check box根据TS_View_type_hyt的选择变化 sidebarLayout( sidebarPanel( width = 2, selectInput( inputId = "TS_comparison_type_hyt", label = "What kind of comparision", choices = c("Numeric","Ratio"), selected = "Ratio", width = "100%" ), selectInput( inputId = "TS_view_type_hyt", label = "Choose your analysis perspective", choices = c("Coupon Usage","Sales-Coupon Usage"), selected = "Coupon Usage", width = "100%" ), uiOutput("TS_select_line_hyt") ), # 根据inputId TS_Comparison_type_hyt 决定样式 # Numeric Comparison 两列,Ratio Comparison一??? 
# 需要在server中定义obserevent mainPanel( uiOutput("TS_main_hyt") ) ) ), ##############################################################################################xiaofei part2 ### XFQ PART "Distance & Coupon Usage" tabPanel("Distance & Coupon Usage", fluid = TRUE, titlePanel("Distance & Coupon Usage"), sidebarLayout( sidebarPanel( radioButtons("TYPE", "Choose Graph:", c("Frequency Graph","Ratio Graph","Both") ) ), mainPanel( plotOutput("graph") ) ) ), ##############################################################################################xiaofei part2 end ##############################################################################################Xulu part3 tabPanel("top n merchant", fluid = TRUE, tags$style(button_color_css), titlePanel("Top n merchant"), fluidRow( #Linetype column(4, #selectInput(inputId = "DivCompRaceA", selectInput(inputId = "linetype_xulu", label = "Online or Offline condition", choices = c("online" = "online", "offline" = "offline"))), #rof column(4, #selectInput(inputId = "DivCompRaceA", selectInput(inputId = "rof_xulu", label = "Ranking method ", choices = c("frequency" = "freq", "usage/issuance" = "ratioiss", "usage/consumption" = "ratioconsum"), selected = "frequency")), #n column(4, #sliderInput(inputId = "DivCompRankA", sliderInput(inputId = "n_xulu", label = "select Top n merchant", min = 1, max = 50, value = 5)), # column(4, uiOutput("dist_xulu") ) ), hr(), helpText("Tip:"), br(), fluidRow( column(6, # withSpinner(plotOutput(outputId = "DivCompPlotA", withSpinner(plotOutput(outputId = "topn_xulu" #click = "click_plotDiv" ))), )), #########################################################################################################xulu part3 end #######################################################################################################tj view tabPanel("Days From Receive To Redeem Count Distribution", fluid = TRUE, tags$style(button_color_css), titlePanel("Days From Receive To Redeem Count Distribution"), 
sidebarLayout( sidebarPanel( width = 2, selectInput( inputId = "ViewTypeAtyt", label = "Select type", choices = c("online","offline"), selected = "offline", width = "100%" ), sliderInput(inputId = "ViewRangeAtyt", label = "Redeem Days Range:", min = 0, max = 200, value = c(0,200) ) #selectInput( # inputId = "TS_view_type_hyt", #label = "Choose your analysis perspective", #choices = c("Coupon Usage","Sales-Coupon Usage"), #selected = "Coupon Usage", #width = "100%" #), #uiOutput("TS_select_line_tyt") ), # 根据inputId TS_Comparison_type_hyt 决定样式 # Numeric Comparison 两列,Ratio Comparison一列 # 需要在server???定义obserevent mainPanel( plotOutput("main_tyt") # fluidPage( # uiOutput("main_tyt") # ) ) ) ), ########################################################################## #TYT Continue ###################################################################### tabPanel("Use Rate and Coupon Rate Relationship", fluid = TRUE, tags$style(button_color_css), titlePanel("Use Rate and Coupon Rate Relationship"), sidebarLayout( sidebarPanel( width = 2, selectInput( inputId = "ViewTypeBtyt", label = "Select type", choices = c("online","offline"), selected = "offline", width = "100%" ), checkboxGroupInput( inputId = "ViewDiscountcoltyt", label = "Choose discount percent off category for statistic column chart", choices = c("0%","1%",'2%','3%','5%','10%','20%','25%','50%'), selected = c("10%",'20%'), width = "100%" ), checkboxGroupInput( inputId = "ViewDiscountboxtyt1", label = "Choose discount percent off category for box plot", choices = c("0%","1%",'2%','3%','5%','10%','20%','25%','50%'), selected = c("10%",'20%','50%'), width = "100%" ) #uiOutput("TS_select_line_tyt") ), mainPanel(fluidPage(fluidRow( column(6, plotOutput('column_tyt')), column(6, plotOutput('boxplot_tyt')) )) ) # plotOutput("main_tyt") # fluidPage( # uiOutput("main_tyt") # ) )) ##########################################################################################################tj view end ), 
######################################################################xiaofei more navbarMenu("More", icon = icon("info-circle"), tabPanel("Data Description", fluid = TRUE, fluidRow( column(6, h4(p("Data Description")), HTML('<img src="1.png",width="1000px",height="1000px"'), br(), HTML('<img src="2.png",width="1000px",height="1000px"'), br(), HTML('<img src="3.png",width="1000px",height="1000px"'), br(), )) ), tabPanel("About", fluid = TRUE, fluidRow( column(6, #br(), h4(p("About the Project")), h5(p("As smart phone penetration reaches the hundreds of millions mark, O2O (Online to Offline) requires businesses to have a strong presence both offline and online. APPs with O2O capabilities accumulate daily consumer behaviour and location data that require big data and commercial operations management.")), br(), h5(p("Sending coupons is a general O2O marketing tool used to activate existing customers and attract new ones. While customers are happy to receive coupons that they want, they are frustrated when receiving coupons that they do not need. ")), br(), h5(p("For merchants, sending unwanted coupons may erode brand equity and hinder marketing expense forecasting. 
Targeted marketing is an important technology to increase the coupon redemption rate, providing relevant discounts to customers and effective marketing tools to businesses."))
                   #hr(),
            ),
            column(6,
                   h4(p("About the Author")),
                   h5(p("Yating Tao, Yitao Huang, Xiaofei Qu, Xulu Zhang")
                   ),
                   br()
            )
          ),
          br(),
          hr(),
  )
)
###################################################################### xiaofei "More" menu end
)
)

# Define server ---------------------------------------------------------------
# Server half of the app. Sections, in order:
#   * "Program Finder" -- NOTE(review): references BigTop100 / BigTop100_finder()
#     and swim-team columns ("X.swim_time", "Y2019", ...) that are not defined
#     anywhere in this file; this looks like leftover template code from a
#     different (college-swimming) app -- confirm and remove.
#   * xulu   : sampled-data explorer + top-n merchant plot
#   * hyt    : time-series coupon / sales views
#   * xiaofei: distance vs. coupon-usage plot
#   * tyt    : redeem-days histogram, use-rate column / box plots
server <- function(input, output, session) {
  ################################################################## tt part
  # Program Finder (see NOTE above: appears to be dead template code).
  user_clickFinder <- reactiveValues()

  # NOTE(review): this reactive expression is never assigned to a name or
  # observed, so the initialisation of user_clickFinder$DT inside it never
  # runs -- confirm intent.
  reactive({
    user_clickFinder$DT <- data.frame(matrix(0, ncol = ncol(BigTop100), nrow = 1))
    names(user_clickFinder$DT) <- colnames(BigTop100)
  })

  # Accumulate the rows the user clicks on (newest first).
  observeEvent(input$click_plotFinder, {
    add_row <- nearPoints(BigTop100_finder(), input$click_plotFinder,
                          xvar = "lon", yvar = "lat", threshold = 5)
    user_clickFinder$DT <- rbind(add_row, user_clickFinder$DT)
  })

  # The accumulated click table; req() suppresses output until it is non-trivial.
  brushFinder <- reactive({
    req(length(user_clickFinder$DT) > 1)
    user_clickFinder$DT
  })

  # Clear the accumulated click table.
  observeEvent({
    input$FinderClear
    #input$EnterTimes
  }, {
    user_clickFinder$DT <- NULL
  })

  output$schoolstableFinder <- DT::renderDataTable({
    DT::datatable(
      unique(brushFinder()[, c("Name", "Class", "X.swim_time", "Team",
                               "Relative_RankInEvent_Team", "Division",
                               "Address", "Y2019", "Type", "Time")]),
      colnames = c("Sort" = "Time", "Time" = "X.swim_time",
                   "US News School Ranking" = "Y2019",
                   "School Type" = "Type",
                   "Swimmer Rank In Event On Team" = "Relative_RankInEvent_Team"),
      rownames = FALSE,
      options = list(order = list(9, 'asc'),
                     columnDefs = list(list(visible = FALSE, targets = c(9)),
                                       list(className = "dt-center", targets = 1:7),
                                       # NOTE(review): "classname" is probably a
                                       # typo for "className" -- confirm.
                                       list(classname = "dt-right", targets = 8))
      ))
  })

  #Program Comparisons
  ########################################################### xulu part4
  values <- reactiveValues(tbl = NULL, obsList = NULL, plot.df = NULL)

  # Load the sampled data frame named by the selector ("offline.s" / "online.s")
  # and build the column selector for it.
  observeEvent(input$linetypeFinder_xulu, {
    if (!NA %in%
        match(input$linetypeFinder_xulu, c("offline.s", "online.s"))) {
      values$tbl <- as.data.frame(get(input$linetypeFinder_xulu))
      values$obsList <- colnames(values$tbl)
      output$obs1 <- renderUI({
        selectInput(
          inputId = "observationInput1",
          label = "observation",
          choices = values$obsList
        )
      })
    }
  })

  # Offer a bin-count slider only when the selected column is numeric.
  observeEvent(input$observationInput1, {
    dtf <- values$tbl
    l <- dtf[, eval(input$observationInput1)]
    if (is.numeric(l[[1]])) {
      output$bins <- renderUI({
        sliderInput(
          inputId = "bins",
          label = "Number of bins:",
          min = 1,
          max = 50,
          value = 30)
      })
    }
  }
  )

  # Keep a one-column data frame of the selected column and render the full table.
  observeEvent(input$observationInput1, {
    values$plot.df <- as.data.frame(values$tbl[, input$observationInput1])
    colnames(values$plot.df) <- input$observationInput1
    output$dataSet <- DT::renderDataTable({
      values$tbl
    },
    extensions = c('Scroller', 'FixedColumns'),
    options = list(
      deferRender = TRUE,
      scrollX = TRUE,
      scrollY = 200,
      scroller = TRUE,
      dom = 'Bfrtip',
      fixedColumns = TRUE
    ))
  })

  # NOTE(review): assigning an output inside observe() works but is redundant;
  # a plain renderPlot() assignment would be idiomatic.
  observe({
    output$dataplot_xulu <- renderPlot({
      chartCol_xulu(df = values$plot.df, colN = input$observationInput1, b = input$bins)
    })
  })

  observeEvent(input$reset_xulu, {
    values$tbl <- NULL
    output$obs1 <- NULL
  })

  # NOTE(review): values$obs1 is never assigned anywhere (only values$obsList
  # is), so this renderPrint always shows NULL -- confirm which was intended.
  output$aaa <- renderPrint({
    values$obs1
  })

  # Show the distance slider only for the offline data set (online rows carry
  # no Distance). NOTE(review): eval() on a plain string is a no-op here.
  observeEvent(input$linetype_xulu, {
    if (eval(input$linetype_xulu) == "offline" ) {
      output$dist_xulu <- renderUI({
        sliderInput(
          inputId = "dist_xulu",
          label = "offline merchant distance",
          min = 0,
          max = 15,
          value = c(0, 15))
      })
    }
    else{
      output$dist_xulu <- renderUI({})
    }
  }
  )

  # Top-n merchant bar chart (topn() is defined earlier in this file).
  output$topn_xulu <- renderPlot({
    topn(n = input$n_xulu, linetype = input$linetype_xulu,
         rof = input$rof_xulu, dist = input$dist_xulu)
  })
  ############################################################ xulu server end

  ############################################################ server hyt part
  # Conditional main panel:
  #   "Numeric" -> two stacked plots (online over offline)
  #   "Ratio"   -> one plot
  observeEvent(input$TS_comparison_type_hyt, {
    if (input$TS_comparison_type_hyt == "Numeric") {
      output$TS_main_hyt <- renderUI({
        fluidPage(
          fluidRow(
            plotOutput(
              "TS_online_numeric_hyt",
              width = "100%",
              height = "300px"
            )
          ),
          hr(),
          fluidRow(
            plotOutput(
              "TS_offline_numeric_hyt",
              width = "100%",
              height = "300px"
            )
          )
        )
      })
    } else if(input$TS_comparison_type_hyt == "Ratio"){
      output$TS_main_hyt <- renderUI({
        fluidRow(
          column(12,
                 plotOutput(
                   "TS_ratio_hyt",
                   width = "100%",
                   height = "300px"
                 ))
        )
      })
    } else {
      # NOTE(review): return() inside an observer merely aborts the observer;
      # the string "error" is discarded, not shown to the user.
      return("error")
    }
  })

  # Conditional line-selection checkbox:
  #   "Numeric" -> three options, "Ratio" -> two options.
  observeEvent(input$TS_comparison_type_hyt, {
    if (input$TS_comparison_type_hyt == "Numeric") {
      output$TS_select_line_hyt <- renderUI({
        checkboxGroupInput(
          inputId = "TS_line_CB_numeric_hyt",
          label = "Choose the line to show",
          choices = c("Total", "Without Coupon", "With Coupon or Coupon Used"),
          selected = c("Total", "Without Coupon", "With Coupon or Coupon Used")
        )
      })
    } else if(input$TS_comparison_type_hyt == "Ratio"){
      output$TS_select_line_hyt <- renderUI({
        checkboxGroupInput(
          inputId = "TS_line_CB_ratio_hyt",
          label = "Choose the line to show",
          choices = c("Online", "Offline"),
          selected = c("Online", "Offline")
        )
      })
    } else {
      return("error")
    }
  })

  # Online numeric plot: in "Numeric" mode tsfunc_hyt() returns
  # list(onlineplot, offlineplot); [1] / [2] select one element (still wrapped
  # in a list -- NOTE(review): confirm renderPlot prints it as intended;
  # [[1]] would be the usual form).
  output$TS_online_numeric_hyt <- renderPlot({
    tsfunc_hyt(input$TS_comparison_type_hyt, input$TS_view_type_hyt, input$TS_line_CB_numeric_hyt)[1]
  })

  # Offline numeric plot.
  output$TS_offline_numeric_hyt <- renderPlot({
    tsfunc_hyt(input$TS_comparison_type_hyt, input$TS_view_type_hyt, input$TS_line_CB_numeric_hyt)[2]
  })

  # Ratio plot (single ggplot).
  output$TS_ratio_hyt <- renderPlot({
    tsfunc_hyt(input$TS_comparison_type_hyt, input$TS_view_type_hyt, input$TS_line_CB_ratio_hyt)
  })

  # Naive-bayes prediction table.
  # NOTE(review): offlinevalid.df_hyt / offline_test_hyt and the input
  # data_set_predic_hyt are not defined anywhere visible in this file --
  # confirm they exist before relying on this output.
  output$nb_predict_table_hyt <- renderDataTable({
    if (input$data_set_predic_hyt == "Validation"){
      offlinevalid.df_hyt
    } else {
      offline_test_hyt
    }
  })
  ################################################################ ttserver end

  ########################################################### xiaofei server
  # Distance vs. coupon usage; "Both" overlays ratio and (rescaled) frequency
  # on a secondary axis. print("ok") is a leftover debug trace.
  output$graph <- renderPlot({
    print("ok")
    if(input$TYPE=="Frequency Graph"){
      g1x <- Distance_plot(lm, v1 = "Distance", v2 = "Frequency")
      g1x
    }else if(input$TYPE=="Ratio Graph"){
      g2x <- Distance_plot(lm, v1 = "Distance", v2 = "ratio")
      g2x
    }else if(input$TYPE=="Both"){
      p <- ggplot(lm, aes(x = Distance))
      p <- p + geom_line(aes(y = ratio, colour = "Ratio"))
      # Frequency is scaled down by 500000 so both series share one panel; the
      # secondary axis below maps it back to real counts.
      p <- p + geom_line(aes(y = Frequency/500000, colour = "frequency"))
      p <- p + scale_y_continuous(sec.axis = sec_axis(~.*500000, name = "frequency"))
      p <- p + scale_colour_manual(values = c("blue", "red"))
      p <- p + labs(y = "Ratio", x = "Distance", colour = "Parameter")
      p <- p + theme(legend.position = c(0.8, 0.9))
      p
    }
  })
  ######################################################## xiaofei server end

  ############################################################ TYT server part
  # Redeem-days distribution, online or offline.
  output$main_tyt <- renderPlot({
    print("ok")
    if(input$ViewTypeAtyt=="offline"){
      g1 <- redeemtime(range = input$ViewRangeAtyt, type = "offline")
      g1
    }else{
      g2 <- redeemtime(range = input$ViewRangeAtyt, type = "online")
      g2
    }
  })

  # Use-rate column chart faceted by discount category.
  output$column_tyt <- renderPlot({
    print('ok')
    g3 <- UseRateCouponRatecol(discount_col = input$ViewDiscountcoltyt, type = input$ViewTypeBtyt)
    g3
  })

  # Use-rate box plot by discount category.
  output$boxplot_tyt <- renderPlot({
    print('ok')
    g4 <- UseRateCouponRatebox(discount_box = input$ViewDiscountboxtyt1, type = input$ViewTypeBtyt)
    g4
  })
  #TYT server part end
  #session$onSessionEnded(stopApp)
}

# Run the application
shinyApp(ui = ui, server = server)
/app.R
no_license
07misu/R-shiny-app-Visualization-of-Coupon-usage
R
false
false
58,781
r
# -----------------------------------------------------------------------------
# Package setup and raw data import (xulu), plus TYT pre-processing that
# derives the redeemed-coupon tables used by redeemtime() and the offline
# use-rate table used by UseRateCouponRatecol() / UseRateCouponRatebox().
# -----------------------------------------------------------------------------
library(dplyr)
library(ggplot2)
library(shiny)
library(DT)
library(ggrepel)
library(tidyr)
library(shinycssloaders)
library(shinythemes)
library(readr)
library(data.table)
library(tidyverse)
library(e1071)
library(caret)
library(lattice)
library(stringr)
library(hablar)
library(scales)

#Import Data
############################################################### xulu data
offline <- read.csv("ccf_offline_stage1_train.csv")
online <- read.csv("ccf_online_stage1_train.csv")

# Change data format to date (raw columns are yyyymmdd integers).
offline$Date_received <- as.Date(offline[["Date_received"]], "%Y%m%d")
offline$Date <- as.Date(offline[["Date"]], "%Y%m%d")
online$Date_received <- as.Date(online[["Date_received"]], "%Y%m%d")
online$Date <- as.Date(online[["Date"]], "%Y%m%d")
offline$Distance <- as.integer(offline[["Distance"]])

# 0.1% row samples used by the interactive data explorer (seeded so the
# sample is reproducible across app restarts).
set.seed(1)
online.s <- sample(row.names(online), 0.001*dim(online)[1])
offline.s <- sample(row.names(offline), 0.001*dim(offline)[1])
online.s <- online[online.s, ]
offline.s <- offline[offline.s, ]
############################################################### xulu data end

############################################### TYT data import and processing
### offline: rows where a coupon was received AND the purchase happened.
# NOTE(review): Coupon_id != "null" assumes the raw CSV encodes missing coupon
# ids as the literal string "null" -- confirm against the data file.
redeemed_offline <- offline
redeemed_offline <- redeemed_offline %>%
  filter(!is.na(redeemed_offline$Date) & offline$Coupon_id != "null")
# Days between receiving the coupon and redeeming it (a difftime column).
redeemed_offline$RedeemDays <- redeemed_offline$Date - redeemed_offline$Date_received

### online: same derivation.
redeemed_online <- online
redeemed_online <- redeemed_online %>%
  filter(!is.na(redeemed_online$Date) & online$Coupon_id != "null")
redeemed_online$RedeemDays <- redeemed_online$Date - redeemed_online$Date_received

############################# Continue TYT part: use rate vs. coupon rate
### offline: all coupons received (whether redeemed or not).
getcpOffline <- offline %>%
  filter(!is.na(offline$Date_received) & offline$Coupon_id != "null")
# use_rate = redeemed count / received count, per discount label.
getcpOffline %>% count(Discount_rate) -> a
redeemed_offline %>% count(Discount_rate) -> b
merge(a, b, by = 'Discount_rate') -> result_Offline
result_Offline$use_rate <- result_Offline$n.y/result_Offline$n.x
names(result_Offline)[3] <- 'redNum'
names(result_Offline)[2] <- 'getcpNum'

# Convert each Discount_rate label to a "percent off" fraction:
#   "x:y"  -> y/x      (spend x, get y off)
#   "0.95" -> 1 - 0.95 (direct discount rate)
# FIX: the original tested `if (re > 1)`, comparing a character vector of
# length 1 or 2 lexicographically against 1 -- an error under R >= 4.2 when
# the label splits into two parts, and a string comparison (not numeric) in
# older versions. The intended test is whether the label split in two.
ratio1 <- lapply(result_Offline$Discount_rate, function(x){
  re <- unlist(str_split(x, ":"))
  if (length(re) > 1){
    r <- as.numeric(re[2])/as.numeric(re[1])
  } else{
    # NOTE(review): non-numeric single labels (e.g. "fixed") become NA here
    # with a coercion warning -- confirm whether such labels occur offline.
    r <- 1 - as.numeric(re)
  }
})
result_Offline$discount_percent_off <- unlist(ratio1)

# Order by discount fraction, then turn it into a "xx%" factor for plotting.
sorted_discountOff <- result_Offline[order(result_Offline$discount_percent_off), ]
sorted_discountOff$discount_percent_off <- percent(sorted_discountOff$discount_percent_off)
sorted_discountOff$discount_percent_off <- as.factor(sorted_discountOff[["discount_percent_off"]])

### online: same aggregation (ratio conversion continues below).
getcpOnline <- online %>%
  filter(!is.na(online$Date_received) & online$Coupon_id != "null")
getcpOnline %>% count(Discount_rate) -> a
redeemed_online %>% count(Discount_rate) -> b
merge(a, b, by = 'Discount_rate') -> result_Online
# use_rate = redeemed count / received count, per online discount label.
result_Online$use_rate <- result_Online$n.y/result_Online$n.x
names(result_Online)[3] <- 'redNum'
names(result_Online)[2] <- 'getcpNum'

# Online labels are assumed to all be "x:y" (spend x, get y off).
# NOTE(review): any other label (e.g. "fixed") becomes NA with a coercion
# warning -- confirm against the online data file.
ratio <- lapply(result_Online$Discount_rate, function(x){
  re <- unlist(str_split(x, ":"))
  r <- as.numeric(re[2])/as.numeric(re[1])
})
result_Online$discount_percent_off <- unlist(ratio)
sorted_discount <- result_Online[order(result_Online$discount_percent_off), ]
sorted_discount$discount_percent_off <- percent(sorted_discount$discount_percent_off)
sorted_discount$discount_percent_off <- as.factor(sorted_discount[["discount_percent_off"]])
############################################################## tyt data end

############################################################ function yating
# Histogram of redeem delays (days between receiving and redeeming a coupon),
# restricted to `range`, with a smoothed trend drawn over the bin counts.
#   range : c(min, max) redeem-days window
#   type  : "offline" or anything else for online
# Returns a ggplot object.
redeemtime <- function(range = c(0, 50), type = "offline"){
  resptyt <- ggplot()
  # Pick the pre-computed redeemed table and restrict to the requested window.
  if (type == "offline"){
    red <- filter(redeemed_offline,
                  redeemed_offline$RedeemDays >= range[1] &
                    redeemed_offline$RedeemDays <= range[2])
    resx <- red$RedeemDays
  } else{
    red <- filter(redeemed_online,
                  redeemed_online$RedeemDays >= range[1] &
                    redeemed_online$RedeemDays <= range[2])
    resx <- red$RedeemDays
  }
  # Bin the delays, then smooth the resulting bin heights (layer_data pulls
  # the computed x/y of the histogram layer).
  resptyt <- resptyt + stat_bin(data = red, aes(x = resx))
  m <- layer_data(resptyt)
  resptyt <- resptyt + stat_smooth(data = m, aes(x, y)) + xlab("Redeem Days")
  return(resptyt)
}

# Column chart of coupon use rate per discount label, faceted by the selected
# "percent off" categories.
#   discount_col : character vector of "xx%" categories (checkbox selection)
#   type         : "offline" or anything else for online
# Returns a ggplot object.
UseRateCouponRatecol <- function(discount_col = c('20%','10%'), type = "offline"){
  resptyt <- ggplot()
  # FIX: a checkboxGroupInput with nothing ticked yields NULL; the original
  # code then built an empty data frame and geom_col() errored. Return the
  # empty plot instead.
  if (is.null(discount_col)){
    return(resptyt)
  }
  ## offline part
  if (type == "offline"){
    percent_offline <- data.frame()
    for (i in discount_col){
      percent_offline <- rbind(percent_offline,
                               sorted_discountOff[sorted_discountOff$discount_percent_off == i, ])
    }
    percent_offline <- na.omit(percent_offline)
    resptyt <- resptyt +
      geom_col(data = percent_offline, mapping = aes(x = Discount_rate, y = use_rate)) +
      facet_wrap( ~ discount_percent_off, nrow = 2) +
      theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5)) +
      xlab("Discount rate") +
      ylab("Coupon Use Rate")
  }
  ### online part
  else{
    percent_online <- data.frame()
    for (i in discount_col){
      percent_online <- rbind(percent_online,
                              sorted_discount[sorted_discount$discount_percent_off == i, ])
    }
    percent_online <- na.omit(percent_online)
    resptyt <- resptyt +
      geom_col(data = percent_online, mapping = aes(x = Discount_rate, y = use_rate)) +
      facet_wrap( ~ discount_percent_off, nrow = 2) +
      theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5)) +
      xlab("Discount rate") +
      ylab("Coupon Use Rate")
  }
  return(resptyt)
}

# Box plot of coupon use rate by "percent off" category. With no selection the
# full category set is plotted.
#   discount_box : character vector of "xx%" categories, or NULL/NA for all
#   type         : "offline" or anything else for online
# Returns a ggplot object.
UseRateCouponRatebox <- function(discount_box = c('1%','17%','5%', '67%'), type = 'offline'){
  resptyt <- ggplot()
  # FIX: the original guard was `if (is.na(discount_box))`. A checkboxGroupInput
  # with nothing ticked passes NULL, making the condition logical(0) and raising
  # "argument is of length zero"; with the length-4 default it is a length > 1
  # condition, an error under R >= 4.2. Treat NULL / all-NA as "plot everything".
  no_selection <- is.null(discount_box) || all(is.na(discount_box))
  if(type == 'offline'){
    if(no_selection){
      resptyt <- resptyt +
        geom_boxplot(data = sorted_discountOff,
                     mapping = aes(x = discount_percent_off, y = use_rate)) +
        theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5)) +
        xlab("Discount Percent Off of Coupon") +
        ylab("Coupon Use Rate")
    }
    else{
      discount_offline <- data.frame()
      for (i in discount_box){
        discount_offline <- rbind(discount_offline,
                                  sorted_discountOff[sorted_discountOff$discount_percent_off == i, ])
      }
      discount_offline <- na.omit(discount_offline)
      resptyt <- resptyt +
        geom_boxplot(data = discount_offline,
                     mapping = aes(x = discount_percent_off, y = use_rate)) +
        theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5)) +
        xlab("Discount Percent Off of Coupon") +
        ylab("Coupon Use Rate")
    }
  }
  else{
    if(no_selection){
      resptyt <- resptyt +
        geom_boxplot(data = sorted_discount,
                     mapping = aes(x = discount_percent_off, y = use_rate)) +
        theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5)) +
        xlab("Discount Percent Off of Coupon") +
        ylab("Coupon Use Rate")
    }
    else{
      discount_online <- data.frame()
      for (i in discount_box){
        discount_online <- rbind(discount_online,
                                 sorted_discount[sorted_discount$discount_percent_off == i, ])
      }
      discount_online <- na.omit(discount_online)
      resptyt <- resptyt +
        geom_boxplot(data = discount_online,
                     mapping = aes(x = discount_percent_off, y = use_rate)) +
        theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5)) +
        xlab("Discount Percent Off of Coupon") +
        ylab("Coupon Use Rate")
    }
  }
  return(resptyt)
}
######################################################## function yating end

############################################################## function xulu
##### function 1 xulu
# Bar chart of the top-n merchants by coupon usage.
#   n        : how many merchants to show
#   linetype : "offline" (distance filter applies) or anything else for online
#   rof      : ranking metric -- "freq" (redemption count),
#              "ratioiss" (redeemed / issued-but-unused),
#              "ratioconsum" (redeemed / consumed-without-coupon)
#   dist     : c(min, max) merchant distance window (offline only)
# Returns a ggplot object (empty plot for an unknown `rof`).
topn <- function(n = 5, linetype = "offline", rof = "freq", dist = c(0, 10)){
  ######### calculating part
  # basic para
  resn <- list(1:n)
  resp <- ggplot()
  # judge linetype first
  # offline part
  if (linetype == "offline") {
    # redeemed offline coupons within the distance window
    offwc <- filter(offline, !is.na(offline$Date) & offline$Coupon_id != "null" &
                      offline$Distance >= dist[1] & offline$Distance <= dist[2])
    # Aggregate by merchant and order by redemption count.
    offwc <- offwc %>%
      group_by(offwc$Merchant_id) %>%
      summarise(times = n()) %>%
      arrange(desc(times))
    # rof = "freq"
    if (rof == "freq") {
      topndata <- offwc[1:n, ]
      resx <- as.list(topndata[, "offwc$Merchant_id"])
      resy <- as.list(topndata[, "times"])
    }
    # rof = "ratioiss": redeemed / issued-but-unused
    else if(rof == "ratioiss"){
      # coupons received but never used
      offnu <- filter(offline, is.na(offline$Date) & offline$Coupon_id != "null" &
                        offline$Distance >= dist[1] & offline$Distance <= dist[2])
      offnu <- offnu %>%
        group_by(offnu$Merchant_id) %>%
        summarise(nutimes = n()) %>%
        arrange(desc(nutimes))
      # join, compute the ratio, re-rank
      offnu <- left_join(offwc, offnu, by = c("offwc$Merchant_id" = "offnu$Merchant_id"))
      names(offnu) <- c("Merchant_id", "times", "nutimes")
      offnu$ratioiss <- offnu$times/offnu$nutimes
      offnu <- offnu %>% arrange(desc(ratioiss))
      topndata <- offnu[1:n, ]
      resx <- as.list(topndata[, "Merchant_id"])
      resy <- as.list(topndata[, "ratioiss"])
    }
    # rof = "ratioconsum": redeemed / consumed-without-coupon
    else if (rof == "ratioconsum") {
      # purchases made without any coupon
      offcbnu <- filter(offline, !is.na(offline$Date) & offline$Coupon_id == "null" &
                          offline$Distance >= dist[1] & offline$Distance <= dist[2])
      offcbnu <- offcbnu %>%
        group_by(offcbnu$Merchant_id) %>%
        summarise(cbnutimes = n()) %>%
        arrange(desc(cbnutimes))
      offcbnu <- left_join(offwc, offcbnu, by = c("offwc$Merchant_id" = "offcbnu$Merchant_id"))
      names(offcbnu) <- c("Merchant_id", "times", "cbnutimes")
      offcbnu$ratioconsum <- offcbnu$times/offcbnu$cbnutimes
      offcbnu <- offcbnu %>% arrange(desc(ratioconsum))
      topndata <- offcbnu[1:n, ]
      resx <- as.list(topndata[, "Merchant_id"])
      resy <- as.list(topndata[, "ratioconsum"])
    }
    else{
      return(resp)
    }
  }
  # online part (no distance column online, so no distance filter)
  else{
    onwc <- filter(online, !is.na(online$Date) & online$Coupon_id != "null")
    onwc <- onwc %>%
      group_by(onwc$Merchant_id) %>%
      summarise(times = n()) %>%
      arrange(desc(times))
    # rof = "freq"
    if (rof == "freq") {
      topndata <- onwc[1:n, ]
      resx <- as.list(topndata[, "onwc$Merchant_id"])
      resy <- as.list(topndata[, "times"])
    }
    # rof = "ratioiss"
    else if(rof == "ratioiss"){
      onnu <- filter(online, is.na(online$Date) & online$Coupon_id != "null")
      onnu <- onnu %>%
        group_by(onnu$Merchant_id) %>%
        summarise(nutimes = n()) %>%
        arrange(desc(nutimes))
      onnu <- left_join(onwc, onnu, by = c("onwc$Merchant_id" = "onnu$Merchant_id"))
      names(onnu) <- c("Merchant_id", "times", "nutimes")
      onnu$ratioiss <- onnu$times/onnu$nutimes
      onnu <- onnu %>% arrange(desc(ratioiss))
      topndata <- onnu[1:n, ]
      resx <- as.list(topndata[, "Merchant_id"])
      resy <- as.list(topndata[, "ratioiss"])
    }
    # rof = "ratioconsum"
    else if (rof == "ratioconsum") {
      oncbnu <- filter(online, !is.na(online$Date) & online$Coupon_id == "null" )
      oncbnu <- oncbnu %>%
        group_by(oncbnu$Merchant_id) %>%
        summarise(cbnutimes = n()) %>%
        arrange(desc(cbnutimes))
      oncbnu <- left_join(onwc, oncbnu, by = c("onwc$Merchant_id" = "oncbnu$Merchant_id"))
      names(oncbnu) <- c("Merchant_id", "times", "cbnutimes")
      oncbnu$ratioconsum <- oncbnu$times/oncbnu$cbnutimes
      oncbnu <- oncbnu %>% arrange(desc(ratioconsum))
      topndata <- oncbnu[1:n, ]
      resx <- as.list(topndata[, "Merchant_id"])
      resy <- as.list(topndata[, "ratioconsum"])
    }
    else{
      return(resp)
    }
  }
  ####### calculating part end

  ####### output part
  # rank / merchant id / metric value as plotting columns
  res <- data.frame(resn, resx, resy)
  names(res) <- c("resn", "resx", "resy")
  # Bars placed at rank position, labelled with the merchant id.
  resp <- resp +
    geom_bar(data = res, aes(x = resn, y = resy, fill = factor(resx)), stat = "identity") +
    xlab("Merchant_id") +
    theme_bw() +
    theme(legend.position = "none") +
    scale_x_continuous(breaks = res$resn, labels = res$resx)
  # y-axis label per metric
  # NOTE(review): "useage" in the labels below is a user-visible typo
  # ("usage") -- confirm before changing the displayed text.
  if (rof == "ratioiss") {
    resp <- resp + ylab("Coupon useage/Coupon issuance ratio")
  }
  else if (rof == "ratioconsum") {
    resp <- resp + ylab("Coupon useage/Consumption ratio")
  }
  else if (rof == "freq") {
    resp <- resp + ylab("Coupon useage times")
  }
  else{
    return(resp)
  }
  return(resp)
}

### function 2 xulu
# Histogram (numeric column, `b` bins) or bar chart (categorical column) of a
# single column `colN` of data frame `df`.
# NOTE(review): aes_string() is deprecated in current ggplot2; consider
# aes(.data[[colN]]) when the package floor allows.
chartCol_xulu <- function(df, colN, b = 30){
  res <- ggplot(data = df)
  l <- df[, eval(colN)]
  if (is.numeric(l[[1]])) {
    res <- res + geom_histogram(data = df, mapping = aes_string(x = colN), bins = b)
  }
  else{
    res <- res + geom_bar(data = df, mapping = aes_string(x = colN))
  }
  res <- res + theme(axis.text.x = element_text(angle = 40, vjust = NULL, hjust = 1))
  return(res)
}
########################################################## xulu function end

########################################################## xiaofei part1
## XFQ DATA IMPORT
# Distance & Coupon Usage: per-distance redemption counts (l), per-distance
# coupon-received counts (m), and their ratio (lm).
# NOTE(review): the name `lm` masks stats::lm() for the rest of this file.
l <- offline %>%
  filter(Distance != "null") %>%
filter(!is.na(Date) & !is.na(Coupon_id)) %>% mutate(Distance=as.numeric(Distance)) %>% group_by(Distance) %>% summarise(Frequency = n()) m <- offline %>% filter(Distance!="null") %>% filter(!is.na(Coupon_id)) %>% mutate(Distance=as.numeric(Distance)) %>% group_by(Distance) %>% summarise(total = n()) lm <- l %>% inner_join(m) %>% mutate(ratio=Frequency/total) Distance_plot <- function(data,v1="Distance",v2="Frequency"){ data <- data[,c(v1,v2)] colnames(data) <- c("v1","v2") pic <- data %>% ggplot(aes(v1,v2))+ geom_point()+ geom_line()+ theme_bw()+ theme(axis.text = element_text(color='black',size=10))+ xlab(v1)+ylab(v2) return(pic) } ##########################################################################xiaofei part1 end #####################################################################################tt part1 # HYT Data Import # HYT Data Import offline_Usage_Sent_Ratio_hyt <- read_csv("offline_Usage_Sent_Ratio_hyt.csv") online_Usage_Sent_Ratio_hyt <- read_csv("online_Usage_Sent_Ratio_hyt.csv") online_Usage_Total_ratio_hyt <- read_csv("online_Usage_Total_ratio_hyt.csv") offline_Usage_Total_ratio_hyt <- read_csv("offline_Usage_Total_ratio_hyt.csv") # HYT function # Variable 1 "Ratio" "Numeric" # Variable 2 "Coupon Usage" "Sales-Coupon Usage" # Variable 3,4,5 "a" "b" "c" tsfunc_hyt <- function(type,df, l) { a <- NULL b <- NULL c <- NULL if (length(l)==3){ a <- "a" b <- "b" c <- "c" # cat(a) # cat(b) # cat(c) } else if (length(l)==2){ if (sum(c("Online","Offline") %in% l) ==2 | sum(c("Total","Without Coupon") %in% l) == 2){ a <- "a" b <- "b" } else if (sum(c("Total","With Coupon or Coupon Used") %in% l) == 2){ a <- "a" c <- "c" } else if(sum(c("Without Coupon","With Coupon or Coupon Used") %in% l) == 2){ b <- "b" c <- "c" } else { return("nothing there") } # cat(a) # cat(b) # cat(c) } else if (length(l)==1){ if("Online" %in% l | "Total" %in% l){ a <- "a" } else if("Offline" %in% l | "Without Coupon" %in% l){ b <- "b" } else if("With Coupon or Coupon Used" %in% 
l){ c <- "c" } else { return("nothing there") } # cat(a) # cat(b) # cat(c) } else { return("error") } if (type == "Ratio" & df == "Coupon Usage") { # a is online # b is offline # Ration Coupon Usage Graph if(!is.null(a) & !is.null(b)){ # plot online and offline ggplot() + geom_line(offline_Usage_Sent_Ratio_hyt, mapping = aes(x = Date_received, y = ratio), color = "olivedrab3") + geom_line(online_Usage_Sent_Ratio_hyt, mapping = aes(x = Date_received, y = ratio) , color = "skyblue2") + xlab("") + ylab("Probablity of Coupon Sent and Used") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Coupon Usage Probablity Online vs. Offline") + theme_light() }else if(!is.null(a) & is.null(b)){ # plot online ggplot() + geom_line(online_Usage_Sent_Ratio_hyt, mapping = aes(x = Date_received, y = ratio) , color = "skyblue2") + xlab("") + ylab("Probablity of Coupon Sent and Used") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Coupon Usage Probablity Online") + theme_light() } else if(!is.null(b) & is.null(a)){ # plot offline ggplot() + geom_line(offline_Usage_Sent_Ratio_hyt, mapping = aes(x = Date_received, y = ratio), color = "olivedrab3") + xlab("") + ylab("Probablity of Coupon Sent and Used") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Coupon Usage Probablity Offline") + theme_light() } else { # return erro return("error") } } else if(type == "Ratio" & df == "Sales-Coupon Usage"){ # a is online # b is offline # Ration Sales-Coupon Usage Graph if(!is.null(a) & !is.null(b)){ # plot online and offline ggplot() + geom_line(online_Usage_Total_ratio_hyt,mapping = aes(x = Date, y = ratio), color = "skyblue2") + geom_line(offline_Usage_Total_ratio_hyt,mapping = aes(x = Date, y = ratio), color = "olivedrab3") + xlab("") + ylab("Probablity of Purchase with Coupon") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Purchase with Coupon Probablity Online vs. 
Offline") + theme_light() }else if(!is.null(a) & is.null(b)){ # plot online ggplot() + geom_line(online_Usage_Total_ratio_hyt,mapping = aes(x = Date, y = ratio), color = "skyblue2") + xlab("") + ylab("Probablity of Purchase with Coupon") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Purchase with Coupon Probablity Online") + theme_light() } else if(!is.null(b) & is.null(a)){ # plot offline ggplot() + geom_line(offline_Usage_Total_ratio_hyt,mapping = aes(x = Date, y = ratio), color = "olivedrab3") + xlab("") + ylab("Probablity of Purchase with Coupon") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Purchase with Coupon Probablity Offline") + theme_light() } else { # return error return("error") } } else if(type == "Numeric" & df == "Coupon Usage"){ # a is total # b is without coupon # c is with coupon if(!is.null(a) & !is.null(b) & !is.null(c)){ # plot all # online part onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") + geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") + geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") + geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") + geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Coupon Usage") + theme_light() # list the 2 plot out list(onlineplot, offlineplot) }else if(!is.null(a) & is.null(b) & is.null(c)){ # plot total # online part onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") + xlab("") + ylab("Number of Orders") + 
scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Coupon Usage") + theme_light() # list the 2 plot out list(onlineplot, offlineplot) } else if(!is.null(b) & is.null(a) & is.null(c)){ # plot without coupon # online part onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Coupon Usage") + theme_light() # list the 2 plot out list(onlineplot, offlineplot) } else if(!is.null(c) & is.null(a) & is.null(b)){ # plot with coupon # online part onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Coupon Usage") + theme_light() # list the 2 plot out list(onlineplot, offlineplot) } else if(!is.null(a) & !is.null(b) & is.null(c)){ # plot total & without coupon # online part onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") + geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") 
+ xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") + geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Coupon Usage") + theme_light() # list the 2 plot out list(onlineplot, offlineplot) } else if(!is.null(a) & !is.null(c) & is.null(b)){ # plot total & with coupon # online part onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") + geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_sent), color = "skyblue2") + geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Coupon Usage") + theme_light() # list the 2 plot out list(onlineplot, offlineplot) } else if(!is.null(b) & !is.null(c) & is.null(a)){ # plot with coupon $ without coupon # online part onlineplot <- ggplot(online_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") + geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Sent_Ratio_hyt) + geom_line(aes(x = Date_received, y = Coupon_used), color = "olivedrab3") + geom_line(aes(x = Date_received, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of 
Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Coupon Usage") + theme_light() # list the 2 plot out list(onlineplot, offlineplot) } else { # return error return("error") } } else if(type == "Numeric" & df == "Sales-Coupon Usage"){ # a is total # b is without coupon # c is with coupon if(!is.null(a) & !is.null(b) & !is.null(c)){ # plot all # online part onlineplot <- ggplot(online_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = total), color = "skyblue2") + geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") + geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Sales and Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = total), color = "skyblue2") + geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") + geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Sales and Coupon Usage") + theme_light() # list the 2 plot list(onlineplot, offlineplot) }else if(!is.null(a) & is.null(b) & is.null(c)){ # plot total # online part onlineplot <- ggplot(online_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = total), color = "skyblue2") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Sales and Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = total), color = "skyblue2") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Sales and Coupon Usage") + theme_light() # list the 2 plot list(onlineplot, offlineplot) } else if(!is.null(b) & is.null(a) & is.null(c)){ # plot without coupon # online part onlineplot <- ggplot(online_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = Coupon_no), 
color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Sales and Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Sales and Coupon Usage") + theme_light() # list the 2 plot list(onlineplot, offlineplot) } else if(!is.null(c) & is.null(a) & is.null(b)){ # plot with coupon # online part onlineplot <- ggplot(online_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Sales and Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Sales and Coupon Usage") + theme_light() # list the 2 plot list(onlineplot, offlineplot) } else if(!is.null(a) & !is.null(b) & is.null(c)){ # plot total & without coupon # online part onlineplot <- ggplot(online_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = total), color = "skyblue2") + geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Sales and Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = total), color = "skyblue2") + geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Sales and Coupon Usage") + theme_light() # list the 2 plot list(onlineplot, offlineplot) } else if(!is.null(a) & !is.null(c) & is.null(b)){ # plot total & with coupon # online part 
onlineplot <- ggplot(online_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = total), color = "skyblue2") + geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Sales and Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = total), color = "skyblue2") + geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Sales and Coupon Usage") + theme_light() # list the 2 plot list(onlineplot, offlineplot) } else if(!is.null(b) & !is.null(c) & is.null(a)){ # plot with coupon $ without coupon # online part onlineplot <- ggplot(online_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") + geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Online Sales and Coupon Usage") + theme_light() # offline part offlineplot <- ggplot(offline_Usage_Total_ratio_hyt) + geom_line(aes(x = Date, y = Coupon_used), color = "olivedrab3") + geom_line(aes(x = Date, y = Coupon_no), color = "indianred1") + xlab("") + ylab("Number of Orders") + scale_x_date(date_labels = "%m-%Y") + ggtitle("Offline Sales and Coupon Usage") + theme_light() # list the 2 plot list(onlineplot, offlineplot) } else { # return error return("error") } } else { # return error return("error") } } # HYT Naive Bayes Data Import & manipulation offlinevalid.df_hyt <- read_csv("Naive_Bayes_Validation.csv") offline_test_hyt <- read_csv("Naive_Bayes_Test.csv") # Control function of the NavieBayes # input Dataset, choose the validating DF or the test DF #####################################################################################tt part1 end mmss_format <- function(x, ...) 
{ sec <- x%%60 min <- x%/%60 sec <- base::sprintf("%05.2f", sec) ifelse(min == 0, paste(sec), paste(min, sec, sep = ":")) } button_color_css <- " #DivCompClear, #FinderClear, #EnterTimes{ /* Change the background color of the update button to blue. */ background: DodgerBlue; /* Change the text size to 15 pixels. */ font-size: 15px; }" # Define UI ui <- fluidPage( #Navbar structure for UI navbarPage("O2O Coupon Redemption", theme = shinytheme("lumen"), ##################################################################################################xulu part2 tabPanel("Data Overview", fluid = TRUE, icon = icon("globe-americas"), tags$style(button_color_css), # Sidebar layout with a input and output definitions sidebarLayout( sidebarPanel( titlePanel("Coupon usage condition"), # Select linetype selectInput(inputId = "linetypeFinder_xulu", label = "Choose data sourse (One-thousandth sample)", choices = c("offline"="offline.s","online"="online.s"), selected = "50 Free", width = "220px" ), uiOutput("obs1"), uiOutput("bins"), actionButton( inputId = "reset_xulu", label = "Reset Data", icon = icon("refresh"), width = "100%" ), verbatimTextOutput("aaa") ), mainPanel( fluidRow( column(6, DT::dataTableOutput("dataSet")), column(6, plotOutput( #"plotChart", "dataplot_xulu", width = "100%", height = "300px" )) ), ) ) ), ##########################################################################################xulu overview end ############################################################################################tt prediction # Hyt part Prediction tabPanel("Prediction", fluid = TRUE, icon = icon("connectdevelop",lib = "font-awesome"), titlePanel("Prediction"), sidebarLayout( sidebarPanel( width = 2, selectInput( inputId = "data_set_predic_hyt", label = "Choose the data", choices = c("Validation","Test"), selected = "Validation", width = "100%" ) ), mainPanel( DT::dataTableOutput("nb_predict_table_hyt") ) ) ), 
############################################################################################tt prediction end navbarMenu("Views", icon = icon("chart-bar"), ##############################################################################################tt part2 tabPanel("Time Series Coupon & Sales", fluid = TRUE, tags$style(button_color_css), titlePanel("Time Series Coupon & Sales"), # 默认出现 Numeric Comparison,Coupon Usage的图 # sidebar 包含两个固定位置的下拉单??? TS_Comparison_type_hyt, TS_View_type_hyt # 第三个位置的check box根据TS_View_type_hyt的选择变化 sidebarLayout( sidebarPanel( width = 2, selectInput( inputId = "TS_comparison_type_hyt", label = "What kind of comparision", choices = c("Numeric","Ratio"), selected = "Ratio", width = "100%" ), selectInput( inputId = "TS_view_type_hyt", label = "Choose your analysis perspective", choices = c("Coupon Usage","Sales-Coupon Usage"), selected = "Coupon Usage", width = "100%" ), uiOutput("TS_select_line_hyt") ), # 根据inputId TS_Comparison_type_hyt 决定样式 # Numeric Comparison 两列,Ratio Comparison一??? 
# 需要在server中定义obserevent mainPanel( uiOutput("TS_main_hyt") ) ) ), ##############################################################################################xiaofei part2 ### XFQ PART "Distance & Coupon Usage" tabPanel("Distance & Coupon Usage", fluid = TRUE, titlePanel("Distance & Coupon Usage"), sidebarLayout( sidebarPanel( radioButtons("TYPE", "Choose Graph:", c("Frequency Graph","Ratio Graph","Both") ) ), mainPanel( plotOutput("graph") ) ) ), ##############################################################################################xiaofei part2 end ##############################################################################################Xulu part3 tabPanel("top n merchant", fluid = TRUE, tags$style(button_color_css), titlePanel("Top n merchant"), fluidRow( #Linetype column(4, #selectInput(inputId = "DivCompRaceA", selectInput(inputId = "linetype_xulu", label = "Online or Offline condition", choices = c("online" = "online", "offline" = "offline"))), #rof column(4, #selectInput(inputId = "DivCompRaceA", selectInput(inputId = "rof_xulu", label = "Ranking method ", choices = c("frequency" = "freq", "usage/issuance" = "ratioiss", "usage/consumption" = "ratioconsum"), selected = "frequency")), #n column(4, #sliderInput(inputId = "DivCompRankA", sliderInput(inputId = "n_xulu", label = "select Top n merchant", min = 1, max = 50, value = 5)), # column(4, uiOutput("dist_xulu") ) ), hr(), helpText("Tip:"), br(), fluidRow( column(6, # withSpinner(plotOutput(outputId = "DivCompPlotA", withSpinner(plotOutput(outputId = "topn_xulu" #click = "click_plotDiv" ))), )), #########################################################################################################xulu part3 end #######################################################################################################tj view tabPanel("Days From Receive To Redeem Count Distribution", fluid = TRUE, tags$style(button_color_css), titlePanel("Days From Receive To Redeem Count Distribution"), 
sidebarLayout( sidebarPanel( width = 2, selectInput( inputId = "ViewTypeAtyt", label = "Select type", choices = c("online","offline"), selected = "offline", width = "100%" ), sliderInput(inputId = "ViewRangeAtyt", label = "Redeem Days Range:", min = 0, max = 200, value = c(0,200) ) #selectInput( # inputId = "TS_view_type_hyt", #label = "Choose your analysis perspective", #choices = c("Coupon Usage","Sales-Coupon Usage"), #selected = "Coupon Usage", #width = "100%" #), #uiOutput("TS_select_line_tyt") ), # 根据inputId TS_Comparison_type_hyt 决定样式 # Numeric Comparison 两列,Ratio Comparison一列 # 需要在server???定义obserevent mainPanel( plotOutput("main_tyt") # fluidPage( # uiOutput("main_tyt") # ) ) ) ), ########################################################################## #TYT Continue ###################################################################### tabPanel("Use Rate and Coupon Rate Relationship", fluid = TRUE, tags$style(button_color_css), titlePanel("Use Rate and Coupon Rate Relationship"), sidebarLayout( sidebarPanel( width = 2, selectInput( inputId = "ViewTypeBtyt", label = "Select type", choices = c("online","offline"), selected = "offline", width = "100%" ), checkboxGroupInput( inputId = "ViewDiscountcoltyt", label = "Choose discount percent off category for statistic column chart", choices = c("0%","1%",'2%','3%','5%','10%','20%','25%','50%'), selected = c("10%",'20%'), width = "100%" ), checkboxGroupInput( inputId = "ViewDiscountboxtyt1", label = "Choose discount percent off category for box plot", choices = c("0%","1%",'2%','3%','5%','10%','20%','25%','50%'), selected = c("10%",'20%','50%'), width = "100%" ) #uiOutput("TS_select_line_tyt") ), mainPanel(fluidPage(fluidRow( column(6, plotOutput('column_tyt')), column(6, plotOutput('boxplot_tyt')) )) ) # plotOutput("main_tyt") # fluidPage( # uiOutput("main_tyt") # ) )) ##########################################################################################################tj view end ), 
######################################################################xiaofei more navbarMenu("More", icon = icon("info-circle"), tabPanel("Data Description", fluid = TRUE, fluidRow( column(6, h4(p("Data Description")), HTML('<img src="1.png",width="1000px",height="1000px"'), br(), HTML('<img src="2.png",width="1000px",height="1000px"'), br(), HTML('<img src="3.png",width="1000px",height="1000px"'), br(), )) ), tabPanel("About", fluid = TRUE, fluidRow( column(6, #br(), h4(p("About the Project")), h5(p("As smart phone penetration reaches the hundreds of millions mark, O2O (Online to Offline) requires businesses to have a strong presence both offline and online. APPs with O2O capabilities accumulate daily consumer behaviour and location data that require big data and commercial operations management.")), br(), h5(p("Sending coupons is a general O2O marketing tool used to activate existing customers and attract new ones. While customers are happy to receive coupons that they want, they are frustrated when receiving coupons that they do not need. ")), br(), h5(p("For merchants, sending unwanted coupons may erode brand equity and hinder marketing expense forecasting. 
Targeted marketing is an important technology to increase the coupon redemption rate, providing relevant discounts to customers and effective marketing tools to businesses.")) #hr(), ), column(6, h4(p("About the Author")), h5(p("Yating Tao, Yitao Huang, Xiaofei Qu, Xulu Zhang") ), br() ) ), br(), hr(), ) ) ######################################################################xiaofei more end ) ) # Define server server <- function(input, output, session) { ##################################################################################tt part #Program Finder user_clickFinder <- reactiveValues() reactive({ user_clickFinder$DT <- data.frame(matrix(0, ncol = ncol(BigTop100), nrow = 1)) names(user_clickFinder$DT) <- colnames(BigTop100) }) observeEvent(input$click_plotFinder, { add_row <- nearPoints(BigTop100_finder(), input$click_plotFinder, xvar = "lon", yvar = "lat", threshold = 5) user_clickFinder$DT <- rbind(add_row, user_clickFinder$DT) }) brushFinder <- reactive({ req(length(user_clickFinder$DT) > 1) user_clickFinder$DT }) observeEvent({ input$FinderClear #input$EnterTimes },{ user_clickFinder$DT <- NULL }) output$schoolstableFinder<-DT::renderDataTable({ DT::datatable(unique(brushFinder()[,c("Name", "Class", "X.swim_time", "Team", "Relative_RankInEvent_Team", "Division", "Address", "Y2019", "Type", "Time")]), colnames = c("Sort" = "Time", "Time" = "X.swim_time", "US News School Ranking" = "Y2019", "School Type" = "Type", "Swimmer Rank In Event On Team" = "Relative_RankInEvent_Team"), rownames = FALSE, options = list(order = list(9, 'asc'), columnDefs = list(list(visible=FALSE, targets=c(9)), list(className = "dt-center", targets = 1:7), list(classname = "dt-right", targets = 8)) )) }) #Program Comparisons ###############################################################################################xulu part4 values <- reactiveValues(tbl = NULL, obsList = NULL, plot.df = NULL) observeEvent(input$linetypeFinder_xulu, { if (!NA %in% 
match(input$linetypeFinder_xulu, c("offline.s", "online.s"))) { values$tbl <- as.data.frame(get(input$linetypeFinder_xulu)) values$obsList <- colnames(values$tbl) output$obs1 <- renderUI({ selectInput( inputId = "observationInput1", label = "observation", choices = values$obsList ) }) } }) observeEvent(input$observationInput1,{ dtf<- values$tbl l<-dtf[,eval(input$observationInput1)] if (is.numeric(l[[1]])) { output$bins<- renderUI({ sliderInput( inputId = "bins", label = "Number of bins:", min = 1, max = 50, value = 30) }) } } ) observeEvent(input$observationInput1, { values$plot.df <- as.data.frame(values$tbl[, input$observationInput1]) colnames(values$plot.df) <- input$observationInput1 output$dataSet <- DT::renderDataTable({ values$tbl }, extensions = c('Scroller', 'FixedColumns'), options = list( deferRender = TRUE, scrollX = TRUE, scrollY = 200, scroller = TRUE, dom = 'Bfrtip', fixedColumns = TRUE )) }) observe({ output$dataplot_xulu <- renderPlot({ chartCol_xulu(df=values$plot.df,colN=input$observationInput1,b=input$bins) }) }) observeEvent(input$reset_xulu, { values$tbl <- NULL output$obs1 <- NULL }) output$aaa <- renderPrint({ values$obs1 }) observeEvent(input$linetype_xulu,{ if (eval(input$linetype_xulu)=="offline" ) { output$dist_xulu<- renderUI({ sliderInput( inputId = "dist_xulu", label = "offline merchant distance", min = 0, max = 15, value = c(0,15)) }) } else{ output$dist_xulu<- renderUI({}) } } ) output$topn_xulu <- renderPlot({ topn(n=input$n_xulu,linetype=input$linetype_xulu,rof=input$rof_xulu,dist=input$dist_xulu) }) ####################################################################################################xulu server end ################################################################################### server Hyt part # server Hyt part # Conditional Main panel # When Select Nuemeric shows 2 graphs # When Select Ratio shows 1 graphs observeEvent(input$TS_comparison_type_hyt,{ if (input$TS_comparison_type_hyt == "Numeric") { 
output$TS_main_hyt <- renderUI({ fluidPage( fluidRow( plotOutput( "TS_online_numeric_hyt", width = "100%", height = "300px" ) ), hr(), fluidRow( plotOutput( "TS_offline_numeric_hyt", width = "100%", height = "300px" ) ) ) }) } else if(input$TS_comparison_type_hyt == "Ratio"){ output$TS_main_hyt <- renderUI({ fluidRow( column(12, plotOutput( "TS_ratio_hyt", width = "100%", height = "300px" )) ) }) } else { return("error") } }) # Conditional Select line checkbox # When Select Ratio shows 2 options # When Select Numeric shows 3 options observeEvent(input$TS_comparison_type_hyt,{ if (input$TS_comparison_type_hyt == "Numeric") { output$TS_select_line_hyt <- renderUI({ checkboxGroupInput( inputId = "TS_line_CB_numeric_hyt", label = "Choose the line to show", choices = c("Total","Without Coupon", "With Coupon or Coupon Used"), selected = c("Total","Without Coupon", "With Coupon or Coupon Used") ) }) } else if(input$TS_comparison_type_hyt == "Ratio"){ output$TS_select_line_hyt <- renderUI({ checkboxGroupInput( inputId = "TS_line_CB_ratio_hyt", label = "Choose the line to show", choices = c("Online","Offline"), selected = c("Online","Offline") ) }) } else { return("error") } }) # TS_online_numeric_hyt plot # Online Numeric part output$TS_online_numeric_hyt <- renderPlot({ tsfunc_hyt(input$TS_comparison_type_hyt,input$TS_view_type_hyt, input$TS_line_CB_numeric_hyt)[1] }) # TS_offline_numeric_hyt # Offline Numeric part output$TS_offline_numeric_hyt <- renderPlot({ tsfunc_hyt(input$TS_comparison_type_hyt,input$TS_view_type_hyt, input$TS_line_CB_numeric_hyt)[2] }) # TS_ratio_hyt # Ratio part output$TS_ratio_hyt <- renderPlot({ tsfunc_hyt(input$TS_comparison_type_hyt,input$TS_view_type_hyt, input$TS_line_CB_ratio_hyt) }) # naive bayes table output$nb_predict_table_hyt <- renderDataTable({ if (input$data_set_predic_hyt == "Validation"){ offlinevalid.df_hyt } else { offline_test_hyt } }) ################################################################################# ttserver end 
##################################################################################xiaofei server output$graph <- renderPlot({ print("ok") if(input$TYPE=="Frequency Graph"){ g1x <- Distance_plot(lm,v1="Distance",v2="Frequency") g1x }else if(input$TYPE=="Ratio Graph"){ g2x <- Distance_plot(lm,v1="Distance",v2="ratio") g2x }else if(input$TYPE=="Both"){ p <- ggplot(lm, aes(x = Distance)) p <- p + geom_line(aes(y=ratio, colour = "Ratio")) p <- p + geom_line(aes(y=Frequency/500000, colour = "frequency")) p <- p + scale_y_continuous(sec.axis = sec_axis(~.*500000, name = "frequency")) p <- p + scale_colour_manual(values = c("blue", "red")) p <- p + labs(y = "Ratio",x = "Distance",colour = "Parameter") p <- p + theme(legend.position = c(0.8, 0.9)) p } }) ##################################################################################xiaofei serverend ###################################################################################TYT server part output$main_tyt <- renderPlot({ print("ok") if(input$ViewTypeAtyt=="offline"){ g1 <- redeemtime(range=input$ViewRangeAtyt,type="offline") g1 }else{ g2 <- redeemtime(range=input$ViewRangeAtyt,type="online") g2 } }) output$column_tyt <- renderPlot({ print('ok') g3 <- UseRateCouponRatecol(discount_col = input$ViewDiscountcoltyt,type=input$ViewTypeBtyt) g3 }) output$boxplot_tyt <- renderPlot({ print('ok') g4 <- UseRateCouponRatebox(discount_box = input$ViewDiscountboxtyt1,type=input$ViewTypeBtyt) g4 }) #TYT server part end # #######################################################################################TYT server part end #session$onSessionEnded(stopApp) } # Run the application shinyApp(ui = ui, server = server)
# Exploratory analysis of the AustinCityLimits and WorldRecords datasets
# shipped with the SDSFoundations course package.
# Part 1: two-way tables on ACL artists; Part 2: linear fits on world records.
library(SDSFoundations)

# ---- Austin City Limits: contingency tables ----
acl <- AustinCityLimits
View(acl)

# Female artists over age 60.
aclfg60 <- acl[acl$Age > 60 & acl$Gender=='F',]
gender <- table(acl$Gender)

# Artists aged 30 and over.
gender30 <- acl[acl$Age >= 30,]
table(gender30$Gender)
tabo30 <- table(gender30$Genre,gender30$Gender)
prop.table(tabo30,2)   # column proportions: genre mix within each gender
genre <- table(gender30$Genre)
prop.table(genre)

tab <- table(acl$Genre,acl$Gender)
tab
prop.table(tabo30,2)

# Grammy winners among male artists, broken down by genre.
male <- acl[acl$Gender=='M',]
table(male$Grammy,male$Genre)
tabgrammy <- table(male$Grammy)
prop.table(tabgrammy)
tabgg <- table(male$Grammy,male$Genre)
prop.table(tabgg,2)   # proportions within each genre
prop.table(tabgg,1)   # proportions within each Grammy status
tabgg
table(acl$Grammy,acl$Genre)
# FIX: use TRUE, not T (T/F are ordinary, reassignable bindings in R).
barplot(tabgg, beside = TRUE, legend.text = TRUE)

# Facebook popularity (>100k followers) by age group.
table(acl$Facebook.100k)
tabfb <- table(acl$Facebook.100k,acl$Age.Group)
prop.table(tabfb,2)

# ---- linear regression on WorldRecords ----
table(WorldRecords$Event)
which(WorldRecords$Athlete=='Usain Bolt' & WorldRecords$Event == 'Mens 100m')
WorldRecords[which(WorldRecords$Athlete=='Usain Bolt' & WorldRecords$Event == 'Mens 100m'),]
which(WorldRecords$Event == 'Womens Mile' & WorldRecords$Record < 260)
WorldRecords[which(WorldRecords$Event == 'Womens Mile' & WorldRecords$Record < 260),]

# Subset the data
WR <- WorldRecords
menshot <- WR[WR$Event=='Mens Shotput',]
womenshot <- WR[WR$Event=='Womens Shotput',]

# Create scatterplots (shotput records are distances in metres).
plot(menshot$Year,menshot$Record,main='Mens Shotput World Records',xlab='Year',ylab='World Record Distance (m)',pch=16)
plot(womenshot$Year,womenshot$Record,main='Womens Shotput World Records',xlab='Year',ylab='World Record Distance (m)',pch=16)

# Run linear models
linFit(menshot$Year, menshot$Record)
linFit(womenshot$Year,womenshot$Record)

menmile <- WR[WR$Event =='Mens Mile',]
womenmile <- WR[WR$Event =='Womens Mile',]
# FIX: mile records are times, not distances (the filter above uses
# Record < 260, i.e. seconds) — the y-axis label was copy-pasted from
# the shotput plots. Relabel accordingly.
plot(menmile$Year,menmile$Record,main='Mens Mile World Records',xlab='Year',ylab='World Record Time (sec)',pch=16)
plot(womenmile$Year,womenmile$Record,main='Womens Mile World Records',xlab='Year',ylab='World Record Time (sec)',pch=16)
linFit(menmile$Year,menmile$Record)
linFit(womenmile$Year,womenmile$Record)

# Pole vault since 1970: latest record year and records above 6 m.
menpole <- WR[WR$Event == 'Mens Polevault' & WR$Year >= 1970,]
menpole[menpole$Year == max(menpole$Year),]
menpole[menpole$Record>6,]
plot(menpole$Year,menpole$Record,main='Mens Polevault World Records',xlab='Year',ylab='World Record Distance (m)',pch=16)
linFit(menpole$Year,menpole$Record)
/DataAnalysis_4to5.R
no_license
Santha09/CoreRepo
R
false
false
2,294
r
# Exploratory analysis of the AustinCityLimits and WorldRecords datasets
# shipped with the SDSFoundations course package.
# Part 1: two-way tables on ACL artists; Part 2: linear fits on world records.
library(SDSFoundations)

# ---- Austin City Limits: contingency tables ----
acl <- AustinCityLimits
View(acl)

# Female artists over age 60.
aclfg60 <- acl[acl$Age > 60 & acl$Gender=='F',]
gender <- table(acl$Gender)

# Artists aged 30 and over.
gender30 <- acl[acl$Age >= 30,]
table(gender30$Gender)
tabo30 <- table(gender30$Genre,gender30$Gender)
prop.table(tabo30,2)   # column proportions: genre mix within each gender
genre <- table(gender30$Genre)
prop.table(genre)

tab <- table(acl$Genre,acl$Gender)
tab
prop.table(tabo30,2)

# Grammy winners among male artists, broken down by genre.
male <- acl[acl$Gender=='M',]
table(male$Grammy,male$Genre)
tabgrammy <- table(male$Grammy)
prop.table(tabgrammy)
tabgg <- table(male$Grammy,male$Genre)
prop.table(tabgg,2)   # proportions within each genre
prop.table(tabgg,1)   # proportions within each Grammy status
tabgg
table(acl$Grammy,acl$Genre)
# FIX: use TRUE, not T (T/F are ordinary, reassignable bindings in R).
barplot(tabgg, beside = TRUE, legend.text = TRUE)

# Facebook popularity (>100k followers) by age group.
table(acl$Facebook.100k)
tabfb <- table(acl$Facebook.100k,acl$Age.Group)
prop.table(tabfb,2)

# ---- linear regression on WorldRecords ----
table(WorldRecords$Event)
which(WorldRecords$Athlete=='Usain Bolt' & WorldRecords$Event == 'Mens 100m')
WorldRecords[which(WorldRecords$Athlete=='Usain Bolt' & WorldRecords$Event == 'Mens 100m'),]
which(WorldRecords$Event == 'Womens Mile' & WorldRecords$Record < 260)
WorldRecords[which(WorldRecords$Event == 'Womens Mile' & WorldRecords$Record < 260),]

# Subset the data
WR <- WorldRecords
menshot <- WR[WR$Event=='Mens Shotput',]
womenshot <- WR[WR$Event=='Womens Shotput',]

# Create scatterplots (shotput records are distances in metres).
plot(menshot$Year,menshot$Record,main='Mens Shotput World Records',xlab='Year',ylab='World Record Distance (m)',pch=16)
plot(womenshot$Year,womenshot$Record,main='Womens Shotput World Records',xlab='Year',ylab='World Record Distance (m)',pch=16)

# Run linear models
linFit(menshot$Year, menshot$Record)
linFit(womenshot$Year,womenshot$Record)

menmile <- WR[WR$Event =='Mens Mile',]
womenmile <- WR[WR$Event =='Womens Mile',]
# FIX: mile records are times, not distances (the filter above uses
# Record < 260, i.e. seconds) — the y-axis label was copy-pasted from
# the shotput plots. Relabel accordingly.
plot(menmile$Year,menmile$Record,main='Mens Mile World Records',xlab='Year',ylab='World Record Time (sec)',pch=16)
plot(womenmile$Year,womenmile$Record,main='Womens Mile World Records',xlab='Year',ylab='World Record Time (sec)',pch=16)
linFit(menmile$Year,menmile$Record)
linFit(womenmile$Year,womenmile$Record)

# Pole vault since 1970: latest record year and records above 6 m.
menpole <- WR[WR$Event == 'Mens Polevault' & WR$Year >= 1970,]
menpole[menpole$Year == max(menpole$Year),]
menpole[menpole$Record>6,]
plot(menpole$Year,menpole$Record,main='Mens Polevault World Records',xlab='Year',ylab='World Record Distance (m)',pch=16)
linFit(menpole$Year,menpole$Record)
# hospitalizations_HHS.R
# Weekly COVID-19 hospital-capacity visualizations for Syracuse, NY, built from
# the HHS facility-level weekly timeseries published on healthdata.gov.
library(ggrepel)

hospitals <- read.csv(
  "data/reported_hospital_capacity_admissions_facility-level_weekly_average_timeseries_20201207.csv",
  stringsAsFactors = FALSE
)

# Quick look at reporting coverage for the Syracuse, NY facilities.
hospitals %>%
  filter(city == 'SYRACUSE', state == 'NY') %>%
  select(total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_coverage)

# Per-hospital weekly occupancy metrics for Syracuse, NY. The dataset uses
# -999999 as a suppression sentinel; convert it to NA in numeric columns before
# computing ratios. (The original applied na_if() to the whole data frame,
# which modern dplyr rejects; across() restricts it to numeric columns, which
# is the intent.)
syracuse_hospitals <- hospitals %>%
  select(c(city, state, hospital_name, collection_week,
           total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_avg,
           all_adult_hospital_inpatient_beds_7_day_avg,
           staffed_adult_icu_bed_occupancy_7_day_avg,
           total_staffed_adult_icu_beds_7_day_avg,
           staffed_icu_adult_patients_confirmed_and_suspected_covid_7_day_avg,
           all_adult_hospital_inpatient_bed_occupied_7_day_avg)) %>%
  mutate(across(where(is.numeric), ~ na_if(.x, -999999)),
         collection_week = as.Date(collection_week),
         # Share of all inpatient beds holding suspected/confirmed COVID patients.
         hospitalized_confirmed_suspected_covid =
           total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_avg /
           all_adult_hospital_inpatient_beds_7_day_avg,
         # Share of staffed adult ICU beds occupied for any reason.
         total_staffed_ICU_fullness =
           staffed_adult_icu_bed_occupancy_7_day_avg /
           total_staffed_adult_icu_beds_7_day_avg,
         # Share of staffed adult ICU beds occupied by COVID patients.
         ICU_COVID =
           staffed_icu_adult_patients_confirmed_and_suspected_covid_7_day_avg /
           total_staffed_adult_icu_beds_7_day_avg,
         # Share of *occupied* inpatient beds holding COVID patients.
         hospitalized_covid =
           total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_avg /
           all_adult_hospital_inpatient_bed_occupied_7_day_avg,
         remaining_beds =
           total_staffed_adult_icu_beds_7_day_avg -
           staffed_adult_icu_bed_occupancy_7_day_avg) %>%
  filter(city == 'SYRACUSE', state == 'NY')

# City-wide weekly totals (sums across the Syracuse hospitals).
group_syracuse_hospitals <- syracuse_hospitals %>%
  group_by(collection_week) %>%
  summarise(
    tot_patients_covid = sum(total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_avg, na.rm = TRUE),
    inpatient_beds = sum(all_adult_hospital_inpatient_beds_7_day_avg, na.rm = TRUE),
    icu_beds_occupied = sum(staffed_adult_icu_bed_occupancy_7_day_avg, na.rm = TRUE),
    icu_beds = sum(total_staffed_adult_icu_beds_7_day_avg, na.rm = TRUE),
    icu_beds_occupied_covid = sum(staffed_icu_adult_patients_confirmed_and_suspected_covid_7_day_avg, na.rm = TRUE),
    inpatient_bed_occupied = sum(all_adult_hospital_inpatient_bed_occupied_7_day_avg, na.rm = TRUE)
  )

#Show averages per hospital
# Fixed colour per hospital so colours stay stable across charts.
colors_Total <- c(
  "UNIVERSITY HOSPITAL S U N Y HEALTH SCIENCE CENTER" = '#d7191c',
  "CROUSE HOSPITAL" = '#2c7bb6',
  "ST JOSEPH'S HOSPITAL HEALTH CENTER" = '#7b3294')

# Shared chart builder for every visualization in this script. `metric` is an
# unquoted column of `df`; the latest week is labelled with a percentage via
# geom_text_repel. When `by_hospital` is TRUE, series are coloured per
# hospital using colors_Total.
# BUG FIX: the original passed `source = 'healthdata.gov'` to labs(), which is
# not a labs() argument and was silently dropped; `caption` is the argument
# that actually renders the attribution.
plot_metric <- function(df, metric, title, subtitle, y_max = 0.5,
                        by_hospital = FALSE, nudge_x = -5, nudge_y = 0) {
  mapping <- if (by_hospital) {
    aes(collection_week, {{ metric }}, color = hospital_name)
  } else {
    aes(collection_week, {{ metric }})
  }
  p <- ggplot(df, mapping) +
    geom_line(show.legend = FALSE) +
    geom_point() +
    labs(title = title, subtitle = subtitle, caption = 'healthdata.gov',
         x = '', y = '') +
    scale_y_continuous(labels = scales::percent_format(accuracy = 1),
                       limits = c(0, y_max)) +
    # Label only the most recent week's points, e.g. "42%".
    geom_text_repel(data = subset(df, collection_week == max(collection_week)),
                    aes(label = paste0(round({{ metric }} * 100), '%')),
                    size = 5, nudge_x = nudge_x, nudge_y = nudge_y,
                    show.legend = FALSE,
                    arrow = arrow(length = unit(0.015, "npc"))) +
    ggthemes::theme_economist() +
    theme(legend.position = "top",
          axis.text.x = element_text(angle = 90),
          legend.text = element_text(size = 9),
          legend.title = element_blank())
  if (by_hospital) p <- p + scale_color_manual(values = colors_Total)
  p
}

# Write a chart to the visualizations folder at a consistent size.
save_viz <- function(plot, name) {
  ggsave(file.path("/Users/samedelstein/Onondaga_COVID/visualizations",
                   paste0(name, ".jpg")),
         plot = plot, width = 10, height = 7)
}

## Per-hospital charts --------------------------------------------------------
hospitalized_confirmed_suspected_covid_viz <- plot_metric(
  syracuse_hospitals, hospitalized_confirmed_suspected_covid,
  title = "Percentage of Available Hospital Inpatient Beds\n with Suspected or Confirmed COVID Patients",
  subtitle = 'Each Hospital in Syracuse', by_hospital = TRUE, nudge_x = 5)
save_viz(hospitalized_confirmed_suspected_covid_viz,
         "hospitalized_confirmed_suspected_covid_viz")

total_staffed_ICU_fullness_viz <- plot_metric(
  syracuse_hospitals, total_staffed_ICU_fullness,
  title = "Percentage of ICU Beds Occupied for Any Reason",
  subtitle = 'Each Hospital in Syracuse', y_max = 1, by_hospital = TRUE)
save_viz(total_staffed_ICU_fullness_viz, "total_staffed_ICU_fullness_viz")

ICU_COVID_viz <- plot_metric(
  syracuse_hospitals, ICU_COVID,
  title = "Percentage of ICU Beds Occupied with COVID-19 Patients",
  subtitle = 'Each Hospital in Syracuse', by_hospital = TRUE)
save_viz(ICU_COVID_viz, "ICU_COVID_viz")

hospitalized_covid_viz <- plot_metric(
  syracuse_hospitals, hospitalized_covid,
  title = "Percentage of Hospitalized Patients that have COVID-19",
  subtitle = 'Each Hospital in Syracuse', by_hospital = TRUE)
save_viz(hospitalized_covid_viz, "hospitalized_covid_viz")

#add sums together and average for entire city
group_syracuse_hospitals <- group_syracuse_hospitals %>%
  mutate(hospitalized_confirmed_suspected_covid = tot_patients_covid / inpatient_beds,
         total_staffed_ICU_fullness = icu_beds_occupied / icu_beds,
         ICU_COVID = icu_beds_occupied_covid / icu_beds,
         hospitalized_covid = tot_patients_covid / inpatient_bed_occupied)

## City-wide charts -----------------------------------------------------------
hospitalized_confirmed_suspected_covid_allcuse_viz <- plot_metric(
  group_syracuse_hospitals, hospitalized_confirmed_suspected_covid,
  title = "Percentage of Available Hospital Inpatient Beds\n with Suspected or Confirmed COVID Patients",
  subtitle = 'All Syracuse Hospitals Combined')
save_viz(hospitalized_confirmed_suspected_covid_allcuse_viz,
         "hospitalized_confirmed_suspected_covid_allcuse_viz")

total_staffed_ICU_fullness_allcuse_viz <- plot_metric(
  group_syracuse_hospitals, total_staffed_ICU_fullness,
  title = "Percentage of ICU Beds Occupied for Any Reason",
  subtitle = 'All Syracuse Hospitals Combined', y_max = 1,
  nudge_x = 0, nudge_y = -.1)
save_viz(total_staffed_ICU_fullness_allcuse_viz,
         "total_staffed_ICU_fullness_allcuse_viz")

ICU_COVID_allcuse_viz <- plot_metric(
  group_syracuse_hospitals, ICU_COVID,
  title = "Percentage of ICU Beds Occupied with COVID-19 Patients",
  subtitle = 'All Syracuse Hospitals Combined')
save_viz(ICU_COVID_allcuse_viz, "ICU_COVID_allcuse_viz")

hospitalized_covid_allcuse_viz <- plot_metric(
  group_syracuse_hospitals, hospitalized_covid,
  title = "Percentage of Hospitalized Patients that have COVID-19",
  subtitle = 'All Syracuse Hospitals Combined')
save_viz(hospitalized_covid_allcuse_viz, "hospitalized_covid_allcuse_viz")
/hospitalizations_HHS.R
no_license
samedelstein/Onondaga_COVID
R
false
false
12,469
r
library(ggrepel) hospitals <- read.csv("data/reported_hospital_capacity_admissions_facility-level_weekly_average_timeseries_20201207.csv", stringsAsFactors = FALSE) hospitals %>% filter(city == 'SYRACUSE', state == 'NY') %>% select(total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_coverage) syracuse_hospitals <- hospitals %>% select(c(city, state, hospital_name, collection_week, total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_avg, all_adult_hospital_inpatient_beds_7_day_avg, staffed_adult_icu_bed_occupancy_7_day_avg, total_staffed_adult_icu_beds_7_day_avg , staffed_icu_adult_patients_confirmed_and_suspected_covid_7_day_avg, total_staffed_adult_icu_beds_7_day_avg , total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_avg, all_adult_hospital_inpatient_bed_occupied_7_day_avg)) %>% mutate(na_if(., -999999), collection_week = as.Date(collection_week), hospitalized_confirmed_suspected_covid = total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_avg/ all_adult_hospital_inpatient_beds_7_day_avg, total_staffed_ICU_fullness = staffed_adult_icu_bed_occupancy_7_day_avg/ total_staffed_adult_icu_beds_7_day_avg, ICU_COVID = staffed_icu_adult_patients_confirmed_and_suspected_covid_7_day_avg/ total_staffed_adult_icu_beds_7_day_avg, hospitalized_covid = total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_avg /all_adult_hospital_inpatient_bed_occupied_7_day_avg, remaining_beds = total_staffed_adult_icu_beds_7_day_avg - staffed_adult_icu_bed_occupancy_7_day_avg) %>% filter(city == 'SYRACUSE', state == 'NY') group_syracuse_hospitals <- syracuse_hospitals %>% group_by(collection_week) %>% summarise(tot_patients_covid = sum(total_adult_patients_hospitalized_confirmed_and_suspected_covid_7_day_avg, na.rm = TRUE), inpatient_beds = sum(all_adult_hospital_inpatient_beds_7_day_avg, na.rm = TRUE), icu_beds_occupied = sum(staffed_adult_icu_bed_occupancy_7_day_avg, na.rm = TRUE), icu_beds = 
sum(total_staffed_adult_icu_beds_7_day_avg, na.rm = TRUE), icu_beds_occupied_covid = sum(staffed_icu_adult_patients_confirmed_and_suspected_covid_7_day_avg, na.rm = TRUE), inpatient_bed_occupied = sum(all_adult_hospital_inpatient_bed_occupied_7_day_avg, na.rm = TRUE)) #Show averages per hospital colors_Total <- c( "UNIVERSITY HOSPITAL S U N Y HEALTH SCIENCE CENTER" = '#d7191c', "CROUSE HOSPITAL" = '#2c7bb6', "ST JOSEPH'S HOSPITAL HEALTH CENTER" = '#7b3294') hospitalized_confirmed_suspected_covid_viz = ggplot(syracuse_hospitals, aes(collection_week, hospitalized_confirmed_suspected_covid, color = hospital_name) ) + geom_line(show.legend = FALSE) + geom_point() + scale_color_manual(values = colors_Total) + labs(title = "Percentage of Available Hospital Inpatient Beds\n with Suspected or Confirmed COVID Patients", subtitle = 'Each Hospital in Syracuse', source = 'healthdata.gov', x = '', y = '') + scale_y_continuous(labels = scales::percent_format(accuracy = 1), limits = c(0,.5)) + geom_text_repel(data=subset(syracuse_hospitals, collection_week==max(collection_week)), aes(label= paste0(round(hospitalized_confirmed_suspected_covid * 100), '%')), size=5, nudge_x=5, show.legend = FALSE, arrow = arrow(length = unit(0.015, "npc"))) + ggthemes::theme_economist() + theme(legend.position = "top", axis.text.x = element_text(angle = 90), legend.text=element_text(size=9), legend.title = element_blank()) ggsave("/Users/samedelstein/Onondaga_COVID/visualizations/hospitalized_confirmed_suspected_covid_viz.jpg", plot = hospitalized_confirmed_suspected_covid_viz, width = 10, height = 7) total_staffed_ICU_fullness_viz = ggplot(syracuse_hospitals, aes(collection_week, total_staffed_ICU_fullness, color = hospital_name)) + geom_line() + geom_point() + scale_color_manual(values = colors_Total) + labs(title = "Percentage of ICU Beds Occupied for Any Reason", subtitle = 'Each Hospital in Syracuse', source = 'healthdata.gov', x = '', y = '') + scale_y_continuous(labels = 
scales::percent_format(accuracy = 1), limits = c(0,1)) + geom_text_repel(data=subset(syracuse_hospitals, collection_week==max(collection_week)), aes(label= paste0(round(total_staffed_ICU_fullness * 100), '%')), size=5, nudge_x=-5, show.legend = FALSE, arrow = arrow(length = unit(0.015, "npc"))) + ggthemes::theme_economist() + theme(legend.position = "top", axis.text.x = element_text(angle = 90), legend.text=element_text(size=9), legend.title = element_blank()) ggsave("/Users/samedelstein/Onondaga_COVID/visualizations/total_staffed_ICU_fullness_viz.jpg", plot = total_staffed_ICU_fullness_viz, width = 10, height = 7) ICU_COVID_viz <- ggplot(syracuse_hospitals, aes(collection_week, ICU_COVID, color = hospital_name)) + geom_line()+ geom_point() + scale_color_manual(values = colors_Total) + labs(title = "Percentage of ICU Beds Occupied with COVID-19 Patients", subtitle = 'Each Hospital in Syracuse', source = 'healthdata.gov', x = '', y = '') + scale_y_continuous(labels = scales::percent_format(accuracy = 1), limits = c(0,.5)) + geom_text_repel(data=subset(syracuse_hospitals, collection_week==max(collection_week)), aes(label= paste0(round(ICU_COVID * 100), '%')), size=5, nudge_x=-5, show.legend = FALSE, arrow = arrow(length = unit(0.015, "npc"))) + ggthemes::theme_economist() + theme(legend.position = "top", axis.text.x = element_text(angle = 90), legend.text=element_text(size=9), legend.title = element_blank()) ggsave("/Users/samedelstein/Onondaga_COVID/visualizations/ICU_COVID_viz.jpg", plot = ICU_COVID_viz, width = 10, height = 7) hospitalized_covid_viz <- ggplot(syracuse_hospitals, aes(collection_week, hospitalized_covid, color = hospital_name)) + geom_line() + geom_point() + scale_color_manual(values = colors_Total) + labs(title = "Percentage of Hospitalized Patients that have COVID-19", subtitle = 'Each Hospital in Syracuse', source = 'healthdata.gov', x = '', y = '') + scale_y_continuous(labels = scales::percent_format(accuracy = 1), limits = c(0,.5)) + 
geom_text_repel(data=subset(syracuse_hospitals, collection_week==max(collection_week)), aes(label= paste0(round(hospitalized_covid * 100), '%')), size=5, nudge_x=-5, show.legend = FALSE, arrow = arrow(length = unit(0.015, "npc"))) + ggthemes::theme_economist() + theme(legend.position = "top", axis.text.x = element_text(angle = 90), legend.text=element_text(size=9), legend.title = element_blank()) ggsave("/Users/samedelstein/Onondaga_COVID/visualizations/hospitalized_covid_viz.jpg", plot = hospitalized_covid_viz, width = 10, height = 7) #add sums together and average for entire city group_syracuse_hospitals <- group_syracuse_hospitals %>% mutate(hospitalized_confirmed_suspected_covid = tot_patients_covid/inpatient_beds, total_staffed_ICU_fullness = icu_beds_occupied/icu_beds, ICU_COVID = icu_beds_occupied_covid/icu_beds, hospitalized_covid = tot_patients_covid/inpatient_bed_occupied) hospitalized_confirmed_suspected_covid_allcuse_viz <- ggplot(group_syracuse_hospitals, aes(collection_week, hospitalized_confirmed_suspected_covid) ) + geom_line(show.legend = FALSE) + geom_point() + labs(title = "Percentage of Available Hospital Inpatient Beds\n with Suspected or Confirmed COVID Patients", subtitle = 'All Syracuse Hospitals Combined', source = 'healthdata.gov', x = '', y = '') + scale_y_continuous(labels = scales::percent_format(accuracy = 1), limits = c(0,.5)) + geom_text_repel(data=subset(group_syracuse_hospitals, collection_week==max(collection_week)), aes(label= paste0(round(hospitalized_confirmed_suspected_covid * 100), '%')), size=5, nudge_x=-5, show.legend = FALSE, arrow = arrow(length = unit(0.015, "npc"))) + ggthemes::theme_economist() + theme(legend.position = "top", axis.text.x = element_text(angle = 90), legend.text=element_text(size=9), legend.title = element_blank()) ggsave("/Users/samedelstein/Onondaga_COVID/visualizations/hospitalized_confirmed_suspected_covid_allcuse_viz.jpg", plot = hospitalized_confirmed_suspected_covid_allcuse_viz, width = 10, 
height = 7) total_staffed_ICU_fullness_allcuse_viz <- ggplot(group_syracuse_hospitals, aes(collection_week, total_staffed_ICU_fullness) ) + geom_line(show.legend = FALSE) + geom_point() + labs(title = "Percentage of ICU Beds Occupied for Any Reason", subtitle = 'All Syracuse Hospitals Combined', source = 'healthdata.gov', x = '', y = '') + scale_y_continuous(labels = scales::percent_format(accuracy = 1), limits = c(0,1)) + geom_text_repel(data=subset(group_syracuse_hospitals, collection_week==max(collection_week)), aes(label= paste0(round(total_staffed_ICU_fullness * 100), '%')), size=5, nudge_y=-.1, show.legend = FALSE, arrow = arrow(length = unit(0.015, "npc"))) + ggthemes::theme_economist() + theme(legend.position = "top", axis.text.x = element_text(angle = 90), legend.text=element_text(size=9), legend.title = element_blank()) ggsave("/Users/samedelstein/Onondaga_COVID/visualizations/total_staffed_ICU_fullness_allcuse_viz.jpg", plot = total_staffed_ICU_fullness_allcuse_viz, width = 10, height = 7) ICU_COVID_allcuse_viz <- ggplot(group_syracuse_hospitals, aes(collection_week, ICU_COVID) ) + geom_line(show.legend = FALSE) + geom_point() + labs(title = "Percentage of ICU Beds Occupied with COVID-19 Patients", subtitle = 'All Syracuse Hospitals Combined', source = 'healthdata.gov', x = '', y = '') + scale_y_continuous(labels = scales::percent_format(accuracy = 1), limits = c(0,.5)) + geom_text_repel(data=subset(group_syracuse_hospitals, collection_week==max(collection_week)), aes(label= paste0(round(ICU_COVID * 100), '%')), size=5, nudge_x=-5, show.legend = FALSE, arrow = arrow(length = unit(0.015, "npc"))) + ggthemes::theme_economist() + theme(legend.position = "top", axis.text.x = element_text(angle = 90), legend.text=element_text(size=9), legend.title = element_blank()) ggsave("/Users/samedelstein/Onondaga_COVID/visualizations/ICU_COVID_allcuse_viz.jpg", plot = ICU_COVID_allcuse_viz, width = 10, height = 7) hospitalized_covid_allcuse_viz <- 
ggplot(group_syracuse_hospitals, aes(collection_week, hospitalized_covid) ) + geom_line(show.legend = FALSE) + geom_point() + labs(title = "Percentage of Hospitalized Patients that have COVID-19", subtitle = 'All Syracuse Hospitals Combined', source = 'healthdata.gov', x = '', y = '') + scale_y_continuous(labels = scales::percent_format(accuracy = 1), limits = c(0,.5)) + geom_text_repel(data=subset(group_syracuse_hospitals, collection_week==max(collection_week)), aes(label= paste0(round(hospitalized_covid * 100), '%')), size=5, nudge_x=-5, show.legend = FALSE, arrow = arrow(length = unit(0.015, "npc"))) + ggthemes::theme_economist() + theme(legend.position = "top", axis.text.x = element_text(angle = 90), legend.text=element_text(size=9), legend.title = element_blank()) ggsave("/Users/samedelstein/Onondaga_COVID/visualizations/hospitalized_covid_allcuse_viz.jpg", plot = hospitalized_covid_allcuse_viz, width = 10, height = 7)
library(predictmeans)
/code2.R
no_license
snkinyanjui/homework-0
R
false
false
22
r
library(predictmeans)
# Simulate multi-cohort SNP effect estimates with MAMBA (25% outlier rate),
# fit the MAMBA model, then compute a posterior replication p-value (PRP)
# for every SNP and save the results.
set.seed(2021)
library(PRP)
library(rstudioapi)
library(mamba)
library(data.table)
library(parallel)

# Run relative to this script's own location (RStudio only).
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))

noutlier_rate = 0.25

# Load cached simulation + model fit if both exist; otherwise simulate and fit.
if (file.exists("../data/mamba_data_p25.rda") &&
    file.exists("../data/sim_mamba_mod_p25.rda")) {
  load(file = "../data/mamba_data_p25.rda")
  load(file = "../data/sim_mamba_mod_p25.rda")
  snpeffect <- mamba_data_p25$betajk
  snpvar <- mamba_data_p25$sjk2
} else {
  # simulate data with MAMBA
  mamba_data_p25 <- generate_data_mamba(lambda = noutlier_rate)
  save(mamba_data_p25, file = "../data/mamba_data_p25.rda")
  # save effects (betajk) and their sampling variances (sjk2)
  snpeffect <- mamba_data_p25$betajk
  snpvar <- mamba_data_p25$sjk2
  # fit mamba model
  sim_mod_p25 <- mamba(betajk = snpeffect, sjk2 = snpvar)
  save(sim_mod_p25, file = "../data/sim_mamba_mod_p25.rda")
}

# One list element per SNP (row): its per-study effects, and the per-study
# standard errors (square root of the variances).
list_eff = lapply(seq_len(nrow(snpeffect)), function(x) {
  snpeffect[x, ]
})
list_se = lapply(seq_len(nrow(snpvar)), function(x) {
  sqrt(snpvar[x, ])
})

# fitting to posterior_prp function
# BUG FIX: the standard errors were previously passed as `se = list_eff`
# (the effect estimates again), leaving `list_se` computed but unused;
# posterior_prp expects the standard errors in `se`.
print("Now applying posterior_prp function.")
post_prp_data_p25 = t(mapply(posterior_prp, beta = list_eff, se = list_se))
post_prp_data_p25 <- as.data.frame(post_prp_data_p25)
post_prp_data_pval_p25 <- as.numeric(post_prp_data_p25$pvalue)
# Character version for a clean CSV export of list-columns.
post_prp_data_ch_p25 <- apply(post_prp_data_p25, MARGIN = 2, FUN = as.character)

save(post_prp_data_p25, file = "../data/post_prp_data_p25.rda")
save(post_prp_data_pval_p25, file = "../data/post_prp_data_pval_p25.rda")
write.csv(post_prp_data_ch_p25, "../data/post_prp_data_p25.csv", row.names = FALSE)
/old/early_testing/prp_pval_p25.R
no_license
darrenlingit/replicable2021
R
false
false
1,948
r
# libraries and other things set.seed(2021) library(PRP) library(rstudioapi) library(mamba) library(data.table) library(parallel) setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) noutlier_rate = 0.25 # loading in or creating data if (file.exists("../data/mamba_data_p25.rda") && file.exists("../data/sim_mamba_mod_p25.rda") ) { load(file = "../data/mamba_data_p25.rda") load(file = "../data/sim_mamba_mod_p25.rda") snpeffect <- mamba_data_p25$betajk snpvar <- mamba_data_p25$sjk2 } else { # simulate data with MAMBA mamba_data_p25 <- generate_data_mamba(lambda = noutlier_rate) save(mamba_data_p25, file = "../data/mamba_data_p25.rda") # save effects and variance snpeffect <- mamba_data_p25$betajk snpvar <- mamba_data_p25$sjk2 # fit mamba model sim_mod_p25 <- mamba(betajk = snpeffect, sjk2 = snpvar) save(sim_mod_p25, file = "../data/sim_mamba_mod_p25.rda") } # list of effects and list of se # each item is a row from the data list_eff = lapply(seq_len(nrow(snpeffect)), function(x) { snpeffect[x,] }) list_se = lapply(seq_len(nrow(snpvar)), function(x) { sqrt(snpvar[x,]) }) # fitting to posterior_prp function print("Now applying posterior_prp function.") post_prp_data_p25 = t(mapply(posterior_prp, beta = list_eff, se = list_eff)) post_prp_data_p25 <- as.data.frame(post_prp_data_p25) post_prp_data_pval_p25 <- as.numeric(post_prp_data_p25$pvalue) post_prp_data_ch_p25 <- apply(post_prp_data_p25, MARGIN = 2, FUN = as.character) save(post_prp_data_p25, file = "../data/post_prp_data_p25.rda") save(post_prp_data_pval_p25, file = "../data/post_prp_data_pval_p25.rda") write.csv(post_prp_data_ch_p25, "../data/post_prp_data_p25.csv", row.names = FALSE)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/univariate_driver_functions.R \name{build_ud_results_tbl} \alias{build_ud_results_tbl} \title{Build Univariate Driver Results Tibble} \usage{ build_ud_results_tbl(group_name, feature_id, min_wt, min_mut) } \arguments{ \item{group_name}{A string in the name column of the tags table} \item{feature_id}{An integer in the feature_id column of the driver_results table} \item{min_wt}{An integer} \item{min_mut}{An integer} } \description{ Build Univariate Driver Results Tibble }
/man/build_ud_results_tbl.Rd
permissive
Drinchai/iatlas-app
R
false
true
557
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/univariate_driver_functions.R \name{build_ud_results_tbl} \alias{build_ud_results_tbl} \title{Build Univariate Driver Results Tibble} \usage{ build_ud_results_tbl(group_name, feature_id, min_wt, min_mut) } \arguments{ \item{group_name}{A string in the name column of the tags table} \item{feature_id}{An integer in the feature_id column of the driver_results table} \item{min_wt}{An integer} \item{min_mut}{An integer} } \description{ Build Univariate Driver Results Tibble }
## This script merges data from a number of .txt files and produces
## a tidy data set which may be used for further analysis.
## Expects the UCI HAR dataset layout in the working directory
## (activity_labels.txt, features.txt, test/ and train/ subdirectories)
## and writes the result to ./tidy.txt.

##check for required packages (reshape2 provides melt/dcast)
if (!("reshape2" %in% rownames(installed.packages())) ) {
  print("Please install required package \"reshape2\" before proceeding")
} else {
  ## Open required libraries
  library(reshape2)
  ## First, read all required .txt files and label the datasets
  ## Read all activities and their names and label the appropriate columns
  activity_labels <- read.table("./activity_labels.txt",col.names=c("activity_id","activity_name"))
  ## Read the dataframe's column names (second column of features.txt)
  features <- read.table("features.txt")
  feature_names <- features[,2]
  ## Read the test data and label the dataframe's columns
  testdata <- read.table("./test/X_test.txt")
  colnames(testdata) <- feature_names
  ## Read the training data and label the dataframe's columns
  traindata <- read.table("./train/X_train.txt")
  colnames(traindata) <- feature_names
  ## Read the ids of the test subjects and label the dataframe's column
  test_subject_id <- read.table("./test/subject_test.txt")
  colnames(test_subject_id) <- "subject_id"
  ## Read the activity id's of the test data and label the dataframe's column
  test_activity_id <- read.table("./test/y_test.txt")
  colnames(test_activity_id) <- "activity_id"
  ## Read the ids of the training subjects and label the dataframe's column
  train_subject_id <- read.table("./train/subject_train.txt")
  colnames(train_subject_id) <- "subject_id"
  ## Read the activity id's of the training data and label
  ## the dataframe's column
  train_activity_id <- read.table("./train/y_train.txt")
  colnames(train_activity_id) <- "activity_id"
  ##Combine the test subject id's, the test activity id's
  ##and the test data into one dataframe (columns align row-wise)
  test_data <- cbind(test_subject_id , test_activity_id , testdata)
  ##Combine the train subject id's, the train activity id's
  ##and the train data into one dataframe
  train_data <- cbind(train_subject_id , train_activity_id , traindata)
  ##Combine the test data and the train data into one dataframe
  all_data <- rbind(train_data,test_data)
  ##Keep only columns referring to mean() or std() values
  ##(matched case-insensitively, so meanFreq columns are included too)
  mean_col_idx <- grep("mean",names(all_data),ignore.case=TRUE)
  mean_col_names <- names(all_data)[mean_col_idx]
  std_col_idx <- grep("std",names(all_data),ignore.case=TRUE)
  std_col_names <- names(all_data)[std_col_idx]
  meanstddata <-all_data[,c("subject_id","activity_id",mean_col_names,std_col_names)]
  ##Merge the activities dataset with the mean/std values dataset
  ##to get one dataset with descriptive activity names
  descrnames <- merge(activity_labels,meanstddata,by.x="activity_id",by.y="activity_id",all=TRUE)
  ##Melt the dataset with the descriptive activity names for better handling
  data_melt <- melt(descrnames,id=c("activity_id","activity_name","subject_id"))
  ##Cast the melted dataset according to the average of each variable
  ##for each activity and each subject
  mean_data <- dcast(data_melt,activity_id + activity_name + subject_id ~ variable,mean)
  ## Create a file with the new tidy dataset
  write.table(mean_data,"./tidy.txt")
}
/run_analysis.R
no_license
jacksparrow88/Getting-and-Cleaning-Data-Project
R
false
false
3,759
r
## This script merges data from a number of .txt files and produces ## a tidy data set which may be used for further analysis. ##check for required packages if (!("reshape2" %in% rownames(installed.packages())) ) { print("Please install required package \"reshape2\" before proceeding") } else { ## Open required libraries library(reshape2) ## First, read all required .txt files and label the datasets ## Read all activities and their names and label the aproppriate columns activity_labels <- read.table("./activity_labels.txt",col.names=c("activity_id","activity_name")) ## Read the dataframe's column names features <- read.table("features.txt") feature_names <- features[,2] ## Read the test data and label the dataframe's columns testdata <- read.table("./test/X_test.txt") colnames(testdata) <- feature_names ## Read the training data and label the dataframe's columns traindata <- read.table("./train/X_train.txt") colnames(traindata) <- feature_names ## Read the ids of the test subjects and label the the dataframe's columns test_subject_id <- read.table("./test/subject_test.txt") colnames(test_subject_id) <- "subject_id" ## Read the activity id's of the test data and label the the dataframe's columns test_activity_id <- read.table("./test/y_test.txt") colnames(test_activity_id) <- "activity_id" ## Read the ids of the test subjects and label the the dataframe's columns train_subject_id <- read.table("./train/subject_train.txt") colnames(train_subject_id) <- "subject_id" ## Read the activity id's of the training data and label ##the dataframe's columns train_activity_id <- read.table("./train/y_train.txt") colnames(train_activity_id) <- "activity_id" ##Combine the test subject id's, the test activity id's ##and the test data into one dataframe test_data <- cbind(test_subject_id , test_activity_id , testdata) ##Combine the test subject id's, the test activity id's ##and the test data into one dataframe train_data <- cbind(train_subject_id , train_activity_id , traindata) 
##Combine the test data and the train data into one dataframe all_data <- rbind(train_data,test_data) ##Keep only columns refering to mean() or std() values mean_col_idx <- grep("mean",names(all_data),ignore.case=TRUE) mean_col_names <- names(all_data)[mean_col_idx] std_col_idx <- grep("std",names(all_data),ignore.case=TRUE) std_col_names <- names(all_data)[std_col_idx] meanstddata <-all_data[,c("subject_id","activity_id",mean_col_names,std_col_names)] ##Merge the activities datase with the mean/std values datase ##to get one dataset with descriptive activity names descrnames <- merge(activity_labels,meanstddata,by.x="activity_id",by.y="activity_id",all=TRUE) ##Melt the dataset with the descriptive activity names for better handling data_melt <- melt(descrnames,id=c("activity_id","activity_name","subject_id")) ##Cast the melted dataset according to the average of each variable ##for each activity and each subjec mean_data <- dcast(data_melt,activity_id + activity_name + subject_id ~ variable,mean) ## Create a file with the new tidy dataset write.table(mean_data,"./tidy.txt") }
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/facet-labels.r \name{label_parsed} \alias{label_parsed} \title{Label facets with parsed label.} \usage{ label_parsed(variable, value) } \arguments{ \item{variable}{variable name passed in by facetter} \item{value}{variable value passed in by facetter} } \description{ Label facets with parsed label. } \examples{ mtcars$cyl2 <- factor(mtcars$cyl, labels = c("alpha", "beta", "gamma")) qplot(wt, mpg, data = mtcars) + facet_grid(. ~ cyl2) qplot(wt, mpg, data = mtcars) + facet_grid(. ~ cyl2, labeller = label_parsed) } \seealso{ \code{\link{plotmath}} Other facet labellers: \code{\link{label_both}}; \code{\link{label_bquote}}; \code{\link{label_value}} }
/man/label_parsed.Rd
no_license
veraanadi/ggplot2
R
false
false
749
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/facet-labels.r \name{label_parsed} \alias{label_parsed} \title{Label facets with parsed label.} \usage{ label_parsed(variable, value) } \arguments{ \item{variable}{variable name passed in by facetter} \item{value}{variable value passed in by facetter} } \description{ Label facets with parsed label. } \examples{ mtcars$cyl2 <- factor(mtcars$cyl, labels = c("alpha", "beta", "gamma")) qplot(wt, mpg, data = mtcars) + facet_grid(. ~ cyl2) qplot(wt, mpg, data = mtcars) + facet_grid(. ~ cyl2, labeller = label_parsed) } \seealso{ \code{\link{plotmath}} Other facet labellers: \code{\link{label_both}}; \code{\link{label_bquote}}; \code{\link{label_value}} }
#!/usr/bin/env Rscript
## Summarise `samtools flagstat` log files: for each file in --logsdir,
## extract the total and mapped read counts and write a CSV with columns
## file | total | mapped | mapped_perc.

library(optparse,quietly = TRUE)

optList = list(
  # BUG FIX: these options take a value, so the action must be "store".
  # With action = "store_true" optparse treats them as valueless logical
  # flags, so the path/file name supplied on the command line would never
  # be captured.
  make_option("--logsdir", action = "store", type = "character",
              help = "Log directory with the files to summarized with samtools flagstat output"),
  make_option("--outfile", action = "store", type = "character", default = tempfile(),
              help = "Output file with a table with the following columns: Total reads | Mapped reads | Perc. mapped reads")
)
opt = parse_args(OptionParser(option_list = optList))

library(readr,quietly = TRUE)
library(dplyr,quietly = TRUE)
library(magrittr,quietly = TRUE)
library(tidyr,quietly = TRUE)

files = list.files(opt$logsdir, full.names = TRUE)

# Parse one flagstat log. Each flagstat line looks like "<n> + <m> <label>";
# after splitting on "+" and keeping rows with a positive leading count, the
# first row is the total read count and the second the mapped read count
# (assumes flagstat's standard line ordering — TODO confirm for your samtools
# version).
parse_file <- function(file) {
  string = (file %>% read_file %>% strsplit("\\n"))[[1]]
  tbl = tibble(string) %>%
    separate(string, into = c("q", "rest"), sep = "\\+") %>%
    mutate(q = as.numeric(q)) %>%
    filter(q > 0)
  out = tibble(file = basename(file), total = tbl$q[1], mapped = tbl$q[2]) %>%
    mutate(mapped_perc = mapped / total,
           # strip the ".logs" suffix from the reported file name
           file = gsub(".logs", "", file))
  out
}

parses = lapply(files, parse_file) %>% bind_rows
write_delim(parses, path = opt$outfile, delim = ",")
/rscripts/create_flagstat_summary.R
no_license
welch16/EBV_NOK
R
false
false
1,292
r
#!/usr/bin/env Rscript ## this script is used to compare a genomic region library(optparse,quietly = TRUE) optList = list( make_option("--logsdir",action = "store_true",type = "character", help = "Log directory with the files to summarized with samtools flagstat output"), make_option("--outfile",action = "store_true",type = "character",default = tempfile(), help = "Output file with a table with the following columns: Total reads | Mapped reads | Perc. mapped reads") ) opt = parse_args(OptionParser(option_list = optList)) library(readr,quietly = TRUE) library(dplyr,quietly = TRUE) library(magrittr,quietly = TRUE) library(tidyr,quietly = TRUE) files = list.files(opt$logsdir,full.names = TRUE) parse_file <- function(file) { string = (file %>% read_file %>% strsplit("\\n"))[[1]] tbl =tibble(string) %>% separate(string,into = c("q","rest"),sep = "\\+") %>% mutate(q = as.numeric(q)) %>% filter(q > 0) out = tibble(file = basename(file),total = tbl$q[1],mapped = tbl$q[2]) %>% mutate( mapped_perc = mapped / total, file = gsub(".logs","",file)) out } parses = lapply(files,parse_file) %>% bind_rows write_delim(parses,path = opt$outfile,delim = ",")
# Quick look at the ggplot2 `mpg` data set: print it, inspect its
# structure, draw a displacement-vs-highway-mpg scatter coloured by
# vehicle class, and open the help page.
library(tidyverse)

mpg
str(mpg)

ggplot(data = mpg, aes(x = displ, y = hwy, color = class)) +
  geom_point()

?mpg
/Data Analyst Nanodegree/Data Analysis with R/R for DS.R
no_license
yangforbig/Udacity_Github
R
false
false
115
r
# Quick look at the ggplot2 `mpg` data set: print it, inspect its
# structure, draw a displacement-vs-highway-mpg scatter coloured by
# vehicle class, and open the help page.
library(tidyverse)

mpg
str(mpg)

ggplot(data = mpg, aes(x = displ, y = hwy, color = class)) +
  geom_point()

?mpg
context("Setting levels to the order as appear in data")

test_that("ffactor reorders levels of the provided argument according to their order of appearance", {
  x <- c('b', 'c', 'd', 'a')
  a <- c('b', 'c', 'd', 'a')
  levels(a) <- a
  # check if ffactor is able to reorder levels as expected
  expect_identical(ffactor(x), factor(a, levels = levels(a)))
  expect_identical(levels(ffactor(x)), x)
})

test_that("test for invalid argument of ffactor", {
  t <- c(1, 3, 2, 4)
  # BUG FIX: the original asserted on `ffactor(x)`, but `x` is only defined
  # inside the previous test_that() block's own environment and is not
  # visible here, so the test errored on an undefined object. Use `t`.
  # expect ffactor to accept a vector as argument and return a factor as result
  expect_equal(is.factor(ffactor(t)), is.vector(t))
  # expect failure when the argument provided is not a vector
  # (modernised: expect_that(..., throws_error()) is deprecated in testthat)
  expect_error(ffactor(data.frame(t)))
})

test_that("test for corner cases of ffactor", {
  x <- c('b', 'c', 'c', 'a')
  a <- c('b', 'c', 'a')
  # check if ffactor reorders levels as expected when the argument
  # contains duplicated values
  expect_identical(ffactor(x), factor(x, levels = a))
  expect_identical(levels(ffactor(x)), a)
})
/tests/testthat/test_ffactor.R
no_license
y252zhan/foofactors
R
false
false
1,024
r
context("Setting levels to the order as appear in data")

test_that("ffactor reorders levels of the provided argument according to their order of appearance", {
  x <- c('b', 'c', 'd', 'a')
  a <- c('b', 'c', 'd', 'a')
  levels(a) <- a
  # check if ffactor is able to reorder levels as expected
  expect_identical(ffactor(x), factor(a, levels = levels(a)))
  expect_identical(levels(ffactor(x)), x)
})

test_that("test for invalid argument of ffactor", {
  t <- c(1, 3, 2, 4)
  # BUG FIX: the original asserted on `ffactor(x)`, but `x` is only defined
  # inside the previous test_that() block's own environment and is not
  # visible here, so the test errored on an undefined object. Use `t`.
  # expect ffactor to accept a vector as argument and return a factor as result
  expect_equal(is.factor(ffactor(t)), is.vector(t))
  # expect failure when the argument provided is not a vector
  # (modernised: expect_that(..., throws_error()) is deprecated in testthat)
  expect_error(ffactor(data.frame(t)))
})

test_that("test for corner cases of ffactor", {
  x <- c('b', 'c', 'c', 'a')
  a <- c('b', 'c', 'a')
  # check if ffactor reorders levels as expected when the argument
  # contains duplicated values
  expect_identical(ffactor(x), factor(x, levels = a))
  expect_identical(levels(ffactor(x)), a)
})
## load the data
test_data <- educ_data
dd <<- rms::datadist(test_data, q.effect = c(0.5, 0.75))
options(datadist = "dd")

# create the test model
test_model_002 <- rms::lrm(
  educ_3 ~ Rural + sex + max_SEP_3 + n_siblings + cran_rzs + height_rzs +
    FW_rzs + YOBc + (YOBc * sex) + (YOBc * Rural),
  data = test_data
)

# Wrapper around vdiffr::expect_doppelganger that skips the visual-diff
# check whenever vdiffr is not installed.
expect_doppelganger <- function(title, fig, path = NULL, ...) {
  testthat::skip_if_not_installed("vdiffr")
  vdiffr::expect_doppelganger(title, fig, path = path, ...)
}

test_that("returns a ggplot object", {
  p <- plot.lrm(test_model_002, cran_rzs, "Rural")
  expect_true(ggplot2::is.ggplot(p))
})

test_that("plotting test data generates the expected image", {
  p <- plot.lrm(test_model_002, "cran_rzs", "max_SEP_3", c("Rural", "sex"))
  expect_doppelganger("prediction_ggplot_article_lrm", p)
})

test_that("can plot with only one value", {
  p <- plot.lrm(test_model_002, cran_rzs)
  expect_doppelganger("prediction_ggplot_simplest", p)
})

test_that("plotting test data changes element names and order", {
  p <- plot.lrm(test_model_002, "cran_rzs",
                plot_cols = c("max_SEP_3"),
                plot_rows = c("Rural", "sex"),
                xlab = "Cranial volume (residuals to age an birth date)",
                facet_labels = list(Rural = c("Urban", "Rural"),
                                    sex = c("Boys","Girls"),
                                    max_SEP_3 = c("Unskilled manual",
                                                  "Skilled manual",
                                                  "Non-manual")))
  expect_doppelganger("prediction_ggplot_article_edited_lrm", p)
})

test_that("plotting test data accepts no vectors", {
  p <- plot.lrm(test_model_002, cran_rzs,
                plot_cols = "max_SEP_3",
                plot_rows = c("Rural", "sex"),
                xlab = "Cranial volume (residuals to age an birth date)",
                facet_labels = list(Rural = c("Urban", "Rural"),
                                    sex = c("Boys","Girls"),
                                    max_SEP_3 = c("Unskilled manual",
                                                  "Skilled manual",
                                                  "Non-manual")))
  expect_doppelganger("prediction_ggplot_article_edited_lrm", p)
})

test_that("plotting test data accepts no quotes", {
  # bare (unquoted) column names
  p <- plot.lrm(test_model_002, cran_rzs,
                plot_cols = max_SEP_3,
                plot_rows = c(Rural, sex),
                xlab = "Cranial volume (residuals to age an birth date)",
                facet_labels = list(Rural = c("Urban", "Rural"),
                                    sex = c("Boys","Girls"),
                                    max_SEP_3 = c("Unskilled manual",
                                                  "Skilled manual",
                                                  "Non-manual")))
  expect_doppelganger("prediction_ggplot_article_edited_lrm", p)
  # bare names wrapped in c()
  p <- plot.lrm(test_model_002, cran_rzs,
                plot_cols = c(max_SEP_3),
                plot_rows = c(Rural, sex),
                xlab = "Cranial volume (residuals to age an birth date)",
                facet_labels = list(Rural = c("Urban", "Rural"),
                                    sex = c("Boys","Girls"),
                                    max_SEP_3 = c("Unskilled manual",
                                                  "Skilled manual",
                                                  "Non-manual")))
  expect_doppelganger("prediction_ggplot_article_edited_lrm", p)
})
/tests/testthat/test-plot_lrm_predictWithCI.R
no_license
cran/ormPlot
R
false
false
3,736
r
## load the data
test_data <- educ_data
dd <<- rms::datadist(test_data, q.effect = c(0.5, 0.75))
options(datadist = "dd")

# create the test model
test_model_002 <- rms::lrm(
  educ_3 ~ Rural + sex + max_SEP_3 + n_siblings + cran_rzs + height_rzs +
    FW_rzs + YOBc + (YOBc * sex) + (YOBc * Rural),
  data = test_data
)

# Wrapper around vdiffr::expect_doppelganger that skips the visual-diff
# check whenever vdiffr is not installed.
expect_doppelganger <- function(title, fig, path = NULL, ...) {
  testthat::skip_if_not_installed("vdiffr")
  vdiffr::expect_doppelganger(title, fig, path = path, ...)
}

test_that("returns a ggplot object", {
  p <- plot.lrm(test_model_002, cran_rzs, "Rural")
  expect_true(ggplot2::is.ggplot(p))
})

test_that("plotting test data generates the expected image", {
  p <- plot.lrm(test_model_002, "cran_rzs", "max_SEP_3", c("Rural", "sex"))
  expect_doppelganger("prediction_ggplot_article_lrm", p)
})

test_that("can plot with only one value", {
  p <- plot.lrm(test_model_002, cran_rzs)
  expect_doppelganger("prediction_ggplot_simplest", p)
})

test_that("plotting test data changes element names and order", {
  p <- plot.lrm(test_model_002, "cran_rzs",
                plot_cols = c("max_SEP_3"),
                plot_rows = c("Rural", "sex"),
                xlab = "Cranial volume (residuals to age an birth date)",
                facet_labels = list(Rural = c("Urban", "Rural"),
                                    sex = c("Boys","Girls"),
                                    max_SEP_3 = c("Unskilled manual",
                                                  "Skilled manual",
                                                  "Non-manual")))
  expect_doppelganger("prediction_ggplot_article_edited_lrm", p)
})

test_that("plotting test data accepts no vectors", {
  p <- plot.lrm(test_model_002, cran_rzs,
                plot_cols = "max_SEP_3",
                plot_rows = c("Rural", "sex"),
                xlab = "Cranial volume (residuals to age an birth date)",
                facet_labels = list(Rural = c("Urban", "Rural"),
                                    sex = c("Boys","Girls"),
                                    max_SEP_3 = c("Unskilled manual",
                                                  "Skilled manual",
                                                  "Non-manual")))
  expect_doppelganger("prediction_ggplot_article_edited_lrm", p)
})

test_that("plotting test data accepts no quotes", {
  # bare (unquoted) column names
  p <- plot.lrm(test_model_002, cran_rzs,
                plot_cols = max_SEP_3,
                plot_rows = c(Rural, sex),
                xlab = "Cranial volume (residuals to age an birth date)",
                facet_labels = list(Rural = c("Urban", "Rural"),
                                    sex = c("Boys","Girls"),
                                    max_SEP_3 = c("Unskilled manual",
                                                  "Skilled manual",
                                                  "Non-manual")))
  expect_doppelganger("prediction_ggplot_article_edited_lrm", p)
  # bare names wrapped in c()
  p <- plot.lrm(test_model_002, cran_rzs,
                plot_cols = c(max_SEP_3),
                plot_rows = c(Rural, sex),
                xlab = "Cranial volume (residuals to age an birth date)",
                facet_labels = list(Rural = c("Urban", "Rural"),
                                    sex = c("Boys","Girls"),
                                    max_SEP_3 = c("Unskilled manual",
                                                  "Skilled manual",
                                                  "Non-manual")))
  expect_doppelganger("prediction_ggplot_article_edited_lrm", p)
})
# http://www-bcf.usc.edu/~gareth/ISL/Chapter%205%20Lab.txt
# Chapter 5 Lab: Cross-Validation and the Bootstrap

# The Validation Set Approach ----
library(ISLR)
set.seed(1)
train <- sample(392, 196)
lm.fit <- lm(mpg ~ horsepower, data = Auto, subset = train)
attach(Auto)
mean((mpg - predict(lm.fit, Auto))[-train]^2)
lm.fit2 <- lm(mpg ~ poly(horsepower, 2), data = Auto, subset = train)
mean((mpg - predict(lm.fit2, Auto))[-train]^2)
lm.fit3 <- lm(mpg ~ poly(horsepower, 3), data = Auto, subset = train)
mean((mpg - predict(lm.fit3, Auto))[-train]^2)

set.seed(2)
train <- sample(392, 196)
# NOTE(review): no `data = Auto` here -- this relies on attach(Auto) above.
lm.fit <- lm(mpg ~ horsepower, subset = train)
mean((mpg - predict(lm.fit, Auto))[-train]^2)
lm.fit2 <- lm(mpg ~ poly(horsepower, 2), data = Auto, subset = train)
mean((mpg - predict(lm.fit2, Auto))[-train]^2)
lm.fit3 <- lm(mpg ~ poly(horsepower, 3), data = Auto, subset = train)
mean((mpg - predict(lm.fit3, Auto))[-train]^2)

# Leave-One-Out Cross-Validation ----
glm.fit <- glm(mpg ~ horsepower, data = Auto)
coef(glm.fit)
lm.fit <- lm(mpg ~ horsepower, data = Auto)
coef(lm.fit)
library(boot)
glm.fit <- glm(mpg ~ horsepower, data = Auto)
cv.err <- cv.glm(Auto, glm.fit)
cv.err$delta
cv.error <- rep(0, 5)
# fit polynomials of several degrees and compare their LOOCV errors
for (i in 1:5) {
  glm.fit <- glm(mpg ~ poly(horsepower, i), data = Auto)
  cv.error[i] <- cv.glm(Auto, glm.fit)$delta[1]
}
cv.error

# k-Fold Cross-Validation ----
set.seed(17)
cv.error.10 <- rep(0, 10)
for (i in 1:10) {
  glm.fit <- glm(mpg ~ poly(horsepower, i), data = Auto)
  cv.error.10[i] <- cv.glm(Auto, glm.fit, K = 10)$delta[1]
}
cv.error.10

# The Bootstrap ----
# sample observations with replacement
alpha.fn <- function(data, index) {
  X <- data$X[index]
  Y <- data$Y[index]
  return((var(Y) - cov(X, Y)) / (var(X) + var(Y) - 2 * cov(X, Y)))
}
alpha.fn(Portfolio, 1:100)
set.seed(1)
alpha.fn(Portfolio, sample(100, 100, replace = T))
boot(Portfolio, alpha.fn, R = 1000)

# Estimating the Accuracy of a Linear Regression Model ----
boot.fn <- function(data, index)
  return(coef(lm(mpg ~ horsepower, data = data, subset = index)))
boot.fn(Auto, 1:392)
set.seed(1)
boot.fn(Auto, sample(392, 392, replace = T))
boot.fn(Auto, sample(392, 392, replace = T))
boot(Auto, boot.fn, 1000)
summary(lm(mpg ~ horsepower, data = Auto))$coef
boot.fn <- function(data, index)
  coefficients(lm(mpg ~ horsepower + I(horsepower^2), data = data, subset = index))
set.seed(1)
boot(Auto, boot.fn, 1000)
summary(lm(mpg ~ horsepower + I(horsepower^2), data = Auto))$coef
/DM/Statmllab3.R
no_license
zhurui1351/RSTOCK_TRAIL
R
false
false
2,248
r
# http://www-bcf.usc.edu/~gareth/ISL/Chapter%205%20Lab.txt
# Chapter 5 Lab: Cross-Validation and the Bootstrap

# The Validation Set Approach ----
library(ISLR)
set.seed(1)
train <- sample(392, 196)
lm.fit <- lm(mpg ~ horsepower, data = Auto, subset = train)
attach(Auto)
mean((mpg - predict(lm.fit, Auto))[-train]^2)
lm.fit2 <- lm(mpg ~ poly(horsepower, 2), data = Auto, subset = train)
mean((mpg - predict(lm.fit2, Auto))[-train]^2)
lm.fit3 <- lm(mpg ~ poly(horsepower, 3), data = Auto, subset = train)
mean((mpg - predict(lm.fit3, Auto))[-train]^2)

set.seed(2)
train <- sample(392, 196)
# NOTE(review): no `data = Auto` here -- this relies on attach(Auto) above.
lm.fit <- lm(mpg ~ horsepower, subset = train)
mean((mpg - predict(lm.fit, Auto))[-train]^2)
lm.fit2 <- lm(mpg ~ poly(horsepower, 2), data = Auto, subset = train)
mean((mpg - predict(lm.fit2, Auto))[-train]^2)
lm.fit3 <- lm(mpg ~ poly(horsepower, 3), data = Auto, subset = train)
mean((mpg - predict(lm.fit3, Auto))[-train]^2)

# Leave-One-Out Cross-Validation ----
glm.fit <- glm(mpg ~ horsepower, data = Auto)
coef(glm.fit)
lm.fit <- lm(mpg ~ horsepower, data = Auto)
coef(lm.fit)
library(boot)
glm.fit <- glm(mpg ~ horsepower, data = Auto)
cv.err <- cv.glm(Auto, glm.fit)
cv.err$delta
cv.error <- rep(0, 5)
# fit polynomials of several degrees and compare their LOOCV errors
for (i in 1:5) {
  glm.fit <- glm(mpg ~ poly(horsepower, i), data = Auto)
  cv.error[i] <- cv.glm(Auto, glm.fit)$delta[1]
}
cv.error

# k-Fold Cross-Validation ----
set.seed(17)
cv.error.10 <- rep(0, 10)
for (i in 1:10) {
  glm.fit <- glm(mpg ~ poly(horsepower, i), data = Auto)
  cv.error.10[i] <- cv.glm(Auto, glm.fit, K = 10)$delta[1]
}
cv.error.10

# The Bootstrap ----
# sample observations with replacement
alpha.fn <- function(data, index) {
  X <- data$X[index]
  Y <- data$Y[index]
  return((var(Y) - cov(X, Y)) / (var(X) + var(Y) - 2 * cov(X, Y)))
}
alpha.fn(Portfolio, 1:100)
set.seed(1)
alpha.fn(Portfolio, sample(100, 100, replace = T))
boot(Portfolio, alpha.fn, R = 1000)

# Estimating the Accuracy of a Linear Regression Model ----
boot.fn <- function(data, index)
  return(coef(lm(mpg ~ horsepower, data = data, subset = index)))
boot.fn(Auto, 1:392)
set.seed(1)
boot.fn(Auto, sample(392, 392, replace = T))
boot.fn(Auto, sample(392, 392, replace = T))
boot(Auto, boot.fn, 1000)
summary(lm(mpg ~ horsepower, data = Auto))$coef
boot.fn <- function(data, index)
  coefficients(lm(mpg ~ horsepower + I(horsepower^2), data = data, subset = index))
set.seed(1)
boot(Auto, boot.fn, 1000)
summary(lm(mpg ~ horsepower + I(horsepower^2), data = Auto))$coef
#' Get Status of a HDI Hive Service / version.
#'
#' @inheritParams setAzureContext
#' @inheritParams azureAuthenticate
#'
#' @family Hive functions
#' @export
azureHiveStatus <- function(azureActiveContext, clustername, hdiAdmin,
                            hdiPassword, verbose = FALSE) {
  HA = ""
  # Fall back to the active context for any argument that was not supplied.
  if (missing(clustername)) {
    CN <- azureActiveContext$clustername
  } else (CN = clustername)
  if (missing(hdiAdmin)) {
    HA <- azureActiveContext$hdiAdmin
  } else (HA = hdiAdmin)
  if (missing(hdiPassword)) {
    HP <- azureActiveContext$hdiPassword
  } else (HP = hdiPassword)
  verbosity <- if (verbose) httr::verbose(TRUE) else NULL
  if (!length(CN)) {
    stop("Error: No Valid clustername provided")
  }
  if (!length(HA)) {
    stop("Error: No Valid hdiAdmin provided")
  }
  if (!length(HP)) {
    stop("Error: No Valid hdiPassword provided")
  }
  # Cache the resolved credentials back on the context for later calls.
  azureActiveContext$hdiAdmin <- HA
  azureActiveContext$hdiPassword <- HP
  azureActiveContext$clustername <- CN

  URL <- paste("https://", CN, ".azurehdinsight.net/templeton/v1/status",
               sep = "")
  r <- GET(URL,
           add_headers(.headers = c(`Content-type` = "application/json")),
           authenticate(HA, HP), verbosity)
  if (status_code(r) != 200 && status_code(r) != 201) {
    stop(paste0("Error: Return code(", status_code(r), ")"))
  }
  rl <- content(r, "text", encoding = "UTF-8")
  df <- fromJSON(rl)
  return(paste("Status:", df$status, " version:", df$version))
}

#' Submit SQL command to Hive Service.
#'
#' @inheritParams setAzureContext
#' @inheritParams azureAuthenticate
#'
#' @param CMD SQL command string
#' @param path path
#'
#' @family Hive functions
#' @export
azureHiveSQL <- function(azureActiveContext, CMD, clustername, hdiAdmin,
                         hdiPassword, path = "wasb:///tmp/", verbose = FALSE) {
  HA = ""
  # Fall back to the active context for any argument that was not supplied.
  if (missing(clustername)) {
    CN <- azureActiveContext$clustername
  } else (CN = clustername)
  if (missing(hdiAdmin)) {
    HA <- azureActiveContext$hdiAdmin
  } else (HA = hdiAdmin)
  if (missing(hdiPassword)) {
    HP <- azureActiveContext$hdiPassword
  } else (HP = hdiPassword)
  if (missing(CMD)) {
    stop("Error: No Valid Command(CMD) provided")
  }
  verbosity <- if (verbose) httr::verbose(TRUE) else NULL
  if (!length(CN)) {
    stop("Error: No Valid clustername provided")
  }
  if (!length(HA)) {
    stop("Error: No Valid hdiAdmin provided")
  }
  if (!length(HP)) {
    stop("Error: No Valid hdiPassword provided")
  }
  azureActiveContext$hdiAdmin <- HA
  azureActiveContext$hdiPassword <- HP
  azureActiveContext$clustername <- CN

  # Earlier experiments kept for reference:
  # bodyI <- list(user.name = HA, execute = CMD, statusdir='wasb:///tmp/')
  # bodyI <- '{execute=\'show tables\';statusdir=\'HiveJobStatusFeb3\';enabloelog=\'false\';}'
  bodyI <- paste("user.name=", HA, "&execute=", CMD, "&statusdir=", path,
                 sep = "")
  # bodyI <- 'user.name=admin&execute=SHOW TABLES&statusdir=wasb:///tmp/'
  print(bodyI)
  # NOTE(review): the first URL below is immediately overwritten by the
  # second (the command travels in the request body, not the query string).
  URL <- paste("https://", CN,
               ".azurehdinsight.net/templeton/v1/hive?user.name=", HA,
               "&execute=", CMD, "&statusdir=wasb:///tmp/", sep = "")
  URL <- paste("https://", CN,
               ".azurehdinsight.net/templeton/v1/hive?user.name=", HA,
               sep = "")
  r <- POST(URL,
            add_headers(.headers = c(`Content-type` = "application/x-www-form-urlencoded")),
            authenticate(HA, HP), body = bodyI, encode = "json", verbosity)
  # ,authenticate('admin', 'Summer2014!')
  rl <- content(r, "text", encoding = "UTF-8")
  df <- fromJSON(rl)
  # print(df$id)

  # Poll the Templeton jobs endpoint until the job leaves PREP/RUNNING.
  URL <- paste("https://", CN, ".azurehdinsight.net/templeton/v1/jobs/",
               df$id, sep = "")
  Sys.sleep(2)
  r <- GET(URL,
           add_headers(.headers = c(`Content-type` = "application/json")),
           authenticate(HA, HP))
  rl <- content(r, "text", encoding = "UTF-8")
  df <- fromJSON(rl)
  message(paste("CMD Running: ", Sys.time()))
  message("Prep(P), Running(R), Completed(C)")
  DUR <- 2
  # print(df$status$state)
  while (df$status$state == "RUNNING" | df$status$state == "PREP") {
    Sys.sleep(DUR)
    # Back off gradually, capped at 5 seconds between polls.
    if (DUR < 5)
      DUR <- DUR + 1
    if (df$status$state == "PREP")
      message("P")
    if (df$status$state == "RUNNING")
      message("R")
    # print(df$status$state)
    r <- GET(URL,
             add_headers(.headers = c(`Content-type` = "application/json")),
             authenticate(HA, HP))
    rl <- content(r, "text", encoding = "UTF-8")
    rh <- headers(r)
    df <- fromJSON(rl)
  }
  if (df$status$state == "SUCCEEDED")
    message("S")
  if (df$status$state == "FAILED")
    message("F")
  STATE <- df$status$state
  message("Finished Running statement: ", Sys.time())
  return(TRUE)
}
/R/AzureHive.R
no_license
strategist922/AzureSMR
R
false
false
4,678
r
#' Get Status of a HDI Hive Service / version.
#'
#' @inheritParams setAzureContext
#' @inheritParams azureAuthenticate
#'
#' @family Hive functions
#' @export
azureHiveStatus <- function(azureActiveContext, clustername, hdiAdmin,
                            hdiPassword, verbose = FALSE) {
  HA = ""
  # Fall back to the active context for any argument that was not supplied.
  if (missing(clustername)) {
    CN <- azureActiveContext$clustername
  } else (CN = clustername)
  if (missing(hdiAdmin)) {
    HA <- azureActiveContext$hdiAdmin
  } else (HA = hdiAdmin)
  if (missing(hdiPassword)) {
    HP <- azureActiveContext$hdiPassword
  } else (HP = hdiPassword)
  verbosity <- if (verbose) httr::verbose(TRUE) else NULL
  if (!length(CN)) {
    stop("Error: No Valid clustername provided")
  }
  if (!length(HA)) {
    stop("Error: No Valid hdiAdmin provided")
  }
  if (!length(HP)) {
    stop("Error: No Valid hdiPassword provided")
  }
  # Cache the resolved credentials back on the context for later calls.
  azureActiveContext$hdiAdmin <- HA
  azureActiveContext$hdiPassword <- HP
  azureActiveContext$clustername <- CN

  URL <- paste("https://", CN, ".azurehdinsight.net/templeton/v1/status",
               sep = "")
  r <- GET(URL,
           add_headers(.headers = c(`Content-type` = "application/json")),
           authenticate(HA, HP), verbosity)
  if (status_code(r) != 200 && status_code(r) != 201) {
    stop(paste0("Error: Return code(", status_code(r), ")"))
  }
  rl <- content(r, "text", encoding = "UTF-8")
  df <- fromJSON(rl)
  return(paste("Status:", df$status, " version:", df$version))
}

#' Submit SQL command to Hive Service.
#'
#' @inheritParams setAzureContext
#' @inheritParams azureAuthenticate
#'
#' @param CMD SQL command string
#' @param path path
#'
#' @family Hive functions
#' @export
azureHiveSQL <- function(azureActiveContext, CMD, clustername, hdiAdmin,
                         hdiPassword, path = "wasb:///tmp/", verbose = FALSE) {
  HA = ""
  # Fall back to the active context for any argument that was not supplied.
  if (missing(clustername)) {
    CN <- azureActiveContext$clustername
  } else (CN = clustername)
  if (missing(hdiAdmin)) {
    HA <- azureActiveContext$hdiAdmin
  } else (HA = hdiAdmin)
  if (missing(hdiPassword)) {
    HP <- azureActiveContext$hdiPassword
  } else (HP = hdiPassword)
  if (missing(CMD)) {
    stop("Error: No Valid Command(CMD) provided")
  }
  verbosity <- if (verbose) httr::verbose(TRUE) else NULL
  if (!length(CN)) {
    stop("Error: No Valid clustername provided")
  }
  if (!length(HA)) {
    stop("Error: No Valid hdiAdmin provided")
  }
  if (!length(HP)) {
    stop("Error: No Valid hdiPassword provided")
  }
  azureActiveContext$hdiAdmin <- HA
  azureActiveContext$hdiPassword <- HP
  azureActiveContext$clustername <- CN

  # Earlier experiments kept for reference:
  # bodyI <- list(user.name = HA, execute = CMD, statusdir='wasb:///tmp/')
  # bodyI <- '{execute=\'show tables\';statusdir=\'HiveJobStatusFeb3\';enabloelog=\'false\';}'
  bodyI <- paste("user.name=", HA, "&execute=", CMD, "&statusdir=", path,
                 sep = "")
  # bodyI <- 'user.name=admin&execute=SHOW TABLES&statusdir=wasb:///tmp/'
  print(bodyI)
  # NOTE(review): the first URL below is immediately overwritten by the
  # second (the command travels in the request body, not the query string).
  URL <- paste("https://", CN,
               ".azurehdinsight.net/templeton/v1/hive?user.name=", HA,
               "&execute=", CMD, "&statusdir=wasb:///tmp/", sep = "")
  URL <- paste("https://", CN,
               ".azurehdinsight.net/templeton/v1/hive?user.name=", HA,
               sep = "")
  r <- POST(URL,
            add_headers(.headers = c(`Content-type` = "application/x-www-form-urlencoded")),
            authenticate(HA, HP), body = bodyI, encode = "json", verbosity)
  # ,authenticate('admin', 'Summer2014!')
  rl <- content(r, "text", encoding = "UTF-8")
  df <- fromJSON(rl)
  # print(df$id)

  # Poll the Templeton jobs endpoint until the job leaves PREP/RUNNING.
  URL <- paste("https://", CN, ".azurehdinsight.net/templeton/v1/jobs/",
               df$id, sep = "")
  Sys.sleep(2)
  r <- GET(URL,
           add_headers(.headers = c(`Content-type` = "application/json")),
           authenticate(HA, HP))
  rl <- content(r, "text", encoding = "UTF-8")
  df <- fromJSON(rl)
  message(paste("CMD Running: ", Sys.time()))
  message("Prep(P), Running(R), Completed(C)")
  DUR <- 2
  # print(df$status$state)
  while (df$status$state == "RUNNING" | df$status$state == "PREP") {
    Sys.sleep(DUR)
    # Back off gradually, capped at 5 seconds between polls.
    if (DUR < 5)
      DUR <- DUR + 1
    if (df$status$state == "PREP")
      message("P")
    if (df$status$state == "RUNNING")
      message("R")
    # print(df$status$state)
    r <- GET(URL,
             add_headers(.headers = c(`Content-type` = "application/json")),
             authenticate(HA, HP))
    rl <- content(r, "text", encoding = "UTF-8")
    rh <- headers(r)
    df <- fromJSON(rl)
  }
  if (df$status$state == "SUCCEEDED")
    message("S")
  if (df$status$state == "FAILED")
    message("F")
  STATE <- df$status$state
  message("Finished Running statement: ", Sys.time())
  return(TRUE)
}
# Load modis daily data
library(lubridate)
library(plyr)

# Parse the survey observation-date factor into Date values.
rs$ObsDate <- as.Date(levels(rs$ObsDate), "%m/%d/%Y %H:%M:%S %p")[rs$ObsDate]

vars_to_keep <- c("Location.ID", "ObsDate", "GrowthStageName",
                  "StemRust.Binary", "YellowRust.Binary", "NoRust.Binary")
tmp <- rs[vars_to_keep]

# Join the MODIS daily series to the survey variables.
dailies <- merge(modis_daily_data, tmp, by = "Location.ID")

# BUG FIX: these date columns were originally derived BEFORE `dailies`
# existed (it is only created by the merge above), so the script stopped
# with "object 'dailies' not found" -- and the merge would have discarded
# them anyway. Derive them from the merged frame instead.
# Assumes `index` encodes the date at fixed positions 13-22
# (e.g. "...YYYY.MM.DD") -- TODO confirm against modis_daily_data.
dailies$year <- substring(dailies$index, 13, 16)
dailies$month <- substring(dailies$index, 18, 19)
dailies$day <- substring(dailies$index, 21, 22)
dailies$date <- as.Date(paste(dailies$year, dailies$month, dailies$day, sep="-"))

# Negative values = MODIS observation before the survey date.
dailies$days.before.survey <- difftime(dailies$date, dailies$ObsDate, unit="days")

plot <- ggplot(dailies, aes(x=days.before.survey, y=EVI, color=NoRust.Binary))
plot <- plot + geom_line()
plot <- plot + facet_wrap(~ Location.ID, scales="free")
plot

# Fit a regression line for each Location.ID
library(data.table)
set.seed(1)
dat <- data.table(dailies)
models <- dat[, list(intercept = coef(lm(EVI ~ days.before.survey))[1],
                     coef = coef(lm(EVI ~ days.before.survey))[2]),
              by = Location.ID]
models <- data.frame(models)
dailies <- merge(dailies, models, by = "Location.ID", all.x = TRUE)

plot <- ggplot(dailies, aes(x = coef, color = NoRust.Binary)) + geom_density()
plot
/daily-data-eda.R
no_license
mellamoxr/PlantDiseaseSpread
R
false
false
1,227
r
# Load modis daily data
library(lubridate)
library(plyr)

# Parse the survey observation-date factor into Date values.
rs$ObsDate <- as.Date(levels(rs$ObsDate), "%m/%d/%Y %H:%M:%S %p")[rs$ObsDate]

vars_to_keep <- c("Location.ID", "ObsDate", "GrowthStageName",
                  "StemRust.Binary", "YellowRust.Binary", "NoRust.Binary")
tmp <- rs[vars_to_keep]

# Join the MODIS daily series to the survey variables.
dailies <- merge(modis_daily_data, tmp, by = "Location.ID")

# BUG FIX: these date columns were originally derived BEFORE `dailies`
# existed (it is only created by the merge above), so the script stopped
# with "object 'dailies' not found" -- and the merge would have discarded
# them anyway. Derive them from the merged frame instead.
# Assumes `index` encodes the date at fixed positions 13-22
# (e.g. "...YYYY.MM.DD") -- TODO confirm against modis_daily_data.
dailies$year <- substring(dailies$index, 13, 16)
dailies$month <- substring(dailies$index, 18, 19)
dailies$day <- substring(dailies$index, 21, 22)
dailies$date <- as.Date(paste(dailies$year, dailies$month, dailies$day, sep="-"))

# Negative values = MODIS observation before the survey date.
dailies$days.before.survey <- difftime(dailies$date, dailies$ObsDate, unit="days")

plot <- ggplot(dailies, aes(x=days.before.survey, y=EVI, color=NoRust.Binary))
plot <- plot + geom_line()
plot <- plot + facet_wrap(~ Location.ID, scales="free")
plot

# Fit a regression line for each Location.ID
library(data.table)
set.seed(1)
dat <- data.table(dailies)
models <- dat[, list(intercept = coef(lm(EVI ~ days.before.survey))[1],
                     coef = coef(lm(EVI ~ days.before.survey))[2]),
              by = Location.ID]
models <- data.frame(models)
dailies <- merge(dailies, models, by = "Location.ID", all.x = TRUE)

plot <- ggplot(dailies, aes(x = coef, color = NoRust.Binary)) + geom_density()
plot
library(Metrics)
library(forecast)
library(fpp)
library(smooth)
library(tseries)

plastic <- read.csv("C:\\Data_science\\EXCLER\\My Assignments\\Forecasting\\PlasticSales.csv")
View(plastic)
plot(plastic$Sales, type = 'l')

# converting data into time series
plastic_time <- ts(plastic$Sales, frequency = 12, start = c(61))
View(plastic_time)
plot(plastic_time)

# splitting data into train and test.
train <- plastic_time[1:47]
test <- plastic_time[48:60]

# converting into time series
train <- ts(train, frequency = 12)
test <- ts(test, frequency = 12)

# ------------ HoltWinters: level only (alpha = 0.2) ----------------------
hl_plastic_a <- HoltWinters(train, alpha = 0.2, beta = FALSE, gamma = FALSE)
hl_plastic_pred <- data.frame(predict(hl_plastic_a, n.ahead = 13))
plot(forecast(hl_plastic_a, h = 12))
hl_plastic_a_MAPE <- MAPE(hl_plastic_pred$fit, test) * 100
hl_plastic_a_MAPE

# alpha = 0.2, beta = 0.1, gamma = 0.1
hl_plastic_abg <- HoltWinters(train, alpha = 0.2, beta = 0.1, gamma = 0.1)
hl_plastic_abg_pred <- data.frame(predict(hl_plastic_abg, n.ahead = 13))
plot(forecast(hl_plastic_abg, h = 12))
hl_plastic_abg_MAPE <- MAPE(hl_plastic_abg_pred$fit, test) * 100
hl_plastic_abg_MAPE

# alpha = 0.2, beta = 0.1 (no seasonal component)
hl_plastic_ab <- HoltWinters(train, alpha = 0.2, beta = 0.1, gamma = FALSE)
hl_plastic_ab_pred <- data.frame(predict(hl_plastic_ab, n.ahead = 13))
plot(forecast(hl_plastic_ab, h = 12))
hl_plastic_ab_MAPE <- MAPE(hl_plastic_ab_pred$fit, test) * 100
hl_plastic_ab_MAPE

# final model (all smoothing parameters optimised) ------------------------
final_model <- HoltWinters(train)
final_model_predict <- data.frame(predict(final_model, n.ahead = 13))
plot(forecast(final_model, h = 13))
final_model_MAPE <- MAPE(final_model_predict$fit, test) * 100
final_model_MAPE

# --------------- with alpha optimised, beta fixed ------------------------
rm_alpha <- HoltWinters(train, beta = 0.1)
rm_alpha_predict <- data.frame(predict(rm_alpha, n.ahead = 13))
plot(forecast(rm_alpha, h = 12))
rm_alpha_MAPE <- MAPE(rm_alpha_predict$fit, test) * 100
rm_alpha_MAPE

# ------------------------ final table ------------------------------------
df_mape <- data.frame(
  c("hl_plastic_a_MAPE", "hl_plastic_abg_MAPE", "hl_plastic_ab_MAPE",
    "final_model_MAPE", "rm_alpha_MAPE"),
  c(hl_plastic_a_MAPE, hl_plastic_abg_MAPE, hl_plastic_ab_MAPE,
    final_model_MAPE, rm_alpha_MAPE)
)
colnames(df_mape) <- c("MODELS", "MAPE")
View(df_mape)

# ------------------------ Real data --------------------------------------
new_model <- HoltWinters(plastic_time)
plot(forecast(new_model, h = 13))
new_model_predict <- data.frame(predict(new_model, n.ahead = 13))
new_model_predict
View(new_model_predict)
/PlasticSales.r
no_license
preethi928/DataScience
R
false
false
2,672
r
library(Metrics)
library(forecast)
library(fpp)
library(smooth)
library(tseries)

plastic <- read.csv("C:\\Data_science\\EXCLER\\My Assignments\\Forecasting\\PlasticSales.csv")
View(plastic)
plot(plastic$Sales, type = 'l')

# converting data into time series
plastic_time <- ts(plastic$Sales, frequency = 12, start = c(61))
View(plastic_time)
plot(plastic_time)

# splitting data into train and test.
train <- plastic_time[1:47]
test <- plastic_time[48:60]

# converting into time series
train <- ts(train, frequency = 12)
test <- ts(test, frequency = 12)

# ------------ HoltWinters: level only (alpha = 0.2) ----------------------
hl_plastic_a <- HoltWinters(train, alpha = 0.2, beta = FALSE, gamma = FALSE)
hl_plastic_pred <- data.frame(predict(hl_plastic_a, n.ahead = 13))
plot(forecast(hl_plastic_a, h = 12))
hl_plastic_a_MAPE <- MAPE(hl_plastic_pred$fit, test) * 100
hl_plastic_a_MAPE

# alpha = 0.2, beta = 0.1, gamma = 0.1
hl_plastic_abg <- HoltWinters(train, alpha = 0.2, beta = 0.1, gamma = 0.1)
hl_plastic_abg_pred <- data.frame(predict(hl_plastic_abg, n.ahead = 13))
plot(forecast(hl_plastic_abg, h = 12))
hl_plastic_abg_MAPE <- MAPE(hl_plastic_abg_pred$fit, test) * 100
hl_plastic_abg_MAPE

# alpha = 0.2, beta = 0.1 (no seasonal component)
hl_plastic_ab <- HoltWinters(train, alpha = 0.2, beta = 0.1, gamma = FALSE)
hl_plastic_ab_pred <- data.frame(predict(hl_plastic_ab, n.ahead = 13))
plot(forecast(hl_plastic_ab, h = 12))
hl_plastic_ab_MAPE <- MAPE(hl_plastic_ab_pred$fit, test) * 100
hl_plastic_ab_MAPE

# final model (all smoothing parameters optimised) ------------------------
final_model <- HoltWinters(train)
final_model_predict <- data.frame(predict(final_model, n.ahead = 13))
plot(forecast(final_model, h = 13))
final_model_MAPE <- MAPE(final_model_predict$fit, test) * 100
final_model_MAPE

# --------------- with alpha optimised, beta fixed ------------------------
rm_alpha <- HoltWinters(train, beta = 0.1)
rm_alpha_predict <- data.frame(predict(rm_alpha, n.ahead = 13))
plot(forecast(rm_alpha, h = 12))
rm_alpha_MAPE <- MAPE(rm_alpha_predict$fit, test) * 100
rm_alpha_MAPE

# ------------------------ final table ------------------------------------
df_mape <- data.frame(
  c("hl_plastic_a_MAPE", "hl_plastic_abg_MAPE", "hl_plastic_ab_MAPE",
    "final_model_MAPE", "rm_alpha_MAPE"),
  c(hl_plastic_a_MAPE, hl_plastic_abg_MAPE, hl_plastic_ab_MAPE,
    final_model_MAPE, rm_alpha_MAPE)
)
colnames(df_mape) <- c("MODELS", "MAPE")
View(df_mape)

# ------------------------ Real data --------------------------------------
new_model <- HoltWinters(plastic_time)
plot(forecast(new_model, h = 13))
new_model_predict <- data.frame(predict(new_model, n.ahead = 13))
new_model_predict
View(new_model_predict)
\name{rcpp_states_list_to_DEmat} \alias{rcpp_states_list_to_DEmat} \title{C++ conversion of a states list to a dispersal-extinction matrix (DEmat)} \usage{ rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, amat = NULL, include_null_range = TRUE, normalize_TF = TRUE, makeCOO_TF = FALSE, min_precision = 1e-26) } \arguments{ \item{areas_list}{a list of lists of areas (numbers, starting with 0)} \item{states_list}{a list of lists of areas (numbers, starting with 0)} \item{dmat}{dispersal matrix from area to area} \item{elist}{a list of extinction probabilities} \item{amat}{A matrix specifying the probability of instantaneous transition from one area to another (as in standard character rate matrices).} \item{include_null_range}{include the null () range (NA) in the matrix (LAGRANGE default=TRUE)} \item{normalize_TF}{should the columns be -1 * rowsums?} \item{makeCOO_TF}{should the returned matrix be COO or standard dense (the latter is default).} \item{min_precision}{what is the effective minimum size for 0} } \value{ dmat (a standard Q matrix) } \description{ This function takes a list of states/ranges, a matrix describing relative dispersal probability (dmat) for each pair of areas, and a list describing the local extirpation probability for each area (elist), and calculates a transition matrix Qmat accordingly. } \details{ The size of the matrix will expand dramatically with the number of areas. See \code{\link{numstates_from_numareas}} for the calculation. Above 7 or so areas, making \code{Qmat} a COO-formatted matrix (COO=Coordinate list, see wikipedia, \url{http://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29} ) which can then be used in \code{\link[rexpokit]{rexpokit}}'s sparse-matrix algorithms, should be more efficient. (Sparse matrices are matrices made of mostly 0s.) 
} \examples{ # Specify the areas areas_list_txt = c("A", "B", "C") areas_list_txt # rcpp_states_list_to_DEmat function requires a 0-based list of areas areas_list = seq(0, length(areas_list_txt)-1, 1) areas_list \dontrun{ # Calculate the list of 0-based indices for each possible #geographic range, i.e. each combination of areas states_list = rcpp_areas_list_to_states_list(areas=areas_list, maxareas=3, include_null_range=FALSE) states_list states_list = rcpp_areas_list_to_states_list(areas=areas_list, maxareas=3, include_null_range=TRUE) states_list states_list = rcpp_areas_list_to_states_list(areas=areas_list, maxareas=2, include_null_range=TRUE) states_list states_list = rcpp_areas_list_to_states_list(areas=areas_list, maxareas=1, include_null_range=TRUE) states_list # Hard-code the along-branch dispersal and extinction rates d = 0.2 e = 0.1 # Calculate the dispersal weights matrix and the extinction weights matrix # Equal dispersal in all directions (unconstrained) areas = areas_list distances_mat = matrix(1, nrow=length(areas), ncol=length(areas)) dmat = matrix(d, nrow=length(areas), ncol=length(areas)) dmat # Equal extinction probability for all areas elist = rep(e, length(areas)) elist # Set up the instantaneous rate matrix (Q matrix, Qmat) # DON'T force a sparse-style (COO-formatted) matrix here force_sparse = FALSE Qmat = rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, include_null_range=TRUE, normalize_TF=TRUE, makeCOO_TF=force_sparse) Qmat # DO force a sparse-style (COO-formatted) matrix here force_sparse = TRUE Qmat = rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, include_null_range=TRUE, normalize_TF=TRUE, makeCOO_TF=force_sparse) Qmat # Repeat with an amat amat = dmat amat[is.numeric(amat)] = 0.33 # Set up the instantaneous rate matrix (Q matrix, Qmat) # DON'T force a sparse-style (COO-formatted) matrix here force_sparse = FALSE Qmat = rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, amat, 
include_null_range=TRUE, normalize_TF=TRUE, makeCOO_TF=force_sparse) Qmat # DO force a sparse-style (COO-formatted) matrix here force_sparse = TRUE Qmat = rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, amat, include_null_range=TRUE, normalize_TF=TRUE, makeCOO_TF=force_sparse) Qmat } } \author{ Nicholas Matzke \email{matzke@berkeley.edu} } \references{ \url{http://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29} Matzke N (2012). "Founder-event speciation in BioGeoBEARS package dramatically improves likelihoods and alters parameter inference in Dispersal-Extinction-Cladogenesis (DEC) analyses." _Frontiers of Biogeography_, *4*(suppl. 1), pp. 210. ISSN 1948-6596, Poster abstract published in the Conference Program and Abstracts of the International Biogeography Society 6th Biannual Meeting, Miami, Florida. Poster Session P10: Historical and Paleo-Biogeography. Poster 129B. January 11, 2013, <URL: \url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}>. Ree RH and Smith SA (2008). "Maximum likelihood inference of geographic range evolution by dispersal, local extinction, and cladogenesis." _Systematic Biology_, *57*(1), pp. 4-14. <URL: http://dx.doi.org/10.1080/10635150701883881>, <URL: http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Retrieve&db=PubMed&dopt=Citation&list_uids=18253896>. } \seealso{ \code{\link{numstates_from_numareas}}, \code{\link[stats]{convolve}} }
/man/rcpp_states_list_to_DEmat.Rd
no_license
wrathematics/cladoRcpp
R
false
false
5,413
rd
\name{rcpp_states_list_to_DEmat} \alias{rcpp_states_list_to_DEmat} \title{C++ conversion of a states list to a dispersal-extinction matrix (DEmat)} \usage{ rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, amat = NULL, include_null_range = TRUE, normalize_TF = TRUE, makeCOO_TF = FALSE, min_precision = 1e-26) } \arguments{ \item{areas_list}{a list of lists of areas (numbers, starting with 0)} \item{states_list}{a list of lists of areas (numbers, starting with 0)} \item{dmat}{dispersal matrix from area to area} \item{elist}{a list of extinction probabilities} \item{amat}{A matrix specifying the probability of instantaneous transition from one area to another (as in standard character rate matrices).} \item{include_null_range}{include the null () range (NA) in the matrix (LAGRANGE default=TRUE)} \item{normalize_TF}{should the columns be -1 * rowsums?} \item{makeCOO_TF}{should the returned matrix be COO or standard dense (the latter is default).} \item{min_precision}{what is the effective minimum size for 0} } \value{ dmat (a standard Q matrix) } \description{ This function takes a list of states/ranges, a matrix describing relative dispersal probability (dmat) for each pair of areas, and a list describing the local extirpation probability for each area (elist), and calculates a transition matrix Qmat accordingly. } \details{ The size of the matrix will expand dramatically with the number of areas. See \code{\link{numstates_from_numareas}} for the calculation. Above 7 or so areas, making \code{Qmat} a COO-formatted matrix (COO=Coordinate list, see wikipedia, \url{http://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29} ) which can then be used in \code{\link[rexpokit]{rexpokit}}'s sparse-matrix algorithms, should be more efficient. (Sparse matrices are matrices made of mostly 0s.) 
} \examples{ # Specify the areas areas_list_txt = c("A", "B", "C") areas_list_txt # rcpp_states_list_to_DEmat function requires a 0-based list of areas areas_list = seq(0, length(areas_list_txt)-1, 1) areas_list \dontrun{ # Calculate the list of 0-based indices for each possible #geographic range, i.e. each combination of areas states_list = rcpp_areas_list_to_states_list(areas=areas_list, maxareas=3, include_null_range=FALSE) states_list states_list = rcpp_areas_list_to_states_list(areas=areas_list, maxareas=3, include_null_range=TRUE) states_list states_list = rcpp_areas_list_to_states_list(areas=areas_list, maxareas=2, include_null_range=TRUE) states_list states_list = rcpp_areas_list_to_states_list(areas=areas_list, maxareas=1, include_null_range=TRUE) states_list # Hard-code the along-branch dispersal and extinction rates d = 0.2 e = 0.1 # Calculate the dispersal weights matrix and the extinction weights matrix # Equal dispersal in all directions (unconstrained) areas = areas_list distances_mat = matrix(1, nrow=length(areas), ncol=length(areas)) dmat = matrix(d, nrow=length(areas), ncol=length(areas)) dmat # Equal extinction probability for all areas elist = rep(e, length(areas)) elist # Set up the instantaneous rate matrix (Q matrix, Qmat) # DON'T force a sparse-style (COO-formatted) matrix here force_sparse = FALSE Qmat = rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, include_null_range=TRUE, normalize_TF=TRUE, makeCOO_TF=force_sparse) Qmat # DO force a sparse-style (COO-formatted) matrix here force_sparse = TRUE Qmat = rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, include_null_range=TRUE, normalize_TF=TRUE, makeCOO_TF=force_sparse) Qmat # Repeat with an amat amat = dmat amat[is.numeric(amat)] = 0.33 # Set up the instantaneous rate matrix (Q matrix, Qmat) # DON'T force a sparse-style (COO-formatted) matrix here force_sparse = FALSE Qmat = rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, amat, 
include_null_range=TRUE, normalize_TF=TRUE, makeCOO_TF=force_sparse) Qmat # DO force a sparse-style (COO-formatted) matrix here force_sparse = TRUE Qmat = rcpp_states_list_to_DEmat(areas_list, states_list, dmat, elist, amat, include_null_range=TRUE, normalize_TF=TRUE, makeCOO_TF=force_sparse) Qmat } } \author{ Nicholas Matzke \email{matzke@berkeley.edu} } \references{ \url{http://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_.28COO.29} Matzke N (2012). "Founder-event speciation in BioGeoBEARS package dramatically improves likelihoods and alters parameter inference in Dispersal-Extinction-Cladogenesis (DEC) analyses." _Frontiers of Biogeography_, *4*(suppl. 1), pp. 210. ISSN 1948-6596, Poster abstract published in the Conference Program and Abstracts of the International Biogeography Society 6th Biannual Meeting, Miami, Florida. Poster Session P10: Historical and Paleo-Biogeography. Poster 129B. January 11, 2013, <URL: \url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}>. Ree RH and Smith SA (2008). "Maximum likelihood inference of geographic range evolution by dispersal, local extinction, and cladogenesis." _Systematic Biology_, *57*(1), pp. 4-14. <URL: http://dx.doi.org/10.1080/10635150701883881>, <URL: http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Retrieve&db=PubMed&dopt=Citation&list_uids=18253896>. } \seealso{ \code{\link{numstates_from_numareas}}, \code{\link[stats]{convolve}} }
## early exit: product not available from a particular server jnk = utils::capture.output( expect_error( runGdal( product = "MCD18C1" # only available on lpdaac , collection = "061" , tileH = 18L , tileV = 3L , begin = "2019.01.01" , end = "2019.12.31" , MODISserverOrder = c("LAADS", "NSIDC") ) , pattern = "is not available on .* try another server or collection" ) ) ## early exit: `length(maskValue)` not `1L` or matching 'SDSstring' jnk = utils::capture.output( expect_error( runGdal( "MCD15A2H" , collection = "061" , tileH = 21 , tileV = c(7, 8) , begin = "2003001" , end = "2003010" , SDSstring = "110100" , maskValue = c(254L, 255L) , quiet = TRUE ) , pattern = "'maskValue' length needs to be 1 or match 'SDSstring'" ) )
/inst/tinytest/test-runGdal.R
permissive
itati01/MODIS
R
false
false
860
r
## early exit: product not available from a particular server jnk = utils::capture.output( expect_error( runGdal( product = "MCD18C1" # only available on lpdaac , collection = "061" , tileH = 18L , tileV = 3L , begin = "2019.01.01" , end = "2019.12.31" , MODISserverOrder = c("LAADS", "NSIDC") ) , pattern = "is not available on .* try another server or collection" ) ) ## early exit: `length(maskValue)` not `1L` or matching 'SDSstring' jnk = utils::capture.output( expect_error( runGdal( "MCD15A2H" , collection = "061" , tileH = 21 , tileV = c(7, 8) , begin = "2003001" , end = "2003010" , SDSstring = "110100" , maskValue = c(254L, 255L) , quiet = TRUE ) , pattern = "'maskValue' length needs to be 1 or match 'SDSstring'" ) )
my_height <- 158 my_height my_weight <- 50 my_weight bmi <- (50) / (158/ 100)^2 bmi my_name <- "曾巧雯" my_name
/Homework/隨堂練習1.R
no_license
tcw861011/-
R
false
false
125
r
my_height <- 158 my_height my_weight <- 50 my_weight bmi <- (50) / (158/ 100)^2 bmi my_name <- "曾巧雯" my_name
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/manip.r \name{filter} \alias{filter} \title{Return rows with matching conditions} \usage{ filter(.data, ...) } \arguments{ \item{.data}{A tbl. All main verbs are S3 generics and provide methods for \code{\link[=tbl_df]{tbl_df()}}, \code{\link[dtplyr:tbl_dt]{dtplyr::tbl_dt()}} and \code{\link[dbplyr:tbl_dbi]{dbplyr::tbl_dbi()}}.} \item{...}{Logical predicates defined in terms of the variables in \code{.data}. Multiple conditions are combined with \code{&}. Only rows where the conditon evalutes to \code{TRUE} are kept. These arguments are automatically \link[rlang:quo]{quoted} and \link[rlang:eval_tidy]{evaluated} in the context of the data frame. They support \link[rlang:quasiquotation]{unquoting} and splicing. See \code{vignette("programming")} for an introduction to these concepts.} } \value{ An object of the same class as \code{.data}. } \description{ Use \code{filter()} find rows/cases where conditions are true. Unlike base subsetting, rows where the condition evaluates to \code{NA} are dropped. } \details{ Note that dplyr is not yet smart enough to optimise filtering optimisation on grouped datasets that don't need grouped calculations. For this reason, filtering is often considerably faster on \code{\link[=ungroup]{ungroup()}}ed data. } \section{Useful filter functions}{ \itemize{ \item \code{\link{==}}, \code{\link{>}}, \code{\link{>=}} etc \item \code{\link{&}}, \code{\link{|}}, \code{\link{!}}, \code{\link[=xor]{xor()}} \item \code{\link[=is.na]{is.na()}} \item \code{\link[=between]{between()}}, \code{\link[=near]{near()}} } } \section{Tidy data}{ When applied to a data frame, row names are silently dropped. To preserve, convert to an explicit variable with \code{\link[tibble:rownames_to_column]{tibble::rownames_to_column()}}. 
} \section{Scoped filtering}{ The three \link{scoped} variants (\code{\link[=filter_all]{filter_all()}}, \code{\link[=filter_if]{filter_if()}} and \code{\link[=filter_at]{filter_at()}}) make it easy to apply a filtering condition to a selection of variables. } \examples{ filter(starwars, species == "Human") filter(starwars, mass > 1000) # Multiple criteria filter(starwars, hair_color == "none" & eye_color == "black") filter(starwars, hair_color == "none" | eye_color == "black") # Multiple arguments are equivalent to and filter(starwars, hair_color == "none", eye_color == "black") } \seealso{ \code{\link[=filter_all]{filter_all()}}, \code{\link[=filter_if]{filter_if()}} and \code{\link[=filter_at]{filter_at()}}. Other single table verbs: \code{\link{arrange}}, \code{\link{mutate}}, \code{\link{select}}, \code{\link{slice}}, \code{\link{summarise}} }
/man/filter.Rd
permissive
MhAmine/dplyr
R
false
true
2,719
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/manip.r \name{filter} \alias{filter} \title{Return rows with matching conditions} \usage{ filter(.data, ...) } \arguments{ \item{.data}{A tbl. All main verbs are S3 generics and provide methods for \code{\link[=tbl_df]{tbl_df()}}, \code{\link[dtplyr:tbl_dt]{dtplyr::tbl_dt()}} and \code{\link[dbplyr:tbl_dbi]{dbplyr::tbl_dbi()}}.} \item{...}{Logical predicates defined in terms of the variables in \code{.data}. Multiple conditions are combined with \code{&}. Only rows where the conditon evalutes to \code{TRUE} are kept. These arguments are automatically \link[rlang:quo]{quoted} and \link[rlang:eval_tidy]{evaluated} in the context of the data frame. They support \link[rlang:quasiquotation]{unquoting} and splicing. See \code{vignette("programming")} for an introduction to these concepts.} } \value{ An object of the same class as \code{.data}. } \description{ Use \code{filter()} find rows/cases where conditions are true. Unlike base subsetting, rows where the condition evaluates to \code{NA} are dropped. } \details{ Note that dplyr is not yet smart enough to optimise filtering optimisation on grouped datasets that don't need grouped calculations. For this reason, filtering is often considerably faster on \code{\link[=ungroup]{ungroup()}}ed data. } \section{Useful filter functions}{ \itemize{ \item \code{\link{==}}, \code{\link{>}}, \code{\link{>=}} etc \item \code{\link{&}}, \code{\link{|}}, \code{\link{!}}, \code{\link[=xor]{xor()}} \item \code{\link[=is.na]{is.na()}} \item \code{\link[=between]{between()}}, \code{\link[=near]{near()}} } } \section{Tidy data}{ When applied to a data frame, row names are silently dropped. To preserve, convert to an explicit variable with \code{\link[tibble:rownames_to_column]{tibble::rownames_to_column()}}. 
} \section{Scoped filtering}{ The three \link{scoped} variants (\code{\link[=filter_all]{filter_all()}}, \code{\link[=filter_if]{filter_if()}} and \code{\link[=filter_at]{filter_at()}}) make it easy to apply a filtering condition to a selection of variables. } \examples{ filter(starwars, species == "Human") filter(starwars, mass > 1000) # Multiple criteria filter(starwars, hair_color == "none" & eye_color == "black") filter(starwars, hair_color == "none" | eye_color == "black") # Multiple arguments are equivalent to and filter(starwars, hair_color == "none", eye_color == "black") } \seealso{ \code{\link[=filter_all]{filter_all()}}, \code{\link[=filter_if]{filter_if()}} and \code{\link[=filter_at]{filter_at()}}. Other single table verbs: \code{\link{arrange}}, \code{\link{mutate}}, \code{\link{select}}, \code{\link{slice}}, \code{\link{summarise}} }
library(compute.es) ### Name: propes ### Title: Proportions to Effect Size ### Aliases: propes ### Keywords: arith ### ** Examples # CALCULATE SEVERAL EFFECT SIZES BASED ON PROPORTIONS: propes(.50,.30, 30, 30)
/data/genthat_extracted_code/compute.es/examples/prop_to_es.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
220
r
library(compute.es) ### Name: propes ### Title: Proportions to Effect Size ### Aliases: propes ### Keywords: arith ### ** Examples # CALCULATE SEVERAL EFFECT SIZES BASED ON PROPORTIONS: propes(.50,.30, 30, 30)
"bone" <- structure(list(y4 = c(49.7, 48.4, 48.5, 47.2, 49.3, 53.7, 54.4, 52.7, 54.4, 48.3, 51.9, 55.5, 55, 49.8, 51.8, 53.3, 49.5, 55.3, 48.4, 51.8), y3 = c(49, 47.7, 47.8, 46.1, 48.9, 53.3, 54.3, 50.3, 52.3, 47.3, 51.6, 53, 53.7, 49.3, 51.2, 52.7, 48.4, 55.1, 48.1, 51.3), y2 = c(48.8, 47.3, 46.8, 45.3, 48.5, 53.2, 53, 50, 50.8, 47, 51.4, 49.2, 52.8, 48.9, 50.4, 51.7, 47.7, 54.6, 47.5, 47.6 ), y1 = c(47.8, 46.4, 46.3, 45.1, 47.6, 52.5, 51.2, 49.8, 48.1, 45, 51.2, 48.5, 52.1, 48.2, 49.6, 50.7, 47.2, 53.3, 46.2, 46.3 )), .Names = c("y4", "y3", "y2", "y1"), row.names = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20"), class = "data.frame")
/bone.R
no_license
StaThin/data
R
false
false
718
r
"bone" <- structure(list(y4 = c(49.7, 48.4, 48.5, 47.2, 49.3, 53.7, 54.4, 52.7, 54.4, 48.3, 51.9, 55.5, 55, 49.8, 51.8, 53.3, 49.5, 55.3, 48.4, 51.8), y3 = c(49, 47.7, 47.8, 46.1, 48.9, 53.3, 54.3, 50.3, 52.3, 47.3, 51.6, 53, 53.7, 49.3, 51.2, 52.7, 48.4, 55.1, 48.1, 51.3), y2 = c(48.8, 47.3, 46.8, 45.3, 48.5, 53.2, 53, 50, 50.8, 47, 51.4, 49.2, 52.8, 48.9, 50.4, 51.7, 47.7, 54.6, 47.5, 47.6 ), y1 = c(47.8, 46.4, 46.3, 45.1, 47.6, 52.5, 51.2, 49.8, 48.1, 45, 51.2, 48.5, 52.1, 48.2, 49.6, 50.7, 47.2, 53.3, 46.2, 46.3 )), .Names = c("y4", "y3", "y2", "y1"), row.names = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20"), class = "data.frame")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggPlotting.R \name{plotSCEBarAssayData} \alias{plotSCEBarAssayData} \title{Bar plot of assay data.} \usage{ plotSCEBarAssayData( inSCE, feature, sample = NULL, useAssay = "counts", featureLocation = NULL, featureDisplay = NULL, groupBy = NULL, xlab = NULL, ylab = NULL, axisSize = 10, axisLabelSize = 10, dotSize = 1, transparency = 1, defaultTheme = TRUE, gridLine = FALSE, summary = NULL, title = NULL, titleSize = NULL, combinePlot = TRUE ) } \arguments{ \item{inSCE}{Input \linkS4class{SingleCellExperiment} object with saved dimension reduction components or a variable with saved results. Required.} \item{feature}{Name of feature stored in assay of SingleCellExperiment object.} \item{sample}{Character vector. Indicates which sample each cell belongs to.} \item{useAssay}{Indicate which assay to use. Default "counts".} \item{featureLocation}{Indicates which column name of rowData to query gene.} \item{featureDisplay}{Indicates which column name of rowData to use to display feature for visualization.} \item{groupBy}{Groupings for each numeric value. A user may input a vector equal length to the number of the samples in the SingleCellExperiment object, or can be retrieved from the colData slot. Default NULL.} \item{xlab}{Character vector. Label for x-axis. Default NULL.} \item{ylab}{Character vector. Label for y-axis. Default NULL.} \item{axisSize}{Size of x/y-axis ticks. Default 10.} \item{axisLabelSize}{Size of x/y-axis labels. Default 10.} \item{dotSize}{Size of dots. Default 1.} \item{transparency}{Transparency of the dots, values will be 0-1. Default 1.} \item{defaultTheme}{Removes grid in plot and sets axis title size to 10 when TRUE. Default TRUE.} \item{gridLine}{Adds a horizontal grid line if TRUE. Will still be drawn even if defaultTheme is TRUE. Default FALSE.} \item{summary}{Adds a summary statistic, as well as a crossbar to the violin plot. Options are "mean" or "median". 
Default NULL.} \item{title}{Title of plot. Default NULL.} \item{titleSize}{Size of title of plot. Default 15.} \item{combinePlot}{Boolean. If multiple plots are generated (multiple samples, etc.), will combined plots using `cowplot::plot_grid`. Default TRUE.} } \value{ a ggplot of the barplot of assay data. } \description{ Visualizes values stored in the assay slot of a SingleCellExperiment object via a bar plot. } \examples{ plotSCEBarAssayData( inSCE = mouseBrainSubsetSCE, feature = "Apoe", groupBy = "sex" ) }
/man/plotSCEBarAssayData.Rd
permissive
ykoga07/singleCellTK
R
false
true
2,570
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggPlotting.R \name{plotSCEBarAssayData} \alias{plotSCEBarAssayData} \title{Bar plot of assay data.} \usage{ plotSCEBarAssayData( inSCE, feature, sample = NULL, useAssay = "counts", featureLocation = NULL, featureDisplay = NULL, groupBy = NULL, xlab = NULL, ylab = NULL, axisSize = 10, axisLabelSize = 10, dotSize = 1, transparency = 1, defaultTheme = TRUE, gridLine = FALSE, summary = NULL, title = NULL, titleSize = NULL, combinePlot = TRUE ) } \arguments{ \item{inSCE}{Input \linkS4class{SingleCellExperiment} object with saved dimension reduction components or a variable with saved results. Required.} \item{feature}{Name of feature stored in assay of SingleCellExperiment object.} \item{sample}{Character vector. Indicates which sample each cell belongs to.} \item{useAssay}{Indicate which assay to use. Default "counts".} \item{featureLocation}{Indicates which column name of rowData to query gene.} \item{featureDisplay}{Indicates which column name of rowData to use to display feature for visualization.} \item{groupBy}{Groupings for each numeric value. A user may input a vector equal length to the number of the samples in the SingleCellExperiment object, or can be retrieved from the colData slot. Default NULL.} \item{xlab}{Character vector. Label for x-axis. Default NULL.} \item{ylab}{Character vector. Label for y-axis. Default NULL.} \item{axisSize}{Size of x/y-axis ticks. Default 10.} \item{axisLabelSize}{Size of x/y-axis labels. Default 10.} \item{dotSize}{Size of dots. Default 1.} \item{transparency}{Transparency of the dots, values will be 0-1. Default 1.} \item{defaultTheme}{Removes grid in plot and sets axis title size to 10 when TRUE. Default TRUE.} \item{gridLine}{Adds a horizontal grid line if TRUE. Will still be drawn even if defaultTheme is TRUE. Default FALSE.} \item{summary}{Adds a summary statistic, as well as a crossbar to the violin plot. Options are "mean" or "median". 
Default NULL.} \item{title}{Title of plot. Default NULL.} \item{titleSize}{Size of title of plot. Default 15.} \item{combinePlot}{Boolean. If multiple plots are generated (multiple samples, etc.), will combined plots using `cowplot::plot_grid`. Default TRUE.} } \value{ a ggplot of the barplot of assay data. } \description{ Visualizes values stored in the assay slot of a SingleCellExperiment object via a bar plot. } \examples{ plotSCEBarAssayData( inSCE = mouseBrainSubsetSCE, feature = "Apoe", groupBy = "sex" ) }
#Training dataset size (TDS) and performance (remove records) #update file paths to run (search for lines with "###" to find where required) setwd("###") source("###/all_functions_ranger.R") source("###/opt_functions.R") #libraries library(ranger) #get the data Int <- readRDS("###/data/GloBIplus_Int20EVs.RDS") Non <- readRDS("###/data/allNon_sameCont.RDS") SD_foc <- readRDS("###/data/SD_focUpdate.rds") #add source_aerial_mam column to Int (because it's in the target and therefore potentially the noninteraction source column) Int$source_aerial_mam <- 0 SD_foc$source_aerial_mam <- 0 #cut global dataset to species with 5 or more records ch<-data.frame(table(Int$sourceTaxonName)) prds <- ch$Var1[ch$Freq>4] Int<-Int[Int$sourceTaxonName%in%prds,] Non<-Non[Non$sourceTaxonName%in%prds,] #remove columns not required kp <- c("targetTaxonName","sourceTaxonName","interact","outside",paste("target", "eig", 1:21, sep=""), paste("source", "eig", 1:21, sep="")) #cut to 21 because there are 21 ecomorphological variables nms <- names(Non)[!(grepl("eig",names(Non)))] kp <- unique(c(kp,nms)) Int <- Int[,names(Int)%in%c(kp,"interact","outside")] Non <- Non[,names(Non)%in%c(kp,"interact","outside")] SD_foc <- SD_foc[,names(SD_foc)%in%c(kp)] #deleted "interact","outside" because don't need this in this dataset #the function rf_TDSmod <- function(x,y,ins,out,abs,dt_test, thresh,mtr,mdepth,ntrees,perc,...){ obs <- x obs$interact <- as.factor(TRUE) obs$outside = "present" y <- y[y$sourceTaxonName%in%unique(obs$sourceTaxonName),] unobs_in <- y[y$outside=="FALSE",] unobs_in <- unobs_in[sample(nrow(unobs_in), (dim(obs)[1]/(ins+out)*ins)*abs, replace = FALSE),] #unobs for training unobs_out <- y[y$outside=="TRUE",] unobs_out <- unobs_out[sample(nrow(unobs_out), (dim(obs)[1]/(ins+out)*out)*abs, replace = FALSE),] unobs <- rbind(unobs_in,unobs_out) unobs$obs = NULL unobs$interact <- as.factor(FALSE) data <- rbind(obs,unobs) data$outside = NULL data = data[,-tax_cols(data)] # dt_test = 
dt_test[,names(data)] #remove rows based on perc (remove from data and data_w) sel1 <- round(perc/100*dim(data)[1]) sel2 <- sample(1:dim(data)[1],sel1) data <- data[sel2,] data_w <- ifelse(data$interact=="TRUE",1,1/(table(data$interact)[2]/table(data$interact)[1])) dt_test$interact <- as.factor(dt_test$interact) rf = ranger(formula = interact ~., data = data, mtry = mtr, num.threads = 20, probability = T, importance = 'impurity', case.weights = data_w, max.depth = mdepth, num.trees = ntrees) predic = predict(rf, data=dt_test[,-(which(names(dt_test)%in%c("interact","sourceTaxonName","targetTaxonName")))]) scores = predic$predictions[,1] lbls <- dt_test$interact lbls <- ifelse(lbls=="TRUE",1,0) all_auc <- auc(scores, lbls) predic = predic$predictions[,1] > thresh pred_perf <- table(dt_test$interact, predic) all_tss <- round(tss(pred_perf),3) round(tss(pred_perf),3) round(tssF(pred_perf),3) list(Tsk=c(all_tss),Ac=c(all_auc),num_removed = sel1) } #put it in a for loop set.seed(123) output <- list() for(i in 1:100){ dt <- replicate(10,rf_TDSmod(Int,Non,ins=2.5,out=1,abs=4.75,SD_foc,thresh=0.31,mtr=42,ntrees=400,mdepth=0, perc = i)) dt <- rbind(dt,i) output[[length(output) + 1]] <- dt print(i) } saveRDS(output,"###/results/training_data_size_results.rds") #get the required data from the list together to plot op <- data.frame(t(do.call("cbind", output))) op <- data.frame(apply(op,2,unlist)) plot(op$i,op$Tsk,xlab="% of full training dataset", ylab = "TSS", pch = 19, cex = 0.5, col=rgb(red=0.1, green=0.2, blue=0.2, alpha=0.3))
/script/data quality manipulation and model performance/recordRemoval&Replace_modelPerformance/training dataset size_removeRecords.R
no_license
JohnLlewelyn/random-forests-for-predicting-predator-prey-interactions-in-terrestrial-vertebrates
R
false
false
3,738
r
#Training dataset size (TDS) and performance (remove records) #update file paths to run (search for lines with "###" to find where required) setwd("###") source("###/all_functions_ranger.R") source("###/opt_functions.R") #libraries library(ranger) #get the data Int <- readRDS("###/data/GloBIplus_Int20EVs.RDS") Non <- readRDS("###/data/allNon_sameCont.RDS") SD_foc <- readRDS("###/data/SD_focUpdate.rds") #add source_aerial_mam column to Int (because it's in the target and therefore potentially the noninteraction source column) Int$source_aerial_mam <- 0 SD_foc$source_aerial_mam <- 0 #cut global dataset to species with 5 or more records ch<-data.frame(table(Int$sourceTaxonName)) prds <- ch$Var1[ch$Freq>4] Int<-Int[Int$sourceTaxonName%in%prds,] Non<-Non[Non$sourceTaxonName%in%prds,] #remove columns not required kp <- c("targetTaxonName","sourceTaxonName","interact","outside",paste("target", "eig", 1:21, sep=""), paste("source", "eig", 1:21, sep="")) #cut to 21 because there are 21 ecomorphological variables nms <- names(Non)[!(grepl("eig",names(Non)))] kp <- unique(c(kp,nms)) Int <- Int[,names(Int)%in%c(kp,"interact","outside")] Non <- Non[,names(Non)%in%c(kp,"interact","outside")] SD_foc <- SD_foc[,names(SD_foc)%in%c(kp)] #deleted "interact","outside" because don't need this in this dataset #the function rf_TDSmod <- function(x,y,ins,out,abs,dt_test, thresh,mtr,mdepth,ntrees,perc,...){ obs <- x obs$interact <- as.factor(TRUE) obs$outside = "present" y <- y[y$sourceTaxonName%in%unique(obs$sourceTaxonName),] unobs_in <- y[y$outside=="FALSE",] unobs_in <- unobs_in[sample(nrow(unobs_in), (dim(obs)[1]/(ins+out)*ins)*abs, replace = FALSE),] #unobs for training unobs_out <- y[y$outside=="TRUE",] unobs_out <- unobs_out[sample(nrow(unobs_out), (dim(obs)[1]/(ins+out)*out)*abs, replace = FALSE),] unobs <- rbind(unobs_in,unobs_out) unobs$obs = NULL unobs$interact <- as.factor(FALSE) data <- rbind(obs,unobs) data$outside = NULL data = data[,-tax_cols(data)] # dt_test = 
dt_test[,names(data)] #remove rows based on perc (remove from data and data_w) sel1 <- round(perc/100*dim(data)[1]) sel2 <- sample(1:dim(data)[1],sel1) data <- data[sel2,] data_w <- ifelse(data$interact=="TRUE",1,1/(table(data$interact)[2]/table(data$interact)[1])) dt_test$interact <- as.factor(dt_test$interact) rf = ranger(formula = interact ~., data = data, mtry = mtr, num.threads = 20, probability = T, importance = 'impurity', case.weights = data_w, max.depth = mdepth, num.trees = ntrees) predic = predict(rf, data=dt_test[,-(which(names(dt_test)%in%c("interact","sourceTaxonName","targetTaxonName")))]) scores = predic$predictions[,1] lbls <- dt_test$interact lbls <- ifelse(lbls=="TRUE",1,0) all_auc <- auc(scores, lbls) predic = predic$predictions[,1] > thresh pred_perf <- table(dt_test$interact, predic) all_tss <- round(tss(pred_perf),3) round(tss(pred_perf),3) round(tssF(pred_perf),3) list(Tsk=c(all_tss),Ac=c(all_auc),num_removed = sel1) } #put it in a for loop set.seed(123) output <- list() for(i in 1:100){ dt <- replicate(10,rf_TDSmod(Int,Non,ins=2.5,out=1,abs=4.75,SD_foc,thresh=0.31,mtr=42,ntrees=400,mdepth=0, perc = i)) dt <- rbind(dt,i) output[[length(output) + 1]] <- dt print(i) } saveRDS(output,"###/results/training_data_size_results.rds") #get the required data from the list together to plot op <- data.frame(t(do.call("cbind", output))) op <- data.frame(apply(op,2,unlist)) plot(op$i,op$Tsk,xlab="% of full training dataset", ylab = "TSS", pch = 19, cex = 0.5, col=rgb(red=0.1, green=0.2, blue=0.2, alpha=0.3))
# # This is the server logic of a Shiny web application. You can run the # application by clicking 'Run App' above. # # Find out more about building applications with Shiny here: # # http://shiny.rstudio.com/ # # Define server logic required to draw a histogram shinyServer(function(input, output, session) { hideTab(inputId = "tags", target = tab01) hideTab(inputId = "tags", target = tab02) hideTab(inputId = "tags", target = tab03) source("components/welcome_server.R", local=TRUE) source("components/tab_01_server.R", local=TRUE) source("components/tab_02_server.R", local=TRUE) source("components/tab_03_server.R", local=TRUE) }) # end of shinyServer
/server.R
no_license
hesham-rafi/covid-19
R
false
false
685
r
# # This is the server logic of a Shiny web application. You can run the # application by clicking 'Run App' above. # # Find out more about building applications with Shiny here: # # http://shiny.rstudio.com/ # # Define server logic required to draw a histogram shinyServer(function(input, output, session) { hideTab(inputId = "tags", target = tab01) hideTab(inputId = "tags", target = tab02) hideTab(inputId = "tags", target = tab03) source("components/welcome_server.R", local=TRUE) source("components/tab_01_server.R", local=TRUE) source("components/tab_02_server.R", local=TRUE) source("components/tab_03_server.R", local=TRUE) }) # end of shinyServer
#' Convert to curve space #' #' This function converts SRVFs to curves #' #' @param q array describing SRVF (n,T) #' @return beta array describing curve #' @keywords srvf alignment #' @references Srivastava, A., Klassen, E., Joshi, S., Jermyn, I., (2011). Shape analysis of elastic curves in euclidean spaces. Pattern Analysis and Machine Intelligence, IEEE Transactions on 33 (7), 1415-1428. #' @export #' @examples #' data("mpeg7") #' q = curve_to_q(beta[,,1,1]) #' beta1 = q_to_curve(q) q_to_curve <- function(q){ T1 = ncol(q) n = nrow(q) qnorm = rep(0, T1) for (i in 1:T1) { qnorm[i] = pvecnorm(q[, i], 2) } integrand = matrix(0, n, T1) integrand=t(apply(q,1,function(qrow) qrow*qnorm )) beta = cumtrapz(1:T1, integrand, 2)/T1 return(beta) }
/fuzzedpackages/fdasrvf/R/q_to_curve.R
no_license
akhikolla/testpackages
R
false
false
797
r
#' Convert to curve space #' #' This function converts SRVFs to curves #' #' @param q array describing SRVF (n,T) #' @return beta array describing curve #' @keywords srvf alignment #' @references Srivastava, A., Klassen, E., Joshi, S., Jermyn, I., (2011). Shape analysis of elastic curves in euclidean spaces. Pattern Analysis and Machine Intelligence, IEEE Transactions on 33 (7), 1415-1428. #' @export #' @examples #' data("mpeg7") #' q = curve_to_q(beta[,,1,1]) #' beta1 = q_to_curve(q) q_to_curve <- function(q){ T1 = ncol(q) n = nrow(q) qnorm = rep(0, T1) for (i in 1:T1) { qnorm[i] = pvecnorm(q[, i], 2) } integrand = matrix(0, n, T1) integrand=t(apply(q,1,function(qrow) qrow*qnorm )) beta = cumtrapz(1:T1, integrand, 2)/T1 return(beta) }
cancer_data_new<-cancer_data[,-1] summary(cancer_data_new) cancer_data_model<-model.matrix(Level~.,cancer_data)[,-1] #Multinomial logistic regression require(foreign) require(nnet) require(ggplot2) require(reshape2) training_index<-sample(1:nrow(cancer_data_model),0.8*nrow(cancer_data_model)) cancer_data_new$Level<-relevel(cancer_data_new$Level,ref = "Low") ml<-multinom(Level~.,data = cancer_data_new,subset = training_index) summary(ml) ml.fit<-predict(ml,cancer_data_new[-training_index,]) mean((ml.fit!=apply(cancer_data_new[-training_index,],2,factor))^2) #really bad result library(glmnet) set.seed(1) training_index<-sample(1:nrow(cancer_data_model),0.8*nrow(cancer_data_model)) cancer_data_train<-cancer_data_model[training_index,] cancer_data_test<-cancer_data_model[-training_index,] y_train<-cancer_data$Level[training_index] y_test<-cancer_data$Level[-training_index] cvfit = cv.glmnet(cancer_data_train,y_train,type.measure="class",alpha=1,family="multinomial") bestlam=cvfit$lambda.min pred.lasso = predict(cvfit, s = bestlam, newx = cancer_data_test,type = "response") predicted <- colnames(pred.lasso)[apply(pred.lasso,1,which.max)] mean(predicted!=y_test) #The cvfit contains 10*number of lambda training MSE and it automatically pick the smallest one which #also corresponse with the smallest lambda. It provides the best lambda and best multinomial logistic regression model #using lasso. #The final test MSE is 0.03.
/ordinal logistic regression.R
no_license
JIanying-Liang/STSCI4740-Project
R
false
false
1,450
r
cancer_data_new<-cancer_data[,-1] summary(cancer_data_new) cancer_data_model<-model.matrix(Level~.,cancer_data)[,-1] #Multinomial logistic regression require(foreign) require(nnet) require(ggplot2) require(reshape2) training_index<-sample(1:nrow(cancer_data_model),0.8*nrow(cancer_data_model)) cancer_data_new$Level<-relevel(cancer_data_new$Level,ref = "Low") ml<-multinom(Level~.,data = cancer_data_new,subset = training_index) summary(ml) ml.fit<-predict(ml,cancer_data_new[-training_index,]) mean((ml.fit!=apply(cancer_data_new[-training_index,],2,factor))^2) #really bad result library(glmnet) set.seed(1) training_index<-sample(1:nrow(cancer_data_model),0.8*nrow(cancer_data_model)) cancer_data_train<-cancer_data_model[training_index,] cancer_data_test<-cancer_data_model[-training_index,] y_train<-cancer_data$Level[training_index] y_test<-cancer_data$Level[-training_index] cvfit = cv.glmnet(cancer_data_train,y_train,type.measure="class",alpha=1,family="multinomial") bestlam=cvfit$lambda.min pred.lasso = predict(cvfit, s = bestlam, newx = cancer_data_test,type = "response") predicted <- colnames(pred.lasso)[apply(pred.lasso,1,which.max)] mean(predicted!=y_test) #The cvfit contains 10*number of lambda training MSE and it automatically pick the smallest one which #also corresponse with the smallest lambda. It provides the best lambda and best multinomial logistic regression model #using lasso. #The final test MSE is 0.03.
library(drake) library(ggplot2) library(plyr) library(ggpmisc) library(data.table) library(dplyr) library(xtable) library(MALDIquant) library(xgboost) library(iml) library(caret) library(randomForest) library(doParallel) library(SHAPforxgboost) #### Define your own path to the dataset #### #dpath<-'./regression/' #dpath<-'/Users/lptolik/Documents/Projects/MSpeaks/dataN2TIC/regression/' dpath<-'/Users/lptolik/Dropbox/Скальпель/DBData/regression/' getFreeMem<-function(){ #as.numeric(system("awk '/MemFree/ {print $2}' /proc/meminfo", intern=TRUE))/1e6 return(0) } #' Get peak file names from peak files directory. #' #' @return list of file names for peaks get_peaks<-function(dpath){ cat(format(Sys.time(), "%b %d %X"),'Function: get_peaks("',dpath,'") starts. Mem:',getFreeMem(),'GB\n') fl<-dir(path = dpath,pattern = '*.peak.rds') fl<-fl[grep('diag_32',fl,inver=TRUE)] idx32<-sapply(fl,function(.x)file.exists(paste0(dpath,sub('diag_[0-9]+\\.','diag_32.',.x)))) fl<-fl[idx32] if(grepl('/$',dpath)){ p<-dpath }else{ p<-paste0(dpath,'/') } cat(format(Sys.time(), "%b %d %X"),'Function: get_peaks("',dpath,'") finish.\n') return(paste0(p,fl)) # return(fl) } #' Prepare feature matrix. #' Method load files named in the peaks parameter, and convert them into feature matrix. #' Add all annotation required for the further data analysis including resolution, mode, #' exp_setup, diagnosis, percentage of norm tissue etc. #' #' @param peaks -- list of peak files to be converted into feature matrix #' prepare_feature_matrix<-function(peaks,norm_shift=0,monoisotopic=FALSE,size=3L:10L){ cat(format(Sys.time(), "%b %d %X"),'Function: prepare_feature_matrix("',peaks,'",',norm_shift,') starts. 
Mem:',getFreeMem(),'GB\n') # n<-peaks[[1]] # cat('prepare_feature_matrix',n) # d<-data.frame(name=n,MZ_1=rnorm(10),MZ_2=2*rnorm(10)) # #write.csv(d,paste0(n,'.csv')) # return(d) getMD<-function(p){ as.data.frame(metaData(p)) } getRDS<-function(f){ cat(format(Sys.time(), "%b %d %X"),'Function: getRDS("',f,'") starts. Mem:',getFreeMem(),'GB\n') res<-try(readRDS(f)) cat(format(Sys.time(), "%b %d %X"),class(res),'.\n') if(inherits(res, "try-error")){ return(list()) }else{ return(res) } } norm<-sub('diag_[0-9]+\\.','diag_32.',peaks) idx<-sapply(norm,file.exists) l<-lapply(c(peaks,norm[idx]),getRDS) peaksL<-do.call(c,l) if(all(grepl('res_2',peaks))){ tol=5e-4 }else{ tol=5e-5 } if(monoisotopic){ if(all(grepl('mode_2',peaks))){ K=TRUE Cl=FALSE }else{ K=FALSE Cl=TRUE } peaksL<-myPeakList(peaksL, minCor=0.95, tolerance=tol, size=size,Cl=Cl,K=K) } dl<-lapply(peaksL, getMD) md<-do.call(rbind,dl) md$norm.p<-as.numeric(as.character(md$norm.p)) md$tumor.p<-as.numeric(as.character(md$tumor.p)) md$necro.p<-as.numeric(as.character(md$necro.p)) if(any(grepl('othr.p',names(md)))){ md$othr.p<-as.numeric(as.character(md$othr.p)) md$target<-md$norm.p+md$othr.p }else{ md$othr.p<-0 md$target<-md$norm.p } md$target[md$diagnosis==32]<- md$target[md$diagnosis==32]+norm_shift md$fname<-basename(peaks[1]) wf<-determineWarpingFunctions(peaksL, method="lowess", plot=FALSE,minFrequency=0.05) aPeaks<-warpMassPeaks(peaksL,wf) bPeaks <- binPeaks(aPeaks, method="strict",tolerance=tol) fpeaks <- filterPeaks(bPeaks, labels=md$diag, minFrequency=0.25, mergeWhitelists=TRUE) featureMatrix <- intensityMatrix(fpeaks) idNA<-which(is.na(featureMatrix),arr.ind =TRUE) featureMatrix[idNA]<-0 colnames(featureMatrix)<-paste0('MZ_',round(as.numeric(colnames(featureMatrix)),3)) fm<-cbind(md,featureMatrix) tot<-fm$norm.p+fm$tumor.p+fm$necro.p+fm$othr.p idx<-which(tot>=100) fm<-fm[idx,] cat(format(Sys.time(), "%b %d %X"),'feature matrix',dim(featureMatrix),'\n') cat(format(Sys.time(), "%b %d %X"),'filtered 
matrix',dim(fm),'\n') cat(format(Sys.time(), "%b %d %X"),'Function: prepare_feature_matrix("',peaks,'",',norm_shift,') finish.\n') return(fm) } #normtypes<-factor(c('None'))#,'Pareto','Autoscaling')) #filtertypes<-c('None')#,'ZVar','Corr') feature_filter<-function(fm,ftype){ cat(format(Sys.time(), "%b %d %X"),'Function: feature_filter("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',ftype,'") starts.\n') idx<-grep("MZ_.*",names(fm)) features<-fm[,idx] mdt<-fm[,-idx] mdt$Filter<-ftype res<-switch (ftype, None=features, ZVar=filter_nzv(features), Corr=filter_corr(features) ) res<-cbind(mdt,res) cat(format(Sys.time(), "%b %d %X"),'Function: feature_filter("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',ftype,'") finish.\n') return(res) } filter_nzv<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: filter_nzv; dim(fm)=',dim(fm),'\n') nzv <- nearZeroVar(fm) if(length(nzv)>0){ res<-fm[,-nzv] }else{ res<-fm } cat(format(Sys.time(), "%b %d %X"),'Function: filter_nzv; length(nzv)=',length(nzv),'; dim(res)=',dim(res),'\n') return(res) } filter_corr<-function(fm,cutoff = .8){ cat(format(Sys.time(), "%b %d %X"),'Function: filter_corr; dim(fm)=',dim(fm),'\n') fm1<-filter_nzv(fm) descrCor <- cor(fm1) highlyCorDescr <- findCorrelation(descrCor, cutoff = cutoff) if(length(highlyCorDescr)>0){ res<-fm1[,-highlyCorDescr] }else{ res<-fm1 } cat(format(Sys.time(), "%b %d %X"),'Function: filter_corr; length(highlyCorDescr)=',length(highlyCorDescr),'; dim(res)=',dim(res),'\n') return(res) } #' Title #' #' @param fm feature matrix #' @param normtype type of norm to apply #' #' @return normalized fm normalize<-function(fm,normtype){ cat(format(Sys.time(), "%b %d %X"),'Function: normalize(fm,"',normtype,'") starts. 
Mem:',getFreeMem(),'GB\n') cat('dim(fm)=',dim(fm),'\n') #fm<-fml[[1]] #cat(names(fm),'\n') cidx<-grep('MZ_.*',names(fm)) #cat(cidx,'\n') mdt<-fm[,-cidx] mz<-fm[,cidx] #cat(normtype,dim(mdt),dim(mz),'\n') mdt$Norm<-normtype cat(unique(as.character(mdt$Norm)),unique(mdt$fname),'\n') mz<-switch(as.character(normtype), None=mz, Autoscaling=scale(mz), Pareto=paretoscale(mz)) cat(format(Sys.time(), "%b %d %X"),'Function: normalize(fm,"',normtype,'") finish.\n') return(cbind(mdt,mz)) } paretoscale<-function(mz){ cat(format(Sys.time(), "%b %d %X"),'Function: paretoscale',' starts. Mem:',getFreeMem(),'GB\n') s<-apply(mz,2,sd) a<-apply(mz,2,mean) return(scale(mz,center = a,scale = sqrt(s))) } get_mdt<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: get_mdt("',fm$fname[1],'") starts. Mem:',getFreeMem(),'GB\n') mdt<-fm %>% dplyr::select(spectrumid,patientid,diagnosis,t.id,smpl.id,target) %>% unique cat(format(Sys.time(), "%b %d %X"),'Function: get_mdt("',fm$fname[1],'") finish.\n') return(mdt) } groups<-factor(c('train','test')) smpl_split_fm<-function(fm,split=0.6){ cat(format(Sys.time(), "%b %d %X"),'Function: smpl_split_fm("',fm$fname[1],'","',as.character(fm$Norm[1]),'") starts. Mem:',getFreeMem(),'GB\n') mdt<-get_mdt(fm) smpl<-mdt %>% dplyr::select(smpl.id,target) %>% unique trainIndexSmpl <- createDataPartition(smpl$target, p = split, list = FALSE, times = 1) test_smpl<-smpl$smpl.id[-trainIndexSmpl] fm$grp<-groups[1] fm$grp[fm$smpl.id %in% test_smpl]<-groups[2] cat(format(Sys.time(), "%b %d %X"),'Function: smpl_split_fm("',fm$fname[1],'","',as.character(fm$Norm[1]),'") finish.\n') return(fm) } train_model<-function(fm,modeltype){ cat(format(Sys.time(), "%b %d %X"),'Function: train_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',modeltype,'") starts. 
Mem:',getFreeMem(),'GB\n') fm$was.trained<-0 idx<-grep("(MZ_.*|target)",names(fm)) trdx<-which(fm$grp==groups[1]) if(smpl<length(trdx)){ jdx<-trdx[sample.int(length(trdx),size = smpl)] }else{ jdx<-trdx } fm$was.trained[jdx]<-1 train<-fm[jdx,idx] cat(format(Sys.time(), "%b %d %X"),'train dataset',dim(train),'\n') res<-switch (modeltype, rf=train_rf(train), xgb=train_xgb(train) ) cat(format(Sys.time(), "%b %d %X"),'Function: train_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',modeltype,'") finish.\n') return(list(model=res,data=fm)) } smpl<-5e6 test_model<-function(mod){ fm<-mod$data model<-mod$model cat(format(Sys.time(), "%b %d %X"),'Function: test_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',model$method,'") starts. Mem:',getFreeMem(),'GB\n') idx<-grep("(MZ_.*|target)",names(fm)) test<-fm[,idx] res<-predict(model,newdata=test) fm$predict<-res fm$method<-model$method cat(format(Sys.time(), "%b %d %X"),'Function: test_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',model$method,'") finish.\n') return(fm) } eval_model<-function(tst){ return(postResample(tst[!tst$was.trained, "predict"], tst[!tst$was.trained, "target"])) } apply_model<-function(mod,newdata){ fm<-newdata model<-mod$model cat(format(Sys.time(), "%b %d %X"),'Function: test_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',model$method,'") starts. 
Mem:',getFreeMem(),'GB\n') idx<-match(model$finalModel$xNames,names(fm)) if(any(is.na(idx))){ i<-which(is.na(idx)) stop('Model parameters [',model$finalModel$xNames[i],'] are missing from the dataset.\n') } test<-fm[,c('target',names(fm)[idx])] res<-predict(model,newdata=test) fm$predict<-res fm$method<-model$method cat(format(Sys.time(), "%b %d %X"),'Function: test_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',model$method,'") finish.\n') return(fm) } make_point_plot<-function(test){ my.formula <- y ~ x p <- ggplot(data = test, aes(x = target, y = predict)) + geom_smooth(method = "lm", se=FALSE, color="black", formula = my.formula) + stat_poly_eq(formula = my.formula, eq.with.lhs = "italic(hat(y))~`=`~", aes(label = paste(..eq.label.., ..rr.label.., sep = "*plain(\",\")~")), parse = TRUE) return(p) } plot_test_point<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_test("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') test<-fm[fm$grp==groups[2],] p<-make_point_plot(test)+geom_point() + geom_jitter() cat(format(Sys.time(), "%b %d %X"),'Function: plot_test("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_train_point<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') test<-fm[fm$was.trained==0,] p<-make_point_plot(test)+geom_point()+ geom_jitter(aes(x = target, y = predict,color='blue'),data=fm[fm$was.trained==1,]) cat(format(Sys.time(), "%b %d %X"),'Function: plot_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_test_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_test("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') test<-fm[fm$grp==groups[2],] p<-make_point_plot(test)+geom_boxplot(aes(group=target)) cat(format(Sys.time(), "%b %d %X"),'Function: plot_test("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_train_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') test<-fm[fm$was.trained==0,] p<-make_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = predict,color='blue'),data=fm[fm$was.trained==1,]) cat(format(Sys.time(), "%b %d %X"),'Function: plot_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } #### TCP plots #### plot_tcp_box<-function(fm,theme=theme_grey()){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_tcp_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') fmtcp<-fm fmtcp$tcp<-100-fm$target fmtcp$predict<-100-fm$predict test<-fmtcp[fmtcp$was.trained==0,] p<-make_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = predict,color='blue'),data=fmtcp[fmtcp$was.trained==1,])+ xlab('Assigned TCP')+ylab('Predicted TCP')+theme cat(format(Sys.time(), "%b %d %X"),'Function: plot_tcp_train("',fmtcp$fname[1],'","', as.character(fmtcp$Norm[1]),'","',fmtcp$method[1],'") finish\n') return(p) } smpl_box_theme<-theme_bw() + theme( plot.title = element_text(face = "bold", size = 16), axis.title = element_text(size=20), legend.background = element_rect(fill = "white", size = 4, colour = "white"), legend.justification = c(0.99, 0.5), legend.position = c(0.99, 0.5)) plot_tcp_smpl_box<-function(fm,theme=theme_grey(base_size=14),palette=NULL){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_tcp_smpl_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') fmtcp<-fm fmtcp$target<-100-fm$target fmtcp$predict<-100-fm$predict sum_tst<-ddply(fmtcp,.(smpl.id,target,was.trained), summarise,min.pred=min(predict),mean.pred=mean(predict), max.pred=max(predict)) fmtcp$samples<-factor(fmtcp$smpl.id,levels = sum_tst$smpl.id[order(sum_tst$target)]) fmtcp$Set<-factor(fmtcp$was.trained,labels=c('Validation','Train')) sum_tst$samples<-factor(sum_tst$smpl.id,levels = sum_tst$smpl.id[order(sum_tst$target)]) p <- ggplot(fmtcp, aes(x=samples, y=predict,color=Set)) + geom_boxplot()+ geom_point(data=sum_tst, aes(x=samples, y=target), color='black', shape='+',size=6)+ coord_flip()+xlab('Predicted TCP')+ylab('Sample ID') p<-p+theme if(!is.null(palette)){ p<-p+scale_colour_brewer(type = "seq", palette =palette) } cat(format(Sys.time(), "%b %d %X"),'Function: plot_tcp_smpl_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_median_spectrum_tcp_box<-function(fm,theme=theme_grey(base_size=26)){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_tcp_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') fmtcp<-fm fmtcp$target<-100-fm$target fmtcp$predict<-100-fm$predict spec_tst<-make_mean_spectrum(fmtcp) test<-spec_tst[spec_tst$was.trained==0,] p<-make_median_spectrum_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = median.pred),color='blue', size=2, data=spec_tst[spec_tst$was.trained==1,])+ xlab('Assigned TCP')+ylab('Median of predicted TCP')+theme cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_tcp_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_mean_spectrum_tcp_box<-function(fm,theme=theme_grey(base_size=26)){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_tcp_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') fmtcp<-fm fmtcp$target<-100-fm$target fmtcp$predict<-100-fm$predict spec_tst<-make_mean_spectrum(fmtcp) test<-spec_tst[spec_tst$was.trained==0,] p<-make_mean_spectrum_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = mean.pred),color='blue',size=2, data=spec_tst[spec_tst$was.trained==1,])+ xlab('Assigned TCP')+ylab('Mean of predicted TCP')+theme cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_tcp_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } #### Spectrum aggregation ##### make_mean_spectrum<-function(tst){ spec_tst<-ddply(tst,.(spectrumid,patientid,diagnosis,smpl.id,t.id,norm.p,tumor.p,necro.p,norm.type,othr.p,target,was.trained),summarise,min.pred=min(predict),mean.pred=mean(predict),median.pred=median(predict),max.pred=max(predict)) return(spec_tst) } #### Spectrum median plot ##### make_median_spectrum_point_plot<-function(test){ my.formula <- y ~ x p <- ggplot(data = test, aes(x = target, y = median.pred)) + geom_smooth(method = "lm", se=FALSE, color="black", formula = my.formula) + stat_poly_eq(formula = my.formula, eq.with.lhs = "italic(hat(y))~`=`~", aes(label = paste(..eq.label.., ..rr.label.., sep = "*plain(\",\")~")), parse = TRUE) return(p) } plot_median_spectrum_test_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_test_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') spec_tst<-make_mean_spectrum(fm) test<-spec_tst[spec_tst$was.trained==0,] p<-make_median_spectrum_point_plot(test)+geom_boxplot(aes(group=target)) cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_test_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_median_spectrum_train_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_train_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') spec_tst<-make_mean_spectrum(fm) test<-spec_tst[spec_tst$was.trained==0,] p<-make_median_spectrum_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = median.pred),color='blue',data=spec_tst[spec_tst$was.trained==1,]) cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_train_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } #### Spectrum mean plot ##### make_mean_spectrum_point_plot<-function(test){ my.formula <- y ~ x p <- ggplot(data = test, aes(x = target, y = mean.pred)) + geom_smooth(method = "lm", se=FALSE, color="black", formula = my.formula) + stat_poly_eq(formula = my.formula, eq.with.lhs = "italic(hat(y))~`=`~", aes(label = paste(..eq.label.., ..rr.label.., sep = "*plain(\",\")~")), parse = TRUE) return(p) } plot_mean_spectrum_test_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_test_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') spec_tst<-make_mean_spectrum(fm) test<-spec_tst[spec_tst$was.trained==0,] p<-make_mean_spectrum_point_plot(test)+geom_boxplot(aes(group=target)) cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_test_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_mean_spectrum_train_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_train_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') spec_tst<-make_mean_spectrum(fm) test<-spec_tst[spec_tst$was.trained==0,] p<-make_mean_spectrum_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = mean.pred),color='blue',data=spec_tst[spec_tst$was.trained==1,]) cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_train_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_train_smpl_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_train_smpl_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') sum_tst<-ddply(fm,.(smpl.id,target,was.trained),summarise,min.pred=min(predict),mean.pred=mean(predict),max.pred=max(predict)) fm$samples<-factor(fm$smpl.id,levels = sum_tst$smpl.id[order(sum_tst$target)]) sum_tst$samples<-factor(sum_tst$smpl.id,levels = sum_tst$smpl.id[order(sum_tst$target)]) p <- ggplot(fm, aes(x=samples, y=predict,color=factor(was.trained))) + geom_boxplot()+ geom_point(data=sum_tst, aes(x=samples, y=target), color='black', shape='+',size=6)+ coord_flip() cat(format(Sys.time(), "%b %d %X"),'Function: plot_train_smpl_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } train_rf<-function(train){ cat(format(Sys.time(), "%b %d %X"),'Function: train_rf',' starts. 
Mem:',getFreeMem(),'GB\n') fitCV10<-trainControl(method = "repeatedcv", number = 10, repeats = 3) N<-dim(train)[1] p<-dim(train)[2]-1 #tunegrid <- expand.grid(.mtry=c(1:(p/3)),.ntree=c(500,1000,1500)) tunegrid <- expand.grid(mtry=sample(c(1:(p/3)),size=10))#,.ntree=c(500,1000,1500)) if(!exists('ncores')){ ncores<- detectCores() } cat(format(Sys.time(), "%b %d %X"),'ncores=',ncores,'.\n') cl <- makePSOCKcluster(ncores) registerDoParallel(cl) rfFitCVpat <- train(target ~ ., data = train, method = "rf",#customRF, trControl = fitCV10, tuneGrid=tunegrid, verbose = FALSE) stopCluster(cl) cat(format(Sys.time(), "%b %d %X"),'Function: train_rf',' finish.\n') return(rfFitCVpat) } train_xgb<-function(train){ cat(format(Sys.time(), "%b %d %X"),'Function: train_xgb',' starts. Mem:',getFreeMem(),'GB\n') fitCV10<-trainControl(method = "repeatedcv", number = 10, repeats = 3) if(!exists('ncores')){ ncores<- detectCores() } cat(format(Sys.time(), "%b %d %X"),'ncores=',ncores,'.\n') cl <- makePSOCKcluster(ncores) registerDoParallel(cl) xboostFitCVspec <- train(target ~ ., data = train, method = "xgbDART", trControl = fitCV10, verbose = FALSE) stopCluster(cl) cat(format(Sys.time(), "%b %d %X"),'Function: train_xgb',' finish.\n') return(xboostFitCVspec) } xgb_importance<-function(mod){ cat(format(Sys.time(), "%b %d %X"),'Function: xgb_importance starts. \n') fm<-mod$data model<-mod$model xgb.importance(model =model$finalModel)->imp_mat cat(format(Sys.time(), "%b %d %X"),'Function: xgb_importance finish. \n') return(list(data=fm,model=model,importance=imp_mat)) } xgb_plot_importance<-function(imp){ cat(format(Sys.time(), "%b %d %X"),'Function: xgb_plot_importance starts. \n') fm<-imp$data model<-imp$model$finalModel imp_mat<-imp$importance p<-xgb.ggplot.importance(imp_mat, rel_to_first = TRUE, xlab = "Relative importance")->p cat(format(Sys.time(), "%b %d %X"),'Function: xgb_plot_importance finish. 
\n') return(p) } get_shap_values<-function(imp){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values starts. \n') fm<-imp$data #cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values dim(fm)',dim(fm),' \n') model<-imp$model$finalModel #cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values length(model)',length(model),' \n') imp<-imp$importance #cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values dim(imp)',dim(imp),' \n') idx<-grep("(MZ_.*)",names(fm)) tm<-as.matrix(fm[,idx]) #cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values dim(tm)',dim(tm),' \n') shap_values <- SHAPforxgboost::shap.values(xgb_model = model, X_train = tm) shap_values$tm<-tm return(shap_values) } get_shap_dep_plot<-function(shap_values){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_dep_plot starts. \n') shap_values$tm->tm shap_long <- SHAPforxgboost::shap.prep(shap_contrib = shap_values$shap_score, X_train = tm) fig_list <- lapply(names(shap_values$mean_shap_score)[1:16], shap.plot.dependence, data_long = shap_long) p<-gridExtra::grid.arrange(grobs = fig_list, ncol = 4) return(p) } get_shap_plot_data<-function(shap_values){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_plot_data starts. \n') plot_data <- shap.prep.stack.data(shap_contrib = shap_values$shap_score, top_n = 10, n_groups = 10) return(plot_data) } get_shap_force_plot<-function(plot_data){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_force_plot starts. \n') p<-shap.plot.force_plot(plot_data,zoom_in = FALSE) return(p) } get_shap_force_group_plot<-function(plot_data){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_force_group_plot starts. \n') p<-shap.plot.force_plot_bygroup(plot_data) return(p) } get_xgb.shap_plot<-function(imp){ cat(format(Sys.time(), "%b %d %X"),'Function: get_xgb.shap_plot starts. 
\n') fm<-imp$data model<-imp$model$finalModel imp<-imp$importance idx<-grep("(MZ_.*)",names(fm)) tm<-as.matrix(fm[,idx]) contr<-predict(model,newdata=tm, predcontrib = TRUE) shap<-xgb.plot.shap(tm,contr,model=model$finalModel, features = imp$Feature[1:10], plot = FALSE) cat(format(Sys.time(), "%b %d %X"),'Function: get_xgb.shap_plot finish. \n') return(shap) } get_shap_plot<-function(imp){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_plot starts. \n') fm<-imp$data model<-imp$model$finalModel imp<-imp$importance idx<-grep("(MZ_.*)",names(fm)) tm<-as.matrix(fm[,idx]) sh_res<-shap.score.rank(xgb_model = model, X_train =tm, shap_approx = F ) sh_long<-shap.prep(shap = sh_res, X_train = tm , top_n = 10 ) p<-plot.shap.summary(data_long = sh_long) cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_plot finish. \n') return(p) } train_trigger<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: train_trigger',' starts. Mem:',getFreeMem(),'GB\n') return(length(unique(fm$norm.p))>2) } #### Train reduced model #### get_reduced_fm<-function(imp,threshold=5){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values starts. 
\n') fm<-imp$data shap_values <- get_shap_values(imp) idxMZ<-grep('MZ_',names(fm)) shval<-cbind(fm[,-idxMZ],shap_values$shap_score) idxMZ<-grep('MZ_',names(shval)) sh_mean<-ddply(shval,.(target),function(.x){apply(.x[,idxMZ],2,mean)}) sh_mean_long<-melt(sh_mean,id='target') idxMZ<-grep('MZ_',names(fm)) idxOpt<-match(as.character(unique( sh_mean_long$variable[abs(sh_mean_long$value)>threshold])), names(fm)) fm_opt<-cbind(fm[,-idxMZ],fm[,idxOpt]) return(fm_opt) } #' Prepare panel of three PCA plots: 1-2, 2-3, 1-3 #' #' @param fm feature matrix to plot #' @param color name of the column to color plot with #' plot_pca<-function(fm, color){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_pca','\n') data=fm[[1]] stop('Not implemented yet') } get_model_fname<-function(ms_setup,diag,expt,method,idx){ cat(format(Sys.time(), "%b %d %X"),'Function: get_model_fname','\n') res=ms_setup[1] mode=ms_setup[2] mz=ms_setup[3] dev=ifelse(expt==1,2,4) fname<-sprintf('peak2019.diag_%d.expt_%d.res_%d.mode_%d.dev_%d.mz_%d.%s.cv10.%s.fmodel.rds', diag,expt,res,mode,dev,mz,method,idx) return(fname) } #Dataset file prefix fpref<-'peak2019' #' Prepare dataset file name list. #' Using provided parameters of the spectra this function create list of files #' to be read for the dataset creation. #' #' This function uses parameter \code{fpref} as a datafile prefix. Different #' datasets could use different prefixes such as 'peak2019'. #' #' @param res -- resolution #' @param mode -- registration mode #' @param mz -- width of mass range #' @param ddiag -- diagnosis #' @param path -- path to datafile folder #' #' @return list of filepath strings. 
get_fm_fname<-function(res,mode,mz,ddiag,path){ cat(format(Sys.time(), "%b %d %X"),'Function: get_fm_fname','\n') fpatt<-sprintf('%s.diag_%d.expt_.*.res_%d.mode_%d.dev_.*.mz_%d.peak.rds', fpref,ddiag,res,mode,mz) fname<-dir(path = path,pattern = fpatt) return(fname) } get_model<-function(ms_setup,diag,expt,method,idx){ cat(format(Sys.time(), "%b %d %X"),'Function: get_model','\n') fname<-get_model_fname(ms_setup,diag,expt,method,idx) fpath<-paste0(path,fname) if(file.exists(fpath)){ m<- readRDS(fpath) return(list(model=m,ms_setup=ms_setup,diag=diag,expt=expt,method=method,idx=idx)) }else{ return(list()) } } mapMZ<-function(model,fm){ cat(format(Sys.time(), "%b %d %X"),'Function: mapMZ','\n') modMZ<-as.numeric(sub('^MZ_','',model$xNames)) dataMZ<-as.numeric(sub('^MZ_','',names(fm)[grep('^MZ',names(fm))])) } predict_dataset<-function(model,ms_setup,ddiag,dexpt){ cat(format(Sys.time(), "%b %d %X"),'Function: predict_dataset','\n') if(length(mode)==0) return(data.frame()) m<-model$model fm<-load_dataset(model,ms_setup,ddiag,dexpt) if(dim(fm)[1]==0) return(data.frame()) fmapped<-mapMZ(m,fm) } load_dataset<-function(res,mode,mz,diag){ cat(format(Sys.time(), "%b %d %X"),'Function: load_dataset','\n') path<-dpath fmfname<-get_fm_fname(res,mode,mz,diag,path) fpath<-paste0(path,fmfname) return(fpath) } get_pat_df<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: get_pat_df','\n') if(dim(fm)[2]>2){ return(unique(fm[,c("patientid","diagnosis")])) }else{ return(data.frame()) } } get_spec_df<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: get_spec_df','\n') if(dim(fm)[2]>2){ return(unique(fm[,c("spectrumid","patientid","diagnosis")])) }else{ return(data.frame()) } } get_dim<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: get_dim','\n') return(data.frame(nrow=dim(fm)[1],ncol=dim(fm)[2])) }
/R/functions.R
permissive
smartscalpel/regressionCV
R
false
false
30,155
r
library(drake) library(ggplot2) library(plyr) library(ggpmisc) library(data.table) library(dplyr) library(xtable) library(MALDIquant) library(xgboost) library(iml) library(caret) library(randomForest) library(doParallel) library(SHAPforxgboost) #### Define your own path to the dataset #### #dpath<-'./regression/' #dpath<-'/Users/lptolik/Documents/Projects/MSpeaks/dataN2TIC/regression/' dpath<-'/Users/lptolik/Dropbox/Скальпель/DBData/regression/' getFreeMem<-function(){ #as.numeric(system("awk '/MemFree/ {print $2}' /proc/meminfo", intern=TRUE))/1e6 return(0) } #' Get peak file names from peak files directory. #' #' @return list of file names for peaks get_peaks<-function(dpath){ cat(format(Sys.time(), "%b %d %X"),'Function: get_peaks("',dpath,'") starts. Mem:',getFreeMem(),'GB\n') fl<-dir(path = dpath,pattern = '*.peak.rds') fl<-fl[grep('diag_32',fl,inver=TRUE)] idx32<-sapply(fl,function(.x)file.exists(paste0(dpath,sub('diag_[0-9]+\\.','diag_32.',.x)))) fl<-fl[idx32] if(grepl('/$',dpath)){ p<-dpath }else{ p<-paste0(dpath,'/') } cat(format(Sys.time(), "%b %d %X"),'Function: get_peaks("',dpath,'") finish.\n') return(paste0(p,fl)) # return(fl) } #' Prepare feature matrix. #' Method load files named in the peaks parameter, and convert them into feature matrix. #' Add all annotation required for the further data analysis including resolution, mode, #' exp_setup, diagnosis, percentage of norm tissue etc. #' #' @param peaks -- list of peak files to be converted into feature matrix #' prepare_feature_matrix<-function(peaks,norm_shift=0,monoisotopic=FALSE,size=3L:10L){ cat(format(Sys.time(), "%b %d %X"),'Function: prepare_feature_matrix("',peaks,'",',norm_shift,') starts. 
Mem:',getFreeMem(),'GB\n') # n<-peaks[[1]] # cat('prepare_feature_matrix',n) # d<-data.frame(name=n,MZ_1=rnorm(10),MZ_2=2*rnorm(10)) # #write.csv(d,paste0(n,'.csv')) # return(d) getMD<-function(p){ as.data.frame(metaData(p)) } getRDS<-function(f){ cat(format(Sys.time(), "%b %d %X"),'Function: getRDS("',f,'") starts. Mem:',getFreeMem(),'GB\n') res<-try(readRDS(f)) cat(format(Sys.time(), "%b %d %X"),class(res),'.\n') if(inherits(res, "try-error")){ return(list()) }else{ return(res) } } norm<-sub('diag_[0-9]+\\.','diag_32.',peaks) idx<-sapply(norm,file.exists) l<-lapply(c(peaks,norm[idx]),getRDS) peaksL<-do.call(c,l) if(all(grepl('res_2',peaks))){ tol=5e-4 }else{ tol=5e-5 } if(monoisotopic){ if(all(grepl('mode_2',peaks))){ K=TRUE Cl=FALSE }else{ K=FALSE Cl=TRUE } peaksL<-myPeakList(peaksL, minCor=0.95, tolerance=tol, size=size,Cl=Cl,K=K) } dl<-lapply(peaksL, getMD) md<-do.call(rbind,dl) md$norm.p<-as.numeric(as.character(md$norm.p)) md$tumor.p<-as.numeric(as.character(md$tumor.p)) md$necro.p<-as.numeric(as.character(md$necro.p)) if(any(grepl('othr.p',names(md)))){ md$othr.p<-as.numeric(as.character(md$othr.p)) md$target<-md$norm.p+md$othr.p }else{ md$othr.p<-0 md$target<-md$norm.p } md$target[md$diagnosis==32]<- md$target[md$diagnosis==32]+norm_shift md$fname<-basename(peaks[1]) wf<-determineWarpingFunctions(peaksL, method="lowess", plot=FALSE,minFrequency=0.05) aPeaks<-warpMassPeaks(peaksL,wf) bPeaks <- binPeaks(aPeaks, method="strict",tolerance=tol) fpeaks <- filterPeaks(bPeaks, labels=md$diag, minFrequency=0.25, mergeWhitelists=TRUE) featureMatrix <- intensityMatrix(fpeaks) idNA<-which(is.na(featureMatrix),arr.ind =TRUE) featureMatrix[idNA]<-0 colnames(featureMatrix)<-paste0('MZ_',round(as.numeric(colnames(featureMatrix)),3)) fm<-cbind(md,featureMatrix) tot<-fm$norm.p+fm$tumor.p+fm$necro.p+fm$othr.p idx<-which(tot>=100) fm<-fm[idx,] cat(format(Sys.time(), "%b %d %X"),'feature matrix',dim(featureMatrix),'\n') cat(format(Sys.time(), "%b %d %X"),'filtered 
matrix',dim(fm),'\n') cat(format(Sys.time(), "%b %d %X"),'Function: prepare_feature_matrix("',peaks,'",',norm_shift,') finish.\n') return(fm) } #normtypes<-factor(c('None'))#,'Pareto','Autoscaling')) #filtertypes<-c('None')#,'ZVar','Corr') feature_filter<-function(fm,ftype){ cat(format(Sys.time(), "%b %d %X"),'Function: feature_filter("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',ftype,'") starts.\n') idx<-grep("MZ_.*",names(fm)) features<-fm[,idx] mdt<-fm[,-idx] mdt$Filter<-ftype res<-switch (ftype, None=features, ZVar=filter_nzv(features), Corr=filter_corr(features) ) res<-cbind(mdt,res) cat(format(Sys.time(), "%b %d %X"),'Function: feature_filter("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',ftype,'") finish.\n') return(res) } filter_nzv<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: filter_nzv; dim(fm)=',dim(fm),'\n') nzv <- nearZeroVar(fm) if(length(nzv)>0){ res<-fm[,-nzv] }else{ res<-fm } cat(format(Sys.time(), "%b %d %X"),'Function: filter_nzv; length(nzv)=',length(nzv),'; dim(res)=',dim(res),'\n') return(res) } filter_corr<-function(fm,cutoff = .8){ cat(format(Sys.time(), "%b %d %X"),'Function: filter_corr; dim(fm)=',dim(fm),'\n') fm1<-filter_nzv(fm) descrCor <- cor(fm1) highlyCorDescr <- findCorrelation(descrCor, cutoff = cutoff) if(length(highlyCorDescr)>0){ res<-fm1[,-highlyCorDescr] }else{ res<-fm1 } cat(format(Sys.time(), "%b %d %X"),'Function: filter_corr; length(highlyCorDescr)=',length(highlyCorDescr),'; dim(res)=',dim(res),'\n') return(res) } #' Title #' #' @param fm feature matrix #' @param normtype type of norm to apply #' #' @return normalized fm normalize<-function(fm,normtype){ cat(format(Sys.time(), "%b %d %X"),'Function: normalize(fm,"',normtype,'") starts. 
Mem:',getFreeMem(),'GB\n') cat('dim(fm)=',dim(fm),'\n') #fm<-fml[[1]] #cat(names(fm),'\n') cidx<-grep('MZ_.*',names(fm)) #cat(cidx,'\n') mdt<-fm[,-cidx] mz<-fm[,cidx] #cat(normtype,dim(mdt),dim(mz),'\n') mdt$Norm<-normtype cat(unique(as.character(mdt$Norm)),unique(mdt$fname),'\n') mz<-switch(as.character(normtype), None=mz, Autoscaling=scale(mz), Pareto=paretoscale(mz)) cat(format(Sys.time(), "%b %d %X"),'Function: normalize(fm,"',normtype,'") finish.\n') return(cbind(mdt,mz)) } paretoscale<-function(mz){ cat(format(Sys.time(), "%b %d %X"),'Function: paretoscale',' starts. Mem:',getFreeMem(),'GB\n') s<-apply(mz,2,sd) a<-apply(mz,2,mean) return(scale(mz,center = a,scale = sqrt(s))) } get_mdt<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: get_mdt("',fm$fname[1],'") starts. Mem:',getFreeMem(),'GB\n') mdt<-fm %>% dplyr::select(spectrumid,patientid,diagnosis,t.id,smpl.id,target) %>% unique cat(format(Sys.time(), "%b %d %X"),'Function: get_mdt("',fm$fname[1],'") finish.\n') return(mdt) } groups<-factor(c('train','test')) smpl_split_fm<-function(fm,split=0.6){ cat(format(Sys.time(), "%b %d %X"),'Function: smpl_split_fm("',fm$fname[1],'","',as.character(fm$Norm[1]),'") starts. Mem:',getFreeMem(),'GB\n') mdt<-get_mdt(fm) smpl<-mdt %>% dplyr::select(smpl.id,target) %>% unique trainIndexSmpl <- createDataPartition(smpl$target, p = split, list = FALSE, times = 1) test_smpl<-smpl$smpl.id[-trainIndexSmpl] fm$grp<-groups[1] fm$grp[fm$smpl.id %in% test_smpl]<-groups[2] cat(format(Sys.time(), "%b %d %X"),'Function: smpl_split_fm("',fm$fname[1],'","',as.character(fm$Norm[1]),'") finish.\n') return(fm) } train_model<-function(fm,modeltype){ cat(format(Sys.time(), "%b %d %X"),'Function: train_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',modeltype,'") starts. 
Mem:',getFreeMem(),'GB\n') fm$was.trained<-0 idx<-grep("(MZ_.*|target)",names(fm)) trdx<-which(fm$grp==groups[1]) if(smpl<length(trdx)){ jdx<-trdx[sample.int(length(trdx),size = smpl)] }else{ jdx<-trdx } fm$was.trained[jdx]<-1 train<-fm[jdx,idx] cat(format(Sys.time(), "%b %d %X"),'train dataset',dim(train),'\n') res<-switch (modeltype, rf=train_rf(train), xgb=train_xgb(train) ) cat(format(Sys.time(), "%b %d %X"),'Function: train_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',modeltype,'") finish.\n') return(list(model=res,data=fm)) } smpl<-5e6 test_model<-function(mod){ fm<-mod$data model<-mod$model cat(format(Sys.time(), "%b %d %X"),'Function: test_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',model$method,'") starts. Mem:',getFreeMem(),'GB\n') idx<-grep("(MZ_.*|target)",names(fm)) test<-fm[,idx] res<-predict(model,newdata=test) fm$predict<-res fm$method<-model$method cat(format(Sys.time(), "%b %d %X"),'Function: test_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',model$method,'") finish.\n') return(fm) } eval_model<-function(tst){ return(postResample(tst[!tst$was.trained, "predict"], tst[!tst$was.trained, "target"])) } apply_model<-function(mod,newdata){ fm<-newdata model<-mod$model cat(format(Sys.time(), "%b %d %X"),'Function: test_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',model$method,'") starts. 
Mem:',getFreeMem(),'GB\n') idx<-match(model$finalModel$xNames,names(fm)) if(any(is.na(idx))){ i<-which(is.na(idx)) stop('Model parameters [',model$finalModel$xNames[i],'] are missing from the dataset.\n') } test<-fm[,c('target',names(fm)[idx])] res<-predict(model,newdata=test) fm$predict<-res fm$method<-model$method cat(format(Sys.time(), "%b %d %X"),'Function: test_model("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$Filter[1],'","',model$method,'") finish.\n') return(fm) } make_point_plot<-function(test){ my.formula <- y ~ x p <- ggplot(data = test, aes(x = target, y = predict)) + geom_smooth(method = "lm", se=FALSE, color="black", formula = my.formula) + stat_poly_eq(formula = my.formula, eq.with.lhs = "italic(hat(y))~`=`~", aes(label = paste(..eq.label.., ..rr.label.., sep = "*plain(\",\")~")), parse = TRUE) return(p) } plot_test_point<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_test("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') test<-fm[fm$grp==groups[2],] p<-make_point_plot(test)+geom_point() + geom_jitter() cat(format(Sys.time(), "%b %d %X"),'Function: plot_test("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_train_point<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') test<-fm[fm$was.trained==0,] p<-make_point_plot(test)+geom_point()+ geom_jitter(aes(x = target, y = predict,color='blue'),data=fm[fm$was.trained==1,]) cat(format(Sys.time(), "%b %d %X"),'Function: plot_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_test_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_test("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') test<-fm[fm$grp==groups[2],] p<-make_point_plot(test)+geom_boxplot(aes(group=target)) cat(format(Sys.time(), "%b %d %X"),'Function: plot_test("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_train_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') test<-fm[fm$was.trained==0,] p<-make_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = predict,color='blue'),data=fm[fm$was.trained==1,]) cat(format(Sys.time(), "%b %d %X"),'Function: plot_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } #### TCP plots #### plot_tcp_box<-function(fm,theme=theme_grey()){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_tcp_train("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') fmtcp<-fm fmtcp$tcp<-100-fm$target fmtcp$predict<-100-fm$predict test<-fmtcp[fmtcp$was.trained==0,] p<-make_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = predict,color='blue'),data=fmtcp[fmtcp$was.trained==1,])+ xlab('Assigned TCP')+ylab('Predicted TCP')+theme cat(format(Sys.time(), "%b %d %X"),'Function: plot_tcp_train("',fmtcp$fname[1],'","', as.character(fmtcp$Norm[1]),'","',fmtcp$method[1],'") finish\n') return(p) } smpl_box_theme<-theme_bw() + theme( plot.title = element_text(face = "bold", size = 16), axis.title = element_text(size=20), legend.background = element_rect(fill = "white", size = 4, colour = "white"), legend.justification = c(0.99, 0.5), legend.position = c(0.99, 0.5)) plot_tcp_smpl_box<-function(fm,theme=theme_grey(base_size=14),palette=NULL){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_tcp_smpl_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') fmtcp<-fm fmtcp$target<-100-fm$target fmtcp$predict<-100-fm$predict sum_tst<-ddply(fmtcp,.(smpl.id,target,was.trained), summarise,min.pred=min(predict),mean.pred=mean(predict), max.pred=max(predict)) fmtcp$samples<-factor(fmtcp$smpl.id,levels = sum_tst$smpl.id[order(sum_tst$target)]) fmtcp$Set<-factor(fmtcp$was.trained,labels=c('Validation','Train')) sum_tst$samples<-factor(sum_tst$smpl.id,levels = sum_tst$smpl.id[order(sum_tst$target)]) p <- ggplot(fmtcp, aes(x=samples, y=predict,color=Set)) + geom_boxplot()+ geom_point(data=sum_tst, aes(x=samples, y=target), color='black', shape='+',size=6)+ coord_flip()+xlab('Predicted TCP')+ylab('Sample ID') p<-p+theme if(!is.null(palette)){ p<-p+scale_colour_brewer(type = "seq", palette =palette) } cat(format(Sys.time(), "%b %d %X"),'Function: plot_tcp_smpl_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_median_spectrum_tcp_box<-function(fm,theme=theme_grey(base_size=26)){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_tcp_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') fmtcp<-fm fmtcp$target<-100-fm$target fmtcp$predict<-100-fm$predict spec_tst<-make_mean_spectrum(fmtcp) test<-spec_tst[spec_tst$was.trained==0,] p<-make_median_spectrum_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = median.pred),color='blue', size=2, data=spec_tst[spec_tst$was.trained==1,])+ xlab('Assigned TCP')+ylab('Median of predicted TCP')+theme cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_tcp_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_mean_spectrum_tcp_box<-function(fm,theme=theme_grey(base_size=26)){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_tcp_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') fmtcp<-fm fmtcp$target<-100-fm$target fmtcp$predict<-100-fm$predict spec_tst<-make_mean_spectrum(fmtcp) test<-spec_tst[spec_tst$was.trained==0,] p<-make_mean_spectrum_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = mean.pred),color='blue',size=2, data=spec_tst[spec_tst$was.trained==1,])+ xlab('Assigned TCP')+ylab('Mean of predicted TCP')+theme cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_tcp_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } #### Spectrum aggregation ##### make_mean_spectrum<-function(tst){ spec_tst<-ddply(tst,.(spectrumid,patientid,diagnosis,smpl.id,t.id,norm.p,tumor.p,necro.p,norm.type,othr.p,target,was.trained),summarise,min.pred=min(predict),mean.pred=mean(predict),median.pred=median(predict),max.pred=max(predict)) return(spec_tst) } #### Spectrum median plot ##### make_median_spectrum_point_plot<-function(test){ my.formula <- y ~ x p <- ggplot(data = test, aes(x = target, y = median.pred)) + geom_smooth(method = "lm", se=FALSE, color="black", formula = my.formula) + stat_poly_eq(formula = my.formula, eq.with.lhs = "italic(hat(y))~`=`~", aes(label = paste(..eq.label.., ..rr.label.., sep = "*plain(\",\")~")), parse = TRUE) return(p) } plot_median_spectrum_test_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_test_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') spec_tst<-make_mean_spectrum(fm) test<-spec_tst[spec_tst$was.trained==0,] p<-make_median_spectrum_point_plot(test)+geom_boxplot(aes(group=target)) cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_test_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_median_spectrum_train_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_train_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') spec_tst<-make_mean_spectrum(fm) test<-spec_tst[spec_tst$was.trained==0,] p<-make_median_spectrum_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = median.pred),color='blue',data=spec_tst[spec_tst$was.trained==1,]) cat(format(Sys.time(), "%b %d %X"),'Function: plot_median_spectrum_train_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } #### Spectrum mean plot ##### make_mean_spectrum_point_plot<-function(test){ my.formula <- y ~ x p <- ggplot(data = test, aes(x = target, y = mean.pred)) + geom_smooth(method = "lm", se=FALSE, color="black", formula = my.formula) + stat_poly_eq(formula = my.formula, eq.with.lhs = "italic(hat(y))~`=`~", aes(label = paste(..eq.label.., ..rr.label.., sep = "*plain(\",\")~")), parse = TRUE) return(p) } plot_mean_spectrum_test_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_test_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. 
Mem:',getFreeMem(),'GB\n') spec_tst<-make_mean_spectrum(fm) test<-spec_tst[spec_tst$was.trained==0,] p<-make_mean_spectrum_point_plot(test)+geom_boxplot(aes(group=target)) cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_test_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_mean_spectrum_train_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_train_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') spec_tst<-make_mean_spectrum(fm) test<-spec_tst[spec_tst$was.trained==0,] p<-make_mean_spectrum_point_plot(test)+geom_boxplot(aes(group=target))+ geom_jitter(aes(x = target, y = mean.pred),color='blue',data=spec_tst[spec_tst$was.trained==1,]) cat(format(Sys.time(), "%b %d %X"),'Function: plot_mean_spectrum_train_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } plot_train_smpl_box<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_train_smpl_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") starts. Mem:',getFreeMem(),'GB\n') sum_tst<-ddply(fm,.(smpl.id,target,was.trained),summarise,min.pred=min(predict),mean.pred=mean(predict),max.pred=max(predict)) fm$samples<-factor(fm$smpl.id,levels = sum_tst$smpl.id[order(sum_tst$target)]) sum_tst$samples<-factor(sum_tst$smpl.id,levels = sum_tst$smpl.id[order(sum_tst$target)]) p <- ggplot(fm, aes(x=samples, y=predict,color=factor(was.trained))) + geom_boxplot()+ geom_point(data=sum_tst, aes(x=samples, y=target), color='black', shape='+',size=6)+ coord_flip() cat(format(Sys.time(), "%b %d %X"),'Function: plot_train_smpl_box("',fm$fname[1],'","',as.character(fm$Norm[1]),'","',fm$method[1],'") finish\n') return(p) } train_rf<-function(train){ cat(format(Sys.time(), "%b %d %X"),'Function: train_rf',' starts. 
Mem:',getFreeMem(),'GB\n') fitCV10<-trainControl(method = "repeatedcv", number = 10, repeats = 3) N<-dim(train)[1] p<-dim(train)[2]-1 #tunegrid <- expand.grid(.mtry=c(1:(p/3)),.ntree=c(500,1000,1500)) tunegrid <- expand.grid(mtry=sample(c(1:(p/3)),size=10))#,.ntree=c(500,1000,1500)) if(!exists('ncores')){ ncores<- detectCores() } cat(format(Sys.time(), "%b %d %X"),'ncores=',ncores,'.\n') cl <- makePSOCKcluster(ncores) registerDoParallel(cl) rfFitCVpat <- train(target ~ ., data = train, method = "rf",#customRF, trControl = fitCV10, tuneGrid=tunegrid, verbose = FALSE) stopCluster(cl) cat(format(Sys.time(), "%b %d %X"),'Function: train_rf',' finish.\n') return(rfFitCVpat) } train_xgb<-function(train){ cat(format(Sys.time(), "%b %d %X"),'Function: train_xgb',' starts. Mem:',getFreeMem(),'GB\n') fitCV10<-trainControl(method = "repeatedcv", number = 10, repeats = 3) if(!exists('ncores')){ ncores<- detectCores() } cat(format(Sys.time(), "%b %d %X"),'ncores=',ncores,'.\n') cl <- makePSOCKcluster(ncores) registerDoParallel(cl) xboostFitCVspec <- train(target ~ ., data = train, method = "xgbDART", trControl = fitCV10, verbose = FALSE) stopCluster(cl) cat(format(Sys.time(), "%b %d %X"),'Function: train_xgb',' finish.\n') return(xboostFitCVspec) } xgb_importance<-function(mod){ cat(format(Sys.time(), "%b %d %X"),'Function: xgb_importance starts. \n') fm<-mod$data model<-mod$model xgb.importance(model =model$finalModel)->imp_mat cat(format(Sys.time(), "%b %d %X"),'Function: xgb_importance finish. \n') return(list(data=fm,model=model,importance=imp_mat)) } xgb_plot_importance<-function(imp){ cat(format(Sys.time(), "%b %d %X"),'Function: xgb_plot_importance starts. \n') fm<-imp$data model<-imp$model$finalModel imp_mat<-imp$importance p<-xgb.ggplot.importance(imp_mat, rel_to_first = TRUE, xlab = "Relative importance")->p cat(format(Sys.time(), "%b %d %X"),'Function: xgb_plot_importance finish. 
\n') return(p) } get_shap_values<-function(imp){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values starts. \n') fm<-imp$data #cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values dim(fm)',dim(fm),' \n') model<-imp$model$finalModel #cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values length(model)',length(model),' \n') imp<-imp$importance #cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values dim(imp)',dim(imp),' \n') idx<-grep("(MZ_.*)",names(fm)) tm<-as.matrix(fm[,idx]) #cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values dim(tm)',dim(tm),' \n') shap_values <- SHAPforxgboost::shap.values(xgb_model = model, X_train = tm) shap_values$tm<-tm return(shap_values) } get_shap_dep_plot<-function(shap_values){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_dep_plot starts. \n') shap_values$tm->tm shap_long <- SHAPforxgboost::shap.prep(shap_contrib = shap_values$shap_score, X_train = tm) fig_list <- lapply(names(shap_values$mean_shap_score)[1:16], shap.plot.dependence, data_long = shap_long) p<-gridExtra::grid.arrange(grobs = fig_list, ncol = 4) return(p) } get_shap_plot_data<-function(shap_values){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_plot_data starts. \n') plot_data <- shap.prep.stack.data(shap_contrib = shap_values$shap_score, top_n = 10, n_groups = 10) return(plot_data) } get_shap_force_plot<-function(plot_data){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_force_plot starts. \n') p<-shap.plot.force_plot(plot_data,zoom_in = FALSE) return(p) } get_shap_force_group_plot<-function(plot_data){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_force_group_plot starts. \n') p<-shap.plot.force_plot_bygroup(plot_data) return(p) } get_xgb.shap_plot<-function(imp){ cat(format(Sys.time(), "%b %d %X"),'Function: get_xgb.shap_plot starts. 
\n') fm<-imp$data model<-imp$model$finalModel imp<-imp$importance idx<-grep("(MZ_.*)",names(fm)) tm<-as.matrix(fm[,idx]) contr<-predict(model,newdata=tm, predcontrib = TRUE) shap<-xgb.plot.shap(tm,contr,model=model$finalModel, features = imp$Feature[1:10], plot = FALSE) cat(format(Sys.time(), "%b %d %X"),'Function: get_xgb.shap_plot finish. \n') return(shap) } get_shap_plot<-function(imp){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_plot starts. \n') fm<-imp$data model<-imp$model$finalModel imp<-imp$importance idx<-grep("(MZ_.*)",names(fm)) tm<-as.matrix(fm[,idx]) sh_res<-shap.score.rank(xgb_model = model, X_train =tm, shap_approx = F ) sh_long<-shap.prep(shap = sh_res, X_train = tm , top_n = 10 ) p<-plot.shap.summary(data_long = sh_long) cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_plot finish. \n') return(p) } train_trigger<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: train_trigger',' starts. Mem:',getFreeMem(),'GB\n') return(length(unique(fm$norm.p))>2) } #### Train reduced model #### get_reduced_fm<-function(imp,threshold=5){ cat(format(Sys.time(), "%b %d %X"),'Function: get_shap_values starts. 
\n') fm<-imp$data shap_values <- get_shap_values(imp) idxMZ<-grep('MZ_',names(fm)) shval<-cbind(fm[,-idxMZ],shap_values$shap_score) idxMZ<-grep('MZ_',names(shval)) sh_mean<-ddply(shval,.(target),function(.x){apply(.x[,idxMZ],2,mean)}) sh_mean_long<-melt(sh_mean,id='target') idxMZ<-grep('MZ_',names(fm)) idxOpt<-match(as.character(unique( sh_mean_long$variable[abs(sh_mean_long$value)>threshold])), names(fm)) fm_opt<-cbind(fm[,-idxMZ],fm[,idxOpt]) return(fm_opt) } #' Prepare panel of three PCA plots: 1-2, 2-3, 1-3 #' #' @param fm feature matrix to plot #' @param color name of the column to color plot with #' plot_pca<-function(fm, color){ cat(format(Sys.time(), "%b %d %X"),'Function: plot_pca','\n') data=fm[[1]] stop('Not implemented yet') } get_model_fname<-function(ms_setup,diag,expt,method,idx){ cat(format(Sys.time(), "%b %d %X"),'Function: get_model_fname','\n') res=ms_setup[1] mode=ms_setup[2] mz=ms_setup[3] dev=ifelse(expt==1,2,4) fname<-sprintf('peak2019.diag_%d.expt_%d.res_%d.mode_%d.dev_%d.mz_%d.%s.cv10.%s.fmodel.rds', diag,expt,res,mode,dev,mz,method,idx) return(fname) } #Dataset file prefix fpref<-'peak2019' #' Prepare dataset file name list. #' Using provided parameters of the spectra this function create list of files #' to be read for the dataset creation. #' #' This function uses parameter \code{fpref} as a datafile prefix. Different #' datasets could use different prefixes such as 'peak2019'. #' #' @param res -- resolution #' @param mode -- registration mode #' @param mz -- width of mass range #' @param ddiag -- diagnosis #' @param path -- path to datafile folder #' #' @return list of filepath strings. 
get_fm_fname<-function(res,mode,mz,ddiag,path){ cat(format(Sys.time(), "%b %d %X"),'Function: get_fm_fname','\n') fpatt<-sprintf('%s.diag_%d.expt_.*.res_%d.mode_%d.dev_.*.mz_%d.peak.rds', fpref,ddiag,res,mode,mz) fname<-dir(path = path,pattern = fpatt) return(fname) } get_model<-function(ms_setup,diag,expt,method,idx){ cat(format(Sys.time(), "%b %d %X"),'Function: get_model','\n') fname<-get_model_fname(ms_setup,diag,expt,method,idx) fpath<-paste0(path,fname) if(file.exists(fpath)){ m<- readRDS(fpath) return(list(model=m,ms_setup=ms_setup,diag=diag,expt=expt,method=method,idx=idx)) }else{ return(list()) } } mapMZ<-function(model,fm){ cat(format(Sys.time(), "%b %d %X"),'Function: mapMZ','\n') modMZ<-as.numeric(sub('^MZ_','',model$xNames)) dataMZ<-as.numeric(sub('^MZ_','',names(fm)[grep('^MZ',names(fm))])) } predict_dataset<-function(model,ms_setup,ddiag,dexpt){ cat(format(Sys.time(), "%b %d %X"),'Function: predict_dataset','\n') if(length(mode)==0) return(data.frame()) m<-model$model fm<-load_dataset(model,ms_setup,ddiag,dexpt) if(dim(fm)[1]==0) return(data.frame()) fmapped<-mapMZ(m,fm) } load_dataset<-function(res,mode,mz,diag){ cat(format(Sys.time(), "%b %d %X"),'Function: load_dataset','\n') path<-dpath fmfname<-get_fm_fname(res,mode,mz,diag,path) fpath<-paste0(path,fmfname) return(fpath) } get_pat_df<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: get_pat_df','\n') if(dim(fm)[2]>2){ return(unique(fm[,c("patientid","diagnosis")])) }else{ return(data.frame()) } } get_spec_df<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: get_spec_df','\n') if(dim(fm)[2]>2){ return(unique(fm[,c("spectrumid","patientid","diagnosis")])) }else{ return(data.frame()) } } get_dim<-function(fm){ cat(format(Sys.time(), "%b %d %X"),'Function: get_dim','\n') return(data.frame(nrow=dim(fm)[1],ncol=dim(fm)[2])) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pred.2ph.R \name{predict.ah.2ph} \alias{predict.ah.2ph} \title{Prediction Based on the Additive Hazards Model Fitted from Two-phase Sampling} \usage{ \method{predict}{ah.2ph}(object, newdata, newtime, ...) } \arguments{ \item{object}{an object of class inhering from 'ah.2ph'.} \item{newdata}{a dataframe of an individual's predictors.} \item{newtime}{a given sequence of time points at which the prediction is performed.} \item{...}{further arguments passed to or from other methods.} } \value{ A dataframe including the given time points, predicted hazards, their standard errors, their variances, the phase I component of the variance for predicted hazards and the phase II component of the variance. } \description{ This function predicts a subject's overall hazard rates at given time points based on this subject's covariate values. The prediction function is an object from \code{\link{ah.2ph}}. The estimating procedures follow Hu (2014). 
} \examples{ library(survival) ### load data nwts <- nwtsco[1:100,] ### create strata based on institutional histology and disease status nwts$strt <- 1+nwts$instit ### add a stratum containing all (relapsed) cases nwts$strt[nwts$relaps==1] <- 3 ### assign phase II subsampling probabilities ### oversample unfavorable histology (instit =1) and cases ### Pi = 0.5 for instit =0, Pi =1 for instit =1 and relaps =1 nwts$Pi<- 0.5 * (nwts$strt == 1) + 1 * (nwts$strt == 2) + 1 * (nwts$strt == 3) ### generate phase II sampling indicators N <- dim(nwts)[1] nwts$in.ph2 <- rbinom(N, 1, nwts$Pi) ### fit an additive hazards model to two-phase sampling data without calibration fit1 <- ah.2ph(Surv(trel,relaps) ~ age + histol, data = nwts, ties = FALSE, R = in.ph2, Pi = Pi, robust = FALSE) ### input the new data for prediction newdata <- nwtsco[101,] ### based on the fitted model fit1, perform prediction at time points t =3 and t= 5 predict(fit1, newdata, newtime = c(3,5)) ### fit an additve hazards model to two-phase sampling data with calibration ### The calibration variable is stage fit2 <- ah.2ph(Surv(trel,relaps) ~ age + histol, data = nwts, R = in.ph2, Pi = Pi, ties = FALSE, robust = FALSE, calibration.variables = "stage") ### based on the fitted model fit2, perform prediction at time points t =3 and t= 5 predict(fit2, newdata, newtime = c(3,5)) \dontrun{ ### The calibration variable is stage, when set robust = TRUE fit3 <- ah.2ph(Surv(trel,relaps) ~ age + histol, data = nwts, R = in.ph2, Pi = Pi, ties = FALSE, robust = TRUE, calibration.variables = "stage") ### based on the fitted model fit2, perform prediction at time points t =3 and t= 5 predict(fit3, newdata, newtime = c(3,5)) } } \references{ Jie Hu (2014) A Z-estimation System for Two-phase Sampling with Applications to Additive Hazards Models and Epidemiologic Studies. Dissertation, University of Washington. } \seealso{ \code{\link{ah.2ph}} for fitting the additive hazards model with two-phase }
/man/predict.ah.2ph.Rd
no_license
cran/addhazard
R
false
true
3,181
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pred.2ph.R \name{predict.ah.2ph} \alias{predict.ah.2ph} \title{Prediction Based on the Additive Hazards Model Fitted from Two-phase Sampling} \usage{ \method{predict}{ah.2ph}(object, newdata, newtime, ...) } \arguments{ \item{object}{an object of class inhering from 'ah.2ph'.} \item{newdata}{a dataframe of an individual's predictors.} \item{newtime}{a given sequence of time points at which the prediction is performed.} \item{...}{further arguments passed to or from other methods.} } \value{ A dataframe including the given time points, predicted hazards, their standard errors, their variances, the phase I component of the variance for predicted hazards and the phase II component of the variance. } \description{ This function predicts a subject's overall hazard rates at given time points based on this subject's covariate values. The prediction function is an object from \code{\link{ah.2ph}}. The estimating procedures follow Hu (2014). 
} \examples{ library(survival) ### load data nwts <- nwtsco[1:100,] ### create strata based on institutional histology and disease status nwts$strt <- 1+nwts$instit ### add a stratum containing all (relapsed) cases nwts$strt[nwts$relaps==1] <- 3 ### assign phase II subsampling probabilities ### oversample unfavorable histology (instit =1) and cases ### Pi = 0.5 for instit =0, Pi =1 for instit =1 and relaps =1 nwts$Pi<- 0.5 * (nwts$strt == 1) + 1 * (nwts$strt == 2) + 1 * (nwts$strt == 3) ### generate phase II sampling indicators N <- dim(nwts)[1] nwts$in.ph2 <- rbinom(N, 1, nwts$Pi) ### fit an additive hazards model to two-phase sampling data without calibration fit1 <- ah.2ph(Surv(trel,relaps) ~ age + histol, data = nwts, ties = FALSE, R = in.ph2, Pi = Pi, robust = FALSE) ### input the new data for prediction newdata <- nwtsco[101,] ### based on the fitted model fit1, perform prediction at time points t =3 and t= 5 predict(fit1, newdata, newtime = c(3,5)) ### fit an additve hazards model to two-phase sampling data with calibration ### The calibration variable is stage fit2 <- ah.2ph(Surv(trel,relaps) ~ age + histol, data = nwts, R = in.ph2, Pi = Pi, ties = FALSE, robust = FALSE, calibration.variables = "stage") ### based on the fitted model fit2, perform prediction at time points t =3 and t= 5 predict(fit2, newdata, newtime = c(3,5)) \dontrun{ ### The calibration variable is stage, when set robust = TRUE fit3 <- ah.2ph(Surv(trel,relaps) ~ age + histol, data = nwts, R = in.ph2, Pi = Pi, ties = FALSE, robust = TRUE, calibration.variables = "stage") ### based on the fitted model fit2, perform prediction at time points t =3 and t= 5 predict(fit3, newdata, newtime = c(3,5)) } } \references{ Jie Hu (2014) A Z-estimation System for Two-phase Sampling with Applications to Additive Hazards Models and Epidemiologic Studies. Dissertation, University of Washington. } \seealso{ \code{\link{ah.2ph}} for fitting the additive hazards model with two-phase }
#Application 06 library(shiny) shinyUI( fluidPage( titlePanel("Probability Distributions"), sidebarLayout( sidebarPanel( fileInput("file", "Choose File From the Application 06 Folder", multiple=TRUE),h4("The file is about different types of servings of drinks in different countries."), br(),numericInput("val1", "Value 1", 0, min = 1, max = 1000,width = "150px"), numericInput("val2", "Value 2", 0, min = 1, max = 1000,width = "150px"), numericInput("val3", "Value 3", 0, min = 1, max = 1000,width = "150px"), checkboxInput("LowerTail", "Lower Tail", FALSE), tags$hr(), h2("Binomial Distribtion"), h5("Binomial Distribution shows frequency distribution of the possible number of successful outcomes in a given number of trials in each of which there is the same probability of success."), actionButton("pbinom", "pbinom"), actionButton("qbinom", "qbinom"), actionButton("dbinom", "dbinom"), tags$hr(), h2("Normal Distribution"), h5("A normal distribution is an arrangement of a data set in which most values cluster in the middle of the range and the rest taper off symmetrically toward either extreme."), actionButton("pnorm", "pnorm"), actionButton("qnorm", "qnorm"), actionButton("dnorm", "dnorm"), tags$hr(), h2("Poisson Distribution"), h5("The Poisson distribution can be used to calculate the probabilities of various numbers of success based on the mean number of successes."), actionButton("ppois", "ppois"), actionButton("qpois", "qpois"), actionButton("dpois", "dpois") ), mainPanel( tabsetPanel( tabPanel("Data", tableOutput("datatable")), tabPanel("Result", h3(textOutput("result"))) ) ) ) ) )
/Probability_Distrubution_[6]/ui.r
no_license
osamaahmed17/Data-Science-Applications
R
false
false
1,880
r
#Application 06 library(shiny) shinyUI( fluidPage( titlePanel("Probability Distributions"), sidebarLayout( sidebarPanel( fileInput("file", "Choose File From the Application 06 Folder", multiple=TRUE),h4("The file is about different types of servings of drinks in different countries."), br(),numericInput("val1", "Value 1", 0, min = 1, max = 1000,width = "150px"), numericInput("val2", "Value 2", 0, min = 1, max = 1000,width = "150px"), numericInput("val3", "Value 3", 0, min = 1, max = 1000,width = "150px"), checkboxInput("LowerTail", "Lower Tail", FALSE), tags$hr(), h2("Binomial Distribtion"), h5("Binomial Distribution shows frequency distribution of the possible number of successful outcomes in a given number of trials in each of which there is the same probability of success."), actionButton("pbinom", "pbinom"), actionButton("qbinom", "qbinom"), actionButton("dbinom", "dbinom"), tags$hr(), h2("Normal Distribution"), h5("A normal distribution is an arrangement of a data set in which most values cluster in the middle of the range and the rest taper off symmetrically toward either extreme."), actionButton("pnorm", "pnorm"), actionButton("qnorm", "qnorm"), actionButton("dnorm", "dnorm"), tags$hr(), h2("Poisson Distribution"), h5("The Poisson distribution can be used to calculate the probabilities of various numbers of success based on the mean number of successes."), actionButton("ppois", "ppois"), actionButton("qpois", "qpois"), actionButton("dpois", "dpois") ), mainPanel( tabsetPanel( tabPanel("Data", tableOutput("datatable")), tabPanel("Result", h3(textOutput("result"))) ) ) ) ) )
setwd("C:/Users/ac14037/Google Drive/R/scripts/rotation1scripts") library(qtl) install.packages(SNPolisher) ??snpolisher install.packages("qtl") ####LOAD UP NEW AXIOM DATA WITH 6K SNPS (POST FILTERING W/ ANALYSIS SUITE)#### # nd = read.csv("recommendedsnpsv3.csv", stringsAsFactors = F) # nd[nd=="AA"] = "A" # nd[nd=="BB"] = "B" # nd[nd=="AB"] = "H" # nd[nd=="NoCall"] = "-" # nd[1,]=1 # nd[1,1]="" # nd[1,2]="" # nd=nd[,-(6978:ncol(nd))] # write.csv(nd, "nd.csv", row.names = F) axiomcsv = read.csv("nd.csv", stringsAsFactors = F) comp=axiomcsv[c(340, 343, 345, 349, 351, 353),] #select parent plants from dataset acomp=all.equal(comp[1,], comp[2,]) #find markers in which parents of same cultivar differ t = strsplit(acomp[4], " ")[[1]][2] #parse string from acomp to get marker substring(t, 2, nchar(t)-2) #more parsing ?substring ?gsub ?regex ?strsplit words ?all.equal axiomdata_untouched[350,] all.equal(axiomdata_untouched[350] axiomdata = read.cross("csv", "./", "nd.csv", estimate.map=F) axiomdata_untouched = axiomdata ?read.cross summary(axiomdata) plotMissing(axiomdata) #do some plots ?par par(mfrow=c(1,2), las=1) plot(ntyped(axiomdata), ylab="No. typed markers", main="No. genotypes by individual") plot(ntyped(axiomdata, "mar"), ylab="No. typed individuals", main="No. genotypes by marker") #drop individuals with low information axiomdata = subset(axiomdata, ind=(ntyped(axiomdata)>50)) #drop markers with low information nt.bymar = ntyped(axiomdata, "mar") todrop = names(nt.bymar[nt.bymar < 200]) axiomdata = drop.markers(axiomdata, todrop) #####remove duplicate individuals#### cg = comparegeno(axiomdata) hist(cg[lower.tri(cg)], breaks=seq(0, 1, len=101), xlab="No. 
matching genotypes") rug(cg[lower.tri(cg)]) wh = which(cg > 0.9, arr=TRUE) wh = wh[wh[,1] < wh[,2],] wh g = pull.geno(axiomdata) table(g[3,], g[11,]) for(i in 1:nrow(wh)) { tozero = !is.na(g[wh[i,1],]) & !is.na(g[wh[i,2],]) & g[wh[i,1],] != g[wh[i,2],] axiomdata$geno[[1]]$data[wh[i,1],tozero] = NA } axiomdata = subset(axiomdata, ind=-wh[,2]) ####### ####remove markers with same genotypes#### dup = findDupMarkers(axiomdata, exact.only=F) ?drop.markers axiomdata = drop.markers(axiomdata, dup) summary(axiomdata) ####inspect seg. distotion (here only for 1:2:1 ratio. how check deviation from 1:1??)#### gt = geno.table(axiomdata) gt[gt$P.value < 0.05/totmar(axiomdata),] todrop = rownames(gt[gt$P.value < 1e-200,]) # drop poor markers - is p value set properly? this removes ~ 2/3 of markers axiomdata2 = drop.markers(axiomdata, todrop) summary(axiomdata2) gt2 = geno.table(axiomdata2) gt2[gt2$P.value < 0.05/totmar(axiomdata2),] todrop = rownames(gt[gt$P.value < 1e-10,]) # drop poor markers - is p value set properly? this removes ~ 2/3 of markers axiomdata3 = drop.markers(axiomdata, todrop) summary(axiomdata3) gt3 = geno.table(axiomdata3) gt3[gt3$P.value < 0.05/totmar(axiomdata3),] ####### #look at pariwise recombination fractions adat = markerlrt(axiomdata) #do some checks for possible switched alleles checkAlleles(adat, threshold=5) rf=pull.rf(adat) lod=pull.rf(adat, what="lod") plot(as.numeric(rf), as.numeric(lod), xlab="recombination fraction", ylab="LOD score") ?formLinkageGroups #form linkage groups lg <- formLinkageGroups(axiomdata, max.rf=0.35, min.lod=6) table(lg[,2])
/scripts/r/axiomrqtl.R
no_license
alexcoulton/phd
R
false
false
3,453
r
setwd("C:/Users/ac14037/Google Drive/R/scripts/rotation1scripts") library(qtl) install.packages(SNPolisher) ??snpolisher install.packages("qtl") ####LOAD UP NEW AXIOM DATA WITH 6K SNPS (POST FILTERING W/ ANALYSIS SUITE)#### # nd = read.csv("recommendedsnpsv3.csv", stringsAsFactors = F) # nd[nd=="AA"] = "A" # nd[nd=="BB"] = "B" # nd[nd=="AB"] = "H" # nd[nd=="NoCall"] = "-" # nd[1,]=1 # nd[1,1]="" # nd[1,2]="" # nd=nd[,-(6978:ncol(nd))] # write.csv(nd, "nd.csv", row.names = F) axiomcsv = read.csv("nd.csv", stringsAsFactors = F) comp=axiomcsv[c(340, 343, 345, 349, 351, 353),] #select parent plants from dataset acomp=all.equal(comp[1,], comp[2,]) #find markers in which parents of same cultivar differ t = strsplit(acomp[4], " ")[[1]][2] #parse string from acomp to get marker substring(t, 2, nchar(t)-2) #more parsing ?substring ?gsub ?regex ?strsplit words ?all.equal axiomdata_untouched[350,] all.equal(axiomdata_untouched[350] axiomdata = read.cross("csv", "./", "nd.csv", estimate.map=F) axiomdata_untouched = axiomdata ?read.cross summary(axiomdata) plotMissing(axiomdata) #do some plots ?par par(mfrow=c(1,2), las=1) plot(ntyped(axiomdata), ylab="No. typed markers", main="No. genotypes by individual") plot(ntyped(axiomdata, "mar"), ylab="No. typed individuals", main="No. genotypes by marker") #drop individuals with low information axiomdata = subset(axiomdata, ind=(ntyped(axiomdata)>50)) #drop markers with low information nt.bymar = ntyped(axiomdata, "mar") todrop = names(nt.bymar[nt.bymar < 200]) axiomdata = drop.markers(axiomdata, todrop) #####remove duplicate individuals#### cg = comparegeno(axiomdata) hist(cg[lower.tri(cg)], breaks=seq(0, 1, len=101), xlab="No. 
matching genotypes") rug(cg[lower.tri(cg)]) wh = which(cg > 0.9, arr=TRUE) wh = wh[wh[,1] < wh[,2],] wh g = pull.geno(axiomdata) table(g[3,], g[11,]) for(i in 1:nrow(wh)) { tozero = !is.na(g[wh[i,1],]) & !is.na(g[wh[i,2],]) & g[wh[i,1],] != g[wh[i,2],] axiomdata$geno[[1]]$data[wh[i,1],tozero] = NA } axiomdata = subset(axiomdata, ind=-wh[,2]) ####### ####remove markers with same genotypes#### dup = findDupMarkers(axiomdata, exact.only=F) ?drop.markers axiomdata = drop.markers(axiomdata, dup) summary(axiomdata) ####inspect seg. distotion (here only for 1:2:1 ratio. how check deviation from 1:1??)#### gt = geno.table(axiomdata) gt[gt$P.value < 0.05/totmar(axiomdata),] todrop = rownames(gt[gt$P.value < 1e-200,]) # drop poor markers - is p value set properly? this removes ~ 2/3 of markers axiomdata2 = drop.markers(axiomdata, todrop) summary(axiomdata2) gt2 = geno.table(axiomdata2) gt2[gt2$P.value < 0.05/totmar(axiomdata2),] todrop = rownames(gt[gt$P.value < 1e-10,]) # drop poor markers - is p value set properly? this removes ~ 2/3 of markers axiomdata3 = drop.markers(axiomdata, todrop) summary(axiomdata3) gt3 = geno.table(axiomdata3) gt3[gt3$P.value < 0.05/totmar(axiomdata3),] ####### #look at pariwise recombination fractions adat = markerlrt(axiomdata) #do some checks for possible switched alleles checkAlleles(adat, threshold=5) rf=pull.rf(adat) lod=pull.rf(adat, what="lod") plot(as.numeric(rf), as.numeric(lod), xlab="recombination fraction", ylab="LOD score") ?formLinkageGroups #form linkage groups lg <- formLinkageGroups(axiomdata, max.rf=0.35, min.lod=6) table(lg[,2])
micombine.F <- function( Fvalues , df1 , display = TRUE ){ M <- length(Fvalues) # number of imputations dk <- df1 * Fvalues # micombine.chisquare( dk = df1*Fvalues , df = df1 , display = display ) }
/R/micombine.F.R
no_license
cksun/miceadds
R
false
false
237
r
micombine.F <- function( Fvalues , df1 , display = TRUE ){ M <- length(Fvalues) # number of imputations dk <- df1 * Fvalues # micombine.chisquare( dk = df1*Fvalues , df = df1 , display = display ) }
setOldClass(c("hms", "difftime")) setMethod("asJSON", "hms", function(x, hms = c("string", "secs"), ...) { hms <- match.arg(hms) output <- switch(hms, string = as.character(x), secs = as.numeric(x, units = "secs") ) output[is.na(x)] <- NA asJSON(output, ...) })
/R/asJSON.difftime.R
permissive
stefanedwards/jsonlite
R
false
false
310
r
setOldClass(c("hms", "difftime")) setMethod("asJSON", "hms", function(x, hms = c("string", "secs"), ...) { hms <- match.arg(hms) output <- switch(hms, string = as.character(x), secs = as.numeric(x, units = "secs") ) output[is.na(x)] <- NA asJSON(output, ...) })
Dataset = setRefClass("Dataset", methods = list( point = function() { print('Must implement!') } ) )
/dataset/dataset.R
no_license
senhorinha/pattern-recognition
R
false
false
129
r
Dataset = setRefClass("Dataset", methods = list( point = function() { print('Must implement!') } ) )
#using sRGES to summarize all compounds library("plyr") ################## #### getsRGES = function(RGES, cor, pert_dose, pert_time, diff, max_cor){ sRGES = RGES pert_time = ifelse(pert_time < 24, "short", "long") pert_dose = ifelse(pert_dose < 10, "low", "high") if (pert_time == "short" & pert_dose == "low"){ sRGES = sRGES + diff[4] } if (pert_dose == "low" & pert_time == "long"){ sRGES = sRGES + diff[2] } if (pert_dose == "high" & pert_time == "short"){ sRGES = sRGES + diff[1] } return(sRGES * cor/max_cor) # } ############## ############## cancer = "ER" #build a reference model according to dose and time output_path <- paste(cancer, "/all_lincs_score.csv", sep="") lincs_drug_prediction = read.csv(output_path) #should use pert_dose > 0.01 lincs_drug_prediction_subset = subset(lincs_drug_prediction, pert_dose > 0 & pert_time %in% c(6, 24)) #pairs that share the same drug and cell id lincs_drug_prediction_pairs = merge(lincs_drug_prediction_subset, lincs_drug_prediction_subset, by=c("pert_iname", "cell_id")) #x is the reference lincs_drug_prediction_pairs = subset(lincs_drug_prediction_pairs, id.x != id.y & pert_time.x == 24 & pert_dose.x == 10) #, select = c("cmap_score.x", "cmap_score.y", "pert_dose.y", "pert_time.y")) #difference of RGES to the reference lincs_drug_prediction_pairs$cmap_diff = lincs_drug_prediction_pairs$cmap_score.x - lincs_drug_prediction_pairs$cmap_score.y lincs_drug_prediction_pairs$dose = round(log(lincs_drug_prediction_pairs$pert_dose.y, 2), 1) #fix time lincs_drug_prediction_pairs_subset = subset(lincs_drug_prediction_pairs, pert_time.y == 24 ) dose_cmap_diff_24 = tapply(lincs_drug_prediction_pairs_subset$cmap_diff, lincs_drug_prediction_pairs_subset$dose, mean) dose_cmap_diff_24 = data.frame(dose = as.numeric(names(dose_cmap_diff_24)), cmap_diff= dose_cmap_diff_24) plot(dose_cmap_diff_24$dose, dose_cmap_diff_24$cmap_diff) lm_dose_24 = lm(cmap_diff ~ dose, data = dose_cmap_diff_24) summary(lm_dose_24) lincs_drug_prediction_pairs_subset = 
subset(lincs_drug_prediction_pairs, pert_time.y == 6) dose_cmap_diff_6 = tapply(lincs_drug_prediction_pairs_subset$cmap_diff, lincs_drug_prediction_pairs_subset$dose, mean) dose_cmap_diff_6 = data.frame(dose = as.numeric(names(dose_cmap_diff_6)), cmap_diff= dose_cmap_diff_6) lm_dose_6 = lm(cmap_diff ~ dose, data = dose_cmap_diff_6) plot(dose_cmap_diff_6$dose, dose_cmap_diff_6$cmap_diff) summary(lm_dose_6) #estimate difference lincs_drug_prediction_pairs$dose_bin = ifelse(lincs_drug_prediction_pairs$pert_dose.y < 10, "low", "high") tapply(lincs_drug_prediction_pairs$cmap_diff, lincs_drug_prediction_pairs$dose_bin, mean) tapply(lincs_drug_prediction_pairs$cmap_diff, lincs_drug_prediction_pairs$pert_time.y, mean) diff = tapply(lincs_drug_prediction_pairs$cmap_diff, paste(lincs_drug_prediction_pairs$dose_bin, lincs_drug_prediction_pairs$pert_time.y), mean) cell_lines = read.csv(paste("raw/cell_lines/", cancer, "_cell_lines.csv", sep="")) cell_line_cancer = read.csv(paste(cancer, "/", "cell_line_", cancer, "_tacle.csv", sep="")) cell_line_cancer = merge(cell_line_cancer, ccle_lincs, by.x="Cell.line.primary.name", by.y="ccle_cell_line_name") cell_line_cancer = cell_line_cancer[order(cell_line_cancer$cor),] pred = merge(lincs_drug_prediction, cell_line_cancer, by.x="cell_id", by.y="lincs_cell_id") pred$RGES = sapply(1:nrow(pred), function(id){getsRGES(pred$cmap_score[id], pred$cor[id], pred$pert_dose[id], pred$pert_time[id], diff, max(pred$cor))}) cmpd_freq = table(pred$pert_iname) pred = subset(pred, pert_iname %in% names(cmpd_freq[cmpd_freq>0])) pred_merged = ddply(pred, .(pert_iname), summarise, mean = mean(RGES), n = length(RGES), median = median(RGES), sd = sd(RGES)) pred_merged$sRGES = pred_merged$mean pred_merged = pred_merged[order(pred_merged$sRGES), ] write.csv(pred_merged,paste( cancer, "/lincs_cancer_sRGES.csv", sep=""))
/sRGES_all_cmpds.R
no_license
drychkov/RGES
R
false
false
4,013
r
#using sRGES to summarize all compounds library("plyr") ################## #### getsRGES = function(RGES, cor, pert_dose, pert_time, diff, max_cor){ sRGES = RGES pert_time = ifelse(pert_time < 24, "short", "long") pert_dose = ifelse(pert_dose < 10, "low", "high") if (pert_time == "short" & pert_dose == "low"){ sRGES = sRGES + diff[4] } if (pert_dose == "low" & pert_time == "long"){ sRGES = sRGES + diff[2] } if (pert_dose == "high" & pert_time == "short"){ sRGES = sRGES + diff[1] } return(sRGES * cor/max_cor) # } ############## ############## cancer = "ER" #build a reference model according to dose and time output_path <- paste(cancer, "/all_lincs_score.csv", sep="") lincs_drug_prediction = read.csv(output_path) #should use pert_dose > 0.01 lincs_drug_prediction_subset = subset(lincs_drug_prediction, pert_dose > 0 & pert_time %in% c(6, 24)) #pairs that share the same drug and cell id lincs_drug_prediction_pairs = merge(lincs_drug_prediction_subset, lincs_drug_prediction_subset, by=c("pert_iname", "cell_id")) #x is the reference lincs_drug_prediction_pairs = subset(lincs_drug_prediction_pairs, id.x != id.y & pert_time.x == 24 & pert_dose.x == 10) #, select = c("cmap_score.x", "cmap_score.y", "pert_dose.y", "pert_time.y")) #difference of RGES to the reference lincs_drug_prediction_pairs$cmap_diff = lincs_drug_prediction_pairs$cmap_score.x - lincs_drug_prediction_pairs$cmap_score.y lincs_drug_prediction_pairs$dose = round(log(lincs_drug_prediction_pairs$pert_dose.y, 2), 1) #fix time lincs_drug_prediction_pairs_subset = subset(lincs_drug_prediction_pairs, pert_time.y == 24 ) dose_cmap_diff_24 = tapply(lincs_drug_prediction_pairs_subset$cmap_diff, lincs_drug_prediction_pairs_subset$dose, mean) dose_cmap_diff_24 = data.frame(dose = as.numeric(names(dose_cmap_diff_24)), cmap_diff= dose_cmap_diff_24) plot(dose_cmap_diff_24$dose, dose_cmap_diff_24$cmap_diff) lm_dose_24 = lm(cmap_diff ~ dose, data = dose_cmap_diff_24) summary(lm_dose_24) lincs_drug_prediction_pairs_subset = 
subset(lincs_drug_prediction_pairs, pert_time.y == 6) dose_cmap_diff_6 = tapply(lincs_drug_prediction_pairs_subset$cmap_diff, lincs_drug_prediction_pairs_subset$dose, mean) dose_cmap_diff_6 = data.frame(dose = as.numeric(names(dose_cmap_diff_6)), cmap_diff= dose_cmap_diff_6) lm_dose_6 = lm(cmap_diff ~ dose, data = dose_cmap_diff_6) plot(dose_cmap_diff_6$dose, dose_cmap_diff_6$cmap_diff) summary(lm_dose_6) #estimate difference lincs_drug_prediction_pairs$dose_bin = ifelse(lincs_drug_prediction_pairs$pert_dose.y < 10, "low", "high") tapply(lincs_drug_prediction_pairs$cmap_diff, lincs_drug_prediction_pairs$dose_bin, mean) tapply(lincs_drug_prediction_pairs$cmap_diff, lincs_drug_prediction_pairs$pert_time.y, mean) diff = tapply(lincs_drug_prediction_pairs$cmap_diff, paste(lincs_drug_prediction_pairs$dose_bin, lincs_drug_prediction_pairs$pert_time.y), mean) cell_lines = read.csv(paste("raw/cell_lines/", cancer, "_cell_lines.csv", sep="")) cell_line_cancer = read.csv(paste(cancer, "/", "cell_line_", cancer, "_tacle.csv", sep="")) cell_line_cancer = merge(cell_line_cancer, ccle_lincs, by.x="Cell.line.primary.name", by.y="ccle_cell_line_name") cell_line_cancer = cell_line_cancer[order(cell_line_cancer$cor),] pred = merge(lincs_drug_prediction, cell_line_cancer, by.x="cell_id", by.y="lincs_cell_id") pred$RGES = sapply(1:nrow(pred), function(id){getsRGES(pred$cmap_score[id], pred$cor[id], pred$pert_dose[id], pred$pert_time[id], diff, max(pred$cor))}) cmpd_freq = table(pred$pert_iname) pred = subset(pred, pert_iname %in% names(cmpd_freq[cmpd_freq>0])) pred_merged = ddply(pred, .(pert_iname), summarise, mean = mean(RGES), n = length(RGES), median = median(RGES), sd = sd(RGES)) pred_merged$sRGES = pred_merged$mean pred_merged = pred_merged[order(pred_merged$sRGES), ] write.csv(pred_merged,paste( cancer, "/lincs_cancer_sRGES.csv", sep=""))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bsts_create.R \name{bsts_create} \alias{bsts_create} \title{Create a bsts_model} \usage{ bsts_create(df, date_variable, response, nseasons, niter = 100, target_variable, group_variable, model.options = BstsOptions(), rebag_vars = FALSE, rebag_mean_vars = FALSE, inclusion_probability = 0.01) } \arguments{ \item{df}{Dataframe} \item{date_variable}{Column name containing date} \item{response}{Group that is being forecast} \item{nseasons}{Seasonality of data - will be assumed if not provided} \item{niter}{Number of MCMC iterations to attempt} \item{target_variable}{Column name containing metric to be forecast} \item{group_variable}{Column name indicating group names - will build into regressors} \item{model.options}{Additional options for BSTS} \item{rebag_vars}{Create pseudo aggregate variable of other regressors} \item{rebag_mean_vars}{Create psuedo mean variable of other regressors} \item{inclusion_probability}{Minimum probability of inclusion in final model to show in returned predictors} } \value{ list object - bsts.model, data.frame of predictors } \description{ Create a bsts_model }
/man/bsts_create.Rd
no_license
lissahyacinth/bstsTest
R
false
true
1,197
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bsts_create.R \name{bsts_create} \alias{bsts_create} \title{Create a bsts_model} \usage{ bsts_create(df, date_variable, response, nseasons, niter = 100, target_variable, group_variable, model.options = BstsOptions(), rebag_vars = FALSE, rebag_mean_vars = FALSE, inclusion_probability = 0.01) } \arguments{ \item{df}{Dataframe} \item{date_variable}{Column name containing date} \item{response}{Group that is being forecast} \item{nseasons}{Seasonality of data - will be assumed if not provided} \item{niter}{Number of MCMC iterations to attempt} \item{target_variable}{Column name containing metric to be forecast} \item{group_variable}{Column name indicating group names - will build into regressors} \item{model.options}{Additional options for BSTS} \item{rebag_vars}{Create pseudo aggregate variable of other regressors} \item{rebag_mean_vars}{Create psuedo mean variable of other regressors} \item{inclusion_probability}{Minimum probability of inclusion in final model to show in returned predictors} } \value{ list object - bsts.model, data.frame of predictors } \description{ Create a bsts_model }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/route53resolver_operations.R \name{route53resolver_get_resolver_query_log_config_association} \alias{route53resolver_get_resolver_query_log_config_association} \title{Gets information about a specified association between a Resolver query logging configuration and an Amazon VPC} \usage{ route53resolver_get_resolver_query_log_config_association( ResolverQueryLogConfigAssociationId ) } \arguments{ \item{ResolverQueryLogConfigAssociationId}{[required] The ID of the Resolver query logging configuration association that you want to get information about.} } \description{ Gets information about a specified association between a Resolver query logging configuration and an Amazon VPC. When you associate a VPC with a query logging configuration, Resolver logs DNS queries that originate in that VPC. See \url{https://www.paws-r-sdk.com/docs/route53resolver_get_resolver_query_log_config_association/} for full documentation. } \keyword{internal}
/cran/paws.networking/man/route53resolver_get_resolver_query_log_config_association.Rd
permissive
paws-r/paws
R
false
true
1,028
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/route53resolver_operations.R \name{route53resolver_get_resolver_query_log_config_association} \alias{route53resolver_get_resolver_query_log_config_association} \title{Gets information about a specified association between a Resolver query logging configuration and an Amazon VPC} \usage{ route53resolver_get_resolver_query_log_config_association( ResolverQueryLogConfigAssociationId ) } \arguments{ \item{ResolverQueryLogConfigAssociationId}{[required] The ID of the Resolver query logging configuration association that you want to get information about.} } \description{ Gets information about a specified association between a Resolver query logging configuration and an Amazon VPC. When you associate a VPC with a query logging configuration, Resolver logs DNS queries that originate in that VPC. See \url{https://www.paws-r-sdk.com/docs/route53resolver_get_resolver_query_log_config_association/} for full documentation. } \keyword{internal}
# merge behavioral (long form) and language data together library(tidyverse) library(here) LANGUAGE_PATH <- here("data/study1c/processed/bnc_vs_coca_es_400_10_x5.csv") RAW_BEHAVIORAL_CONF <- here("data/study1c/processed/tidy_behavioral_iat_data_confirmatory_full.csv") #here("data/study1c/processed/tidy_behavioral_iat_data_confirmatory_full.csv") OUTFILE <- here("data/study1c/processed/long_form_confirmatory_behavior_and_language.csv") # here("data/study1c/processed/long_form_confirmatory_behavior_and_language.csv") # language es (5 runs of each model) es_lang_raw <- read_csv(LANGUAGE_PATH) es_lang_tidy <- es_lang_raw %>% spread(model, effect_size) %>% rename(coca_lang_es = coca, bnc_lang_es = bnc) %>% mutate(lang_diff = bnc_lang_es - coca_lang_es) # get bnc - coca language es difference # behavioral behavioral <- read_csv(RAW_BEHAVIORAL_CONF) %>% select(user_id, domain, residence, resid)%>% rename(behavioral_effect_resid = resid) %>% mutate(user_id = as.character(user_id)) # full df full_df <- behavioral %>% mutate(temp = list(es_lang_tidy)) %>% unnest() %>% filter(domain == domain1) %>% select(-domain1) %>% mutate_if(is.character, as.factor) write_csv(full_df, OUTFILE)
/analyses/study1c/10_tidy_full_df.R
no_license
mllewis/IATLANG
R
false
false
1,226
r
# merge behavioral (long form) and language data together library(tidyverse) library(here) LANGUAGE_PATH <- here("data/study1c/processed/bnc_vs_coca_es_400_10_x5.csv") RAW_BEHAVIORAL_CONF <- here("data/study1c/processed/tidy_behavioral_iat_data_confirmatory_full.csv") #here("data/study1c/processed/tidy_behavioral_iat_data_confirmatory_full.csv") OUTFILE <- here("data/study1c/processed/long_form_confirmatory_behavior_and_language.csv") # here("data/study1c/processed/long_form_confirmatory_behavior_and_language.csv") # language es (5 runs of each model) es_lang_raw <- read_csv(LANGUAGE_PATH) es_lang_tidy <- es_lang_raw %>% spread(model, effect_size) %>% rename(coca_lang_es = coca, bnc_lang_es = bnc) %>% mutate(lang_diff = bnc_lang_es - coca_lang_es) # get bnc - coca language es difference # behavioral behavioral <- read_csv(RAW_BEHAVIORAL_CONF) %>% select(user_id, domain, residence, resid)%>% rename(behavioral_effect_resid = resid) %>% mutate(user_id = as.character(user_id)) # full df full_df <- behavioral %>% mutate(temp = list(es_lang_tidy)) %>% unnest() %>% filter(domain == domain1) %>% select(-domain1) %>% mutate_if(is.character, as.factor) write_csv(full_df, OUTFILE)
hts_CutTrim <- setClass("hts_CutTrim", contains = c("hts_Trimmer")) hts_CutTrim <- function(CutTrim.data = list()){ expected <- c("Notes", "totalFragmentsInput", "totalFragmentsOutput", "SE_in", "SE_out", "SE_rightTrim", "SE_leftTrim", "SE_discarded", "PE_in", "PE_out", "R1_leftTrim", "R1_rightTrim", "R2_leftTrim", "R2_rightTrim", "R1_discarded", "R2_discarded", "PE_discarded") if (!all(names(expected) == expected)) { stop("error with message")} my_class_ss <- new("hts_CutTrim", Notes = CutTrim.data$Notes, totalFragmentsInput = CutTrim.data$totalFragmentsInput, totalFragmentsOutput = CutTrim.data$totalFragmentsOutput, SE_in = CutTrim.data$Single_end$SE_in, SE_out = CutTrim.data$Single_end$SE_out, SE_rightTrim = CutTrim.data$Single_end$SE_rightTrim, SE_leftTrim = CutTrim.data$Single_end$SE_leftTrim, SE_discarded = CutTrim.data$Single_end$SE_discarded, PE_in = CutTrim.data$Paired_end$PE_in, PE_out = CutTrim.data$Paired_end$PE_out, R1_leftTrim = CutTrim.data$Paired_end$R1_leftTrim, R1_rightTrim = CutTrim.data$Paired_end$R2_rightTrim, R2_leftTrim = CutTrim.data$Paired_end$R2_leftTrim, R2_rightTrim = CutTrim.data$Paired_end$R2_rightTrim, R1_discarded = CutTrim.data$Paired_end$R1_discarded, R2_discarded = CutTrim.data$Paired_end$R2_discarded, PE_discarded = CutTrim.data$Paired_end$PE_discarded) return(my_class_ss) setValidity("hts_CutTrim", validCutTrim) validCutTrim(my_class_ss) }
/R/CutTrim.R
no_license
mgaliciaa/HTSstream
R
false
false
1,803
r
hts_CutTrim <- setClass("hts_CutTrim", contains = c("hts_Trimmer")) hts_CutTrim <- function(CutTrim.data = list()){ expected <- c("Notes", "totalFragmentsInput", "totalFragmentsOutput", "SE_in", "SE_out", "SE_rightTrim", "SE_leftTrim", "SE_discarded", "PE_in", "PE_out", "R1_leftTrim", "R1_rightTrim", "R2_leftTrim", "R2_rightTrim", "R1_discarded", "R2_discarded", "PE_discarded") if (!all(names(expected) == expected)) { stop("error with message")} my_class_ss <- new("hts_CutTrim", Notes = CutTrim.data$Notes, totalFragmentsInput = CutTrim.data$totalFragmentsInput, totalFragmentsOutput = CutTrim.data$totalFragmentsOutput, SE_in = CutTrim.data$Single_end$SE_in, SE_out = CutTrim.data$Single_end$SE_out, SE_rightTrim = CutTrim.data$Single_end$SE_rightTrim, SE_leftTrim = CutTrim.data$Single_end$SE_leftTrim, SE_discarded = CutTrim.data$Single_end$SE_discarded, PE_in = CutTrim.data$Paired_end$PE_in, PE_out = CutTrim.data$Paired_end$PE_out, R1_leftTrim = CutTrim.data$Paired_end$R1_leftTrim, R1_rightTrim = CutTrim.data$Paired_end$R2_rightTrim, R2_leftTrim = CutTrim.data$Paired_end$R2_leftTrim, R2_rightTrim = CutTrim.data$Paired_end$R2_rightTrim, R1_discarded = CutTrim.data$Paired_end$R1_discarded, R2_discarded = CutTrim.data$Paired_end$R2_discarded, PE_discarded = CutTrim.data$Paired_end$PE_discarded) return(my_class_ss) setValidity("hts_CutTrim", validCutTrim) validCutTrim(my_class_ss) }
########################################################## # EDX MovieLens Project HarvardX PH125.9x # R script # Author: Camilo Lillo # May 16, 2021 ########################################################## ########################################################## # Create edx set, validation set (final hold-out test set) ########################################################## # Note: this process could take a couple of minutes if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org") if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org") if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org") if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org") if(!require(stringi)) install.packages("stringi", repos = "http://cran.us.r-project.org") if(!require(scales)) install.packages("scales", repos = "http://cran.us.r-project.org") if(!require(dismo)) install.packages("dismo", repos = "http://cran.us.r-project.org") if(!require(dplyr)) install.packages("dplyr", repos = "http://cran.us.r-project.org") if(!require(recosystem)) install.packages("recosystem", repos = "http://cran.us.r-project.org") library(tidyverse) library(caret) library(data.table) library(ggplot2) library(stringi) library(scales) library(dismo) library(dplyr) library(recosystem) # MovieLens 10M dataset: # https://grouplens.org/datasets/movielens/10m/ # http://files.grouplens.org/datasets/movielens/ml-10m.zip dl <- tempfile() download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl) ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))), col.names = c("userId", "movieId", "rating", "timestamp")) movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3) colnames(movies) <- c("movieId", "title", "genres") # I use R 4.0 or later: movies <- as.data.frame(movies) %>% mutate(movieId = 
as.numeric(movieId), title = as.character(title), genres = as.character(genres)) movielens <- left_join(ratings, movies, by = "movieId") # Validation set will be 10% of MovieLens data set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)` test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE) edx <- movielens[-test_index,] temp <- movielens[ test_index,] # Make sure userId and movieId in validation set are also in edx set validation <- temp %>% semi_join(edx, by = "movieId") %>% semi_join(edx, by = "userId") # Add rows removed from validation set back into edx set removed <- anti_join(temp, validation) edx <- rbind(edx, removed) rm(dl, ratings, movies, test_index, movielens, removed) edx0 = edx ########################################################## # Exploratory data analysis ########################################################## # Response variable distribution dim(edx) fig1 <- edx %>% group_by(rating) %>% summarize(total = length(rating)) %>% ggplot(aes(x = rating, y = 100*total/(sum(total)))) + geom_bar(stat="identity", position=position_dodge(), width = 0.4, color = "black", fill = "orange") + geom_text(aes(label=percent(total/(sum(total)) %>% round(1))), vjust=1.6, color="black", position = position_dodge(0.9), size = 3) + labs(title="Rating Distribution", x="Rating", y = "Relative Frequency (%)") fig1 ## user analysis tab_userId = edx %>% group_by(userId) %>% summarize(avg = mean(rating), sds = sd(rating), cv = 100*sd(rating)/mean(rating), len = length(rating)) tab_userId = tab_userId[order(-tab_userId$avg),] tab_userId = tab_userId %>% mutate(sds = ifelse(is.na(sds), 0, sds), cv = ifelse(is.na(cv), 0, cv)) tab_userId$rank = c(1:NROW(tab_userId)) tab_userId = tab_userId %>% mutate(icsup = avg + 1.96*(sds/sqrt(len))) tab_userId = tab_userId %>% mutate(icinf = avg - 1.96*(sds/sqrt(len))) fig2 = tab_userId %>% filter(len > 100) %>% ggplot(aes(rank)) + geom_line(aes(y = avg, colour = "rating 
average")) + geom_line(aes(y = icsup, colour = "upper IC(95%)")) + geom_line(aes(y = icinf, colour = "lower IC(95%)")) + labs(title="", x = "users ranking (users who have watched more than 100 movies)", y = "ratings") fig2 ## movie analysis tab_movieId = edx %>% group_by(title) %>% summarize(avg = mean(rating), sds = sd(rating), cv = 100*sd(rating)/mean(rating), len = length(rating)) tab_movieId = tab_movieId[order(-tab_movieId$avg),] tab_movieId = tab_movieId %>% mutate(sds = ifelse(is.na(sds), 0, sds), cv = ifelse(is.na(cv), 0, cv)) tab_movieId$rank = c(1:NROW(tab_movieId)) tab_movieId = tab_movieId %>% mutate(icsup = avg + 1.96*(sds/sqrt(len))) tab_movieId = tab_movieId %>% mutate(icinf = avg - 1.96*(sds/sqrt(len))) fig3 = tab_movieId %>% filter(len > 100) %>% ggplot(aes(rank)) + geom_line(aes(y = avg, colour = "rating average")) + geom_line(aes(y = icsup, colour = "upper IC(95%)")) + geom_line(aes(y = icinf, colour = "lower IC(95%)")) + labs(title="", x = "movie ranking (viewed more than 100 times)", y = "ratings") fig3 ## both effects tab_userId = tab_userId[order(tab_userId$avg),] tab_movieId = tab_movieId[order(tab_movieId$avg),] tab_userId$k = (c(1:NROW(tab_userId) ) - 0.5)/NROW(tab_userId ) tab_movieId$k = (c(1:NROW(tab_movieId)) - 0.5)/NROW(tab_movieId) fig4 = ggplot() + geom_line(data=tab_userId, aes(x = avg, y = k, colour = "rating (users) dist.")) + geom_line(data=tab_movieId, aes(x = avg, y = k, colour = "rating (movies) dist.")) + labs(title="", x = "ratings", y = "ECDF") fig4 ## year analysis # edx$year = as.numeric(stri_reverse(substr(stri_reverse(edx$title), 2, 5))) # tab_years = edx %>% # group_by(year) %>% # summarize(avg = mean(rating), # sds = sd(rating), # cv = 100*sd(rating)/mean(rating), # len = length(rating)) # # fig5.1 = tab_years %>% ggplot(aes(x=year, y=len)) + # geom_line(aes(y=len, group = 1), col="black") + # geom_point(aes(y=len, group = 1), col="black") + # labs(title="", x = "year", y = "number of movies by year") # fig5.1 
edx$year = as.numeric(stri_reverse(substr(stri_reverse(edx$title), 2, 5))) edx = edx %>% mutate(year = ifelse(year < 1980, "< 1980", year)) tab_years2 = edx %>% group_by(year) %>% summarize(avg = mean(rating), sds = sd(rating), cv = 100*sd(rating)/mean(rating), len = length(rating)) fig5.2 = tab_years2 %>% ggplot(aes(x=year, y=avg)) + geom_line(aes(y=avg, group = 1), col="black") + geom_point(aes(y=avg, group = 1), col="black") + geom_errorbar(aes(ymin=avg-(1.96*sds/sqrt(len)), ymax=avg+(1.96*sds/sqrt(len))), width=.2, position=position_dodge(0.05)) + labs(title="", x = "year", y = "rating average") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) fig5.2 # analysis by genres tab_genres = edx %>% separate_rows(genres, sep = "\\|") %>% group_by(genres) %>% summarize(avg = mean(rating), sds = sd(rating), cv = 100*sd(rating)/mean(rating), len = length(rating)) b <- max(tab_genres$len)/(max(tab_genres$avg) - 3) a <- b*(0 - 3) fig6 = tab_genres %>% ggplot(aes(x=reorder(genres, -avg), y=len)) + geom_bar(col="black", stat="identity", fill = "orange") + geom_line(aes(y=avg * b + a, group = 1), col = "darkgreen", size = 1.2) + scale_y_continuous(name="n", sec.axis=sec_axis(~(. 
- a)/b, name = "Rating average")) + labs(title="", x = "") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) fig6 ## mu IC 95% grouped by genres # fig7 = tab_genres %>% filter((genres != "IMAX")&(genres != "(no genres listed)")) %>% ggplot(aes(x=reorder(genres, -avg), y=avg)) + # geom_line(aes(y=avg, group = 1), col="darkgreen") + # geom_point(aes(y=avg, group = 1), col="black") + # geom_errorbar(aes(ymin=avg-(1.96*sds/sqrt(len)), ymax=avg+(1.96*sds/sqrt(len))), width=.2, # position=position_dodge(0.05)) + # labs(title="", x = "", y = "ratings") + # theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) # fig7 ########################################################## # Model ########################################################## ## m1: global mean model ## select 10-folds from train set to develop cross-validation of diferents models KF = 10 set.seed(2020-12-31, sample.kind = "Rounding") edx$id_cv = kfold(1:NROW(edx), k = KF) edx_val = numeric() for(u in 1:KF){ m1_folds = edx %>% filter(id_cv != u) %>% summarise(avg = mean(rating)) edx_test = edx %>% filter(id_cv == u) edx_test = edx_test[,c("userId", "movieId", "rating", "id_cv")] edx_test$pred_m1 = m1_folds edx_val = rbind(edx_val, edx_test, fill = TRUE) } RMSE_m1 = RMSE(pred = edx_val$pred_m1, obs = edx_val$rating) # ratings model for movie effects edx$movieId = as.factor(edx$movieId) edx_val_m2 = numeric() for(u in 1:KF){ xb <- mean(edx$rating) m2_folds = edx %>% filter(id_cv != u) %>% group_by(movieId) %>% summarise(b_i = mean(rating - xb)) edx_test = edx %>% filter(id_cv == u) edx_test$pred_m2 <- xb + edx_test %>% left_join(m2_folds, by = "movieId") %>% pull(b_i) edx_val_m2 = rbind(edx_val_m2, edx_test, fill = TRUE) } edx_val_m2 = edx_val_m2[,c("userId", "movieId", "pred_m2")] edx_val_m2 = na.omit(edx_val_m2) edx_val_m2$movieId = as.numeric(as.character(edx_val_m2$movieId)) edx_val = edx_val[,-1] edx_val = left_join(edx_val, edx_val_m2, by = c("userId", "movieId")) 
RMSE_m2 = RMSE(pred = edx_val$pred_m2, obs = edx_val$rating, na.rm = TRUE) # add user effect edx$movieId = as.factor(edx$movieId) edx_val_m3 = numeric() for(u in 1:KF){ xb <- mean(edx$rating) m3_folds = edx %>% filter(id_cv != u) %>% group_by(movieId) %>% summarise(b_i = mean(rating - xb)) m3_add_user_in_folds = edx %>% filter(id_cv != u) %>% left_join(m3_folds, "movieId") %>% group_by(userId) %>% summarise(b_u = mean(rating - xb - b_i)) edx_test = edx %>% filter(id_cv == u) edx_test$pred_m3 <- edx_test %>% left_join(m3_folds, by = "movieId") %>% left_join(m3_add_user_in_folds, by = "userId") %>% mutate(rating_pred = xb + b_i + b_u) %>% pull(rating_pred) edx_val_m3 = rbind(edx_val_m3, edx_test, fill = TRUE) } edx_val_m3 = edx_val_m3[,c("userId", "movieId", "pred_m3")] edx_val_m3 = na.omit(edx_val_m3) edx_val$userId = as.character(edx_val$userId) edx_val_m3$userId = as.character(edx_val_m3$userId) edx_val$movieId = as.character(edx_val$movieId) edx_val_m3$movieId = as.character(edx_val_m3$movieId) edx_val = left_join(edx_val, edx_val_m3, by = c("userId", "movieId")) RMSE_m3 = RMSE(pred = edx_val$pred_m3, obs = edx_val$rating, na.rm = TRUE) # these model can be better by the next model ## not run! 
set.seed(2021-05-28, sample.kind = "Rounding") train_edx <- with(edx0, data_memory(user_index = userId, item_index = movieId, rating = rating)) test_edx <- with(validation, data_memory(user_index = userId, item_index = movieId, rating = rating)) # r = recosystem::Reco() # opts = r$tune(train_edx, opts = list(dim = c(20, 30, 40), # lrate = seq(0.025, 0.1, 0.025), # costp_l2 = c(0.05, 0.075, 0.1), # costq_l2 = c(0.001, 0.005, 0.01, 0.015), # nthread = 8, niter = 10)) optim_par = list() optim_par[["dim"]] = 30 optim_par[["costp_l1"]] = 0 optim_par[["costp_l2"]] = 0.075 optim_par[["costq_l1"]] = 0 optim_par[["costq_l2"]] = 0.015 optim_par[["lrate"]] = 0.05 optim_par[["loss_fun"]] = 0.7987279 edx$movieId = as.factor(edx$movieId) edx_val_m4 = numeric() for(u in 1:KF){ m4_folds = edx %>% filter(id_cv != u) edx_test = edx %>% filter(id_cv == u) mfold <- with(m4_folds, data_memory(user_index = userId, item_index = movieId, rating = rating)) tfold <- with(edx_test, data_memory(user_index = userId, item_index = movieId, rating = rating)) r = recosystem::Reco() r$train(mfold, opts = c(optim_par, nthread = 8, niter = 20), verbose = FALSE) pred_rating_folds <- r$predict(tfold, out_memory()) edx_test$pred_m4 = pred_rating_folds edx_val_m4 = rbind(edx_val_m4, edx_test[,c("userId", "movieId", "pred_m4")], fill = TRUE) } edx_val_m4 = edx_val_m4[,-1] edx_val$userId = as.character(edx_val$userId) edx_val_m4$userId = as.character(edx_val_m4$userId) edx_val$movieId = as.character(edx_val$movieId) edx_val_m4$movieId = as.character(edx_val_m4$movieId) edx_val = left_join(edx_val, edx_val_m4, by = c("userId", "movieId")) RMSE_m4 = RMSE(pred = edx_val$pred_m4, obs = edx_val$rating, na.rm = TRUE) # Evaluate the test set with the final model r$train(train_edx, opts = c(optim_par, nthread = 8, niter = 20), verbose = FALSE) rating_pred_validation <- r$predict(test_edx, out_memory()) RMSE_validation = RMSE(rating_pred_validation, validation$rating) rating_pred_validation2 = 
rating_pred_validation rating_pred_validation2[rating_pred_validation2 > 5.0] = 5.0 rating_pred_validation2[rating_pred_validation2 < 0.5] = 0.5 RMSE_val = RMSE(rating_pred_validation2, validation$rating) print(RMSE_val)
/Rcode_movielens.R
no_license
Camilillo/HarvardX-PH125.9x-MovieLens
R
false
false
13,985
r
########################################################## # EDX MovieLens Project HarvardX PH125.9x # R script # Author: Camilo Lillo # May 16, 2021 ########################################################## ########################################################## # Create edx set, validation set (final hold-out test set) ########################################################## # Note: this process could take a couple of minutes if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org") if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org") if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org") if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org") if(!require(stringi)) install.packages("stringi", repos = "http://cran.us.r-project.org") if(!require(scales)) install.packages("scales", repos = "http://cran.us.r-project.org") if(!require(dismo)) install.packages("dismo", repos = "http://cran.us.r-project.org") if(!require(dplyr)) install.packages("dplyr", repos = "http://cran.us.r-project.org") if(!require(recosystem)) install.packages("recosystem", repos = "http://cran.us.r-project.org") library(tidyverse) library(caret) library(data.table) library(ggplot2) library(stringi) library(scales) library(dismo) library(dplyr) library(recosystem) # MovieLens 10M dataset: # https://grouplens.org/datasets/movielens/10m/ # http://files.grouplens.org/datasets/movielens/ml-10m.zip dl <- tempfile() download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl) ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))), col.names = c("userId", "movieId", "rating", "timestamp")) movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3) colnames(movies) <- c("movieId", "title", "genres") # I use R 4.0 or later: movies <- as.data.frame(movies) %>% mutate(movieId = 
as.numeric(movieId), title = as.character(title), genres = as.character(genres)) movielens <- left_join(ratings, movies, by = "movieId") # Validation set will be 10% of MovieLens data set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)` test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE) edx <- movielens[-test_index,] temp <- movielens[ test_index,] # Make sure userId and movieId in validation set are also in edx set validation <- temp %>% semi_join(edx, by = "movieId") %>% semi_join(edx, by = "userId") # Add rows removed from validation set back into edx set removed <- anti_join(temp, validation) edx <- rbind(edx, removed) rm(dl, ratings, movies, test_index, movielens, removed) edx0 = edx ########################################################## # Exploratory data analysis ########################################################## # Response variable distribution dim(edx) fig1 <- edx %>% group_by(rating) %>% summarize(total = length(rating)) %>% ggplot(aes(x = rating, y = 100*total/(sum(total)))) + geom_bar(stat="identity", position=position_dodge(), width = 0.4, color = "black", fill = "orange") + geom_text(aes(label=percent(total/(sum(total)) %>% round(1))), vjust=1.6, color="black", position = position_dodge(0.9), size = 3) + labs(title="Rating Distribution", x="Rating", y = "Relative Frequency (%)") fig1 ## user analysis tab_userId = edx %>% group_by(userId) %>% summarize(avg = mean(rating), sds = sd(rating), cv = 100*sd(rating)/mean(rating), len = length(rating)) tab_userId = tab_userId[order(-tab_userId$avg),] tab_userId = tab_userId %>% mutate(sds = ifelse(is.na(sds), 0, sds), cv = ifelse(is.na(cv), 0, cv)) tab_userId$rank = c(1:NROW(tab_userId)) tab_userId = tab_userId %>% mutate(icsup = avg + 1.96*(sds/sqrt(len))) tab_userId = tab_userId %>% mutate(icinf = avg - 1.96*(sds/sqrt(len))) fig2 = tab_userId %>% filter(len > 100) %>% ggplot(aes(rank)) + geom_line(aes(y = avg, colour = "rating 
average")) + geom_line(aes(y = icsup, colour = "upper IC(95%)")) + geom_line(aes(y = icinf, colour = "lower IC(95%)")) + labs(title="", x = "users ranking (users who have watched more than 100 movies)", y = "ratings") fig2 ## movie analysis tab_movieId = edx %>% group_by(title) %>% summarize(avg = mean(rating), sds = sd(rating), cv = 100*sd(rating)/mean(rating), len = length(rating)) tab_movieId = tab_movieId[order(-tab_movieId$avg),] tab_movieId = tab_movieId %>% mutate(sds = ifelse(is.na(sds), 0, sds), cv = ifelse(is.na(cv), 0, cv)) tab_movieId$rank = c(1:NROW(tab_movieId)) tab_movieId = tab_movieId %>% mutate(icsup = avg + 1.96*(sds/sqrt(len))) tab_movieId = tab_movieId %>% mutate(icinf = avg - 1.96*(sds/sqrt(len))) fig3 = tab_movieId %>% filter(len > 100) %>% ggplot(aes(rank)) + geom_line(aes(y = avg, colour = "rating average")) + geom_line(aes(y = icsup, colour = "upper IC(95%)")) + geom_line(aes(y = icinf, colour = "lower IC(95%)")) + labs(title="", x = "movie ranking (viewed more than 100 times)", y = "ratings") fig3 ## both effects tab_userId = tab_userId[order(tab_userId$avg),] tab_movieId = tab_movieId[order(tab_movieId$avg),] tab_userId$k = (c(1:NROW(tab_userId) ) - 0.5)/NROW(tab_userId ) tab_movieId$k = (c(1:NROW(tab_movieId)) - 0.5)/NROW(tab_movieId) fig4 = ggplot() + geom_line(data=tab_userId, aes(x = avg, y = k, colour = "rating (users) dist.")) + geom_line(data=tab_movieId, aes(x = avg, y = k, colour = "rating (movies) dist.")) + labs(title="", x = "ratings", y = "ECDF") fig4 ## year analysis # edx$year = as.numeric(stri_reverse(substr(stri_reverse(edx$title), 2, 5))) # tab_years = edx %>% # group_by(year) %>% # summarize(avg = mean(rating), # sds = sd(rating), # cv = 100*sd(rating)/mean(rating), # len = length(rating)) # # fig5.1 = tab_years %>% ggplot(aes(x=year, y=len)) + # geom_line(aes(y=len, group = 1), col="black") + # geom_point(aes(y=len, group = 1), col="black") + # labs(title="", x = "year", y = "number of movies by year") # fig5.1 
edx$year = as.numeric(stri_reverse(substr(stri_reverse(edx$title), 2, 5))) edx = edx %>% mutate(year = ifelse(year < 1980, "< 1980", year)) tab_years2 = edx %>% group_by(year) %>% summarize(avg = mean(rating), sds = sd(rating), cv = 100*sd(rating)/mean(rating), len = length(rating)) fig5.2 = tab_years2 %>% ggplot(aes(x=year, y=avg)) + geom_line(aes(y=avg, group = 1), col="black") + geom_point(aes(y=avg, group = 1), col="black") + geom_errorbar(aes(ymin=avg-(1.96*sds/sqrt(len)), ymax=avg+(1.96*sds/sqrt(len))), width=.2, position=position_dodge(0.05)) + labs(title="", x = "year", y = "rating average") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) fig5.2 # analysis by genres tab_genres = edx %>% separate_rows(genres, sep = "\\|") %>% group_by(genres) %>% summarize(avg = mean(rating), sds = sd(rating), cv = 100*sd(rating)/mean(rating), len = length(rating)) b <- max(tab_genres$len)/(max(tab_genres$avg) - 3) a <- b*(0 - 3) fig6 = tab_genres %>% ggplot(aes(x=reorder(genres, -avg), y=len)) + geom_bar(col="black", stat="identity", fill = "orange") + geom_line(aes(y=avg * b + a, group = 1), col = "darkgreen", size = 1.2) + scale_y_continuous(name="n", sec.axis=sec_axis(~(. 
- a)/b, name = "Rating average")) + labs(title="", x = "") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) fig6 ## mu IC 95% grouped by genres # fig7 = tab_genres %>% filter((genres != "IMAX")&(genres != "(no genres listed)")) %>% ggplot(aes(x=reorder(genres, -avg), y=avg)) + # geom_line(aes(y=avg, group = 1), col="darkgreen") + # geom_point(aes(y=avg, group = 1), col="black") + # geom_errorbar(aes(ymin=avg-(1.96*sds/sqrt(len)), ymax=avg+(1.96*sds/sqrt(len))), width=.2, # position=position_dodge(0.05)) + # labs(title="", x = "", y = "ratings") + # theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) # fig7 ########################################################## # Model ########################################################## ## m1: global mean model ## select 10-folds from train set to develop cross-validation of diferents models KF = 10 set.seed(2020-12-31, sample.kind = "Rounding") edx$id_cv = kfold(1:NROW(edx), k = KF) edx_val = numeric() for(u in 1:KF){ m1_folds = edx %>% filter(id_cv != u) %>% summarise(avg = mean(rating)) edx_test = edx %>% filter(id_cv == u) edx_test = edx_test[,c("userId", "movieId", "rating", "id_cv")] edx_test$pred_m1 = m1_folds edx_val = rbind(edx_val, edx_test, fill = TRUE) } RMSE_m1 = RMSE(pred = edx_val$pred_m1, obs = edx_val$rating) # ratings model for movie effects edx$movieId = as.factor(edx$movieId) edx_val_m2 = numeric() for(u in 1:KF){ xb <- mean(edx$rating) m2_folds = edx %>% filter(id_cv != u) %>% group_by(movieId) %>% summarise(b_i = mean(rating - xb)) edx_test = edx %>% filter(id_cv == u) edx_test$pred_m2 <- xb + edx_test %>% left_join(m2_folds, by = "movieId") %>% pull(b_i) edx_val_m2 = rbind(edx_val_m2, edx_test, fill = TRUE) } edx_val_m2 = edx_val_m2[,c("userId", "movieId", "pred_m2")] edx_val_m2 = na.omit(edx_val_m2) edx_val_m2$movieId = as.numeric(as.character(edx_val_m2$movieId)) edx_val = edx_val[,-1] edx_val = left_join(edx_val, edx_val_m2, by = c("userId", "movieId")) 
RMSE_m2 = RMSE(pred = edx_val$pred_m2, obs = edx_val$rating, na.rm = TRUE) # add user effect edx$movieId = as.factor(edx$movieId) edx_val_m3 = numeric() for(u in 1:KF){ xb <- mean(edx$rating) m3_folds = edx %>% filter(id_cv != u) %>% group_by(movieId) %>% summarise(b_i = mean(rating - xb)) m3_add_user_in_folds = edx %>% filter(id_cv != u) %>% left_join(m3_folds, "movieId") %>% group_by(userId) %>% summarise(b_u = mean(rating - xb - b_i)) edx_test = edx %>% filter(id_cv == u) edx_test$pred_m3 <- edx_test %>% left_join(m3_folds, by = "movieId") %>% left_join(m3_add_user_in_folds, by = "userId") %>% mutate(rating_pred = xb + b_i + b_u) %>% pull(rating_pred) edx_val_m3 = rbind(edx_val_m3, edx_test, fill = TRUE) } edx_val_m3 = edx_val_m3[,c("userId", "movieId", "pred_m3")] edx_val_m3 = na.omit(edx_val_m3) edx_val$userId = as.character(edx_val$userId) edx_val_m3$userId = as.character(edx_val_m3$userId) edx_val$movieId = as.character(edx_val$movieId) edx_val_m3$movieId = as.character(edx_val_m3$movieId) edx_val = left_join(edx_val, edx_val_m3, by = c("userId", "movieId")) RMSE_m3 = RMSE(pred = edx_val$pred_m3, obs = edx_val$rating, na.rm = TRUE) # these model can be better by the next model ## not run! 
set.seed(2021-05-28, sample.kind = "Rounding") train_edx <- with(edx0, data_memory(user_index = userId, item_index = movieId, rating = rating)) test_edx <- with(validation, data_memory(user_index = userId, item_index = movieId, rating = rating)) # r = recosystem::Reco() # opts = r$tune(train_edx, opts = list(dim = c(20, 30, 40), # lrate = seq(0.025, 0.1, 0.025), # costp_l2 = c(0.05, 0.075, 0.1), # costq_l2 = c(0.001, 0.005, 0.01, 0.015), # nthread = 8, niter = 10)) optim_par = list() optim_par[["dim"]] = 30 optim_par[["costp_l1"]] = 0 optim_par[["costp_l2"]] = 0.075 optim_par[["costq_l1"]] = 0 optim_par[["costq_l2"]] = 0.015 optim_par[["lrate"]] = 0.05 optim_par[["loss_fun"]] = 0.7987279 edx$movieId = as.factor(edx$movieId) edx_val_m4 = numeric() for(u in 1:KF){ m4_folds = edx %>% filter(id_cv != u) edx_test = edx %>% filter(id_cv == u) mfold <- with(m4_folds, data_memory(user_index = userId, item_index = movieId, rating = rating)) tfold <- with(edx_test, data_memory(user_index = userId, item_index = movieId, rating = rating)) r = recosystem::Reco() r$train(mfold, opts = c(optim_par, nthread = 8, niter = 20), verbose = FALSE) pred_rating_folds <- r$predict(tfold, out_memory()) edx_test$pred_m4 = pred_rating_folds edx_val_m4 = rbind(edx_val_m4, edx_test[,c("userId", "movieId", "pred_m4")], fill = TRUE) } edx_val_m4 = edx_val_m4[,-1] edx_val$userId = as.character(edx_val$userId) edx_val_m4$userId = as.character(edx_val_m4$userId) edx_val$movieId = as.character(edx_val$movieId) edx_val_m4$movieId = as.character(edx_val_m4$movieId) edx_val = left_join(edx_val, edx_val_m4, by = c("userId", "movieId")) RMSE_m4 = RMSE(pred = edx_val$pred_m4, obs = edx_val$rating, na.rm = TRUE) # Evaluate the test set with the final model r$train(train_edx, opts = c(optim_par, nthread = 8, niter = 20), verbose = FALSE) rating_pred_validation <- r$predict(test_edx, out_memory()) RMSE_validation = RMSE(rating_pred_validation, validation$rating) rating_pred_validation2 = 
rating_pred_validation rating_pred_validation2[rating_pred_validation2 > 5.0] = 5.0 rating_pred_validation2[rating_pred_validation2 < 0.5] = 0.5 RMSE_val = RMSE(rating_pred_validation2, validation$rating) print(RMSE_val)
#' Euro area and U.S. long-term government bond yields and Euro-U.S. dollar exchange rate #' #' A dataset containing time series of the difference between the monthly Euro area and U.S. #' long-term government bond yields and monthly average Euro - U.S. dollar exchange rate. The data #' covers the time period January 1989 - December 2009 with monthly frequency. This is the same data #' (in non-scaled form) that is used by Kalliovirta et. al. (2016). #' #' @format A numeric matrix of class \code{'ts'} with 252 rows and 2 columns with one time series in each column: #' \describe{ #' \item{First column:}{The difference between the monthly Euro area and U.S. long-term government bond yields #' (10 year maturity, i_euro - i_us), from January 1989 to December 2009. calculated by the ECB and the #' Federal Reserve Board; prior to 2001, the Euro area data refer to the "EU11" countries, and afterwards #' with changing composition eventually to the "EU17" by the end of the data period.} #' \item{Second column:}{Monthly average Euro - U.S. dollar exchange rate, from January 1989 to December 2009. #' Based on the ECU - USD exchange rate prior to 1999.} #' } #' #' @inherit GMVAR references #' @source OECD Statistics "eurusd" #' U.S. data containing log-differences of industrial production index, consumer price index, and M1, and an interest rate #' variable #' #' A dataset containing the monthly U.S. data covering the period from February 1959 to December 2019 #' (731 observations) and consisting of four variables: the log-difference of industrial production index (IP), #' the log-difference of consumer price index (CPI), the log-difference of M1 monetary aggregate (M1), and an interest rate #' variable (RATE). The log-differences are multiplied by hundred. 
The interest rate variable is the effective #' federal funds rate from February 1959 to August 2008 after which we replaced it with the Wu and Xia #' (2016) shadow rate, which is not constrained by the zero lower bound and also #' quantifies unconventional monetary policy measures. #' #' The Wu and Xia (2016) shadow rate data was retrieved from the Federal Reserve Bank of Atlanta's website #' (\url{https://www.atlantafed.org/cqer/research/wu-xia-shadow-federal-funds-rate}) and the rest of the data was #' retrieved from the Federal Reserve Bank of St. Louis database. #' #' @format A numeric matrix of class \code{'ts'} with 731 rows and 4 columns with one time series in each column: #' \describe{ #' \item{IP:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/INDPRO}} #' \item{CPI:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/CPIAUCSL}} #' \item{M1:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/M1SL}} #' \item{RATE:}{From 1959 February to 2008 August \url{https://fred.stlouisfed.org/series/FEDFUNDS} and #' from 2008 September onwards \url{https://www.atlantafed.org/cqer/research/wu-xia-shadow-federal-funds-rate}} #' } #' #' @references #' \itemize{ #' \item J.C. Wu and F.D. Xia. 2016. Measuring the Macroeconomic Impact of Monetary Policy at the Zero Lower Bound. #' \emph{Journal of Money, Credit and Banking}, 48(2-3): 253-291. #' } #' @source The Federal Reserve Bank of St. Louis database and the Federal Reserve Bank of Atlanta's website "usamone_prec" #' U.S. data containing log-differences of industrial production index, consumer price index, and M1, and an interest rate #' variable #' #' A dataset containing the monthly U.S. 
data covering the period from February 1959 to December 2020 #' (743 observations) and consisting of four variables: the log-difference of industrial production index (IP), #' the log-difference of consumer price index (CPI), the log-difference of M1 monetary aggregate (M1), and an interest rate #' variable (RATE). The log-differences are multiplied by hundred. The interest rate variable is the effective #' federal funds rate from February 1959 to August 2008 after which we replaced it with the Wu and Xia #' (2016) shadow rate, which is not constrained by the zero lower bound and also #' quantifies unconventional monetary policy measures. #' #' The Wu and Xia (2016) shadow rate data was retrieved from the Federal Reserve Bank of Atlanta's website #' (\url{https://www.atlantafed.org/cqer/research/wu-xia-shadow-federal-funds-rate}) and the rest of the data was #' retrieved from the Federal Reserve Bank of St. Louis database. #' #' @format A numeric matrix of class \code{'ts'} with 743 rows and 4 columns with one time series in each column: #' \describe{ #' \item{IP:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/INDPRO}} #' \item{CPI:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/CPIAUCSL}} #' \item{M1:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/M1SL}} #' \item{RATE:}{From 1959 February to 2008 August \url{https://fred.stlouisfed.org/series/FEDFUNDS} and #' from 2008 September onwards \url{https://www.atlantafed.org/cqer/research/wu-xia-shadow-federal-funds-rate}} #' } #' #' @inherit usamone_prec references source "usamone"
/R/data.R
no_license
yangkedc1984/gmvarkit
R
false
false
5,279
r
#' Euro area and U.S. long-term government bond yields and Euro-U.S. dollar exchange rate #' #' A dataset containing time series of the difference between the monthly Euro area and U.S. #' long-term government bond yields and monthly average Euro - U.S. dollar exchange rate. The data #' covers the time period January 1989 - December 2009 with monthly frequency. This is the same data #' (in non-scaled form) that is used by Kalliovirta et. al. (2016). #' #' @format A numeric matrix of class \code{'ts'} with 252 rows and 2 columns with one time series in each column: #' \describe{ #' \item{First column:}{The difference between the monthly Euro area and U.S. long-term government bond yields #' (10 year maturity, i_euro - i_us), from January 1989 to December 2009. calculated by the ECB and the #' Federal Reserve Board; prior to 2001, the Euro area data refer to the "EU11" countries, and afterwards #' with changing composition eventually to the "EU17" by the end of the data period.} #' \item{Second column:}{Monthly average Euro - U.S. dollar exchange rate, from January 1989 to December 2009. #' Based on the ECU - USD exchange rate prior to 1999.} #' } #' #' @inherit GMVAR references #' @source OECD Statistics "eurusd" #' U.S. data containing log-differences of industrial production index, consumer price index, and M1, and an interest rate #' variable #' #' A dataset containing the monthly U.S. data covering the period from February 1959 to December 2019 #' (731 observations) and consisting of four variables: the log-difference of industrial production index (IP), #' the log-difference of consumer price index (CPI), the log-difference of M1 monetary aggregate (M1), and an interest rate #' variable (RATE). The log-differences are multiplied by hundred. 
The interest rate variable is the effective #' federal funds rate from February 1959 to August 2008 after which we replaced it with the Wu and Xia #' (2016) shadow rate, which is not constrained by the zero lower bound and also #' quantifies unconventional monetary policy measures. #' #' The Wu and Xia (2016) shadow rate data was retrieved from the Federal Reserve Bank of Atlanta's website #' (\url{https://www.atlantafed.org/cqer/research/wu-xia-shadow-federal-funds-rate}) and the rest of the data was #' retrieved from the Federal Reserve Bank of St. Louis database. #' #' @format A numeric matrix of class \code{'ts'} with 731 rows and 4 columns with one time series in each column: #' \describe{ #' \item{IP:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/INDPRO}} #' \item{CPI:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/CPIAUCSL}} #' \item{M1:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/M1SL}} #' \item{RATE:}{From 1959 February to 2008 August \url{https://fred.stlouisfed.org/series/FEDFUNDS} and #' from 2008 September onwards \url{https://www.atlantafed.org/cqer/research/wu-xia-shadow-federal-funds-rate}} #' } #' #' @references #' \itemize{ #' \item J.C. Wu and F.D. Xia. 2016. Measuring the Macroeconomic Impact of Monetary Policy at the Zero Lower Bound. #' \emph{Journal of Money, Credit and Banking}, 48(2-3): 253-291. #' } #' @source The Federal Reserve Bank of St. Louis database and the Federal Reserve Bank of Atlanta's website "usamone_prec" #' U.S. data containing log-differences of industrial production index, consumer price index, and M1, and an interest rate #' variable #' #' A dataset containing the monthly U.S. 
data covering the period from February 1959 to December 2020 #' (743 observations) and consisting of four variables: the log-difference of industrial production index (IP), #' the log-difference of consumer price index (CPI), the log-difference of M1 monetary aggregate (M1), and an interest rate #' variable (RATE). The log-differences are multiplied by hundred. The interest rate variable is the effective #' federal funds rate from February 1959 to August 2008 after which we replaced it with the Wu and Xia #' (2016) shadow rate, which is not constrained by the zero lower bound and also #' quantifies unconventional monetary policy measures. #' #' The Wu and Xia (2016) shadow rate data was retrieved from the Federal Reserve Bank of Atlanta's website #' (\url{https://www.atlantafed.org/cqer/research/wu-xia-shadow-federal-funds-rate}) and the rest of the data was #' retrieved from the Federal Reserve Bank of St. Louis database. #' #' @format A numeric matrix of class \code{'ts'} with 743 rows and 4 columns with one time series in each column: #' \describe{ #' \item{IP:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/INDPRO}} #' \item{CPI:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/CPIAUCSL}} #' \item{M1:}{The log-difference multiplied by hundred, \url{https://fred.stlouisfed.org/series/M1SL}} #' \item{RATE:}{From 1959 February to 2008 August \url{https://fred.stlouisfed.org/series/FEDFUNDS} and #' from 2008 September onwards \url{https://www.atlantafed.org/cqer/research/wu-xia-shadow-federal-funds-rate}} #' } #' #' @inherit usamone_prec references source "usamone"
#' Life cycle of the rlang package #' #' @description #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("maturing")} #' #' The rlang package is currently maturing. Unless otherwise stated, #' this applies to all its exported functions. Maturing functions are #' susceptible to API changes. Only use these in packages if you're #' prepared to make changes as the package evolves. See sections below #' for a list of functions marked as stable. #' #' The documentation pages of retired functions contain life cycle #' sections that explain the reasons for their retirements. #' #' #' @section Stable functions: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("stable")} #' #' * [eval_tidy()] #' * [!!], [!!!] #' * [enquo()], [quo()], [quos()] #' * [enexpr()], [expr()], [exprs()] #' * [sym()], [syms()] #' * [new_quosure()], [is_quosure()] #' * [missing_arg()], [is_missing()] #' #' * [quo_get_expr()], [quo_set_expr()] #' * [quo_get_env()], [quo_set_env()] #' #' * [eval_bare()] #' #' * [set_names()], [names2()] #' * [as_function()], [new_function()] #' #' #' @section Experimental functions: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("experimental")} #' #' These functions are not yet part of the rlang API. Expect breaking #' changes. 
#' #' * [with_env()], [locally()], [env_poke()] #' * [pkg_env()], [pkg_env_name()], [ns_env()], [ns_imports_env()], [ns_env_name()] #' #' * [is_pairlist()], [as_pairlist()], [is_node()], [is_node_list()] #' #' * [is_definition()], [new_definition()], [is_formulaish()], #' [dots_definitions()] #' #' * [local_options()], [with_options()], [push_options()], #' [peek_options()], [peek_option()] #' #' * [as_bytes()], [chr_unserialise_unicode()] #' #' * [caller_fn()], [current_fn()] #' #' #' @section Questioning stage: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("questioning")} #' #' #' **In the questioning stage as of rlang 0.4.0** #' #' These functions are likely to be moved to the vctrs package: #' #' * [lgl()], [int()], etc. #' * [new_logical()], [new_integer()], etc. #' * [na_lgl], [na_int], [is_lgl_na()], [is_int_na()], etc. #' #' #' **In the questioning stage as of rlang 0.3.0** #' #' * [child_env()] #' * [flatten()], [squash()], and their atomic vector variants #' * [modify()] and [prepend()] #' * [with_restarts()], [rst_list()], [rst_exists()], [rst_jump()], #' [rst_maybe_jump()], [rst_abort()]. It is not clear yet whether we #' want to recommend restarts as a style of programming in R. #' * [return_from()] and [return_to()]. #' * [expr_label()], [expr_name()], and [expr_text()]. #' #' #' **In the questioning stage as of rlang 0.2.0** #' #' * [UQ()], [UQS()] #' * [dots_splice()], [splice()] #' #' #' @section Soft-deprecated functions and arguments: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("soft-deprecated")} #' #' #' **Soft-deprecated in rlang 0.4.0** #' #' * [exiting()]: Handlers are now treated as exiting by default. #' * [invoke()]: Use the simpler [exec()] instead. #' * [as_logical()], [as_integer()], etc. => `vctrs::vec_cast()`. 
#' * [type_of()], [switch_type()], [coerce_type()], [switch_class()], #' [coerce_class()] #' #' #' @section Deprecated functions and arguments: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("deprecated")} #' #' **Bumped to deprecated in rlang 0.4.0** #' #' * [modify()] and [prepend()]. #' * `new_logical_along()`, `new_integer_along()`, #' `new_double_along()`, `new_complex_along()`, #' `new_character_along()`, `new_raw_along()`, `new_list_along()`. #' #' * [lang_modify()] => [call_modify()] #' * [lang_standardise()] => [call_standardise()] #' * [lang_fn()] => [call_fn()] #' * [lang_name()] => [call_name()] #' * [lang_args()] => [call_args()] #' * [lang_args_names()] => [call_args_names()] #' * [lang_head()], [lang_tail()] #' * [lang()] => [call2()] #' * [new_language()] => [new_call()] #' * [is_lang()] => [is_call()] #' * [is_unary_lang()] => Use the `n` argument of [is_call()] #' * [is_binary_lang()] => Use the `n` argument of [is_call()] #' * [quo_is_lang()] => [quo_is_call()] #' #' * [call_modify()]: `.standardise` and `.env` arguments. #' #' * [is_expr()] => [is_expression()] #' * `quo_expr()` => [quo_squash()] #' * [parse_quosure()] => [parse_quo()] #' * [parse_quosures()] => [parse_quos()] #' * Assigning non-quosure objects to quosure lists. #' * `as.character()` on quosures. #' #' * [cnd_signal()]: `.cnd` => `cnd` #' * [cnd_signal()]: The `.mufflable` argument no longer has any effect #' #' * `scoped_names()` => [base::search()] #' * `is_scoped()` => [is_attached()] #' * `scoped_env()` => [search_env()] #' * `scoped_envs()` => [search_envs()] #' #' * `env_bind_exprs()` => [env_bind_lazy()] #' * `env_bind_fns()` => [env_bind_active()] #' * Passing a function or formula to `env_depth()`, #' `env_poke_parent()`, `env_parent<-`, `env_tail()`, `set_env()`, #' `env_clone()`, `env_inherits()`, `env_bind()`, #' `local_bindings()`, `with_bindings()`, `env_poke()`, #' `env_has()`, `env_get()`, `env_names()`, `env_bind_exprs()` and #' `env_bind_fns()`. 
This internal genericity was causing confusion #' (see issue #427). You should now extract the environment #' separately before calling these functions. #' * [get_env()]: The `env` argument no longer has a default and must be supplied #' #' * [is_frame()], [global_frame()], [current_frame()], #' [ctxt_frame()], [call_frame()], [frame_position()], #' [caller_frame()] #' #' * [ctxt_depth()], [call_depth()], [ctxt_stack()], [call_stack()], #' [stack_trim()] #' #' * [set_attrs()], [mut_attrs()] #' #' * The `width` and `printer` arguments of [exprs_auto_name()] and #' [quos_auto_name()] no longer have any effect. For the same #' reason, passing a width as `.named` argument of dots collectors #' like `quos()` is deprecated. #' #' * `as_overscope()` => [as_data_mask()] #' * `new_overscope()` => [new_data_mask()] #' * `overscope_eval_next()` => [eval_tidy()] #' * `overscope_clean()` #' #' #' @section Defunct functions and arguments: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("defunct")} #' #' **Defunct as of rlang 0.4.0** #' #' * `length()` and `names()` on tidy eval `.data` pronouns. #' * Supplying a named `!!!` call. #' #' * [as_data_mask()]: `parent` argument #' * [new_data_mask()]: `parent` argument #' * [env_tail()]: `sentinel` => `last` #' * [abort()], [warn()], [inform()]: `msg`, `type` => `.msg`, `.type` #' * [abort()], [warn()], [inform()], [cnd()], [error_cnd()], #' [warning_cnd()], [message_cnd()]: `call` argument. #' * [is_character()], [is_string()], and variants: The `encoding` #' argument. #' #' #' @section Archived: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("archived")} #' #' These functions were entirely removed from the package. You will #' find them in the commit history and previous releases. 
#' #' #' **Archived as of rlang 0.4.0** #' #' * `UQE()` #' * `as_dictionary()`, `is_dictionary()` #' * `as_quosureish()`, `is_quosureish()` #' * `eval_tidy_()` #' * `mut_utf8_locale()`, `mut_latin1_locale()`, `mut_mbcs_locale()` #' * `set_chr_encoding()`, `chr_encoding()`, `set_str_encoding()`, `str_encoding()` #' * `as_native_character()`, `as_utf8_string()`, `as_native_string()` #' * `lang_type_of()`, `switch_lang()`, `coerce_lang()` #' #' #' **Archived as of rlang 0.3.0:** #' #' * `cnd_inform()`, `cnd_warn()` and `cnd_abort()` #' #' * `new_cnd()` => [cnd()] #' * `cnd_message()` => [message_cnd()] #' * `cnd_warning()` => [warning_cnd()] #' * `cnd_error()` => [error_cnd()] #' * `rst_muffle()` => [cnd_muffle()] #' * `inplace()` => [calling()]. The `muffle` argument of `inplace()` #' has not been implemented in `calling()` and is now defunct. #' #' * [cnd_signal()]: `.msg` and `.call`. #' * [cnd()], [error_cnd()], [warning_cnd()] and [message_cnd()]: #' `.msg` => `message`. #' #' #' @keywords internal #' @name lifecycle NULL
/R/lifecycle.R
no_license
sumana86543/rlang
R
false
false
7,909
r
#' Life cycle of the rlang package #' #' @description #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("maturing")} #' #' The rlang package is currently maturing. Unless otherwise stated, #' this applies to all its exported functions. Maturing functions are #' susceptible to API changes. Only use these in packages if you're #' prepared to make changes as the package evolves. See sections below #' for a list of functions marked as stable. #' #' The documentation pages of retired functions contain life cycle #' sections that explain the reasons for their retirements. #' #' #' @section Stable functions: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("stable")} #' #' * [eval_tidy()] #' * [!!], [!!!] #' * [enquo()], [quo()], [quos()] #' * [enexpr()], [expr()], [exprs()] #' * [sym()], [syms()] #' * [new_quosure()], [is_quosure()] #' * [missing_arg()], [is_missing()] #' #' * [quo_get_expr()], [quo_set_expr()] #' * [quo_get_env()], [quo_set_env()] #' #' * [eval_bare()] #' #' * [set_names()], [names2()] #' * [as_function()], [new_function()] #' #' #' @section Experimental functions: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("experimental")} #' #' These functions are not yet part of the rlang API. Expect breaking #' changes. 
#' #' * [with_env()], [locally()], [env_poke()] #' * [pkg_env()], [pkg_env_name()], [ns_env()], [ns_imports_env()], [ns_env_name()] #' #' * [is_pairlist()], [as_pairlist()], [is_node()], [is_node_list()] #' #' * [is_definition()], [new_definition()], [is_formulaish()], #' [dots_definitions()] #' #' * [local_options()], [with_options()], [push_options()], #' [peek_options()], [peek_option()] #' #' * [as_bytes()], [chr_unserialise_unicode()] #' #' * [caller_fn()], [current_fn()] #' #' #' @section Questioning stage: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("questioning")} #' #' #' **In the questioning stage as of rlang 0.4.0** #' #' These functions are likely to be moved to the vctrs package: #' #' * [lgl()], [int()], etc. #' * [new_logical()], [new_integer()], etc. #' * [na_lgl], [na_int], [is_lgl_na()], [is_int_na()], etc. #' #' #' **In the questioning stage as of rlang 0.3.0** #' #' * [child_env()] #' * [flatten()], [squash()], and their atomic vector variants #' * [modify()] and [prepend()] #' * [with_restarts()], [rst_list()], [rst_exists()], [rst_jump()], #' [rst_maybe_jump()], [rst_abort()]. It is not clear yet whether we #' want to recommend restarts as a style of programming in R. #' * [return_from()] and [return_to()]. #' * [expr_label()], [expr_name()], and [expr_text()]. #' #' #' **In the questioning stage as of rlang 0.2.0** #' #' * [UQ()], [UQS()] #' * [dots_splice()], [splice()] #' #' #' @section Soft-deprecated functions and arguments: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("soft-deprecated")} #' #' #' **Soft-deprecated in rlang 0.4.0** #' #' * [exiting()]: Handlers are now treated as exiting by default. #' * [invoke()]: Use the simpler [exec()] instead. #' * [as_logical()], [as_integer()], etc. => `vctrs::vec_cast()`. 
#' * [type_of()], [switch_type()], [coerce_type()], [switch_class()], #' [coerce_class()] #' #' #' @section Deprecated functions and arguments: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("deprecated")} #' #' **Bumped to deprecated in rlang 0.4.0** #' #' * [modify()] and [prepend()]. #' * `new_logical_along()`, `new_integer_along()`, #' `new_double_along()`, `new_complex_along()`, #' `new_character_along()`, `new_raw_along()`, `new_list_along()`. #' #' * [lang_modify()] => [call_modify()] #' * [lang_standardise()] => [call_standardise()] #' * [lang_fn()] => [call_fn()] #' * [lang_name()] => [call_name()] #' * [lang_args()] => [call_args()] #' * [lang_args_names()] => [call_args_names()] #' * [lang_head()], [lang_tail()] #' * [lang()] => [call2()] #' * [new_language()] => [new_call()] #' * [is_lang()] => [is_call()] #' * [is_unary_lang()] => Use the `n` argument of [is_call()] #' * [is_binary_lang()] => Use the `n` argument of [is_call()] #' * [quo_is_lang()] => [quo_is_call()] #' #' * [call_modify()]: `.standardise` and `.env` arguments. #' #' * [is_expr()] => [is_expression()] #' * `quo_expr()` => [quo_squash()] #' * [parse_quosure()] => [parse_quo()] #' * [parse_quosures()] => [parse_quos()] #' * Assigning non-quosure objects to quosure lists. #' * `as.character()` on quosures. #' #' * [cnd_signal()]: `.cnd` => `cnd` #' * [cnd_signal()]: The `.mufflable` argument no longer has any effect #' #' * `scoped_names()` => [base::search()] #' * `is_scoped()` => [is_attached()] #' * `scoped_env()` => [search_env()] #' * `scoped_envs()` => [search_envs()] #' #' * `env_bind_exprs()` => [env_bind_lazy()] #' * `env_bind_fns()` => [env_bind_active()] #' * Passing a function or formula to `env_depth()`, #' `env_poke_parent()`, `env_parent<-`, `env_tail()`, `set_env()`, #' `env_clone()`, `env_inherits()`, `env_bind()`, #' `local_bindings()`, `with_bindings()`, `env_poke()`, #' `env_has()`, `env_get()`, `env_names()`, `env_bind_exprs()` and #' `env_bind_fns()`. 
This internal genericity was causing confusion #' (see issue #427). You should now extract the environment #' separately before calling these functions. #' * [get_env()]: The `env` argument no longer has a default and must be supplied #' #' * [is_frame()], [global_frame()], [current_frame()], #' [ctxt_frame()], [call_frame()], [frame_position()], #' [caller_frame()] #' #' * [ctxt_depth()], [call_depth()], [ctxt_stack()], [call_stack()], #' [stack_trim()] #' #' * [set_attrs()], [mut_attrs()] #' #' * The `width` and `printer` arguments of [exprs_auto_name()] and #' [quos_auto_name()] no longer have any effect. For the same #' reason, passing a width as `.named` argument of dots collectors #' like `quos()` is deprecated. #' #' * `as_overscope()` => [as_data_mask()] #' * `new_overscope()` => [new_data_mask()] #' * `overscope_eval_next()` => [eval_tidy()] #' * `overscope_clean()` #' #' #' @section Defunct functions and arguments: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("defunct")} #' #' **Defunct as of rlang 0.4.0** #' #' * `length()` and `names()` on tidy eval `.data` pronouns. #' * Supplying a named `!!!` call. #' #' * [as_data_mask()]: `parent` argument #' * [new_data_mask()]: `parent` argument #' * [env_tail()]: `sentinel` => `last` #' * [abort()], [warn()], [inform()]: `msg`, `type` => `.msg`, `.type` #' * [abort()], [warn()], [inform()], [cnd()], [error_cnd()], #' [warning_cnd()], [message_cnd()]: `call` argument. #' * [is_character()], [is_string()], and variants: The `encoding` #' argument. #' #' #' @section Archived: #' #' \Sexpr[results=rd, stage=render]{rlang:::lifecycle("archived")} #' #' These functions were entirely removed from the package. You will #' find them in the commit history and previous releases. 
#' #' #' **Archived as of rlang 0.4.0** #' #' * `UQE()` #' * `as_dictionary()`, `is_dictionary()` #' * `as_quosureish()`, `is_quosureish()` #' * `eval_tidy_()` #' * `mut_utf8_locale()`, `mut_latin1_locale()`, `mut_mbcs_locale()` #' * `set_chr_encoding()`, `chr_encoding()`, `set_str_encoding()`, `str_encoding()` #' * `as_native_character()`, `as_utf8_string()`, `as_native_string()` #' * `lang_type_of()`, `switch_lang()`, `coerce_lang()` #' #' #' **Archived as of rlang 0.3.0:** #' #' * `cnd_inform()`, `cnd_warn()` and `cnd_abort()` #' #' * `new_cnd()` => [cnd()] #' * `cnd_message()` => [message_cnd()] #' * `cnd_warning()` => [warning_cnd()] #' * `cnd_error()` => [error_cnd()] #' * `rst_muffle()` => [cnd_muffle()] #' * `inplace()` => [calling()]. The `muffle` argument of `inplace()` #' has not been implemented in `calling()` and is now defunct. #' #' * [cnd_signal()]: `.msg` and `.call`. #' * [cnd()], [error_cnd()], [warning_cnd()] and [message_cnd()]: #' `.msg` => `message`. #' #' #' @keywords internal #' @name lifecycle NULL
scissorLD <- read_tsv('~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3987 - Ago2KO 24h R1/raw/bgMinus.raw.TcReads.tsv') allMuts <- read_tsv('~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3987 - Ago2KO 24h R1/raw/mutStats.tsv') readMuts <- function(mf) { return(read_tsv(mf, col_types = cols(depth = col_double()))) } getnormFactor <- function(mutlist) { mutsummary <- mutlist %>% filter(time == 1440, average.reads >= 100) %>% group_by(experiment, mutCode) %>% summarise(avg.mut = mean(mutFract)) %>% mutate(factor = 1 / avg.mut) return(mutsummary) } avgMutsPerTime <- function(ml) { avgMuts <- ml %>% group_by(flybase_id, arm.name, start.pos, mir.type, average.reads, experiment, time, timepoint, mutCode) %>% summarise(avg.mut = mean(mutFract)) %>% ungroup() return(avgMuts) } subtractMutBG <- function(mdf) { mutBG <- mdf %>% filter(time == 0) %>% select(flybase_id, start.pos, pos, relPos, mutCode, mutFract) %>% dplyr::rename(mutBg = mutFract) bgMinusMuts <- mdf %>% left_join(mutBG) %>% mutate(bgMinusMuts = ifelse(mutFract - mutBg > 0, mutFract - mutBg, 0)) return(bgMinusMuts) } excludeSNPs <- function(ml, expDescription) { noSNP <- ml %>% group_by(flybase_id, time, pos) %>% filter(max(mutFract) < 0.75) %>% ungroup() return(noSNP %>% left_join(expDescription)) } ml <- list('~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3987 - Ago2KO 24h R1/raw/mutStats.tsv', '~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M4134 - Ago2KO 24h R2/raw/mutStats.tsv', '~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3282 - wildtype OXIDISED/raw/mutStats.tsv', '~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3283 - wildtype UNOX/raw/mutStats.tsv') am <- lapply(ml, readMuts) noSNP <- lapply(am, excludeSNPs, expDescription = expDF) mut.summary <- lapply(noSNP, getnormFactor) %>% purrr::reduce(bind_rows) avg.muts <- lapply(noSNP, avgMutsPerTime) %>% purrr::reduce(bind_rows) muts.bgMinus <- lapply(noSNP, subtractMutBG) %>% purrr::reduce(bind_rows) ox.tp <- c(38519:38526) unox.tp <- c(38509:38517) ltc1.tp <- 
c(45493:45501) ltc2.tp <- c(47117:47125) expDF.sep <- as_data_frame(list("experiment" = c(rep("wt-oxidised", length(ox.tp)), rep('wt-unoxidised', length(unox.tp)), rep('Ago2KO-24h-R1', length(ltc1.tp)), rep('Ago2KO-24h-R2', length(ltc2.tp))), 'timepoint' = c(ox.tp, unox.tp, ltc1.tp, ltc2.tp))) muts.bgWexp <- muts.bgMinus %>% left_join(expDF.sep) avg.mutsWexp <- avg.muts %>% left_join(expDF.sep) maxMuts <- muts.bgWexp %>% filter(time == 1440) %>% group_by(experiment, start.pos, flybase_id, arm.name, mutCode) %>% summarise(maxMutMedian = mean(bgMinusMuts)) %>% ungroup() avg.bgMinusMuts <- muts.bgWexp %>% group_by(experiment, start.pos, flybase_id, time, mutCode) %>% summarise(muts.bgMinus = mean(bgMinusMuts)) %>% ungroup() avg.bgMinusMuts.normed <- muts.bgWexp %>% left_join(maxMuts) %>% mutate(muts.bgMinus.norm = bgMinusMuts / maxMutMedian) %>% group_by(experiment, start.pos, flybase_id, arm.name, time, mutCode) %>% summarise(avg.bgMinus.muts = mean(muts.bgMinus.norm)) %>% ungroup() mirmutF <- list('~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3987 - Ago2KO 24h R1/raw/miRs.wAllMuts.tsv', '~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M4134 - Ago2KO 24h R2/raw/miRs.wAllMuts.tsv') mirmuts <- lapply(mirmutF, read_tsv) %>% purrr::reduce(bind_rows) mirmuts.wFrac <- mirmuts %>% spread(nucleotide, count) %>% replace_na(list(A = 0, C = 0, G = 0, T = 0)) %>% gather(nucleotide, count, A:T) %>% mutate(mutCode = ifelse(refNuc != nucleotide, paste(refNuc, nucleotide, sep = '>'), refNuc)) %>% group_by(flybase_id, timepoint, pos, start.pos) %>% mutate(depth = sum(count), mutFract = count / depth) %>% dplyr::filter(grepl('>', mutCode)) %>% ungroup() %>% dplyr::select(-refNuc, -nucleotide, -count, -`5p`, -`3p`, -align, -full.seq, -mir_name, -read.type, -depth, -miRNAreads) %>% left_join(expDF.sep) mirbgmuts <- mirmuts.wFrac %>% filter(time == 0) %>% dplyr::rename(bg.mut = mutFract) %>% select(pos, relPos, flybase_id, start.pos, mutCode, bg.mut, experiment) mirmuts.noBG <- mirmuts.wFrac 
%>% left_join(mirbgmuts) %>% mutate(bg.minus.mut = ifelse(mutFract > bg.mut + 1e-7, mutFract - bg.mut, 0)) %>% select(-mutFract, -bg.mut) avg.mirmuts.nobg <- mirmuts.noBG %>% group_by(experiment, start.pos, flybase_id, time, mutCode) %>% summarise(muts.bgMinus = mean(bg.minus.mut)) %>% ungroup() mirmuts.noBG.max <- mirmuts.noBG %>% filter(time == 1440) %>% group_by(experiment, start.pos, flybase_id, arm.name, mutCode) %>% summarise(mean.mut = mean(bg.minus.mut), median.mut = median(bg.minus.mut)) %>% ungroup() mirmuts.noBG.normed <- mirmuts.noBG %>% left_join(mirmuts.noBG.max) %>% mutate(normed.mut = bg.minus.mut / mean.mut)
/analysis/mutAnalysis.R
permissive
breichholf/smRNAseq
R
false
false
5,144
r
scissorLD <- read_tsv('~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3987 - Ago2KO 24h R1/raw/bgMinus.raw.TcReads.tsv') allMuts <- read_tsv('~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3987 - Ago2KO 24h R1/raw/mutStats.tsv') readMuts <- function(mf) { return(read_tsv(mf, col_types = cols(depth = col_double()))) } getnormFactor <- function(mutlist) { mutsummary <- mutlist %>% filter(time == 1440, average.reads >= 100) %>% group_by(experiment, mutCode) %>% summarise(avg.mut = mean(mutFract)) %>% mutate(factor = 1 / avg.mut) return(mutsummary) } avgMutsPerTime <- function(ml) { avgMuts <- ml %>% group_by(flybase_id, arm.name, start.pos, mir.type, average.reads, experiment, time, timepoint, mutCode) %>% summarise(avg.mut = mean(mutFract)) %>% ungroup() return(avgMuts) } subtractMutBG <- function(mdf) { mutBG <- mdf %>% filter(time == 0) %>% select(flybase_id, start.pos, pos, relPos, mutCode, mutFract) %>% dplyr::rename(mutBg = mutFract) bgMinusMuts <- mdf %>% left_join(mutBG) %>% mutate(bgMinusMuts = ifelse(mutFract - mutBg > 0, mutFract - mutBg, 0)) return(bgMinusMuts) } excludeSNPs <- function(ml, expDescription) { noSNP <- ml %>% group_by(flybase_id, time, pos) %>% filter(max(mutFract) < 0.75) %>% ungroup() return(noSNP %>% left_join(expDescription)) } ml <- list('~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3987 - Ago2KO 24h R1/raw/mutStats.tsv', '~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M4134 - Ago2KO 24h R2/raw/mutStats.tsv', '~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3282 - wildtype OXIDISED/raw/mutStats.tsv', '~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3283 - wildtype UNOX/raw/mutStats.tsv') am <- lapply(ml, readMuts) noSNP <- lapply(am, excludeSNPs, expDescription = expDF) mut.summary <- lapply(noSNP, getnormFactor) %>% purrr::reduce(bind_rows) avg.muts <- lapply(noSNP, avgMutsPerTime) %>% purrr::reduce(bind_rows) muts.bgMinus <- lapply(noSNP, subtractMutBG) %>% purrr::reduce(bind_rows) ox.tp <- c(38519:38526) unox.tp <- c(38509:38517) ltc1.tp <- 
c(45493:45501) ltc2.tp <- c(47117:47125) expDF.sep <- as_data_frame(list("experiment" = c(rep("wt-oxidised", length(ox.tp)), rep('wt-unoxidised', length(unox.tp)), rep('Ago2KO-24h-R1', length(ltc1.tp)), rep('Ago2KO-24h-R2', length(ltc2.tp))), 'timepoint' = c(ox.tp, unox.tp, ltc1.tp, ltc2.tp))) muts.bgWexp <- muts.bgMinus %>% left_join(expDF.sep) avg.mutsWexp <- avg.muts %>% left_join(expDF.sep) maxMuts <- muts.bgWexp %>% filter(time == 1440) %>% group_by(experiment, start.pos, flybase_id, arm.name, mutCode) %>% summarise(maxMutMedian = mean(bgMinusMuts)) %>% ungroup() avg.bgMinusMuts <- muts.bgWexp %>% group_by(experiment, start.pos, flybase_id, time, mutCode) %>% summarise(muts.bgMinus = mean(bgMinusMuts)) %>% ungroup() avg.bgMinusMuts.normed <- muts.bgWexp %>% left_join(maxMuts) %>% mutate(muts.bgMinus.norm = bgMinusMuts / maxMutMedian) %>% group_by(experiment, start.pos, flybase_id, arm.name, time, mutCode) %>% summarise(avg.bgMinus.muts = mean(muts.bgMinus.norm)) %>% ungroup() mirmutF <- list('~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M3987 - Ago2KO 24h R1/raw/miRs.wAllMuts.tsv', '~/Dropbox/PhD/data/sRNA SLAMseq REANALYSED/M4134 - Ago2KO 24h R2/raw/miRs.wAllMuts.tsv') mirmuts <- lapply(mirmutF, read_tsv) %>% purrr::reduce(bind_rows) mirmuts.wFrac <- mirmuts %>% spread(nucleotide, count) %>% replace_na(list(A = 0, C = 0, G = 0, T = 0)) %>% gather(nucleotide, count, A:T) %>% mutate(mutCode = ifelse(refNuc != nucleotide, paste(refNuc, nucleotide, sep = '>'), refNuc)) %>% group_by(flybase_id, timepoint, pos, start.pos) %>% mutate(depth = sum(count), mutFract = count / depth) %>% dplyr::filter(grepl('>', mutCode)) %>% ungroup() %>% dplyr::select(-refNuc, -nucleotide, -count, -`5p`, -`3p`, -align, -full.seq, -mir_name, -read.type, -depth, -miRNAreads) %>% left_join(expDF.sep) mirbgmuts <- mirmuts.wFrac %>% filter(time == 0) %>% dplyr::rename(bg.mut = mutFract) %>% select(pos, relPos, flybase_id, start.pos, mutCode, bg.mut, experiment) mirmuts.noBG <- mirmuts.wFrac 
%>% left_join(mirbgmuts) %>% mutate(bg.minus.mut = ifelse(mutFract > bg.mut + 1e-7, mutFract - bg.mut, 0)) %>% select(-mutFract, -bg.mut) avg.mirmuts.nobg <- mirmuts.noBG %>% group_by(experiment, start.pos, flybase_id, time, mutCode) %>% summarise(muts.bgMinus = mean(bg.minus.mut)) %>% ungroup() mirmuts.noBG.max <- mirmuts.noBG %>% filter(time == 1440) %>% group_by(experiment, start.pos, flybase_id, arm.name, mutCode) %>% summarise(mean.mut = mean(bg.minus.mut), median.mut = median(bg.minus.mut)) %>% ungroup() mirmuts.noBG.normed <- mirmuts.noBG %>% left_join(mirmuts.noBG.max) %>% mutate(normed.mut = bg.minus.mut / mean.mut)
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 419845 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 419845 c c Input Parameter (command line, file): c input filename QBFLIB/Tentrup/cycle-sched/cycle_sched_6_3_1.unsat.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 140089 c no.of clauses 419845 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 419845 c c QBFLIB/Tentrup/cycle-sched/cycle_sched_6_3_1.unsat.qdimacs 140089 419845 E1 [] 0 13 140076 419845 NONE
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Tentrup/cycle-sched/cycle_sched_6_3_1.unsat/cycle_sched_6_3_1.unsat.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
665
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 419845 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 419845 c c Input Parameter (command line, file): c input filename QBFLIB/Tentrup/cycle-sched/cycle_sched_6_3_1.unsat.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 140089 c no.of clauses 419845 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 419845 c c QBFLIB/Tentrup/cycle-sched/cycle_sched_6_3_1.unsat.qdimacs 140089 419845 E1 [] 0 13 140076 419845 NONE
source("glee-r-funcs.r") # -- user-defined -- # XLFILE = "../glee/short.Cooper_147_vs_689.xls" XLFILE = "Cooper_147_vs_689.xls" # XLFILE = "wt-ClpS.xlsx" # number of replicates nA = 3 nB = 3 # specify rowIndex and/or colIndex if errors occur Data = read.xlsx(XLFILE, sheetIndex=1, rowIndex=NULL, colIndex=NULL, as.data.frame=TRUE, header=TRUE) # options fit_type = "cubic" num_iter = 10000 num_digits = 4 outfile = "diff-exp.xlsx" # ------------------ if (!data_ok(Data,nA,nB)) { msg = paste("check input data! spreadsheet must have", "(1) the right number of columns", "(2) positive finite values", "\n",sep="\n") stop(msg) } Prot = as.character(Data[,1]) A = as.matrix(Data[,1+(1:nA)]) B = as.matrix(Data[,1+nA+(1:nB)]) m = fit_model(A, B, fit_type) model_fit_plots(m, outfile="fitplots.png") stn_pval = calc_stn_pval(A, B, m, num_iter) stn_pval_plots(stn_pval, outfile="stn-pval.png") diff_exp_table(stn_pval, Prot, num_digits, outfile)
/glee-funcs/glee-run.r
no_license
lponnala/omics
R
false
false
961
r
source("glee-r-funcs.r") # -- user-defined -- # XLFILE = "../glee/short.Cooper_147_vs_689.xls" XLFILE = "Cooper_147_vs_689.xls" # XLFILE = "wt-ClpS.xlsx" # number of replicates nA = 3 nB = 3 # specify rowIndex and/or colIndex if errors occur Data = read.xlsx(XLFILE, sheetIndex=1, rowIndex=NULL, colIndex=NULL, as.data.frame=TRUE, header=TRUE) # options fit_type = "cubic" num_iter = 10000 num_digits = 4 outfile = "diff-exp.xlsx" # ------------------ if (!data_ok(Data,nA,nB)) { msg = paste("check input data! spreadsheet must have", "(1) the right number of columns", "(2) positive finite values", "\n",sep="\n") stop(msg) } Prot = as.character(Data[,1]) A = as.matrix(Data[,1+(1:nA)]) B = as.matrix(Data[,1+nA+(1:nB)]) m = fit_model(A, B, fit_type) model_fit_plots(m, outfile="fitplots.png") stn_pval = calc_stn_pval(A, B, m, num_iter) stn_pval_plots(stn_pval, outfile="stn-pval.png") diff_exp_table(stn_pval, Prot, num_digits, outfile)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/devicefarm_operations.R \name{devicefarm_delete_run} \alias{devicefarm_delete_run} \title{Deletes the run, given the run ARN} \usage{ devicefarm_delete_run(arn) } \arguments{ \item{arn}{[required] The Amazon Resource Name (ARN) for the run to delete.} } \description{ Deletes the run, given the run ARN. } \details{ Deleting this resource does not stop an in-progress run. } \section{Request syntax}{ \preformatted{svc$delete_run( arn = "string" ) } } \examples{ \dontrun{ # The following example deletes a specific test run. svc$delete_run( arn = "arn:aws:devicefarm:us-west-2:123456789101:run:EXAMPLE-GUID-123-456" ) } } \keyword{internal}
/paws/man/devicefarm_delete_run.Rd
permissive
johnnytommy/paws
R
false
true
726
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/devicefarm_operations.R \name{devicefarm_delete_run} \alias{devicefarm_delete_run} \title{Deletes the run, given the run ARN} \usage{ devicefarm_delete_run(arn) } \arguments{ \item{arn}{[required] The Amazon Resource Name (ARN) for the run to delete.} } \description{ Deletes the run, given the run ARN. } \details{ Deleting this resource does not stop an in-progress run. } \section{Request syntax}{ \preformatted{svc$delete_run( arn = "string" ) } } \examples{ \dontrun{ # The following example deletes a specific test run. svc$delete_run( arn = "arn:aws:devicefarm:us-west-2:123456789101:run:EXAMPLE-GUID-123-456" ) } } \keyword{internal}
## # Author: Autogenerated on 2013-12-05 22:49:50 # gitHash: dbc23230f6666890a6cb7c4234e68e540e096e0c # SEED: 779905237530228736 ## source('./findNSourceUtils.R') simpleFilterTest_prostate_28 <- function(conn) { Log.info("A munge-task R unit test on data <prostate> testing the functional unit <==> ") Log.info("Uploading prostate") hex <- h2o.uploadFile(conn, locate("../../smalldata/logreg/prostate.csv"), "rprostate.hex") Log.info("Filtering out rows by == from dataset prostate and column \"RACE\" using value 0.607585315311") filterHex <- hex[hex[,c("RACE")] == 0.607585315311,] Log.info("Perform filtering with the '$' sign also") filterHex <- hex[hex$"RACE" == 0.607585315311,] Log.info("Filtering out rows by == from dataset prostate and column \"DPROS\" using value 2.20276163047") filterHex <- hex[hex[,c("DPROS")] == 2.20276163047,] Log.info("Perform filtering with the '$' sign also") filterHex <- hex[hex$"DPROS" == 2.20276163047,] Log.info("Filtering out rows by == from dataset prostate and column \"DPROS\" using value 1.71355443172, and also subsetting columns.") filterHex <- hex[hex[,c("DPROS")] == 1.71355443172, c("DPROS")] Log.info("Now do the same filter & subset, but select complement of columns.") filterHex <- hex[hex[,c("DPROS")] == 1.71355443172, c("GLEASON","DPROS","PSA","DCAPS","VOL","CAPSULE","RACE","ID","AGE")] Log.info("Filtering out rows by == from dataset prostate and column \"CAPSULE\" using value 0.805393678056, and also subsetting columns.") filterHex <- hex[hex[,c("CAPSULE")] == 0.805393678056, c("CAPSULE")] Log.info("Now do the same filter & subset, but select complement of columns.") filterHex <- hex[hex[,c("CAPSULE")] == 0.805393678056, c("GLEASON","DPROS","PSA","DCAPS","VOL","CAPSULE","RACE","ID","AGE")] testEnd() } doTest("simpleFilterTest_ on data prostate unit= ==", simpleFilterTest_prostate_28)
/R/tests/testdir_autoGen/runit_simpleFilterTest_prostate_28.R
permissive
jmcclell/h2o
R
false
false
2,340
r
## # Author: Autogenerated on 2013-12-05 22:49:50 # gitHash: dbc23230f6666890a6cb7c4234e68e540e096e0c # SEED: 779905237530228736 ## source('./findNSourceUtils.R') simpleFilterTest_prostate_28 <- function(conn) { Log.info("A munge-task R unit test on data <prostate> testing the functional unit <==> ") Log.info("Uploading prostate") hex <- h2o.uploadFile(conn, locate("../../smalldata/logreg/prostate.csv"), "rprostate.hex") Log.info("Filtering out rows by == from dataset prostate and column \"RACE\" using value 0.607585315311") filterHex <- hex[hex[,c("RACE")] == 0.607585315311,] Log.info("Perform filtering with the '$' sign also") filterHex <- hex[hex$"RACE" == 0.607585315311,] Log.info("Filtering out rows by == from dataset prostate and column \"DPROS\" using value 2.20276163047") filterHex <- hex[hex[,c("DPROS")] == 2.20276163047,] Log.info("Perform filtering with the '$' sign also") filterHex <- hex[hex$"DPROS" == 2.20276163047,] Log.info("Filtering out rows by == from dataset prostate and column \"DPROS\" using value 1.71355443172, and also subsetting columns.") filterHex <- hex[hex[,c("DPROS")] == 1.71355443172, c("DPROS")] Log.info("Now do the same filter & subset, but select complement of columns.") filterHex <- hex[hex[,c("DPROS")] == 1.71355443172, c("GLEASON","DPROS","PSA","DCAPS","VOL","CAPSULE","RACE","ID","AGE")] Log.info("Filtering out rows by == from dataset prostate and column \"CAPSULE\" using value 0.805393678056, and also subsetting columns.") filterHex <- hex[hex[,c("CAPSULE")] == 0.805393678056, c("CAPSULE")] Log.info("Now do the same filter & subset, but select complement of columns.") filterHex <- hex[hex[,c("CAPSULE")] == 0.805393678056, c("GLEASON","DPROS","PSA","DCAPS","VOL","CAPSULE","RACE","ID","AGE")] testEnd() } doTest("simpleFilterTest_ on data prostate unit= ==", simpleFilterTest_prostate_28)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/as.Date.character.R \name{as.Date.character} \alias{as.Date.character} \title{fun_name} \usage{ as.Date.character(params) } \arguments{ \item{param}{fun_name} } \description{ kolejna funkcja podmieniona } \keyword{Gruba} \keyword{Przy} \keyword{boski} \keyword{chillout} \keyword{piwerku} \keyword{rozkmina} \keyword{sie} \keyword{toczy}
/man/as.Date.character.Rd
no_license
granatb/RapeR
R
false
true
416
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/as.Date.character.R \name{as.Date.character} \alias{as.Date.character} \title{fun_name} \usage{ as.Date.character(params) } \arguments{ \item{param}{fun_name} } \description{ kolejna funkcja podmieniona } \keyword{Gruba} \keyword{Przy} \keyword{boski} \keyword{chillout} \keyword{piwerku} \keyword{rozkmina} \keyword{sie} \keyword{toczy}
library(readr) library(magrittr) library(stringr) library(data.table) library(snow) library(magrittr) # indir_template <- "E:/洪湖东分块/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据" # tmp <- readDBFs_fields(indir_template) indir6.2 <- "G:/地图审核/乌伦古河/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据" indir6.3 <- "G:/地图审核/乌伦古河/6风险图应用业务相关数据/6.3影响分析支撑数据-乌伦古河/" dbf_files <- dir(indir, pattern = "*.dbf$", full.names = T)[-1] tmp <- check.ztu(fnames = dbf_files) # tmp <- readDBFs_fields(indir) tmp <- check.ztu("G:/地图审核/乌伦古河/6风险图应用业务相关数据/6.3影响分析支撑数据-乌伦古河/") indir <- "G:/地图审核/乌伦古河/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据" fnames <- dir(indir, pattern = "*.txt$", full.names = T) outdir <- "." i = 1 tmp <- check.6.2_ymssTXT(fnames[1]) # fnames <- dir("E:/洪湖东分块/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据", # pattern = "*.txt$", full.names = T) # # x <- check.6.2_ymssTXT(fnames[1]) cl <- makeCluster(5, type = "SOCK", outfile = "log.txt") tmp <- clusterEvalQ(cl, { library(floodmap) # library(readr) # library(stringr) # library(data.table) NULL }) # clusterExport(cl, c("check.6.2_ymssTXT"), envir = environment())#设置环境 tm <- snow.time(x_newDT <- parLapply(cl, fnames, check.6.2_ymssTXT)) stopCluster(cl) # x_newDT <- list() # for (i in seq_along(fnames)){ # cat(sprintf("====fname:%s====\n", fnames[i])) # x_newDT[[i]] <- check.6.2_ymssTXT(fnames[i]) # } ymss <- lapply(x_newDT, function(x) x[, mean(VALUE, na.rm = T), by = c("TIME")]) ymss_df <- do.call(cbind.data.frame, lapply(ymss, function(x) x[, V1])) %>% set_colnames(basename(fnames)) %>% cbind(TIME = ymss[[1]][,TIME], .) X <- data.table::melt(ymss_df, id.vars = "TIME", variable.name = "ymss") YMSS.plot(X) ## 可以考虑采用并行运算 ## 测试洪湖东数据 template <- "E:/洪湖东分块/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据/" fnames <- dir(indir, pattern = "*.txt$", full.names = T) x_hd <- check.6.2_ymssTXT(fnames[1]) ymss <- x_hd[, mean(VALUE, na.rm = T), by = c("TIME")] ## 示例数据 plot(ymss$V1, xlab = "TIME", ylab = "YMSS", type = "l") writeYMSS(x_hd, "ymss1.txt")
/tests/tests-6.2check.R
no_license
kongdd/floodmap
R
false
false
2,577
r
library(readr) library(magrittr) library(stringr) library(data.table) library(snow) library(magrittr) # indir_template <- "E:/洪湖东分块/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据" # tmp <- readDBFs_fields(indir_template) indir6.2 <- "G:/地图审核/乌伦古河/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据" indir6.3 <- "G:/地图审核/乌伦古河/6风险图应用业务相关数据/6.3影响分析支撑数据-乌伦古河/" dbf_files <- dir(indir, pattern = "*.dbf$", full.names = T)[-1] tmp <- check.ztu(fnames = dbf_files) # tmp <- readDBFs_fields(indir) tmp <- check.ztu("G:/地图审核/乌伦古河/6风险图应用业务相关数据/6.3影响分析支撑数据-乌伦古河/") indir <- "G:/地图审核/乌伦古河/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据" fnames <- dir(indir, pattern = "*.txt$", full.names = T) outdir <- "." i = 1 tmp <- check.6.2_ymssTXT(fnames[1]) # fnames <- dir("E:/洪湖东分块/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据", # pattern = "*.txt$", full.names = T) # # x <- check.6.2_ymssTXT(fnames[1]) cl <- makeCluster(5, type = "SOCK", outfile = "log.txt") tmp <- clusterEvalQ(cl, { library(floodmap) # library(readr) # library(stringr) # library(data.table) NULL }) # clusterExport(cl, c("check.6.2_ymssTXT"), envir = environment())#设置环境 tm <- snow.time(x_newDT <- parLapply(cl, fnames, check.6.2_ymssTXT)) stopCluster(cl) # x_newDT <- list() # for (i in seq_along(fnames)){ # cat(sprintf("====fname:%s====\n", fnames[i])) # x_newDT[[i]] <- check.6.2_ymssTXT(fnames[i]) # } ymss <- lapply(x_newDT, function(x) x[, mean(VALUE, na.rm = T), by = c("TIME")]) ymss_df <- do.call(cbind.data.frame, lapply(ymss, function(x) x[, V1])) %>% set_colnames(basename(fnames)) %>% cbind(TIME = ymss[[1]][,TIME], .) X <- data.table::melt(ymss_df, id.vars = "TIME", variable.name = "ymss") YMSS.plot(X) ## 可以考虑采用并行运算 ## 测试洪湖东数据 template <- "E:/洪湖东分块/6风险图应用业务相关数据/6.2淹没过程动态展示支撑数据/" fnames <- dir(indir, pattern = "*.txt$", full.names = T) x_hd <- check.6.2_ymssTXT(fnames[1]) ymss <- x_hd[, mean(VALUE, na.rm = T), by = c("TIME")] ## 示例数据 plot(ymss$V1, xlab = "TIME", ylab = "YMSS", type = "l") writeYMSS(x_hd, "ymss1.txt")
library("openxlsx") library("C50") #Mempersiapkan data dataCreditRating <- read.xlsx(xlsxFile = "https://academy.dqlab.id/dataset/credit_scoring_dqlab.xlsx") dataCreditRating$risk_rating <- as.factor(dataCreditRating$risk_rating) #write.xlsx(dataCreditRating, file="data_credit.xlsx") #Menggunakan C5.0 drop_columns <- c("kpr_aktif", "pendapatan_setahun_juta", "risk_rating", "rata_rata_overdue") datafeed <- dataCreditRating[ , !(names(dataCreditRating) %in% drop_columns)] modelKu <- C5.0(datafeed, as.factor(dataCreditRating$risk_rating)) summary(modelKu)
/Data_Science_In_Finance/Contoh Pemodelan Decision Tree dengan Machine Learning.R
no_license
rhedi/Data_Science
R
false
false
562
r
library("openxlsx") library("C50") #Mempersiapkan data dataCreditRating <- read.xlsx(xlsxFile = "https://academy.dqlab.id/dataset/credit_scoring_dqlab.xlsx") dataCreditRating$risk_rating <- as.factor(dataCreditRating$risk_rating) #write.xlsx(dataCreditRating, file="data_credit.xlsx") #Menggunakan C5.0 drop_columns <- c("kpr_aktif", "pendapatan_setahun_juta", "risk_rating", "rata_rata_overdue") datafeed <- dataCreditRating[ , !(names(dataCreditRating) %in% drop_columns)] modelKu <- C5.0(datafeed, as.factor(dataCreditRating$risk_rating)) summary(modelKu)
CPS = read.csv("CPSData.csv") MAC = read.csv("MetroAreaCodes.csv") CC = read.csv("CountryCodes.csv") summary(CPS) sort(table(CPS$Region)) sort(table(CPS$State)) summary(CPS$Citizenship) str(CPS) 116639 + 7073 +7590 116639 + 7073 123712/131302 summary(CPS$Race) summary(CPS$Hispanic) str(CPS$Hispanic) table(CPS$Hispanic) tapply(CPS$Race, CPS$Hispanic) table(tapply(CPS$Race, CPS$Hispanic)) summary(CPS$Race) AI = subset(CPS, Race == "Asian" & Hispanic == 1) summary(AI) nrow(AI) AI = subset(CPS, Race == "American Indian" & Hispanic == 1) AS = subset(CPS, Race == "Asian" & Hispanic == 1) B = subset(CPS, Race == "Black" & Hispanic == 1) MR = subset(CPS, Race == "Multiracial" & Hispanic == 1) PI = subset(CPS, Race == "Pacific Islander" & Hispanic == 1) W = subset(CPS, Race == "White" & Hispanic == 1) nrow(AI) nrow(AS) nrow(B) nrow(MR) nrow(PI) nrow(W) summary(CPS) is.na(CPS$Married) table(CPS$Region, is.na(CPS$Married)) table(CPS$Sex, is.na(CPS$Married)) table(CPS$Age, is.na(CPS$Married)) summary(CPS) table(CPS$Citizenship, is.na(CPS$Married)) table(CPS$MetroAreaCode, is.na(CPS$Married)) summary(CPS$State) CPS_DC = subset(CPS, State == "District of Columbia") table(CPS_DC$MetroAreaCode, is.na(CPS_DC$Married)) table(CPS$State, is.na(CPS$MetroAreaCode)) S_MAC = table(CPS$State, is.na(CPS$MetroAreaCode)) ?"[" S_MAC["Alabama"][TRUE] S_MAC[TRE]["Alabama"] S_MAC[TRUE]["Alabama"] S_MAC S_MAC = table(CPS$Region, is.na(CPS$MetroAreaCode)) table(CPS$Region, is.na(CPS$MetroAreaCode)) 8084/8084 8084/25093 10674/(10674+20010) 5609/(5609+20330) 9871/(9871+31631) 8084/(8084+25093) tapply(CPS$Race, CPS$Hispanic) table(tapply(CPS$Race, CPS$Hispanic)) table(CPS$State, is.na(CPS$MetroAreaCode)) S_MAC = table(CPS$State, is.na(CPS$MetroAreaCode)) tapply(CPS$State, is.na(CPS$MetroAreaCode), mean) tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3 table(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3) summary(CPS) 
max(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean)) MAC_S = tapply(is.na(CPS$MetroAreaCode), CPS$State, mean)) MAC_S = tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) MAC_S summary(MAC_S) str(MAC_S) summary(MAC) summary(CC) str(MAC) str(CC) CPS = merge(CPS, MetroAreaMap, by.x="MetroAreaCode", by.y="Code", all.x=TRUE) CPS = merge(CPS, MAC, by.x="MetroAreaCode", by.y="Code", all.x=TRUE) CPS = merge(CPS, CC, by.x="CountryCode", by.y="Code", all.x=TRUE) summay(CPS) summary(CPS) CPS = merge(CPS, CC, by.x="CountryOfBirthCode", by.y="Code", all.x=TRUE) summary(CPS) str(CPS) summary(CPS$MetroArea) table(CPS$MetroArea) sort(table(CPS$MetroArea)) tapply(CPS$Hispanic, CPS$MetroArea, mean) sort(tapply(CPS$Hispanic, CPS$MetroArea, mean)) summary(CPS) tapply(CPS$Race == "Asian", CPS$MetroArea, mean) sort(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) sort(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) >= 0.2 table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) sort(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) sum( sort(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) > 0.2) sum( sort(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) >= 0.2) sum( tapply(CPS$Race == "Asian", CPS$MetroArea, mean) >= 0.2) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) >= 0.2) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) summary(CPS$Race) summary(CPS$MetroArea) str(CPS$MetroArea) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) summary(CPS$Race) Asians = subset(CPS, Race == "Asian") nrow(Asians) tabl(CPS$Race == "Asian") table(CPS$Race == "Asian") table(CPS$Race == "Asian", CPS$MetroArea) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) str(CPS$MetroArea) str(CPS$MetroAreaCode) summary(CPS$MetroAreaCode) str(CPS) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) >= 0.2) tapply(CPS$Race == "Asian", CPS$MetroArea, mean) 
tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean) sum( table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean)) >= 0.2) sum(CPS$MetroAreaCode) summary(CPS$MetroAreaCode) summary(CPS$MetroArea) table(CPS$MetroArea) sum(table(CPS$MetroArea)) nrow(table(CPS$MetroArea)) sum(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean)) sum(table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean))) table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean)) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) (tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean) <= 0.02) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean) >= 0.02) ?tapply nrow(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) NA > 0.02 NA < 0.02 NA == FALSE nrow(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean, na.rm = TRUE))) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean, na.rm = TRUE)) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean) >= 0.02) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean, na.rm = TRUE) >= 0.02) tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3 table(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3) table(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3) summary(MAC_S) str(MAC_S) MAC_S = tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) max(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean)) MAC_S tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) == 1.0 sum(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) == 1.0) table(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) == 1.0) table(tapply(CPS$MetroAreaCode != NA, CPS$State, mean) == 1.0) table(tapply(CPS$MetroAreaCode != "NA", CPS$State, mean) == 1.0) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean) >= 0.02) table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean) >= 0.02) table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean) >= 0.2) sort(tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean)) 
tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean) summary(CPS) tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean) summary(CPS) str(CPS$Education) summary(CPS$Education) tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean) summary(CPS$Education) summary(CPS) tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean, na.rm=TRUE) sort(tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean, na.rm=TRUE)) summary(CPS) tapply(CPS$Country != "United States", CPS$MetroArea, mean) tapply(CPS$Country != "United States", CPS$MetroArea, mean, na.rm=TRUE) sort(tapply(CPS$Country == "INDIA", CPS$MetroArea, mean, na.rm=TRUE)) sort(tapply(CPS$Country == "India", CPS$MetroArea, mean, na.rm=TRUE)) sort(tapply(CPS$Country == "Brazil", CPS$MetroArea, mean, na.rm=TRUE)) sort(tapply(CPS$Country == "Somalia", CPS$MetroArea, mean, na.rm=TRUE)) savehistory("~/Projects/edx/AnalyticsEdge_MITx15_071x/lec1/HW1_3.R")
/2015/mitx_ana_edge_15_071x/lec1-intro/HW3.R
no_license
bicepjai/myclasses
R
false
false
6,947
r
CPS = read.csv("CPSData.csv") MAC = read.csv("MetroAreaCodes.csv") CC = read.csv("CountryCodes.csv") summary(CPS) sort(table(CPS$Region)) sort(table(CPS$State)) summary(CPS$Citizenship) str(CPS) 116639 + 7073 +7590 116639 + 7073 123712/131302 summary(CPS$Race) summary(CPS$Hispanic) str(CPS$Hispanic) table(CPS$Hispanic) tapply(CPS$Race, CPS$Hispanic) table(tapply(CPS$Race, CPS$Hispanic)) summary(CPS$Race) AI = subset(CPS, Race == "Asian" & Hispanic == 1) summary(AI) nrow(AI) AI = subset(CPS, Race == "American Indian" & Hispanic == 1) AS = subset(CPS, Race == "Asian" & Hispanic == 1) B = subset(CPS, Race == "Black" & Hispanic == 1) MR = subset(CPS, Race == "Multiracial" & Hispanic == 1) PI = subset(CPS, Race == "Pacific Islander" & Hispanic == 1) W = subset(CPS, Race == "White" & Hispanic == 1) nrow(AI) nrow(AS) nrow(B) nrow(MR) nrow(PI) nrow(W) summary(CPS) is.na(CPS$Married) table(CPS$Region, is.na(CPS$Married)) table(CPS$Sex, is.na(CPS$Married)) table(CPS$Age, is.na(CPS$Married)) summary(CPS) table(CPS$Citizenship, is.na(CPS$Married)) table(CPS$MetroAreaCode, is.na(CPS$Married)) summary(CPS$State) CPS_DC = subset(CPS, State == "District of Columbia") table(CPS_DC$MetroAreaCode, is.na(CPS_DC$Married)) table(CPS$State, is.na(CPS$MetroAreaCode)) S_MAC = table(CPS$State, is.na(CPS$MetroAreaCode)) ?"[" S_MAC["Alabama"][TRUE] S_MAC[TRE]["Alabama"] S_MAC[TRUE]["Alabama"] S_MAC S_MAC = table(CPS$Region, is.na(CPS$MetroAreaCode)) table(CPS$Region, is.na(CPS$MetroAreaCode)) 8084/8084 8084/25093 10674/(10674+20010) 5609/(5609+20330) 9871/(9871+31631) 8084/(8084+25093) tapply(CPS$Race, CPS$Hispanic) table(tapply(CPS$Race, CPS$Hispanic)) table(CPS$State, is.na(CPS$MetroAreaCode)) S_MAC = table(CPS$State, is.na(CPS$MetroAreaCode)) tapply(CPS$State, is.na(CPS$MetroAreaCode), mean) tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3 table(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3) summary(CPS) 
max(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean)) MAC_S = tapply(is.na(CPS$MetroAreaCode), CPS$State, mean)) MAC_S = tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) MAC_S summary(MAC_S) str(MAC_S) summary(MAC) summary(CC) str(MAC) str(CC) CPS = merge(CPS, MetroAreaMap, by.x="MetroAreaCode", by.y="Code", all.x=TRUE) CPS = merge(CPS, MAC, by.x="MetroAreaCode", by.y="Code", all.x=TRUE) CPS = merge(CPS, CC, by.x="CountryCode", by.y="Code", all.x=TRUE) summay(CPS) summary(CPS) CPS = merge(CPS, CC, by.x="CountryOfBirthCode", by.y="Code", all.x=TRUE) summary(CPS) str(CPS) summary(CPS$MetroArea) table(CPS$MetroArea) sort(table(CPS$MetroArea)) tapply(CPS$Hispanic, CPS$MetroArea, mean) sort(tapply(CPS$Hispanic, CPS$MetroArea, mean)) summary(CPS) tapply(CPS$Race == "Asian", CPS$MetroArea, mean) sort(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) sort(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) >= 0.2 table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) sort(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) sum( sort(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) > 0.2) sum( sort(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) >= 0.2) sum( tapply(CPS$Race == "Asian", CPS$MetroArea, mean) >= 0.2) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) >= 0.2) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) summary(CPS$Race) summary(CPS$MetroArea) str(CPS$MetroArea) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) summary(CPS$Race) Asians = subset(CPS, Race == "Asian") nrow(Asians) tabl(CPS$Race == "Asian") table(CPS$Race == "Asian") table(CPS$Race == "Asian", CPS$MetroArea) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) str(CPS$MetroArea) str(CPS$MetroAreaCode) summary(CPS$MetroAreaCode) str(CPS) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) sum( table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) >= 0.2) tapply(CPS$Race == "Asian", CPS$MetroArea, mean) 
tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean) sum( table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean)) >= 0.2) sum(CPS$MetroAreaCode) summary(CPS$MetroAreaCode) summary(CPS$MetroArea) table(CPS$MetroArea) sum(table(CPS$MetroArea)) nrow(table(CPS$MetroArea)) sum(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean)) sum(table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean))) table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean)) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) (tapply(CPS$Race == "Asian", CPS$MetroArea, mean)) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean) <= 0.02) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean) >= 0.02) ?tapply nrow(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean))) NA > 0.02 NA < 0.02 NA == FALSE nrow(table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean, na.rm = TRUE))) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean, na.rm = TRUE)) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean) >= 0.02) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean, na.rm = TRUE) >= 0.02) tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3 table(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3) table(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) > 0.3) summary(MAC_S) str(MAC_S) MAC_S = tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) max(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean)) MAC_S tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) == 1.0 sum(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) == 1.0) table(tapply(is.na(CPS$MetroAreaCode), CPS$State, mean) == 1.0) table(tapply(CPS$MetroAreaCode != NA, CPS$State, mean) == 1.0) table(tapply(CPS$MetroAreaCode != "NA", CPS$State, mean) == 1.0) table(tapply(CPS$Race == "Asian", CPS$MetroArea, mean) >= 0.02) table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean) >= 0.02) table(tapply(CPS$Race == "Asian", CPS$MetroAreaCode, mean) >= 0.2) sort(tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean)) 
tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean) summary(CPS) tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean) summary(CPS) str(CPS$Education) summary(CPS$Education) tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean) summary(CPS$Education) summary(CPS) tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean, na.rm=TRUE) sort(tapply(CPS$Education == "No high school diploma", CPS$MetroArea, mean, na.rm=TRUE)) summary(CPS) tapply(CPS$Country != "United States", CPS$MetroArea, mean) tapply(CPS$Country != "United States", CPS$MetroArea, mean, na.rm=TRUE) sort(tapply(CPS$Country == "INDIA", CPS$MetroArea, mean, na.rm=TRUE)) sort(tapply(CPS$Country == "India", CPS$MetroArea, mean, na.rm=TRUE)) sort(tapply(CPS$Country == "Brazil", CPS$MetroArea, mean, na.rm=TRUE)) sort(tapply(CPS$Country == "Somalia", CPS$MetroArea, mean, na.rm=TRUE)) savehistory("~/Projects/edx/AnalyticsEdge_MITx15_071x/lec1/HW1_3.R")
############################################## # Load in dependencies ############################################## library(lme4) library(ggplot2) library(lattice) library(lmomco) ## The first thing is to set your R session to the base directory you just downloaded from github ## insert path below... setwd() ## Tester #setwd("~/Dropbox/Feldman_Ellsworth_Setaria_WUE_2017/") ##### CREATE DIRECTORY PATHS ##### ## Make the directory of the folder you downloaded the current working directory home.dir<-getwd() ############################################## # Lets define a function to get a loess fit of timeseries data ############################################## get.loess.fit<-function(fit, times, geno, cond) { return_df<-c() predict.vals<-predict(fit, times, se=T) genotype<-rep(geno, length(times)) condition<-rep(as.character(cond), length(times)) M<-predict.vals$fit M.lo<-M - predict.vals$se.fit M.hi<-M + predict.vals$se.fit slope<-c(0) for(s in 2:length(times)) { s.temp<-(M[s] - M[s-1]) / 2 slope<-c(slope, s.temp) } slope<-slope*10 return_df<-cbind(genotype, condition,times, M, M.lo, M.hi, slope) return(return_df) } ############################################## # Make a function to do loess fit, make plots and output data and report ############################################## report.loess.values<-function(rawdata, trait, genos, treatments, days, from, to, plotname){ genos<-as.character(sort(unique(genos))) treatments<-as.character(treatments) treatments<-unique(treatments) treatments<-sort(treatments) t1<-treatments[1] t2<-treatments[2] print(t1) print(t2) dap_i<-unique(days) dap_i<-sort(as.numeric(as.character(dap_i))) times = seq(from = from, to = to, by=0.1) colnumber <- which(colnames(rawdata) %in% trait) # Get loess fits for each genotype using get.loess.fit() fxn ril_loess_model_fit<-c() for (i in 1:length(genos)) { r<-genos[i] temp1<-rawdata[rawdata$genotype == as.character(r),] per.ril<-c() for (j in 1:length(treatments)) { t<-treatments[j] per.t<-c() 
temp2<-temp1[temp1$treatment == as.character(t),] if (nrow(temp2) < 1) {next;} colnumber2 <- which(colnames(temp2) %in% trait) # Log of 0 is INF need to replace with another small # temp2[,colnumber2]<-replace(temp2[,colnumber2], temp2[,colnumber2] <= 0, 1) out.loess<-loess(get(trait)~dap_i, data=temp2) output<-get.loess.fit(out.loess, times, r, t) ril_loess_model_fit<-rbind(ril_loess_model_fit, output) } } # Now make sure the resulting dataframe has ril_loess_model_fit<-ril_loess_model_fit[complete.cases(ril_loess_model_fit),] colnames(ril_loess_model_fit)<-c('ril', 'treatment', 'dap_i', 'M', 'M.lo', 'M.hi', 'AGR') ril_loess_model_fit<-as.data.frame(ril_loess_model_fit) ril_loess_model_fit$ril<-as.character(ril_loess_model_fit$ril) ril_loess_model_fit$treatment<-as.character(ril_loess_model_fit$treatment) ril_loess_model_fit$dap_i<-as.numeric(as.character(ril_loess_model_fit$dap_i)) ril_loess_model_fit$M<-as.numeric(as.character(ril_loess_model_fit$M)) ril_loess_model_fit$M.lo<-as.numeric(as.character(ril_loess_model_fit$M.lo)) ril_loess_model_fit$M.hi<-as.numeric(as.character(ril_loess_model_fit$M.hi)) ril_loess_model_fit$AGR<-as.numeric(as.character(ril_loess_model_fit$AGR)) ril_loess_model_fit<-ril_loess_model_fit[ril_loess_model_fit$dap_i %in% dap_i,] # Lets remove M.lo and M.hi # Sometimes these are difficult to estimate and end up NA # Creates plotting problems below ril_loess_model_fit<-ril_loess_model_fit[,c(1:4,7)] rate_id<-sort(unique(as.character(ril_loess_model_fit$ril))) growth_rate_report<-c() pdf(plotname, paper = "a4", width = 21/2.54, height = (7*5.3)/2.54) layout(matrix(c(1:4), ncol = 1, byrow = T), heights = c(1, 1, 1, 1)) for(i in 1:length(rate_id)) { r<-rate_id[i] ril_set<-rawdata[(rawdata$genotype == r),] ril_rates<-ril_loess_model_fit[(ril_loess_model_fit$ril == r),] plant_ids<-unique(ril_set$plantbarcode) colnumber3 <- which(colnames(ril_set) %in% trait) max.b<-max(max(ril_set[,colnumber3],na.rm=T), 
max(as.numeric(as.character(ril_rates$M)), na.rm=T),na.rm=T) min.b<-min(min(ril_set[,colnumber3],na.rm=T), min(as.numeric(as.character(ril_rates$M)), na.rm=T),na.rm=T) set<-ril_set[ril_set$plantbarcode == plant_ids[1],] # Start making plots if (set[1,'treatment'] == t2) {l.color<-c("light blue")} if (set[1,'treatment'] == t1) {l.color<-c("gold")} plot(set[,colnumber3]~set$dap_i, type='p', pch=19, xlim=c(from,to), ylim=c(min.b, max.b), col=l.color, xlab="Days after planting", ylab=trait, main=r) if(length(plant_ids) >1) { for (j in 2:length(plant_ids)) { set<-ril_set[ril_set$plantbarcode == plant_ids[j],] if (set[1,'treatment'] == t2) {l.color<-c("light blue")} if (set[1,'treatment'] == t1) {l.color<-c("gold")} points(set[,colnumber3]~set$dap_i, type='p', pch=19, xlim=c(from,to), ylim=c(min.b, max.b), col=l.color, xlab="Day", ylab=trait) } } rate.t2<-ril_rates[ril_rates$treatment == t2, ] if (nrow(rate.t2) > 0) { l.color<-c("blue") p.color<-c("dark blue") max.rate.t2<-max(as.numeric(as.character(rate.t2$AGR)),na.rm=T) day.t2<-rate.t2[rate.t2$AGR == max.rate.t2, 'dap_i'] max.val.t2<-as.numeric(as.character(rate.t2[rate.t2$AGR == max.rate.t2, 'M'])) lines(rate.t2$M~rate.t2$dap_i, lwd=2, col=l.color) points(day.t2, max.val.t2, cex=1.5, col=p.color, pch=18) # Can optionally plot confidence interval #lines(rate.t2$M.hi~rate.t2$dap_i, lty=2, col=c('navy')) #lines(rate.t2$M.lo~rate.t2$dap_i, lty=2, col=c('navy')) } rate.t1<-ril_rates[ril_rates$treatment == t1, ] if (nrow(rate.t1) > 0) { l.color<-c("orange") p.color<-c("dark orange") max.rate.t1<-max(rate.t1$AGR,na.rm=T) day.t1<-rate.t1[rate.t1$AGR == max.rate.t1, 'dap_i'] max.val.t1<-rate.t1[rate.t1$AGR == max.rate.t1, 'M'] lines(rate.t1$M~rate.t1$dap_i, lwd=2, col=l.color) points(day.t1, max.val.t1, cex=1.5, col=p.color, pch=18) #lines(rate.t1$M.hi~rate.t1$dap_i, lty=2, col=c('brown')) #lines(rate.t1$M.lo~rate.t1$dap_i, lty=2, col=c('brown')) } # treatment drought rate.t1<-ril_rates[ril_rates$treatment == t1, ] if 
(nrow(rate.t1) > 0) { max.day_trait.t1<-max(rate.t1$M, na.rm=T) max.agr.t1<-max(rate.t1$AGR,na.rm=T) rate.t1<-rate.t1[complete.cases(rate.t1),] max.agr.day.t1<-rate.t1[rate.t1$AGR == max.agr.t1, 'dap_i'] max.agr.day_trait.t1<-rate.t1[rate.t1$AGR == max.agr.t1, 'M'] } # treatment well watered rate.t2<-ril_rates[ril_rates$treatment == t2, ] if (nrow(rate.t2) > 0) { max.day_trait.t2<-max(rate.t2$M,na.rm=T) max.agr.t2<-max(rate.t2$AGR,na.rm=T) rate.t2<-rate.t2[complete.cases(rate.t2),] max.agr.day.t2<-rate.t2[rate.t2$AGR == max.agr.t2, 'dap_i'] max.agr.day_trait.t2<-rate.t2[rate.t2$AGR == max.agr.t2, 'M'] } # Generate the report on a per/ril basis if (length(unique(ril_rates$treatment)) > 1) { ril_entry<-c(r, max.day_trait.t2, max.day_trait.t1, max.day_trait.t2 - max.day_trait.t1, max.agr.t2, max.agr.t1, max.agr.t2 - max.agr.t1, max.agr.day.t2, max.agr.day.t1, max.agr.day.t2 - max.agr.day.t1, max.agr.day_trait.t2, max.agr.day_trait.t1, max.agr.day_trait.t2 - max.agr.day_trait.t1) growth_rate_report<-rbind(growth_rate_report, ril_entry) } # Plot rates if (length(unique(ril_rates$treatment)) > 1) { max.r<-max(max(rate.t2$AGR,na.rm=T), max(rate.t1$AGR,na.rm=T),na.rm=T) min.r<-min(min(rate.t2$AGR,na.rm=T), min(rate.t1$AGR,na.rm=T),na.rm=T) rate.t2<<-rate.t2 rate.t1<<-rate.t1 max.r<<-max.r min.r<<-min.r max.agr.day.t2<<-max.agr.day.t2 max.agr.t2<<-max.agr.t2 max.agr.day.t1<<-max.agr.day.t1 max.agr.t1<<-max.agr.t1 plot(rate.t2$AGR~rate.t2$dap_i, type="l", col="blue", xlab='Days after planting', ylab='Rate', ylim=c(min.r,max.r)) lines(rate.t1$AGR~rate.t1$dap_i, col="orange") points(max.agr.day.t2, max.agr.t2, pch=18, col="dark blue") points(max.agr.day.t1, max.agr.t1, pch=18, col="dark orange") } } dev.off() #ril_loess_model_fit<-ril_loess_model_fit[,c(1:4,7)] colnames(ril_loess_model_fit)<-c("genotype", "treatment", "dap_i", trait, paste(trait, '_rate', sep="")) # Give column names to growth report rownames(growth_rate_report)<-c(1:nrow(growth_rate_report)) 
growth_rate_report<-as.data.frame(growth_rate_report) colnames(growth_rate_report)<-c("genotype", "max_value.t2", "max_value.t1","max_value.diff","max_rate.t2","max_rate.t1","max_rate.diff","max_day.t2","max_day.t1","max_day.diff","value_max_rate_day.t2","value_max_rate_day.t1", "value_max_rate_day.diff") ril_loess_model_fit<<-ril_loess_model_fit growth_rate_report<<-growth_rate_report } ############################################## # Lets define a function to calculate heritability ############################################## # Broad sense heritability get_h2<-function(data){ i.treat<-unique(data$treatment) pheno<-data year<-unique(pheno[,3]) exp<-unique(pheno[,2]) pheno<-pheno[,c(7,4,9:length(colnames(pheno)))] colnames(pheno)[c(1,2)]<-c("id", "treatment") pheno[pheno == "."] <- NA colnames(pheno)[3:ncol(pheno)]<-paste(colnames(pheno)[3:ncol(pheno)] , exp, sep="_") H2<-c() for (i in 3:length(colnames(pheno))){ # Get complete cases cc.pheno<-pheno[complete.cases(pheno[,i]),c(1:2,i)] # Build linear model each cofactor is a random effect model<-lmer(cc.pheno[,3]~(1|id)+(1|treatment)+(1|id:treatment), data=cc.pheno) # Extract variance from model object, save individual components in vector 're' and residual variance as a scalar named 'res' re<-as.numeric(VarCorr(model)) res<-attr(VarCorr(model), "sc")^2 # Extract individual components (order will remain the same) gxt.var<-re[1] geno.var<-re[2] treat.var<-re[3] # Total variance is sum of all variances tot.var<-sum(re, res) reps.t1<-table(pheno[pheno$treatment == i.treat[1], 'id']) reps.t2<-table(pheno[pheno$treatment == i.treat[2], 'id']) reps.treatment<-c(reps.t1, reps.t2) reps.t1<-as.character(unique(pheno[pheno$treatment == i.treat[1], 'id'])) reps.t2<-as.character(unique(pheno[pheno$treatment == i.treat[2], 'id'])) unique.combined <- c(as.character(reps.t1), as.character(reps.t2)) freq.unique.combined <- table(unique.combined) # Calculate the harmonic mean replication within treatment blocks 
hm_treatment<-harmonic.mean(freq.unique.combined)$harmean # Now get a count of total genotypic replication reps.total<-table(pheno[,'id']) # Get the harmonic mean of this quantity hm_total<-harmonic.mean(reps.total)$harmean # Calculate heritability as described by AEL # H2 = geno.var/(geno.var + (gxt.var/harmonic mean of treatment block replication) + (residual.var/harmonic mean of total genotype replication) ) h2<-((geno.var)/(geno.var + (gxt.var/hm_treatment) + (res/hm_total))) # This is the heritability H2<-c(H2,h2) } names(H2)<-colnames(pheno)[3:ncol(pheno)] return(H2) } # Heritability within treatment get_h2_in_treatment<-function(data){ i.treat<-unique(data$treatment) pheno<-data year<-unique(pheno[,3]) exp<-unique(pheno[,2]) pheno<-pheno[,c(7,4,9:length(colnames(pheno)))] colnames(pheno)[c(1,2)]<-c("id", "treatment") pheno[pheno == "."] <- NA colnames(pheno)[3:ncol(pheno)]<-paste(colnames(pheno)[3:ncol(pheno)] , exp, sep="_") variance.out<-c() for (t in 1:length(i.treat)) { # Create variables to store values treatment.pheno<-pheno[pheno$treatment == i.treat[t],] H2<-c() e2<-c() # For each treatment.phenotype calculate variance for(i in 3:length(colnames(treatment.pheno))){ # Use only RILs with all measurements for each treatment.phenotype cc.treatment.pheno<-treatment.pheno[complete.cases(treatment.pheno[,i]),c(1:2,i)] # Build linear model each cofactor is a random effect model<-lmer(cc.treatment.pheno[,3]~(1|id), data=cc.treatment.pheno, control=lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.rankZ = "ignore",check.nobs.vs.nRE="ignore")) # Extract variance from model object, save individual components in vector 're' and residual variance as a scalar named 'res' re<-as.numeric(VarCorr(model)) res<-attr(VarCorr(model), "sc")^2 # Extract individual components (order will remain the same) geno.var<-re[1] # Total variance is sum of all variances tot.var<-sum(re, res) # Get proportion of variance h<-geno.var/tot.var e<-res/tot.var # Append variables to a 
vector of variables H2<-c(H2,h) e2<-c(e2,e) } variance<-rbind(H2, e2) colnames(treatment.pheno)[3:length(treatment.pheno)]<-paste(i.treat[t], colnames(treatment.pheno)[3:length(treatment.pheno)], sep="_") colnames(variance)<-colnames(treatment.pheno)[3:length(treatment.pheno)] rownames(variance)<-c('Genotype', 'Error') assign(paste('variance', i.treat[t], sep="_"), variance) variance.out<-cbind(variance.out, variance) } return(variance.out) } # Total variance partition get_total_var<-function(data){ i.treat<-unique(data$treatment) pheno<-data year<-unique(pheno[,3]) exp<-unique(pheno[,2]) pheno<-pheno[,c(7,4,9:length(colnames(pheno)))] colnames(pheno)[c(1,2)]<-c("id", "treatment") pheno[pheno == "."] <- NA colnames(pheno)[3:ncol(pheno)]<-paste(colnames(pheno)[3:ncol(pheno)] , exp, sep="_") H2<-c() t2<-c() e2<-c() gxt2<-c() for (i in 3:length(colnames(pheno))){ # Get complete cases cc.pheno<-pheno[complete.cases(pheno[,i]),c(1:2,i)] # Build linear model each cofactor is a random effect model<-lmer(cc.pheno[,3]~(1|id)+(1|treatment)+(1|id:treatment), data=cc.pheno) # Extract variance from model object, save individual components in vector 're' and residual variance as a scalar named 'res' re<-as.numeric(VarCorr(model)) res<-attr(VarCorr(model), "sc")^2 # Extract individual components (order will remain the same) gxt.var<-re[1] geno.var<-re[2] treat.var<-re[3] # Total variance is sum of all variances tot.var<-sum(re, res) # Get proportion of variance h<-geno.var/tot.var t<-treat.var/tot.var e<-res/tot.var gxt<-gxt.var/tot.var # Append variables to a vector of variables H2<-c(H2,h) t2<-c(t2,t) e2<-c(e2,e) gxt2<-c(gxt2, gxt) } variance<-rbind(H2, t2, gxt2, e2) colnames(variance)<-colnames(pheno)[3:length(pheno)] rownames(variance)<-c('Genotype', 'Treatment', 'G x Treatment', 'Error') return(variance) } # Total variance partition field (includes plot) get_total_var_field<-function(data){ i.treat<-unique(data$treatment) pheno<-data year<-unique(pheno[,3]) 
exp<-unique(pheno[,2]) pheno<-pheno[,c(7,4,9:length(colnames(pheno)))] colnames(pheno)[c(1,2)]<-c("id", "treatment") pheno[pheno == "."] <- NA colnames(pheno)[3:ncol(pheno)]<-paste(colnames(pheno)[3:ncol(pheno)] , exp, sep="_") H2<-c() t2<-c() e2<-c() p2<-c() gxt2<-c() for (i in 5:length(colnames(pheno))){ # Get complete cases cc.pheno<-pheno[complete.cases(pheno[,i]),c(1:2,i)] # Build linear model each cofactor is a random effect model<-lmer(cc.pheno[,3]~(1|id)+(1|treatment)+(1|plot)+(1|id:treatment), data=cc.pheno) # Extract variance from model object, save individual components in vector 're' and residual variance as a scalar named 'res' re<-as.numeric(VarCorr(model)) res<-attr(VarCorr(model), "sc")^2 # Extract individual components (order will remain the same) gxt.var<-re[1] geno.var<-re[2] plot.var<-re[3] treat.var<-re[4] # Total variance is sum of all variances tot.var<-sum(re, res) # Get proportion of variance for all factors h<-geno.var/tot.var t<-treat.var/tot.var e<-res/tot.var gxt<-gxt.var/tot.var p<-plot.var/tot.var # Append variables to a vector of variables H2<-c(H2,h) t2<-c(t2,t) e2<-c(e2,e) gxt2<-c(gxt2, gxt) p2<-c(p2, p) } variance<-rbind(H2, t2, p2, gxt2, e2) colnames(variance)<-colnames(pheno)[3:length(pheno)] rownames(variance)<-c('Genotype', 'Treatment','Plot', 'G x Treatment', 'Error') return(variance) } loess.fit.for.h2<-function(data, trait){ barcodes<-unique(data$plantbarcode) dap_i<-sort(unique(data$dap_i)) plant_id_loess_model_fit<-c() for(b in barcodes){ print(b) temp<-data[data$plantbarcode == b,] genotype<-unique(temp$genotype) treatment<-unique(temp$treatment) #colnumber <- which(colnames(temp) %in% trait) out.loess<-loess(get(trait)~dap_i, data=temp) times = seq(from = min(temp$dap_i), to = max(temp$dap_i), by=0.1) output<-get.loess.estimates(out.loess, times, b) output<-as.data.frame(output) output<-output[output$times %in% dap_i,] output$genotype<-rep(genotype, nrow(output)) output$treatment<-rep(treatment, nrow(output)) 
colnames(output)<-c("plantbarcode", "dap_i", trait,paste(trait, ".lo" ,sep=""),paste(trait, ".hi", sep=""), paste(trait, ".slope", sep=""), 'genotype', 'treatment') output<-output[,c(1,7,8,2:6)] plant_id_loess_model_fit<-rbind(plant_id_loess_model_fit, output) } return(plant_id_loess_model_fit) } get.loess.estimates<-function(fit, times, id) { return_df<-c() predict.vals<-predict(fit, times, se=T) ids<-rep(as.character(id), length(times)) #condition<-rep(as.character(cond), length(times)) M<-predict.vals$fit M.lo<-M - predict.vals$se.fit M.hi<-M + predict.vals$se.fit slope<-c(0) for(s in 2:length(times)) { s.temp<-(M[s] - M[s-1]) / 2 slope<-c(slope, s.temp) } slope<-slope*10 return_df<-cbind(ids,times, M, M.lo, M.hi, slope) return(return_df) } ############################################## # Lets define a function to merge the QTL results based upon a single marker ############################################## merged_table<-c() unify_chr<-function(temp){ all_qtl<-sort(table(temp$marker), decreasing=T) if(length(all_qtl) > 1){ #m.name<-names(all_qtl)[1] m.name<-temp[temp$lod == max(temp$lod),'marker'] m.name<-m.name[1] ave.pos<-mean(temp[temp$marker == m.name, 'pos']) #cr<-unique(temp[temp$marker == names(all_qtl)[1],'chr']) #po<-unique(temp[temp$marker == names(all_qtl)[1],'pos']) cr<-unique(temp[temp$marker == m.name,'chr']) po<-unique(temp[temp$marker == m.name,'pos']) max.pos<-ave.pos+10 min.pos<-ave.pos-10 subset<-temp[temp$pos > min.pos & temp$pos < max.pos,] subset$marker<-rep(m.name, length(subset$marker)) subset$chr<-rep(cr, length(subset$chr)) subset$pos<-rep(po, length(subset$pos)) temp<-temp[temp$pos < min.pos | temp$pos > max.pos,] merged_table<<-rbind(merged_table, subset) unify_chr(temp) } if(length(all_qtl) == 1){ merged_table<<-rbind(merged_table, temp) #unify_chr(temp) } } unify_marker<-function(input){ chrs<-sort(unique(input$chr)) merged_table<<-c() for(ch in chrs) { temp<-input[input$chr == ch,] temp$marker<-as.character(temp$marker) 
unify_chr(temp) } return(merged_table) } ###### This is a function to get unique qtl from a qtl summary table # Basically collpase redundant QTL into 10 cM intervals remove_dup_qtl<-function(temp){ all_qtl<-sort(table(temp$marker), decreasing=T) if (length(all_qtl) == 1) { treatments<-as.character(unique(temp$treatment)) if(length(treatments) == 1) { m.names<<-c(m.names, names(all_qtl)[1]) # <<- means change the global variable (chr<<-max) changes the global variable chr to local variable max chr<<-c(chr,unique(temp[temp$marker == names(all_qtl)[1],'chr'])) pos<<-c(pos,unique(temp[temp$marker == names(all_qtl)[1],'pos'])) t<-as.character(unique(temp$treatment)) condition<<-c(condition, t) qtl_count<<-c(qtl_count, 1) max.lod<<-c(max.lod, max(temp[temp$marker == names(all_qtl)[1],'lod'], na.rm = T)) max.prop.var<<-c(max.prop.var, max(temp[temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) max.fx<<-c(max.fx, max(temp[temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) max.fx_se<<-c(max.fx_se, max(temp[temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) max.L.CI_pos<<-c(max.L.CI_pos, max(temp[temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) max.R.CI_pos<<-c(max.R.CI_pos, max(temp[temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) med.lod<<-c(med.lod, median(temp[temp$marker == names(all_qtl)[1],'lod'], na.rm = T)) med.prop.var<<-c(med.prop.var, median(temp[temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) med.fx<<-c(med.fx, median(temp[temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) med.fx_se<<-c(med.fx_se, median(temp[temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) med.L.CI_pos<<-c(med.L.CI_pos, median(temp[temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) med.R.CI_pos<<-c(med.R.CI_pos, median(temp[temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) min.lod<<-c(min.lod, min(temp[temp$marker == names(all_qtl)[1],'lod'], na.rm = T)) min.prop.var<<-c(min.prop.var, 
min(temp[temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) min.fx<<-c(min.fx, min(temp[temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) min.fx_se<<-c(min.fx_se, min(temp[temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) min.L.CI_pos<<-c(min.L.CI_pos, min(temp[temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) min.R.CI_pos<<-c(min.R.CI_pos, min(temp[temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) print(chr) print(pos) print(qtl_count) print(med.lod) } if(length(treatments) > 1){ for(t in treatments) { t.temp<-temp[temp$treatment == t,] m.names<<-c(m.names, names(all_qtl)[1]) # <<- means change the global variable (chr<<-max) changes the global variable chr to local variable max chr<<-c(chr,unique(t.temp[t.temp$marker == names(all_qtl)[1],'chr'])) pos<<-c(pos,unique(t.temp[t.temp$marker == names(all_qtl)[1],'pos'])) condition<<-c(condition, t) qtl_count<<-c(qtl_count, 1) max.lod<<-c(max.lod, max(t.temp[t.temp$marker == names(all_qtl)[1],'lod'], na.rm=T)) max.prop.var<<-c(max.prop.var, max(t.temp[t.temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) max.fx<<-c(max.fx, max(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) max.fx_se<<-c(max.fx_se, max(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) max.L.CI_pos<<-c(max.L.CI_pos, max(t.temp[t.temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) max.R.CI_pos<<-c(max.R.CI_pos, max(t.temp[t.temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) med.lod<<-c(med.lod, median(t.temp[t.temp$marker == names(all_qtl)[1],'lod'], na.rm=T)) med.prop.var<<-c(med.prop.var, median(t.temp[t.temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) med.fx<<-c(med.fx, median(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) med.fx_se<<-c(med.fx_se, median(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) med.L.CI_pos<<-c(med.L.CI_pos, median(t.temp[t.temp$marker == 
names(all_qtl)[1],'L.CI_pos'], na.rm = T)) med.R.CI_pos<<-c(med.R.CI_pos, median(t.temp[t.temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) min.lod<<-c(min.lod, min(t.temp[t.temp$marker == names(all_qtl)[1],'lod'], na.rm=T)) min.prop.var<<-c(min.prop.var, min(t.temp[t.temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) min.fx<<-c(min.fx, min(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) min.fx_se<<-c(min.fx_se, min(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) min.L.CI_pos<<-c(min.L.CI_pos, min(t.temp[t.temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) min.R.CI_pos<<-c(min.R.CI_pos, min(t.temp[t.temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) print(chr) print(pos) print(qtl_count) print(med.lod) } } } if (length(all_qtl) > 1) { #name<-names(all_qtl)[1] name<-temp[temp$lod == max(temp$lod),'marker'] name<-name[1] tester<-temp[temp$marker == name,] treatments<-as.character(unique(tester$treatment)) if(length(treatments) == 1) { ave.pos<-mean(temp[temp$marker == name, 'pos']) #m.name<-names(all_qtl)[1] m.name<-name #cr<-unique(temp[temp$marker == names(all_qtl)[1],'chr']) #po<-unique(temp[temp$marker == names(all_qtl)[1],'pos']) cr<-unique(temp[temp$marker == m.name,'chr']) po<-unique(temp[temp$marker == m.name,'pos']) max.pos<-ave.pos+10 min.pos<-ave.pos-10 subset<-temp[temp$pos > min.pos & temp$pos < max.pos,] if(length(unique(subset$treatment)) == 1){ m.names<<-c(m.names, m.name) chr<<-c(chr, cr) pos<<-c(pos, po) qtl_c<-nrow(subset) x.lod<-max(subset$lod, na.rm = T) x.prop.var<-max(subset$prop.var, na.rm = T) x.fx<-max(subset$additive.fx, na.rm = T) x.fx_se<-max(subset$additive.fx_se, na.rm = T) x.L.CI_pos<-max(subset$L.CI_pos, na.rm = T) x.R.CI_pos<-max(subset$R.CI_pos, na.rm = T) m.lod<-median(subset$lod, na.rm = T) m.prop.var<-median(subset$prop.var, na.rm = T) m.fx<-median(subset$additive.fx, na.rm = T) m.fx_se<-median(subset$additive.fx_se, na.rm = T) 
m.L.CI_pos<-median(subset$L.CI_pos, na.rm = T) m.R.CI_pos<-median(subset$R.CI_pos, na.rm = T) n.lod<-min(subset$lod, na.rm = T) n.prop.var<-min(subset$prop.var, na.rm = T) n.fx<-min(subset$additive.fx, na.rm = T) n.fx_se<-min(subset$additive.fx_se, na.rm = T) n.L.CI_pos<-min(subset$L.CI_pos, na.rm = T) n.R.CI_pos<-min(subset$R.CI_pos, na.rm = T) temp<-temp[temp$pos < min.pos | temp$pos > max.pos,] print(ave.pos) print(chr) print(pos) #print(collapsed_qtl) print(med.lod) condition<<-c(condition, treatments[1]) qtl_count<<-c(qtl_count, qtl_c) max.lod<<-c(max.lod, x.lod) max.prop.var<<-c(max.prop.var, x.prop.var) max.fx<<-c(max.fx, x.fx) max.fx_se<<-c(max.fx_se, x.fx_se) max.L.CI_pos<<-c(max.L.CI_pos, x.L.CI_pos) max.R.CI_pos<<-c(max.R.CI_pos, x.R.CI_pos) med.lod<<-c(med.lod, m.lod) med.prop.var<<-c(med.prop.var, m.prop.var) med.fx<<-c(med.fx, m.fx) med.fx_se<<-c(med.fx_se, m.fx_se) med.L.CI_pos<<-c(med.L.CI_pos, m.L.CI_pos) med.R.CI_pos<<-c(med.R.CI_pos, m.R.CI_pos) min.lod<<-c(min.lod, n.lod) min.prop.var<<-c(min.prop.var, n.prop.var) min.fx<<-c(min.fx, n.fx) min.fx_se<<-c(min.fx_se, n.fx_se) min.L.CI_pos<<-c(min.L.CI_pos, n.L.CI_pos) min.R.CI_pos<<-c(min.R.CI_pos, n.R.CI_pos) remove_dup_qtl(temp) } if(length(unique(subset$treatment)) > 1){ subset.ts<-unique(subset$treatment) for (t in subset.ts) { m.names<<-c(m.names, m.name) chr<<-c(chr, cr) pos<<-c(pos, po) t.subset<-subset[subset$treatment == t,] qtl_c<-nrow(t.subset) x.lod<-max(t.subset$lod, na.rm = T) x.prop.var<-max(t.subset$prop.var, na.rm = T) x.fx<-max(t.subset$additive.fx, na.rm = T) x.fx_se<-max(t.subset$additive.fx_se, na.rm = T) x.L.CI_pos<-max(t.subset$L.CI_pos, na.rm = T) x.R.CI_pos<-max(t.subset$R.CI_pos, na.rm = T) m.lod<-median(t.subset$lod, na.rm = T) m.prop.var<-median(t.subset$prop.var, na.rm = T) m.fx<-median(t.subset$additive.fx, na.rm = T) m.fx_se<-median(t.subset$additive.fx_se, na.rm = T) m.L.CI_pos<-median(t.subset$L.CI_pos, na.rm = T) m.R.CI_pos<-median(t.subset$R.CI_pos, na.rm = T) 
n.lod<-min(t.subset$lod, na.rm = T) n.prop.var<-min(t.subset$prop.var, na.rm = T) n.fx<-min(t.subset$additive.fx, na.rm = T) n.fx_se<-min(t.subset$additive.fx_se, na.rm = T) n.L.CI_pos<-min(t.subset$L.CI_pos, na.rm = T) n.R.CI_pos<-min(t.subset$R.CI_pos, na.rm = T) print(ave.pos) print(chr) print(pos) #print(collapsed_qtl) print(med.lod) condition<<-c(condition, t) qtl_count<<-c(qtl_count, qtl_c) max.lod<<-c(max.lod, x.lod) max.prop.var<<-c(max.prop.var, x.prop.var) max.fx<<-c(max.fx, x.fx) max.fx_se<<-c(max.fx_se, x.fx_se) max.L.CI_pos<<-c(max.L.CI_pos, x.L.CI_pos) max.R.CI_pos<<-c(max.R.CI_pos, x.R.CI_pos) med.lod<<-c(med.lod, m.lod) med.prop.var<<-c(med.prop.var, m.prop.var) med.fx<<-c(med.fx, m.fx) med.fx_se<<-c(med.fx_se, m.fx_se) med.L.CI_pos<<-c(med.L.CI_pos, m.L.CI_pos) med.R.CI_pos<<-c(med.R.CI_pos, m.R.CI_pos) min.lod<<-c(min.lod, n.lod) min.prop.var<<-c(min.prop.var, n.prop.var) min.fx<<-c(min.fx, n.fx) min.fx_se<<-c(min.fx_se, n.fx_se) min.L.CI_pos<<-c(min.L.CI_pos, n.L.CI_pos) min.R.CI_pos<<-c(min.R.CI_pos, n.R.CI_pos) } temp<-temp[temp$pos < min.pos | temp$pos > max.pos,] remove_dup_qtl(temp) } } if(length(treatments) > 1) { #for (t in treatments) { ave.pos<-mean(temp[temp$marker == name, 'pos']) #m.name<-names(all_qtl)[1] #cr<-unique(temp[temp$marker == names(all_qtl)[1],'chr']) #po<-unique(temp[temp$marker == names(all_qtl)[1],'pos']) m.name<-name cr<-unique(temp[temp$marker == m.name,'chr']) po<-unique(temp[temp$marker == m.name,'pos']) max.pos<-ave.pos+10 min.pos<-ave.pos-10 subset<-temp[temp$pos > min.pos & temp$pos < max.pos,] subset.ts<-unique(subset$treatment) for (t in subset.ts) { m.names<<-c(m.names, m.name) chr<<-c(chr, cr) pos<<-c(pos, po) t.subset<-subset[subset$treatment == t,] qtl_c<-nrow(t.subset) x.lod<-max(t.subset$lod, na.rm = T) x.prop.var<-max(t.subset$prop.var, na.rm = T) x.fx<-max(t.subset$additive.fx, na.rm = T) x.fx_se<-max(t.subset$additive.fx_se, na.rm = T) x.L.CI_pos<-max(t.subset$L.CI_pos, na.rm = T) 
x.R.CI_pos<-max(t.subset$R.CI_pos, na.rm = T) m.lod<-median(t.subset$lod, na.rm = T) m.prop.var<-median(t.subset$prop.var, na.rm = T) m.fx<-median(t.subset$additive.fx, na.rm = T) m.fx_se<-median(t.subset$additive.fx_se, na.rm = T) m.L.CI_pos<-median(t.subset$L.CI_pos, na.rm = T) m.R.CI_pos<-median(t.subset$R.CI_pos, na.rm = T) n.lod<-min(t.subset$lod, na.rm = T) n.prop.var<-min(t.subset$prop.var, na.rm = T) n.fx<-min(t.subset$additive.fx, na.rm = T) n.fx_se<-min(t.subset$additive.fx_se, na.rm = T) n.L.CI_pos<-min(t.subset$L.CI_pos, na.rm = T) n.R.CI_pos<-min(t.subset$R.CI_pos, na.rm = T) print(ave.pos) print(chr) print(pos) #print(collapsed_qtl) print(med.lod) condition<<-c(condition, t) qtl_count<<-c(qtl_count, qtl_c) max.lod<<-c(max.lod, x.lod) max.prop.var<<-c(max.prop.var, x.prop.var) max.fx<<-c(max.fx, x.fx) max.fx_se<<-c(max.fx_se, x.fx_se) max.L.CI_pos<<-c(max.L.CI_pos, x.L.CI_pos) max.R.CI_pos<<-c(max.R.CI_pos, x.R.CI_pos) med.lod<<-c(med.lod, m.lod) med.prop.var<<-c(med.prop.var, m.prop.var) med.fx<<-c(med.fx, m.fx) med.fx_se<<-c(med.fx_se, m.fx_se) med.L.CI_pos<<-c(med.L.CI_pos, m.L.CI_pos) med.R.CI_pos<<-c(med.R.CI_pos, m.R.CI_pos) min.lod<<-c(min.lod, n.lod) min.prop.var<<-c(min.prop.var, n.prop.var) min.fx<<-c(min.fx, n.fx) min.fx_se<<-c(min.fx_se, n.fx_se) min.L.CI_pos<<-c(min.L.CI_pos, n.L.CI_pos) min.R.CI_pos<<-c(min.R.CI_pos, n.R.CI_pos) } temp<-temp[temp$pos < min.pos | temp$pos > max.pos,] remove_dup_qtl(temp) } } } condense_qtl<-function(input){ chrs<-sort(unique(input$chr)) m.names<<-c() chr<<-c() pos<<-c() condition<<-c() qtl_count<<-c() med.lod<<-c() med.prop.var<<-c() med.fx<<-c() med.fx_se<<-c() med.L.CI_pos<<-c() med.R.CI_pos<<-c() max.lod<<-c() max.prop.var<<-c() max.fx<<-c() max.fx_se<<-c() max.L.CI_pos<<-c() max.R.CI_pos<<-c() min.lod<<-c() min.prop.var<<-c() min.fx<<-c() min.fx_se<<-c() min.L.CI_pos<<-c() min.R.CI_pos<<-c() for(ch in chrs) { temp<-input[input$chr == ch,] temp$marker<-as.character(temp$marker) remove_dup_qtl(temp) } 
input.collapsed<-as.data.frame(cbind(m.names, chr, pos, condition, qtl_count, max.lod, max.prop.var, max.fx, max.fx_se, max.L.CI_pos, max.R.CI_pos,med.lod, med.prop.var, med.fx, med.fx_se, med.L.CI_pos, med.R.CI_pos,min.lod, min.prop.var, min.fx, min.fx_se, min.L.CI_pos, min.R.CI_pos)) return(input.collapsed) } ############################################## # Lets define a function to make common QTL plots ############################################## make_qtl_common_plot<-function(all.qtl, plotname) { all.qtl$chr<-factor(all.qtl$chr, levels=c(1,2,3,4,5,6,7,8,9)) fx.size<-all.qtl$additive.fx fx.size<-as.numeric(as.character(fx.size)) plot.char<-c() for(i in 1:length(fx.size)){ if (fx.size[i] > 0) {plot.char<-c(plot.char, '24')} if (fx.size[i] < 0) {plot.char<-c(plot.char, '25')} } all.qtl$plot.char<-plot.char all.qtl$plot.char<-as.factor(all.qtl$plot.char) all.qtl$group<-paste(all.qtl$exp, all.qtl$year, all.qtl$treatment, sep="_") treatments<-as.character(all.qtl$treatment) treatment.name<-unique(treatments) plot.col<-c() for(i in 1:length(treatments)){ logical<-treatments[i] == treatment.name col<-which(logical, arr.ind=TRUE) plot.col<-c(plot.col, col) } all.qtl$plot.col<-plot.col pdf(plotname) p<-ggplot() + geom_point(data = all.qtl, aes(x = pos, y = prop.var, shape=plot.char, colour=as.character(plot.col), fill=as.character(plot.col)),size=3, alpha=0.5) + geom_blank(data = blank_data, aes(x = x, y = y)) + facet_wrap(~chr, scales = "free_x") + expand_limits(x = 0) + scale_x_continuous(expand = c(0, 0)) + theme_bw() + scale_shape_manual(values=c(24,25)) print(p + scale_color_manual(values=c("1" = "orange", "2" = "blue")) + scale_fill_manual(values=c("1" = "orange", "2" = "blue")) + ylab("% Variance") + xlab("Genome Position") + theme(legend.position = "none")) dev.off() } make_qtl_common_plot_diff<-function(all.qtl, plotname) { all.qtl$chr<-factor(all.qtl$chr, levels=c(1,2,3,4,5,6,7,8,9)) fx.size<-all.qtl$additive.fx fx.size<-as.numeric(as.character(fx.size)) 
plot.char<-c() for(i in 1:length(fx.size)){ if (fx.size[i] > 0) {plot.char<-c(plot.char, '24')} if (fx.size[i] < 0) {plot.char<-c(plot.char, '25')} } all.qtl$plot.char<-plot.char all.qtl$plot.char<-as.factor(all.qtl$plot.char) all.qtl$group<-paste(all.qtl$exp, all.qtl$year, all.qtl$treatment, sep="_") treatments<-as.character(all.qtl$treatment) treatment.name<-unique(treatments) plot.col<-c() for(i in 1:length(treatments)){ logical<-treatments[i] == treatment.name col<-which(logical, arr.ind=TRUE) plot.col<-c(plot.col, col) } all.qtl$plot.col<-plot.col pdf(plotname) p<-ggplot() + geom_point(data = all.qtl, aes(x = pos, y = prop.var, shape=plot.char, colour=as.character(plot.col), fill=as.character(plot.col)),size=3, alpha=0.5) + geom_blank(data = blank_data, aes(x = x, y = y)) + facet_wrap(~chr, scales = "free_x") + expand_limits(x = 0) + scale_x_continuous(expand = c(0, 0)) + theme_bw() + scale_shape_manual(values=c(24,25)) print(p + scale_color_manual(values=c("1" = "orange", "2" = "blue", "3" = "grey")) + scale_fill_manual(values=c("1" = "orange", "2" = "blue", "3" = "grey")) + ylab("% Variance") + xlab("Genome Position") + theme(legend.position = "none")) dev.off() } ############################################## # Lets define a function to calculated predicted values and residuals from a major axis model ############################################## get.lmodel2.values<-function(model, rma){ if(rma == 'N') { # Get model intercepts ols.int<-model$regression.results$Intercept[1] ma.int<-model$regression.results$Intercept[2] sma.int<-model$regression.results$Intercept[3] # Get model slope ols.slope<-model$regression.results$Slope[1] ma.slope<-model$regression.results$Slope[2] sma.slope<-model$regression.results$Slope[3] # Get values you specified as X x<-model$x y<-model$y # Get predicted values y.ols.pred<-(ols.slope * x) + ols.int y.ma.pred<-(ma.slope * x) + ma.int y.sma.pred<-(sma.slope * x) + sma.int # Get residuals from the fit y.ols.res<-(y-y.ols.pred) 
y.ma.res<-(y-y.ma.pred) y.sma.res<-(y-y.sma.pred) # Format results out<-cbind(x,y,y.ols.pred,y.ma.pred,y.sma.pred,y.ols.res,y.ma.res,y.sma.res) colnames(out)<-c('x','y','ols.pred','ma.pred','sma.pred','ols.res','ma.res','sma.res') } if(rma == 'N') { # Get model intercepts ols.int<-model$regression.results$Intercept[1] ma.int<-model$regression.results$Intercept[2] sma.int<-model$regression.results$Intercept[3] rma.int<-model$regression.results$Intercept[4] # Get model slope ols.slope<-model$regression.results$Slope[1] ma.slope<-model$regression.results$Slope[2] sma.slope<-model$regression.results$Slope[3] rma.slope<-model$regression.results$Slope[4] # Get values you specified as X x<-model$x y<-model$y # Get predicted values y.ols.pred<-(ols.slope * x) + ols.int y.ma.pred<-(ma.slope * x) + ma.int y.sma.pred<-(sma.slope * x) + sma.int y.rma.pred<-(rma.slope * x) + rma.int # Get residuals from the fit y.ols.res<-(y.ols.pred-y) y.ma.res<-(y.ma.pred-y) y.sma.res<-(y.sma.pred-y) y.rma.res<-(y.rma.pred-y) # Format results out<-cbind(x,y,y.ols.pred,y.ma.pred,y.sma.pred,y.rma.pred,y.ols.res,y.ma.res,y.sma.res,y.rma.res) colnames(out)<-c('x','y','ols.pred','ma.pred','sma.pred','rma.pred','ols.res','ma.res','sma.res','rma.res') } return(out) } setwd(home.dir) save.image('analysis_fxns.Rdata')
/script/analysis_fxns.R
no_license
maxjfeldman/Feldman_Ellsworth_Setaria_WUE_2017
R
false
false
39,565
r
############################################## # Load in dependencies ############################################## library(lme4) library(ggplot2) library(lattice) library(lmomco) ## The first thing is to set your R session to the base directory you just downloaded from github ## insert path below... setwd() ## Tester #setwd("~/Dropbox/Feldman_Ellsworth_Setaria_WUE_2017/") ##### CREATE DIRECTORY PATHS ##### ## Make the directory of the folder you downloaded the current working directory home.dir<-getwd() ############################################## # Lets define a function to get a loess fit of timeseries data ############################################## get.loess.fit<-function(fit, times, geno, cond) { return_df<-c() predict.vals<-predict(fit, times, se=T) genotype<-rep(geno, length(times)) condition<-rep(as.character(cond), length(times)) M<-predict.vals$fit M.lo<-M - predict.vals$se.fit M.hi<-M + predict.vals$se.fit slope<-c(0) for(s in 2:length(times)) { s.temp<-(M[s] - M[s-1]) / 2 slope<-c(slope, s.temp) } slope<-slope*10 return_df<-cbind(genotype, condition,times, M, M.lo, M.hi, slope) return(return_df) } ############################################## # Make a function to do loess fit, make plots and output data and report ############################################## report.loess.values<-function(rawdata, trait, genos, treatments, days, from, to, plotname){ genos<-as.character(sort(unique(genos))) treatments<-as.character(treatments) treatments<-unique(treatments) treatments<-sort(treatments) t1<-treatments[1] t2<-treatments[2] print(t1) print(t2) dap_i<-unique(days) dap_i<-sort(as.numeric(as.character(dap_i))) times = seq(from = from, to = to, by=0.1) colnumber <- which(colnames(rawdata) %in% trait) # Get loess fits for each genotype using get.loess.fit() fxn ril_loess_model_fit<-c() for (i in 1:length(genos)) { r<-genos[i] temp1<-rawdata[rawdata$genotype == as.character(r),] per.ril<-c() for (j in 1:length(treatments)) { t<-treatments[j] per.t<-c() 
temp2<-temp1[temp1$treatment == as.character(t),] if (nrow(temp2) < 1) {next;} colnumber2 <- which(colnames(temp2) %in% trait) # Log of 0 is INF need to replace with another small # temp2[,colnumber2]<-replace(temp2[,colnumber2], temp2[,colnumber2] <= 0, 1) out.loess<-loess(get(trait)~dap_i, data=temp2) output<-get.loess.fit(out.loess, times, r, t) ril_loess_model_fit<-rbind(ril_loess_model_fit, output) } } # Now make sure the resulting dataframe has ril_loess_model_fit<-ril_loess_model_fit[complete.cases(ril_loess_model_fit),] colnames(ril_loess_model_fit)<-c('ril', 'treatment', 'dap_i', 'M', 'M.lo', 'M.hi', 'AGR') ril_loess_model_fit<-as.data.frame(ril_loess_model_fit) ril_loess_model_fit$ril<-as.character(ril_loess_model_fit$ril) ril_loess_model_fit$treatment<-as.character(ril_loess_model_fit$treatment) ril_loess_model_fit$dap_i<-as.numeric(as.character(ril_loess_model_fit$dap_i)) ril_loess_model_fit$M<-as.numeric(as.character(ril_loess_model_fit$M)) ril_loess_model_fit$M.lo<-as.numeric(as.character(ril_loess_model_fit$M.lo)) ril_loess_model_fit$M.hi<-as.numeric(as.character(ril_loess_model_fit$M.hi)) ril_loess_model_fit$AGR<-as.numeric(as.character(ril_loess_model_fit$AGR)) ril_loess_model_fit<-ril_loess_model_fit[ril_loess_model_fit$dap_i %in% dap_i,] # Lets remove M.lo and M.hi # Sometimes these are difficult to estimate and end up NA # Creates plotting problems below ril_loess_model_fit<-ril_loess_model_fit[,c(1:4,7)] rate_id<-sort(unique(as.character(ril_loess_model_fit$ril))) growth_rate_report<-c() pdf(plotname, paper = "a4", width = 21/2.54, height = (7*5.3)/2.54) layout(matrix(c(1:4), ncol = 1, byrow = T), heights = c(1, 1, 1, 1)) for(i in 1:length(rate_id)) { r<-rate_id[i] ril_set<-rawdata[(rawdata$genotype == r),] ril_rates<-ril_loess_model_fit[(ril_loess_model_fit$ril == r),] plant_ids<-unique(ril_set$plantbarcode) colnumber3 <- which(colnames(ril_set) %in% trait) max.b<-max(max(ril_set[,colnumber3],na.rm=T), 
max(as.numeric(as.character(ril_rates$M)), na.rm=T),na.rm=T) min.b<-min(min(ril_set[,colnumber3],na.rm=T), min(as.numeric(as.character(ril_rates$M)), na.rm=T),na.rm=T) set<-ril_set[ril_set$plantbarcode == plant_ids[1],] # Start making plots if (set[1,'treatment'] == t2) {l.color<-c("light blue")} if (set[1,'treatment'] == t1) {l.color<-c("gold")} plot(set[,colnumber3]~set$dap_i, type='p', pch=19, xlim=c(from,to), ylim=c(min.b, max.b), col=l.color, xlab="Days after planting", ylab=trait, main=r) if(length(plant_ids) >1) { for (j in 2:length(plant_ids)) { set<-ril_set[ril_set$plantbarcode == plant_ids[j],] if (set[1,'treatment'] == t2) {l.color<-c("light blue")} if (set[1,'treatment'] == t1) {l.color<-c("gold")} points(set[,colnumber3]~set$dap_i, type='p', pch=19, xlim=c(from,to), ylim=c(min.b, max.b), col=l.color, xlab="Day", ylab=trait) } } rate.t2<-ril_rates[ril_rates$treatment == t2, ] if (nrow(rate.t2) > 0) { l.color<-c("blue") p.color<-c("dark blue") max.rate.t2<-max(as.numeric(as.character(rate.t2$AGR)),na.rm=T) day.t2<-rate.t2[rate.t2$AGR == max.rate.t2, 'dap_i'] max.val.t2<-as.numeric(as.character(rate.t2[rate.t2$AGR == max.rate.t2, 'M'])) lines(rate.t2$M~rate.t2$dap_i, lwd=2, col=l.color) points(day.t2, max.val.t2, cex=1.5, col=p.color, pch=18) # Can optionally plot confidence interval #lines(rate.t2$M.hi~rate.t2$dap_i, lty=2, col=c('navy')) #lines(rate.t2$M.lo~rate.t2$dap_i, lty=2, col=c('navy')) } rate.t1<-ril_rates[ril_rates$treatment == t1, ] if (nrow(rate.t1) > 0) { l.color<-c("orange") p.color<-c("dark orange") max.rate.t1<-max(rate.t1$AGR,na.rm=T) day.t1<-rate.t1[rate.t1$AGR == max.rate.t1, 'dap_i'] max.val.t1<-rate.t1[rate.t1$AGR == max.rate.t1, 'M'] lines(rate.t1$M~rate.t1$dap_i, lwd=2, col=l.color) points(day.t1, max.val.t1, cex=1.5, col=p.color, pch=18) #lines(rate.t1$M.hi~rate.t1$dap_i, lty=2, col=c('brown')) #lines(rate.t1$M.lo~rate.t1$dap_i, lty=2, col=c('brown')) } # treatment drought rate.t1<-ril_rates[ril_rates$treatment == t1, ] if 
(nrow(rate.t1) > 0) { max.day_trait.t1<-max(rate.t1$M, na.rm=T) max.agr.t1<-max(rate.t1$AGR,na.rm=T) rate.t1<-rate.t1[complete.cases(rate.t1),] max.agr.day.t1<-rate.t1[rate.t1$AGR == max.agr.t1, 'dap_i'] max.agr.day_trait.t1<-rate.t1[rate.t1$AGR == max.agr.t1, 'M'] } # treatment well watered rate.t2<-ril_rates[ril_rates$treatment == t2, ] if (nrow(rate.t2) > 0) { max.day_trait.t2<-max(rate.t2$M,na.rm=T) max.agr.t2<-max(rate.t2$AGR,na.rm=T) rate.t2<-rate.t2[complete.cases(rate.t2),] max.agr.day.t2<-rate.t2[rate.t2$AGR == max.agr.t2, 'dap_i'] max.agr.day_trait.t2<-rate.t2[rate.t2$AGR == max.agr.t2, 'M'] } # Generate the report on a per/ril basis if (length(unique(ril_rates$treatment)) > 1) { ril_entry<-c(r, max.day_trait.t2, max.day_trait.t1, max.day_trait.t2 - max.day_trait.t1, max.agr.t2, max.agr.t1, max.agr.t2 - max.agr.t1, max.agr.day.t2, max.agr.day.t1, max.agr.day.t2 - max.agr.day.t1, max.agr.day_trait.t2, max.agr.day_trait.t1, max.agr.day_trait.t2 - max.agr.day_trait.t1) growth_rate_report<-rbind(growth_rate_report, ril_entry) } # Plot rates if (length(unique(ril_rates$treatment)) > 1) { max.r<-max(max(rate.t2$AGR,na.rm=T), max(rate.t1$AGR,na.rm=T),na.rm=T) min.r<-min(min(rate.t2$AGR,na.rm=T), min(rate.t1$AGR,na.rm=T),na.rm=T) rate.t2<<-rate.t2 rate.t1<<-rate.t1 max.r<<-max.r min.r<<-min.r max.agr.day.t2<<-max.agr.day.t2 max.agr.t2<<-max.agr.t2 max.agr.day.t1<<-max.agr.day.t1 max.agr.t1<<-max.agr.t1 plot(rate.t2$AGR~rate.t2$dap_i, type="l", col="blue", xlab='Days after planting', ylab='Rate', ylim=c(min.r,max.r)) lines(rate.t1$AGR~rate.t1$dap_i, col="orange") points(max.agr.day.t2, max.agr.t2, pch=18, col="dark blue") points(max.agr.day.t1, max.agr.t1, pch=18, col="dark orange") } } dev.off() #ril_loess_model_fit<-ril_loess_model_fit[,c(1:4,7)] colnames(ril_loess_model_fit)<-c("genotype", "treatment", "dap_i", trait, paste(trait, '_rate', sep="")) # Give column names to growth report rownames(growth_rate_report)<-c(1:nrow(growth_rate_report)) 
growth_rate_report<-as.data.frame(growth_rate_report) colnames(growth_rate_report)<-c("genotype", "max_value.t2", "max_value.t1","max_value.diff","max_rate.t2","max_rate.t1","max_rate.diff","max_day.t2","max_day.t1","max_day.diff","value_max_rate_day.t2","value_max_rate_day.t1", "value_max_rate_day.diff") ril_loess_model_fit<<-ril_loess_model_fit growth_rate_report<<-growth_rate_report } ############################################## # Lets define a function to calculate heritability ############################################## # Broad sense heritability get_h2<-function(data){ i.treat<-unique(data$treatment) pheno<-data year<-unique(pheno[,3]) exp<-unique(pheno[,2]) pheno<-pheno[,c(7,4,9:length(colnames(pheno)))] colnames(pheno)[c(1,2)]<-c("id", "treatment") pheno[pheno == "."] <- NA colnames(pheno)[3:ncol(pheno)]<-paste(colnames(pheno)[3:ncol(pheno)] , exp, sep="_") H2<-c() for (i in 3:length(colnames(pheno))){ # Get complete cases cc.pheno<-pheno[complete.cases(pheno[,i]),c(1:2,i)] # Build linear model each cofactor is a random effect model<-lmer(cc.pheno[,3]~(1|id)+(1|treatment)+(1|id:treatment), data=cc.pheno) # Extract variance from model object, save individual components in vector 're' and residual variance as a scalar named 'res' re<-as.numeric(VarCorr(model)) res<-attr(VarCorr(model), "sc")^2 # Extract individual components (order will remain the same) gxt.var<-re[1] geno.var<-re[2] treat.var<-re[3] # Total variance is sum of all variances tot.var<-sum(re, res) reps.t1<-table(pheno[pheno$treatment == i.treat[1], 'id']) reps.t2<-table(pheno[pheno$treatment == i.treat[2], 'id']) reps.treatment<-c(reps.t1, reps.t2) reps.t1<-as.character(unique(pheno[pheno$treatment == i.treat[1], 'id'])) reps.t2<-as.character(unique(pheno[pheno$treatment == i.treat[2], 'id'])) unique.combined <- c(as.character(reps.t1), as.character(reps.t2)) freq.unique.combined <- table(unique.combined) # Calculate the harmonic mean replication within treatment blocks 
hm_treatment<-harmonic.mean(freq.unique.combined)$harmean # Now get a count of total genotypic replication reps.total<-table(pheno[,'id']) # Get the harmonic mean of this quantity hm_total<-harmonic.mean(reps.total)$harmean # Calculate heritability as described by AEL # H2 = geno.var/(geno.var + (gxt.var/harmonic mean of treatment block replication) + (residual.var/harmonic mean of total genotype replication) ) h2<-((geno.var)/(geno.var + (gxt.var/hm_treatment) + (res/hm_total))) # This is the heritability H2<-c(H2,h2) } names(H2)<-colnames(pheno)[3:ncol(pheno)] return(H2) } # Heritability within treatment get_h2_in_treatment<-function(data){ i.treat<-unique(data$treatment) pheno<-data year<-unique(pheno[,3]) exp<-unique(pheno[,2]) pheno<-pheno[,c(7,4,9:length(colnames(pheno)))] colnames(pheno)[c(1,2)]<-c("id", "treatment") pheno[pheno == "."] <- NA colnames(pheno)[3:ncol(pheno)]<-paste(colnames(pheno)[3:ncol(pheno)] , exp, sep="_") variance.out<-c() for (t in 1:length(i.treat)) { # Create variables to store values treatment.pheno<-pheno[pheno$treatment == i.treat[t],] H2<-c() e2<-c() # For each treatment.phenotype calculate variance for(i in 3:length(colnames(treatment.pheno))){ # Use only RILs with all measurements for each treatment.phenotype cc.treatment.pheno<-treatment.pheno[complete.cases(treatment.pheno[,i]),c(1:2,i)] # Build linear model each cofactor is a random effect model<-lmer(cc.treatment.pheno[,3]~(1|id), data=cc.treatment.pheno, control=lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.rankZ = "ignore",check.nobs.vs.nRE="ignore")) # Extract variance from model object, save individual components in vector 're' and residual variance as a scalar named 'res' re<-as.numeric(VarCorr(model)) res<-attr(VarCorr(model), "sc")^2 # Extract individual components (order will remain the same) geno.var<-re[1] # Total variance is sum of all variances tot.var<-sum(re, res) # Get proportion of variance h<-geno.var/tot.var e<-res/tot.var # Append variables to a 
vector of variables H2<-c(H2,h) e2<-c(e2,e) } variance<-rbind(H2, e2) colnames(treatment.pheno)[3:length(treatment.pheno)]<-paste(i.treat[t], colnames(treatment.pheno)[3:length(treatment.pheno)], sep="_") colnames(variance)<-colnames(treatment.pheno)[3:length(treatment.pheno)] rownames(variance)<-c('Genotype', 'Error') assign(paste('variance', i.treat[t], sep="_"), variance) variance.out<-cbind(variance.out, variance) } return(variance.out) } # Total variance partition get_total_var<-function(data){ i.treat<-unique(data$treatment) pheno<-data year<-unique(pheno[,3]) exp<-unique(pheno[,2]) pheno<-pheno[,c(7,4,9:length(colnames(pheno)))] colnames(pheno)[c(1,2)]<-c("id", "treatment") pheno[pheno == "."] <- NA colnames(pheno)[3:ncol(pheno)]<-paste(colnames(pheno)[3:ncol(pheno)] , exp, sep="_") H2<-c() t2<-c() e2<-c() gxt2<-c() for (i in 3:length(colnames(pheno))){ # Get complete cases cc.pheno<-pheno[complete.cases(pheno[,i]),c(1:2,i)] # Build linear model each cofactor is a random effect model<-lmer(cc.pheno[,3]~(1|id)+(1|treatment)+(1|id:treatment), data=cc.pheno) # Extract variance from model object, save individual components in vector 're' and residual variance as a scalar named 'res' re<-as.numeric(VarCorr(model)) res<-attr(VarCorr(model), "sc")^2 # Extract individual components (order will remain the same) gxt.var<-re[1] geno.var<-re[2] treat.var<-re[3] # Total variance is sum of all variances tot.var<-sum(re, res) # Get proportion of variance h<-geno.var/tot.var t<-treat.var/tot.var e<-res/tot.var gxt<-gxt.var/tot.var # Append variables to a vector of variables H2<-c(H2,h) t2<-c(t2,t) e2<-c(e2,e) gxt2<-c(gxt2, gxt) } variance<-rbind(H2, t2, gxt2, e2) colnames(variance)<-colnames(pheno)[3:length(pheno)] rownames(variance)<-c('Genotype', 'Treatment', 'G x Treatment', 'Error') return(variance) } # Total variance partition field (includes plot) get_total_var_field<-function(data){ i.treat<-unique(data$treatment) pheno<-data year<-unique(pheno[,3]) 
exp<-unique(pheno[,2]) pheno<-pheno[,c(7,4,9:length(colnames(pheno)))] colnames(pheno)[c(1,2)]<-c("id", "treatment") pheno[pheno == "."] <- NA colnames(pheno)[3:ncol(pheno)]<-paste(colnames(pheno)[3:ncol(pheno)] , exp, sep="_") H2<-c() t2<-c() e2<-c() p2<-c() gxt2<-c() for (i in 5:length(colnames(pheno))){ # Get complete cases cc.pheno<-pheno[complete.cases(pheno[,i]),c(1:2,i)] # Build linear model each cofactor is a random effect model<-lmer(cc.pheno[,3]~(1|id)+(1|treatment)+(1|plot)+(1|id:treatment), data=cc.pheno) # Extract variance from model object, save individual components in vector 're' and residual variance as a scalar named 'res' re<-as.numeric(VarCorr(model)) res<-attr(VarCorr(model), "sc")^2 # Extract individual components (order will remain the same) gxt.var<-re[1] geno.var<-re[2] plot.var<-re[3] treat.var<-re[4] # Total variance is sum of all variances tot.var<-sum(re, res) # Get proportion of variance for all factors h<-geno.var/tot.var t<-treat.var/tot.var e<-res/tot.var gxt<-gxt.var/tot.var p<-plot.var/tot.var # Append variables to a vector of variables H2<-c(H2,h) t2<-c(t2,t) e2<-c(e2,e) gxt2<-c(gxt2, gxt) p2<-c(p2, p) } variance<-rbind(H2, t2, p2, gxt2, e2) colnames(variance)<-colnames(pheno)[3:length(pheno)] rownames(variance)<-c('Genotype', 'Treatment','Plot', 'G x Treatment', 'Error') return(variance) } loess.fit.for.h2<-function(data, trait){ barcodes<-unique(data$plantbarcode) dap_i<-sort(unique(data$dap_i)) plant_id_loess_model_fit<-c() for(b in barcodes){ print(b) temp<-data[data$plantbarcode == b,] genotype<-unique(temp$genotype) treatment<-unique(temp$treatment) #colnumber <- which(colnames(temp) %in% trait) out.loess<-loess(get(trait)~dap_i, data=temp) times = seq(from = min(temp$dap_i), to = max(temp$dap_i), by=0.1) output<-get.loess.estimates(out.loess, times, b) output<-as.data.frame(output) output<-output[output$times %in% dap_i,] output$genotype<-rep(genotype, nrow(output)) output$treatment<-rep(treatment, nrow(output)) 
colnames(output)<-c("plantbarcode", "dap_i", trait,paste(trait, ".lo" ,sep=""),paste(trait, ".hi", sep=""), paste(trait, ".slope", sep=""), 'genotype', 'treatment') output<-output[,c(1,7,8,2:6)] plant_id_loess_model_fit<-rbind(plant_id_loess_model_fit, output) } return(plant_id_loess_model_fit) } get.loess.estimates<-function(fit, times, id) { return_df<-c() predict.vals<-predict(fit, times, se=T) ids<-rep(as.character(id), length(times)) #condition<-rep(as.character(cond), length(times)) M<-predict.vals$fit M.lo<-M - predict.vals$se.fit M.hi<-M + predict.vals$se.fit slope<-c(0) for(s in 2:length(times)) { s.temp<-(M[s] - M[s-1]) / 2 slope<-c(slope, s.temp) } slope<-slope*10 return_df<-cbind(ids,times, M, M.lo, M.hi, slope) return(return_df) } ############################################## # Lets define a function to merge the QTL results based upon a single marker ############################################## merged_table<-c() unify_chr<-function(temp){ all_qtl<-sort(table(temp$marker), decreasing=T) if(length(all_qtl) > 1){ #m.name<-names(all_qtl)[1] m.name<-temp[temp$lod == max(temp$lod),'marker'] m.name<-m.name[1] ave.pos<-mean(temp[temp$marker == m.name, 'pos']) #cr<-unique(temp[temp$marker == names(all_qtl)[1],'chr']) #po<-unique(temp[temp$marker == names(all_qtl)[1],'pos']) cr<-unique(temp[temp$marker == m.name,'chr']) po<-unique(temp[temp$marker == m.name,'pos']) max.pos<-ave.pos+10 min.pos<-ave.pos-10 subset<-temp[temp$pos > min.pos & temp$pos < max.pos,] subset$marker<-rep(m.name, length(subset$marker)) subset$chr<-rep(cr, length(subset$chr)) subset$pos<-rep(po, length(subset$pos)) temp<-temp[temp$pos < min.pos | temp$pos > max.pos,] merged_table<<-rbind(merged_table, subset) unify_chr(temp) } if(length(all_qtl) == 1){ merged_table<<-rbind(merged_table, temp) #unify_chr(temp) } } unify_marker<-function(input){ chrs<-sort(unique(input$chr)) merged_table<<-c() for(ch in chrs) { temp<-input[input$chr == ch,] temp$marker<-as.character(temp$marker) 
unify_chr(temp) } return(merged_table) } ###### This is a function to get unique qtl from a qtl summary table # Basically collpase redundant QTL into 10 cM intervals remove_dup_qtl<-function(temp){ all_qtl<-sort(table(temp$marker), decreasing=T) if (length(all_qtl) == 1) { treatments<-as.character(unique(temp$treatment)) if(length(treatments) == 1) { m.names<<-c(m.names, names(all_qtl)[1]) # <<- means change the global variable (chr<<-max) changes the global variable chr to local variable max chr<<-c(chr,unique(temp[temp$marker == names(all_qtl)[1],'chr'])) pos<<-c(pos,unique(temp[temp$marker == names(all_qtl)[1],'pos'])) t<-as.character(unique(temp$treatment)) condition<<-c(condition, t) qtl_count<<-c(qtl_count, 1) max.lod<<-c(max.lod, max(temp[temp$marker == names(all_qtl)[1],'lod'], na.rm = T)) max.prop.var<<-c(max.prop.var, max(temp[temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) max.fx<<-c(max.fx, max(temp[temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) max.fx_se<<-c(max.fx_se, max(temp[temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) max.L.CI_pos<<-c(max.L.CI_pos, max(temp[temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) max.R.CI_pos<<-c(max.R.CI_pos, max(temp[temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) med.lod<<-c(med.lod, median(temp[temp$marker == names(all_qtl)[1],'lod'], na.rm = T)) med.prop.var<<-c(med.prop.var, median(temp[temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) med.fx<<-c(med.fx, median(temp[temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) med.fx_se<<-c(med.fx_se, median(temp[temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) med.L.CI_pos<<-c(med.L.CI_pos, median(temp[temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) med.R.CI_pos<<-c(med.R.CI_pos, median(temp[temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) min.lod<<-c(min.lod, min(temp[temp$marker == names(all_qtl)[1],'lod'], na.rm = T)) min.prop.var<<-c(min.prop.var, 
min(temp[temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) min.fx<<-c(min.fx, min(temp[temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) min.fx_se<<-c(min.fx_se, min(temp[temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) min.L.CI_pos<<-c(min.L.CI_pos, min(temp[temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) min.R.CI_pos<<-c(min.R.CI_pos, min(temp[temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) print(chr) print(pos) print(qtl_count) print(med.lod) } if(length(treatments) > 1){ for(t in treatments) { t.temp<-temp[temp$treatment == t,] m.names<<-c(m.names, names(all_qtl)[1]) # <<- means change the global variable (chr<<-max) changes the global variable chr to local variable max chr<<-c(chr,unique(t.temp[t.temp$marker == names(all_qtl)[1],'chr'])) pos<<-c(pos,unique(t.temp[t.temp$marker == names(all_qtl)[1],'pos'])) condition<<-c(condition, t) qtl_count<<-c(qtl_count, 1) max.lod<<-c(max.lod, max(t.temp[t.temp$marker == names(all_qtl)[1],'lod'], na.rm=T)) max.prop.var<<-c(max.prop.var, max(t.temp[t.temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) max.fx<<-c(max.fx, max(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) max.fx_se<<-c(max.fx_se, max(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) max.L.CI_pos<<-c(max.L.CI_pos, max(t.temp[t.temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) max.R.CI_pos<<-c(max.R.CI_pos, max(t.temp[t.temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) med.lod<<-c(med.lod, median(t.temp[t.temp$marker == names(all_qtl)[1],'lod'], na.rm=T)) med.prop.var<<-c(med.prop.var, median(t.temp[t.temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) med.fx<<-c(med.fx, median(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) med.fx_se<<-c(med.fx_se, median(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) med.L.CI_pos<<-c(med.L.CI_pos, median(t.temp[t.temp$marker == 
names(all_qtl)[1],'L.CI_pos'], na.rm = T)) med.R.CI_pos<<-c(med.R.CI_pos, median(t.temp[t.temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) min.lod<<-c(min.lod, min(t.temp[t.temp$marker == names(all_qtl)[1],'lod'], na.rm=T)) min.prop.var<<-c(min.prop.var, min(t.temp[t.temp$marker == names(all_qtl)[1],'prop.var'], na.rm = T)) min.fx<<-c(min.fx, min(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx'], na.rm = T)) min.fx_se<<-c(min.fx_se, min(t.temp[t.temp$marker == names(all_qtl)[1],'additive.fx_se'], na.rm = T)) min.L.CI_pos<<-c(min.L.CI_pos, min(t.temp[t.temp$marker == names(all_qtl)[1],'L.CI_pos'], na.rm = T)) min.R.CI_pos<<-c(min.R.CI_pos, min(t.temp[t.temp$marker == names(all_qtl)[1],'R.CI_pos'], na.rm = T)) print(chr) print(pos) print(qtl_count) print(med.lod) } } } if (length(all_qtl) > 1) { #name<-names(all_qtl)[1] name<-temp[temp$lod == max(temp$lod),'marker'] name<-name[1] tester<-temp[temp$marker == name,] treatments<-as.character(unique(tester$treatment)) if(length(treatments) == 1) { ave.pos<-mean(temp[temp$marker == name, 'pos']) #m.name<-names(all_qtl)[1] m.name<-name #cr<-unique(temp[temp$marker == names(all_qtl)[1],'chr']) #po<-unique(temp[temp$marker == names(all_qtl)[1],'pos']) cr<-unique(temp[temp$marker == m.name,'chr']) po<-unique(temp[temp$marker == m.name,'pos']) max.pos<-ave.pos+10 min.pos<-ave.pos-10 subset<-temp[temp$pos > min.pos & temp$pos < max.pos,] if(length(unique(subset$treatment)) == 1){ m.names<<-c(m.names, m.name) chr<<-c(chr, cr) pos<<-c(pos, po) qtl_c<-nrow(subset) x.lod<-max(subset$lod, na.rm = T) x.prop.var<-max(subset$prop.var, na.rm = T) x.fx<-max(subset$additive.fx, na.rm = T) x.fx_se<-max(subset$additive.fx_se, na.rm = T) x.L.CI_pos<-max(subset$L.CI_pos, na.rm = T) x.R.CI_pos<-max(subset$R.CI_pos, na.rm = T) m.lod<-median(subset$lod, na.rm = T) m.prop.var<-median(subset$prop.var, na.rm = T) m.fx<-median(subset$additive.fx, na.rm = T) m.fx_se<-median(subset$additive.fx_se, na.rm = T) 
m.L.CI_pos<-median(subset$L.CI_pos, na.rm = T) m.R.CI_pos<-median(subset$R.CI_pos, na.rm = T) n.lod<-min(subset$lod, na.rm = T) n.prop.var<-min(subset$prop.var, na.rm = T) n.fx<-min(subset$additive.fx, na.rm = T) n.fx_se<-min(subset$additive.fx_se, na.rm = T) n.L.CI_pos<-min(subset$L.CI_pos, na.rm = T) n.R.CI_pos<-min(subset$R.CI_pos, na.rm = T) temp<-temp[temp$pos < min.pos | temp$pos > max.pos,] print(ave.pos) print(chr) print(pos) #print(collapsed_qtl) print(med.lod) condition<<-c(condition, treatments[1]) qtl_count<<-c(qtl_count, qtl_c) max.lod<<-c(max.lod, x.lod) max.prop.var<<-c(max.prop.var, x.prop.var) max.fx<<-c(max.fx, x.fx) max.fx_se<<-c(max.fx_se, x.fx_se) max.L.CI_pos<<-c(max.L.CI_pos, x.L.CI_pos) max.R.CI_pos<<-c(max.R.CI_pos, x.R.CI_pos) med.lod<<-c(med.lod, m.lod) med.prop.var<<-c(med.prop.var, m.prop.var) med.fx<<-c(med.fx, m.fx) med.fx_se<<-c(med.fx_se, m.fx_se) med.L.CI_pos<<-c(med.L.CI_pos, m.L.CI_pos) med.R.CI_pos<<-c(med.R.CI_pos, m.R.CI_pos) min.lod<<-c(min.lod, n.lod) min.prop.var<<-c(min.prop.var, n.prop.var) min.fx<<-c(min.fx, n.fx) min.fx_se<<-c(min.fx_se, n.fx_se) min.L.CI_pos<<-c(min.L.CI_pos, n.L.CI_pos) min.R.CI_pos<<-c(min.R.CI_pos, n.R.CI_pos) remove_dup_qtl(temp) } if(length(unique(subset$treatment)) > 1){ subset.ts<-unique(subset$treatment) for (t in subset.ts) { m.names<<-c(m.names, m.name) chr<<-c(chr, cr) pos<<-c(pos, po) t.subset<-subset[subset$treatment == t,] qtl_c<-nrow(t.subset) x.lod<-max(t.subset$lod, na.rm = T) x.prop.var<-max(t.subset$prop.var, na.rm = T) x.fx<-max(t.subset$additive.fx, na.rm = T) x.fx_se<-max(t.subset$additive.fx_se, na.rm = T) x.L.CI_pos<-max(t.subset$L.CI_pos, na.rm = T) x.R.CI_pos<-max(t.subset$R.CI_pos, na.rm = T) m.lod<-median(t.subset$lod, na.rm = T) m.prop.var<-median(t.subset$prop.var, na.rm = T) m.fx<-median(t.subset$additive.fx, na.rm = T) m.fx_se<-median(t.subset$additive.fx_se, na.rm = T) m.L.CI_pos<-median(t.subset$L.CI_pos, na.rm = T) m.R.CI_pos<-median(t.subset$R.CI_pos, na.rm = T) 
n.lod<-min(t.subset$lod, na.rm = T) n.prop.var<-min(t.subset$prop.var, na.rm = T) n.fx<-min(t.subset$additive.fx, na.rm = T) n.fx_se<-min(t.subset$additive.fx_se, na.rm = T) n.L.CI_pos<-min(t.subset$L.CI_pos, na.rm = T) n.R.CI_pos<-min(t.subset$R.CI_pos, na.rm = T) print(ave.pos) print(chr) print(pos) #print(collapsed_qtl) print(med.lod) condition<<-c(condition, t) qtl_count<<-c(qtl_count, qtl_c) max.lod<<-c(max.lod, x.lod) max.prop.var<<-c(max.prop.var, x.prop.var) max.fx<<-c(max.fx, x.fx) max.fx_se<<-c(max.fx_se, x.fx_se) max.L.CI_pos<<-c(max.L.CI_pos, x.L.CI_pos) max.R.CI_pos<<-c(max.R.CI_pos, x.R.CI_pos) med.lod<<-c(med.lod, m.lod) med.prop.var<<-c(med.prop.var, m.prop.var) med.fx<<-c(med.fx, m.fx) med.fx_se<<-c(med.fx_se, m.fx_se) med.L.CI_pos<<-c(med.L.CI_pos, m.L.CI_pos) med.R.CI_pos<<-c(med.R.CI_pos, m.R.CI_pos) min.lod<<-c(min.lod, n.lod) min.prop.var<<-c(min.prop.var, n.prop.var) min.fx<<-c(min.fx, n.fx) min.fx_se<<-c(min.fx_se, n.fx_se) min.L.CI_pos<<-c(min.L.CI_pos, n.L.CI_pos) min.R.CI_pos<<-c(min.R.CI_pos, n.R.CI_pos) } temp<-temp[temp$pos < min.pos | temp$pos > max.pos,] remove_dup_qtl(temp) } } if(length(treatments) > 1) { #for (t in treatments) { ave.pos<-mean(temp[temp$marker == name, 'pos']) #m.name<-names(all_qtl)[1] #cr<-unique(temp[temp$marker == names(all_qtl)[1],'chr']) #po<-unique(temp[temp$marker == names(all_qtl)[1],'pos']) m.name<-name cr<-unique(temp[temp$marker == m.name,'chr']) po<-unique(temp[temp$marker == m.name,'pos']) max.pos<-ave.pos+10 min.pos<-ave.pos-10 subset<-temp[temp$pos > min.pos & temp$pos < max.pos,] subset.ts<-unique(subset$treatment) for (t in subset.ts) { m.names<<-c(m.names, m.name) chr<<-c(chr, cr) pos<<-c(pos, po) t.subset<-subset[subset$treatment == t,] qtl_c<-nrow(t.subset) x.lod<-max(t.subset$lod, na.rm = T) x.prop.var<-max(t.subset$prop.var, na.rm = T) x.fx<-max(t.subset$additive.fx, na.rm = T) x.fx_se<-max(t.subset$additive.fx_se, na.rm = T) x.L.CI_pos<-max(t.subset$L.CI_pos, na.rm = T) 
x.R.CI_pos<-max(t.subset$R.CI_pos, na.rm = T) m.lod<-median(t.subset$lod, na.rm = T) m.prop.var<-median(t.subset$prop.var, na.rm = T) m.fx<-median(t.subset$additive.fx, na.rm = T) m.fx_se<-median(t.subset$additive.fx_se, na.rm = T) m.L.CI_pos<-median(t.subset$L.CI_pos, na.rm = T) m.R.CI_pos<-median(t.subset$R.CI_pos, na.rm = T) n.lod<-min(t.subset$lod, na.rm = T) n.prop.var<-min(t.subset$prop.var, na.rm = T) n.fx<-min(t.subset$additive.fx, na.rm = T) n.fx_se<-min(t.subset$additive.fx_se, na.rm = T) n.L.CI_pos<-min(t.subset$L.CI_pos, na.rm = T) n.R.CI_pos<-min(t.subset$R.CI_pos, na.rm = T) print(ave.pos) print(chr) print(pos) #print(collapsed_qtl) print(med.lod) condition<<-c(condition, t) qtl_count<<-c(qtl_count, qtl_c) max.lod<<-c(max.lod, x.lod) max.prop.var<<-c(max.prop.var, x.prop.var) max.fx<<-c(max.fx, x.fx) max.fx_se<<-c(max.fx_se, x.fx_se) max.L.CI_pos<<-c(max.L.CI_pos, x.L.CI_pos) max.R.CI_pos<<-c(max.R.CI_pos, x.R.CI_pos) med.lod<<-c(med.lod, m.lod) med.prop.var<<-c(med.prop.var, m.prop.var) med.fx<<-c(med.fx, m.fx) med.fx_se<<-c(med.fx_se, m.fx_se) med.L.CI_pos<<-c(med.L.CI_pos, m.L.CI_pos) med.R.CI_pos<<-c(med.R.CI_pos, m.R.CI_pos) min.lod<<-c(min.lod, n.lod) min.prop.var<<-c(min.prop.var, n.prop.var) min.fx<<-c(min.fx, n.fx) min.fx_se<<-c(min.fx_se, n.fx_se) min.L.CI_pos<<-c(min.L.CI_pos, n.L.CI_pos) min.R.CI_pos<<-c(min.R.CI_pos, n.R.CI_pos) } temp<-temp[temp$pos < min.pos | temp$pos > max.pos,] remove_dup_qtl(temp) } } } condense_qtl<-function(input){ chrs<-sort(unique(input$chr)) m.names<<-c() chr<<-c() pos<<-c() condition<<-c() qtl_count<<-c() med.lod<<-c() med.prop.var<<-c() med.fx<<-c() med.fx_se<<-c() med.L.CI_pos<<-c() med.R.CI_pos<<-c() max.lod<<-c() max.prop.var<<-c() max.fx<<-c() max.fx_se<<-c() max.L.CI_pos<<-c() max.R.CI_pos<<-c() min.lod<<-c() min.prop.var<<-c() min.fx<<-c() min.fx_se<<-c() min.L.CI_pos<<-c() min.R.CI_pos<<-c() for(ch in chrs) { temp<-input[input$chr == ch,] temp$marker<-as.character(temp$marker) remove_dup_qtl(temp) } 
input.collapsed<-as.data.frame(cbind(m.names, chr, pos, condition, qtl_count, max.lod, max.prop.var, max.fx, max.fx_se, max.L.CI_pos, max.R.CI_pos,med.lod, med.prop.var, med.fx, med.fx_se, med.L.CI_pos, med.R.CI_pos,min.lod, min.prop.var, min.fx, min.fx_se, min.L.CI_pos, min.R.CI_pos)) return(input.collapsed) } ############################################## # Lets define a function to make common QTL plots ############################################## make_qtl_common_plot<-function(all.qtl, plotname) { all.qtl$chr<-factor(all.qtl$chr, levels=c(1,2,3,4,5,6,7,8,9)) fx.size<-all.qtl$additive.fx fx.size<-as.numeric(as.character(fx.size)) plot.char<-c() for(i in 1:length(fx.size)){ if (fx.size[i] > 0) {plot.char<-c(plot.char, '24')} if (fx.size[i] < 0) {plot.char<-c(plot.char, '25')} } all.qtl$plot.char<-plot.char all.qtl$plot.char<-as.factor(all.qtl$plot.char) all.qtl$group<-paste(all.qtl$exp, all.qtl$year, all.qtl$treatment, sep="_") treatments<-as.character(all.qtl$treatment) treatment.name<-unique(treatments) plot.col<-c() for(i in 1:length(treatments)){ logical<-treatments[i] == treatment.name col<-which(logical, arr.ind=TRUE) plot.col<-c(plot.col, col) } all.qtl$plot.col<-plot.col pdf(plotname) p<-ggplot() + geom_point(data = all.qtl, aes(x = pos, y = prop.var, shape=plot.char, colour=as.character(plot.col), fill=as.character(plot.col)),size=3, alpha=0.5) + geom_blank(data = blank_data, aes(x = x, y = y)) + facet_wrap(~chr, scales = "free_x") + expand_limits(x = 0) + scale_x_continuous(expand = c(0, 0)) + theme_bw() + scale_shape_manual(values=c(24,25)) print(p + scale_color_manual(values=c("1" = "orange", "2" = "blue")) + scale_fill_manual(values=c("1" = "orange", "2" = "blue")) + ylab("% Variance") + xlab("Genome Position") + theme(legend.position = "none")) dev.off() } make_qtl_common_plot_diff<-function(all.qtl, plotname) { all.qtl$chr<-factor(all.qtl$chr, levels=c(1,2,3,4,5,6,7,8,9)) fx.size<-all.qtl$additive.fx fx.size<-as.numeric(as.character(fx.size)) 
plot.char<-c() for(i in 1:length(fx.size)){ if (fx.size[i] > 0) {plot.char<-c(plot.char, '24')} if (fx.size[i] < 0) {plot.char<-c(plot.char, '25')} } all.qtl$plot.char<-plot.char all.qtl$plot.char<-as.factor(all.qtl$plot.char) all.qtl$group<-paste(all.qtl$exp, all.qtl$year, all.qtl$treatment, sep="_") treatments<-as.character(all.qtl$treatment) treatment.name<-unique(treatments) plot.col<-c() for(i in 1:length(treatments)){ logical<-treatments[i] == treatment.name col<-which(logical, arr.ind=TRUE) plot.col<-c(plot.col, col) } all.qtl$plot.col<-plot.col pdf(plotname) p<-ggplot() + geom_point(data = all.qtl, aes(x = pos, y = prop.var, shape=plot.char, colour=as.character(plot.col), fill=as.character(plot.col)),size=3, alpha=0.5) + geom_blank(data = blank_data, aes(x = x, y = y)) + facet_wrap(~chr, scales = "free_x") + expand_limits(x = 0) + scale_x_continuous(expand = c(0, 0)) + theme_bw() + scale_shape_manual(values=c(24,25)) print(p + scale_color_manual(values=c("1" = "orange", "2" = "blue", "3" = "grey")) + scale_fill_manual(values=c("1" = "orange", "2" = "blue", "3" = "grey")) + ylab("% Variance") + xlab("Genome Position") + theme(legend.position = "none")) dev.off() } ############################################## # Lets define a function to calculated predicted values and residuals from a major axis model ############################################## get.lmodel2.values<-function(model, rma){ if(rma == 'N') { # Get model intercepts ols.int<-model$regression.results$Intercept[1] ma.int<-model$regression.results$Intercept[2] sma.int<-model$regression.results$Intercept[3] # Get model slope ols.slope<-model$regression.results$Slope[1] ma.slope<-model$regression.results$Slope[2] sma.slope<-model$regression.results$Slope[3] # Get values you specified as X x<-model$x y<-model$y # Get predicted values y.ols.pred<-(ols.slope * x) + ols.int y.ma.pred<-(ma.slope * x) + ma.int y.sma.pred<-(sma.slope * x) + sma.int # Get residuals from the fit y.ols.res<-(y-y.ols.pred) 
y.ma.res<-(y-y.ma.pred) y.sma.res<-(y-y.sma.pred) # Format results out<-cbind(x,y,y.ols.pred,y.ma.pred,y.sma.pred,y.ols.res,y.ma.res,y.sma.res) colnames(out)<-c('x','y','ols.pred','ma.pred','sma.pred','ols.res','ma.res','sma.res') } if(rma == 'N') { # Get model intercepts ols.int<-model$regression.results$Intercept[1] ma.int<-model$regression.results$Intercept[2] sma.int<-model$regression.results$Intercept[3] rma.int<-model$regression.results$Intercept[4] # Get model slope ols.slope<-model$regression.results$Slope[1] ma.slope<-model$regression.results$Slope[2] sma.slope<-model$regression.results$Slope[3] rma.slope<-model$regression.results$Slope[4] # Get values you specified as X x<-model$x y<-model$y # Get predicted values y.ols.pred<-(ols.slope * x) + ols.int y.ma.pred<-(ma.slope * x) + ma.int y.sma.pred<-(sma.slope * x) + sma.int y.rma.pred<-(rma.slope * x) + rma.int # Get residuals from the fit y.ols.res<-(y.ols.pred-y) y.ma.res<-(y.ma.pred-y) y.sma.res<-(y.sma.pred-y) y.rma.res<-(y.rma.pred-y) # Format results out<-cbind(x,y,y.ols.pred,y.ma.pred,y.sma.pred,y.rma.pred,y.ols.res,y.ma.res,y.sma.res,y.rma.res) colnames(out)<-c('x','y','ols.pred','ma.pred','sma.pred','rma.pred','ols.res','ma.res','sma.res','rma.res') } return(out) } setwd(home.dir) save.image('analysis_fxns.Rdata')
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_data.R \name{check_data} \alias{check_data} \title{Function check_data} \usage{ check_data(projectpath) } \description{ Function check_data }
/man/check_data.Rd
no_license
haihaba/gcms
R
false
true
226
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_data.R \name{check_data} \alias{check_data} \title{Function check_data} \usage{ check_data(projectpath) } \description{ Function check_data }
library(rmr) library(tm) library(topicmodels) rmr.options.set(backend="local") stop_file = file("~/Data/stopwords.txt") stopwords = readLines(stop_file) close(stop_file) stopexpr = do.call(paste, args=as.list(c(stopwords, sep="\\b|\\b"))) time_extractor = function(tweet) { floor(unclass(as.POSIXct( tweet["created_at"], format='%a, %d %b %Y %X %z', tz="UTC" ))[1]/3600) } tweet_mapper = function(null,tweet_text) { tweet = fromJSON(tweet_text) tweet_time_frame = time_extractor(tweet) tweet_text = gsub(pattern=",|'", x=tweet["text"], perl=T, replacement="") tweet_text = gsub(pattern="-|\\?|\\.|;", x=tweet_text, perl=T, replacement="") tweet_text = gsub(pattern=stopexpr, x=tweet_text, perl=T, replacement="") tweet_text = gsub(pattern="^\\s+|\\s+$", x = tweet_text, perl=T, replacement="") tweet_text = gsub(pattern="\\s+\\w\\s", x = tweet_text, perl=T, replacement="") tweet_text = gsub(pattern="\\s+", x = tweet_text, perl=T, replacement=" ") if(nchar(tweet_text)>20) keyval(tweet_time_frame, tweet_text) else return() } error_func= function(e) print(e) topics_reducer = function(time,tweets){ # create a corpus using the functions Corpus() and VectorSource() # create a document term matrix using DocumentTermMatrix() # create a topic model with 3 topics using LDA() # return a keyval using the time as the key and topic model # as the value } tweet_timeframes = from.dfs(mapreduce("~/Data/small_sample_twitter_data", input.format="text", map=tweet_mapper, reduce=topics_reducer )) n_results = 5 terms = lapply(tweet_timeframes, function(kv) terms(kv$val, n_results)) topics = lapply(tweet_timeframes, function(kv){ topics = posterior(kv$val)$topics topics = topics[order(topics, decreasing=T)] topics[1:n_results] }) barplot(unlist(topics)[1:30]-.99, names.arg=unlist(terms)[1:30], col=rainbow(5))
/Fill-In/sliding-window.R
no_license
RodavLasIlad/rhadoop-examples
R
false
false
2,037
r
library(rmr) library(tm) library(topicmodels) rmr.options.set(backend="local") stop_file = file("~/Data/stopwords.txt") stopwords = readLines(stop_file) close(stop_file) stopexpr = do.call(paste, args=as.list(c(stopwords, sep="\\b|\\b"))) time_extractor = function(tweet) { floor(unclass(as.POSIXct( tweet["created_at"], format='%a, %d %b %Y %X %z', tz="UTC" ))[1]/3600) } tweet_mapper = function(null,tweet_text) { tweet = fromJSON(tweet_text) tweet_time_frame = time_extractor(tweet) tweet_text = gsub(pattern=",|'", x=tweet["text"], perl=T, replacement="") tweet_text = gsub(pattern="-|\\?|\\.|;", x=tweet_text, perl=T, replacement="") tweet_text = gsub(pattern=stopexpr, x=tweet_text, perl=T, replacement="") tweet_text = gsub(pattern="^\\s+|\\s+$", x = tweet_text, perl=T, replacement="") tweet_text = gsub(pattern="\\s+\\w\\s", x = tweet_text, perl=T, replacement="") tweet_text = gsub(pattern="\\s+", x = tweet_text, perl=T, replacement=" ") if(nchar(tweet_text)>20) keyval(tweet_time_frame, tweet_text) else return() } error_func= function(e) print(e) topics_reducer = function(time,tweets){ # create a corpus using the functions Corpus() and VectorSource() # create a document term matrix using DocumentTermMatrix() # create a topic model with 3 topics using LDA() # return a keyval using the time as the key and topic model # as the value } tweet_timeframes = from.dfs(mapreduce("~/Data/small_sample_twitter_data", input.format="text", map=tweet_mapper, reduce=topics_reducer )) n_results = 5 terms = lapply(tweet_timeframes, function(kv) terms(kv$val, n_results)) topics = lapply(tweet_timeframes, function(kv){ topics = posterior(kv$val)$topics topics = topics[order(topics, decreasing=T)] topics[1:n_results] }) barplot(unlist(topics)[1:30]-.99, names.arg=unlist(terms)[1:30], col=rainbow(5))
## returns significance code given the p-value getsigcode <- function(pval){ if (pval<1) sigcode<-"" if (pval<0.1) sigcode<-"." if (pval<0.05) sigcode<-"*" if (pval<0.01) sigcode<-"**" if (pval<0.001) sigcode<-"***" if (pval<0.0001) sigcode<-"****" return(sigcode) }
/analysis/utils_R/getsigcode.R
no_license
Karagul/Subjective_Speaker_Characteristics_RMarkdown
R
false
false
294
r
## returns significance code given the p-value getsigcode <- function(pval){ if (pval<1) sigcode<-"" if (pval<0.1) sigcode<-"." if (pval<0.05) sigcode<-"*" if (pval<0.01) sigcode<-"**" if (pval<0.001) sigcode<-"***" if (pval<0.0001) sigcode<-"****" return(sigcode) }
#!/usr/bin/env Rscript x = matrix(1:6, nrow=2, ncol=3, byrow=TRUE) x y = matrix(1:2, ncol=1) y z = matrix(3:1, ncol=3) z # column concatenation cbind(x,y) # row concatenation rbind(x,z) A = matrix(1:4, 2, 2) B = matrix(c(0, 2, -10, 2), 2, 2) v = c(1,2) # element wise multiplication A*B # matrix product A%*%B # sinus element wise sin(A) # exponential element wise exp(B) # matrix transposition t(x) # identity matrix diag(5) # diagonal matrix with vector values in it diag(v) # cross product crossprod(x, y) # determinant det(x) # singular values decomposition svd(x) # matrix diagonalisation eigen(x) # matrix inversion solve(x) # linear system solving solve(A, B) # cholesky decomposition chol(y) # qr decomposition qr(y)
/Openclassrooms/matrix.r
no_license
MarcPartensky/R
R
false
false
744
r
#!/usr/bin/env Rscript x = matrix(1:6, nrow=2, ncol=3, byrow=TRUE) x y = matrix(1:2, ncol=1) y z = matrix(3:1, ncol=3) z # column concatenation cbind(x,y) # row concatenation rbind(x,z) A = matrix(1:4, 2, 2) B = matrix(c(0, 2, -10, 2), 2, 2) v = c(1,2) # element wise multiplication A*B # matrix product A%*%B # sinus element wise sin(A) # exponential element wise exp(B) # matrix transposition t(x) # identity matrix diag(5) # diagonal matrix with vector values in it diag(v) # cross product crossprod(x, y) # determinant det(x) # singular values decomposition svd(x) # matrix diagonalisation eigen(x) # matrix inversion solve(x) # linear system solving solve(A, B) # cholesky decomposition chol(y) # qr decomposition qr(y)
\name{overdispersed.binomial.ratio} \alias{Quasibin.ratio} \alias{Betabin.ratio} \alias{ODbin.ratio} \title{Confidence intervals for risk ratios of overdispersed binomial data} \description{Calculate approximate confidence intervals for ratios of proportions (risk ratios) of two samples. Three functions are available for intervals assuming the beta binomial distribution, based on a generalized assumption of overdispersion estimated from the residuals, and based on the quasibinomial assumption using a generalized linear model.} \usage{ Betabin.ratio(x, y, conf.level=0.95, alternative="two.sided", CImethod=c("FBB", "LBB"), iccpool=FALSE, resbin=FALSE) ODbin.ratio(x, y, conf.level=0.95, alternative="two.sided", CImethod=c("FOD", "LOD"), varmethod=c("res", "san"), resbin=FALSE) Quasibin.ratio(x, y, conf.level = 0.95, alternative = "two.sided", grid = NULL) } \arguments{ \item{x}{ a matrix or data.frame of the first sample, with two columns giving the counts of successes and failures in the two columns; each row should correspond to one experimental or observational unit; first column will be treated as 'success'} \item{y}{ a matrix or data.frame of the second sample, with two columns giving the counts of successes and failures in the two columns; each row should correspond to one experimental or observational unit; first column will be treated as 'success'} \item{conf.level}{ a single numeric value between 0 and 1, the confidence level} \item{alternative}{a character string, \code{"two.sided"} for two-sided intervals, \code{"less"} for upper limits, \code{"greater"} for lower limits only} \item{CImethod}{a character string, chossing between available methods for interval computation: in \code{betabin.ratio}: assuming the beta binomial distribution \code{"FBB"} invokes the Fieller-Bailey interval and \code{"LBB"} invokes the delta-method on the log scale (Lui et al. 
2000); in \code{ODbin.ratio}: without particular assumptions w.r.t to the distibution, \code{"FOD"} invokes the Fieller-Bailey interval and \code{"LOD"} invokes the delta-method on the log scale as described by Zaihra and Paul (2010)} \item{iccpool}{logical, if \code{FALSE}, a separate intra-class-correlation coefficient is estimated for each sample (assuming different levels of overdispersion in the two samples); if \code{TRUE}, a joint intra-class-correlation coefficient (assuming equal ICCs in the two samples)} \item{resbin}{logical: if \code{FALSE}, underdispersion is allowed in estimation; if \code{TRUE}, underdispersion not allowed: when sample estimates suggest underdispersion, the variance is fixed at the binommial variance} \item{varmethod}{a character string specifying te type of variance estimation in \code{ODbin.ratio}: \code{"res"} is the residual variance, \code{"san"} corresponds to a sandwich estimator, details see (Zaihra and Paul, 2010)} \item{grid}{optional, a numeric vector to be supplied to the profiling used internally in \code{quasibin.ratio} to obtain profile deviance intervals for each samples proportion on the logit-scale.} } \details{ The methods in \code{betabin.ratio} are described by Lui et al., (2000), where different estimates for ICC (and thus overdispersion) are computed for each sample. For small sample size or extreme proportions, one may restrict the variance to that of the binomial distribution and/or use a pooled estimator of ICC for a joint model of overdispersion in the two samples. The methods in \code{ODbin.ratio} are described by Zaihra and Paul (2010), where different estimates for overdispersion are computed for each sample. Zaihra and Paul refer to two different methods of estimating the variance (MR, MS), here referred to as "res", "san", respectively. As above one may restrict the variance to that of the binomial distribution. 
The method to compute intervals under the quasibinomial assumptions in \code{quasibin.ratio} uses a quasibinomial generalized linear model to obtain profile deviance intervals (e.g. Bates and Watts, 1988; relying on package mcprofile), and then applies the MOVERR method by Donner and Zhou(2012); experimental! } \value{ a list with elements \item{conf.int }{confidence limits for the ratio of proprotions in x over that in y} \item{estimate }{the point estimate for the ratio of proprotions in x over that in y} } \references{ \emph{Lui K-L, Mayer JA, Eckhardt L (2000):} Confidence intervals for the risk ratio under cluster sampling based on the beta-binomial model. Statistics in Medicine 19, 2933-2942. \emph{Zaihra, T and Paul, S (2010):} Interval Estimation of Some Epidemiological Measures of Association. The International Journal of Biostatistics. 6 (1), Article 35. \emph{Bates and Watts(1988):} Nonlinear Regression Analysis and Its Applications, Wiley, Ch.6 \emph{Donner and Zou (2012):} Closed-form confidence intervals for functions of the normal mean and standard deviation. Statistical Methods in Medical Research 21(4):347-359. } \author{ Frank Schaarschmidt } \note{ The method in \code{quasibin.ratio} is experimental. } \examples{ # Toxicologoical data: Number of Pups alive four days after birth (16 litters of rats) # Original source: Weil, 1970: Selection of valid number[...]. # Food and Cosmetics, Toxicology, 8, 177-182. # Cited from Zaihra and Paul(2010): Interval Estimation of Some Epidemiological # Measures of Association. Int. J Biostatistics, 6(1), Article 35. 
mchem = c(12, 11, 10, 9, 11, 10, 10, 9, 9, 5, 9, 7, 10, 6, 10, 7) xchem = c(12, 11, 10, 9, 10, 9, 9, 8, 8, 4, 7, 4, 5, 3, 3, 0) dchem <- cbind("alive"=xchem, "dead"=mchem-xchem) mcon = c(13, 12, 9, 9, 8, 8, 13, 12, 10, 10, 9, 13, 5, 7, 10, 10) xcon = c(13, 12, 9, 9, 8, 8, 12, 11, 9, 9, 8, 11, 4, 5, 7, 7) dcon <- cbind("alive"=xcon, "dead"=mcon-xcon) # Zaihra and Paul report: MR2: [0.714; 1.034] ODbin.ratio(x=dchem, y=dcon, CImethod="LOD", resbin=FALSE) # Zaihra and Paul report: MR4: [0.710; 1.029] ODbin.ratio(x=dchem, y=dcon, CImethod="FOD", resbin=FALSE) Betabin.ratio(x=dchem, y=dcon, CImethod="FBB", iccpool=TRUE, resbin=TRUE) Quasibin.ratio(x=dchem, y=dcon) # Solar protection data: intervention and control group (Mayer, 1997:) # Number of children with adequate level of solar protection # Source: Mayer, Slymen, Eckhardt et al.(1997): Reducing ultraviolat # radiation exposure in children. Preventive Medicine 26, 845-861. # Cited from: Lui et al. (2000) mint=c(3,2,2,5,4,3,1,2,2,2,1,3,1,3,2,2,6,2,4,2,2,2,2,1,1,1,1,1,1) xint=c(1,1,1,0,1,2,1,2,2,1,1,2,1,2,2,0,0,0,0,1,2,1,1,1,1,0,0,0,0) dint <- cbind("adequate"=xint, "non-adequate"=mint-xint) mcont=c(2,4,3,2,3,4,4,2,2,3,2,2,4,3,2,3,1,1,2,2,2,3,3,4,1,1,1,1,1) xcont=c(0,0,2,2,0,4,2,1,1,3,2,1,1,3,2,3,1,0,1,2,1,1,2,4,1,1,1,0,0) dcont <- cbind("adequate"=xcont, "non-adequate"=mcont-xcont) # Lui et al.(2000) report for the Fieller-Bailey method # with pooled ICC: 905% CI = [0.964; 2.281] Betabin.ratio(x=dcont, y=dint, conf.level=0.95, alternative="two.sided", CImethod="FBB", iccpool=TRUE, resbin=FALSE) # and for the Log-scale delta method with pooled ICC: # 95% CI = [0.954; 2.248] Betabin.ratio(x=dcont, y=dint, conf.level=0.95, alternative="two.sided", CImethod="LBB", iccpool=TRUE, resbin=FALSE) ODbin.ratio(x=dcont, y=dint, conf.level=0.95, alternative="two.sided", CImethod="FOD", resbin=TRUE) Quasibin.ratio(x=dcont, y=dint, conf.level = 0.95, alternative = "two.sided") } \keyword{ htest }
/man/Overdispersed.binomial.ratio.Rd
no_license
cran/pairwiseCI
R
false
false
7,581
rd
\name{overdispersed.binomial.ratio} \alias{Quasibin.ratio} \alias{Betabin.ratio} \alias{ODbin.ratio} \title{Confidence intervals for risk ratios of overdispersed binomial data} \description{Calculate approximate confidence intervals for ratios of proportions (risk ratios) of two samples. Three functions are available for intervals assuming the beta binomial distribution, based on a generalized assumption of overdispersion estimated from the residuals, and based on the quasibinomial assumption using a generalized linear model.} \usage{ Betabin.ratio(x, y, conf.level=0.95, alternative="two.sided", CImethod=c("FBB", "LBB"), iccpool=FALSE, resbin=FALSE) ODbin.ratio(x, y, conf.level=0.95, alternative="two.sided", CImethod=c("FOD", "LOD"), varmethod=c("res", "san"), resbin=FALSE) Quasibin.ratio(x, y, conf.level = 0.95, alternative = "two.sided", grid = NULL) } \arguments{ \item{x}{ a matrix or data.frame of the first sample, with two columns giving the counts of successes and failures in the two columns; each row should correspond to one experimental or observational unit; first column will be treated as 'success'} \item{y}{ a matrix or data.frame of the second sample, with two columns giving the counts of successes and failures in the two columns; each row should correspond to one experimental or observational unit; first column will be treated as 'success'} \item{conf.level}{ a single numeric value between 0 and 1, the confidence level} \item{alternative}{a character string, \code{"two.sided"} for two-sided intervals, \code{"less"} for upper limits, \code{"greater"} for lower limits only} \item{CImethod}{a character string, chossing between available methods for interval computation: in \code{betabin.ratio}: assuming the beta binomial distribution \code{"FBB"} invokes the Fieller-Bailey interval and \code{"LBB"} invokes the delta-method on the log scale (Lui et al. 
2000); in \code{ODbin.ratio}: without particular assumptions w.r.t to the distibution, \code{"FOD"} invokes the Fieller-Bailey interval and \code{"LOD"} invokes the delta-method on the log scale as described by Zaihra and Paul (2010)} \item{iccpool}{logical, if \code{FALSE}, a separate intra-class-correlation coefficient is estimated for each sample (assuming different levels of overdispersion in the two samples); if \code{TRUE}, a joint intra-class-correlation coefficient (assuming equal ICCs in the two samples)} \item{resbin}{logical: if \code{FALSE}, underdispersion is allowed in estimation; if \code{TRUE}, underdispersion not allowed: when sample estimates suggest underdispersion, the variance is fixed at the binommial variance} \item{varmethod}{a character string specifying te type of variance estimation in \code{ODbin.ratio}: \code{"res"} is the residual variance, \code{"san"} corresponds to a sandwich estimator, details see (Zaihra and Paul, 2010)} \item{grid}{optional, a numeric vector to be supplied to the profiling used internally in \code{quasibin.ratio} to obtain profile deviance intervals for each samples proportion on the logit-scale.} } \details{ The methods in \code{betabin.ratio} are described by Lui et al., (2000), where different estimates for ICC (and thus overdispersion) are computed for each sample. For small sample size or extreme proportions, one may restrict the variance to that of the binomial distribution and/or use a pooled estimator of ICC for a joint model of overdispersion in the two samples. The methods in \code{ODbin.ratio} are described by Zaihra and Paul (2010), where different estimates for overdispersion are computed for each sample. Zaihra and Paul refer to two different methods of estimating the variance (MR, MS), here referred to as "res", "san", respectively. As above one may restrict the variance to that of the binomial distribution. 
The method to compute intervals under the quasibinomial assumptions in \code{quasibin.ratio} uses a quasibinomial generalized linear model to obtain profile deviance intervals (e.g. Bates and Watts, 1988; relying on package mcprofile), and then applies the MOVERR method by Donner and Zhou(2012); experimental! } \value{ a list with elements \item{conf.int }{confidence limits for the ratio of proprotions in x over that in y} \item{estimate }{the point estimate for the ratio of proprotions in x over that in y} } \references{ \emph{Lui K-L, Mayer JA, Eckhardt L (2000):} Confidence intervals for the risk ratio under cluster sampling based on the beta-binomial model. Statistics in Medicine 19, 2933-2942. \emph{Zaihra, T and Paul, S (2010):} Interval Estimation of Some Epidemiological Measures of Association. The International Journal of Biostatistics. 6 (1), Article 35. \emph{Bates and Watts(1988):} Nonlinear Regression Analysis and Its Applications, Wiley, Ch.6 \emph{Donner and Zou (2012):} Closed-form confidence intervals for functions of the normal mean and standard deviation. Statistical Methods in Medical Research 21(4):347-359. } \author{ Frank Schaarschmidt } \note{ The method in \code{quasibin.ratio} is experimental. } \examples{ # Toxicologoical data: Number of Pups alive four days after birth (16 litters of rats) # Original source: Weil, 1970: Selection of valid number[...]. # Food and Cosmetics, Toxicology, 8, 177-182. # Cited from Zaihra and Paul(2010): Interval Estimation of Some Epidemiological # Measures of Association. Int. J Biostatistics, 6(1), Article 35. 
mchem = c(12, 11, 10, 9, 11, 10, 10, 9, 9, 5, 9, 7, 10, 6, 10, 7) xchem = c(12, 11, 10, 9, 10, 9, 9, 8, 8, 4, 7, 4, 5, 3, 3, 0) dchem <- cbind("alive"=xchem, "dead"=mchem-xchem) mcon = c(13, 12, 9, 9, 8, 8, 13, 12, 10, 10, 9, 13, 5, 7, 10, 10) xcon = c(13, 12, 9, 9, 8, 8, 12, 11, 9, 9, 8, 11, 4, 5, 7, 7) dcon <- cbind("alive"=xcon, "dead"=mcon-xcon) # Zaihra and Paul report: MR2: [0.714; 1.034] ODbin.ratio(x=dchem, y=dcon, CImethod="LOD", resbin=FALSE) # Zaihra and Paul report: MR4: [0.710; 1.029] ODbin.ratio(x=dchem, y=dcon, CImethod="FOD", resbin=FALSE) Betabin.ratio(x=dchem, y=dcon, CImethod="FBB", iccpool=TRUE, resbin=TRUE) Quasibin.ratio(x=dchem, y=dcon) # Solar protection data: intervention and control group (Mayer, 1997:) # Number of children with adequate level of solar protection # Source: Mayer, Slymen, Eckhardt et al.(1997): Reducing ultraviolat # radiation exposure in children. Preventive Medicine 26, 845-861. # Cited from: Lui et al. (2000) mint=c(3,2,2,5,4,3,1,2,2,2,1,3,1,3,2,2,6,2,4,2,2,2,2,1,1,1,1,1,1) xint=c(1,1,1,0,1,2,1,2,2,1,1,2,1,2,2,0,0,0,0,1,2,1,1,1,1,0,0,0,0) dint <- cbind("adequate"=xint, "non-adequate"=mint-xint) mcont=c(2,4,3,2,3,4,4,2,2,3,2,2,4,3,2,3,1,1,2,2,2,3,3,4,1,1,1,1,1) xcont=c(0,0,2,2,0,4,2,1,1,3,2,1,1,3,2,3,1,0,1,2,1,1,2,4,1,1,1,0,0) dcont <- cbind("adequate"=xcont, "non-adequate"=mcont-xcont) # Lui et al.(2000) report for the Fieller-Bailey method # with pooled ICC: 905% CI = [0.964; 2.281] Betabin.ratio(x=dcont, y=dint, conf.level=0.95, alternative="two.sided", CImethod="FBB", iccpool=TRUE, resbin=FALSE) # and for the Log-scale delta method with pooled ICC: # 95% CI = [0.954; 2.248] Betabin.ratio(x=dcont, y=dint, conf.level=0.95, alternative="two.sided", CImethod="LBB", iccpool=TRUE, resbin=FALSE) ODbin.ratio(x=dcont, y=dint, conf.level=0.95, alternative="two.sided", CImethod="FOD", resbin=TRUE) Quasibin.ratio(x=dcont, y=dint, conf.level = 0.95, alternative = "two.sided") } \keyword{ htest }
## Run Stan: fit a sequence of Bayesian survival models on the METABRIC
## breast-cancer data (null, multilevel null, clinical, genomic and
## clinico-genomic) and compare them via approximate leave-one-out
## cross-validation with the loo package.
# !diagnostics off
library(dplyr)
library(readr)
library(survival)
library(rstan)
library(loo)
library(caret)
library(Biobase)

# Load the clinical data (md) and genomic expression data (gd).
# NOTE(review): the two absolute C:/ reads are immediately overwritten by the
# relative-path reads that follow, so only the relative files take effect.
md <- read_rds("C:/RFactory/bymetabric_files/rdsmetabric/Med_Data_Clean.rds")
gd <- read_rds("C:/RFactory/bymetabric_files/rdsmetabric/Gen_Data.rds")
md <- read_rds("Med_Data_Clean.rds")
gd <- read_rds("Gen_Data.rds")
# Supplies the gen_stan_data0..4() and gen_inits0..4() helpers used below --
# presumably Stan data-list builders and initial-value generators; confirm.
load("Gen_data_fun.Rdata")

# ---- Null model (no covariates) ----
stan_file_null <- "bybrca/stan/null.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)  # cache compiled models to avoid recompiling
nChain <- 4
stannull <- rstan::stan(stan_file_null,
                        data = gen_stan_data0(md),
                        cores = min(nChain, parallel::detectCores()),
                        chains = nChain,
                        iter = 1000,
                        init = gen_inits0())
# Pointwise log-likelihood draws -> PSIS-LOO estimate of predictive fit.
log_liknull <- loo::extract_log_lik(stannull, parameter_name = "log_lik")
loonull <- loo::loo(log_liknull)
print(loonull)
saveRDS(stannull, file = "bysfit/stannul.rds")
rm(list = c('stannull', 'log_liknull'))  # drop large fit objects to free memory

# ---- Null model with multilevel structure for cohort ----
stan_file_null_ml <- "bybrca/stan/nullml.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
nChain <- 4
stannullml <- rstan::stan(stan_file_null_ml,
                          data = gen_stan_data1(md),
                          cores = min(nChain, parallel::detectCores()),
                          chains = nChain,
                          iter = 1000,
                          # J1/J2: group counts for the two grouping levels --
                          # confirm against nullml.stan's data block
                          init = gen_inits1(J1 = 5, J2 = 11))
log_liknullml <- loo::extract_log_lik(stannullml, parameter_name = "log_lik")
loonullml <- loo::loo(log_liknullml)
print(loonullml)
# NOTE(review): loo's compare() is superseded by loo_compare() in newer loo
# releases -- confirm the installed version still exports compare().
compare(loonull, loonullml) #preference for the second model!
saveRDS(stannullml, file = "C:/RFactory/bymetabric_files/bysfit/nullml.rds")
rm(list = c('stannullml', 'log_liknullml'))

#---------------------------------------
## ---- Multilevel model with classical clinical covariates ----
stan_file_clin <- "bybrca/stan/clinml.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
nChain <- 4
stanclinml <- rstan::stan(stan_file_clin,
                          data = gen_stan_data2(md, formula = "~ size + grade +tumor_stage "),
                          cores = min(nChain, parallel::detectCores()),
                          chains = nChain,
                          iter = 1000,
                          # M: number of regression coefficients -- confirm
                          init = gen_inits2(J1 = 5, J2 = 11, M = 7))
# if (interactive())
#   shinystan::launch_shinystan(stanfit)
log_likclin <- loo::extract_log_lik(stanclinml, parameter_name = "log_lik")
looclin <- loo(log_likclin)
print(looclin)
compare(loonullml, looclin)
saveRDS(stanclinml, file = "bysfit/clin.rds")
rm(list = c('stanclinml', 'log_likclin'))

#---------------------------------------
## ---- Multilevel model with genomic covariates ----
#stan_file_gen <- "bybrca/stan/genselect.stan"
stan_file_gen <-"C:/RFactory/bymetabric/bybrca/stan/genml.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
# Single chain, 50 iterations only -- presumably a smoke test for this large
# model rather than a production fit; confirm.
nChain <- 1
stangene <- rstan::stan(stan_file_gen,
                        data = gen_stan_data3(md, es = gd),
                        cores = min(nChain, parallel::detectCores()),
                        chains = nChain,
                        iter = 50,
                        # M = 4715: presumably the genomic feature count
                        init = gen_inits3(J1 = 5, J2 = 11, M = 4715))
# if (interactive())
#   shinystan::launch_shinystan(stanfit)
likgene <- loo::extract_log_lik(stangene, parameter_name = "log_lik")
loogene <- loo(likgene)
saveRDS(stangene, file = "bysfit/gene.rds")
rm(list = c('stangene', 'likgene'))

#---------------------------------------
## ---- Clinico-genomic model (clinical + genomic covariates) ----
stan_file_clingen <- "bys/bys/clingenml.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
nChain <- 4
# NOTE(review): brcaES is not defined anywhere in this script (earlier fits
# use es = gd); this call fails unless brcaES already exists in the session.
stanclingene <- rstan::stan(stan_file_clingen,
                            data = gen_stan_data4(md, es = brcaES, formula = "~ stage + er + pr+ her2 "),
                            cores = min(nChain, parallel::detectCores()),
                            chains = nChain,
                            iter = 1000,
                            init = gen_inits4(J = 6, M = 12,
                                              M_g = 14666))
# if (interactive())
#   shinystan::launch_shinystan(stanfit)
likclingene <- loo::extract_log_lik(stanclingene, parameter_name = "log_lik")
looclingene <- loo(likclingene)
saveRDS(stanclingene, file = "bysfit/clingene.rds")
rm(list = c('stanclingene', 'likclingene'))
/rscript/runstan.R
no_license
csetraynor/bybrca
R
false
false
4,513
r
## Run Stan: fit a sequence of Bayesian survival models on the METABRIC
## breast-cancer data (null, multilevel null, clinical, genomic and
## clinico-genomic) and compare them via approximate leave-one-out
## cross-validation with the loo package.
# !diagnostics off
library(dplyr)
library(readr)
library(survival)
library(rstan)
library(loo)
library(caret)
library(Biobase)

# Load the clinical data (md) and genomic expression data (gd).
# NOTE(review): the two absolute C:/ reads are immediately overwritten by the
# relative-path reads that follow, so only the relative files take effect.
md <- read_rds("C:/RFactory/bymetabric_files/rdsmetabric/Med_Data_Clean.rds")
gd <- read_rds("C:/RFactory/bymetabric_files/rdsmetabric/Gen_Data.rds")
md <- read_rds("Med_Data_Clean.rds")
gd <- read_rds("Gen_Data.rds")
# Supplies the gen_stan_data0..4() and gen_inits0..4() helpers used below --
# presumably Stan data-list builders and initial-value generators; confirm.
load("Gen_data_fun.Rdata")

# ---- Null model (no covariates) ----
stan_file_null <- "bybrca/stan/null.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)  # cache compiled models to avoid recompiling
nChain <- 4
stannull <- rstan::stan(stan_file_null,
                        data = gen_stan_data0(md),
                        cores = min(nChain, parallel::detectCores()),
                        chains = nChain,
                        iter = 1000,
                        init = gen_inits0())
# Pointwise log-likelihood draws -> PSIS-LOO estimate of predictive fit.
log_liknull <- loo::extract_log_lik(stannull, parameter_name = "log_lik")
loonull <- loo::loo(log_liknull)
print(loonull)
saveRDS(stannull, file = "bysfit/stannul.rds")
rm(list = c('stannull', 'log_liknull'))  # drop large fit objects to free memory

# ---- Null model with multilevel structure for cohort ----
stan_file_null_ml <- "bybrca/stan/nullml.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
nChain <- 4
stannullml <- rstan::stan(stan_file_null_ml,
                          data = gen_stan_data1(md),
                          cores = min(nChain, parallel::detectCores()),
                          chains = nChain,
                          iter = 1000,
                          # J1/J2: group counts for the two grouping levels --
                          # confirm against nullml.stan's data block
                          init = gen_inits1(J1 = 5, J2 = 11))
log_liknullml <- loo::extract_log_lik(stannullml, parameter_name = "log_lik")
loonullml <- loo::loo(log_liknullml)
print(loonullml)
# NOTE(review): loo's compare() is superseded by loo_compare() in newer loo
# releases -- confirm the installed version still exports compare().
compare(loonull, loonullml) #preference for the second model!
saveRDS(stannullml, file = "C:/RFactory/bymetabric_files/bysfit/nullml.rds")
rm(list = c('stannullml', 'log_liknullml'))

#---------------------------------------
## ---- Multilevel model with classical clinical covariates ----
stan_file_clin <- "bybrca/stan/clinml.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
nChain <- 4
stanclinml <- rstan::stan(stan_file_clin,
                          data = gen_stan_data2(md, formula = "~ size + grade +tumor_stage "),
                          cores = min(nChain, parallel::detectCores()),
                          chains = nChain,
                          iter = 1000,
                          # M: number of regression coefficients -- confirm
                          init = gen_inits2(J1 = 5, J2 = 11, M = 7))
# if (interactive())
#   shinystan::launch_shinystan(stanfit)
log_likclin <- loo::extract_log_lik(stanclinml, parameter_name = "log_lik")
looclin <- loo(log_likclin)
print(looclin)
compare(loonullml, looclin)
saveRDS(stanclinml, file = "bysfit/clin.rds")
rm(list = c('stanclinml', 'log_likclin'))

#---------------------------------------
## ---- Multilevel model with genomic covariates ----
#stan_file_gen <- "bybrca/stan/genselect.stan"
stan_file_gen <-"C:/RFactory/bymetabric/bybrca/stan/genml.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
# Single chain, 50 iterations only -- presumably a smoke test for this large
# model rather than a production fit; confirm.
nChain <- 1
stangene <- rstan::stan(stan_file_gen,
                        data = gen_stan_data3(md, es = gd),
                        cores = min(nChain, parallel::detectCores()),
                        chains = nChain,
                        iter = 50,
                        # M = 4715: presumably the genomic feature count
                        init = gen_inits3(J1 = 5, J2 = 11, M = 4715))
# if (interactive())
#   shinystan::launch_shinystan(stanfit)
likgene <- loo::extract_log_lik(stangene, parameter_name = "log_lik")
loogene <- loo(likgene)
saveRDS(stangene, file = "bysfit/gene.rds")
rm(list = c('stangene', 'likgene'))

#---------------------------------------
## ---- Clinico-genomic model (clinical + genomic covariates) ----
stan_file_clingen <- "bys/bys/clingenml.stan"
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
nChain <- 4
# NOTE(review): brcaES is not defined anywhere in this script (earlier fits
# use es = gd); this call fails unless brcaES already exists in the session.
stanclingene <- rstan::stan(stan_file_clingen,
                            data = gen_stan_data4(md, es = brcaES, formula = "~ stage + er + pr+ her2 "),
                            cores = min(nChain, parallel::detectCores()),
                            chains = nChain,
                            iter = 1000,
                            init = gen_inits4(J = 6, M = 12,
                                              M_g = 14666))
# if (interactive())
#   shinystan::launch_shinystan(stanfit)
likclingene <- loo::extract_log_lik(stanclingene, parameter_name = "log_lik")
looclingene <- loo(likclingene)
saveRDS(stanclingene, file = "bysfit/clingene.rds")
rm(list = c('stanclingene', 'likclingene'))
## covr: skip=all

# Package attach hook: emit a startup banner of the form
# "<pkg> v<version> (<date>) successfully loaded. See ?<pkg> for help."
# The "(<date>)" part appears only when the DESCRIPTION file carries a Date.
# NOTE(review): pkgStartupMessage() is not a base R function; it is assumed
# to be defined elsewhere in this package -- confirm (base R provides
# packageStartupMessage()).
.onAttach <- function(libname, pkgname) {
  desc <- utils::packageDescription(pkgname)
  banner <- sprintf("%s v%s", pkgname, desc$Version)
  if (!is.null(desc$Date)) {
    banner <- sprintf("%s (%s)", banner, desc$Date)
  }
  banner <- sprintf("%s successfully loaded. See ?%s for help.", banner, pkgname)
  pkgStartupMessage(banner)
}
/R.methodsS3/R/zzz.R
no_license
ingted/R-Examples
R
false
false
320
r
## covr: skip=all

# Package attach hook: emit a startup banner of the form
# "<pkg> v<version> (<date>) successfully loaded. See ?<pkg> for help."
# The "(<date>)" part appears only when the DESCRIPTION file carries a Date.
# NOTE(review): pkgStartupMessage() is not a base R function; it is assumed
# to be defined elsewhere in this package -- confirm (base R provides
# packageStartupMessage()).
.onAttach <- function(libname, pkgname) {
  desc <- utils::packageDescription(pkgname)
  banner <- sprintf("%s v%s", pkgname, desc$Version)
  if (!is.null(desc$Date)) {
    banner <- sprintf("%s (%s)", banner, desc$Date)
  }
  banner <- sprintf("%s successfully loaded. See ?%s for help.", banner, pkgname)
  pkgStartupMessage(banner)
}
# Detailed prediction
# Step-ahead prediction utilities for software-reliability growth models:
# Jelinski-Moranda (JM_*), geometric (GM_*) and Goel-Okumoto (GO_*),
# plus NHPP reliability evaluation and a target-reliability time search.

# Count how many additional failures fit within the time budget t.
# x: fitted model parameters; the model family is detected from which
#    parameter names are present (JM_N0 / GM_D0 / GO_aMLE).
# t: time budget.
# n: NOTE(review) accepted but never used -- every branch indexes the
#    inter-failure terms with (t+i-1), whereas get_prediction_t below uses
#    (n+i-1); confirm whether t+i-1 is intended here.
# Returns the predicted failure count i, or a message string when no known
# model parameter is found.
get_prediction_n <- function(x,t,n){
  total_time <- 0
  print("Inside function")  # debug traces left in place
  print(t)
  i <- 0
  if('JM_N0' %in% names(x)){
    # JM: expected inter-failure time of step i is 1/(Phi*(N0 - index));
    # accumulate until the budget t is exceeded.
    while( total_time < t){
      i <- i + 1
      total_time <- (1/(x$JM_Phi*(x$JM_N0-(t+i-1)))) + total_time
      if(total_time > t){
        i <- i - 1  # last step overshot the budget: roll the count back
      }
    }
  }
  else if('GM_D0' %in% names(x)){
    # Geometric model: inter-failure time 1/(D0 * Phi^index).
    while( total_time < t){
      i <- i +1
      total_time <- (1/(x$GM_D0*(x$GM_Phi)^(t+i-1))) + total_time
      if(total_time > t){
        i <- i - 1
      }
    }
  }
  else if('GO_aMLE' %in% names(x)){
    # Goel-Okumoto: inter-failure time 1/(a * b^index).
    while(total_time < t){
      i <- i +1
      total_time <- (1/(x$GO_aMLE*(x$GO_bMLE)^(t+i-1))) + total_time
      if(total_time > t){
        i <- i - 1
      }
    }
  }
  else{
    i <- "Model parameter not defined"
  }
  i
}

# Predict the cumulative times of the next `steps` failures after failure n.
# Returns a vector of cumulative time points; the JM branch stores the string
# "NA" for steps beyond the model's remaining fault count.
# NOTE(review): mixing numeric values with the string "NA" coerces the whole
# result vector to character -- NA_real_ would keep it numeric; confirm what
# callers expect before changing.
get_prediction_t <- function(x,steps,n){
  t <-0
  time_indexes <- c()
  if('JM_N0' %in% names(x)){
    t <- 0
    if(steps!=0){
      for(i in 1:steps){
        if((x$JM_N0 - n)>=i){
          t <- (1/(x$JM_Phi*(x$JM_N0-(n+i-1)))) + t
          time_indexes[i] <- t
        }
        else{
          time_indexes[i] <- "NA"
        }
      }
    }
  }
  else if('GM_D0' %in% names(x)){
    t <- 0
    if(steps!=0){
      for(i in 1:steps){
        t <- (1/(x$GM_D0*(x$GM_Phi)^(n+i-1)))+ t
        time_indexes[i] <- t
        # Earlier bounded variant kept for reference:
        # if((x$GM_N0 - n)>=i){
        #   t <- (1/(x$GM_D0*(x$GM_Phi)^(n+i-1)))+ t
        #   time_indexes[i] <- t
        # }
        # else{
        #   time_indexes[i] <- "NA"
        # }
      }
    }
  }
  else if("GO_aMLE" %in% names(x)){
    t <- 0
    if(steps!=0){
      t_prev <- 0
      for(i in 1:steps){
        t_now <- (1/(x$GO_aMLE*(x$GO_bMLE)^(n+i-1)))
        t <- t_now + t_prev
        time_indexes[i] <- t
        t_prev <-t_now
      }
    }
  }
  else{
    return("Model Parameter not defined")
  }
  time_indexes
}

# Mean value function of the Goel-Okumoto NHPP: expected number of failures
# observed by time t.
mvf_nhpp <- function(a,b,t){
  return(a*(1-exp(-b*t)))
}

# Probability of zero failures in (cur_time, cur_time + delta] under the
# NHPP: exp(-(m(t+delta) - m(t))).
reliability_nhpp <- function(a,b,cur_time,delta){
  return(exp(-(mvf_nhpp(a,b,(cur_time+delta)) - mvf_nhpp(a,b,cur_time))))
}

# Root function for the target-time search: zero exactly when the mission
# reliability over the next delta, starting at cur_time, equals `reliability`.
reliability_nhpp_mle <- function(a,b,cur_time,delta, reliability){
  target_mission_time <- reliability - exp(a*(1-exp(-b*cur_time)) -a*(1-exp(-b*(cur_time+delta))))
  return(target_mission_time)
}

# Global iteration budget for uniroot(); grown by the retry handler below.
maxiter <- 1000

# Find the time at which mission reliability over an interval of length delta
# first reaches `reliability`. Returns the root, a message string when the
# target is already met at cur_time, or -- NOTE(review) -- possibly NULL when
# uniroot() raises a warning not matching "_NOT_ converged" or an error (the
# handlers have no explicit fallback value).
reliability_target_time <- function(a,b,cur_time,delta, reliability){
  f <- function(t){
    return(reliability_nhpp_mle(a,b,t,delta, reliability))
  }
  current_rel <- reliability_nhpp(a,b,cur_time,delta)
  if(current_rel < reliability){
    sol <- tryCatch(
      uniroot(f, c(cur_time,cur_time + 50),extendInt="yes", maxiter=maxiter, tol=1e-10)$root,
      warning = function(w){
        #print(f.lower)
        # On a non-convergence warning, enlarge the global iteration budget
        # (note the <<- assignment) and retry recursively.
        if(length(grep("_NOT_ converged",w[1]))>0){
          maxiter <<- maxiter+10
          print(paste("recursive", maxiter,sep='_'))
          reliability_target_time(a,b,cur_time,delta, reliability)
        }
      },
      error = function(e){
        print(e)
        #return(e)
      })
  }
  else {
    sol <- "Target reliability already achieved"
  }
  sol
}

# Build a 1001-point data frame of reliability over [0, cur_time] for
# plotting, with columns "Time" and "Reliability". The `reliability` argument
# is accepted but unused here.
# NOTE(review): assigning the string "NA" into column 2 coerces that column
# to character; also the typeof() guard can only trigger if
# reliability_nhpp() returned a character value, which the visible code never
# produces for numeric inputs.
reliability_target_time_plot <- function(a,b,cur_time,delta, reliability){
  r <-data.frame()
  tt_index <- seq(0,cur_time,cur_time/1000)
  for(i in 1:length(tt_index)){
    r[i,1] <- tt_index[i]
    temp <- reliability_nhpp(a,b,tt_index[i],delta)
    #print(typeof(temp))
    if(typeof(temp) != typeof("character")){
      r[i,2] <- temp
    }
    else{
      r[i,2] <- "NA"
    }
  }
  g <- data.frame(r[1],r[2])
  names(g) <- c("Time","Reliability")
  #print(g)
  g
}
/Detailed_prediction.R
no_license
betienne12/SRT
R
false
false
3,364
r
# Detailed prediction
# Step-ahead prediction utilities for software-reliability growth models:
# Jelinski-Moranda (JM_*), geometric (GM_*) and Goel-Okumoto (GO_*),
# plus NHPP reliability evaluation and a target-reliability time search.

# Count how many additional failures fit within the time budget t.
# x: fitted model parameters; the model family is detected from which
#    parameter names are present (JM_N0 / GM_D0 / GO_aMLE).
# t: time budget.
# n: NOTE(review) accepted but never used -- every branch indexes the
#    inter-failure terms with (t+i-1), whereas get_prediction_t below uses
#    (n+i-1); confirm whether t+i-1 is intended here.
# Returns the predicted failure count i, or a message string when no known
# model parameter is found.
get_prediction_n <- function(x,t,n){
  total_time <- 0
  print("Inside function")  # debug traces left in place
  print(t)
  i <- 0
  if('JM_N0' %in% names(x)){
    # JM: expected inter-failure time of step i is 1/(Phi*(N0 - index));
    # accumulate until the budget t is exceeded.
    while( total_time < t){
      i <- i + 1
      total_time <- (1/(x$JM_Phi*(x$JM_N0-(t+i-1)))) + total_time
      if(total_time > t){
        i <- i - 1  # last step overshot the budget: roll the count back
      }
    }
  }
  else if('GM_D0' %in% names(x)){
    # Geometric model: inter-failure time 1/(D0 * Phi^index).
    while( total_time < t){
      i <- i +1
      total_time <- (1/(x$GM_D0*(x$GM_Phi)^(t+i-1))) + total_time
      if(total_time > t){
        i <- i - 1
      }
    }
  }
  else if('GO_aMLE' %in% names(x)){
    # Goel-Okumoto: inter-failure time 1/(a * b^index).
    while(total_time < t){
      i <- i +1
      total_time <- (1/(x$GO_aMLE*(x$GO_bMLE)^(t+i-1))) + total_time
      if(total_time > t){
        i <- i - 1
      }
    }
  }
  else{
    i <- "Model parameter not defined"
  }
  i
}

# Predict the cumulative times of the next `steps` failures after failure n.
# Returns a vector of cumulative time points; the JM branch stores the string
# "NA" for steps beyond the model's remaining fault count.
# NOTE(review): mixing numeric values with the string "NA" coerces the whole
# result vector to character -- NA_real_ would keep it numeric; confirm what
# callers expect before changing.
get_prediction_t <- function(x,steps,n){
  t <-0
  time_indexes <- c()
  if('JM_N0' %in% names(x)){
    t <- 0
    if(steps!=0){
      for(i in 1:steps){
        if((x$JM_N0 - n)>=i){
          t <- (1/(x$JM_Phi*(x$JM_N0-(n+i-1)))) + t
          time_indexes[i] <- t
        }
        else{
          time_indexes[i] <- "NA"
        }
      }
    }
  }
  else if('GM_D0' %in% names(x)){
    t <- 0
    if(steps!=0){
      for(i in 1:steps){
        t <- (1/(x$GM_D0*(x$GM_Phi)^(n+i-1)))+ t
        time_indexes[i] <- t
        # Earlier bounded variant kept for reference:
        # if((x$GM_N0 - n)>=i){
        #   t <- (1/(x$GM_D0*(x$GM_Phi)^(n+i-1)))+ t
        #   time_indexes[i] <- t
        # }
        # else{
        #   time_indexes[i] <- "NA"
        # }
      }
    }
  }
  else if("GO_aMLE" %in% names(x)){
    t <- 0
    if(steps!=0){
      t_prev <- 0
      for(i in 1:steps){
        t_now <- (1/(x$GO_aMLE*(x$GO_bMLE)^(n+i-1)))
        t <- t_now + t_prev
        time_indexes[i] <- t
        t_prev <-t_now
      }
    }
  }
  else{
    return("Model Parameter not defined")
  }
  time_indexes
}

# Mean value function of the Goel-Okumoto NHPP: expected number of failures
# observed by time t.
mvf_nhpp <- function(a,b,t){
  return(a*(1-exp(-b*t)))
}

# Probability of zero failures in (cur_time, cur_time + delta] under the
# NHPP: exp(-(m(t+delta) - m(t))).
reliability_nhpp <- function(a,b,cur_time,delta){
  return(exp(-(mvf_nhpp(a,b,(cur_time+delta)) - mvf_nhpp(a,b,cur_time))))
}

# Root function for the target-time search: zero exactly when the mission
# reliability over the next delta, starting at cur_time, equals `reliability`.
reliability_nhpp_mle <- function(a,b,cur_time,delta, reliability){
  target_mission_time <- reliability - exp(a*(1-exp(-b*cur_time)) -a*(1-exp(-b*(cur_time+delta))))
  return(target_mission_time)
}

# Global iteration budget for uniroot(); grown by the retry handler below.
maxiter <- 1000

# Find the time at which mission reliability over an interval of length delta
# first reaches `reliability`. Returns the root, a message string when the
# target is already met at cur_time, or -- NOTE(review) -- possibly NULL when
# uniroot() raises a warning not matching "_NOT_ converged" or an error (the
# handlers have no explicit fallback value).
reliability_target_time <- function(a,b,cur_time,delta, reliability){
  f <- function(t){
    return(reliability_nhpp_mle(a,b,t,delta, reliability))
  }
  current_rel <- reliability_nhpp(a,b,cur_time,delta)
  if(current_rel < reliability){
    sol <- tryCatch(
      uniroot(f, c(cur_time,cur_time + 50),extendInt="yes", maxiter=maxiter, tol=1e-10)$root,
      warning = function(w){
        #print(f.lower)
        # On a non-convergence warning, enlarge the global iteration budget
        # (note the <<- assignment) and retry recursively.
        if(length(grep("_NOT_ converged",w[1]))>0){
          maxiter <<- maxiter+10
          print(paste("recursive", maxiter,sep='_'))
          reliability_target_time(a,b,cur_time,delta, reliability)
        }
      },
      error = function(e){
        print(e)
        #return(e)
      })
  }
  else {
    sol <- "Target reliability already achieved"
  }
  sol
}

# Build a 1001-point data frame of reliability over [0, cur_time] for
# plotting, with columns "Time" and "Reliability". The `reliability` argument
# is accepted but unused here.
# NOTE(review): assigning the string "NA" into column 2 coerces that column
# to character; also the typeof() guard can only trigger if
# reliability_nhpp() returned a character value, which the visible code never
# produces for numeric inputs.
reliability_target_time_plot <- function(a,b,cur_time,delta, reliability){
  r <-data.frame()
  tt_index <- seq(0,cur_time,cur_time/1000)
  for(i in 1:length(tt_index)){
    r[i,1] <- tt_index[i]
    temp <- reliability_nhpp(a,b,tt_index[i],delta)
    #print(typeof(temp))
    if(typeof(temp) != typeof("character")){
      r[i,2] <- temp
    }
    else{
      r[i,2] <- "NA"
    }
  }
  g <- data.frame(r[1],r[2])
  names(g) <- c("Time","Reliability")
  #print(g)
  g
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simPwr.mod.R \name{plot.simPwr.mod} \alias{plot.simPwr.mod} \title{Plot method for simPwr.mod} \usage{ \method{plot}{simPwr.mod}(x, pval = 0.05, ...) } \arguments{ \item{x}{an object of class simPower.moderation} \item{pval}{type I error} } \description{ This function allows you to plot the power of an object of class simPower.moderation } \examples{ plot(x) } \keyword{interaction} \keyword{moderation} \keyword{plot} \keyword{powercurve} \keyword{regression}
/simPower/man/plot.simPwr.mod.Rd
no_license
PeterVerboon/Power-computations
R
false
true
542
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simPwr.mod.R \name{plot.simPwr.mod} \alias{plot.simPwr.mod} \title{Plot method for simPwr.mod} \usage{ \method{plot}{simPwr.mod}(x, pval = 0.05, ...) } \arguments{ \item{x}{an object of class simPower.moderation} \item{pval}{type I error} } \description{ This function allows you to plot the power of an object of class simPower.moderation } \examples{ plot(x) } \keyword{interaction} \keyword{moderation} \keyword{plot} \keyword{powercurve} \keyword{regression}